author Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com> 2023-10-10 14:33:42 +0000
committer Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com> 2023-10-10 14:33:42 +0000
commit af1a266670d040d2f4083ff309d732d648afba2a (patch)
tree 2fc46203448ddcc6f81546d379abfaeb323575e9 /roms/opensbi/lib/sbi
parent e02cda008591317b1625707ff8e115a4841aa889 (diff)

Add submodule dependency files
Change-Id: Iaf8d18082d3991dec7c0ebbea540f092188eb4ec
Diffstat (limited to 'roms/opensbi/lib/sbi')
-rw-r--r-- roms/opensbi/lib/sbi/objects.mk | 43
-rw-r--r-- roms/opensbi/lib/sbi/riscv_asm.c | 295
-rw-r--r-- roms/opensbi/lib/sbi/riscv_atomic.c | 255
-rw-r--r-- roms/opensbi/lib/sbi/riscv_hardfp.S | 171
-rw-r--r-- roms/opensbi/lib/sbi/riscv_locks.c | 45
-rw-r--r-- roms/opensbi/lib/sbi/sbi_bitmap.c | 40
-rw-r--r-- roms/opensbi/lib/sbi/sbi_bitops.c | 200
-rw-r--r-- roms/opensbi/lib/sbi/sbi_console.c | 398
-rw-r--r-- roms/opensbi/lib/sbi/sbi_domain.c | 539
-rw-r--r-- roms/opensbi/lib/sbi/sbi_ecall.c | 175
-rw-r--r-- roms/opensbi/lib/sbi/sbi_ecall_base.c | 79
-rw-r--r-- roms/opensbi/lib/sbi/sbi_ecall_hsm.c | 59
-rw-r--r-- roms/opensbi/lib/sbi/sbi_ecall_legacy.c | 124
-rw-r--r-- roms/opensbi/lib/sbi/sbi_ecall_replace.c | 196
-rw-r--r-- roms/opensbi/lib/sbi/sbi_ecall_vendor.c | 40
-rw-r--r-- roms/opensbi/lib/sbi/sbi_emulate_csr.c | 188
-rw-r--r-- roms/opensbi/lib/sbi/sbi_expected_trap.S | 56
-rw-r--r-- roms/opensbi/lib/sbi/sbi_fifo.c | 192
-rw-r--r-- roms/opensbi/lib/sbi/sbi_hart.c | 536
-rw-r--r-- roms/opensbi/lib/sbi/sbi_hfence.S | 135
-rw-r--r-- roms/opensbi/lib/sbi/sbi_hsm.c | 291
-rw-r--r-- roms/opensbi/lib/sbi/sbi_illegal_insn.c | 143
-rw-r--r-- roms/opensbi/lib/sbi/sbi_init.c | 475
-rw-r--r-- roms/opensbi/lib/sbi/sbi_ipi.c | 254
-rw-r--r-- roms/opensbi/lib/sbi/sbi_math.c | 23
-rw-r--r-- roms/opensbi/lib/sbi/sbi_misaligned_ldst.c | 243
-rw-r--r-- roms/opensbi/lib/sbi/sbi_platform.c | 90
-rw-r--r-- roms/opensbi/lib/sbi/sbi_scratch.c | 99
-rw-r--r-- roms/opensbi/lib/sbi/sbi_string.c | 188
-rw-r--r-- roms/opensbi/lib/sbi/sbi_system.c | 56
-rw-r--r-- roms/opensbi/lib/sbi/sbi_timer.c | 136
-rw-r--r-- roms/opensbi/lib/sbi/sbi_tlb.c | 429
-rw-r--r-- roms/opensbi/lib/sbi/sbi_trap.c | 293
-rw-r--r-- roms/opensbi/lib/sbi/sbi_unpriv.c | 165
34 files changed, 6651 insertions(+), 0 deletions(-)
diff --git a/roms/opensbi/lib/sbi/objects.mk b/roms/opensbi/lib/sbi/objects.mk
new file mode 100644
index 000000000..6f2c06f5b
--- /dev/null
+++ b/roms/opensbi/lib/sbi/objects.mk
@@ -0,0 +1,43 @@
+#
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+#
+# Authors:
+# Anup Patel <anup.patel@wdc.com>
+#
+
+libsbi-objs-y += riscv_asm.o
+libsbi-objs-y += riscv_atomic.o
+libsbi-objs-y += riscv_hardfp.o
+libsbi-objs-y += riscv_locks.o
+
+libsbi-objs-y += sbi_bitmap.o
+libsbi-objs-y += sbi_bitops.o
+libsbi-objs-y += sbi_console.o
+libsbi-objs-y += sbi_domain.o
+libsbi-objs-y += sbi_ecall.o
+libsbi-objs-y += sbi_ecall_base.o
+libsbi-objs-y += sbi_ecall_hsm.o
+libsbi-objs-y += sbi_ecall_legacy.o
+libsbi-objs-y += sbi_ecall_replace.o
+libsbi-objs-y += sbi_ecall_vendor.o
+libsbi-objs-y += sbi_emulate_csr.o
+libsbi-objs-y += sbi_fifo.o
+libsbi-objs-y += sbi_hart.o
+libsbi-objs-y += sbi_math.o
+libsbi-objs-y += sbi_hfence.o
+libsbi-objs-y += sbi_hsm.o
+libsbi-objs-y += sbi_illegal_insn.o
+libsbi-objs-y += sbi_init.o
+libsbi-objs-y += sbi_ipi.o
+libsbi-objs-y += sbi_misaligned_ldst.o
+libsbi-objs-y += sbi_platform.o
+libsbi-objs-y += sbi_scratch.o
+libsbi-objs-y += sbi_string.o
+libsbi-objs-y += sbi_system.o
+libsbi-objs-y += sbi_timer.o
+libsbi-objs-y += sbi_tlb.o
+libsbi-objs-y += sbi_trap.o
+libsbi-objs-y += sbi_unpriv.o
+libsbi-objs-y += sbi_expected_trap.o
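
The object list above is the single hook-up point for libsbi sources. As a minimal sketch (the file name sbi_foo.c is hypothetical, not part of this patch), adding a new lib/sbi source needs only the .c file plus one more line in this list:

libsbi-objs-y += sbi_foo.o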
diff --git a/roms/opensbi/lib/sbi/riscv_asm.c b/roms/opensbi/lib/sbi/riscv_asm.c
new file mode 100644
index 000000000..8c54c1114
--- /dev/null
+++ b/roms/opensbi/lib/sbi/riscv_asm.c
@@ -0,0 +1,295 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_platform.h>
+
+/* Determine whether the hart implements a misa extension; returns non-zero if supported */
+int misa_extension_imp(char ext)
+{
+ unsigned long misa = csr_read(CSR_MISA);
+
+ if (misa) {
+ if ('A' <= ext && ext <= 'Z')
+ return misa & (1 << (ext - 'A'));
+ if ('a' <= ext && ext <= 'z')
+ return misa & (1 << (ext - 'a'));
+ return 0;
+ }
+
+ return sbi_platform_misa_extension(sbi_platform_thishart_ptr(), ext);
+}
+
+int misa_xlen(void)
+{
+ long r;
+
+ if (csr_read(CSR_MISA) == 0)
+ return sbi_platform_misa_xlen(sbi_platform_thishart_ptr());
+
+ __asm__ __volatile__(
+ "csrr t0, misa\n\t"
+ "slti t1, t0, 0\n\t"
+ "slli t1, t1, 1\n\t"
+ "slli t0, t0, 1\n\t"
+ "slti t0, t0, 0\n\t"
+ "add %0, t0, t1"
+ : "=r"(r)
+ :
+ : "t0", "t1");
+
+ return r ? r : -1;
+}
+
+void misa_string(int xlen, char *out, unsigned int out_sz)
+{
+ unsigned int i, pos = 0;
+ const char valid_isa_order[] = "iemafdqclbjtpvnsuhkorwxyzg";
+
+ if (!out)
+ return;
+
+ if (5 <= (out_sz - pos)) {
+ out[pos++] = 'r';
+ out[pos++] = 'v';
+ switch (xlen) {
+ case 1:
+ out[pos++] = '3';
+ out[pos++] = '2';
+ break;
+ case 2:
+ out[pos++] = '6';
+ out[pos++] = '4';
+ break;
+ case 3:
+ out[pos++] = '1';
+ out[pos++] = '2';
+ out[pos++] = '8';
+ break;
+ default:
+ return;
+ }
+ }
+
+ for (i = 0; i < array_size(valid_isa_order) && (pos < out_sz); i++) {
+ if (misa_extension_imp(valid_isa_order[i]))
+ out[pos++] = valid_isa_order[i];
+ }
+
+ if (pos < out_sz)
+ out[pos++] = '\0';
+}
+
+unsigned long csr_read_num(int csr_num)
+{
+#define switchcase_csr_read(__csr_num, __val) \
+ case __csr_num: \
+ __val = csr_read(__csr_num); \
+ break;
+#define switchcase_csr_read_2(__csr_num, __val) \
+ switchcase_csr_read(__csr_num + 0, __val) \
+ switchcase_csr_read(__csr_num + 1, __val)
+#define switchcase_csr_read_4(__csr_num, __val) \
+ switchcase_csr_read_2(__csr_num + 0, __val) \
+ switchcase_csr_read_2(__csr_num + 2, __val)
+#define switchcase_csr_read_8(__csr_num, __val) \
+ switchcase_csr_read_4(__csr_num + 0, __val) \
+ switchcase_csr_read_4(__csr_num + 4, __val)
+#define switchcase_csr_read_16(__csr_num, __val) \
+ switchcase_csr_read_8(__csr_num + 0, __val) \
+ switchcase_csr_read_8(__csr_num + 8, __val)
+#define switchcase_csr_read_32(__csr_num, __val) \
+ switchcase_csr_read_16(__csr_num + 0, __val) \
+ switchcase_csr_read_16(__csr_num + 16, __val)
+#define switchcase_csr_read_64(__csr_num, __val) \
+ switchcase_csr_read_32(__csr_num + 0, __val) \
+ switchcase_csr_read_32(__csr_num + 32, __val)
+
+ unsigned long ret = 0;
+
+ switch (csr_num) {
+ switchcase_csr_read_16(CSR_PMPCFG0, ret)
+ switchcase_csr_read_64(CSR_PMPADDR0, ret)
+ default:
+ break;
+ };
+
+ return ret;
+
+#undef switchcase_csr_read_64
+#undef switchcase_csr_read_32
+#undef switchcase_csr_read_16
+#undef switchcase_csr_read_8
+#undef switchcase_csr_read_4
+#undef switchcase_csr_read_2
+#undef switchcase_csr_read
+}
+
+void csr_write_num(int csr_num, unsigned long val)
+{
+#define switchcase_csr_write(__csr_num, __val) \
+ case __csr_num: \
+ csr_write(__csr_num, __val); \
+ break;
+#define switchcase_csr_write_2(__csr_num, __val) \
+ switchcase_csr_write(__csr_num + 0, __val) \
+ switchcase_csr_write(__csr_num + 1, __val)
+#define switchcase_csr_write_4(__csr_num, __val) \
+ switchcase_csr_write_2(__csr_num + 0, __val) \
+ switchcase_csr_write_2(__csr_num + 2, __val)
+#define switchcase_csr_write_8(__csr_num, __val) \
+ switchcase_csr_write_4(__csr_num + 0, __val) \
+ switchcase_csr_write_4(__csr_num + 4, __val)
+#define switchcase_csr_write_16(__csr_num, __val) \
+ switchcase_csr_write_8(__csr_num + 0, __val) \
+ switchcase_csr_write_8(__csr_num + 8, __val)
+#define switchcase_csr_write_32(__csr_num, __val) \
+ switchcase_csr_write_16(__csr_num + 0, __val) \
+ switchcase_csr_write_16(__csr_num + 16, __val)
+#define switchcase_csr_write_64(__csr_num, __val) \
+ switchcase_csr_write_32(__csr_num + 0, __val) \
+ switchcase_csr_write_32(__csr_num + 32, __val)
+
+ switch (csr_num) {
+ switchcase_csr_write_16(CSR_PMPCFG0, val)
+ switchcase_csr_write_64(CSR_PMPADDR0, val)
+ default:
+ break;
+ };
+
+#undef switchcase_csr_write_64
+#undef switchcase_csr_write_32
+#undef switchcase_csr_write_16
+#undef switchcase_csr_write_8
+#undef switchcase_csr_write_4
+#undef switchcase_csr_write_2
+#undef switchcase_csr_write
+}
+
+static unsigned long ctz(unsigned long x)
+{
+ unsigned long ret = 0;
+
+ while (!(x & 1UL)) {
+ ret++;
+ x = x >> 1;
+ }
+
+ return ret;
+}
+
+int pmp_set(unsigned int n, unsigned long prot, unsigned long addr,
+ unsigned long log2len)
+{
+ int pmpcfg_csr, pmpcfg_shift, pmpaddr_csr;
+ unsigned long cfgmask, pmpcfg;
+ unsigned long addrmask, pmpaddr;
+
+ /* check parameters */
+ if (n >= PMP_COUNT || log2len > __riscv_xlen || log2len < PMP_SHIFT)
+ return SBI_EINVAL;
+
+ /* calculate PMP register and offset */
+#if __riscv_xlen == 32
+ pmpcfg_csr = CSR_PMPCFG0 + (n >> 2);
+ pmpcfg_shift = (n & 3) << 3;
+#elif __riscv_xlen == 64
+ pmpcfg_csr = (CSR_PMPCFG0 + (n >> 2)) & ~1;
+ pmpcfg_shift = (n & 7) << 3;
+#else
+ pmpcfg_csr = -1;
+ pmpcfg_shift = -1;
+#endif
+ pmpaddr_csr = CSR_PMPADDR0 + n;
+ if (pmpcfg_csr < 0 || pmpcfg_shift < 0)
+ return SBI_ENOTSUPP;
+
+ /* encode PMP config */
+ prot |= (log2len == PMP_SHIFT) ? PMP_A_NA4 : PMP_A_NAPOT;
+ cfgmask = ~(0xffUL << pmpcfg_shift);
+ pmpcfg = (csr_read_num(pmpcfg_csr) & cfgmask);
+ pmpcfg |= ((prot << pmpcfg_shift) & ~cfgmask);
+
+ /* encode PMP address */
+ if (log2len == PMP_SHIFT) {
+ pmpaddr = (addr >> PMP_SHIFT);
+ } else {
+ if (log2len == __riscv_xlen) {
+ pmpaddr = -1UL;
+ } else {
+ addrmask = (1UL << (log2len - PMP_SHIFT)) - 1;
+ pmpaddr = ((addr >> PMP_SHIFT) & ~addrmask);
+ pmpaddr |= (addrmask >> 1);
+ }
+ }
+
+ /* write csrs */
+ csr_write_num(pmpaddr_csr, pmpaddr);
+ csr_write_num(pmpcfg_csr, pmpcfg);
+
+ return 0;
+}
+
+int pmp_get(unsigned int n, unsigned long *prot_out, unsigned long *addr_out,
+ unsigned long *log2len)
+{
+ int pmpcfg_csr, pmpcfg_shift, pmpaddr_csr;
+ unsigned long cfgmask, pmpcfg, prot;
+ unsigned long t1, addr, len;
+
+ /* check parameters */
+ if (n >= PMP_COUNT || !prot_out || !addr_out || !log2len)
+ return SBI_EINVAL;
+ *prot_out = *addr_out = *log2len = 0;
+
+ /* calculate PMP register and offset */
+#if __riscv_xlen == 32
+ pmpcfg_csr = CSR_PMPCFG0 + (n >> 2);
+ pmpcfg_shift = (n & 3) << 3;
+#elif __riscv_xlen == 64
+ pmpcfg_csr = (CSR_PMPCFG0 + (n >> 2)) & ~1;
+ pmpcfg_shift = (n & 7) << 3;
+#else
+ pmpcfg_csr = -1;
+ pmpcfg_shift = -1;
+#endif
+ pmpaddr_csr = CSR_PMPADDR0 + n;
+ if (pmpcfg_csr < 0 || pmpcfg_shift < 0)
+ return SBI_ENOTSUPP;
+
+ /* decode PMP config */
+ cfgmask = (0xffUL << pmpcfg_shift);
+ pmpcfg = csr_read_num(pmpcfg_csr) & cfgmask;
+ prot = pmpcfg >> pmpcfg_shift;
+
+ /* decode PMP address */
+ if ((prot & PMP_A) == PMP_A_NAPOT) {
+ addr = csr_read_num(pmpaddr_csr);
+ if (addr == -1UL) {
+ addr = 0;
+ len = __riscv_xlen;
+ } else {
+ t1 = ctz(~addr);
+ addr = (addr & ~((1UL << t1) - 1)) << PMP_SHIFT;
+ len = (t1 + PMP_SHIFT + 1);
+ }
+ } else {
+ addr = csr_read_num(pmpaddr_csr) << PMP_SHIFT;
+ len = PMP_SHIFT;
+ }
+
+ /* return details */
+ *prot_out = prot;
+ *addr_out = addr;
+ *log2len = len;
+
+ return 0;
+}
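
pmp_set() and pmp_get() above round-trip the NAPOT encoding: the low bits of pmpaddr carry the region size as a run of ones. A minimal host-side sketch of the same arithmetic, assuming PMP_SHIFT is 2 as in the RISC-V privileged spec (no CSRs are touched; names are illustrative):

#include <stdio.h>

#define PMP_SHIFT 2	/* pmpaddr holds physical address bits [XLEN+1:2] */

/* Mirror of the NAPOT branch in pmp_set(): clear the size bits,
 * then set a run of ones below them to encode the region length. */
static unsigned long napot_encode(unsigned long addr, unsigned long log2len)
{
	unsigned long mask = (1UL << (log2len - PMP_SHIFT)) - 1;

	return ((addr >> PMP_SHIFT) & ~mask) | (mask >> 1);
}

int main(void)
{
	/* A 4 KiB region at 0x80000000 => log2len = 12 */
	printf("pmpaddr = 0x%lx\n", napot_encode(0x80000000UL, 12));
	return 0;	/* prints 0x200001ff */
}

pmp_get() reverses this by counting the trailing ones with ctz(~addr), which is where the t1 + PMP_SHIFT + 1 expression above comes from.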
diff --git a/roms/opensbi/lib/sbi/riscv_atomic.c b/roms/opensbi/lib/sbi/riscv_atomic.c
new file mode 100644
index 000000000..558bca8c1
--- /dev/null
+++ b/roms/opensbi/lib/sbi/riscv_atomic.c
@@ -0,0 +1,255 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/sbi_bitops.h>
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_barrier.h>
+
+long atomic_read(atomic_t *atom)
+{
+ long ret = atom->counter;
+ rmb();
+ return ret;
+}
+
+void atomic_write(atomic_t *atom, long value)
+{
+ atom->counter = value;
+ wmb();
+}
+
+long atomic_add_return(atomic_t *atom, long value)
+{
+ long ret;
+
+ __asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
+ : "+A"(atom->counter), "=r"(ret)
+ : "r"(value)
+ : "memory");
+
+ return ret + value;
+}
+
+long atomic_sub_return(atomic_t *atom, long value)
+{
+ long ret;
+
+ __asm__ __volatile__(" amoadd.w.aqrl %1, %2, %0"
+ : "+A"(atom->counter), "=r"(ret)
+ : "r"(-value)
+ : "memory");
+
+ return ret - value;
+}
+
+#define __axchg(ptr, new, size) \
+ ({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ break; \
+ } \
+ __ret; \
+ })
+
+#define axchg(ptr, x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __axchg((ptr), _x_, sizeof(*(ptr))); \
+ })
+
+
+#define __xchg(ptr, new, size) \
+ ({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__("0: lr.w %0, %2\n" \
+ " sc.w.rl %1, %z3, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__("0: lr.d %0, %2\n" \
+ " sc.d.rl %1, %z3, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__new) \
+ : "memory"); \
+ break; \
+ default: \
+ break; \
+ } \
+ __ret; \
+ })
+
+#define xchg(ptr, n) \
+ ({ \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __xchg((ptr), _n_, sizeof(*(ptr))); \
+ })
+
+#define __cmpxchg(ptr, old, new, size) \
+ ({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__("0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__old), "rJ"(__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__("0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r"(__ret), "=&r"(__rc), \
+ "+A"(*__ptr) \
+ : "rJ"(__old), "rJ"(__new) \
+ : "memory"); \
+ break; \
+ default: \
+ break; \
+ } \
+ __ret; \
+ })
+
+#define cmpxchg(ptr, o, n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) \
+ __cmpxchg((ptr), _o_, _n_, sizeof(*(ptr))); \
+ })
+
+long atomic_cmpxchg(atomic_t *atom, long oldval, long newval)
+{
+#ifdef __riscv_atomic
+ return __sync_val_compare_and_swap(&atom->counter, oldval, newval);
+#else
+ return cmpxchg(&atom->counter, oldval, newval);
+#endif
+}
+
+long atomic_xchg(atomic_t *atom, long newval)
+{
+ /* Atomically set new value and return old value. */
+#ifdef __riscv_atomic
+ return axchg(&atom->counter, newval);
+#else
+ return xchg(&atom->counter, newval);
+#endif
+}
+
+unsigned int atomic_raw_xchg_uint(volatile unsigned int *ptr,
+ unsigned int newval)
+{
+ /* Atomically set new value and return old value. */
+#ifdef __riscv_atomic
+ return axchg(ptr, newval);
+#else
+ return xchg(ptr, newval);
+#endif
+}
+
+unsigned long atomic_raw_xchg_ulong(volatile unsigned long *ptr,
+ unsigned long newval)
+{
+ /* Atomically set new value and return old value. */
+#ifdef __riscv_atomic
+ return axchg(ptr, newval);
+#else
+ return xchg(ptr, newval);
+#endif
+}
+
+#if (__SIZEOF_POINTER__ == 8)
+#define __AMO(op) "amo" #op ".d"
+#elif (__SIZEOF_POINTER__ == 4)
+#define __AMO(op) "amo" #op ".w"
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#define __atomic_op_bit_ord(op, mod, nr, addr, ord) \
+ ({ \
+ unsigned long __res, __mask; \
+ __mask = BIT_MASK(nr); \
+ __asm__ __volatile__(__AMO(op) #ord " %0, %2, %1" \
+ : "=r"(__res), "+A"(addr[BIT_WORD(nr)]) \
+ : "r"(mod(__mask)) \
+ : "memory"); \
+ __res; \
+ })
+
+#define __atomic_op_bit(op, mod, nr, addr) \
+ __atomic_op_bit_ord(op, mod, nr, addr, .aqrl)
+
+/* Bitmask modifiers */
+#define __NOP(x) (x)
+#define __NOT(x) (~(x))
+
+inline int atomic_raw_set_bit(int nr, volatile unsigned long *addr)
+{
+ return __atomic_op_bit(or, __NOP, nr, addr);
+}
+
+inline int atomic_raw_clear_bit(int nr, volatile unsigned long *addr)
+{
+ return __atomic_op_bit(and, __NOT, nr, addr);
+}
+
+inline int atomic_set_bit(int nr, atomic_t *atom)
+{
+ return atomic_raw_set_bit(nr, (unsigned long *)&atom->counter);
+}
+
+inline int atomic_clear_bit(int nr, atomic_t *atom)
+{
+ return atomic_raw_clear_bit(nr, (unsigned long *)&atom->counter);
+}
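
atomic_cmpxchg() above (via __sync_val_compare_and_swap on A-extension builds, or the lr/sc __cmpxchg loop otherwise) returns the value observed before the swap, which is what retry loops key on. A hedged usage sketch built only on this file's API (the bounded counter itself is illustrative, not from the patch):

/* Saturating increment built on compare-and-swap: retry whenever
 * another hart changed the counter between the read and the CAS. */
static long bounded_inc(atomic_t *ctr, long limit)
{
	long old, new;

	do {
		old = atomic_read(ctr);
		if (old >= limit)
			return old;	/* already saturated, no write */
		new = old + 1;
	} while (atomic_cmpxchg(ctr, old, new) != old);

	return new;
}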
diff --git a/roms/opensbi/lib/sbi/riscv_hardfp.S b/roms/opensbi/lib/sbi/riscv_hardfp.S
new file mode 100644
index 000000000..f363908a4
--- /dev/null
+++ b/roms/opensbi/lib/sbi/riscv_hardfp.S
@@ -0,0 +1,171 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#ifdef __riscv_flen
+
+#if __riscv_flen != 64
+# error single-float only is not supported
+#endif
+
+#define get_f32(which) fmv.x.s a0, which; jr t0
+#define put_f32(which) fmv.s.x which, a0; jr t0
+#if __riscv_xlen == 64
+# define get_f64(which) fmv.x.d a0, which; jr t0
+# define put_f64(which) fmv.d.x which, a0; jr t0
+#else
+# define get_f64(which) fsd which, 0(a0); jr t0
+# define put_f64(which) fld which, 0(a0); jr t0
+#endif
+
+ .text
+ .option norvc
+ .globl get_f32_reg
+ get_f32_reg:
+ get_f32(f0)
+ get_f32(f1)
+ get_f32(f2)
+ get_f32(f3)
+ get_f32(f4)
+ get_f32(f5)
+ get_f32(f6)
+ get_f32(f7)
+ get_f32(f8)
+ get_f32(f9)
+ get_f32(f10)
+ get_f32(f11)
+ get_f32(f12)
+ get_f32(f13)
+ get_f32(f14)
+ get_f32(f15)
+ get_f32(f16)
+ get_f32(f17)
+ get_f32(f18)
+ get_f32(f19)
+ get_f32(f20)
+ get_f32(f21)
+ get_f32(f22)
+ get_f32(f23)
+ get_f32(f24)
+ get_f32(f25)
+ get_f32(f26)
+ get_f32(f27)
+ get_f32(f28)
+ get_f32(f29)
+ get_f32(f30)
+ get_f32(f31)
+
+ .text
+ .globl put_f32_reg
+ put_f32_reg:
+ put_f32(f0)
+ put_f32(f1)
+ put_f32(f2)
+ put_f32(f3)
+ put_f32(f4)
+ put_f32(f5)
+ put_f32(f6)
+ put_f32(f7)
+ put_f32(f8)
+ put_f32(f9)
+ put_f32(f10)
+ put_f32(f11)
+ put_f32(f12)
+ put_f32(f13)
+ put_f32(f14)
+ put_f32(f15)
+ put_f32(f16)
+ put_f32(f17)
+ put_f32(f18)
+ put_f32(f19)
+ put_f32(f20)
+ put_f32(f21)
+ put_f32(f22)
+ put_f32(f23)
+ put_f32(f24)
+ put_f32(f25)
+ put_f32(f26)
+ put_f32(f27)
+ put_f32(f28)
+ put_f32(f29)
+ put_f32(f30)
+ put_f32(f31)
+
+ .text
+ .globl get_f64_reg
+ get_f64_reg:
+ get_f64(f0)
+ get_f64(f1)
+ get_f64(f2)
+ get_f64(f3)
+ get_f64(f4)
+ get_f64(f5)
+ get_f64(f6)
+ get_f64(f7)
+ get_f64(f8)
+ get_f64(f9)
+ get_f64(f10)
+ get_f64(f11)
+ get_f64(f12)
+ get_f64(f13)
+ get_f64(f14)
+ get_f64(f15)
+ get_f64(f16)
+ get_f64(f17)
+ get_f64(f18)
+ get_f64(f19)
+ get_f64(f20)
+ get_f64(f21)
+ get_f64(f22)
+ get_f64(f23)
+ get_f64(f24)
+ get_f64(f25)
+ get_f64(f26)
+ get_f64(f27)
+ get_f64(f28)
+ get_f64(f29)
+ get_f64(f30)
+ get_f64(f31)
+
+ .text
+ .globl put_f64_reg
+ put_f64_reg:
+ put_f64(f0)
+ put_f64(f1)
+ put_f64(f2)
+ put_f64(f3)
+ put_f64(f4)
+ put_f64(f5)
+ put_f64(f6)
+ put_f64(f7)
+ put_f64(f8)
+ put_f64(f9)
+ put_f64(f10)
+ put_f64(f11)
+ put_f64(f12)
+ put_f64(f13)
+ put_f64(f14)
+ put_f64(f15)
+ put_f64(f16)
+ put_f64(f17)
+ put_f64(f18)
+ put_f64(f19)
+ put_f64(f20)
+ put_f64(f21)
+ put_f64(f22)
+ put_f64(f23)
+ put_f64(f24)
+ put_f64(f25)
+ put_f64(f26)
+ put_f64(f27)
+ put_f64(f28)
+ put_f64(f29)
+ put_f64(f30)
+ put_f64(f31)
+
+#endif
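
With .option norvc in force, every get/put entry above assembles to exactly two 4-byte instructions, so the labels double as jump tables indexed by FP register number. A hedged sketch of how a caller would reach entry n (the return-through-t0 convention is taken from the 'jr t0' in each macro; the surrounding code is illustrative, not from this patch):

	/* a1 = FP register number; fetch the raw bits of f<a1> into a0 */
	la	t1, get_f32_reg
	slli	t2, a1, 3		/* 8 bytes per table entry */
	add	t1, t1, t2
	la	t0, 1f			/* each entry returns via 'jr t0' */
	jr	t1
1:	/* a0 now holds the f<a1> bits */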
diff --git a/roms/opensbi/lib/sbi/riscv_locks.c b/roms/opensbi/lib/sbi/riscv_locks.c
new file mode 100644
index 000000000..4d1d9c0c1
--- /dev/null
+++ b/roms/opensbi/lib/sbi/riscv_locks.c
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_barrier.h>
+#include <sbi/riscv_locks.h>
+
+int spin_lock_check(spinlock_t *lock)
+{
+ return (lock->lock == __RISCV_SPIN_UNLOCKED) ? 0 : 1;
+}
+
+int spin_trylock(spinlock_t *lock)
+{
+ int tmp = 1, busy;
+
+ __asm__ __volatile__(
+ " amoswap.w %0, %2, %1\n" RISCV_ACQUIRE_BARRIER
+ : "=r"(busy), "+A"(lock->lock)
+ : "r"(tmp)
+ : "memory");
+
+ return !busy;
+}
+
+void spin_lock(spinlock_t *lock)
+{
+ while (1) {
+ if (spin_lock_check(lock))
+ continue;
+
+ if (spin_trylock(lock))
+ break;
+ }
+}
+
+void spin_unlock(spinlock_t *lock)
+{
+ __smp_store_release(&lock->lock, __RISCV_SPIN_UNLOCKED);
+}
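
spin_lock() above is a test-and-test-and-set lock: it spins on the plain read in spin_lock_check() and only attempts the amoswap in spin_trylock() once the lock looks free, keeping the cache line in shared state while waiting. A minimal usage sketch, assuming the declarations from <sbi/riscv_locks.h>:

static spinlock_t counter_lock = SPIN_LOCK_INITIALIZER;
static unsigned long counter;

static void counter_inc(void)
{
	spin_lock(&counter_lock);
	counter++;			/* critical section */
	spin_unlock(&counter_lock);
}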
diff --git a/roms/opensbi/lib/sbi/sbi_bitmap.c b/roms/opensbi/lib/sbi/sbi_bitmap.c
new file mode 100644
index 000000000..e74b6bbec
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_bitmap.c
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/sbi_bitmap.h>
+
+void __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] & bitmap2[k];
+}
+
+void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] | bitmap2[k];
+}
+
+void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, int bits)
+{
+ int k;
+ int nr = BITS_TO_LONGS(bits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = bitmap1[k] ^ bitmap2[k];
+}
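
The helpers above operate a word at a time over BITS_TO_LONGS(bits) longs. A hedged usage sketch (values illustrative, assuming a 64-bit build where a 128-bit map spans two longs):

unsigned long a[2] = { 0xff00, 0x1 };
unsigned long b[2] = { 0x0ff0, 0x3 };
unsigned long dst[2];

__bitmap_and(dst, a, b, 128);	/* dst = { 0x0f00, 0x1 } */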
diff --git a/roms/opensbi/lib/sbi/sbi_bitops.c b/roms/opensbi/lib/sbi/sbi_bitops.c
new file mode 100644
index 000000000..de9d0457b
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_bitops.c
@@ -0,0 +1,200 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/sbi_bitops.h>
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+unsigned long find_first_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + __ffs(tmp);
+}
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+
+ tmp = (*p) | (~0UL << size);
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found:
+ return result + ffz(tmp);
+}
+
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the last set bit, or size if none is set.
+ */
+unsigned long find_last_bit(const unsigned long *addr,
+ unsigned long size)
+{
+ unsigned long words;
+ unsigned long tmp;
+
+ /* Start at final word. */
+ words = size / BITS_PER_LONG;
+
+ /* Partial final word? */
+ if (size & (BITS_PER_LONG-1)) {
+ tmp = (addr[words] & (~0UL >> (BITS_PER_LONG
+ - (size & (BITS_PER_LONG-1)))));
+ if (tmp)
+ goto found;
+ }
+
+ while (words) {
+ tmp = addr[--words];
+ if (tmp) {
+found:
+ return words * BITS_PER_LONG + __fls(tmp);
+ }
+ }
+
+ /* Not found */
+ return size;
+}
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bit number to start searching at
+ */
+unsigned long find_next_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bit number to start searching at
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG-1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp |= ~0UL >> (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if (~(tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL << size;
+ if (tmp == ~0UL) /* Are any bits zero? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + ffz(tmp);
+}
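
All five search routines above share the same contract: the return value is a bit index, and size doubles as the not-found sentinel. A portable bit-by-bit reference for find_first_bit(), useful for checking the word-at-a-time fast path (a standalone sketch, not part of the patch):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long ref_find_first_bit(const unsigned long *addr,
					unsigned long size)
{
	for (unsigned long i = 0; i < size; i++)
		if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;		/* no bit set */
}

int main(void)
{
	unsigned long map[2] = { 0, 0x10 };	/* bit 68 on a 64-bit build */

	printf("%lu\n", ref_find_first_bit(map, 128));	/* prints 68 */
	return 0;
}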
diff --git a/roms/opensbi/lib/sbi/sbi_console.c b/roms/opensbi/lib/sbi/sbi_console.c
new file mode 100644
index 000000000..7189b9bf2
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_console.c
@@ -0,0 +1,398 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_scratch.h>
+
+static const struct sbi_platform *console_plat = NULL;
+static spinlock_t console_out_lock = SPIN_LOCK_INITIALIZER;
+
+bool sbi_isprintable(char c)
+{
+ if (((31 < c) && (c < 127)) || (c == '\f') || (c == '\r') ||
+ (c == '\n') || (c == '\t')) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+int sbi_getc(void)
+{
+ return sbi_platform_console_getc(console_plat);
+}
+
+void sbi_putc(char ch)
+{
+ if (ch == '\n')
+ sbi_platform_console_putc(console_plat, '\r');
+ sbi_platform_console_putc(console_plat, ch);
+}
+
+void sbi_puts(const char *str)
+{
+ spin_lock(&console_out_lock);
+ while (*str) {
+ sbi_putc(*str);
+ str++;
+ }
+ spin_unlock(&console_out_lock);
+}
+
+void sbi_gets(char *s, int maxwidth, char endchar)
+{
+ int ch;
+ char *retval = s;
+
+ while ((ch = sbi_getc()) != endchar && ch >= 0 && maxwidth > 1) {
+ *retval = (char)ch;
+ retval++;
+ maxwidth--;
+ }
+ *retval = '\0';
+}
+
+#define PAD_RIGHT 1
+#define PAD_ZERO 2
+#define PAD_ALTERNATE 4
+#define PRINT_BUF_LEN 64
+
+#define va_start(v, l) __builtin_va_start((v), l)
+#define va_end __builtin_va_end
+#define va_arg __builtin_va_arg
+typedef __builtin_va_list va_list;
+
+static void printc(char **out, u32 *out_len, char ch)
+{
+ if (!out) {
+ sbi_putc(ch);
+ return;
+ }
+
+ /*
+ * For bounded output (out_len non-NULL), stop writing once only
+ * the byte reserved for the '\0' appended by print() remains;
+ * unbounded output (sbi_sprintf()) always writes.
+ */
+ if (*out && (!out_len || 1 < *out_len)) {
+ **out = ch;
+ ++(*out);
+ if (out_len)
+ (*out_len)--;
+ }
+}
+
+static int prints(char **out, u32 *out_len, const char *string, int width,
+ int flags)
+{
+ int pc = 0;
+ char padchar = ' ';
+
+ if (width > 0) {
+ int len = 0;
+ const char *ptr;
+ for (ptr = string; *ptr; ++ptr)
+ ++len;
+ if (len >= width)
+ width = 0;
+ else
+ width -= len;
+ if (flags & PAD_ZERO)
+ padchar = '0';
+ }
+ if (!(flags & PAD_RIGHT)) {
+ for (; width > 0; --width) {
+ printc(out, out_len, padchar);
+ ++pc;
+ }
+ }
+ for (; *string; ++string) {
+ printc(out, out_len, *string);
+ ++pc;
+ }
+ for (; width > 0; --width) {
+ printc(out, out_len, padchar);
+ ++pc;
+ }
+
+ return pc;
+}
+
+static int printi(char **out, u32 *out_len, long long i, int b, int sg,
+ int width, int flags, int letbase)
+{
+ char print_buf[PRINT_BUF_LEN];
+ char *s;
+ int neg = 0, pc = 0;
+ u64 t;
+ unsigned long long u = i;
+
+ if (sg && b == 10 && i < 0) {
+ neg = 1;
+ u = -i;
+ }
+
+ s = print_buf + PRINT_BUF_LEN - 1;
+ *s = '\0';
+
+ if (!u) {
+ *--s = '0';
+ } else {
+ while (u) {
+ t = u % b;
+ u = u / b;
+ if (t >= 10)
+ t += letbase - '0' - 10;
+ *--s = t + '0';
+ }
+ }
+
+ if (flags & PAD_ALTERNATE) {
+ if ((b == 16) && (letbase == 'A')) {
+ *--s = 'X';
+ } else if ((b == 16) && (letbase == 'a')) {
+ *--s = 'x';
+ }
+ *--s = '0';
+ }
+
+ if (neg) {
+ if (width && (flags & PAD_ZERO)) {
+ printc(out, out_len, '-');
+ ++pc;
+ --width;
+ } else {
+ *--s = '-';
+ }
+ }
+
+ return pc + prints(out, out_len, s, width, flags);
+}
+
+static int print(char **out, u32 *out_len, const char *format, va_list args)
+{
+ int width, flags, acnt = 0;
+ int pc = 0;
+ char scr[2];
+ unsigned long long tmp;
+
+ for (; *format != 0; ++format) {
+ if (*format == '%') {
+ ++format;
+ width = flags = 0;
+ if (*format == '\0')
+ break;
+ if (*format == '%')
+ goto out;
+ /* Get flags */
+ if (*format == '-') {
+ ++format;
+ flags = PAD_RIGHT;
+ }
+ if (*format == '#') {
+ ++format;
+ flags |= PAD_ALTERNATE;
+ }
+ while (*format == '0') {
+ ++format;
+ flags |= PAD_ZERO;
+ }
+ /* Get width */
+ for (; *format >= '0' && *format <= '9'; ++format) {
+ width *= 10;
+ width += *format - '0';
+ }
+ if (*format == 's') {
+ char *s = va_arg(args, char *);
+ acnt += sizeof(char *);
+ pc += prints(out, out_len, s ? s : "(null)",
+ width, flags);
+ continue;
+ }
+ if ((*format == 'd') || (*format == 'i')) {
+ pc += printi(out, out_len, va_arg(args, int),
+ 10, 1, width, flags, '0');
+ acnt += sizeof(int);
+ continue;
+ }
+ if (*format == 'x') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned int), 16, 0,
+ width, flags, 'a');
+ acnt += sizeof(unsigned int);
+ continue;
+ }
+ if (*format == 'X') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned int), 16, 0,
+ width, flags, 'A');
+ acnt += sizeof(unsigned int);
+ continue;
+ }
+ if (*format == 'u') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned int), 10, 0,
+ width, flags, 'a');
+ acnt += sizeof(unsigned int);
+ continue;
+ }
+ if (*format == 'p') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned long), 16, 0,
+ width, flags, 'a');
+ acnt += sizeof(unsigned long);
+ continue;
+ }
+ if (*format == 'P') {
+ pc += printi(out, out_len,
+ va_arg(args, unsigned long), 16, 0,
+ width, flags, 'A');
+ acnt += sizeof(unsigned long);
+ continue;
+ }
+ if (*format == 'l' && *(format + 1) == 'l') {
+ while (acnt &
+ (sizeof(unsigned long long) - 1)) {
+ va_arg(args, int);
+ acnt += sizeof(int);
+ }
+ if (sizeof(unsigned long long) ==
+ sizeof(unsigned long)) {
+ tmp = va_arg(args, unsigned long long);
+ acnt += sizeof(unsigned long long);
+ } else {
+ ((unsigned long *)&tmp)[0] =
+ va_arg(args, unsigned long);
+ ((unsigned long *)&tmp)[1] =
+ va_arg(args, unsigned long);
+ acnt += 2 * sizeof(unsigned long);
+ }
+ if (*(format + 2) == 'u') {
+ format += 2;
+ pc += printi(out, out_len, tmp, 10, 0,
+ width, flags, 'a');
+ } else if (*(format + 2) == 'x') {
+ format += 2;
+ pc += printi(out, out_len, tmp, 16, 0,
+ width, flags, 'a');
+ } else if (*(format + 2) == 'X') {
+ format += 2;
+ pc += printi(out, out_len, tmp, 16, 0,
+ width, flags, 'A');
+ } else {
+ format += 1;
+ pc += printi(out, out_len, tmp, 10, 1,
+ width, flags, '0');
+ }
+ continue;
+ } else if (*format == 'l') {
+ if (*(format + 1) == 'u') {
+ format += 1;
+ pc += printi(
+ out, out_len,
+ va_arg(args, unsigned long), 10,
+ 0, width, flags, 'a');
+ } else if (*(format + 1) == 'x') {
+ format += 1;
+ pc += printi(
+ out, out_len,
+ va_arg(args, unsigned long), 16,
+ 0, width, flags, 'a');
+ acnt += sizeof(unsigned long);
+ } else if (*(format + 1) == 'X') {
+ format += 1;
+ pc += printi(
+ out, out_len,
+ va_arg(args, unsigned long), 16,
+ 0, width, flags, 'A');
+ acnt += sizeof(unsigned long);
+ } else {
+ pc += printi(out, out_len,
+ va_arg(args, long), 10, 1,
+ width, flags, '0');
+ acnt += sizeof(long);
+ }
+ }
+ if (*format == 'c') {
+ /* chars are promoted to int when pushed on the stack */
+ scr[0] = va_arg(args, int);
+ scr[1] = '\0';
+ pc += prints(out, out_len, scr, width, flags);
+ acnt += sizeof(int);
+ continue;
+ }
+ } else {
+ out:
+ printc(out, out_len, *format);
+ ++pc;
+ }
+ }
+ if (out && (!out_len || 0 < *out_len))
+ **out = '\0';
+
+ return pc;
+}
+
+int sbi_sprintf(char *out, const char *format, ...)
+{
+ va_list args;
+ int retval;
+
+ va_start(args, format);
+ retval = print(&out, NULL, format, args);
+ va_end(args);
+
+ return retval;
+}
+
+int sbi_snprintf(char *out, u32 out_sz, const char *format, ...)
+{
+ va_list args;
+ int retval;
+
+ va_start(args, format);
+ retval = print(&out, &out_sz, format, args);
+ va_end(args);
+
+ return retval;
+}
+
+int sbi_printf(const char *format, ...)
+{
+ va_list args;
+ int retval;
+
+ spin_lock(&console_out_lock);
+ va_start(args, format);
+ retval = print(NULL, NULL, format, args);
+ va_end(args);
+ spin_unlock(&console_out_lock);
+
+ return retval;
+}
+
+int sbi_dprintf(const char *format, ...)
+{
+ va_list args;
+ int retval = 0;
+ struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+
+ va_start(args, format);
+ if (scratch->options & SBI_SCRATCH_DEBUG_PRINTS)
+ retval = print(NULL, NULL, format, args);
+ va_end(args);
+
+ return retval;
+}
+
+int sbi_console_init(struct sbi_scratch *scratch)
+{
+ console_plat = sbi_platform_ptr(scratch);
+
+ return sbi_platform_console_init(console_plat);
+}
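
The formatter above understands %s %c %d %i %u %x %X %p %P with the l/ll length modifiers and the '-', '#', '0' and width flags; anything else falls through as literal text. A hedged usage sketch (values illustrative):

char buf[64];

sbi_snprintf(buf, sizeof(buf), "%s=0x%lx", "mstatus", 0x1800UL);
sbi_printf("hart %u: %s\n", 0U, buf);	/* serialized by console_out_lock */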
diff --git a/roms/opensbi/lib/sbi/sbi_domain.c b/roms/opensbi/lib/sbi/sbi_domain.c
new file mode 100644
index 000000000..195c9413c
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_domain.c
@@ -0,0 +1,539 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_hartmask.h>
+#include <sbi/sbi_hsm.h>
+#include <sbi/sbi_math.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_string.h>
+
+struct sbi_domain *hartid_to_domain_table[SBI_HARTMASK_MAX_BITS] = { 0 };
+struct sbi_domain *domidx_to_domain_table[SBI_DOMAIN_MAX_INDEX] = { 0 };
+
+static u32 domain_count = 0;
+
+static struct sbi_hartmask root_hmask = { 0 };
+
+#define ROOT_FW_REGION 0
+#define ROOT_ALL_REGION 1
+#define ROOT_END_REGION 2
+static struct sbi_domain_memregion root_memregs[ROOT_END_REGION + 1] = { 0 };
+
+static struct sbi_domain root = {
+ .name = "root",
+ .possible_harts = &root_hmask,
+ .regions = root_memregs,
+ .system_reset_allowed = TRUE,
+};
+
+bool sbi_domain_is_assigned_hart(const struct sbi_domain *dom, u32 hartid)
+{
+ if (dom)
+ return sbi_hartmask_test_hart(hartid, &dom->assigned_harts);
+
+ return FALSE;
+}
+
+ulong sbi_domain_get_assigned_hartmask(const struct sbi_domain *dom,
+ ulong hbase)
+{
+ ulong ret, bword, boff;
+
+ if (!dom)
+ return 0;
+
+ bword = BIT_WORD(hbase);
+ boff = BIT_WORD_OFFSET(hbase);
+
+ ret = sbi_hartmask_bits(&dom->assigned_harts)[bword++] >> boff;
+ if (boff && bword < BIT_WORD(SBI_HARTMASK_MAX_BITS)) {
+ ret |= (sbi_hartmask_bits(&dom->assigned_harts)[bword] &
+ (BIT(boff) - 1UL)) << (BITS_PER_LONG - boff);
+ }
+
+ return ret;
+}
+
+void sbi_domain_memregion_initfw(struct sbi_domain_memregion *reg)
+{
+ if (!reg)
+ return;
+
+ sbi_memcpy(reg, &root_memregs[ROOT_FW_REGION], sizeof(*reg));
+}
+
+bool sbi_domain_check_addr(const struct sbi_domain *dom,
+ unsigned long addr, unsigned long mode,
+ unsigned long access_flags)
+{
+ bool mmio = FALSE;
+ struct sbi_domain_memregion *reg;
+ unsigned long rstart, rend, rflags, rwx = 0;
+
+ if (!dom)
+ return FALSE;
+
+ if (access_flags & SBI_DOMAIN_READ)
+ rwx |= SBI_DOMAIN_MEMREGION_READABLE;
+ if (access_flags & SBI_DOMAIN_WRITE)
+ rwx |= SBI_DOMAIN_MEMREGION_WRITEABLE;
+ if (access_flags & SBI_DOMAIN_EXECUTE)
+ rwx |= SBI_DOMAIN_MEMREGION_EXECUTABLE;
+ if (access_flags & SBI_DOMAIN_MMIO)
+ mmio = TRUE;
+
+ sbi_domain_for_each_memregion(dom, reg) {
+ rflags = reg->flags;
+ if (mode == PRV_M && !(rflags & SBI_DOMAIN_MEMREGION_MMODE))
+ continue;
+
+ rstart = reg->base;
+ rend = (reg->order < __riscv_xlen) ?
+ rstart + ((1UL << reg->order) - 1) : -1UL;
+ if (rstart <= addr && addr <= rend) {
+ if ((mmio && !(rflags & SBI_DOMAIN_MEMREGION_MMIO)) ||
+ (!mmio && (rflags & SBI_DOMAIN_MEMREGION_MMIO)))
+ return FALSE;
+ return ((rflags & rwx) == rwx) ? TRUE : FALSE;
+ }
+ }
+
+ return (mode == PRV_M) ? TRUE : FALSE;
+}
+
+/* Check if region complies with constraints */
+static bool is_region_valid(const struct sbi_domain_memregion *reg)
+{
+ if (reg->order < 3 || __riscv_xlen < reg->order)
+ return FALSE;
+
+ if (reg->base & (BIT(reg->order) - 1))
+ return FALSE;
+
+ return TRUE;
+}
+
+/** Check if regionA is sub-region of regionB */
+static bool is_region_subset(const struct sbi_domain_memregion *regA,
+ const struct sbi_domain_memregion *regB)
+{
+ ulong regA_start = regA->base;
+ ulong regA_end = regA->base + (BIT(regA->order) - 1);
+ ulong regB_start = regB->base;
+ ulong regB_end = regB->base + (BIT(regB->order) - 1);
+
+ if ((regB_start <= regA_start) &&
+ (regA_start < regB_end) &&
+ (regB_start < regA_end) &&
+ (regA_end <= regB_end))
+ return TRUE;
+
+ return FALSE;
+}
+
+/** Check if regionA conflicts regionB */
+static bool is_region_conflict(const struct sbi_domain_memregion *regA,
+ const struct sbi_domain_memregion *regB)
+{
+ if ((is_region_subset(regA, regB) || is_region_subset(regB, regA)) &&
+ regA->flags == regB->flags)
+ return TRUE;
+
+ return FALSE;
+}
+
+/** Check if regionA should be placed before regionB */
+static bool is_region_before(const struct sbi_domain_memregion *regA,
+ const struct sbi_domain_memregion *regB)
+{
+ if (regA->order < regB->order)
+ return TRUE;
+
+ if ((regA->order == regB->order) &&
+ (regA->base < regB->base))
+ return TRUE;
+
+ return FALSE;
+}
+
+static int sanitize_domain(const struct sbi_platform *plat,
+ struct sbi_domain *dom)
+{
+ u32 i, j, count;
+ bool have_fw_reg;
+ struct sbi_domain_memregion treg, *reg, *reg1;
+
+ /* Check possible HARTs */
+ if (!dom->possible_harts) {
+ sbi_printf("%s: %s possible HART mask is NULL\n",
+ __func__, dom->name);
+ return SBI_EINVAL;
+ }
+ sbi_hartmask_for_each_hart(i, dom->possible_harts) {
+ if (sbi_platform_hart_invalid(plat, i)) {
+ sbi_printf("%s: %s possible HART mask has invalid "
+ "hart %d\n", __func__, dom->name, i);
+ return SBI_EINVAL;
+ }
+ };
+
+ /* Check memory regions */
+ if (!dom->regions) {
+ sbi_printf("%s: %s regions is NULL\n",
+ __func__, dom->name);
+ return SBI_EINVAL;
+ }
+ sbi_domain_for_each_memregion(dom, reg) {
+ if (!is_region_valid(reg)) {
+ sbi_printf("%s: %s has invalid region base=0x%lx "
+ "order=%lu flags=0x%lx\n", __func__,
+ dom->name, reg->base, reg->order,
+ reg->flags);
+ return SBI_EINVAL;
+ }
+ }
+
+ /* Count memory regions and check presence of firmware region */
+ count = 0;
+ have_fw_reg = FALSE;
+ sbi_domain_for_each_memregion(dom, reg) {
+ if (reg->order == root_memregs[ROOT_FW_REGION].order &&
+ reg->base == root_memregs[ROOT_FW_REGION].base &&
+ reg->flags == root_memregs[ROOT_FW_REGION].flags)
+ have_fw_reg = TRUE;
+ count++;
+ }
+ if (!have_fw_reg) {
+ sbi_printf("%s: %s does not have firmware region\n",
+ __func__, dom->name);
+ return SBI_EINVAL;
+ }
+
+ /* Sort the memory regions */
+ for (i = 0; i < (count - 1); i++) {
+ reg = &dom->regions[i];
+ for (j = i + 1; j < count; j++) {
+ reg1 = &dom->regions[j];
+
+ if (is_region_conflict(reg1, reg)) {
+ sbi_printf("%s: %s conflict between regions "
+ "(base=0x%lx order=%lu flags=0x%lx) and "
+ "(base=0x%lx order=%lu flags=0x%lx)\n",
+ __func__, dom->name,
+ reg->base, reg->order, reg->flags,
+ reg1->base, reg1->order, reg1->flags);
+ return SBI_EINVAL;
+ }
+
+ if (!is_region_before(reg1, reg))
+ continue;
+
+ sbi_memcpy(&treg, reg1, sizeof(treg));
+ sbi_memcpy(reg1, reg, sizeof(treg));
+ sbi_memcpy(reg, &treg, sizeof(treg));
+ }
+ }
+
+ /*
+ * We don't need to check boot HART id of domain because if boot
+ * HART id is not possible/assigned to this domain then it won't
+ * be started at boot-time by sbi_domain_finalize().
+ */
+
+ /*
+ * Check next mode
+ *
+ * We only allow next mode to be S-mode or U-mode, so that we can
+ * protect M-mode context and enforce checks on memory accesses.
+ */
+ if (dom->next_mode != PRV_S &&
+ dom->next_mode != PRV_U) {
+ sbi_printf("%s: %s invalid next booting stage mode 0x%lx\n",
+ __func__, dom->name, dom->next_mode);
+ return SBI_EINVAL;
+ }
+
+ /* Check next address and next mode */
+ if (!sbi_domain_check_addr(dom, dom->next_addr, dom->next_mode,
+ SBI_DOMAIN_EXECUTE)) {
+ sbi_printf("%s: %s next booting stage addres 0x%lx can't "
+ "execute\n", __func__, dom->name, dom->next_addr);
+ return SBI_EINVAL;
+ }
+
+ return 0;
+}
+
+void sbi_domain_dump(const struct sbi_domain *dom, const char *suffix)
+{
+ u32 i, k;
+ unsigned long rstart, rend;
+ struct sbi_domain_memregion *reg;
+
+ sbi_printf("Domain%d Name %s: %s\n",
+ dom->index, suffix, dom->name);
+
+ sbi_printf("Domain%d Boot HART %s: %d\n",
+ dom->index, suffix, dom->boot_hartid);
+
+ k = 0;
+ sbi_printf("Domain%d HARTs %s: ", dom->index, suffix);
+ sbi_hartmask_for_each_hart(i, dom->possible_harts)
+ sbi_printf("%s%d%s", (k++) ? "," : "",
+ i, sbi_domain_is_assigned_hart(dom, i) ? "*" : "");
+ sbi_printf("\n");
+
+ i = 0;
+ sbi_domain_for_each_memregion(dom, reg) {
+ rstart = reg->base;
+ rend = (reg->order < __riscv_xlen) ?
+ rstart + ((1UL << reg->order) - 1) : -1UL;
+
+#if __riscv_xlen == 32
+ sbi_printf("Domain%d Region%02d %s: 0x%08lx-0x%08lx ",
+#else
+ sbi_printf("Domain%d Region%02d %s: 0x%016lx-0x%016lx ",
+#endif
+ dom->index, i, suffix, rstart, rend);
+
+ k = 0;
+ if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
+ sbi_printf("%cM", (k++) ? ',' : '(');
+ if (reg->flags & SBI_DOMAIN_MEMREGION_MMIO)
+ sbi_printf("%cI", (k++) ? ',' : '(');
+ if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
+ sbi_printf("%cR", (k++) ? ',' : '(');
+ if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
+ sbi_printf("%cW", (k++) ? ',' : '(');
+ if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
+ sbi_printf("%cX", (k++) ? ',' : '(');
+ sbi_printf("%s\n", (k++) ? ")" : "()");
+
+ i++;
+ }
+
+#if __riscv_xlen == 32
+ sbi_printf("Domain%d Next Address%s: 0x%08lx\n",
+#else
+ sbi_printf("Domain%d Next Address%s: 0x%016lx\n",
+#endif
+ dom->index, suffix, dom->next_addr);
+
+#if __riscv_xlen == 32
+ sbi_printf("Domain%d Next Arg1 %s: 0x%08lx\n",
+#else
+ sbi_printf("Domain%d Next Arg1 %s: 0x%016lx\n",
+#endif
+ dom->index, suffix, dom->next_arg1);
+
+ sbi_printf("Domain%d Next Mode %s: ", dom->index, suffix);
+ switch (dom->next_mode) {
+ case PRV_M:
+ sbi_printf("M-mode\n");
+ break;
+ case PRV_S:
+ sbi_printf("S-mode\n");
+ break;
+ case PRV_U:
+ sbi_printf("U-mode\n");
+ break;
+ default:
+ sbi_printf("Unknown\n");
+ break;
+ };
+
+ sbi_printf("Domain%d SysReset %s: %s\n",
+ dom->index, suffix, (dom->system_reset_allowed) ? "yes" : "no");
+}
+
+void sbi_domain_dump_all(const char *suffix)
+{
+ u32 i;
+ const struct sbi_domain *dom;
+
+ sbi_domain_for_each(i, dom) {
+ sbi_domain_dump(dom, suffix);
+ sbi_printf("\n");
+ }
+}
+
+int sbi_domain_register(struct sbi_domain *dom,
+ const struct sbi_hartmask *assign_mask)
+{
+ u32 i;
+ int rc;
+ struct sbi_domain *tdom;
+ u32 cold_hartid = current_hartid();
+ const struct sbi_platform *plat = sbi_platform_thishart_ptr();
+
+ if (!dom || !assign_mask)
+ return SBI_EINVAL;
+
+ /* Check if domain already discovered */
+ sbi_domain_for_each(i, tdom) {
+ if (tdom == dom)
+ return SBI_EALREADY;
+ }
+
+ /*
+ * Ensure that we have room for Domain Index to
+ * HART ID mapping
+ */
+ if (SBI_DOMAIN_MAX_INDEX <= domain_count) {
+ sbi_printf("%s: No room for %s\n",
+ __func__, dom->name);
+ return SBI_ENOSPC;
+ }
+
+ /* Sanitize discovered domain */
+ rc = sanitize_domain(plat, dom);
+ if (rc) {
+ sbi_printf("%s: sanity checks failed for"
+ " %s (error %d)\n", __func__,
+ dom->name, rc);
+ return rc;
+ }
+
+ /* Assign index to domain */
+ dom->index = domain_count++;
+ domidx_to_domain_table[dom->index] = dom;
+
+ /* Clear assigned HARTs of domain */
+ sbi_hartmask_clear_all(&dom->assigned_harts);
+
+ /* Assign domain to HART if HART is a possible HART */
+ sbi_hartmask_for_each_hart(i, assign_mask) {
+ if (!sbi_hartmask_test_hart(i, dom->possible_harts))
+ continue;
+
+ tdom = hartid_to_domain_table[i];
+ if (tdom)
+ sbi_hartmask_clear_hart(i,
+ &tdom->assigned_harts);
+ hartid_to_domain_table[i] = dom;
+ sbi_hartmask_set_hart(i, &dom->assigned_harts);
+
+ /*
+ * If cold boot HART is assigned to this domain then
+ * override boot HART of this domain.
+ */
+ if (i == cold_hartid &&
+ dom->boot_hartid != cold_hartid) {
+ sbi_printf("Domain%d Boot HARTID forced to"
+ " %d\n", dom->index, cold_hartid);
+ dom->boot_hartid = cold_hartid;
+ }
+ }
+
+ return 0;
+}
+
+int sbi_domain_finalize(struct sbi_scratch *scratch, u32 cold_hartid)
+{
+ int rc;
+ u32 i, dhart;
+ struct sbi_domain *dom;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ /* Initialize and populate domains for the platform */
+ rc = sbi_platform_domains_init(plat);
+ if (rc) {
+ sbi_printf("%s: platform domains_init() failed (error %d)\n",
+ __func__, rc);
+ return rc;
+ }
+
+ /* Startup boot HART of domains */
+ sbi_domain_for_each(i, dom) {
+ /* Domain boot HART */
+ dhart = dom->boot_hartid;
+
+ /* Ignore if boot HART is off limits */
+ if (SBI_HARTMASK_MAX_BITS <= dhart)
+ continue;
+
+ /* Ignore if boot HART not possible for this domain */
+ if (!sbi_hartmask_test_hart(dhart, dom->possible_harts))
+ continue;
+
+ /* Ignore if boot HART is assigned to a different domain */
+ if (sbi_hartid_to_domain(dhart) != dom ||
+ !sbi_hartmask_test_hart(dhart, &dom->assigned_harts))
+ continue;
+
+ /* Startup boot HART of domain */
+ if (dhart == cold_hartid) {
+ scratch->next_addr = dom->next_addr;
+ scratch->next_mode = dom->next_mode;
+ scratch->next_arg1 = dom->next_arg1;
+ } else {
+ rc = sbi_hsm_hart_start(scratch, NULL, dhart,
+ dom->next_addr,
+ dom->next_mode,
+ dom->next_arg1);
+ if (rc) {
+ sbi_printf("%s: failed to start boot HART %d"
+ " for %s (error %d)\n", __func__,
+ dhart, dom->name, rc);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int sbi_domain_init(struct sbi_scratch *scratch, u32 cold_hartid)
+{
+ u32 i;
+ struct sbi_domain_memregion *memregs;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ /* Root domain firmware memory region */
+ root_memregs[ROOT_FW_REGION].order = log2roundup(scratch->fw_size);
+ root_memregs[ROOT_FW_REGION].base = scratch->fw_start &
+ ~((1UL << root_memregs[0].order) - 1UL);
+ root_memregs[ROOT_FW_REGION].flags = 0;
+
+ /* Root domain allow everything memory region */
+ root_memregs[ROOT_ALL_REGION].order = __riscv_xlen;
+ root_memregs[ROOT_ALL_REGION].base = 0;
+ root_memregs[ROOT_ALL_REGION].flags = (SBI_DOMAIN_MEMREGION_READABLE |
+ SBI_DOMAIN_MEMREGION_WRITEABLE |
+ SBI_DOMAIN_MEMREGION_EXECUTABLE);
+
+ /* Root domain memory region end */
+ root_memregs[ROOT_END_REGION].order = 0;
+
+ /* Use platform specific root memory regions when available */
+ memregs = sbi_platform_domains_root_regions(plat);
+ if (memregs)
+ root.regions = memregs;
+
+ /* Root domain boot HART id is same as coldboot HART id */
+ root.boot_hartid = cold_hartid;
+
+ /* Root domain next booting stage details */
+ root.next_arg1 = scratch->next_arg1;
+ root.next_addr = scratch->next_addr;
+ root.next_mode = scratch->next_mode;
+
+ /* Root domain possible and assigned HARTs */
+ for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
+ if (sbi_platform_hart_invalid(plat, i))
+ continue;
+ sbi_hartmask_set_hart(i, &root_hmask);
+ }
+
+ return sbi_domain_register(&root, &root_hmask);
+}
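
sanitize_domain() above expects a regions array that contains the firmware region, keeps every region NAPOT-aligned, and ends with an order == 0 terminator (the role ROOT_END_REGION plays for the root domain). A hedged sketch of a list it would accept (array name and base/order values are illustrative, not from the patch):

static struct sbi_domain_memregion my_regions[3];

static void my_regions_setup(void)
{
	/* Mandatory copy of the root firmware region */
	sbi_domain_memregion_initfw(&my_regions[0]);

	/* One NAPOT RAM region: base must be aligned to 1 << order */
	my_regions[1].base  = 0x80000000UL;
	my_regions[1].order = 28;		/* 256 MiB */
	my_regions[1].flags = SBI_DOMAIN_MEMREGION_READABLE |
			      SBI_DOMAIN_MEMREGION_WRITEABLE |
			      SBI_DOMAIN_MEMREGION_EXECUTABLE;

	my_regions[2].order = 0;		/* list terminator */
}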
diff --git a/roms/opensbi/lib/sbi/sbi_ecall.c b/roms/opensbi/lib/sbi/sbi_ecall.c
new file mode 100644
index 000000000..e92a53930
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_ecall.c
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_trap.h>
+
+u16 sbi_ecall_version_major(void)
+{
+ return SBI_ECALL_VERSION_MAJOR;
+}
+
+u16 sbi_ecall_version_minor(void)
+{
+ return SBI_ECALL_VERSION_MINOR;
+}
+
+static unsigned long ecall_impid = SBI_OPENSBI_IMPID;
+
+unsigned long sbi_ecall_get_impid(void)
+{
+ return ecall_impid;
+}
+
+void sbi_ecall_set_impid(unsigned long impid)
+{
+ ecall_impid = impid;
+}
+
+static SBI_LIST_HEAD(ecall_exts_list);
+
+struct sbi_ecall_extension *sbi_ecall_find_extension(unsigned long extid)
+{
+ struct sbi_ecall_extension *t, *ret = NULL;
+
+ sbi_list_for_each_entry(t, &ecall_exts_list, head) {
+ if (t->extid_start <= extid && extid <= t->extid_end) {
+ ret = t;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int sbi_ecall_register_extension(struct sbi_ecall_extension *ext)
+{
+ struct sbi_ecall_extension *t;
+
+ if (!ext || (ext->extid_end < ext->extid_start) || !ext->handle)
+ return SBI_EINVAL;
+
+ sbi_list_for_each_entry(t, &ecall_exts_list, head) {
+ unsigned long start = t->extid_start;
+ unsigned long end = t->extid_end;
+ if (end < ext->extid_start || ext->extid_end < start)
+ /* no overlap */;
+ else
+ return SBI_EINVAL;
+ }
+
+ SBI_INIT_LIST_HEAD(&ext->head);
+ sbi_list_add_tail(&ext->head, &ecall_exts_list);
+
+ return 0;
+}
+
+void sbi_ecall_unregister_extension(struct sbi_ecall_extension *ext)
+{
+ bool found = FALSE;
+ struct sbi_ecall_extension *t;
+
+ if (!ext)
+ return;
+
+ sbi_list_for_each_entry(t, &ecall_exts_list, head) {
+ if (t == ext) {
+ found = TRUE;
+ break;
+ }
+ }
+
+ if (found)
+ sbi_list_del_init(&ext->head);
+}
+
+int sbi_ecall_handler(struct sbi_trap_regs *regs)
+{
+ int ret = 0;
+ struct sbi_ecall_extension *ext;
+ unsigned long extension_id = regs->a7;
+ unsigned long func_id = regs->a6;
+ struct sbi_trap_info trap = {0};
+ unsigned long out_val = 0;
+ bool is_0_1_spec = 0;
+
+ ext = sbi_ecall_find_extension(extension_id);
+ if (ext && ext->handle) {
+ ret = ext->handle(extension_id, func_id,
+ regs, &out_val, &trap);
+ if (extension_id >= SBI_EXT_0_1_SET_TIMER &&
+ extension_id <= SBI_EXT_0_1_SHUTDOWN)
+ is_0_1_spec = 1;
+ } else {
+ ret = SBI_ENOTSUPP;
+ }
+
+ if (ret == SBI_ETRAP) {
+ trap.epc = regs->mepc;
+ sbi_trap_redirect(regs, &trap);
+ } else {
+ if (ret < SBI_LAST_ERR) {
+ sbi_printf("%s: Invalid error %d for ext=0x%lx "
+ "func=0x%lx\n", __func__, ret,
+ extension_id, func_id);
+ ret = SBI_ERR_FAILED;
+ }
+
+ /*
+ * This function should return non-zero value only in case of
+ * fatal error. However, there is no good way to distinguish
+ * between a fatal and non-fatal errors yet. That's why we treat
+ * every return value except ETRAP as non-fatal and just return
+ * accordingly for now. Once fatal errors are defined, that
+ * case should be handled differently.
+ */
+ regs->mepc += 4;
+ regs->a0 = ret;
+ if (!is_0_1_spec)
+ regs->a1 = out_val;
+ }
+
+ return 0;
+}
+
+int sbi_ecall_init(void)
+{
+ int ret;
+
+ /* The registration order below is optimized for performance */
+ ret = sbi_ecall_register_extension(&ecall_time);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_rfence);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_ipi);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_base);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_hsm);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_srst);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_legacy);
+ if (ret)
+ return ret;
+ ret = sbi_ecall_register_extension(&ecall_vendor);
+ if (ret)
+ return ret;
+
+ return 0;
+}
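
Dispatch above is purely table-driven: a7 selects the extension, a6 the function, and out_val lands in the caller's a1. A hedged sketch of hooking a vendor extension into sbi_ecall_register_extension() (the extension ID and all names are illustrative, not from the patch):

static int my_vendor_handle(unsigned long extid, unsigned long funcid,
			    const struct sbi_trap_regs *regs,
			    unsigned long *out_val,
			    struct sbi_trap_info *out_trap)
{
	if (funcid)
		return SBI_ENOTSUPP;
	*out_val = 42;		/* returned to the caller in a1 */
	return 0;
}

static struct sbi_ecall_extension my_vendor_ext = {
	.extid_start	= 0x09000000,
	.extid_end	= 0x09000000,
	.handle		= my_vendor_handle,
};

/* ... then, during init: sbi_ecall_register_extension(&my_vendor_ext); */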
diff --git a/roms/opensbi/lib/sbi/sbi_ecall_base.c b/roms/opensbi/lib/sbi/sbi_ecall_base.c
new file mode 100644
index 000000000..786d2ac67
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_ecall_base.c
@@ -0,0 +1,79 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_version.h>
+#include <sbi/riscv_asm.h>
+
+static int sbi_ecall_base_probe(unsigned long extid, unsigned long *out_val)
+{
+ struct sbi_ecall_extension *ext;
+
+ ext = sbi_ecall_find_extension(extid);
+ if (!ext) {
+ *out_val = 0;
+ return 0;
+ }
+
+ if (ext->probe)
+ return ext->probe(extid, out_val);
+
+ *out_val = 1;
+ return 0;
+}
+
+static int sbi_ecall_base_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ int ret = 0;
+
+ switch (funcid) {
+ case SBI_EXT_BASE_GET_SPEC_VERSION:
+ *out_val = (SBI_ECALL_VERSION_MAJOR <<
+ SBI_SPEC_VERSION_MAJOR_OFFSET) &
+ (SBI_SPEC_VERSION_MAJOR_MASK <<
+ SBI_SPEC_VERSION_MAJOR_OFFSET);
+ *out_val = *out_val | SBI_ECALL_VERSION_MINOR;
+ break;
+ case SBI_EXT_BASE_GET_IMP_ID:
+ *out_val = sbi_ecall_get_impid();
+ break;
+ case SBI_EXT_BASE_GET_IMP_VERSION:
+ *out_val = OPENSBI_VERSION;
+ break;
+ case SBI_EXT_BASE_GET_MVENDORID:
+ *out_val = csr_read(CSR_MVENDORID);
+ break;
+ case SBI_EXT_BASE_GET_MARCHID:
+ *out_val = csr_read(CSR_MARCHID);
+ break;
+ case SBI_EXT_BASE_GET_MIMPID:
+ *out_val = csr_read(CSR_MIMPID);
+ break;
+ case SBI_EXT_BASE_PROBE_EXT:
+ ret = sbi_ecall_base_probe(regs->a0, out_val);
+ break;
+ default:
+ ret = SBI_ENOTSUPP;
+ }
+
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_base = {
+ .extid_start = SBI_EXT_BASE,
+ .extid_end = SBI_EXT_BASE,
+ .handle = sbi_ecall_base_handler,
+};
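
SBI_EXT_BASE_GET_SPEC_VERSION above packs the major number into a bit-field above the minor. A one-line sketch of the same packing, assuming the SBI spec's layout of major in bits [30:24] and minor in [23:0] (the offset and mask constants live in headers not shown in this patch):

static unsigned long spec_version(unsigned long major, unsigned long minor)
{
	return ((major & 0x7f) << 24) | (minor & 0xffffff);
}
/* spec_version(0, 2) == 0x00000002; spec_version(1, 0) == 0x01000000 */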
diff --git a/roms/opensbi/lib/sbi/sbi_ecall_hsm.c b/roms/opensbi/lib/sbi/sbi_ecall_hsm.c
new file mode 100644
index 000000000..df29d5196
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_ecall_hsm.c
@@ -0,0 +1,59 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_version.h>
+#include <sbi/sbi_hsm.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/riscv_asm.h>
+
+static int sbi_ecall_hsm_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ ulong smode;
+ int ret = 0, hstate;
+ struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+
+ switch (funcid) {
+ case SBI_EXT_HSM_HART_START:
+ smode = csr_read(CSR_MSTATUS);
+ smode = (smode & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
+ ret = sbi_hsm_hart_start(scratch, sbi_domain_thishart_ptr(),
+ regs->a0, regs->a1, smode, regs->a2);
+ break;
+ case SBI_EXT_HSM_HART_STOP:
+ ret = sbi_hsm_hart_stop(scratch, TRUE);
+ break;
+ case SBI_EXT_HSM_HART_GET_STATUS:
+ hstate = sbi_hsm_hart_get_state(sbi_domain_thishart_ptr(),
+ regs->a0);
+ ret = sbi_hsm_hart_state_to_status(hstate);
+ break;
+ default:
+ ret = SBI_ENOTSUPP;
+	}
+ if (ret >= 0) {
+ *out_val = ret;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_hsm = {
+ .extid_start = SBI_EXT_HSM,
+ .extid_end = SBI_EXT_HSM,
+ .handle = sbi_ecall_hsm_handler,
+};
diff --git a/roms/opensbi/lib/sbi/sbi_ecall_legacy.c b/roms/opensbi/lib/sbi/sbi_ecall_legacy.c
new file mode 100644
index 000000000..1a7fe26e4
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_ecall_legacy.c
@@ -0,0 +1,124 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hsm.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_tlb.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_unpriv.h>
+#include <sbi/sbi_hart.h>
+
+static int sbi_load_hart_mask_unpriv(ulong *pmask, ulong *hmask,
+ struct sbi_trap_info *uptrap)
+{
+ ulong mask = 0;
+
+ if (pmask) {
+ mask = sbi_load_ulong(pmask, uptrap);
+ if (uptrap->cause)
+ return SBI_ETRAP;
+ } else {
+ sbi_hsm_hart_started_mask(sbi_domain_thishart_ptr(),
+ 0, &mask);
+ }
+ *hmask = mask;
+
+ return 0;
+}
+
+static int sbi_ecall_legacy_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ int ret = 0;
+ struct sbi_tlb_info tlb_info;
+ u32 source_hart = current_hartid();
+ ulong hmask = 0;
+
+ switch (extid) {
+ case SBI_EXT_0_1_SET_TIMER:
+#if __riscv_xlen == 32
+ sbi_timer_event_start((((u64)regs->a1 << 32) | (u64)regs->a0));
+#else
+ sbi_timer_event_start((u64)regs->a0);
+#endif
+ break;
+ case SBI_EXT_0_1_CONSOLE_PUTCHAR:
+ sbi_putc(regs->a0);
+ break;
+ case SBI_EXT_0_1_CONSOLE_GETCHAR:
+ ret = sbi_getc();
+ break;
+ case SBI_EXT_0_1_CLEAR_IPI:
+ sbi_ipi_clear_smode();
+ break;
+ case SBI_EXT_0_1_SEND_IPI:
+ ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0,
+ &hmask, out_trap);
+ if (ret != SBI_ETRAP)
+ ret = sbi_ipi_send_smode(hmask, 0);
+ break;
+ case SBI_EXT_0_1_REMOTE_FENCE_I:
+ ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0,
+ &hmask, out_trap);
+ if (ret != SBI_ETRAP) {
+ SBI_TLB_INFO_INIT(&tlb_info, 0, 0, 0, 0,
+ sbi_tlb_local_fence_i,
+ source_hart);
+ ret = sbi_tlb_request(hmask, 0, &tlb_info);
+ }
+ break;
+ case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
+ ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0,
+ &hmask, out_trap);
+ if (ret != SBI_ETRAP) {
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a1, regs->a2, 0, 0,
+ sbi_tlb_local_sfence_vma,
+ source_hart);
+ ret = sbi_tlb_request(hmask, 0, &tlb_info);
+ }
+ break;
+ case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
+ ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0,
+ &hmask, out_trap);
+ if (ret != SBI_ETRAP) {
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a1,
+ regs->a2, regs->a3, 0,
+ sbi_tlb_local_sfence_vma_asid,
+ source_hart);
+ ret = sbi_tlb_request(hmask, 0, &tlb_info);
+ }
+ break;
+ case SBI_EXT_0_1_SHUTDOWN:
+ sbi_system_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
+ SBI_SRST_RESET_REASON_NONE);
+ break;
+ default:
+ ret = SBI_ENOTSUPP;
+	}
+
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_legacy = {
+ .extid_start = SBI_EXT_0_1_SET_TIMER,
+ .extid_end = SBI_EXT_0_1_SHUTDOWN,
+ .handle = sbi_ecall_legacy_handler,
+};
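
On RV32, the legacy v0.1 set_timer call above reassembles a 64-bit deadline
from a0 (low half) and a1 (high half). The caller-side split, as a hedged
sketch (the helper is hypothetical; legacy calls carry only the EID in a7,
with no FID):

static void legacy_set_timer(unsigned long long deadline)
{
	register unsigned long a7 asm("a7") = 0x00; /* SBI_EXT_0_1_SET_TIMER */
	register unsigned long a0 asm("a0") = (unsigned long)deadline;
#if __riscv_xlen == 32
	register unsigned long a1 asm("a1") = (unsigned long)(deadline >> 32);

	asm volatile("ecall" : "+r"(a0) : "r"(a1), "r"(a7) : "memory");
#else
	asm volatile("ecall" : "+r"(a0) : "r"(a7) : "memory");
#endif
}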
diff --git a/roms/opensbi/lib/sbi/sbi_ecall_replace.c b/roms/opensbi/lib/sbi/sbi_ecall_replace.c
new file mode 100644
index 000000000..a7935d973
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_ecall_replace.c
@@ -0,0 +1,196 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_tlb.h>
+#include <sbi/sbi_trap.h>
+
+static int sbi_ecall_time_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ int ret = 0;
+
+ if (funcid == SBI_EXT_TIME_SET_TIMER) {
+#if __riscv_xlen == 32
+ sbi_timer_event_start((((u64)regs->a1 << 32) | (u64)regs->a0));
+#else
+ sbi_timer_event_start((u64)regs->a0);
+#endif
+ } else
+ ret = SBI_ENOTSUPP;
+
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_time = {
+ .extid_start = SBI_EXT_TIME,
+ .extid_end = SBI_EXT_TIME,
+ .handle = sbi_ecall_time_handler,
+};
+
+static int sbi_ecall_rfence_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ int ret = 0;
+ unsigned long vmid;
+ struct sbi_tlb_info tlb_info;
+ u32 source_hart = current_hartid();
+
+ if (funcid >= SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA &&
+ funcid <= SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID)
+ if (!misa_extension('H'))
+ return SBI_ENOTSUPP;
+
+ switch (funcid) {
+ case SBI_EXT_RFENCE_REMOTE_FENCE_I:
+ SBI_TLB_INFO_INIT(&tlb_info, 0, 0, 0, 0,
+ sbi_tlb_local_fence_i, source_hart);
+ ret = sbi_tlb_request(regs->a0, regs->a1, &tlb_info);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a2, regs->a3, 0, 0,
+ sbi_tlb_local_hfence_gvma, source_hart);
+ ret = sbi_tlb_request(regs->a0, regs->a1, &tlb_info);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a2, regs->a3, 0, regs->a4,
+ sbi_tlb_local_hfence_gvma_vmid,
+ source_hart);
+ ret = sbi_tlb_request(regs->a0, regs->a1, &tlb_info);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
+ vmid = (csr_read(CSR_HGATP) & HGATP_VMID_MASK);
+ vmid = vmid >> HGATP_VMID_SHIFT;
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a2, regs->a3, 0, vmid,
+ sbi_tlb_local_hfence_vvma, source_hart);
+ ret = sbi_tlb_request(regs->a0, regs->a1, &tlb_info);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
+ vmid = (csr_read(CSR_HGATP) & HGATP_VMID_MASK);
+ vmid = vmid >> HGATP_VMID_SHIFT;
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a2, regs->a3, regs->a4,
+ vmid, sbi_tlb_local_hfence_vvma_asid,
+ source_hart);
+ ret = sbi_tlb_request(regs->a0, regs->a1, &tlb_info);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a2, regs->a3, 0, 0,
+ sbi_tlb_local_sfence_vma, source_hart);
+ ret = sbi_tlb_request(regs->a0, regs->a1, &tlb_info);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+ SBI_TLB_INFO_INIT(&tlb_info, regs->a2, regs->a3, regs->a4, 0,
+ sbi_tlb_local_sfence_vma_asid, source_hart);
+ ret = sbi_tlb_request(regs->a0, regs->a1, &tlb_info);
+ break;
+ default:
+ ret = SBI_ENOTSUPP;
+	}
+
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_rfence = {
+ .extid_start = SBI_EXT_RFENCE,
+ .extid_end = SBI_EXT_RFENCE,
+ .handle = sbi_ecall_rfence_handler,
+};
+
+static int sbi_ecall_ipi_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ int ret = 0;
+
+ if (funcid == SBI_EXT_IPI_SEND_IPI)
+ ret = sbi_ipi_send_smode(regs->a0, regs->a1);
+ else
+ ret = SBI_ENOTSUPP;
+
+ return ret;
+}
+
+struct sbi_ecall_extension ecall_ipi = {
+ .extid_start = SBI_EXT_IPI,
+ .extid_end = SBI_EXT_IPI,
+ .handle = sbi_ecall_ipi_handler,
+};
+
+static int sbi_ecall_srst_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ if (funcid == SBI_EXT_SRST_RESET) {
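+		/* Reset type (a0) and reason (a1) must fit in 32 bits */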
+ if ((((u32)-1U) <= ((u64)regs->a0)) ||
+ (((u32)-1U) <= ((u64)regs->a1)))
+ return SBI_EINVAL;
+
+ switch (regs->a0) {
+ case SBI_SRST_RESET_TYPE_SHUTDOWN:
+ case SBI_SRST_RESET_TYPE_COLD_REBOOT:
+ case SBI_SRST_RESET_TYPE_WARM_REBOOT:
+ break;
+ default:
+ return SBI_ENOTSUPP;
+ }
+
+ switch (regs->a1) {
+ case SBI_SRST_RESET_REASON_NONE:
+ case SBI_SRST_RESET_REASON_SYSFAIL:
+ break;
+ default:
+ return SBI_ENOTSUPP;
+ }
+
+ if (sbi_system_reset_supported(regs->a0, regs->a1))
+ sbi_system_reset(regs->a0, regs->a1);
+ }
+
+ return SBI_ENOTSUPP;
+}
+
+static int sbi_ecall_srst_probe(unsigned long extid, unsigned long *out_val)
+{
+ u32 type, count = 0;
+
+ /*
+	 * At least one standard reset type should be supported by the
+	 * platform for the SBI SRST extension to be usable.
+ */
+
+ for (type = 0; type <= SBI_SRST_RESET_TYPE_LAST; type++) {
+ if (sbi_system_reset_supported(type,
+ SBI_SRST_RESET_REASON_NONE))
+ count++;
+ }
+
+ *out_val = (count) ? 1 : 0;
+ return 0;
+}
+
+struct sbi_ecall_extension ecall_srst = {
+ .extid_start = SBI_EXT_SRST,
+ .extid_end = SBI_EXT_SRST,
+ .handle = sbi_ecall_srst_handler,
+ .probe = sbi_ecall_srst_probe,
+};
diff --git a/roms/opensbi/lib/sbi/sbi_ecall_vendor.c b/roms/opensbi/lib/sbi/sbi_ecall_vendor.c
new file mode 100644
index 000000000..925282963
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_ecall_vendor.c
@@ -0,0 +1,40 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_trap.h>
+
+static int sbi_ecall_vendor_probe(unsigned long extid,
+ unsigned long *out_val)
+{
+ *out_val = sbi_platform_vendor_ext_check(sbi_platform_thishart_ptr(),
+ extid);
+ return 0;
+}
+
+static int sbi_ecall_vendor_handler(unsigned long extid, unsigned long funcid,
+ const struct sbi_trap_regs *regs,
+ unsigned long *out_val,
+ struct sbi_trap_info *out_trap)
+{
+ return sbi_platform_vendor_ext_provider(sbi_platform_thishart_ptr(),
+ extid, funcid, regs,
+ out_val, out_trap);
+}
+
+struct sbi_ecall_extension ecall_vendor = {
+ .extid_start = SBI_EXT_VENDOR_START,
+ .extid_end = SBI_EXT_VENDOR_END,
+ .probe = sbi_ecall_vendor_probe,
+ .handle = sbi_ecall_vendor_handler,
+};
diff --git a/roms/opensbi/lib/sbi/sbi_emulate_csr.c b/roms/opensbi/lib/sbi/sbi_emulate_csr.c
new file mode 100644
index 000000000..bee7761c4
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_emulate_csr.c
@@ -0,0 +1,188 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_emulate_csr.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_trap.h>
+
+static bool hpm_allowed(int hpm_num, ulong prev_mode, bool virt)
+{
+ ulong cen = -1UL;
+
+ if (prev_mode <= PRV_S) {
+ cen &= csr_read(CSR_MCOUNTEREN);
+ if (virt)
+ cen &= csr_read(CSR_HCOUNTEREN);
+ }
+ if (prev_mode == PRV_U)
+ cen &= csr_read(CSR_SCOUNTEREN);
+
+ return ((cen >> hpm_num) & 1) ? TRUE : FALSE;
+}
+
+int sbi_emulate_csr_read(int csr_num, struct sbi_trap_regs *regs,
+ ulong *csr_val)
+{
+ int ret = 0;
+ struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+ ulong prev_mode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
+#if __riscv_xlen == 32
+ bool virt = (regs->mstatusH & MSTATUSH_MPV) ? TRUE : FALSE;
+#else
+ bool virt = (regs->mstatus & MSTATUS_MPV) ? TRUE : FALSE;
+#endif
+
+ switch (csr_num) {
+ case CSR_HTIMEDELTA:
+ if (prev_mode == PRV_S && !virt)
+ *csr_val = sbi_timer_get_delta();
+ else
+ ret = SBI_ENOTSUPP;
+ break;
+ case CSR_CYCLE:
+ if (!hpm_allowed(csr_num - CSR_CYCLE, prev_mode, virt))
+ return SBI_ENOTSUPP;
+ *csr_val = csr_read(CSR_MCYCLE);
+ break;
+ case CSR_TIME:
+ /*
+ * We emulate TIME CSR for both Host (HS/U-mode) and
+ * Guest (VS/VU-mode).
+ *
+ * Faster TIME CSR reads are critical for good performance
+ * in S-mode software so we don't check CSR permissions.
+ */
+		*csr_val = (virt) ? sbi_timer_virt_value() :
+				    sbi_timer_value();
+ break;
+ case CSR_INSTRET:
+ if (!hpm_allowed(csr_num - CSR_CYCLE, prev_mode, virt))
+ return SBI_ENOTSUPP;
+ *csr_val = csr_read(CSR_MINSTRET);
+ break;
+
+#if __riscv_xlen == 32
+ case CSR_HTIMEDELTAH:
+ if (prev_mode == PRV_S && !virt)
+ *csr_val = sbi_timer_get_delta() >> 32;
+ else
+ ret = SBI_ENOTSUPP;
+ break;
+ case CSR_CYCLEH:
+ if (!hpm_allowed(csr_num - CSR_CYCLEH, prev_mode, virt))
+ return SBI_ENOTSUPP;
+ *csr_val = csr_read(CSR_MCYCLEH);
+ break;
+ case CSR_TIMEH:
+ /* Refer comments on TIME CSR above. */
+		*csr_val = (virt) ? sbi_timer_virt_value() >> 32 :
+				    sbi_timer_value() >> 32;
+ break;
+ case CSR_INSTRETH:
+ if (!hpm_allowed(csr_num - CSR_CYCLEH, prev_mode, virt))
+ return SBI_ENOTSUPP;
+ *csr_val = csr_read(CSR_MINSTRETH);
+ break;
+#endif
+
+#define switchcase_hpm(__uref, __mref, __csr) \
+ case __csr: \
+ if ((sbi_hart_mhpm_count(scratch) + 3) <= (__csr - __uref))\
+ return SBI_ENOTSUPP; \
+ if (!hpm_allowed(__csr - __uref, prev_mode, virt)) \
+ return SBI_ENOTSUPP; \
+ *csr_val = csr_read(__mref + __csr - __uref); \
+ break;
+#define switchcase_hpm_2(__uref, __mref, __csr) \
+ switchcase_hpm(__uref, __mref, __csr + 0) \
+ switchcase_hpm(__uref, __mref, __csr + 1)
+#define switchcase_hpm_4(__uref, __mref, __csr) \
+ switchcase_hpm_2(__uref, __mref, __csr + 0) \
+ switchcase_hpm_2(__uref, __mref, __csr + 2)
+#define switchcase_hpm_8(__uref, __mref, __csr) \
+ switchcase_hpm_4(__uref, __mref, __csr + 0) \
+ switchcase_hpm_4(__uref, __mref, __csr + 4)
+#define switchcase_hpm_16(__uref, __mref, __csr) \
+ switchcase_hpm_8(__uref, __mref, __csr + 0) \
+ switchcase_hpm_8(__uref, __mref, __csr + 8)
+
+ switchcase_hpm(CSR_CYCLE, CSR_MCYCLE, CSR_HPMCOUNTER3)
+ switchcase_hpm_4(CSR_CYCLE, CSR_MCYCLE, CSR_HPMCOUNTER4)
+ switchcase_hpm_8(CSR_CYCLE, CSR_MCYCLE, CSR_HPMCOUNTER8)
+ switchcase_hpm_16(CSR_CYCLE, CSR_MCYCLE, CSR_HPMCOUNTER16)
+
+#if __riscv_xlen == 32
+ switchcase_hpm(CSR_CYCLEH, CSR_MCYCLEH, CSR_HPMCOUNTER3H)
+ switchcase_hpm_4(CSR_CYCLEH, CSR_MCYCLEH, CSR_HPMCOUNTER4H)
+ switchcase_hpm_8(CSR_CYCLEH, CSR_MCYCLEH, CSR_HPMCOUNTER8H)
+ switchcase_hpm_16(CSR_CYCLEH, CSR_MCYCLEH, CSR_HPMCOUNTER16H)
+#endif
+
+#undef switchcase_hpm_16
+#undef switchcase_hpm_8
+#undef switchcase_hpm_4
+#undef switchcase_hpm_2
+#undef switchcase_hpm
+
+ default:
+ ret = SBI_ENOTSUPP;
+ break;
+	}
+
+ if (ret)
+ sbi_dprintf("%s: hartid%d: invalid csr_num=0x%x\n",
+ __func__, current_hartid(), csr_num);
+
+ return ret;
+}
+
+int sbi_emulate_csr_write(int csr_num, struct sbi_trap_regs *regs,
+ ulong csr_val)
+{
+ int ret = 0;
+ ulong prev_mode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
+#if __riscv_xlen == 32
+ bool virt = (regs->mstatusH & MSTATUSH_MPV) ? TRUE : FALSE;
+#else
+ bool virt = (regs->mstatus & MSTATUS_MPV) ? TRUE : FALSE;
+#endif
+
+ switch (csr_num) {
+ case CSR_HTIMEDELTA:
+ if (prev_mode == PRV_S && !virt)
+ sbi_timer_set_delta(csr_val);
+ else
+ ret = SBI_ENOTSUPP;
+ break;
+#if __riscv_xlen == 32
+ case CSR_HTIMEDELTAH:
+ if (prev_mode == PRV_S && !virt)
+ sbi_timer_set_delta_upper(csr_val);
+ else
+ ret = SBI_ENOTSUPP;
+ break;
+#endif
+ default:
+ ret = SBI_ENOTSUPP;
+ break;
+	}
+
+ if (ret)
+ sbi_dprintf("%s: hartid%d: invalid csr_num=0x%x\n",
+ __func__, current_hartid(), csr_num);
+
+ return ret;
+}
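
The switchcase_hpm_* macros above emit the 29 HPMCOUNTER3..31 case labels by
repeated doubling rather than writing each label out. A standalone toy
reproduction of the technique (illustrative, not OpenSBI code):

#include <stdio.h>

/* Each _N macro instantiates the _N/2 macro twice, so the 29 case
 * labels above come from just 1 + 4 + 8 + 16 expansions. */
#define CASE(__base, __n) \
	case (__base) + (__n): \
		printf("hpmcounter%d\n", (__n)); \
		break;
#define CASE_2(__base, __n) CASE(__base, __n) CASE(__base, (__n) + 1)
#define CASE_4(__base, __n) CASE_2(__base, __n) CASE_2(__base, (__n) + 2)

static int classify(int csr)
{
	switch (csr) {
	CASE(0xC00, 3)		/* hpmcounter3 */
	CASE_4(0xC00, 4)	/* hpmcounter4..7 */
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	return classify(0xC05);	/* prints "hpmcounter5", returns 0 */
}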
diff --git a/roms/opensbi/lib/sbi/sbi_expected_trap.S b/roms/opensbi/lib/sbi/sbi_expected_trap.S
new file mode 100644
index 000000000..24891c74c
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_expected_trap.S
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_trap.h>
+
+	/*
+	 * We assume that the faulting instruction is 4-byte long and blindly
+	 * increment MEPC by 4.
+	 *
+	 * The trap info will be saved as follows:
+	 * A3 <- pointer to struct sbi_trap_info
+	 * A4 <- temporary
+	 */
+
+ .align 3
+ .global __sbi_expected_trap
+__sbi_expected_trap:
+	/* Without H-extension, the MTVAL2 and MTINST CSRs are not available */
+ csrr a4, CSR_MEPC
+ REG_S a4, SBI_TRAP_INFO_OFFSET(epc)(a3)
+ csrr a4, CSR_MCAUSE
+ REG_S a4, SBI_TRAP_INFO_OFFSET(cause)(a3)
+ csrr a4, CSR_MTVAL
+ REG_S a4, SBI_TRAP_INFO_OFFSET(tval)(a3)
+ REG_S zero, SBI_TRAP_INFO_OFFSET(tval2)(a3)
+ REG_S zero, SBI_TRAP_INFO_OFFSET(tinst)(a3)
+ csrr a4, CSR_MEPC
+ addi a4, a4, 4
+ csrw CSR_MEPC, a4
+ mret
+
+ .align 3
+ .global __sbi_expected_trap_hext
+__sbi_expected_trap_hext:
+	/* With H-extension, the MTVAL2 and MTINST CSRs are available */
+ csrr a4, CSR_MEPC
+ REG_S a4, SBI_TRAP_INFO_OFFSET(epc)(a3)
+ csrr a4, CSR_MCAUSE
+ REG_S a4, SBI_TRAP_INFO_OFFSET(cause)(a3)
+ csrr a4, CSR_MTVAL
+ REG_S a4, SBI_TRAP_INFO_OFFSET(tval)(a3)
+ csrr a4, CSR_MTVAL2
+ REG_S a4, SBI_TRAP_INFO_OFFSET(tval2)(a3)
+ csrr a4, CSR_MTINST
+ REG_S a4, SBI_TRAP_INFO_OFFSET(tinst)(a3)
+ csrr a4, CSR_MEPC
+ addi a4, a4, 4
+ csrw CSR_MEPC, a4
+ mret
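
These stubs are installed in MTVEC around accesses that may legitimately trap:
on a fault they record the trap details through the pointer in A3 and resume
after the 4-byte faulting instruction. A conceptual sketch of such a guarded
access is below; it is an assumption-laden illustration, since the real
csr_read_allowed()/csr_write_allowed() helpers in <sbi/riscv_asm.h> are asm
macros with their own register discipline.

#include <sbi/riscv_asm.h>
#include <sbi/sbi_trap.h>

extern void __sbi_expected_trap(void);

/* Illustrative only: read the TIME CSR, catching a possible
 * illegal-instruction trap instead of taking it fatally. */
static unsigned long probe_time_csr(struct sbi_trap_info *trap)
{
	register unsigned long tinfo asm("a3") = (unsigned long)trap;
	unsigned long old_mtvec, val = 0;

	trap->cause = 0;
	old_mtvec = csr_swap(CSR_MTVEC, (unsigned long)&__sbi_expected_trap);
	asm volatile("csrr %0, time" : "=r"(val) : "r"(tinfo));
	csr_swap(CSR_MTVEC, old_mtvec);

	return trap->cause ? 0 : val;
}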
diff --git a/roms/opensbi/lib/sbi/sbi_fifo.c b/roms/opensbi/lib/sbi/sbi_fifo.c
new file mode 100644
index 000000000..8d1dbf044
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_fifo.c
@@ -0,0 +1,192 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ *
+ */
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_fifo.h>
+#include <sbi/sbi_string.h>
+
+void sbi_fifo_init(struct sbi_fifo *fifo, void *queue_mem, u16 entries,
+ u16 entry_size)
+{
+ fifo->queue = queue_mem;
+ fifo->num_entries = entries;
+ fifo->entry_size = entry_size;
+ SPIN_LOCK_INIT(&fifo->qlock);
+ fifo->avail = fifo->tail = 0;
+ sbi_memset(fifo->queue, 0, (size_t)entries * entry_size);
+}
+
+/* Note: must be called with fifo->qlock held */
+static inline bool __sbi_fifo_is_full(struct sbi_fifo *fifo)
+{
+ return (fifo->avail == fifo->num_entries) ? TRUE : FALSE;
+}
+
+u16 sbi_fifo_avail(struct sbi_fifo *fifo)
+{
+ u16 ret;
+
+ if (!fifo)
+ return 0;
+
+ spin_lock(&fifo->qlock);
+ ret = fifo->avail;
+ spin_unlock(&fifo->qlock);
+
+ return ret;
+}
+
+bool sbi_fifo_is_full(struct sbi_fifo *fifo)
+{
+ bool ret;
+
+ spin_lock(&fifo->qlock);
+ ret = __sbi_fifo_is_full(fifo);
+ spin_unlock(&fifo->qlock);
+
+ return ret;
+}
+
+/* Note: must be called with fifo->qlock held */
+static inline void __sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
+{
+ u32 head;
+
+ head = (u32)fifo->tail + fifo->avail;
+ if (head >= fifo->num_entries)
+ head = head - fifo->num_entries;
+
+ sbi_memcpy(fifo->queue + head * fifo->entry_size, data, fifo->entry_size);
+
+ fifo->avail++;
+}
+
+/* Note: must be called with fifo->qlock held */
+static inline bool __sbi_fifo_is_empty(struct sbi_fifo *fifo)
+{
+ return (fifo->avail == 0) ? TRUE : FALSE;
+}
+
+bool sbi_fifo_is_empty(struct sbi_fifo *fifo)
+{
+ bool ret;
+
+ spin_lock(&fifo->qlock);
+ ret = __sbi_fifo_is_empty(fifo);
+ spin_unlock(&fifo->qlock);
+
+ return ret;
+}
+
+/* Note: must be called with fifo->qlock held */
+static inline void __sbi_fifo_reset(struct sbi_fifo *fifo)
+{
+ size_t size = (size_t)fifo->num_entries * fifo->entry_size;
+
+ fifo->avail = 0;
+ fifo->tail = 0;
+ sbi_memset(fifo->queue, 0, size);
+}
+
+bool sbi_fifo_reset(struct sbi_fifo *fifo)
+{
+ if (!fifo)
+ return FALSE;
+
+ spin_lock(&fifo->qlock);
+ __sbi_fifo_reset(fifo);
+ spin_unlock(&fifo->qlock);
+
+ return TRUE;
+}
+
+/**
+ * Provide a helper function to do inplace update to the fifo.
+ * Note: The callback function is called with lock being held.
+ *
+ * **Do not** invoke any other fifo function from callback. Otherwise, it will
+ * lead to deadlock.
+ */
+int sbi_fifo_inplace_update(struct sbi_fifo *fifo, void *in,
+ int (*fptr)(void *in, void *data))
+{
+ int i, index = 0;
+ int ret = SBI_FIFO_UNCHANGED;
+ void *entry;
+
+ if (!fifo || !in)
+ return ret;
+
+ spin_lock(&fifo->qlock);
+
+ if (__sbi_fifo_is_empty(fifo)) {
+ spin_unlock(&fifo->qlock);
+ return ret;
+ }
+
+ for (i = 0; i < fifo->avail; i++) {
+ index = fifo->tail + i;
+ if (index >= fifo->num_entries)
+ index = index - fifo->num_entries;
+ entry = (void *)fifo->queue + (u32)index * fifo->entry_size;
+ ret = fptr(in, entry);
+
+ if (ret == SBI_FIFO_SKIP || ret == SBI_FIFO_UPDATED) {
+ break;
+ }
+ }
+ spin_unlock(&fifo->qlock);
+
+ return ret;
+}
+
+int sbi_fifo_enqueue(struct sbi_fifo *fifo, void *data)
+{
+ if (!fifo || !data)
+ return SBI_EINVAL;
+
+ spin_lock(&fifo->qlock);
+
+ if (__sbi_fifo_is_full(fifo)) {
+ spin_unlock(&fifo->qlock);
+ return SBI_ENOSPC;
+ }
+ __sbi_fifo_enqueue(fifo, data);
+
+ spin_unlock(&fifo->qlock);
+
+ return 0;
+}
+
+int sbi_fifo_dequeue(struct sbi_fifo *fifo, void *data)
+{
+ if (!fifo || !data)
+ return SBI_EINVAL;
+
+ spin_lock(&fifo->qlock);
+
+ if (__sbi_fifo_is_empty(fifo)) {
+ spin_unlock(&fifo->qlock);
+ return SBI_ENOENT;
+ }
+
+ sbi_memcpy(data, fifo->queue + (u32)fifo->tail * fifo->entry_size,
+ fifo->entry_size);
+
+ fifo->avail--;
+ fifo->tail++;
+ if (fifo->tail >= fifo->num_entries)
+ fifo->tail = 0;
+
+ spin_unlock(&fifo->qlock);
+
+ return 0;
+}
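
The in-place update helper above lets callers deduplicate or patch queued
entries without racing enqueue/dequeue. A usage sketch under assumed types
(struct my_event and enqueue_unique() are hypothetical; the SBI_FIFO_* codes
come from <sbi/sbi_fifo.h>):

#include <sbi/sbi_fifo.h>

struct my_event {
	unsigned long id;
};

/* Runs with fifo->qlock held: must not call other fifo APIs. */
static int match_event(void *in, void *data)
{
	struct my_event *want = in, *have = data;

	if (want->id == have->id)
		return SBI_FIFO_SKIP;		/* duplicate, stop scanning */

	return SBI_FIFO_UNCHANGED;		/* keep scanning */
}

static int enqueue_unique(struct sbi_fifo *q, struct my_event *ev)
{
	if (sbi_fifo_inplace_update(q, ev, match_event) == SBI_FIFO_SKIP)
		return 0;			/* already queued */

	return sbi_fifo_enqueue(q, ev);
}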
diff --git a/roms/opensbi/lib/sbi/sbi_hart.c b/roms/opensbi/lib/sbi/sbi_hart.c
new file mode 100644
index 000000000..fc86e9f31
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_hart.c
@@ -0,0 +1,536 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_fp.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_csr_detect.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_math.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_string.h>
+#include <sbi/sbi_trap.h>
+
+extern void __sbi_expected_trap(void);
+extern void __sbi_expected_trap_hext(void);
+
+void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;
+
+struct hart_features {
+ unsigned long features;
+ unsigned int pmp_count;
+ unsigned int pmp_addr_bits;
+ unsigned long pmp_gran;
+ unsigned int mhpm_count;
+};
+static unsigned long hart_features_offset;
+
+static void mstatus_init(struct sbi_scratch *scratch)
+{
+ unsigned long mstatus_val = 0;
+
+ /* Enable FPU */
+ if (misa_extension('D') || misa_extension('F'))
+ mstatus_val |= MSTATUS_FS;
+
+ /* Enable Vector context */
+ if (misa_extension('V'))
+ mstatus_val |= MSTATUS_VS;
+
+ csr_write(CSR_MSTATUS, mstatus_val);
+
+ /* Enable user/supervisor use of perf counters */
+ if (misa_extension('S') &&
+ sbi_hart_has_feature(scratch, SBI_HART_HAS_SCOUNTEREN))
+ csr_write(CSR_SCOUNTEREN, -1);
+ if (sbi_hart_has_feature(scratch, SBI_HART_HAS_MCOUNTEREN))
+ csr_write(CSR_MCOUNTEREN, -1);
+
+ /* Disable all interrupts */
+ csr_write(CSR_MIE, 0);
+
+ /* Disable S-mode paging */
+ if (misa_extension('S'))
+ csr_write(CSR_SATP, 0);
+}
+
+static int fp_init(struct sbi_scratch *scratch)
+{
+#ifdef __riscv_flen
+ int i;
+#endif
+
+ if (!misa_extension('D') && !misa_extension('F'))
+ return 0;
+
+ if (!(csr_read(CSR_MSTATUS) & MSTATUS_FS))
+ return SBI_EINVAL;
+
+#ifdef __riscv_flen
+ for (i = 0; i < 32; i++)
+ init_fp_reg(i);
+ csr_write(CSR_FCSR, 0);
+#endif
+
+ return 0;
+}
+
+static int delegate_traps(struct sbi_scratch *scratch)
+{
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ unsigned long interrupts, exceptions;
+
+ if (!misa_extension('S'))
+ /* No delegation possible as mideleg does not exist */
+ return 0;
+
+ /* Send M-mode interrupts and most exceptions to S-mode */
+ interrupts = MIP_SSIP | MIP_STIP | MIP_SEIP;
+ exceptions = (1U << CAUSE_MISALIGNED_FETCH) | (1U << CAUSE_BREAKPOINT) |
+ (1U << CAUSE_USER_ECALL);
+ if (sbi_platform_has_mfaults_delegation(plat))
+ exceptions |= (1U << CAUSE_FETCH_PAGE_FAULT) |
+ (1U << CAUSE_LOAD_PAGE_FAULT) |
+ (1U << CAUSE_STORE_PAGE_FAULT);
+
+ /*
+ * If hypervisor extension available then we only handle hypervisor
+ * calls (i.e. ecalls from HS-mode) in M-mode.
+ *
+ * The HS-mode will additionally handle supervisor calls (i.e. ecalls
+ * from VS-mode), Guest page faults and Virtual interrupts.
+ */
+ if (misa_extension('H')) {
+ exceptions |= (1U << CAUSE_VIRTUAL_SUPERVISOR_ECALL);
+ exceptions |= (1U << CAUSE_FETCH_GUEST_PAGE_FAULT);
+ exceptions |= (1U << CAUSE_LOAD_GUEST_PAGE_FAULT);
+ exceptions |= (1U << CAUSE_VIRTUAL_INST_FAULT);
+ exceptions |= (1U << CAUSE_STORE_GUEST_PAGE_FAULT);
+ }
+
+ csr_write(CSR_MIDELEG, interrupts);
+ csr_write(CSR_MEDELEG, exceptions);
+
+ return 0;
+}
+
+void sbi_hart_delegation_dump(struct sbi_scratch *scratch,
+ const char *prefix, const char *suffix)
+{
+ if (!misa_extension('S'))
+		/* No delegation possible as mideleg does not exist */
+ return;
+
+#if __riscv_xlen == 32
+ sbi_printf("%sMIDELEG%s: 0x%08lx\n",
+ prefix, suffix, csr_read(CSR_MIDELEG));
+ sbi_printf("%sMEDELEG%s: 0x%08lx\n",
+ prefix, suffix, csr_read(CSR_MEDELEG));
+#else
+ sbi_printf("%sMIDELEG%s: 0x%016lx\n",
+ prefix, suffix, csr_read(CSR_MIDELEG));
+ sbi_printf("%sMEDELEG%s: 0x%016lx\n",
+ prefix, suffix, csr_read(CSR_MEDELEG));
+#endif
+}
+
+unsigned int sbi_hart_mhpm_count(struct sbi_scratch *scratch)
+{
+ struct hart_features *hfeatures =
+ sbi_scratch_offset_ptr(scratch, hart_features_offset);
+
+ return hfeatures->mhpm_count;
+}
+
+unsigned int sbi_hart_pmp_count(struct sbi_scratch *scratch)
+{
+ struct hart_features *hfeatures =
+ sbi_scratch_offset_ptr(scratch, hart_features_offset);
+
+ return hfeatures->pmp_count;
+}
+
+unsigned long sbi_hart_pmp_granularity(struct sbi_scratch *scratch)
+{
+ struct hart_features *hfeatures =
+ sbi_scratch_offset_ptr(scratch, hart_features_offset);
+
+ return hfeatures->pmp_gran;
+}
+
+unsigned int sbi_hart_pmp_addrbits(struct sbi_scratch *scratch)
+{
+ struct hart_features *hfeatures =
+ sbi_scratch_offset_ptr(scratch, hart_features_offset);
+
+ return hfeatures->pmp_addr_bits;
+}
+
+int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
+{
+ struct sbi_domain_memregion *reg;
+ struct sbi_domain *dom = sbi_domain_thishart_ptr();
+ unsigned int pmp_idx = 0, pmp_flags, pmp_bits, pmp_gran_log2;
+ unsigned int pmp_count = sbi_hart_pmp_count(scratch);
+ unsigned long pmp_addr = 0, pmp_addr_max = 0;
+
+ if (!pmp_count)
+ return 0;
+
+ pmp_gran_log2 = log2roundup(sbi_hart_pmp_granularity(scratch));
+ pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
+ pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
+
+ sbi_domain_for_each_memregion(dom, reg) {
+ if (pmp_count <= pmp_idx)
+ break;
+
+ pmp_flags = 0;
+ if (reg->flags & SBI_DOMAIN_MEMREGION_READABLE)
+ pmp_flags |= PMP_R;
+ if (reg->flags & SBI_DOMAIN_MEMREGION_WRITEABLE)
+ pmp_flags |= PMP_W;
+ if (reg->flags & SBI_DOMAIN_MEMREGION_EXECUTABLE)
+ pmp_flags |= PMP_X;
+ if (reg->flags & SBI_DOMAIN_MEMREGION_MMODE)
+ pmp_flags |= PMP_L;
+
+ pmp_addr = reg->base >> PMP_SHIFT;
+ if (pmp_gran_log2 <= reg->order && pmp_addr < pmp_addr_max)
+ pmp_set(pmp_idx++, pmp_flags, reg->base, reg->order);
+ else {
+			sbi_printf("Cannot configure PMP for domain %s ", dom->name);
+			sbi_printf("because memory region base 0x%lx or order %lu is out of range\n",
+				   reg->base, reg->order);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Check whether a particular hart feature is available
+ *
+ * @param scratch pointer to the HART scratch space
+ * @param feature the feature to check
+ * @returns true (feature available) or false (feature not available)
+ */
+bool sbi_hart_has_feature(struct sbi_scratch *scratch, unsigned long feature)
+{
+ struct hart_features *hfeatures =
+ sbi_scratch_offset_ptr(scratch, hart_features_offset);
+
+ if (hfeatures->features & feature)
+ return true;
+ else
+ return false;
+}
+
+static unsigned long hart_get_features(struct sbi_scratch *scratch)
+{
+ struct hart_features *hfeatures =
+ sbi_scratch_offset_ptr(scratch, hart_features_offset);
+
+ return hfeatures->features;
+}
+
+static inline char *sbi_hart_feature_id2string(unsigned long feature)
+{
+ char *fstr = NULL;
+
+ if (!feature)
+ return NULL;
+
+ switch (feature) {
+ case SBI_HART_HAS_SCOUNTEREN:
+ fstr = "scounteren";
+ break;
+ case SBI_HART_HAS_MCOUNTEREN:
+ fstr = "mcounteren";
+ break;
+ case SBI_HART_HAS_TIME:
+ fstr = "time";
+ break;
+ default:
+ break;
+ }
+
+ return fstr;
+}
+
+/**
+ * Get the hart features in string format
+ *
+ * @param scratch pointer to the HART scratch space
+ * @param features_str pointer to a char array where the features string will be
+ * updated
+ * @param nfstr length of the features_str. The feature string will be truncated
+ * if nfstr is not long enough.
+ */
+void sbi_hart_get_features_str(struct sbi_scratch *scratch,
+ char *features_str, int nfstr)
+{
+ unsigned long features, feat = 1UL;
+ char *temp;
+ int offset = 0;
+
+ if (!features_str || nfstr <= 0)
+ return;
+ sbi_memset(features_str, 0, nfstr);
+
+ features = hart_get_features(scratch);
+ if (!features)
+ goto done;
+
+ do {
+ if (features & feat) {
+ temp = sbi_hart_feature_id2string(feat);
+ if (temp) {
+				sbi_snprintf(features_str + offset,
+					     nfstr - offset, "%s,", temp);
+ offset = offset + sbi_strlen(temp) + 1;
+ }
+ }
+ feat = feat << 1;
+ } while (feat <= SBI_HART_HAS_LAST_FEATURE);
+
+done:
+ if (offset)
+ features_str[offset - 1] = '\0';
+ else
+ sbi_strncpy(features_str, "none", nfstr);
+}
+
+static unsigned long hart_pmp_get_allowed_addr(void)
+{
+ unsigned long val = 0;
+ struct sbi_trap_info trap = {0};
+
+	csr_write_allowed(CSR_PMPADDR0, (ulong)&trap, PMP_ADDR_MASK);
+ if (!trap.cause) {
+ val = csr_read_allowed(CSR_PMPADDR0, (ulong)&trap);
+ if (trap.cause)
+ val = 0;
+ }
+
+ return val;
+}
+
+static void hart_detect_features(struct sbi_scratch *scratch)
+{
+ struct sbi_trap_info trap = {0};
+ struct hart_features *hfeatures;
+ unsigned long val;
+
+ /* Reset hart features */
+ hfeatures = sbi_scratch_offset_ptr(scratch, hart_features_offset);
+ hfeatures->features = 0;
+ hfeatures->pmp_count = 0;
+ hfeatures->mhpm_count = 0;
+
+#define __check_csr(__csr, __rdonly, __wrval, __field, __skip) \
+ val = csr_read_allowed(__csr, (ulong)&trap); \
+ if (!trap.cause) { \
+ if (__rdonly) { \
+ (hfeatures->__field)++; \
+ } else { \
+ csr_write_allowed(__csr, (ulong)&trap, __wrval);\
+ if (!trap.cause) { \
+ if (csr_swap(__csr, val) == __wrval) \
+ (hfeatures->__field)++; \
+ else \
+ goto __skip; \
+ } else { \
+ goto __skip; \
+ } \
+ } \
+ } else { \
+ goto __skip; \
+ }
+#define __check_csr_2(__csr, __rdonly, __wrval, __field, __skip) \
+ __check_csr(__csr + 0, __rdonly, __wrval, __field, __skip) \
+ __check_csr(__csr + 1, __rdonly, __wrval, __field, __skip)
+#define __check_csr_4(__csr, __rdonly, __wrval, __field, __skip) \
+ __check_csr_2(__csr + 0, __rdonly, __wrval, __field, __skip) \
+ __check_csr_2(__csr + 2, __rdonly, __wrval, __field, __skip)
+#define __check_csr_8(__csr, __rdonly, __wrval, __field, __skip) \
+ __check_csr_4(__csr + 0, __rdonly, __wrval, __field, __skip) \
+ __check_csr_4(__csr + 4, __rdonly, __wrval, __field, __skip)
+#define __check_csr_16(__csr, __rdonly, __wrval, __field, __skip) \
+ __check_csr_8(__csr + 0, __rdonly, __wrval, __field, __skip) \
+ __check_csr_8(__csr + 8, __rdonly, __wrval, __field, __skip)
+#define __check_csr_32(__csr, __rdonly, __wrval, __field, __skip) \
+ __check_csr_16(__csr + 0, __rdonly, __wrval, __field, __skip) \
+ __check_csr_16(__csr + 16, __rdonly, __wrval, __field, __skip)
+#define __check_csr_64(__csr, __rdonly, __wrval, __field, __skip) \
+ __check_csr_32(__csr + 0, __rdonly, __wrval, __field, __skip) \
+ __check_csr_32(__csr + 32, __rdonly, __wrval, __field, __skip)
+
+ /**
+ * Detect the allowed address bits & granularity. At least PMPADDR0
+ * should be implemented.
+ */
+ val = hart_pmp_get_allowed_addr();
+ if (val) {
+ hfeatures->pmp_gran = 1 << (__ffs(val) + 2);
+ hfeatures->pmp_addr_bits = __fls(val) + 1;
+		/* Detect number of PMP regions. At least PMPADDR0 should be implemented */
+ __check_csr_64(CSR_PMPADDR0, 0, val, pmp_count, __pmp_skip);
+ }
+__pmp_skip:
+
+ /* Detect number of MHPM counters */
+ __check_csr(CSR_MHPMCOUNTER3, 0, 1UL, mhpm_count, __mhpm_skip);
+ __check_csr_4(CSR_MHPMCOUNTER4, 0, 1UL, mhpm_count, __mhpm_skip);
+ __check_csr_8(CSR_MHPMCOUNTER8, 0, 1UL, mhpm_count, __mhpm_skip);
+ __check_csr_16(CSR_MHPMCOUNTER16, 0, 1UL, mhpm_count, __mhpm_skip);
+__mhpm_skip:
+
+#undef __check_csr_64
+#undef __check_csr_32
+#undef __check_csr_16
+#undef __check_csr_8
+#undef __check_csr_4
+#undef __check_csr_2
+#undef __check_csr
+
+ /* Detect if hart supports SCOUNTEREN feature */
+ trap.cause = 0;
+ val = csr_read_allowed(CSR_SCOUNTEREN, (unsigned long)&trap);
+ if (!trap.cause) {
+ csr_write_allowed(CSR_SCOUNTEREN, (unsigned long)&trap, val);
+ if (!trap.cause)
+ hfeatures->features |= SBI_HART_HAS_SCOUNTEREN;
+ }
+
+ /* Detect if hart supports MCOUNTEREN feature */
+ trap.cause = 0;
+ val = csr_read_allowed(CSR_MCOUNTEREN, (unsigned long)&trap);
+ if (!trap.cause) {
+ csr_write_allowed(CSR_MCOUNTEREN, (unsigned long)&trap, val);
+ if (!trap.cause)
+ hfeatures->features |= SBI_HART_HAS_MCOUNTEREN;
+ }
+
+ /* Detect if hart supports time CSR */
+ trap.cause = 0;
+ csr_read_allowed(CSR_TIME, (unsigned long)&trap);
+ if (!trap.cause)
+ hfeatures->features |= SBI_HART_HAS_TIME;
+}
+
+int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+ int rc;
+
+ if (cold_boot) {
+ if (misa_extension('H'))
+ sbi_hart_expected_trap = &__sbi_expected_trap_hext;
+
+ hart_features_offset = sbi_scratch_alloc_offset(
+ sizeof(struct hart_features),
+ "HART_FEATURES");
+ if (!hart_features_offset)
+ return SBI_ENOMEM;
+ }
+
+ hart_detect_features(scratch);
+
+ mstatus_init(scratch);
+
+ rc = fp_init(scratch);
+ if (rc)
+ return rc;
+
+ rc = delegate_traps(scratch);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+void __attribute__((noreturn)) sbi_hart_hang(void)
+{
+ while (1)
+ wfi();
+ __builtin_unreachable();
+}
+
+void __attribute__((noreturn))
+sbi_hart_switch_mode(unsigned long arg0, unsigned long arg1,
+ unsigned long next_addr, unsigned long next_mode,
+ bool next_virt)
+{
+#if __riscv_xlen == 32
+ unsigned long val, valH;
+#else
+ unsigned long val;
+#endif
+
+ switch (next_mode) {
+ case PRV_M:
+ break;
+ case PRV_S:
+ if (!misa_extension('S'))
+ sbi_hart_hang();
+ break;
+ case PRV_U:
+ if (!misa_extension('U'))
+ sbi_hart_hang();
+ break;
+ default:
+ sbi_hart_hang();
+ }
+
+ val = csr_read(CSR_MSTATUS);
+ val = INSERT_FIELD(val, MSTATUS_MPP, next_mode);
+ val = INSERT_FIELD(val, MSTATUS_MPIE, 0);
+#if __riscv_xlen == 32
+ if (misa_extension('H')) {
+ valH = csr_read(CSR_MSTATUSH);
+ if (next_virt)
+ valH = INSERT_FIELD(valH, MSTATUSH_MPV, 1);
+ else
+ valH = INSERT_FIELD(valH, MSTATUSH_MPV, 0);
+ csr_write(CSR_MSTATUSH, valH);
+ }
+#else
+ if (misa_extension('H')) {
+ if (next_virt)
+ val = INSERT_FIELD(val, MSTATUS_MPV, 1);
+ else
+ val = INSERT_FIELD(val, MSTATUS_MPV, 0);
+ }
+#endif
+ csr_write(CSR_MSTATUS, val);
+ csr_write(CSR_MEPC, next_addr);
+
+ if (next_mode == PRV_S) {
+ csr_write(CSR_STVEC, next_addr);
+ csr_write(CSR_SSCRATCH, 0);
+ csr_write(CSR_SIE, 0);
+ csr_write(CSR_SATP, 0);
+ } else if (next_mode == PRV_U) {
+ if (misa_extension('N')) {
+ csr_write(CSR_UTVEC, next_addr);
+ csr_write(CSR_USCRATCH, 0);
+ csr_write(CSR_UIE, 0);
+ }
+ }
+
+ register unsigned long a0 asm("a0") = arg0;
+ register unsigned long a1 asm("a1") = arg1;
+ __asm__ __volatile__("mret" : : "r"(a0), "r"(a1));
+ __builtin_unreachable();
+}
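
sbi_hart_pmp_configure() above rejects regions whose order is below the
detected granularity because pmp_set() (in riscv_asm.c) programs regions in
NAPOT form. A sketch of the NAPOT address encoding this implies, as an
illustrative helper rather than the real pmp_set():

/* For a naturally aligned power-of-two region of size 2^order bytes
 * (order >= 3), pmpaddrN holds the address shifted right by 2 with
 * the low (order - 3) bits set. Illustrative helper only. */
static unsigned long napot_addr(unsigned long base, unsigned int order)
{
	return (base >> 2) | ((1UL << (order - 3)) - 1);
}

/* Example: a 4 KiB region at 0x80000000 encodes as
 * napot_addr(0x80000000, 12) == 0x200001ff. */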
diff --git a/roms/opensbi/lib/sbi/sbi_hfence.S b/roms/opensbi/lib/sbi/sbi_hfence.S
new file mode 100644
index 000000000..d05becbf2
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_hfence.S
@@ -0,0 +1,135 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+ /*
+ * HFENCE.GVMA rs1, rs2
+ * HFENCE.GVMA zero, rs2
+ * HFENCE.GVMA rs1
+ * HFENCE.GVMA
+ *
+ * rs1!=zero and rs2!=zero ==> HFENCE.GVMA rs1, rs2
+ * rs1==zero and rs2!=zero ==> HFENCE.GVMA zero, rs2
+ * rs1!=zero and rs2==zero ==> HFENCE.GVMA rs1
+ * rs1==zero and rs2==zero ==> HFENCE.GVMA
+ *
+ * Instruction encoding of HFENCE.GVMA is:
+ * 0110001 rs2(5) rs1(5) 000 00000 1110011
+ */
+
+ .align 3
+ .global __sbi_hfence_gvma_vmid_gpa
+__sbi_hfence_gvma_vmid_gpa:
+ /*
+ * rs1 = a0 (GPA)
+ * rs2 = a1 (VMID)
+ * HFENCE.GVMA a0, a1
+ * 0110001 01011 01010 000 00000 1110011
+ */
+ .word 0x62b50073
+ ret
+
+ .align 3
+ .global __sbi_hfence_gvma_vmid
+__sbi_hfence_gvma_vmid:
+ /*
+ * rs1 = zero
+ * rs2 = a0 (VMID)
+ * HFENCE.GVMA zero, a0
+ * 0110001 01010 00000 000 00000 1110011
+ */
+ .word 0x62a00073
+ ret
+
+ .align 3
+ .global __sbi_hfence_gvma_gpa
+__sbi_hfence_gvma_gpa:
+ /*
+ * rs1 = a0 (GPA)
+ * rs2 = zero
+ * HFENCE.GVMA a0
+ * 0110001 00000 01010 000 00000 1110011
+ */
+ .word 0x62050073
+ ret
+
+ .align 3
+ .global __sbi_hfence_gvma_all
+__sbi_hfence_gvma_all:
+ /*
+ * rs1 = zero
+ * rs2 = zero
+ * HFENCE.GVMA
+ * 0110001 00000 00000 000 00000 1110011
+ */
+ .word 0x62000073
+ ret
+
+ /*
+ * HFENCE.VVMA rs1, rs2
+ * HFENCE.VVMA zero, rs2
+ * HFENCE.VVMA rs1
+ * HFENCE.VVMA
+ *
+ * rs1!=zero and rs2!=zero ==> HFENCE.VVMA rs1, rs2
+ * rs1==zero and rs2!=zero ==> HFENCE.VVMA zero, rs2
+ * rs1!=zero and rs2==zero ==> HFENCE.VVMA rs1
+ * rs1==zero and rs2==zero ==> HFENCE.VVMA
+ *
+ * Instruction encoding of HFENCE.VVMA is:
+ * 0010001 rs2(5) rs1(5) 000 00000 1110011
+ */
+
+ .align 3
+ .global __sbi_hfence_vvma_asid_va
+__sbi_hfence_vvma_asid_va:
+ /*
+ * rs1 = a0 (VA)
+ * rs2 = a1 (ASID)
+ * HFENCE.VVMA a0, a1
+ * 0010001 01011 01010 000 00000 1110011
+ */
+ .word 0x22b50073
+ ret
+
+ .align 3
+ .global __sbi_hfence_vvma_asid
+__sbi_hfence_vvma_asid:
+ /*
+ * rs1 = zero
+ * rs2 = a0 (ASID)
+ * HFENCE.VVMA zero, a0
+ * 0010001 01010 00000 000 00000 1110011
+ */
+ .word 0x22a00073
+ ret
+
+ .align 3
+ .global __sbi_hfence_vvma_va
+__sbi_hfence_vvma_va:
+ /*
+ * rs1 = a0 (VA)
+ * rs2 = zero
+	 * HFENCE.VVMA a0
+ * 0010001 00000 01010 000 00000 1110011
+ */
+ .word 0x22050073
+ ret
+
+ .align 3
+ .global __sbi_hfence_vvma_all
+__sbi_hfence_vvma_all:
+ /*
+ * rs1 = zero
+ * rs2 = zero
+ * HFENCE.VVMA
+ * 0010001 00000 00000 000 00000 1110011
+ */
+ .word 0x22000073
+ ret
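
Because these instructions are hand-assembled as .word constants (so that
toolchains without H-extension support can build them), the encodings can be
cross-checked against the R-type layout quoted in the comments. A standalone
sanity check:

#include <assert.h>
#include <stdint.h>

/* R-type SYSTEM encoding: funct7|rs2|rs1|funct3=000|rd=00000|0x73 */
static uint32_t rtype(uint32_t funct7, uint32_t rs2, uint32_t rs1)
{
	return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | 0x73;
}

int main(void)
{
	assert(rtype(0x31, 11, 10) == 0x62b50073); /* HFENCE.GVMA a0, a1 */
	assert(rtype(0x31, 0, 0) == 0x62000073);   /* HFENCE.GVMA */
	assert(rtype(0x11, 11, 10) == 0x22b50073); /* HFENCE.VVMA a0, a1 */
	assert(rtype(0x11, 0, 10) == 0x22050073);  /* HFENCE.VVMA a0 */
	return 0;
}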
diff --git a/roms/opensbi/lib/sbi/sbi_hsm.c b/roms/opensbi/lib/sbi/sbi_hsm.c
new file mode 100644
index 000000000..e1b2b2c3d
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_hsm.c
@@ -0,0 +1,291 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_ecall_interface.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_hartmask.h>
+#include <sbi/sbi_hsm.h>
+#include <sbi/sbi_init.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_console.h>
+
+static unsigned long hart_data_offset;
+
+/** Per-hart data to manage state transitions */
+struct sbi_hsm_data {
+ atomic_t state;
+};
+
+int sbi_hsm_hart_state_to_status(int state)
+{
+ int ret;
+
+ switch (state) {
+ case SBI_HART_STOPPED:
+ ret = SBI_HSM_HART_STATUS_STOPPED;
+ break;
+ case SBI_HART_STOPPING:
+ ret = SBI_HSM_HART_STATUS_STOP_PENDING;
+ break;
+ case SBI_HART_STARTING:
+ ret = SBI_HSM_HART_STATUS_START_PENDING;
+ break;
+ case SBI_HART_STARTED:
+ ret = SBI_HSM_HART_STATUS_STARTED;
+ break;
+ default:
+ ret = SBI_EINVAL;
+ }
+
+ return ret;
+}
+
+static inline int __sbi_hsm_hart_get_state(u32 hartid)
+{
+ struct sbi_hsm_data *hdata;
+ struct sbi_scratch *scratch;
+
+ scratch = sbi_hartid_to_scratch(hartid);
+ if (!scratch)
+ return SBI_HART_UNKNOWN;
+
+ hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);
+ return atomic_read(&hdata->state);
+}
+
+int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
+{
+ if (!sbi_domain_is_assigned_hart(dom, hartid))
+ return SBI_HART_UNKNOWN;
+
+ return __sbi_hsm_hart_get_state(hartid);
+}
+
+static bool sbi_hsm_hart_started(const struct sbi_domain *dom, u32 hartid)
+{
+ if (sbi_hsm_hart_get_state(dom, hartid) == SBI_HART_STARTED)
+ return TRUE;
+ else
+ return FALSE;
+}
+
+/**
+ * Get ulong HART mask for given HART base ID
+ * @param dom the domain to be used for output HART mask
+ * @param hbase the HART base ID
+ * @param out_hmask the output ulong HART mask
+ * @return 0 on success and SBI_Exxx (< 0) on failure
+ * Note: the output HART mask will be set to zero on failure as well.
+ */
+int sbi_hsm_hart_started_mask(const struct sbi_domain *dom,
+ ulong hbase, ulong *out_hmask)
+{
+ ulong i, hmask, dmask;
+ ulong hend = sbi_scratch_last_hartid() + 1;
+
+ *out_hmask = 0;
+ if (hend <= hbase)
+ return SBI_EINVAL;
+ if (BITS_PER_LONG < (hend - hbase))
+ hend = hbase + BITS_PER_LONG;
+
+ dmask = sbi_domain_get_assigned_hartmask(dom, hbase);
+ for (i = hbase; i < hend; i++) {
+ hmask = 1UL << (i - hbase);
+ if ((dmask & hmask) &&
+ (__sbi_hsm_hart_get_state(i) == SBI_HART_STARTED))
+ *out_hmask |= hmask;
+ }
+
+ return 0;
+}
+
+void sbi_hsm_prepare_next_jump(struct sbi_scratch *scratch, u32 hartid)
+{
+ u32 oldstate;
+ struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
+ hart_data_offset);
+
+ oldstate = atomic_cmpxchg(&hdata->state, SBI_HART_STARTING,
+ SBI_HART_STARTED);
+ if (oldstate != SBI_HART_STARTING)
+ sbi_hart_hang();
+}
+
+static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
+{
+ unsigned long saved_mie;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
+ hart_data_offset);
+ /* Save MIE CSR */
+ saved_mie = csr_read(CSR_MIE);
+
+ /* Set MSIE bit to receive IPI */
+ csr_set(CSR_MIE, MIP_MSIP);
+
+	/* Wait for hart state change to STARTING (see sbi_hsm_hart_start()) */
+	while (atomic_read(&hdata->state) != SBI_HART_STARTING) {
+		wfi();
+	}
+
+ /* Restore MIE CSR */
+ csr_write(CSR_MIE, saved_mie);
+
+ /* Clear current HART IPI */
+ sbi_platform_ipi_clear(plat, hartid);
+}
+
+int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
+{
+ u32 i;
+ struct sbi_scratch *rscratch;
+ struct sbi_hsm_data *hdata;
+
+ if (cold_boot) {
+ hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata),
+ "HART_DATA");
+ if (!hart_data_offset)
+ return SBI_ENOMEM;
+
+ /* Initialize hart state data for every hart */
+ for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
+ rscratch = sbi_hartid_to_scratch(i);
+ if (!rscratch)
+ continue;
+
+ hdata = sbi_scratch_offset_ptr(rscratch,
+ hart_data_offset);
+ ATOMIC_INIT(&hdata->state,
+ (i == hartid) ? SBI_HART_STARTING : SBI_HART_STOPPED);
+ }
+ } else {
+ sbi_hsm_hart_wait(scratch, hartid);
+ }
+
+ return 0;
+}
+
+void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
+{
+ u32 hstate;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
+ hart_data_offset);
+ void (*jump_warmboot)(void) = (void (*)(void))scratch->warmboot_addr;
+
+ hstate = atomic_cmpxchg(&hdata->state, SBI_HART_STOPPING,
+ SBI_HART_STOPPED);
+ if (hstate != SBI_HART_STOPPING)
+ goto fail_exit;
+
+ if (sbi_platform_has_hart_hotplug(plat)) {
+ sbi_platform_hart_stop(plat);
+ /* It should never reach here */
+ goto fail_exit;
+ }
+
+	/**
+	 * As the platform lacks hart hotplug support, jump directly to the
+	 * warmboot entry and wait for interrupts there. We do this
+	 * preemptively to preserve the hart state and reuse the hotplug
+	 * code path.
+	 */
+ jump_warmboot();
+
+fail_exit:
+ /* It should never reach here */
+	sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
+ sbi_hart_hang();
+}
+
+int sbi_hsm_hart_start(struct sbi_scratch *scratch,
+ const struct sbi_domain *dom,
+ u32 hartid, ulong saddr, ulong smode, ulong priv)
+{
+ unsigned long init_count;
+ unsigned int hstate;
+ struct sbi_scratch *rscratch;
+ struct sbi_hsm_data *hdata;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ /* For now, we only allow start mode to be S-mode or U-mode. */
+ if (smode != PRV_S && smode != PRV_U)
+ return SBI_EINVAL;
+ if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
+ return SBI_EINVAL;
+ if (dom && !sbi_domain_check_addr(dom, saddr, smode,
+ SBI_DOMAIN_EXECUTE))
+ return SBI_EINVAL;
+
+ rscratch = sbi_hartid_to_scratch(hartid);
+ if (!rscratch)
+ return SBI_EINVAL;
+ hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
+ hstate = atomic_cmpxchg(&hdata->state, SBI_HART_STOPPED,
+ SBI_HART_STARTING);
+ if (hstate == SBI_HART_STARTED)
+ return SBI_EALREADY;
+
+	/**
+	 * If the hart is already transitioning to the started or stopped
+	 * state, another start call is considered an invalid request.
+	 */
+ if (hstate != SBI_HART_STOPPED)
+ return SBI_EINVAL;
+
+ init_count = sbi_init_count(hartid);
+ rscratch->next_arg1 = priv;
+ rscratch->next_addr = saddr;
+ rscratch->next_mode = smode;
+
+ if (sbi_platform_has_hart_hotplug(plat) ||
+ (sbi_platform_has_hart_secondary_boot(plat) && !init_count)) {
+ return sbi_platform_hart_start(plat, hartid,
+ scratch->warmboot_addr);
+ } else {
+ sbi_platform_ipi_send(plat, hartid);
+ }
+
+ return 0;
+}
+
+int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
+{
+ int oldstate;
+ u32 hartid = current_hartid();
+ struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
+ hart_data_offset);
+
+ if (!sbi_hsm_hart_started(sbi_domain_thishart_ptr(), hartid))
+ return SBI_EINVAL;
+
+ oldstate = atomic_cmpxchg(&hdata->state, SBI_HART_STARTED,
+ SBI_HART_STOPPING);
+ if (oldstate != SBI_HART_STARTED) {
+		sbi_printf("%s: ERR: The hart is in an invalid state [%u]\n",
+ __func__, oldstate);
+ return SBI_EDENIED;
+ }
+
+ if (exitnow)
+ sbi_exit(scratch);
+
+ return 0;
+}
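
The transitions above are serialized with atomic_cmpxchg() so exactly one
caller wins each STOPPED -> STARTING -> STARTED -> STOPPING step. From S-mode,
starting a secondary hart goes through the HSM ecall handled earlier; a hedged
caller sketch (the wrapper is hypothetical; EID 0x48534D and FID 0 are the
spec values for HSM and HART_START):

/* Start hart 'hartid' at 'saddr' with 'opaque' in a1, from S-mode. */
static long hsm_hart_start(unsigned long hartid, unsigned long saddr,
			   unsigned long opaque)
{
	register long a7 asm("a7") = 0x48534D;	/* SBI_EXT_HSM */
	register long a6 asm("a6") = 0;		/* HART_START */
	register long a0 asm("a0") = hartid;
	register long a1 asm("a1") = saddr;
	register long a2 asm("a2") = opaque;

	asm volatile("ecall"
		     : "+r"(a0)
		     : "r"(a1), "r"(a2), "r"(a6), "r"(a7)
		     : "memory");

	return a0;	/* 0 on success, SBI error code otherwise */
}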
diff --git a/roms/opensbi/lib/sbi/sbi_illegal_insn.c b/roms/opensbi/lib/sbi/sbi_illegal_insn.c
new file mode 100644
index 000000000..9af3d24d7
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_illegal_insn.c
@@ -0,0 +1,143 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_emulate_csr.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_illegal_insn.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_unpriv.h>
+
+typedef int (*illegal_insn_func)(ulong insn, struct sbi_trap_regs *regs);
+
+static int truly_illegal_insn(ulong insn, struct sbi_trap_regs *regs)
+{
+ struct sbi_trap_info trap;
+
+ trap.epc = regs->mepc;
+ trap.cause = CAUSE_ILLEGAL_INSTRUCTION;
+ trap.tval = insn;
+ trap.tval2 = 0;
+ trap.tinst = 0;
+
+ return sbi_trap_redirect(regs, &trap);
+}
+
+static int system_opcode_insn(ulong insn, struct sbi_trap_regs *regs)
+{
+ int do_write, rs1_num = (insn >> 15) & 0x1f;
+ ulong rs1_val = GET_RS1(insn, regs);
+ int csr_num = (u32)insn >> 20;
+ ulong csr_val, new_csr_val;
+
+ /* TODO: Ensure that we got CSR read/write instruction */
+
+ if (sbi_emulate_csr_read(csr_num, regs, &csr_val))
+ return truly_illegal_insn(insn, regs);
+
+ do_write = rs1_num;
+ switch (GET_RM(insn)) {
+ case 1:
+ new_csr_val = rs1_val;
+ do_write = 1;
+ break;
+ case 2:
+ new_csr_val = csr_val | rs1_val;
+ break;
+ case 3:
+ new_csr_val = csr_val & ~rs1_val;
+ break;
+ case 5:
+ new_csr_val = rs1_num;
+ do_write = 1;
+ break;
+ case 6:
+ new_csr_val = csr_val | rs1_num;
+ break;
+ case 7:
+ new_csr_val = csr_val & ~rs1_num;
+ break;
+ default:
+ return truly_illegal_insn(insn, regs);
+	}
+
+ if (do_write && sbi_emulate_csr_write(csr_num, regs, new_csr_val))
+ return truly_illegal_insn(insn, regs);
+
+ SET_RD(insn, regs, csr_val);
+
+ regs->mepc += 4;
+
+ return 0;
+}
+
+static illegal_insn_func illegal_insn_table[32] = {
+ truly_illegal_insn, /* 0 */
+ truly_illegal_insn, /* 1 */
+ truly_illegal_insn, /* 2 */
+ truly_illegal_insn, /* 3 */
+ truly_illegal_insn, /* 4 */
+ truly_illegal_insn, /* 5 */
+ truly_illegal_insn, /* 6 */
+ truly_illegal_insn, /* 7 */
+ truly_illegal_insn, /* 8 */
+ truly_illegal_insn, /* 9 */
+ truly_illegal_insn, /* 10 */
+ truly_illegal_insn, /* 11 */
+ truly_illegal_insn, /* 12 */
+ truly_illegal_insn, /* 13 */
+ truly_illegal_insn, /* 14 */
+ truly_illegal_insn, /* 15 */
+ truly_illegal_insn, /* 16 */
+ truly_illegal_insn, /* 17 */
+ truly_illegal_insn, /* 18 */
+ truly_illegal_insn, /* 19 */
+ truly_illegal_insn, /* 20 */
+ truly_illegal_insn, /* 21 */
+ truly_illegal_insn, /* 22 */
+ truly_illegal_insn, /* 23 */
+ truly_illegal_insn, /* 24 */
+ truly_illegal_insn, /* 25 */
+ truly_illegal_insn, /* 26 */
+ truly_illegal_insn, /* 27 */
+ system_opcode_insn, /* 28 */
+ truly_illegal_insn, /* 29 */
+ truly_illegal_insn, /* 30 */
+ truly_illegal_insn /* 31 */
+};
+
+int sbi_illegal_insn_handler(ulong insn, struct sbi_trap_regs *regs)
+{
+ struct sbi_trap_info uptrap;
+
+	/*
+	 * We only deal with 32-bit (or longer) illegal instructions. If the
+	 * trapped instruction is zero or a 16-bit encoding, we fetch and
+	 * check the instruction encoding using unprivileged access.
+	 *
+	 * The program counter (PC) in the RISC-V world is always 2-byte
+	 * aligned, so handling only 32-bit (or longer) illegal instructions
+	 * also helps the case where the MTVAL CSR contains the instruction
+	 * address for an illegal instruction trap.
+	 */
+
+ if (unlikely((insn & 3) != 3)) {
+ insn = sbi_get_insn(regs->mepc, &uptrap);
+ if (uptrap.cause) {
+ uptrap.epc = regs->mepc;
+ return sbi_trap_redirect(regs, &uptrap);
+ }
+ if ((insn & 3) != 3)
+ return truly_illegal_insn(insn, regs);
+ }
+
+ return illegal_insn_table[(insn & 0x7c) >> 2](insn, regs);
+}
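
The dispatch table is indexed by instruction bits [6:2], i.e.
(insn & 0x7c) >> 2, so only the SYSTEM major opcode (0x73, slot 28) reaches
the CSR emulation path. A standalone worked example of the indexing:

#include <assert.h>

int main(void)
{
	unsigned long insn = 0x10501073;	/* csrw stvec, zero */

	assert((insn & 3) == 3);		/* 32-bit encoding */
	assert(((insn & 0x7c) >> 2) == 28);	/* SYSTEM opcode slot */
	return 0;
}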
diff --git a/roms/opensbi/lib/sbi/sbi_init.c b/roms/opensbi/lib/sbi/sbi_init.c
new file mode 100644
index 000000000..0e824588e
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_init.c
@@ -0,0 +1,475 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_hartmask.h>
+#include <sbi/sbi_hsm.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_string.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_tlb.h>
+#include <sbi/sbi_version.h>
+
+#define BANNER \
+ " ____ _____ ____ _____\n" \
+ " / __ \\ / ____| _ \\_ _|\n" \
+ " | | | |_ __ ___ _ __ | (___ | |_) || |\n" \
+ " | | | | '_ \\ / _ \\ '_ \\ \\___ \\| _ < | |\n" \
+ " | |__| | |_) | __/ | | |____) | |_) || |_\n" \
+ " \\____/| .__/ \\___|_| |_|_____/|____/_____|\n" \
+ " | |\n" \
+ " |_|\n\n"
+
+static void sbi_boot_print_banner(struct sbi_scratch *scratch)
+{
+ if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS)
+ return;
+
+#ifdef OPENSBI_VERSION_GIT
+ sbi_printf("\nOpenSBI %s\n", OPENSBI_VERSION_GIT);
+#else
+ sbi_printf("\nOpenSBI v%d.%d\n", OPENSBI_VERSION_MAJOR,
+ OPENSBI_VERSION_MINOR);
+#endif
+
+ sbi_printf(BANNER);
+}
+
+static void sbi_boot_print_general(struct sbi_scratch *scratch)
+{
+ char str[128];
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS)
+ return;
+
+ /* Platform details */
+ sbi_printf("Platform Name : %s\n",
+ sbi_platform_name(plat));
+ sbi_platform_get_features_str(plat, str, sizeof(str));
+ sbi_printf("Platform Features : %s\n", str);
+ sbi_printf("Platform HART Count : %u\n",
+ sbi_platform_hart_count(plat));
+
+ /* Firmware details */
+ sbi_printf("Firmware Base : 0x%lx\n", scratch->fw_start);
+ sbi_printf("Firmware Size : %d KB\n",
+ (u32)(scratch->fw_size / 1024));
+
+ /* SBI details */
+ sbi_printf("Runtime SBI Version : %d.%d\n",
+ sbi_ecall_version_major(), sbi_ecall_version_minor());
+ sbi_printf("\n");
+}
+
+static void sbi_boot_print_domains(struct sbi_scratch *scratch)
+{
+ if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS)
+ return;
+
+ /* Domain details */
+ sbi_domain_dump_all(" ");
+}
+
+static void sbi_boot_print_hart(struct sbi_scratch *scratch, u32 hartid)
+{
+ int xlen;
+ char str[128];
+ const struct sbi_domain *dom = sbi_domain_thishart_ptr();
+
+ if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS)
+ return;
+
+ /* Determine MISA XLEN and MISA string */
+ xlen = misa_xlen();
+ if (xlen < 1) {
+ sbi_printf("Error %d getting MISA XLEN\n", xlen);
+ sbi_hart_hang();
+ }
+
+ /* Boot HART details */
+ sbi_printf("Boot HART ID : %u\n", hartid);
+ sbi_printf("Boot HART Domain : %s\n", dom->name);
+ misa_string(xlen, str, sizeof(str));
+ sbi_printf("Boot HART ISA : %s\n", str);
+ sbi_hart_get_features_str(scratch, str, sizeof(str));
+ sbi_printf("Boot HART Features : %s\n", str);
+ sbi_printf("Boot HART PMP Count : %d\n",
+ sbi_hart_pmp_count(scratch));
+ sbi_printf("Boot HART PMP Granularity : %lu\n",
+ sbi_hart_pmp_granularity(scratch));
+ sbi_printf("Boot HART PMP Address Bits: %d\n",
+ sbi_hart_pmp_addrbits(scratch));
+	sbi_printf("Boot HART MHPM Count : %d\n",
+		   sbi_hart_mhpm_count(scratch));
+ sbi_hart_delegation_dump(scratch, "Boot HART ", " ");
+}
+
+static spinlock_t coldboot_lock = SPIN_LOCK_INITIALIZER;
+static struct sbi_hartmask coldboot_wait_hmask = { 0 };
+
+static unsigned long coldboot_done;
+
+static void wait_for_coldboot(struct sbi_scratch *scratch, u32 hartid)
+{
+ unsigned long saved_mie, cmip;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ /* Save MIE CSR */
+ saved_mie = csr_read(CSR_MIE);
+
+ /* Set MSIE bit to receive IPI */
+ csr_set(CSR_MIE, MIP_MSIP);
+
+ /* Acquire coldboot lock */
+ spin_lock(&coldboot_lock);
+
+ /* Mark current HART as waiting */
+ sbi_hartmask_set_hart(hartid, &coldboot_wait_hmask);
+
+ /* Release coldboot lock */
+ spin_unlock(&coldboot_lock);
+
+ /* Wait for coldboot to finish using WFI */
+ while (!__smp_load_acquire(&coldboot_done)) {
+ do {
+ wfi();
+ cmip = csr_read(CSR_MIP);
+ } while (!(cmip & MIP_MSIP));
+	}
+
+ /* Acquire coldboot lock */
+ spin_lock(&coldboot_lock);
+
+ /* Unmark current HART as waiting */
+ sbi_hartmask_clear_hart(hartid, &coldboot_wait_hmask);
+
+ /* Release coldboot lock */
+ spin_unlock(&coldboot_lock);
+
+ /* Restore MIE CSR */
+ csr_write(CSR_MIE, saved_mie);
+
+ /* Clear current HART IPI */
+ sbi_platform_ipi_clear(plat, hartid);
+}
+
+static void wake_coldboot_harts(struct sbi_scratch *scratch, u32 hartid)
+{
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ /* Mark coldboot done */
+ __smp_store_release(&coldboot_done, 1);
+
+ /* Acquire coldboot lock */
+ spin_lock(&coldboot_lock);
+
+ /* Send an IPI to all HARTs waiting for coldboot */
+ for (int i = 0; i <= sbi_scratch_last_hartid(); i++) {
+ if ((i != hartid) &&
+ sbi_hartmask_test_hart(i, &coldboot_wait_hmask))
+ sbi_platform_ipi_send(plat, i);
+ }
+
+ /* Release coldboot lock */
+ spin_unlock(&coldboot_lock);
+}
+
+static unsigned long init_count_offset;
+
+static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid)
+{
+ int rc;
+ unsigned long *init_count;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ /* Note: This has to be first thing in coldboot init sequence */
+ rc = sbi_scratch_init(scratch);
+ if (rc)
+ sbi_hart_hang();
+
+ /* Note: This has to be second thing in coldboot init sequence */
+ rc = sbi_domain_init(scratch, hartid);
+ if (rc)
+ sbi_hart_hang();
+
+ init_count_offset = sbi_scratch_alloc_offset(__SIZEOF_POINTER__,
+ "INIT_COUNT");
+ if (!init_count_offset)
+ sbi_hart_hang();
+
+ rc = sbi_hsm_init(scratch, hartid, TRUE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_platform_early_init(plat, TRUE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_hart_init(scratch, TRUE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_console_init(scratch);
+ if (rc)
+ sbi_hart_hang();
+
+ sbi_boot_print_banner(scratch);
+
+ rc = sbi_platform_irqchip_init(plat, TRUE);
+ if (rc) {
+ sbi_printf("%s: platform irqchip init failed (error %d)\n",
+ __func__, rc);
+ sbi_hart_hang();
+ }
+
+ rc = sbi_ipi_init(scratch, TRUE);
+ if (rc) {
+ sbi_printf("%s: ipi init failed (error %d)\n", __func__, rc);
+ sbi_hart_hang();
+ }
+
+ rc = sbi_tlb_init(scratch, TRUE);
+ if (rc) {
+ sbi_printf("%s: tlb init failed (error %d)\n", __func__, rc);
+ sbi_hart_hang();
+ }
+
+ rc = sbi_timer_init(scratch, TRUE);
+ if (rc) {
+ sbi_printf("%s: timer init failed (error %d)\n", __func__, rc);
+ sbi_hart_hang();
+ }
+
+ rc = sbi_ecall_init();
+ if (rc) {
+ sbi_printf("%s: ecall init failed (error %d)\n", __func__, rc);
+ sbi_hart_hang();
+ }
+
+ sbi_boot_print_general(scratch);
+
+ /*
+ * Note: Finalize domains after HSM initialization so that we
+ * can start up non-root domains.
+ * Note: Finalize domains before HART PMP configuration so
+ * that we use the correct domain for configuring PMP.
+ */
+ rc = sbi_domain_finalize(scratch, hartid);
+ if (rc) {
+ sbi_printf("%s: domain finalize failed (error %d)\n",
+ __func__, rc);
+ sbi_hart_hang();
+ }
+
+ sbi_boot_print_domains(scratch);
+
+ rc = sbi_hart_pmp_configure(scratch);
+ if (rc) {
+ sbi_printf("%s: PMP configure failed (error %d)\n",
+ __func__, rc);
+ sbi_hart_hang();
+ }
+
+ /*
+ * Note: Platform final initialization should be last so that
+ * it sees the correct domain assignment and PMP configuration.
+ */
+ rc = sbi_platform_final_init(plat, TRUE);
+ if (rc) {
+ sbi_printf("%s: platform final init failed (error %d)\n",
+ __func__, rc);
+ sbi_hart_hang();
+ }
+
+ sbi_boot_print_hart(scratch, hartid);
+
+ wake_coldboot_harts(scratch, hartid);
+
+ init_count = sbi_scratch_offset_ptr(scratch, init_count_offset);
+ (*init_count)++;
+
+ sbi_hsm_prepare_next_jump(scratch, hartid);
+ sbi_hart_switch_mode(hartid, scratch->next_arg1, scratch->next_addr,
+ scratch->next_mode, FALSE);
+}
+
+static void __noreturn init_warmboot(struct sbi_scratch *scratch, u32 hartid)
+{
+ int rc;
+ unsigned long *init_count;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ wait_for_coldboot(scratch, hartid);
+
+ if (!init_count_offset)
+ sbi_hart_hang();
+
+ rc = sbi_hsm_init(scratch, hartid, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_platform_early_init(plat, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_hart_init(scratch, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_platform_irqchip_init(plat, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_ipi_init(scratch, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_tlb_init(scratch, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_timer_init(scratch, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_hart_pmp_configure(scratch);
+ if (rc)
+ sbi_hart_hang();
+
+ rc = sbi_platform_final_init(plat, FALSE);
+ if (rc)
+ sbi_hart_hang();
+
+ init_count = sbi_scratch_offset_ptr(scratch, init_count_offset);
+ (*init_count)++;
+
+ sbi_hsm_prepare_next_jump(scratch, hartid);
+ sbi_hart_switch_mode(hartid, scratch->next_arg1,
+ scratch->next_addr,
+ scratch->next_mode, FALSE);
+}
+
+static atomic_t coldboot_lottery = ATOMIC_INITIALIZER(0);
+
+/**
+ * Initialize OpenSBI library for current HART and jump to next
+ * booting stage.
+ *
+ * The function expects the following:
+ * 1. The 'mscratch' CSR is pointing to sbi_scratch of current HART
+ * 2. Stack pointer (SP) is setup for current HART
+ * 3. Interrupts are disabled in MSTATUS CSR
+ * 4. All interrupts are disabled in MIE CSR
+ *
+ * @param scratch pointer to sbi_scratch of current HART
+ */
+void __noreturn sbi_init(struct sbi_scratch *scratch)
+{
+ bool next_mode_supported = FALSE;
+ bool coldboot = FALSE;
+ u32 hartid = current_hartid();
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ if ((SBI_HARTMASK_MAX_BITS <= hartid) ||
+ sbi_platform_hart_invalid(plat, hartid))
+ sbi_hart_hang();
+
+ switch (scratch->next_mode) {
+ case PRV_M:
+ next_mode_supported = TRUE;
+ break;
+ case PRV_S:
+ if (misa_extension('S'))
+ next_mode_supported = TRUE;
+ break;
+ case PRV_U:
+ if (misa_extension('U'))
+ next_mode_supported = TRUE;
+ break;
+ default:
+ sbi_hart_hang();
+ }
+
+ /*
+ * Only a HART supporting the privilege mode specified in
+ * scratch->next_mode should be allowed to become the coldboot
+ * HART because the coldboot HART will be directly jumping to
+ * the next booting stage.
+ *
+ * We use a lottery mechanism to select the coldboot HART among
+ * the HARTs which satisfy the above condition.
+ */
+
+ if (next_mode_supported && atomic_xchg(&coldboot_lottery, 1) == 0)
+ coldboot = TRUE;
+
+ if (coldboot)
+ init_coldboot(scratch, hartid);
+ else
+ init_warmboot(scratch, hartid);
+}
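The lottery works because atomic_xchg() returns the previous value: exactly one hart observes 0 and wins, and every later hart observes 1. A tiny sketch of the same idea, with a GCC builtin standing in for OpenSBI's atomic_xchg():

#include <stdio.h>

/* Illustrative stand-in for the coldboot_lottery exchange. */
static int lottery;

static int try_win(void)
{
    /* The first caller sees the old value 0 and wins; the rest see 1. */
    return __atomic_exchange_n(&lottery, 1, __ATOMIC_SEQ_CST) == 0;
}

int main(void)
{
    printf("first caller wins:  %d\n", try_win());
    printf("second caller wins: %d\n", try_win());
    return 0;
}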
+
+unsigned long sbi_init_count(u32 hartid)
+{
+ struct sbi_scratch *scratch;
+ unsigned long *init_count;
+
+ if (!init_count_offset)
+ return 0;
+
+ scratch = sbi_hartid_to_scratch(hartid);
+ if (!scratch)
+ return 0;
+
+ init_count = sbi_scratch_offset_ptr(scratch, init_count_offset);
+
+ return *init_count;
+}
+
+/**
+ * Exit OpenSBI library for current HART and stop HART
+ *
+ * The function expects the following:
+ * 1. The 'mscratch' CSR is pointing to sbi_scratch of current HART
+ * 2. Stack pointer (SP) is setup for current HART
+ *
+ * @param scratch pointer to sbi_scratch of current HART
+ */
+void __noreturn sbi_exit(struct sbi_scratch *scratch)
+{
+ u32 hartid = current_hartid();
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ if (sbi_platform_hart_invalid(plat, hartid))
+ sbi_hart_hang();
+
+ sbi_platform_early_exit(plat);
+
+ sbi_timer_exit(scratch);
+
+ sbi_ipi_exit(scratch);
+
+ sbi_platform_irqchip_exit(plat);
+
+ sbi_platform_final_exit(plat);
+
+ sbi_hsm_exit(scratch);
+}
diff --git a/roms/opensbi/lib/sbi/sbi_ipi.c b/roms/opensbi/lib/sbi/sbi_ipi.c
new file mode 100644
index 000000000..43478328b
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_ipi.c
@@ -0,0 +1,254 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_hsm.h>
+#include <sbi/sbi_init.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_platform.h>
+
+struct sbi_ipi_data {
+ unsigned long ipi_type;
+};
+
+static unsigned long ipi_data_off;
+
+static const struct sbi_ipi_event_ops *ipi_ops_array[SBI_IPI_EVENT_MAX];
+
+static int sbi_ipi_send(struct sbi_scratch *scratch, u32 remote_hartid,
+ u32 event, void *data)
+{
+ int ret;
+ struct sbi_scratch *remote_scratch = NULL;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ struct sbi_ipi_data *ipi_data;
+ const struct sbi_ipi_event_ops *ipi_ops;
+
+ if ((SBI_IPI_EVENT_MAX <= event) ||
+ !ipi_ops_array[event])
+ return SBI_EINVAL;
+ ipi_ops = ipi_ops_array[event];
+
+ remote_scratch = sbi_hartid_to_scratch(remote_hartid);
+ if (!remote_scratch)
+ return SBI_EINVAL;
+
+ ipi_data = sbi_scratch_offset_ptr(remote_scratch, ipi_data_off);
+
+ if (ipi_ops->update) {
+ ret = ipi_ops->update(scratch, remote_scratch,
+ remote_hartid, data);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Set IPI type on remote hart's scratch area and
+ * trigger the interrupt
+ */
+ atomic_raw_set_bit(event, &ipi_data->ipi_type);
+ smp_wmb();
+ sbi_platform_ipi_send(plat, remote_hartid);
+
+ if (ipi_ops->sync)
+ ipi_ops->sync(scratch);
+
+ return 0;
+}
+
+/**
+ * As this function only handles scalar values of hart mask, hmask must be
+ * set to all online harts if the intention is to send IPIs to all the harts.
+ * If hmask is zero, no IPIs will be sent.
+ */
+int sbi_ipi_send_many(ulong hmask, ulong hbase, u32 event, void *data)
+{
+ int rc;
+ ulong i, m;
+ struct sbi_domain *dom = sbi_domain_thishart_ptr();
+ struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+
+ if (hbase != -1UL) {
+ rc = sbi_hsm_hart_started_mask(dom, hbase, &m);
+ if (rc)
+ return rc;
+ m &= hmask;
+
+ /* Send IPIs */
+ for (i = hbase; m; i++, m >>= 1) {
+ if (m & 1UL)
+ sbi_ipi_send(scratch, i, event, data);
+ }
+ } else {
+ hbase = 0;
+ while (!sbi_hsm_hart_started_mask(dom, hbase, &m)) {
+ /* Send IPIs */
+ for (i = hbase; m; i++, m >>= 1) {
+ if (m & 1UL)
+ sbi_ipi_send(scratch, i, event, data);
+ }
+ hbase += BITS_PER_LONG;
+ }
+ }
+
+ return 0;
+}
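The (hmask, hbase) convention used above maps bit k of the mask to hart hbase + k, and the send loop retires the mask one bit at a time. A small demo of that walk, with made-up values:

#include <stdio.h>

int main(void)
{
    /* Hypothetical demo values: bits 0, 2 and 4 of the mask are set,
     * so harts 8, 10 and 12 are selected. */
    unsigned long hmask = 0x15;
    unsigned long hbase = 8;

    for (unsigned long i = hbase, m = hmask; m; i++, m >>= 1) {
        if (m & 1UL)
            printf("send IPI to hart %lu\n", i);
    }
    return 0;
}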
+
+int sbi_ipi_event_create(const struct sbi_ipi_event_ops *ops)
+{
+ int i, ret = SBI_ENOSPC;
+
+ if (!ops || !ops->process)
+ return SBI_EINVAL;
+
+ for (i = 0; i < SBI_IPI_EVENT_MAX; i++) {
+ if (!ipi_ops_array[i]) {
+ ret = i;
+ ipi_ops_array[i] = ops;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void sbi_ipi_event_destroy(u32 event)
+{
+ if (SBI_IPI_EVENT_MAX <= event)
+ return;
+
+ ipi_ops_array[event] = NULL;
+}
+
+static void sbi_ipi_process_smode(struct sbi_scratch *scratch)
+{
+ csr_set(CSR_MIP, MIP_SSIP);
+}
+
+static struct sbi_ipi_event_ops ipi_smode_ops = {
+ .name = "IPI_SMODE",
+ .process = sbi_ipi_process_smode,
+};
+
+static u32 ipi_smode_event = SBI_IPI_EVENT_MAX;
+
+int sbi_ipi_send_smode(ulong hmask, ulong hbase)
+{
+ return sbi_ipi_send_many(hmask, hbase, ipi_smode_event, NULL);
+}
+
+void sbi_ipi_clear_smode(void)
+{
+ csr_clear(CSR_MIP, MIP_SSIP);
+}
+
+static void sbi_ipi_process_halt(struct sbi_scratch *scratch)
+{
+ sbi_hsm_hart_stop(scratch, TRUE);
+}
+
+static struct sbi_ipi_event_ops ipi_halt_ops = {
+ .name = "IPI_HALT",
+ .process = sbi_ipi_process_halt,
+};
+
+static u32 ipi_halt_event = SBI_IPI_EVENT_MAX;
+
+int sbi_ipi_send_halt(ulong hmask, ulong hbase)
+{
+ return sbi_ipi_send_many(hmask, hbase, ipi_halt_event, NULL);
+}
+
+void sbi_ipi_process(void)
+{
+ unsigned long ipi_type;
+ unsigned int ipi_event;
+ const struct sbi_ipi_event_ops *ipi_ops;
+ struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ struct sbi_ipi_data *ipi_data =
+ sbi_scratch_offset_ptr(scratch, ipi_data_off);
+
+ u32 hartid = current_hartid();
+ sbi_platform_ipi_clear(plat, hartid);
+
+ ipi_type = atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0);
+ ipi_event = 0;
+ while (ipi_type) {
+ if (!(ipi_type & 1UL))
+ goto skip;
+
+ ipi_ops = ipi_ops_array[ipi_event];
+ if (ipi_ops && ipi_ops->process)
+ ipi_ops->process(scratch);
+
+skip:
+ ipi_type = ipi_type >> 1;
+ ipi_event++;
+ };
+}
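sbi_ipi_process() drains all pending events in one pass: it atomically swaps the per-hart event word to zero, then walks the snapshot from the least significant bit upward. A host-side sketch of that decode loop (event numbers are made up for the demo):

#include <stdio.h>

int main(void)
{
    unsigned long pending = 0;

    /* Senders set event bits; here we fake events 0 and 3. */
    __atomic_fetch_or(&pending, (1UL << 0) | (1UL << 3), __ATOMIC_SEQ_CST);

    /* Receiver snapshots and clears the word in one atomic step,
     * exactly like atomic_raw_xchg_ulong(&ipi_data->ipi_type, 0). */
    unsigned long ipi_type = __atomic_exchange_n(&pending, 0, __ATOMIC_SEQ_CST);
    unsigned int event = 0;

    while (ipi_type) {
        if (ipi_type & 1UL)
            printf("process event %u\n", event);
        ipi_type >>= 1;
        event++;
    }
    return 0;
}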
+
+int sbi_ipi_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+ int ret;
+ struct sbi_ipi_data *ipi_data;
+
+ if (cold_boot) {
+ ipi_data_off = sbi_scratch_alloc_offset(sizeof(*ipi_data),
+ "IPI_DATA");
+ if (!ipi_data_off)
+ return SBI_ENOMEM;
+ ret = sbi_ipi_event_create(&ipi_smode_ops);
+ if (ret < 0)
+ return ret;
+ ipi_smode_event = ret;
+ ret = sbi_ipi_event_create(&ipi_halt_ops);
+ if (ret < 0)
+ return ret;
+ ipi_halt_event = ret;
+ } else {
+ if (!ipi_data_off)
+ return SBI_ENOMEM;
+ if (SBI_IPI_EVENT_MAX <= ipi_smode_event ||
+ SBI_IPI_EVENT_MAX <= ipi_halt_event)
+ return SBI_ENOSPC;
+ }
+
+ ipi_data = sbi_scratch_offset_ptr(scratch, ipi_data_off);
+ ipi_data->ipi_type = 0x00;
+
+ /* Platform init */
+ ret = sbi_platform_ipi_init(sbi_platform_ptr(scratch), cold_boot);
+ if (ret)
+ return ret;
+
+ /* Enable software interrupts */
+ csr_set(CSR_MIE, MIP_MSIP);
+
+ return 0;
+}
+
+void sbi_ipi_exit(struct sbi_scratch *scratch)
+{
+ /* Disable software interrupts */
+ csr_clear(CSR_MIE, MIP_MSIP);
+
+ /* Process pending IPIs */
+ sbi_ipi_process();
+
+ /* Platform exit */
+ sbi_platform_ipi_exit(sbi_platform_ptr(scratch));
+}
diff --git a/roms/opensbi/lib/sbi/sbi_math.c b/roms/opensbi/lib/sbi/sbi_math.c
new file mode 100644
index 000000000..8ba0831d8
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_math.c
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Common helper functions used across OpenSBI project.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+unsigned long log2roundup(unsigned long x)
+{
+ unsigned long ret = 0;
+
+ while (ret < __riscv_xlen) {
+ if (x <= (1UL << ret))
+ break;
+ ret++;
+ }
+
+ return ret;
+}
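log2roundup() returns the ceiling of log2(x), clamped to the register width. A quick host-side sanity check of the same loop, with 64 standing in for __riscv_xlen:

#include <stdio.h>

/* Host-side copy of log2roundup() for a quick check. */
static unsigned long log2roundup(unsigned long x)
{
    unsigned long ret = 0;

    while (ret < 64) {
        if (x <= (1UL << ret))
            break;
        ret++;
    }
    return ret;
}

int main(void)
{
    /* Expected ceil(log2(x)): 1->0, 2->1, 5->3, 8->3, 9->4 */
    unsigned long tests[] = { 1, 2, 5, 8, 9 };

    for (int i = 0; i < 5; i++)
        printf("log2roundup(%lu) = %lu\n", tests[i], log2roundup(tests[i]));
    return 0;
}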
diff --git a/roms/opensbi/lib/sbi/sbi_misaligned_ldst.c b/roms/opensbi/lib/sbi/sbi_misaligned_ldst.c
new file mode 100644
index 000000000..5057cb5ec
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_misaligned_ldst.c
@@ -0,0 +1,243 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/riscv_fp.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_misaligned_ldst.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_unpriv.h>
+
+union reg_data {
+ u8 data_bytes[8];
+ ulong data_ulong;
+ u64 data_u64;
+};
+
+int sbi_misaligned_load_handler(ulong addr, ulong tval2, ulong tinst,
+ struct sbi_trap_regs *regs)
+{
+ ulong insn, insn_len;
+ union reg_data val;
+ struct sbi_trap_info uptrap;
+ int i, fp = 0, shift = 0, len = 0;
+
+ if (tinst & 0x1) {
+ /*
+ * Bit[0] == 1 implies the trapped instruction value is a
+ * transformed instruction or a custom instruction.
+ */
+ insn = tinst | INSN_16BIT_MASK;
+ insn_len = (tinst & 0x2) ? INSN_LEN(insn) : 2;
+ } else {
+ /*
+ * Bit[0] == 0 implies the trapped instruction value is
+ * zero or a special value.
+ */
+ insn = sbi_get_insn(regs->mepc, &uptrap);
+ if (uptrap.cause) {
+ uptrap.epc = regs->mepc;
+ return sbi_trap_redirect(regs, &uptrap);
+ }
+ insn_len = INSN_LEN(insn);
+ }
+
+ if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
+ len = 4;
+ shift = 8 * (sizeof(ulong) - len);
+#if __riscv_xlen == 64
+ } else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
+ len = 8;
+ shift = 8 * (sizeof(ulong) - len);
+ } else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
+ len = 4;
+#endif
+#ifdef __riscv_flen
+ } else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
+ fp = 1;
+ len = 8;
+ } else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
+ fp = 1;
+ len = 4;
+#endif
+ } else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
+ len = 2;
+ shift = 8 * (sizeof(ulong) - len);
+ } else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
+ len = 2;
+#if __riscv_xlen >= 64
+ } else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
+ len = 8;
+ shift = 8 * (sizeof(ulong) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 8;
+ shift = 8 * (sizeof(ulong) - len);
+#endif
+ } else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
+ len = 4;
+ shift = 8 * (sizeof(ulong) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 4;
+ shift = 8 * (sizeof(ulong) - len);
+#ifdef __riscv_flen
+ } else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
+ fp = 1;
+ len = 8;
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
+ fp = 1;
+ len = 8;
+#if __riscv_xlen == 32
+ } else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
+ fp = 1;
+ len = 4;
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
+ fp = 1;
+ len = 4;
+#endif
+#endif
+ } else {
+ uptrap.epc = regs->mepc;
+ uptrap.cause = CAUSE_MISALIGNED_LOAD;
+ uptrap.tval = addr;
+ uptrap.tval2 = tval2;
+ uptrap.tinst = tinst;
+ return sbi_trap_redirect(regs, &uptrap);
+ }
+
+ val.data_u64 = 0;
+ for (i = 0; i < len; i++) {
+ val.data_bytes[i] = sbi_load_u8((void *)(addr + i),
+ &uptrap);
+ if (uptrap.cause) {
+ uptrap.epc = regs->mepc;
+ return sbi_trap_redirect(regs, &uptrap);
+ }
+ }
+
+ if (!fp)
+ SET_RD(insn, regs, ((long)(val.data_ulong << shift)) >> shift);
+#ifdef __riscv_flen
+ else if (len == 8)
+ SET_F64_RD(insn, regs, val.data_u64);
+ else
+ SET_F32_RD(insn, regs, val.data_ulong);
+#endif
+
+ regs->mepc += insn_len;
+
+ return 0;
+}
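The handler above never performs the misaligned access itself: it copies the operand one byte at a time into union reg_data and then sign-extends with a shift pair. A host-runnable sketch of that reassembly for an LH-style load (it assumes a little-endian host, as RISC-V is; addresses and values are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

union reg_data {
    uint8_t data_bytes[8];
    unsigned long data_ulong;
    uint64_t data_u64;
};

int main(void)
{
    uint8_t mem[16];

    memset(mem, 0, sizeof(mem));
    mem[5] = 0x80; /* a misaligned 16-bit value 0xff80 at offset 5 */
    mem[6] = 0xff;

    union reg_data val = { .data_u64 = 0 };
    int len = 2, shift = 8 * (sizeof(unsigned long) - len);

    /* Byte-wise copy, so no alignment trap can occur */
    for (int i = 0; i < len; i++)
        val.data_bytes[i] = mem[5 + i];

    /* Sign-extend, mirroring ((long)(val.data_ulong << shift)) >> shift */
    long rd = ((long)(val.data_ulong << shift)) >> shift;
    printf("loaded %ld\n", rd); /* prints -128, i.e. 0xff80 as an LH */
    return 0;
}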
+
+int sbi_misaligned_store_handler(ulong addr, ulong tval2, ulong tinst,
+ struct sbi_trap_regs *regs)
+{
+ ulong insn, insn_len;
+ union reg_data val;
+ struct sbi_trap_info uptrap;
+ int i, len = 0;
+
+ if (tinst & 0x1) {
+ /*
+ * Bit[0] == 1 implies the trapped instruction value is a
+ * transformed instruction or a custom instruction.
+ */
+ insn = tinst | INSN_16BIT_MASK;
+ insn_len = (tinst & 0x2) ? INSN_LEN(insn) : 2;
+ } else {
+ /*
+ * Bit[0] == 0 implies the trapped instruction value is
+ * zero or a special value.
+ */
+ insn = sbi_get_insn(regs->mepc, &uptrap);
+ if (uptrap.cause) {
+ uptrap.epc = regs->mepc;
+ return sbi_trap_redirect(regs, &uptrap);
+ }
+ insn_len = INSN_LEN(insn);
+ }
+
+ val.data_ulong = GET_RS2(insn, regs);
+
+ if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
+ len = 4;
+#if __riscv_xlen == 64
+ } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
+ len = 8;
+#endif
+#ifdef __riscv_flen
+ } else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
+ len = 8;
+ val.data_u64 = GET_F64_RS2(insn, regs);
+ } else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
+ len = 4;
+ val.data_ulong = GET_F32_RS2(insn, regs);
+#endif
+ } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
+ len = 2;
+#if __riscv_xlen >= 64
+ } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+ len = 8;
+ val.data_ulong = GET_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 8;
+ val.data_ulong = GET_RS2C(insn, regs);
+#endif
+ } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+ len = 4;
+ val.data_ulong = GET_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 4;
+ val.data_ulong = GET_RS2C(insn, regs);
+#ifdef __riscv_flen
+ } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
+ len = 8;
+ val.data_u64 = GET_F64_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
+ len = 8;
+ val.data_u64 = GET_F64_RS2C(insn, regs);
+#if __riscv_xlen == 32
+ } else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
+ len = 4;
+ val.data_ulong = GET_F32_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
+ len = 4;
+ val.data_ulong = GET_F32_RS2C(insn, regs);
+#endif
+#endif
+ } else {
+ uptrap.epc = regs->mepc;
+ uptrap.cause = CAUSE_MISALIGNED_STORE;
+ uptrap.tval = addr;
+ uptrap.tval2 = tval2;
+ uptrap.tinst = tinst;
+ return sbi_trap_redirect(regs, &uptrap);
+ }
+
+ for (i = 0; i < len; i++) {
+ sbi_store_u8((void *)(addr + i), val.data_bytes[i],
+ &uptrap);
+ if (uptrap.cause) {
+ uptrap.epc = regs->mepc;
+ return sbi_trap_redirect(regs, &uptrap);
+ }
+ }
+
+ regs->mepc += insn_len;
+
+ return 0;
+}
diff --git a/roms/opensbi/lib/sbi/sbi_platform.c b/roms/opensbi/lib/sbi/sbi_platform.c
new file mode 100644
index 000000000..568d95661
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_platform.c
@@ -0,0 +1,90 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_string.h>
+
+static inline char *sbi_platform_feature_id2string(unsigned long feature)
+{
+ char *fstr = NULL;
+
+ if (!feature)
+ return NULL;
+
+ switch (feature) {
+ case SBI_PLATFORM_HAS_TIMER_VALUE:
+ fstr = "timer";
+ break;
+ case SBI_PLATFORM_HAS_HART_HOTPLUG:
+ fstr = "hotplug";
+ break;
+ case SBI_PLATFORM_HAS_MFAULTS_DELEGATION:
+ fstr = "mfdeleg";
+ break;
+ case SBI_PLATFORM_HAS_HART_SECONDARY_BOOT:
+ fstr = "sec_boot";
+ break;
+ default:
+ break;
+ }
+
+ return fstr;
+}
+
+void sbi_platform_get_features_str(const struct sbi_platform *plat,
+ char *features_str, int nfstr)
+{
+ unsigned long features, feat = 1UL;
+ char *temp;
+ int offset = 0;
+
+ if (!plat || !features_str || !nfstr)
+ return;
+ sbi_memset(features_str, 0, nfstr);
+
+ features = sbi_platform_get_features(plat);
+ if (!features)
+ goto done;
+
+ do {
+ if (features & feat) {
+ temp = sbi_platform_feature_id2string(feat);
+ if (temp) {
+ sbi_snprintf(features_str + offset,
+ nfstr - offset, "%s,", temp);
+ offset = offset + sbi_strlen(temp) + 1;
+ }
+ }
+ feat = feat << 1;
+ } while (feat <= SBI_PLATFORM_HAS_LAST_FEATURE);
+
+done:
+ if (offset)
+ features_str[offset - 1] = '\0';
+ else
+ sbi_strncpy(features_str, "none", nfstr);
+}
+
+u32 sbi_platform_hart_index(const struct sbi_platform *plat, u32 hartid)
+{
+ u32 i;
+
+ if (!plat)
+ return -1U;
+ if (plat->hart_index2id) {
+ for (i = 0; i < plat->hart_count; i++) {
+ if (plat->hart_index2id[i] == hartid)
+ return i;
+ }
+ return -1U;
+ }
+
+ return hartid;
+}
diff --git a/roms/opensbi/lib/sbi/sbi_scratch.c b/roms/opensbi/lib/sbi/sbi_scratch.c
new file mode 100644
index 000000000..96bae5be4
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_scratch.c
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_locks.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_hartmask.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_string.h>
+
+u32 last_hartid_having_scratch = SBI_HARTMASK_MAX_BITS;
+struct sbi_scratch *hartid_to_scratch_table[SBI_HARTMASK_MAX_BITS] = { 0 };
+
+static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER;
+static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET;
+
+typedef struct sbi_scratch *(*hartid2scratch)(ulong hartid, ulong hartindex);
+
+int sbi_scratch_init(struct sbi_scratch *scratch)
+{
+ u32 i;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) {
+ if (sbi_platform_hart_invalid(plat, i))
+ continue;
+ hartid_to_scratch_table[i] =
+ ((hartid2scratch)scratch->hartid_to_scratch)(i,
+ sbi_platform_hart_index(plat, i));
+ if (hartid_to_scratch_table[i])
+ last_hartid_having_scratch = i;
+ }
+
+ return 0;
+}
+
+unsigned long sbi_scratch_alloc_offset(unsigned long size, const char *owner)
+{
+ u32 i;
+ void *ptr;
+ unsigned long ret = 0;
+ struct sbi_scratch *rscratch;
+
+ /*
+ * We have a simple brain-dead allocator which never expects
+ * anything to be freed, hence it keeps incrementing the
+ * next allocation offset until it runs out of space.
+ *
+ * In the future, we will have a more sophisticated allocator
+ * which will allow us to reclaim freed space.
+ */
+
+ if (!size)
+ return 0;
+
+ if (size & (__SIZEOF_POINTER__ - 1))
+ size = (size & ~(__SIZEOF_POINTER__ - 1)) + __SIZEOF_POINTER__;
+
+ spin_lock(&extra_lock);
+
+ if (SBI_SCRATCH_SIZE < (extra_offset + size))
+ goto done;
+
+ ret = extra_offset;
+ extra_offset += size;
+
+done:
+ spin_unlock(&extra_lock);
+
+ if (ret) {
+ for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
+ rscratch = sbi_hartid_to_scratch(i);
+ if (!rscratch)
+ continue;
+ ptr = sbi_scratch_offset_ptr(rscratch, ret);
+ sbi_memset(ptr, 0, size);
+ }
+ }
+
+ return ret;
+}
+
+void sbi_scratch_free_offset(unsigned long offset)
+{
+ if ((offset < SBI_SCRATCH_EXTRA_SPACE_OFFSET) ||
+ (SBI_SCRATCH_SIZE <= offset))
+ return;
+
+ /*
+ * We don't actually free anything because it's a simple
+ * brain-dead allocator.
+ */
+}
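The allocator above is a bump allocator over a fixed-size scratch region: offsets only grow, offset 0 doubles as the failure value, and each successful allocation is zeroed in every hart's scratch copy. A minimal host-side model of the same scheme (the sizes are illustrative, not the real SBI_SCRATCH_SIZE layout):

#include <stdio.h>
#include <string.h>

#define SCRATCH_SIZE       4096 /* illustrative */
#define EXTRA_SPACE_OFFSET 256  /* illustrative */

static unsigned char scratch[SCRATCH_SIZE];
static unsigned long extra_offset = EXTRA_SPACE_OFFSET;

static unsigned long alloc_offset(unsigned long size)
{
    /* Round up to pointer alignment, as the real allocator does */
    size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
    if (SCRATCH_SIZE < extra_offset + size)
        return 0; /* 0 means failure: it is never a valid extra offset */

    unsigned long ret = extra_offset;

    extra_offset += size;
    memset(&scratch[ret], 0, size); /* one hart's worth of the zeroing */
    return ret;
}

int main(void)
{
    unsigned long off = alloc_offset(sizeof(unsigned long));

    printf("allocated offset %lu\n", off);
    return 0;
}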
diff --git a/roms/opensbi/lib/sbi/sbi_string.c b/roms/opensbi/lib/sbi/sbi_string.c
new file mode 100644
index 000000000..7805ba4ad
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_string.c
@@ -0,0 +1,188 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+/*
+ * Simple libc functions. These are not optimized at all and might have some
+ * bugs as well. Use any optimized routines from newlib or glibc if required.
+ */
+
+#include <sbi/sbi_string.h>
+
+/*
+ Provides sbi_strcmp() for the completeness of the supported string functions.
+ Using sbi_strcmp() is not recommended; use sbi_strncmp() instead.
+*/
+int sbi_strcmp(const char *a, const char *b)
+{
+ /* search first diff or end of string */
+ for (; *a == *b && *a != '\0'; a++, b++)
+ ;
+
+ return *a - *b;
+}
+
+int sbi_strncmp(const char *a, const char *b, size_t count)
+{
+ /* search first diff or end of string */
+ for (; count > 0 && *a == *b && *a != '\0'; a++, b++, count--)
+ ;
+
+ return *a - *b;
+}
+
+size_t sbi_strlen(const char *str)
+{
+ unsigned long ret = 0;
+
+ while (*str != '\0') {
+ ret++;
+ str++;
+ }
+
+ return ret;
+}
+
+size_t sbi_strnlen(const char *str, size_t count)
+{
+ unsigned long ret = 0;
+
+ while (*str != '\0' && ret < count) {
+ ret++;
+ str++;
+ count--;
+ }
+
+ return ret;
+}
+
+char *sbi_strcpy(char *dest, const char *src)
+{
+ char *ret = dest;
+
+ while (*src != '\0') {
+ *dest++ = *src++;
+ }
+ /* Also copy the terminating NUL, which the loop above skips */
+ *dest = '\0';
+
+ return ret;
+}
+
+char *sbi_strncpy(char *dest, const char *src, size_t count)
+{
+ char *ret = dest;
+
+ while (count-- && *src != '\0') {
+ *dest++ = *src++;
+ }
+
+ return ret;
+}
+
+char *sbi_strchr(const char *s, int c)
+{
+ while (*s != '\0' && *s != (char)c)
+ s++;
+
+ if (*s == '\0')
+ return NULL;
+ else
+ return (char *)s;
+}
+
+char *sbi_strrchr(const char *s, int c)
+{
+ const char *last = s + sbi_strlen(s);
+
+ while (last > s && *last != (char)c)
+ last--;
+
+ if (*last != (char)c)
+ return NULL;
+ else
+ return (char *)last;
+}
+
+void *sbi_memset(void *s, int c, size_t count)
+{
+ char *temp = s;
+
+ while (count > 0) {
+ count--;
+ *temp++ = c;
+ }
+
+ return s;
+}
+
+void *sbi_memcpy(void *dest, const void *src, size_t count)
+{
+ char *temp1 = dest;
+ const char *temp2 = src;
+
+ while (count > 0) {
+ *temp1++ = *temp2++;
+ count--;
+ }
+
+ return dest;
+}
+
+void *sbi_memmove(void *dest, const void *src, size_t count)
+{
+ char *temp1 = (char *)dest;
+ const char *temp2 = (char *)src;
+
+ if (src == dest)
+ return dest;
+
+ if (dest < src) {
+ while (count > 0) {
+ *temp1++ = *temp2++;
+ count--;
+ }
+ } else {
+ temp1 = dest + count - 1;
+ temp2 = src + count - 1;
+
+ while (count > 0) {
+ *temp1-- = *temp2--;
+ count--;
+ }
+ }
+
+ return dest;
+}
+
+int sbi_memcmp(const void *s1, const void *s2, size_t count)
+{
+ const char *temp1 = s1;
+ const char *temp2 = s2;
+
+ for (; count > 0 && (*temp1 == *temp2); count--) {
+ temp1++;
+ temp2++;
+ }
+
+ if (count > 0)
+ return *(unsigned char *)temp1 - *(unsigned char *)temp2;
+ else
+ return 0;
+}
+
+void *sbi_memchr(const void *s, int c, size_t count)
+{
+ const unsigned char *temp = s;
+
+ while (count > 0) {
+ if ((unsigned char)c == *temp++) {
+ return (void *)(temp - 1);
+ }
+ count--;
+ }
+
+ return NULL;
+}
diff --git a/roms/opensbi/lib/sbi/sbi_system.c b/roms/opensbi/lib/sbi/sbi_system.c
new file mode 100644
index 000000000..08a8b47c7
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_system.c
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_domain.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_hsm.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_system.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_init.h>
+
+bool sbi_system_reset_supported(u32 reset_type, u32 reset_reason)
+{
+ if (sbi_platform_system_reset_check(sbi_platform_thishart_ptr(),
+ reset_type, reset_reason))
+ return TRUE;
+
+ return FALSE;
+}
+
+void __noreturn sbi_system_reset(u32 reset_type, u32 reset_reason)
+{
+ ulong hbase = 0, hmask;
+ u32 cur_hartid = current_hartid();
+ struct sbi_domain *dom = sbi_domain_thishart_ptr();
+ struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+
+ /* Send HALT IPI to every hart other than the current hart */
+ while (!sbi_hsm_hart_started_mask(dom, hbase, &hmask)) {
+ /* Only clear the bit when cur_hartid falls inside the
+ current BITS_PER_LONG window, to avoid an out-of-range
+ shift */
+ if (hbase <= cur_hartid &&
+ cur_hartid < hbase + BITS_PER_LONG)
+ hmask &= ~(1UL << (cur_hartid - hbase));
+ if (hmask)
+ sbi_ipi_send_halt(hmask, hbase);
+ hbase += BITS_PER_LONG;
+ }
+
+ /* Stop current HART */
+ sbi_hsm_hart_stop(scratch, FALSE);
+
+ /* Platform specific reset if domain allowed system reset */
+ if (dom->system_reset_allowed)
+ sbi_platform_system_reset(sbi_platform_ptr(scratch),
+ reset_type, reset_reason);
+
+ /* If platform specific reset did not work then do sbi_exit() */
+ sbi_exit(scratch);
+}
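The masking step in sbi_system_reset() clears the current hart's bit only when it falls inside the window [hbase, hbase + BITS_PER_LONG). A small demo of that arithmetic, with made-up values:

#include <stdio.h>

int main(void)
{
    /* Hypothetical demo: harts 0..7 started, current hart is 3 */
    unsigned long hmask = 0xff;
    unsigned long hbase = 0;
    unsigned long cur_hartid = 3;
    unsigned long bits_per_long = 8 * sizeof(unsigned long);

    if (hbase <= cur_hartid && cur_hartid < hbase + bits_per_long)
        hmask &= ~(1UL << (cur_hartid - hbase));
    printf("halt mask: 0x%lx\n", hmask); /* 0xf7: everyone but hart 3 */
    return 0;
}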
diff --git a/roms/opensbi/lib/sbi/sbi_timer.c b/roms/opensbi/lib/sbi/sbi_timer.c
new file mode 100644
index 000000000..b571b1740
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_timer.c
@@ -0,0 +1,136 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_platform.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_timer.h>
+
+static unsigned long time_delta_off;
+static u64 (*get_time_val)(const struct sbi_platform *plat);
+
+#if __riscv_xlen == 32
+static u64 get_ticks(const struct sbi_platform *plat)
+{
+ u32 lo, hi, tmp;
+ __asm__ __volatile__("1:\n"
+ "rdtimeh %0\n"
+ "rdtime %1\n"
+ "rdtimeh %2\n"
+ "bne %0, %2, 1b"
+ : "=&r"(hi), "=&r"(lo), "=&r"(tmp));
+ return ((u64)hi << 32) | lo;
+}
+#else
+static u64 get_ticks(const struct sbi_platform *plat)
+{
+ unsigned long n;
+
+ __asm__ __volatile__("rdtime %0" : "=r"(n));
+ return n;
+}
+#endif
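On RV32 the 64-bit time counter must be read as two 32-bit halves, so get_ticks() re-reads the high half until it is stable; otherwise a carry between the two reads could yield a torn value. A host-side model of the same hi/lo/hi retry loop (the two variables stand in for the rdtime/rdtimeh CSRs):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t time_lo, time_hi; /* stand-ins for the CSRs */

static uint64_t read_time64(void)
{
    uint32_t hi, lo, tmp;

    do {
        hi  = time_hi; /* rdtimeh */
        lo  = time_lo; /* rdtime  */
        tmp = time_hi; /* rdtimeh again: retry if a carry slipped in */
    } while (hi != tmp);
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    time_hi = 1;
    time_lo = 0xfffffff0u;
    printf("time = 0x%llx\n", (unsigned long long)read_time64());
    return 0;
}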
+
+u64 sbi_timer_value(void)
+{
+ return get_time_val(sbi_platform_thishart_ptr());
+}
+
+u64 sbi_timer_virt_value(void)
+{
+ u64 *time_delta = sbi_scratch_offset_ptr(sbi_scratch_thishart_ptr(),
+ time_delta_off);
+
+ return sbi_timer_value() + *time_delta;
+}
+
+u64 sbi_timer_get_delta(void)
+{
+ u64 *time_delta = sbi_scratch_offset_ptr(sbi_scratch_thishart_ptr(),
+ time_delta_off);
+
+ return *time_delta;
+}
+
+void sbi_timer_set_delta(ulong delta)
+{
+ u64 *time_delta = sbi_scratch_offset_ptr(sbi_scratch_thishart_ptr(),
+ time_delta_off);
+
+ *time_delta = (u64)delta;
+}
+
+void sbi_timer_set_delta_upper(ulong delta_upper)
+{
+ u64 *time_delta = sbi_scratch_offset_ptr(sbi_scratch_thishart_ptr(),
+ time_delta_off);
+
+ *time_delta &= 0xffffffffULL;
+ *time_delta |= ((u64)delta_upper << 32);
+}
+
+void sbi_timer_event_start(u64 next_event)
+{
+ sbi_platform_timer_event_start(sbi_platform_thishart_ptr(), next_event);
+ csr_clear(CSR_MIP, MIP_STIP);
+ csr_set(CSR_MIE, MIP_MTIP);
+}
+
+void sbi_timer_process(void)
+{
+ csr_clear(CSR_MIE, MIP_MTIP);
+ csr_set(CSR_MIP, MIP_STIP);
+}
+
+int sbi_timer_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+ u64 *time_delta;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+ int ret;
+
+ if (cold_boot) {
+ time_delta_off = sbi_scratch_alloc_offset(sizeof(*time_delta),
+ "TIME_DELTA");
+ if (!time_delta_off)
+ return SBI_ENOMEM;
+ } else {
+ if (!time_delta_off)
+ return SBI_ENOMEM;
+ }
+
+ time_delta = sbi_scratch_offset_ptr(scratch, time_delta_off);
+ *time_delta = 0;
+
+ ret = sbi_platform_timer_init(plat, cold_boot);
+ if (ret)
+ return ret;
+
+ if (sbi_hart_has_feature(scratch, SBI_HART_HAS_TIME))
+ get_time_val = get_ticks;
+ else if (sbi_platform_has_timer_value(plat))
+ get_time_val = sbi_platform_timer_value;
+ else
+ /* There is no method to provide timer value */
+ return SBI_ENODEV;
+
+ return 0;
+}
+
+void sbi_timer_exit(struct sbi_scratch *scratch)
+{
+ sbi_platform_timer_event_stop(sbi_platform_ptr(scratch));
+
+ csr_clear(CSR_MIP, MIP_STIP);
+ csr_clear(CSR_MIE, MIP_MTIP);
+
+ sbi_platform_timer_exit(sbi_platform_ptr(scratch));
+}
diff --git a/roms/opensbi/lib/sbi/sbi_tlb.c b/roms/opensbi/lib/sbi/sbi_tlb.c
new file mode 100644
index 000000000..73f59e868
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_tlb.c
@@ -0,0 +1,429 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_atomic.h>
+#include <sbi/riscv_barrier.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_fifo.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_tlb.h>
+#include <sbi/sbi_hfence.h>
+#include <sbi/sbi_string.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_platform.h>
+
+static unsigned long tlb_sync_off;
+static unsigned long tlb_fifo_off;
+static unsigned long tlb_fifo_mem_off;
+static unsigned long tlb_range_flush_limit;
+
+static void sbi_tlb_flush_all(void)
+{
+ __asm__ __volatile("sfence.vma");
+}
+
+void sbi_tlb_local_hfence_vvma(struct sbi_tlb_info *tinfo)
+{
+ unsigned long start = tinfo->start;
+ unsigned long size = tinfo->size;
+ unsigned long vmid = tinfo->vmid;
+ unsigned long i, hgatp;
+
+ hgatp = csr_swap(CSR_HGATP,
+ (vmid << HGATP_VMID_SHIFT) & HGATP_VMID_MASK);
+
+ if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
+ __sbi_hfence_vvma_all();
+ goto done;
+ }
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ __sbi_hfence_vvma_va(start+i);
+ }
+
+done:
+ csr_write(CSR_HGATP, hgatp);
+}
+
+void sbi_tlb_local_hfence_gvma(struct sbi_tlb_info *tinfo)
+{
+ unsigned long start = tinfo->start;
+ unsigned long size = tinfo->size;
+ unsigned long i;
+
+ if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
+ __sbi_hfence_gvma_all();
+ return;
+ }
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ __sbi_hfence_gvma_gpa(start+i);
+ }
+}
+
+void sbi_tlb_local_sfence_vma(struct sbi_tlb_info *tinfo)
+{
+ unsigned long start = tinfo->start;
+ unsigned long size = tinfo->size;
+ unsigned long i;
+
+ if ((start == 0 && size == 0) || (size == SBI_TLB_FLUSH_ALL)) {
+ sbi_tlb_flush_all();
+ return;
+ }
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ __asm__ __volatile__("sfence.vma %0"
+ :
+ : "r"(start + i)
+ : "memory");
+ }
+}
+
+void sbi_tlb_local_hfence_vvma_asid(struct sbi_tlb_info *tinfo)
+{
+ unsigned long start = tinfo->start;
+ unsigned long size = tinfo->size;
+ unsigned long asid = tinfo->asid;
+ unsigned long vmid = tinfo->vmid;
+ unsigned long i, hgatp;
+
+ hgatp = csr_swap(CSR_HGATP,
+ (vmid << HGATP_VMID_SHIFT) & HGATP_VMID_MASK);
+
+ if (start == 0 && size == 0) {
+ __sbi_hfence_vvma_all();
+ goto done;
+ }
+
+ if (size == SBI_TLB_FLUSH_ALL) {
+ __sbi_hfence_vvma_asid(asid);
+ goto done;
+ }
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ __sbi_hfence_vvma_asid_va(start + i, asid);
+ }
+
+done:
+ csr_write(CSR_HGATP, hgatp);
+}
+
+void sbi_tlb_local_hfence_gvma_vmid(struct sbi_tlb_info *tinfo)
+{
+ unsigned long start = tinfo->start;
+ unsigned long size = tinfo->size;
+ unsigned long vmid = tinfo->vmid;
+ unsigned long i;
+
+ if (start == 0 && size == 0) {
+ __sbi_hfence_gvma_all();
+ return;
+ }
+
+ if (size == SBI_TLB_FLUSH_ALL) {
+ __sbi_hfence_gvma_vmid(vmid);
+ return;
+ }
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ __sbi_hfence_gvma_vmid_gpa(start + i, vmid);
+ }
+}
+
+void sbi_tlb_local_sfence_vma_asid(struct sbi_tlb_info *tinfo)
+{
+ unsigned long start = tinfo->start;
+ unsigned long size = tinfo->size;
+ unsigned long asid = tinfo->asid;
+ unsigned long i;
+
+ if (start == 0 && size == 0) {
+ sbi_tlb_flush_all();
+ return;
+ }
+
+ /* Flush entire MM context for a given ASID */
+ if (size == SBI_TLB_FLUSH_ALL) {
+ __asm__ __volatile__("sfence.vma x0, %0"
+ :
+ : "r"(asid)
+ : "memory");
+ return;
+ }
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ __asm__ __volatile__("sfence.vma %0, %1"
+ :
+ : "r"(start + i), "r"(asid)
+ : "memory");
+ }
+}
+
+void sbi_tlb_local_fence_i(struct sbi_tlb_info *tinfo)
+{
+ __asm__ __volatile("fence.i");
+}
+
+static void sbi_tlb_entry_process(struct sbi_tlb_info *tinfo)
+{
+ u32 rhartid;
+ struct sbi_scratch *rscratch = NULL;
+ unsigned long *rtlb_sync = NULL;
+
+ tinfo->local_fn(tinfo);
+
+ sbi_hartmask_for_each_hart(rhartid, &tinfo->smask) {
+ rscratch = sbi_hartid_to_scratch(rhartid);
+ if (!rscratch)
+ continue;
+
+ rtlb_sync = sbi_scratch_offset_ptr(rscratch, tlb_sync_off);
+ while (atomic_raw_xchg_ulong(rtlb_sync, 1)) ;
+ }
+}
+
+static void sbi_tlb_process_count(struct sbi_scratch *scratch, int count)
+{
+ struct sbi_tlb_info tinfo;
+ u32 deq_count = 0;
+ struct sbi_fifo *tlb_fifo =
+ sbi_scratch_offset_ptr(scratch, tlb_fifo_off);
+
+ while (!sbi_fifo_dequeue(tlb_fifo, &tinfo)) {
+ sbi_tlb_entry_process(&tinfo);
+ deq_count++;
+ if (deq_count > count)
+ break;
+ }
+}
+
+static void sbi_tlb_process(struct sbi_scratch *scratch)
+{
+ struct sbi_tlb_info tinfo;
+ struct sbi_fifo *tlb_fifo =
+ sbi_scratch_offset_ptr(scratch, tlb_fifo_off);
+
+ while (!sbi_fifo_dequeue(tlb_fifo, &tinfo))
+ sbi_tlb_entry_process(&tinfo);
+}
+
+static void sbi_tlb_sync(struct sbi_scratch *scratch)
+{
+ unsigned long *tlb_sync =
+ sbi_scratch_offset_ptr(scratch, tlb_sync_off);
+
+ while (!atomic_raw_xchg_ulong(tlb_sync, 0)) {
+ /*
+ * While we are waiting for the remote hart to set the
+ * sync flag, consume fifo requests to avoid deadlock.
+ */
+ sbi_tlb_process_count(scratch, 1);
+ }
+
+ return;
+}
+
+static inline int __sbi_tlb_range_check(struct sbi_tlb_info *curr,
+ struct sbi_tlb_info *next)
+{
+ unsigned long curr_end;
+ unsigned long next_end;
+ int ret = SBI_FIFO_UNCHANGED;
+
+ if (!curr || !next)
+ return ret;
+
+ next_end = next->start + next->size;
+ curr_end = curr->start + curr->size;
+ if (next->start <= curr->start && next_end > curr_end) {
+ curr->start = next->start;
+ curr->size = next->size;
+ sbi_hartmask_or(&curr->smask, &curr->smask, &next->smask);
+ ret = SBI_FIFO_UPDATED;
+ } else if (next->start >= curr->start && next_end <= curr_end) {
+ sbi_hartmask_or(&curr->smask, &curr->smask, &next->smask);
+ ret = SBI_FIFO_SKIP;
+ }
+
+ return ret;
+}
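__sbi_tlb_range_check() either widens the queued entry in place (when the new request covers it) or drops the new request (when it is already covered). A worked example of the two cases with plain intervals and illustrative addresses, omitting the hartmask merging:

#include <stdio.h>

struct range { unsigned long start, size; };

static const char *check(struct range *curr, const struct range *next)
{
    unsigned long curr_end = curr->start + curr->size;
    unsigned long next_end = next->start + next->size;

    if (next->start <= curr->start && next_end > curr_end) {
        *curr = *next;  /* next covers curr: widen the queued entry */
        return "UPDATED";
    } else if (next->start >= curr->start && next_end <= curr_end) {
        return "SKIP";  /* curr already covers next: drop the request */
    }
    return "UNCHANGED";
}

int main(void)
{
    struct range curr  = { 0x1000, 0x1000 };
    struct range wider = { 0x0000, 0x4000 };
    struct range inner = { 0x1800, 0x0400 };

    printf("wider: %s\n", check(&curr, &wider)); /* UPDATED */
    printf("inner: %s\n", check(&curr, &inner)); /* SKIP */
    return 0;
}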
+
+/**
+ * Callback to decide if an in-place fifo update is required or the next
+ * entry can be skipped. Here are the different cases that are being handled.
+ *
+ * Case1:
+ *	if the next flush request range lies within one of the existing
+ *	entries, skip the next entry.
+ * Case2:
+ *	if the flush request range in the current fifo entry lies within the
+ *	next flush request, update the current entry.
+ *
+ * Note:
+ *	We cannot issue a fifo reset anymore if a complete vma flush is
+ *	requested, because we are queueing FENCE.I requests as well now.
+ *	To ease the pressure in the enqueue/fifo sync path, try to dequeue
+ *	1 element before continuing the while loop. This method is preferred
+ *	over wfi/ipi because of the MMIO cost involved in the latter method.
+ */
+static int sbi_tlb_update_cb(void *in, void *data)
+{
+ struct sbi_tlb_info *curr;
+ struct sbi_tlb_info *next;
+ int ret = SBI_FIFO_UNCHANGED;
+
+ if (!in || !data)
+ return ret;
+
+ curr = (struct sbi_tlb_info *)data;
+ next = (struct sbi_tlb_info *)in;
+
+ if (next->local_fn == sbi_tlb_local_sfence_vma_asid &&
+ curr->local_fn == sbi_tlb_local_sfence_vma_asid) {
+ if (next->asid == curr->asid)
+ ret = __sbi_tlb_range_check(curr, next);
+ } else if (next->local_fn == sbi_tlb_local_sfence_vma &&
+ curr->local_fn == sbi_tlb_local_sfence_vma) {
+ ret = __sbi_tlb_range_check(curr, next);
+ }
+
+ return ret;
+}
+
+static int sbi_tlb_update(struct sbi_scratch *scratch,
+ struct sbi_scratch *remote_scratch,
+ u32 remote_hartid, void *data)
+{
+ int ret;
+ struct sbi_fifo *tlb_fifo_r;
+ struct sbi_tlb_info *tinfo = data;
+ u32 curr_hartid = current_hartid();
+
+ /*
+ * If the address range to flush is too big then simply
+ * upgrade it to a flush-all because we can only flush
+ * 4KB at a time.
+ */
+ if (tinfo->size > tlb_range_flush_limit) {
+ tinfo->start = 0;
+ tinfo->size = SBI_TLB_FLUSH_ALL;
+ }
+
+ /*
+ * If the request is to queue a tlb flush entry for itself
+ * then just do a local flush and return.
+ */
+ if (remote_hartid == curr_hartid) {
+ tinfo->local_fn(tinfo);
+ return -1;
+ }
+
+ tlb_fifo_r = sbi_scratch_offset_ptr(remote_scratch, tlb_fifo_off);
+
+ ret = sbi_fifo_inplace_update(tlb_fifo_r, data, sbi_tlb_update_cb);
+ if (ret != SBI_FIFO_UNCHANGED) {
+ return 1;
+ }
+
+ while (sbi_fifo_enqueue(tlb_fifo_r, data) < 0) {
+ /**
+ * For now, busy loop until there is space in the fifo.
+ * There may be a case where the target hart is also
+ * enqueueing into the source hart's fifo. Both harts may
+ * busy loop, leading to a deadlock.
+ * TODO: Introduce a wait/wakeup event mechanism to handle
+ * this properly.
+ */
+ sbi_tlb_process_count(scratch, 1);
+ sbi_dprintf("hart%d: hart%d tlb fifo full\n",
+ curr_hartid, remote_hartid);
+ }
+
+ return 0;
+}
+
+static struct sbi_ipi_event_ops tlb_ops = {
+ .name = "IPI_TLB",
+ .update = sbi_tlb_update,
+ .sync = sbi_tlb_sync,
+ .process = sbi_tlb_process,
+};
+
+static u32 tlb_event = SBI_IPI_EVENT_MAX;
+
+int sbi_tlb_request(ulong hmask, ulong hbase, struct sbi_tlb_info *tinfo)
+{
+ if (!tinfo->local_fn)
+ return SBI_EINVAL;
+
+ return sbi_ipi_send_many(hmask, hbase, tlb_event, tinfo);
+}
+
+int sbi_tlb_init(struct sbi_scratch *scratch, bool cold_boot)
+{
+ int ret;
+ void *tlb_mem;
+ unsigned long *tlb_sync;
+ struct sbi_fifo *tlb_q;
+ const struct sbi_platform *plat = sbi_platform_ptr(scratch);
+
+ if (cold_boot) {
+ tlb_sync_off = sbi_scratch_alloc_offset(sizeof(*tlb_sync),
+ "IPI_TLB_SYNC");
+ if (!tlb_sync_off)
+ return SBI_ENOMEM;
+ tlb_fifo_off = sbi_scratch_alloc_offset(sizeof(*tlb_q),
+ "IPI_TLB_FIFO");
+ if (!tlb_fifo_off) {
+ sbi_scratch_free_offset(tlb_sync_off);
+ return SBI_ENOMEM;
+ }
+ tlb_fifo_mem_off = sbi_scratch_alloc_offset(
+ SBI_TLB_FIFO_NUM_ENTRIES * SBI_TLB_INFO_SIZE,
+ "IPI_TLB_FIFO_MEM");
+ if (!tlb_fifo_mem_off) {
+ sbi_scratch_free_offset(tlb_fifo_off);
+ sbi_scratch_free_offset(tlb_sync_off);
+ return SBI_ENOMEM;
+ }
+ ret = sbi_ipi_event_create(&tlb_ops);
+ if (ret < 0) {
+ sbi_scratch_free_offset(tlb_fifo_mem_off);
+ sbi_scratch_free_offset(tlb_fifo_off);
+ sbi_scratch_free_offset(tlb_sync_off);
+ return ret;
+ }
+ tlb_event = ret;
+ tlb_range_flush_limit = sbi_platform_tlbr_flush_limit(plat);
+ } else {
+ if (!tlb_sync_off ||
+ !tlb_fifo_off ||
+ !tlb_fifo_mem_off)
+ return SBI_ENOMEM;
+ if (SBI_IPI_EVENT_MAX <= tlb_event)
+ return SBI_ENOSPC;
+ }
+
+ tlb_sync = sbi_scratch_offset_ptr(scratch, tlb_sync_off);
+ tlb_q = sbi_scratch_offset_ptr(scratch, tlb_fifo_off);
+ tlb_mem = sbi_scratch_offset_ptr(scratch, tlb_fifo_mem_off);
+
+ *tlb_sync = 0;
+
+ sbi_fifo_init(tlb_q, tlb_mem,
+ SBI_TLB_FIFO_NUM_ENTRIES, SBI_TLB_INFO_SIZE);
+
+ return 0;
+}
diff --git a/roms/opensbi/lib/sbi/sbi_trap.c b/roms/opensbi/lib/sbi/sbi_trap.c
new file mode 100644
index 000000000..b7349d2c9
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_trap.c
@@ -0,0 +1,293 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_asm.h>
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_console.h>
+#include <sbi/sbi_ecall.h>
+#include <sbi/sbi_error.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_illegal_insn.h>
+#include <sbi/sbi_ipi.h>
+#include <sbi/sbi_misaligned_ldst.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_timer.h>
+#include <sbi/sbi_trap.h>
+
+static void __noreturn sbi_trap_error(const char *msg, int rc,
+ ulong mcause, ulong mtval, ulong mtval2,
+ ulong mtinst, struct sbi_trap_regs *regs)
+{
+ u32 hartid = current_hartid();
+
+ sbi_printf("%s: hart%d: %s (error %d)\n", __func__, hartid, msg, rc);
+ sbi_printf("%s: hart%d: mcause=0x%" PRILX " mtval=0x%" PRILX "\n",
+ __func__, hartid, mcause, mtval);
+ if (misa_extension('H')) {
+ sbi_printf("%s: hart%d: mtval2=0x%" PRILX
+ " mtinst=0x%" PRILX "\n",
+ __func__, hartid, mtval2, mtinst);
+ }
+ sbi_printf("%s: hart%d: mepc=0x%" PRILX " mstatus=0x%" PRILX "\n",
+ __func__, hartid, regs->mepc, regs->mstatus);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "ra", regs->ra, "sp", regs->sp);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "gp", regs->gp, "tp", regs->tp);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "s0", regs->s0, "s1", regs->s1);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "a0", regs->a0, "a1", regs->a1);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "a2", regs->a2, "a3", regs->a3);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "a4", regs->a4, "a5", regs->a5);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "a6", regs->a6, "a7", regs->a7);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "s2", regs->s2, "s3", regs->s3);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "s4", regs->s4, "s5", regs->s5);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "s6", regs->s6, "s7", regs->s7);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "s8", regs->s8, "s9", regs->s9);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "s10", regs->s10, "s11", regs->s11);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "t0", regs->t0, "t1", regs->t1);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "t2", regs->t2, "t3", regs->t3);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX " %s=0x%" PRILX "\n", __func__,
+ hartid, "t4", regs->t4, "t5", regs->t5);
+ sbi_printf("%s: hart%d: %s=0x%" PRILX "\n", __func__, hartid, "t6",
+ regs->t6);
+
+ sbi_hart_hang();
+}
+
+/**
+ * Redirect trap to lower privilege mode (S-mode or U-mode)
+ *
+ * @param regs pointer to register state
+ * @param trap pointer to trap details
+ *
+ * @return 0 on success and negative error code on failure
+ */
+int sbi_trap_redirect(struct sbi_trap_regs *regs,
+ struct sbi_trap_info *trap)
+{
+ ulong hstatus, vsstatus, prev_mode;
+#if __riscv_xlen == 32
+ bool prev_virt = (regs->mstatusH & MSTATUSH_MPV) ? TRUE : FALSE;
+#else
+ bool prev_virt = (regs->mstatus & MSTATUS_MPV) ? TRUE : FALSE;
+#endif
+ /* By default, we redirect to HS-mode */
+ bool next_virt = FALSE;
+
+ /* Sanity check on previous mode */
+ prev_mode = (regs->mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT;
+ if (prev_mode != PRV_S && prev_mode != PRV_U)
+ return SBI_ENOTSUPP;
+
+ /* For certain exceptions from VS/VU-mode we redirect to VS-mode */
+ if (misa_extension('H') && prev_virt) {
+ switch (trap->cause) {
+ case CAUSE_FETCH_PAGE_FAULT:
+ case CAUSE_LOAD_PAGE_FAULT:
+ case CAUSE_STORE_PAGE_FAULT:
+ next_virt = TRUE;
+ break;
+ default:
+ break;
+ };
+ }
+
+ /* Update MSTATUS MPV bits */
+#if __riscv_xlen == 32
+ regs->mstatusH &= ~MSTATUSH_MPV;
+ regs->mstatusH |= (next_virt) ? MSTATUSH_MPV : 0UL;
+#else
+ regs->mstatus &= ~MSTATUS_MPV;
+ regs->mstatus |= (next_virt) ? MSTATUS_MPV : 0UL;
+#endif
+
+ /* Update HSTATUS for VS/VU-mode to HS-mode transition */
+ if (misa_extension('H') && prev_virt && !next_virt) {
+ /* Update HSTATUS SPVP and SPV bits */
+ hstatus = csr_read(CSR_HSTATUS);
+ hstatus &= ~HSTATUS_SPVP;
+ hstatus |= (prev_mode == PRV_S) ? HSTATUS_SPVP : 0;
+ hstatus &= ~HSTATUS_SPV;
+ hstatus |= (prev_virt) ? HSTATUS_SPV : 0;
+ csr_write(CSR_HSTATUS, hstatus);
+ csr_write(CSR_HTVAL, trap->tval2);
+ csr_write(CSR_HTINST, trap->tinst);
+ }
+
+ /* Update exception related CSRs */
+ if (next_virt) {
+ /* Update VS-mode exception info */
+ csr_write(CSR_VSTVAL, trap->tval);
+ csr_write(CSR_VSEPC, trap->epc);
+ csr_write(CSR_VSCAUSE, trap->cause);
+
+ /* Set MEPC to VS-mode exception vector base */
+ regs->mepc = csr_read(CSR_VSTVEC);
+
+ /* Set MPP to VS-mode */
+ regs->mstatus &= ~MSTATUS_MPP;
+ regs->mstatus |= (PRV_S << MSTATUS_MPP_SHIFT);
+
+ /* Get VS-mode SSTATUS CSR */
+ vsstatus = csr_read(CSR_VSSTATUS);
+
+ /* Set SPP for VS-mode */
+ vsstatus &= ~SSTATUS_SPP;
+ if (prev_mode == PRV_S)
+ vsstatus |= (1UL << SSTATUS_SPP_SHIFT);
+
+ /* Set SPIE for VS-mode */
+ vsstatus &= ~SSTATUS_SPIE;
+ if (vsstatus & SSTATUS_SIE)
+ vsstatus |= (1UL << SSTATUS_SPIE_SHIFT);
+
+ /* Clear SIE for VS-mode */
+ vsstatus &= ~SSTATUS_SIE;
+
+ /* Update VS-mode SSTATUS CSR */
+ csr_write(CSR_VSSTATUS, vsstatus);
+ } else {
+ /* Update S-mode exception info */
+ csr_write(CSR_STVAL, trap->tval);
+ csr_write(CSR_SEPC, trap->epc);
+ csr_write(CSR_SCAUSE, trap->cause);
+
+ /* Set MEPC to S-mode exception vector base */
+ regs->mepc = csr_read(CSR_STVEC);
+
+ /* Set MPP to S-mode */
+ regs->mstatus &= ~MSTATUS_MPP;
+ regs->mstatus |= (PRV_S << MSTATUS_MPP_SHIFT);
+
+ /* Set SPP for S-mode */
+ regs->mstatus &= ~MSTATUS_SPP;
+ if (prev_mode == PRV_S)
+ regs->mstatus |= (1UL << MSTATUS_SPP_SHIFT);
+
+ /* Set SPIE for S-mode */
+ regs->mstatus &= ~MSTATUS_SPIE;
+ if (regs->mstatus & MSTATUS_SIE)
+ regs->mstatus |= (1UL << MSTATUS_SPIE_SHIFT);
+
+ /* Clear SIE for S-mode */
+ regs->mstatus &= ~MSTATUS_SIE;
+ }
+
+ return 0;
+}
+
+/**
+ * Handle trap/interrupt
+ *
+ * This function is called by firmware linked to the OpenSBI
+ * library for handling trap/interrupt. It expects the
+ * following:
+ * 1. The 'mscratch' CSR is pointing to sbi_scratch of current HART
+ * 2. The 'mcause' CSR holds the exception/interrupt cause
+ * 3. The 'mtval' CSR holds additional trap information
+ * 4. The 'mtval2' CSR holds additional trap information
+ * 5. The 'mtinst' CSR holds the decoded trap instruction
+ * 6. Stack pointer (SP) is setup for current HART
+ * 7. Interrupts are disabled in MSTATUS CSR
+ *
+ * @param regs pointer to register state
+ */
+void sbi_trap_handler(struct sbi_trap_regs *regs)
+{
+ int rc = SBI_ENOTSUPP;
+ const char *msg = "trap handler failed";
+ ulong mcause = csr_read(CSR_MCAUSE);
+ ulong mtval = csr_read(CSR_MTVAL), mtval2 = 0, mtinst = 0;
+ struct sbi_trap_info trap;
+
+ if (misa_extension('H')) {
+ mtval2 = csr_read(CSR_MTVAL2);
+ mtinst = csr_read(CSR_MTINST);
+ }
+
+ if (mcause & (1UL << (__riscv_xlen - 1))) {
+ mcause &= ~(1UL << (__riscv_xlen - 1));
+ switch (mcause) {
+ case IRQ_M_TIMER:
+ sbi_timer_process();
+ break;
+ case IRQ_M_SOFT:
+ sbi_ipi_process();
+ break;
+ default:
+ msg = "unhandled external interrupt";
+ goto trap_error;
+ };
+ return;
+ }
+
+ switch (mcause) {
+ case CAUSE_ILLEGAL_INSTRUCTION:
+ rc = sbi_illegal_insn_handler(mtval, regs);
+ msg = "illegal instruction handler failed";
+ break;
+ case CAUSE_MISALIGNED_LOAD:
+ rc = sbi_misaligned_load_handler(mtval, mtval2, mtinst, regs);
+ msg = "misaligned load handler failed";
+ break;
+ case CAUSE_MISALIGNED_STORE:
+ rc = sbi_misaligned_store_handler(mtval, mtval2, mtinst, regs);
+ msg = "misaligned store handler failed";
+ break;
+ case CAUSE_SUPERVISOR_ECALL:
+ case CAUSE_MACHINE_ECALL:
+ rc = sbi_ecall_handler(regs);
+ msg = "ecall handler failed";
+ break;
+ default:
+ /* If the trap came from S or U mode, redirect it there */
+ trap.epc = regs->mepc;
+ trap.cause = mcause;
+ trap.tval = mtval;
+ trap.tval2 = mtval2;
+ trap.tinst = mtinst;
+ rc = sbi_trap_redirect(regs, &trap);
+ break;
+ };
+
+trap_error:
+ if (rc)
+ sbi_trap_error(msg, rc, mcause, mtval, mtval2, mtinst, regs);
+}
+
+typedef void (*trap_exit_t)(const struct sbi_trap_regs *regs);
+
+/**
+ * Exit trap/interrupt handling
+ *
+ * This function is called by non-firmware code to abruptly exit
+ * trap/interrupt handling and resume execution at the context
+ * pointed to by the given register state.
+ *
+ * @param regs pointer to register state
+ */
+void __noreturn sbi_trap_exit(const struct sbi_trap_regs *regs)
+{
+ struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+
+ ((trap_exit_t)scratch->trap_exit)(regs);
+ __builtin_unreachable();
+}
diff --git a/roms/opensbi/lib/sbi/sbi_unpriv.c b/roms/opensbi/lib/sbi/sbi_unpriv.c
new file mode 100644
index 000000000..42461241f
--- /dev/null
+++ b/roms/opensbi/lib/sbi/sbi_unpriv.c
@@ -0,0 +1,165 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#include <sbi/riscv_encoding.h>
+#include <sbi/sbi_bitops.h>
+#include <sbi/sbi_hart.h>
+#include <sbi/sbi_scratch.h>
+#include <sbi/sbi_trap.h>
+#include <sbi/sbi_unpriv.h>
+
+/**
+ * a3 must be a pointer to the sbi_trap_info and a4 is used as a temporary
+ * register in the trap handler. Make sure that the compiler doesn't use
+ * a3 & a4.
+ */
+#define DEFINE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
+ type sbi_load_##type(const type *addr, \
+ struct sbi_trap_info *trap) \
+ { \
+ register ulong tinfo asm("a3"); \
+ register ulong mstatus = 0; \
+ register ulong mtvec = sbi_hart_expected_trap_addr(); \
+ type ret = 0; \
+ trap->cause = 0; \
+ asm volatile( \
+ "add %[tinfo], %[taddr], zero\n" \
+ "csrrw %[mtvec], " STR(CSR_MTVEC) ", %[mtvec]\n" \
+ "csrrs %[mstatus], " STR(CSR_MSTATUS) ", %[mprv]\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ #insn " %[ret], %[addr]\n" \
+ ".option pop\n" \
+ "csrw " STR(CSR_MSTATUS) ", %[mstatus]\n" \
+ "csrw " STR(CSR_MTVEC) ", %[mtvec]" \
+ : [mstatus] "+&r"(mstatus), [mtvec] "+&r"(mtvec), \
+ [tinfo] "+&r"(tinfo), [ret] "=&r"(ret) \
+ : [addr] "m"(*addr), [mprv] "r"(MSTATUS_MPRV), \
+ [taddr] "r"((ulong)trap) \
+ : "a4", "memory"); \
+ return ret; \
+ }
+
+#define DEFINE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
+ void sbi_store_##type(type *addr, type val, \
+ struct sbi_trap_info *trap) \
+ { \
+ register ulong tinfo asm("a3") = (ulong)trap; \
+ register ulong mstatus = 0; \
+ register ulong mtvec = sbi_hart_expected_trap_addr(); \
+ trap->cause = 0; \
+ asm volatile( \
+ "add %[tinfo], %[taddr], zero\n" \
+ "csrrw %[mtvec], " STR(CSR_MTVEC) ", %[mtvec]\n" \
+ "csrrs %[mstatus], " STR(CSR_MSTATUS) ", %[mprv]\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ #insn " %[val], %[addr]\n" \
+ ".option pop\n" \
+ "csrw " STR(CSR_MSTATUS) ", %[mstatus]\n" \
+ "csrw " STR(CSR_MTVEC) ", %[mtvec]" \
+ : [mstatus] "+&r"(mstatus), [mtvec] "+&r"(mtvec), \
+ [tinfo] "+&r"(tinfo) \
+ : [addr] "m"(*addr), [mprv] "r"(MSTATUS_MPRV), \
+ [val] "r"(val), [taddr] "r"((ulong)trap) \
+ : "a4", "memory"); \
+ }
+
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
+#if __riscv_xlen == 64
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
+DEFINE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
+#else
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
+DEFINE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
+
+u64 sbi_load_u64(const u64 *addr,
+ struct sbi_trap_info *trap)
+{
+ u64 ret = sbi_load_u32((u32 *)addr, trap);
+
+ if (trap->cause)
+ return 0;
+ ret |= ((u64)sbi_load_u32((u32 *)addr + 1, trap) << 32);
+ if (trap->cause)
+ return 0;
+
+ return ret;
+}
+
+void sbi_store_u64(u64 *addr, u64 val,
+ struct sbi_trap_info *trap)
+{
+ sbi_store_u32((u32 *)addr, val, trap);
+ if (trap->cause)
+ return;
+
+ sbi_store_u32((u32 *)addr + 1, val >> 32, trap);
+ if (trap->cause)
+ return;
+}
+#endif
+
+ulong sbi_get_insn(ulong mepc, struct sbi_trap_info *trap)
+{
+ register ulong tinfo asm("a3");
+ register ulong ttmp asm("a4");
+ register ulong mstatus = 0;
+ register ulong mtvec = sbi_hart_expected_trap_addr();
+ ulong insn = 0;
+
+ trap->cause = 0;
+
+ asm volatile(
+ "add %[tinfo], %[taddr], zero\n"
+ "csrrw %[mtvec], " STR(CSR_MTVEC) ", %[mtvec]\n"
+ "csrrs %[mstatus], " STR(CSR_MSTATUS) ", %[mprv]\n"
+ "lhu %[insn], (%[addr])\n"
+ "andi %[ttmp], %[insn], 3\n"
+ "addi %[ttmp], %[ttmp], -3\n"
+ "bne %[ttmp], zero, 2f\n"
+ "lhu %[ttmp], 2(%[addr])\n"
+ "sll %[ttmp], %[ttmp], 16\n"
+ "add %[insn], %[insn], %[ttmp]\n"
+ "2: csrw " STR(CSR_MSTATUS) ", %[mstatus]\n"
+ "csrw " STR(CSR_MTVEC) ", %[mtvec]"
+ : [mstatus] "+&r"(mstatus), [mtvec] "+&r"(mtvec),
+ [tinfo] "+&r"(tinfo), [ttmp] "+&r"(ttmp),
+ [insn] "=&r"(insn)
+ : [mprv] "r"(MSTATUS_MPRV | MSTATUS_MXR),
+ [taddr] "r"((ulong)trap), [addr] "r"(mepc)
+ : "memory");
+
+ switch (trap->cause) {
+ case CAUSE_LOAD_ACCESS:
+ trap->cause = CAUSE_FETCH_ACCESS;
+ trap->tval = mepc;
+ break;
+ case CAUSE_LOAD_PAGE_FAULT:
+ trap->cause = CAUSE_FETCH_PAGE_FAULT;
+ trap->tval = mepc;
+ break;
+ case CAUSE_LOAD_GUEST_PAGE_FAULT:
+ trap->cause = CAUSE_FETCH_GUEST_PAGE_FAULT;
+ trap->tval = mepc;
+ break;
+ default:
+ break;
+ };
+
+ return insn;
+}
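The inline assembly in sbi_get_insn() implements the standard RISC-V length rule: a 16-bit parcel whose low two bits are 0b11 begins a 32-bit instruction, so a second parcel is fetched and spliced in; anything else is a compressed 16-bit instruction. The same logic in plain C, run over hypothetical in-memory code:

#include <stdint.h>
#include <stdio.h>

static uint32_t fetch_insn(const uint16_t *pc)
{
    uint32_t insn = pc[0];

    if ((insn & 3) == 3) /* not a compressed encoding: 32-bit wide */
        insn |= (uint32_t)pc[1] << 16;
    return insn;
}

int main(void)
{
    uint16_t code[]  = { 0x0513, 0x0000 }; /* addi a0, zero, 0 (32-bit) */
    uint16_t c_nop[] = { 0x0001 };         /* c.nop (16-bit) */

    printf("0x%08x\n", fetch_insn(code));  /* 0x00000513 */
    printf("0x%08x\n", fetch_insn(c_nop)); /* 0x00000001 */
    return 0;
}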