Diffstat (limited to 'roms/u-boot/arch/mips/lib')
-rw-r--r--  roms/u-boot/arch/mips/lib/Makefile        17
-rw-r--r--  roms/u-boot/arch/mips/lib/ashldi3.c       25
-rw-r--r--  roms/u-boot/arch/mips/lib/ashrdi3.c       27
-rw-r--r--  roms/u-boot/arch/mips/lib/asm-offsets.c   60
-rw-r--r--  roms/u-boot/arch/mips/lib/boot.c          24
-rw-r--r--  roms/u-boot/arch/mips/lib/bootm.c        338
-rw-r--r--  roms/u-boot/arch/mips/lib/cache.c        199
-rw-r--r--  roms/u-boot/arch/mips/lib/cache_init.S   439
-rw-r--r--  roms/u-boot/arch/mips/lib/genex.S        223
-rw-r--r--  roms/u-boot/arch/mips/lib/libgcc.h        25
-rw-r--r--  roms/u-boot/arch/mips/lib/lshrdi3.c       25
-rw-r--r--  roms/u-boot/arch/mips/lib/reloc.c        167
-rw-r--r--  roms/u-boot/arch/mips/lib/spl.c           22
-rw-r--r--  roms/u-boot/arch/mips/lib/stack.c         20
-rw-r--r--  roms/u-boot/arch/mips/lib/traps.c        141
15 files changed, 1752 insertions(+), 0 deletions(-)
diff --git a/roms/u-boot/arch/mips/lib/Makefile b/roms/u-boot/arch/mips/lib/Makefile
new file mode 100644
index 000000000..9ee1fcb5c
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2003-2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+obj-y += cache.o
+obj-y += cache_init.o
+obj-y += genex.o
+obj-y += reloc.o
+obj-y += stack.o
+obj-y += traps.o
+
+obj-$(CONFIG_CMD_BOOTM) += bootm.o
+obj-$(CONFIG_CMD_GO) += boot.o
+obj-$(CONFIG_SPL_BUILD) += spl.o
+
+lib-$(CONFIG_USE_PRIVATE_LIBGCC) += ashldi3.o ashrdi3.o lshrdi3.o
diff --git a/roms/u-boot/arch/mips/lib/ashldi3.c b/roms/u-boot/arch/mips/lib/ashldi3.c
new file mode 100644
index 000000000..9b50d866a
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/ashldi3.c
@@ -0,0 +1,25 @@
+#include "libgcc.h"
+
+long long __ashldi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.low = 0;
+ w.s.high = (unsigned int) uu.s.low << -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.low >> bm;
+
+ w.s.low = (unsigned int) uu.s.low << b;
+ w.s.high = ((unsigned int) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+}
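
All three libgcc shift helpers in this directory use the same word-splitting technique: the 64-bit operand is viewed as two 32-bit halves and the bits that cross the word boundary are carried between them. A minimal host-side sketch (not part of the patch; names and the test are illustrative only) that mirrors __ashldi3 with explicit halves and checks it against the compiler's native 64-bit shift:

#include <assert.h>
#include <stdint.h>

/* Same algorithm as __ashldi3 above, written with explicit 32-bit halves. */
static uint64_t shl64_by_words(uint64_t u, unsigned int b)
{
	uint32_t lo = (uint32_t)u, hi = (uint32_t)(u >> 32), w_lo, w_hi;

	if (b == 0)
		return u;

	if (b >= 32) {			/* corresponds to the "bm <= 0" branch */
		w_lo = 0;
		w_hi = lo << (b - 32);
	} else {			/* carry the top (32 - b) bits of lo into hi */
		w_lo = lo << b;
		w_hi = (hi << b) | (lo >> (32 - b));
	}

	return ((uint64_t)w_hi << 32) | w_lo;
}

int main(void)
{
	assert(shl64_by_words(0x123456789abcdef0ull, 12) ==
	       (0x123456789abcdef0ull << 12));
	assert(shl64_by_words(1ull, 40) == (1ull << 40));
	return 0;
}
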
diff --git a/roms/u-boot/arch/mips/lib/ashrdi3.c b/roms/u-boot/arch/mips/lib/ashrdi3.c
new file mode 100644
index 000000000..f30359b73
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/ashrdi3.c
@@ -0,0 +1,27 @@
+#include "libgcc.h"
+
+long long __ashrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high =
+ uu.s.high >> 31;
+ w.s.low = uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
diff --git a/roms/u-boot/arch/mips/lib/asm-offsets.c b/roms/u-boot/arch/mips/lib/asm-offsets.c
new file mode 100644
index 000000000..22dc14b10
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/asm-offsets.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * offset.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+
+#include <asm/ptrace.h>
+#include <linux/stddef.h>
+#include <linux/kbuild.h>
+
+void output_ptreg_defines(void)
+{
+ COMMENT("MIPS pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_LO, pt_regs, lo);
+ OFFSET(PT_HI, pt_regs, hi);
+ OFFSET(PT_EPC, pt_regs, cp0_epc);
+ OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
+ OFFSET(PT_STATUS, pt_regs, cp0_status);
+ OFFSET(PT_CAUSE, pt_regs, cp0_cause);
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
+}
diff --git a/roms/u-boot/arch/mips/lib/boot.c b/roms/u-boot/arch/mips/lib/boot.c
new file mode 100644
index 000000000..1b29d637c
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/boot.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
+ */
+
+#include <common.h>
+#include <command.h>
+#include <cpu_func.h>
+#include <asm/global_data.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+unsigned long do_go_exec(ulong (*entry)(int, char * const []),
+ int argc, char * const argv[])
+{
+ /*
+ * Flush cache before jumping to application. Let's flush the
+ * whole SDRAM area, since we don't know the size of the image
+ * that was loaded.
+ */
+ flush_cache(gd->ram_base, gd->ram_top - gd->ram_base);
+
+ return entry(argc, argv);
+}
diff --git a/roms/u-boot/arch/mips/lib/bootm.c b/roms/u-boot/arch/mips/lib/bootm.c
new file mode 100644
index 000000000..fde90fced
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/bootm.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2003
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ */
+
+#include <common.h>
+#include <bootstage.h>
+#include <env.h>
+#include <image.h>
+#include <fdt_support.h>
+#include <lmb.h>
+#include <log.h>
+#include <asm/addrspace.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define LINUX_MAX_ENVS 256
+#define LINUX_MAX_ARGS 256
+
+static int linux_argc;
+static char **linux_argv;
+static char *linux_argp;
+
+static char **linux_env;
+static char *linux_env_p;
+static int linux_env_idx;
+
+static ulong arch_get_sp(void)
+{
+ ulong ret;
+
+ __asm__ __volatile__("move %0, $sp" : "=r"(ret) : );
+
+ return ret;
+}
+
+void arch_lmb_reserve(struct lmb *lmb)
+{
+ ulong sp;
+
+ sp = arch_get_sp();
+ debug("## Current stack ends at 0x%08lx\n", sp);
+
+ /* adjust sp by 4K to be safe */
+ sp -= 4096;
+ lmb_reserve(lmb, sp, gd->ram_top - sp);
+}
+
+static void linux_cmdline_init(void)
+{
+ linux_argc = 1;
+ linux_argv = (char **)CKSEG1ADDR(gd->bd->bi_boot_params);
+ linux_argv[0] = 0;
+ linux_argp = (char *)(linux_argv + LINUX_MAX_ARGS);
+}
+
+static void linux_cmdline_set(const char *value, size_t len)
+{
+ linux_argv[linux_argc] = linux_argp;
+ memcpy(linux_argp, value, len);
+ linux_argp[len] = 0;
+
+ linux_argp += len + 1;
+ linux_argc++;
+}
+
+static void linux_cmdline_dump(void)
+{
+ int i;
+
+ debug("## cmdline argv at 0x%p, argp at 0x%p\n",
+ linux_argv, linux_argp);
+
+ for (i = 1; i < linux_argc; i++)
+ debug(" arg %03d: %s\n", i, linux_argv[i]);
+}
+
+static void linux_cmdline_legacy(bootm_headers_t *images)
+{
+ const char *bootargs, *next, *quote;
+
+ linux_cmdline_init();
+
+ bootargs = env_get("bootargs");
+ if (!bootargs)
+ return;
+
+ next = bootargs;
+
+ while (bootargs && *bootargs && linux_argc < LINUX_MAX_ARGS) {
+ quote = strchr(bootargs, '"');
+ next = strchr(bootargs, ' ');
+
+ while (next && quote && quote < next) {
+ /*
+ * we found a left quote before the next blank
+ * now we have to find the matching right quote
+ */
+ next = strchr(quote + 1, '"');
+ if (next) {
+ quote = strchr(next + 1, '"');
+ next = strchr(next + 1, ' ');
+ }
+ }
+
+ if (!next)
+ next = bootargs + strlen(bootargs);
+
+ linux_cmdline_set(bootargs, next - bootargs);
+
+ if (*next)
+ next++;
+
+ bootargs = next;
+ }
+}
+
+static void linux_cmdline_append(bootm_headers_t *images)
+{
+ char buf[24];
+ ulong mem, rd_start, rd_size;
+
+ /* append mem */
+ mem = gd->ram_size >> 20;
+ sprintf(buf, "mem=%luM", mem);
+ linux_cmdline_set(buf, strlen(buf));
+
+ /* append rd_start and rd_size */
+ rd_start = images->initrd_start;
+ rd_size = images->initrd_end - images->initrd_start;
+
+ if (rd_size) {
+ sprintf(buf, "rd_start=0x%08lX", rd_start);
+ linux_cmdline_set(buf, strlen(buf));
+ sprintf(buf, "rd_size=0x%lX", rd_size);
+ linux_cmdline_set(buf, strlen(buf));
+ }
+}
+
+static void linux_env_init(void)
+{
+ linux_env = (char **)(((ulong) linux_argp + 15) & ~15);
+ linux_env[0] = 0;
+ linux_env_p = (char *)(linux_env + LINUX_MAX_ENVS);
+ linux_env_idx = 0;
+}
+
+static void linux_env_set(const char *env_name, const char *env_val)
+{
+ if (linux_env_idx < LINUX_MAX_ENVS - 1) {
+ linux_env[linux_env_idx] = linux_env_p;
+
+ strcpy(linux_env_p, env_name);
+ linux_env_p += strlen(env_name);
+
+ if (CONFIG_IS_ENABLED(MALTA)) {
+ linux_env_p++;
+ linux_env[++linux_env_idx] = linux_env_p;
+ } else {
+ *linux_env_p++ = '=';
+ }
+
+ strcpy(linux_env_p, env_val);
+ linux_env_p += strlen(env_val);
+
+ linux_env_p++;
+ linux_env[++linux_env_idx] = 0;
+ }
+}
+
+static void linux_env_legacy(bootm_headers_t *images)
+{
+ char env_buf[12];
+ const char *cp;
+ ulong rd_start, rd_size;
+
+ if (CONFIG_IS_ENABLED(MEMSIZE_IN_BYTES)) {
+ sprintf(env_buf, "%lu", (ulong)gd->ram_size);
+ debug("## Giving linux memsize in bytes, %lu\n",
+ (ulong)gd->ram_size);
+ } else {
+ sprintf(env_buf, "%lu", (ulong)(gd->ram_size >> 20));
+ debug("## Giving linux memsize in MB, %lu\n",
+ (ulong)(gd->ram_size >> 20));
+ }
+
+ rd_start = CKSEG1ADDR(images->initrd_start);
+ rd_size = images->initrd_end - images->initrd_start;
+
+ linux_env_init();
+
+ linux_env_set("memsize", env_buf);
+
+ sprintf(env_buf, "0x%08lX", rd_start);
+ linux_env_set("initrd_start", env_buf);
+
+ sprintf(env_buf, "0x%lX", rd_size);
+ linux_env_set("initrd_size", env_buf);
+
+ sprintf(env_buf, "0x%08X", (uint) (gd->bd->bi_flashstart));
+ linux_env_set("flash_start", env_buf);
+
+ sprintf(env_buf, "0x%X", (uint) (gd->bd->bi_flashsize));
+ linux_env_set("flash_size", env_buf);
+
+ cp = env_get("ethaddr");
+ if (cp)
+ linux_env_set("ethaddr", cp);
+
+ cp = env_get("eth1addr");
+ if (cp)
+ linux_env_set("eth1addr", cp);
+
+ if (CONFIG_IS_ENABLED(MALTA)) {
+ sprintf(env_buf, "%un8r", gd->baudrate);
+ linux_env_set("modetty0", env_buf);
+ }
+}
+
+static int boot_reloc_fdt(bootm_headers_t *images)
+{
+ /*
+	 * In the case of legacy uImages, relocation of the FDT is already done
+	 * by do_bootm_states() and should not be repeated in 'bootm prep'.
+ */
+ if (images->state & BOOTM_STATE_FDT) {
+ debug("## FDT already relocated\n");
+ return 0;
+ }
+
+#if CONFIG_IS_ENABLED(MIPS_BOOT_FDT) && CONFIG_IS_ENABLED(OF_LIBFDT)
+ boot_fdt_add_mem_rsv_regions(&images->lmb, images->ft_addr);
+ return boot_relocate_fdt(&images->lmb, &images->ft_addr,
+ &images->ft_len);
+#else
+ return 0;
+#endif
+}
+
+#if CONFIG_IS_ENABLED(MIPS_BOOT_FDT) && CONFIG_IS_ENABLED(OF_LIBFDT)
+int arch_fixup_fdt(void *blob)
+{
+ u64 mem_start = virt_to_phys((void *)gd->ram_base);
+ u64 mem_size = gd->ram_size;
+
+ return fdt_fixup_memory_banks(blob, &mem_start, &mem_size, 1);
+}
+#endif
+
+static int boot_setup_fdt(bootm_headers_t *images)
+{
+ images->initrd_start = virt_to_phys((void *)images->initrd_start);
+ images->initrd_end = virt_to_phys((void *)images->initrd_end);
+ return image_setup_libfdt(images, images->ft_addr, images->ft_len,
+ &images->lmb);
+}
+
+static void boot_prep_linux(bootm_headers_t *images)
+{
+ if (CONFIG_IS_ENABLED(MIPS_BOOT_FDT) && images->ft_len) {
+ boot_reloc_fdt(images);
+ boot_setup_fdt(images);
+ } else {
+ if (CONFIG_IS_ENABLED(MIPS_BOOT_CMDLINE_LEGACY)) {
+ linux_cmdline_legacy(images);
+
+ if (!CONFIG_IS_ENABLED(MIPS_BOOT_ENV_LEGACY))
+ linux_cmdline_append(images);
+
+ linux_cmdline_dump();
+ }
+
+ if (CONFIG_IS_ENABLED(MIPS_BOOT_ENV_LEGACY))
+ linux_env_legacy(images);
+ }
+}
+
+static void boot_jump_linux(bootm_headers_t *images)
+{
+ typedef void __noreturn (*kernel_entry_t)(int, ulong, ulong, ulong);
+ kernel_entry_t kernel = (kernel_entry_t) images->ep;
+ ulong linux_extra = 0;
+
+ debug("## Transferring control to Linux (at address %p) ...\n", kernel);
+
+ bootstage_mark(BOOTSTAGE_ID_RUN_OS);
+
+ if (CONFIG_IS_ENABLED(MALTA))
+ linux_extra = gd->ram_size;
+
+#if CONFIG_IS_ENABLED(BOOTSTAGE_FDT)
+ bootstage_fdt_add_report();
+#endif
+#if CONFIG_IS_ENABLED(BOOTSTAGE_REPORT)
+ bootstage_report();
+#endif
+
+ if (CONFIG_IS_ENABLED(RESTORE_EXCEPTION_VECTOR_BASE))
+ trap_restore();
+
+ if (images->ft_len)
+ kernel(-2, (ulong)images->ft_addr, 0, 0);
+ else
+ kernel(linux_argc, (ulong)linux_argv, (ulong)linux_env,
+ linux_extra);
+}
+
+int do_bootm_linux(int flag, int argc, char *const argv[],
+ bootm_headers_t *images)
+{
+ /* No need for those on MIPS */
+ if (flag & BOOTM_STATE_OS_BD_T)
+ return -1;
+
+ /*
+ * Cmdline init has been moved to 'bootm prep' because it has to be
+ * done after relocation of ramdisk to always pass correct values
+ * for rd_start and rd_size to Linux kernel.
+ */
+ if (flag & BOOTM_STATE_OS_CMDLINE)
+ return 0;
+
+ if (flag & BOOTM_STATE_OS_PREP) {
+ boot_prep_linux(images);
+ return 0;
+ }
+
+ if (flag & (BOOTM_STATE_OS_GO | BOOTM_STATE_OS_FAKE_GO)) {
+ boot_jump_linux(images);
+ return 0;
+ }
+
+ /* does not return */
+ return 1;
+}
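
boot_jump_linux() above uses the traditional MIPS firmware calling convention: a0 carries argc (or -2 when a device tree is passed), a1 the argv array (or the FDT address), a2 the environment array and a3 an extra word (the RAM size on Malta). As a hedged illustration only - the fw_arg0..fw_arg3 names follow the Linux/MIPS convention and nothing here is part of this patch - the kernel side of this interface would look roughly like:

/* Sketch of a kernel-side consumer of the values passed by boot_jump_linux();
 * on Linux/MIPS the early entry code stashes a0-a3 as fw_arg0..fw_arg3. */
void prom_init_sketch(long fw_arg0, long fw_arg1, long fw_arg2, long fw_arg3)
{
	if (fw_arg0 == -2) {
		/* 'bootm' passed a device tree: a1 holds its address */
		void *fdt = (void *)fw_arg1;

		(void)fdt;	/* ... parse memory nodes, /chosen/bootargs, ... */
	} else {
		/* legacy interface built by linux_cmdline_*() / linux_env_*() */
		int argc = fw_arg0;
		char **argv = (char **)fw_arg1;	/* argv[0] is NULL, args start at [1] */
		char **envp = (char **)fw_arg2;	/* "name=value", or name/value pairs on Malta */
		unsigned long extra = fw_arg3;	/* RAM size on Malta, otherwise 0 */

		(void)argc; (void)argv; (void)envp; (void)extra;
	}
}
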
diff --git a/roms/u-boot/arch/mips/lib/cache.c b/roms/u-boot/arch/mips/lib/cache.c
new file mode 100644
index 000000000..51a8f4334
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/cache.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2003
+ * Wolfgang Denk, DENX Software Engineering, <wd@denx.de>
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <asm/cache.h>
+#include <asm/cacheops.h>
+#include <asm/cm.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/mipsregs.h>
+#include <asm/system.h>
+#include <linux/bug.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static void probe_l2(void)
+{
+#ifdef CONFIG_MIPS_L2_CACHE
+ unsigned long conf2, sl;
+ bool l2c = false;
+
+ if (!(read_c0_config1() & MIPS_CONF_M))
+ return;
+
+ conf2 = read_c0_config2();
+
+ if (__mips_isa_rev >= 6) {
+ l2c = conf2 & MIPS_CONF_M;
+ if (l2c)
+ l2c = read_c0_config3() & MIPS_CONF_M;
+ if (l2c)
+ l2c = read_c0_config4() & MIPS_CONF_M;
+ if (l2c)
+ l2c = read_c0_config5() & MIPS_CONF5_L2C;
+ }
+
+ if (l2c && config_enabled(CONFIG_MIPS_CM)) {
+ gd->arch.l2_line_size = mips_cm_l2_line_size();
+ } else if (l2c) {
+ /* We don't know how to retrieve L2 config on this system */
+ BUG();
+ } else {
+ sl = (conf2 & MIPS_CONF2_SL) >> MIPS_CONF2_SL_SHF;
+ gd->arch.l2_line_size = sl ? (2 << sl) : 0;
+ }
+#endif
+}
+
+void mips_cache_probe(void)
+{
+#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
+ unsigned long conf1, il, dl;
+
+ conf1 = read_c0_config1();
+
+ il = (conf1 & MIPS_CONF1_IL) >> MIPS_CONF1_IL_SHF;
+ dl = (conf1 & MIPS_CONF1_DL) >> MIPS_CONF1_DL_SHF;
+
+ gd->arch.l1i_line_size = il ? (2 << il) : 0;
+ gd->arch.l1d_line_size = dl ? (2 << dl) : 0;
+#endif
+ probe_l2();
+}
+
+static inline unsigned long icache_line_size(void)
+{
+#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
+ return gd->arch.l1i_line_size;
+#else
+ return CONFIG_SYS_ICACHE_LINE_SIZE;
+#endif
+}
+
+static inline unsigned long dcache_line_size(void)
+{
+#ifdef CONFIG_SYS_CACHE_SIZE_AUTO
+ return gd->arch.l1d_line_size;
+#else
+ return CONFIG_SYS_DCACHE_LINE_SIZE;
+#endif
+}
+
+static inline unsigned long scache_line_size(void)
+{
+#ifdef CONFIG_MIPS_L2_CACHE
+ return gd->arch.l2_line_size;
+#else
+ return CONFIG_SYS_SCACHE_LINE_SIZE;
+#endif
+}
+
+#define cache_loop(start, end, lsize, ops...) do { \
+ const void *addr = (const void *)(start & ~(lsize - 1)); \
+ const void *aend = (const void *)((end - 1) & ~(lsize - 1)); \
+ const unsigned int cache_ops[] = { ops }; \
+ unsigned int i; \
+ \
+ if (!lsize) \
+ break; \
+ \
+ for (; addr <= aend; addr += lsize) { \
+ for (i = 0; i < ARRAY_SIZE(cache_ops); i++) \
+ mips_cache(cache_ops[i], addr); \
+ } \
+} while (0)
+
+void __weak flush_cache(ulong start_addr, ulong size)
+{
+ unsigned long ilsize = icache_line_size();
+ unsigned long dlsize = dcache_line_size();
+ unsigned long slsize = scache_line_size();
+
+ /* aend will be miscalculated when size is zero, so we return here */
+ if (size == 0)
+ return;
+
+ if ((ilsize == dlsize) && !slsize) {
+ /* flush I-cache & D-cache simultaneously */
+ cache_loop(start_addr, start_addr + size, ilsize,
+ HIT_WRITEBACK_INV_D, HIT_INVALIDATE_I);
+ goto ops_done;
+ }
+
+ /* flush D-cache */
+ cache_loop(start_addr, start_addr + size, dlsize, HIT_WRITEBACK_INV_D);
+
+ /* flush L2 cache */
+ cache_loop(start_addr, start_addr + size, slsize, HIT_WRITEBACK_INV_SD);
+
+ /* flush I-cache */
+ cache_loop(start_addr, start_addr + size, ilsize, HIT_INVALIDATE_I);
+
+ops_done:
+ /* ensure cache ops complete before any further memory accesses */
+ sync();
+
+ /* ensure the pipeline doesn't contain now-invalid instructions */
+ instruction_hazard_barrier();
+}
+
+void __weak flush_dcache_range(ulong start_addr, ulong stop)
+{
+ unsigned long lsize = dcache_line_size();
+ unsigned long slsize = scache_line_size();
+
+ /* aend will be miscalculated when size is zero, so we return here */
+ if (start_addr == stop)
+ return;
+
+ cache_loop(start_addr, stop, lsize, HIT_WRITEBACK_INV_D);
+
+ /* flush L2 cache */
+ cache_loop(start_addr, stop, slsize, HIT_WRITEBACK_INV_SD);
+
+ /* ensure cache ops complete before any further memory accesses */
+ sync();
+}
+
+void __weak invalidate_dcache_range(ulong start_addr, ulong stop)
+{
+ unsigned long lsize = dcache_line_size();
+ unsigned long slsize = scache_line_size();
+
+ /* aend will be miscalculated when size is zero, so we return here */
+ if (start_addr == stop)
+ return;
+
+ /* invalidate L2 cache */
+ cache_loop(start_addr, stop, slsize, HIT_INVALIDATE_SD);
+
+ cache_loop(start_addr, stop, lsize, HIT_INVALIDATE_D);
+
+ /* ensure cache ops complete before any further memory accesses */
+ sync();
+}
+
+int dcache_status(void)
+{
+ unsigned int cca = read_c0_config() & CONF_CM_CMASK;
+ return cca != CONF_CM_UNCACHED;
+}
+
+void dcache_enable(void)
+{
+ puts("Not supported!\n");
+}
+
+void dcache_disable(void)
+{
+ /* change CCA to uncached */
+ change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
+
+ /* ensure the pipeline doesn't contain now-invalid instructions */
+ instruction_hazard_barrier();
+}
diff --git a/roms/u-boot/arch/mips/lib/cache_init.S b/roms/u-boot/arch/mips/lib/cache_init.S
new file mode 100644
index 000000000..602741c65
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/cache_init.S
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Cache-handling routines for MIPS CPUs
+ *
+ * Copyright (c) 2003 Wolfgang Denk <wd@denx.de>
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/cacheops.h>
+#include <asm/cm.h>
+
+ .macro f_fill64 dst, offset, val
+ LONG_S \val, (\offset + 0 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 1 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 2 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 3 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 4 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 5 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 6 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 7 * LONGSIZE)(\dst)
+#if LONGSIZE == 4
+ LONG_S \val, (\offset + 8 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 9 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 10 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 11 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 12 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 13 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 14 * LONGSIZE)(\dst)
+ LONG_S \val, (\offset + 15 * LONGSIZE)(\dst)
+#endif
+ .endm
+
+ .macro cache_loop curr, end, line_sz, op
+10: cache \op, 0(\curr)
+ PTR_ADDU \curr, \curr, \line_sz
+ bne \curr, \end, 10b
+ .endm
+
+ .macro l1_info sz, line_sz, off
+ .set push
+ .set noat
+
+ mfc0 $1, CP0_CONFIG, 1
+
+ /* detect line size */
+ srl \line_sz, $1, \off + MIPS_CONF1_DL_SHF - MIPS_CONF1_DA_SHF
+ andi \line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHF)
+ move \sz, zero
+ beqz \line_sz, 10f
+ li \sz, 2
+ sllv \line_sz, \sz, \line_sz
+
+ /* detect associativity */
+ srl \sz, $1, \off + MIPS_CONF1_DA_SHF - MIPS_CONF1_DA_SHF
+ andi \sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHF)
+ addiu \sz, \sz, 1
+
+ /* sz *= line_sz */
+ mul \sz, \sz, \line_sz
+
+ /* detect log32(sets) */
+ srl $1, $1, \off + MIPS_CONF1_DS_SHF - MIPS_CONF1_DA_SHF
+ andi $1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHF)
+ addiu $1, $1, 1
+ andi $1, $1, 0x7
+
+ /* sz <<= log32(sets) */
+ sllv \sz, \sz, $1
+
+ /* sz *= 32 */
+ li $1, 32
+ mul \sz, \sz, $1
+10:
+ .set pop
+ .endm
+
+ /*
+ * The changing of Kernel mode cacheability must be done from KSEG1.
+ * If the code is executing from KSEG0, jump to KSEG1 during the execution
+ * of change_k0_cca. change_k0_cca itself clears all hazards when returning.
+ */
+ .macro change_k0_cca_kseg1 mode
+ PTR_LA t0, change_k0_cca
+ li t1, CPHYSADDR(~0)
+ and t0, t0, t1
+ PTR_LI t1, CKSEG1
+ or t0, t0, t1
+ li a0, \mode
+ jalr t0
+ .endm
+
+/*
+ * mips_cache_reset - low level initialisation of the primary caches
+ *
+ * This routine initialises the primary caches to ensure that they have good
+ * parity. It must be called by the ROM before any cached locations are used
+ * to prevent the possibility of data with bad parity being written to memory.
+ *
+ * To initialise the instruction cache it is essential that a source of data
+ * with good parity is available. This routine will initialise an area of
+ * memory starting at location zero to be used as a source of parity.
+ *
+ * Note that this function does not follow the standard calling convention &
+ * may clobber typically callee-saved registers.
+ *
+ * RETURNS: N/A
+ *
+ */
+#define R_RETURN s0
+#define R_IC_SIZE s1
+#define R_IC_LINE s2
+#define R_DC_SIZE s3
+#define R_DC_LINE s4
+#define R_L2_SIZE s5
+#define R_L2_LINE s6
+#define R_L2_BYPASSED s7
+#define R_L2_L2C t8
+LEAF(mips_cache_reset)
+ move R_RETURN, ra
+
+#ifdef CONFIG_MIPS_L2_CACHE
+ /*
+ * For there to be an L2 present, Config2 must be present. If it isn't
+ * then we proceed knowing there's no L2 cache.
+ */
+ move R_L2_SIZE, zero
+ move R_L2_LINE, zero
+ move R_L2_BYPASSED, zero
+ move R_L2_L2C, zero
+ mfc0 t0, CP0_CONFIG, 1
+ bgez t0, l2_probe_done
+
+ /*
+ * From MIPSr6 onwards the L2 cache configuration might not be reported
+ * by Config2. The Config5.L2C bit indicates whether this is the case,
+ * and if it is then we need knowledge of where else to look. For cores
+ * from Imagination Technologies this is a CM GCR.
+ */
+# if __mips_isa_rev >= 6
+ /* Check that Config5 exists */
+ mfc0 t0, CP0_CONFIG, 2
+ bgez t0, l2_probe_cop0
+ mfc0 t0, CP0_CONFIG, 3
+ bgez t0, l2_probe_cop0
+ mfc0 t0, CP0_CONFIG, 4
+ bgez t0, l2_probe_cop0
+
+ /* Check Config5.L2C is set */
+ mfc0 t0, CP0_CONFIG, 5
+ and R_L2_L2C, t0, MIPS_CONF5_L2C
+ beqz R_L2_L2C, l2_probe_cop0
+
+ /* Config5.L2C is set */
+# ifdef CONFIG_MIPS_CM
+ /* The CM will provide L2 configuration */
+ PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+ lw t1, GCR_L2_CONFIG(t0)
+ bgez t1, l2_probe_done
+
+ ext R_L2_LINE, t1, \
+ GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
+ beqz R_L2_LINE, l2_probe_done
+ li t2, 2
+ sllv R_L2_LINE, t2, R_L2_LINE
+
+ ext t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
+ addiu t2, t2, 1
+ mul R_L2_SIZE, R_L2_LINE, t2
+
+ ext t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
+ sllv R_L2_SIZE, R_L2_SIZE, t2
+ li t2, 64
+ mul R_L2_SIZE, R_L2_SIZE, t2
+
+ /* Bypass the L2 cache so that we can init the L1s early */
+ or t1, t1, GCR_L2_CONFIG_BYPASS
+ sw t1, GCR_L2_CONFIG(t0)
+ sync
+ li R_L2_BYPASSED, 1
+
+ /* Zero the L2 tag registers */
+ sw zero, GCR_L2_TAG_ADDR(t0)
+ sw zero, GCR_L2_TAG_ADDR_UPPER(t0)
+ sw zero, GCR_L2_TAG_STATE(t0)
+ sw zero, GCR_L2_TAG_STATE_UPPER(t0)
+ sw zero, GCR_L2_DATA(t0)
+ sw zero, GCR_L2_DATA_UPPER(t0)
+ sync
+# else
+ /* We don't know how to retrieve L2 configuration on this system */
+# endif
+ b l2_probe_done
+# endif
+
+ /*
+ * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
+ * cache configuration from the cop0 Config2 register.
+ */
+l2_probe_cop0:
+ mfc0 t0, CP0_CONFIG, 2
+
+ srl R_L2_LINE, t0, MIPS_CONF2_SL_SHF
+ andi R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
+ beqz R_L2_LINE, l2_probe_done
+ li t1, 2
+ sllv R_L2_LINE, t1, R_L2_LINE
+
+ srl t1, t0, MIPS_CONF2_SA_SHF
+ andi t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
+ addiu t1, t1, 1
+ mul R_L2_SIZE, R_L2_LINE, t1
+
+ srl t1, t0, MIPS_CONF2_SS_SHF
+ andi t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
+ sllv R_L2_SIZE, R_L2_SIZE, t1
+ li t1, 64
+ mul R_L2_SIZE, R_L2_SIZE, t1
+
+ /* Attempt to bypass the L2 so that we can init the L1s early */
+ or t0, t0, MIPS_CONF2_L2B
+ mtc0 t0, CP0_CONFIG, 2
+ ehb
+ mfc0 t0, CP0_CONFIG, 2
+ and R_L2_BYPASSED, t0, MIPS_CONF2_L2B
+
+ /* Zero the L2 tag registers */
+ mtc0 zero, CP0_TAGLO, 4
+ ehb
+l2_probe_done:
+#endif
+
+#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
+ li R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
+ li R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
+#else
+ l1_info R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
+#endif
+
+#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
+ li R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
+ li R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
+#else
+ l1_info R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
+#endif
+
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
+
+ /* Determine the largest L1 cache size */
+#ifndef CONFIG_SYS_CACHE_SIZE_AUTO
+#if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
+ li v0, CONFIG_SYS_ICACHE_SIZE
+#else
+ li v0, CONFIG_SYS_DCACHE_SIZE
+#endif
+#else
+ move v0, R_IC_SIZE
+ sltu t1, R_IC_SIZE, R_DC_SIZE
+ movn v0, R_DC_SIZE, t1
+#endif
+ /*
+ * Now clear that much memory starting from zero.
+ */
+ PTR_LI a0, CKSEG1ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ PTR_ADDU a1, a0, v0
+2: PTR_ADDIU a0, 64
+ f_fill64 a0, -64, zero
+ bne a0, a1, 2b
+
+#endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */
+
+#ifdef CONFIG_MIPS_L2_CACHE
+ /*
+ * If the L2 is bypassed, init the L1 first so that we can execute the
+ * rest of the cache initialisation using the L1 instruction cache.
+ */
+ bnez R_L2_BYPASSED, l1_init
+
+l2_init:
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ PTR_ADDU t1, t0, R_L2_SIZE
+1: cache INDEX_STORE_TAG_SD, 0(t0)
+ PTR_ADDU t0, t0, R_L2_LINE
+ bne t0, t1, 1b
+
+ /*
+ * If the L2 was bypassed then we already initialised the L1s before
+ * the L2, so we are now done.
+ */
+ bnez R_L2_BYPASSED, l2_unbypass
+#endif
+
+ /*
+ * The TagLo registers used depend upon the CPU implementation, but the
+ * architecture requires that it is safe for software to write to both
+ * TagLo selects 0 & 2 covering supported cases.
+ */
+l1_init:
+ mtc0 zero, CP0_TAGLO
+ mtc0 zero, CP0_TAGLO, 2
+ ehb
+
+ /*
+ * The caches are probably in an indeterminate state, so we force good
+ * parity into them by doing an invalidate for each line. If
+ * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
+ * perform a load/fill & a further invalidate for each line, assuming
+ * that the bottom of RAM (having just been cleared) will generate good
+ * parity for the cache.
+ */
+
+ /*
+ * Initialize the I-cache first,
+ */
+ blez R_IC_SIZE, 1f
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ PTR_ADDU t1, t0, R_IC_SIZE
+ /* clear tag to invalidate */
+ cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
+ /* fill once, so data field parity is correct */
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ cache_loop t0, t1, R_IC_LINE, FILL
+	/* invalidate again - prudent but not strictly necessary */
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
+#endif
+ sync
+
+ /*
+ * Enable use of the I-cache by setting Config.K0.
+ */
+ change_k0_cca_kseg1 CONF_CM_CACHABLE_NONCOHERENT
+
+ /*
+ * then initialize D-cache.
+ */
+1: blez R_DC_SIZE, 3f
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ PTR_ADDU t1, t0, R_DC_SIZE
+ /* clear all tags */
+ cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
+#ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
+ /* load from each line (in cached space) */
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+2: LONG_L zero, 0(t0)
+ PTR_ADDU t0, R_DC_LINE
+ bne t0, t1, 2b
+ /* clear all tags */
+ PTR_LI t0, CKSEG0ADDR(CONFIG_MIPS_CACHE_INDEX_BASE)
+ cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
+#endif
+3:
+
+#ifdef CONFIG_MIPS_L2_CACHE
+ /* If the L2 isn't bypassed then we're done */
+ beqz R_L2_BYPASSED, return
+
+ /* The L2 is bypassed - go initialise it */
+ b l2_init
+
+l2_unbypass:
+# if __mips_isa_rev >= 6
+ beqz R_L2_L2C, 1f
+
+ li t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+ lw t1, GCR_L2_CONFIG(t0)
+ xor t1, t1, GCR_L2_CONFIG_BYPASS
+ sw t1, GCR_L2_CONFIG(t0)
+ sync
+ ehb
+ b 2f
+# endif
+1: mfc0 t0, CP0_CONFIG, 2
+ xor t0, t0, MIPS_CONF2_L2B
+ mtc0 t0, CP0_CONFIG, 2
+ ehb
+
+2:
+# ifdef CONFIG_MIPS_CM
+ /* Config3 must exist for a CM to be present */
+ mfc0 t0, CP0_CONFIG, 1
+ bgez t0, 2f
+ mfc0 t0, CP0_CONFIG, 2
+ bgez t0, 2f
+
+ /* Check Config3.CMGCR to determine CM presence */
+ mfc0 t0, CP0_CONFIG, 3
+ and t0, t0, MIPS_CONF3_CMGCR
+ beqz t0, 2f
+
+ /* Change Config.K0 to a coherent CCA */
+ change_k0_cca_kseg1 CONF_CM_CACHABLE_COW
+
+ /*
+ * Join the coherent domain such that the caches of this core are kept
+ * coherent with those of other cores.
+ */
+ PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
+ lw t1, GCR_REV(t0)
+ li t2, GCR_REV_CM3
+ li t3, GCR_Cx_COHERENCE_EN
+ bge t1, t2, 1f
+ li t3, GCR_Cx_COHERENCE_DOM_EN
+1: sw t3, GCR_Cx_COHERENCE(t0)
+ ehb
+2:
+# endif
+#endif
+
+return:
+ /* Ensure all cache operations complete before returning */
+ sync
+ jr R_RETURN
+ END(mips_cache_reset)
+
+LEAF(mips_cache_disable)
+ move R_RETURN, ra
+ change_k0_cca_kseg1 CONF_CM_UNCACHED
+ jr R_RETURN
+ END(mips_cache_disable)
+
+LEAF(change_k0_cca)
+ mfc0 t0, CP0_CONFIG
+#if __mips_isa_rev >= 2
+ ins t0, a0, 0, 3
+#else
+ xor a0, a0, t0
+ andi a0, a0, CONF_CM_CMASK
+ xor a0, a0, t0
+#endif
+ mtc0 a0, CP0_CONFIG
+
+ jr.hb ra
+ END(change_k0_cca)
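
The l1_info macro above derives each L1 cache's geometry purely from CP0 Config1: line size = 2 << {I,D}L (0 means no cache), ways = {I,D}A + 1, and sets per way = 32 << (({I,D}S + 1) & 7). A C rendering of the same computation for the D-cache, as an illustration only - the bit positions follow the MIPS32 definition of Config1, and the real probe has to run from assembly before the caches are usable:

#include <stdio.h>

/* Decode the CP0 Config1 D-cache fields the same way l1_info does. */
static unsigned long l1d_size_from_config1(unsigned int config1)
{
	unsigned int dl = (config1 >> 10) & 0x7;	/* line size code (DL) */
	unsigned int da = (config1 >> 7) & 0x7;		/* associativity - 1 (DA) */
	unsigned int ds = (config1 >> 13) & 0x7;	/* sets-per-way code (DS) */
	unsigned long size;

	if (!dl)
		return 0;			/* DL == 0: no D-cache present */

	size = (2UL << dl) * (da + 1);		/* line size * ways */
	size <<= (ds + 1) & 0x7;		/* sets = 32 << ((DS + 1) & 7), */
	return size * 32;			/* applied in two steps as in the macro */
}

int main(void)
{
	/* Example: DL=4 (32-byte lines), DA=3 (4 ways), DS=2 (256 sets) -> 32 KiB */
	unsigned int config1 = (2u << 13) | (4u << 10) | (3u << 7);

	printf("%lu bytes\n", l1d_size_from_config1(config1));
	return 0;
}
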
diff --git a/roms/u-boot/arch/mips/lib/genex.S b/roms/u-boot/arch/mips/lib/genex.S
new file mode 100644
index 000000000..aba8c4882
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/genex.S
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+
+#define STATMASK 0x1f
+
+ .set noreorder
+
+ /*
+ * Macros copied and adapted from Linux MIPS
+ */
+ .macro SAVE_AT
+ .set push
+ .set noat
+ LONG_S $1, PT_R1(sp)
+ .set pop
+ .endm
+
+ .macro SAVE_TEMP
+#if __mips_isa_rev < 6
+ mfhi v1
+#endif
+#ifdef CONFIG_32BIT
+ LONG_S $8, PT_R8(sp)
+ LONG_S $9, PT_R9(sp)
+#endif
+ LONG_S $10, PT_R10(sp)
+ LONG_S $11, PT_R11(sp)
+ LONG_S $12, PT_R12(sp)
+#if __mips_isa_rev < 6
+ LONG_S v1, PT_HI(sp)
+ mflo v1
+#endif
+ LONG_S $13, PT_R13(sp)
+ LONG_S $14, PT_R14(sp)
+ LONG_S $15, PT_R15(sp)
+ LONG_S $24, PT_R24(sp)
+#if __mips_isa_rev < 6
+ LONG_S v1, PT_LO(sp)
+#endif
+ .endm
+
+ .macro SAVE_STATIC
+ LONG_S $16, PT_R16(sp)
+ LONG_S $17, PT_R17(sp)
+ LONG_S $18, PT_R18(sp)
+ LONG_S $19, PT_R19(sp)
+ LONG_S $20, PT_R20(sp)
+ LONG_S $21, PT_R21(sp)
+ LONG_S $22, PT_R22(sp)
+ LONG_S $23, PT_R23(sp)
+ LONG_S $30, PT_R30(sp)
+ .endm
+
+ .macro SAVE_SOME
+ .set push
+ .set noat
+ PTR_SUBU k1, sp, PT_SIZE
+ LONG_S sp, PT_R29(k1)
+ move sp, k1
+
+ LONG_S $3, PT_R3(sp)
+ LONG_S $0, PT_R0(sp)
+ mfc0 v1, CP0_STATUS
+ LONG_S $2, PT_R2(sp)
+ LONG_S v1, PT_STATUS(sp)
+ LONG_S $4, PT_R4(sp)
+ mfc0 v1, CP0_CAUSE
+ LONG_S $5, PT_R5(sp)
+ LONG_S v1, PT_CAUSE(sp)
+ LONG_S $6, PT_R6(sp)
+ MFC0 v1, CP0_EPC
+ LONG_S $7, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ LONG_S $8, PT_R8(sp)
+ LONG_S $9, PT_R9(sp)
+#endif
+ LONG_S v1, PT_EPC(sp)
+ LONG_S $25, PT_R25(sp)
+ LONG_S $28, PT_R28(sp)
+ LONG_S $31, PT_R31(sp)
+ .set pop
+ .endm
+
+ .macro RESTORE_AT
+ .set push
+ .set noat
+ LONG_L $1, PT_R1(sp)
+ .set pop
+ .endm
+
+ .macro RESTORE_TEMP
+#if __mips_isa_rev < 6
+ LONG_L $24, PT_LO(sp)
+ mtlo $24
+ LONG_L $24, PT_HI(sp)
+ mthi $24
+#endif
+#ifdef CONFIG_32BIT
+ LONG_L $8, PT_R8(sp)
+ LONG_L $9, PT_R9(sp)
+#endif
+ LONG_L $10, PT_R10(sp)
+ LONG_L $11, PT_R11(sp)
+ LONG_L $12, PT_R12(sp)
+ LONG_L $13, PT_R13(sp)
+ LONG_L $14, PT_R14(sp)
+ LONG_L $15, PT_R15(sp)
+ LONG_L $24, PT_R24(sp)
+ .endm
+
+ .macro RESTORE_STATIC
+ LONG_L $16, PT_R16(sp)
+ LONG_L $17, PT_R17(sp)
+ LONG_L $18, PT_R18(sp)
+ LONG_L $19, PT_R19(sp)
+ LONG_L $20, PT_R20(sp)
+ LONG_L $21, PT_R21(sp)
+ LONG_L $22, PT_R22(sp)
+ LONG_L $23, PT_R23(sp)
+ LONG_L $30, PT_R30(sp)
+ .endm
+
+ .macro RESTORE_SOME
+ .set push
+ .set reorder
+ .set noat
+ mfc0 a0, CP0_STATUS
+ ori a0, STATMASK
+ xori a0, STATMASK
+ mtc0 a0, CP0_STATUS
+ li v1, ST0_CU1 | ST0_FR | ST0_IM
+ and a0, v1
+ LONG_L v0, PT_STATUS(sp)
+ nor v1, $0, v1
+ and v0, v1
+ or v0, a0
+ mtc0 v0, CP0_STATUS
+ LONG_L v1, PT_EPC(sp)
+ MTC0 v1, CP0_EPC
+ LONG_L $31, PT_R31(sp)
+ LONG_L $28, PT_R28(sp)
+ LONG_L $25, PT_R25(sp)
+#ifdef CONFIG_64BIT
+ LONG_L $8, PT_R8(sp)
+ LONG_L $9, PT_R9(sp)
+#endif
+ LONG_L $7, PT_R7(sp)
+ LONG_L $6, PT_R6(sp)
+ LONG_L $5, PT_R5(sp)
+ LONG_L $4, PT_R4(sp)
+ LONG_L $3, PT_R3(sp)
+ LONG_L $2, PT_R2(sp)
+ .set pop
+ .endm
+
+ .macro RESTORE_SP
+ LONG_L sp, PT_R29(sp)
+ .endm
+
+NESTED(except_vec3_generic, 0, sp)
+ PTR_LA k1, handle_reserved
+ jr k1
+ nop
+ END(except_vec3_generic)
+
+NESTED(except_vec_ejtag_debug, 0, sp)
+ PTR_LA k1, handle_ejtag_debug
+ jr k1
+ nop
+ END(except_vec_ejtag_debug)
+
+NESTED(handle_reserved, PT_SIZE, sp)
+ SAVE_SOME
+ SAVE_AT
+ SAVE_TEMP
+ SAVE_STATIC
+
+ PTR_LA t9, do_reserved
+ jr t9
+ move a0, sp
+ END(handle_reserved)
+
+NESTED(handle_ejtag_debug, PT_SIZE, sp)
+ .set push
+ .set noat
+ MTC0 k1, CP0_DESAVE
+
+ /* Check for SDBBP */
+ MFC0 k1, CP0_DEBUG
+ sll k1, k1, 30
+ bgez k1, ejtag_return
+ nop
+
+ SAVE_SOME
+ SAVE_AT
+ SAVE_TEMP
+ SAVE_STATIC
+
+ PTR_LA t9, do_ejtag_debug
+ jalr t9
+ move a0, sp
+
+ RESTORE_TEMP
+ RESTORE_STATIC
+ RESTORE_AT
+ RESTORE_SOME
+ RESTORE_SP
+
+ejtag_return:
+ MFC0 k1, CP0_DESAVE
+ deret
+ .set pop
+ END(handle_ejtag_debug)
diff --git a/roms/u-boot/arch/mips/lib/libgcc.h b/roms/u-boot/arch/mips/lib/libgcc.h
new file mode 100644
index 000000000..05909d58e
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/libgcc.h
@@ -0,0 +1,25 @@
+#ifndef __ASM_LIBGCC_H
+#define __ASM_LIBGCC_H
+
+#include <asm/byteorder.h>
+
+typedef int word_type __attribute__ ((mode (__word__)));
+
+#ifdef __BIG_ENDIAN
+struct DWstruct {
+ int high, low;
+};
+#elif defined(__LITTLE_ENDIAN)
+struct DWstruct {
+ int low, high;
+};
+#else
+#error I feel sick.
+#endif
+
+typedef union {
+ struct DWstruct s;
+ long long ll;
+} DWunion;
+
+#endif /* __ASM_LIBGCC_H */
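
DWunion is what lets the shift helpers refer to the two 32-bit halves of a long long by name. A quick host-side check (assuming a little-endian build, so the low/high field order above applies) that the aliasing behaves as expected:

#include <assert.h>

/* Little-endian field order, as selected by libgcc.h on such builds. */
struct DWstruct { int low, high; };
typedef union { struct DWstruct s; long long ll; } DWunion;

int main(void)
{
	DWunion u = { .ll = 0x1122334455667788LL };

	assert(u.s.high == 0x11223344);
	assert((unsigned int)u.s.low == 0x55667788u);
	return 0;
}
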
diff --git a/roms/u-boot/arch/mips/lib/lshrdi3.c b/roms/u-boot/arch/mips/lib/lshrdi3.c
new file mode 100644
index 000000000..bb340accb
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/lshrdi3.c
@@ -0,0 +1,25 @@
+#include "libgcc.h"
+
+long long __lshrdi3(long long u, word_type b)
+{
+ DWunion uu, w;
+ word_type bm;
+
+ if (b == 0)
+ return u;
+
+ uu.ll = u;
+ bm = 32 - b;
+
+ if (bm <= 0) {
+ w.s.high = 0;
+ w.s.low = (unsigned int) uu.s.high >> -bm;
+ } else {
+ const unsigned int carries = (unsigned int) uu.s.high << bm;
+
+ w.s.high = (unsigned int) uu.s.high >> b;
+ w.s.low = ((unsigned int) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+}
diff --git a/roms/u-boot/arch/mips/lib/reloc.c b/roms/u-boot/arch/mips/lib/reloc.c
new file mode 100644
index 000000000..67c8af2f3
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/reloc.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPS Relocation
+ *
+ * Copyright (c) 2017 Imagination Technologies Ltd.
+ *
+ * Relocation data, found in the .rel section, is generated by the mips-relocs
+ * tool & contains a record of all locations in the U-Boot binary that need to
+ * be fixed up during relocation.
+ *
+ * The data is a sequence of unsigned integers, which are of somewhat arbitrary
+ * size. This is achieved by encoding integers as a sequence of bytes, each of
+ * which contains 7 bits of data with the most significant bit indicating
+ * whether any further bytes need to be read. The least significant bits of the
+ * integer are found in the first byte - ie. it somewhat resembles little
+ * endian.
+ *
+ * Each pair of two integers represents a relocation that must be applied. The
+ * first integer represents the type of relocation as a standard ELF relocation
+ * type (ie. R_MIPS_*). The second integer represents the offset at which to
+ * apply the relocation, relative to the previous relocation or for the first
+ * relocation the start of the relocated .text section.
+ *
+ * The end of the relocation data is indicated when type R_MIPS_NONE (0) is
+ * read, at which point no further integers should be read. That is, the
+ * terminating R_MIPS_NONE reloc includes no offset.
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <init.h>
+#include <asm/relocs.h>
+#include <asm/sections.h>
+#include <linux/bitops.h>
+
+/**
+ * read_uint() - Read an unsigned integer from the buffer
+ * @buf: pointer to a pointer to the reloc buffer
+ *
+ * Read one whole unsigned integer from the relocation data pointed to by @buf,
+ * advancing @buf past the bytes encoding the integer.
+ *
+ * Returns: the integer read from @buf
+ */
+static unsigned long read_uint(uint8_t **buf)
+{
+ unsigned long val = 0;
+ unsigned int shift = 0;
+ uint8_t new;
+
+ do {
+ new = *(*buf)++;
+ val |= (new & 0x7f) << shift;
+ shift += 7;
+ } while (new & 0x80);
+
+ return val;
+}
+
+/**
+ * apply_reloc() - Apply a single relocation
+ * @type: the type of reloc (R_MIPS_*)
+ * @addr: the address that the reloc should be applied to
+ * @off: the relocation offset, ie. number of bytes we're moving U-Boot by
+ *
+ * Apply a single relocation of type @type at @addr. This function is
+ * intentionally simple, and does the bare minimum needed to fixup the
+ * relocated U-Boot - in particular, it does not check for overflows.
+ */
+static void apply_reloc(unsigned int type, void *addr, long off, uint8_t *buf)
+{
+ uint32_t u32;
+
+ switch (type) {
+ case R_MIPS_26:
+ u32 = *(uint32_t *)addr;
+ u32 = (u32 & GENMASK(31, 26)) |
+ ((u32 + (off >> 2)) & GENMASK(25, 0));
+ *(uint32_t *)addr = u32;
+ break;
+
+ case R_MIPS_32:
+ *(uint32_t *)addr += off;
+ break;
+
+ case R_MIPS_64:
+ *(uint64_t *)addr += off;
+ break;
+
+ case R_MIPS_HI16:
+ *(uint32_t *)addr += off >> 16;
+ break;
+
+ default:
+ panic("Unhandled reloc type %u (@ %p), bss used before relocation?\n",
+ type, buf);
+ }
+}
+
+/**
+ * relocate_code() - Relocate U-Boot, generally from flash to DDR
+ * @start_addr_sp: new stack pointer
+ * @new_gd: pointer to relocated global data
+ * @relocaddr: the address to relocate to
+ *
+ * Relocate U-Boot from its current location (generally in flash) to a new one
+ * (generally in DDR). This function will copy the U-Boot binary & apply
+ * relocations as necessary, then jump to board_init_r in the new build of
+ * U-Boot. As such, this function does not return.
+ */
+void relocate_code(ulong start_addr_sp, gd_t *new_gd, ulong relocaddr)
+{
+ unsigned long addr, length, bss_len;
+ uint8_t *buf, *bss_start;
+ unsigned int type;
+ long off;
+
+ /*
+ * Ensure that we're relocating by an offset which is a multiple of
+ * 64KiB, ie. doesn't change the least significant 16 bits of any
+ * addresses. This allows us to discard R_MIPS_LO16 relocs, saving
+ * space in the U-Boot binary & complexity in handling them.
+ */
+ off = relocaddr - (unsigned long)__text_start;
+ if (off & 0xffff)
+ panic("Mis-aligned relocation\n");
+
+ /* Copy U-Boot to RAM */
+ length = __image_copy_end - __text_start;
+ memcpy((void *)relocaddr, __text_start, length);
+
+ /* Now apply relocations to the copy in RAM */
+ buf = __rel_start;
+ addr = relocaddr;
+ while (true) {
+ type = read_uint(&buf);
+ if (type == R_MIPS_NONE)
+ break;
+
+ addr += read_uint(&buf) << 2;
+ apply_reloc(type, (void *)addr, off, buf);
+ }
+
+ /* Ensure the icache is coherent */
+ flush_cache(relocaddr, length);
+
+ /* Clear the .bss section */
+ bss_start = (uint8_t *)((unsigned long)__bss_start + off);
+ bss_len = (unsigned long)&__bss_end - (unsigned long)__bss_start;
+ memset(bss_start, 0, bss_len);
+
+ /* Jump to the relocated U-Boot */
+ asm volatile(
+ "move $29, %0\n"
+ " move $4, %1\n"
+ " move $5, %2\n"
+ " move $31, $0\n"
+ " jr %3"
+ : /* no outputs */
+ : "r"(start_addr_sp),
+ "r"(new_gd),
+ "r"(relocaddr),
+ "r"((unsigned long)board_init_r + off));
+
+ /* Since we jumped to the new U-Boot above, we won't get here */
+ unreachable();
+}
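
The integer format described in the reloc.c header comment is, in effect, unsigned LEB128. As an illustration (this encoder is not part of the mips-relocs tool), the counterpart of read_uint() would look like:

#include <stdint.h>
#include <stdio.h>

/* Emit 7 data bits per byte, least significant bits first, setting the top
 * bit of every byte that has a successor - the inverse of read_uint(). */
static size_t write_uint(uint8_t *buf, unsigned long val)
{
	size_t n = 0;

	do {
		uint8_t byte = val & 0x7f;

		val >>= 7;
		if (val)
			byte |= 0x80;
		buf[n++] = byte;
	} while (val);

	return n;
}

int main(void)
{
	uint8_t buf[16];
	size_t n = write_uint(buf, 300);	/* 300 -> 0xac 0x02 */

	printf("%zu bytes: %02x %02x\n", n, buf[0], buf[1]);
	return 0;
}
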
diff --git a/roms/u-boot/arch/mips/lib/spl.c b/roms/u-boot/arch/mips/lib/spl.c
new file mode 100644
index 000000000..f96fda5b2
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/spl.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Stefan Roese <sr@denx.de>
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <spl.h>
+
+void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
+{
+ typedef void __noreturn (*image_entry_noargs_t)(void);
+ image_entry_noargs_t image_entry =
+ (image_entry_noargs_t)spl_image->entry_point;
+
+ /* Flush cache before jumping to application */
+ flush_cache((unsigned long)spl_image->load_addr, spl_image->size);
+
+ debug("image entry point: 0x%lx\n", spl_image->entry_point);
+ image_entry();
+}
diff --git a/roms/u-boot/arch/mips/lib/stack.c b/roms/u-boot/arch/mips/lib/stack.c
new file mode 100644
index 000000000..930d21856
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/stack.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <init.h>
+#include <log.h>
+#include <asm/global_data.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int arch_reserve_stacks(void)
+{
+ /* reserve space for exception vector table */
+ gd->start_addr_sp -= 0x500;
+ gd->start_addr_sp &= ~0xFFF;
+ gd->irq_sp = gd->start_addr_sp;
+ debug("Reserving %d Bytes for exception vector at: %08lx\n",
+ 0x500, gd->start_addr_sp);
+
+ return 0;
+}
diff --git a/roms/u-boot/arch/mips/lib/traps.c b/roms/u-boot/arch/mips/lib/traps.c
new file mode 100644
index 000000000..7577fdd25
--- /dev/null
+++ b/roms/u-boot/arch/mips/lib/traps.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
+ * Copyright (C) 1995, 1996 Paul M. Antoine
+ * Copyright (C) 1998 Ulf Carlsson
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+
+#include <common.h>
+#include <asm/global_data.h>
+#include <asm/ptrace.h>
+#include <cpu_func.h>
+#include <hang.h>
+#include <init.h>
+#include <log.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/system.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static unsigned long saved_ebase;
+
+static void show_regs(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int cause = regs->cp0_cause;
+ unsigned int exccode;
+ int i;
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ printf("$%2d :", i);
+ if (i == 0)
+ printf(" %0*lx", field, 0UL);
+ else if (i == 26 || i == 27)
+ printf(" %*s", field, "");
+ else
+ printf(" %0*lx", field, regs->regs[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ puts("\n");
+ }
+
+ printf("Hi : %0*lx\n", field, regs->hi);
+ printf("Lo : %0*lx\n", field, regs->lo);
+
+ /*
+ * Saved cp0 registers
+ */
+ printf("epc : %0*lx (text %0*lx)\n", field, regs->cp0_epc,
+ field, regs->cp0_epc - gd->reloc_off);
+ printf("ra : %0*lx (text %0*lx)\n", field, regs->regs[31],
+ field, regs->regs[31] - gd->reloc_off);
+
+ printf("Status: %08x\n", (uint32_t) regs->cp0_status);
+
+ exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ printf("Cause : %08x (ExcCode %02x)\n", cause, exccode);
+
+ if (1 <= exccode && exccode <= 5)
+ printf("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
+
+ printf("PrId : %08x\n", read_c0_prid());
+}
+
+void do_reserved(const struct pt_regs *regs)
+{
+ puts("\nOoops:\n");
+ show_regs(regs);
+ hang();
+}
+
+void do_ejtag_debug(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long depc;
+ unsigned int debug;
+
+ depc = read_c0_depc();
+ debug = read_c0_debug();
+
+ printf("SDBBP EJTAG debug exception: c0_depc = %0*lx, DEBUG = %08x\n",
+ field, depc, debug);
+}
+
+static void set_handler(unsigned long offset, void *addr, unsigned long size)
+{
+ unsigned long ebase = gd->irq_sp;
+
+ memcpy((void *)(ebase + offset), addr, size);
+ flush_cache(ebase + offset, size);
+}
+
+static void trap_init(ulong reloc_addr)
+{
+ unsigned long ebase = gd->irq_sp;
+
+ set_handler(0x180, &except_vec3_generic, 0x80);
+ set_handler(0x280, &except_vec_ejtag_debug, 0x80);
+
+ saved_ebase = read_c0_ebase() & 0xfffff000;
+
+ /* Set WG bit on Octeon to enable writing to bits 63:30 */
+ if (IS_ENABLED(CONFIG_ARCH_OCTEON))
+ ebase |= MIPS_EBASE_WG;
+
+ write_c0_ebase(ebase);
+ clear_c0_status(ST0_BEV);
+ execution_hazard_barrier();
+}
+
+void trap_restore(void)
+{
+ set_c0_status(ST0_BEV);
+ execution_hazard_barrier();
+
+#ifdef CONFIG_OVERRIDE_EXCEPTION_VECTOR_BASE
+ write_c0_ebase(CONFIG_NEW_EXCEPTION_VECTOR_BASE & 0xfffff000);
+#else
+ write_c0_ebase(saved_ebase);
+#endif
+
+ clear_c0_status(ST0_BEV);
+ execution_hazard_barrier();
+}
+
+int arch_initr_trap(void)
+{
+ trap_init(CONFIG_SYS_SDRAM_BASE);
+
+ return 0;
+}