Diffstat (limited to 'roms/u-boot/drivers/mtd')
-rw-r--r--  roms/u-boot/drivers/mtd/Kconfig  117
-rw-r--r--  roms/u-boot/drivers/mtd/Makefile  42
-rw-r--r--  roms/u-boot/drivers/mtd/altera_qspi.c  407
-rw-r--r--  roms/u-boot/drivers/mtd/cfi_flash.c  2530
-rw-r--r--  roms/u-boot/drivers/mtd/cfi_mtd.c  265
-rw-r--r--  roms/u-boot/drivers/mtd/hbmc-am654.c  106
-rw-r--r--  roms/u-boot/drivers/mtd/jedec_flash.c  493
-rw-r--r--  roms/u-boot/drivers/mtd/mtd-uclass.c  36
-rw-r--r--  roms/u-boot/drivers/mtd/mtd_uboot.c  361
-rw-r--r--  roms/u-boot/drivers/mtd/mtdconcat.c  974
-rw-r--r--  roms/u-boot/drivers/mtd/mtdcore.c  1859
-rw-r--r--  roms/u-boot/drivers/mtd/mtdcore.h  17
-rw-r--r--  roms/u-boot/drivers/mtd/mtdpart.c  1000
-rw-r--r--  roms/u-boot/drivers/mtd/nand/Kconfig  6
-rw-r--r--  roms/u-boot/drivers/mtd/nand/Makefile  10
-rw-r--r--  roms/u-boot/drivers/mtd/nand/bbt.c  135
-rw-r--r--  roms/u-boot/drivers/mtd/nand/core.c  256
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/Kconfig  446
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/Makefile  82
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/am335x_spl_bch.c  226
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/arasan_nfc.c  1325
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/atmel_nand.c  1526
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/atmel_nand_ecc.h  203
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/Makefile  9
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm63158_nand.c  125
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c  118
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm68360_nand.c  124
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6838_nand.c  124
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6858_nand.c  125
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.c  2754
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.h  63
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.c  39
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.h  11
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/cortina_nand.c  1390
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/cortina_nand.h  293
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/davinci_nand.c  839
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/denali.c  1376
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/denali.h  327
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/denali_dt.c  189
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/denali_spl.c  237
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_nand.c  812
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_spl.c  172
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_nand.c  1069
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_spl.c  308
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/fsl_upm.c  186
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/fsmc_nand.c  518
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/kb9202_nand.c  133
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/kirkwood_nand.c  91
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/kmeter1_nand.c  123
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_mlc.c  766
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_slc.c  585
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/mxc_nand.c  1309
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/mxc_nand.h  208
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/mxc_nand_spl.c  351
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/mxs_nand.c  1599
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/mxs_nand_dt.c  185
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/mxs_nand_spl.c  298
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand.c  175
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_base.c  5324
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_bbt.c  1376
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_bch.c  233
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_ecc.c  174
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_ids.c  213
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_plat.c  64
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_spl_load.c  41
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_spl_loaders.c  132
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_spl_simple.c  240
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_timings.c  335
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/nand_util.c  907
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.c  425
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.h  131
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/octeontx_bch_regs.h  167
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/octeontx_nand.c  2258
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/omap_elm.c  193
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/omap_gpmc.c  1038
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.c  1956
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.h  64
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/stm32_fmc2_nand.c  1048
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/sunxi_nand.c  1858
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/sunxi_nand_spl.c  550
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/tegra_nand.c  1007
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/tegra_nand.h  240
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/vf610_nfc.c  820
-rw-r--r--  roms/u-boot/drivers/mtd/nand/raw/zynq_nand.c  1299
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/Kconfig  7
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/Makefile  4
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/core.c  1255
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/gigadevice.c  210
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/macronix.c  148
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/micron.c  259
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/toshiba.c  289
-rw-r--r--  roms/u-boot/drivers/mtd/nand/spi/winbond.c  153
-rw-r--r--  roms/u-boot/drivers/mtd/onenand/Makefile  11
-rw-r--r--  roms/u-boot/drivers/mtd/onenand/onenand_base.c  2802
-rw-r--r--  roms/u-boot/drivers/mtd/onenand/onenand_bbt.c  265
-rw-r--r--  roms/u-boot/drivers/mtd/onenand/onenand_spl.c  200
-rw-r--r--  roms/u-boot/drivers/mtd/onenand/onenand_uboot.c  56
-rw-r--r--  roms/u-boot/drivers/mtd/onenand/samsung.c  568
-rw-r--r--  roms/u-boot/drivers/mtd/pic32_flash.c  448
-rw-r--r--  roms/u-boot/drivers/mtd/renesas_rpc_hf.c  401
-rw-r--r--  roms/u-boot/drivers/mtd/spi/Kconfig  217
-rw-r--r--  roms/u-boot/drivers/mtd/spi/Makefile  23
-rw-r--r--  roms/u-boot/drivers/mtd/spi/fsl_espi_spl.c  91
-rw-r--r--  roms/u-boot/drivers/mtd/spi/sandbox.c  606
-rw-r--r--  roms/u-boot/drivers/mtd/spi/sf-uclass.c  109
-rw-r--r--  roms/u-boot/drivers/mtd/spi/sf_dataflash.c  698
-rw-r--r--  roms/u-boot/drivers/mtd/spi/sf_internal.h  96
-rw-r--r--  roms/u-boot/drivers/mtd/spi/sf_mtd.c  148
-rw-r--r--  roms/u-boot/drivers/mtd/spi/sf_probe.c  183
-rw-r--r--  roms/u-boot/drivers/mtd/spi/spi-nor-core.c  2660
-rw-r--r--  roms/u-boot/drivers/mtd/spi/spi-nor-ids.c  341
-rw-r--r--  roms/u-boot/drivers/mtd/spi/spi-nor-tiny.c  817
-rw-r--r--  roms/u-boot/drivers/mtd/st_smi.c  565
-rw-r--r--  roms/u-boot/drivers/mtd/stm32_flash.c  151
-rw-r--r--  roms/u-boot/drivers/mtd/stm32_flash.h  27
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/Kconfig  106
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/Makefile  9
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/attach.c  1766
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/build.c  1554
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/crc32.c  511
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/crc32defs.h  32
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/crc32table.h  136
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/debug.c  563
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/debug.h  143
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/eba.c  1474
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/fastmap-wl.c  395
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/fastmap.c  1658
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/io.c  1434
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/kapi.c  864
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/misc.c  145
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/ubi-media.h  510
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/ubi.h  1127
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/upd.c  433
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/vmt.c  822
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/vtbl.c  871
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/wl.c  1882
-rw-r--r--  roms/u-boot/drivers/mtd/ubi/wl.h  34
-rw-r--r--  roms/u-boot/drivers/mtd/ubispl/Makefile  1
-rw-r--r--  roms/u-boot/drivers/mtd/ubispl/ubi-wrapper.h  105
-rw-r--r--  roms/u-boot/drivers/mtd/ubispl/ubispl.c  1139
-rw-r--r--  roms/u-boot/drivers/mtd/ubispl/ubispl.h  142
141 files changed, 83210 insertions, 0 deletions
diff --git a/roms/u-boot/drivers/mtd/Kconfig b/roms/u-boot/drivers/mtd/Kconfig
new file mode 100644
index 000000000..ad50c5e87
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/Kconfig
@@ -0,0 +1,117 @@
+menu "MTD Support"
+
+config MTD_PARTITIONS
+ bool
+
+config MTD
+ bool "Enable MTD layer"
+ help
+ Enable the MTD stack, necessary to interact with NAND, NOR,
+ SPI-NOR, SPI-NAND, OneNAND, etc.
+
+config DM_MTD
+ bool "Enable Driver Model for MTD drivers"
+ depends on DM
+ help
+ Enable driver model for Memory Technology Devices (MTD), such as
+ flash, RAM and similar chips, often used for solid state file
+ systems on embedded devices.
+
+config MTD_NOR_FLASH
+ bool "Enable parallel NOR flash support"
+ help
+ Enable support for parallel NOR flash.
+
+config SYS_MTDPARTS_RUNTIME
+ bool "Allow MTDPARTS to be configured at runtime"
+ depends on MTD
+ help
+ This option allows calling the function board_mtdparts_default to
+ build the mtdids and mtdparts variables dynamically at runtime.
+
+config FLASH_CFI_DRIVER
+ bool "Enable CFI Flash driver"
+ help
+ The Common Flash Interface specification was developed by Intel,
+ AMD and other flash manufacturers. It provides a universal method
+ for probing the capabilities of flash devices. If you wish to
+ support any device that is CFI-compliant, you need to enable this
+ option. Visit <http://www.amd.com/products/nvd/overview/cfi.html>
+ for more information on CFI.
+
+config CFI_FLASH
+ bool "Enable Driver Model for CFI Flash driver"
+ depends on DM_MTD
+ help
+ The Common Flash Interface specification was developed by Intel,
+ AMD and other flash manufacturers. It provides a universal method
+ for probing the capabilities of flash devices. If you wish to
+ support any device that is CFI-compliant, you need to enable this
+ option. Visit <http://www.amd.com/products/nvd/overview/cfi.html>
+ for more information on CFI.
+
+config SYS_FLASH_USE_BUFFER_WRITE
+ bool "Enable buffered writes to flash"
+ depends on FLASH_CFI_DRIVER
+ help
+ Use buffered writes to flash.
+
+config FLASH_CFI_MTD
+ bool "Enable CFI MTD driver"
+ depends on FLASH_CFI_DRIVER
+ help
+ This option enables the building of the cfi_mtd driver
+ in the drivers directory. The driver exports CFI flash
+ to the MTD layer.
+
+config SYS_FLASH_PROTECTION
+ bool "Use hardware flash protection"
+ depends on FLASH_CFI_DRIVER
+ help
+ If defined, hardware flash sector protection is used
+ instead of U-Boot software protection.
+
+config SYS_FLASH_CFI
+ bool "Define extra elements in CFI for flash geometry"
+ depends on FLASH_CFI_DRIVER
+ help
+ Define if the flash driver uses extra elements in the
+ common flash structure for storing flash geometry.
+
+config ALTERA_QSPI
+ bool "Altera Generic Quad SPI Controller"
+ depends on DM_MTD
+ help
+ This enables access to Altera EPCQ/EPCS flash chips using the
+ Altera Generic Quad SPI Controller. The controller presents the SPI
+ NOR flash through a parallel flash interface. For details, see
+ Altera's "Embedded Peripherals IP User Guide".
+
+config FLASH_PIC32
+ bool "Microchip PIC32 Flash driver"
+ depends on MACH_PIC32 && DM_MTD
+ help
+ This enables access to Microchip PIC32 internal non-CFI flash
+ chips through the PIC32 Non-Volatile-Memory controller.
+
+config RENESAS_RPC_HF
+ bool "Renesas RCar Gen3 RPC HyperFlash driver"
+ depends on RCAR_GEN3 && DM_MTD
+ help
+ This enables access to HyperFlash memory through the Renesas
+ RCar Gen3 RPC controller.
+
+config HBMC_AM654
+ bool "HyperBus controller driver for AM65x SoC"
+ depends on SYSCON
+ help
+ This is the driver for the HyperBus controller on TI's AM65x and
+ other SoCs.
+
+source "drivers/mtd/nand/Kconfig"
+
+source "drivers/mtd/spi/Kconfig"
+
+source "drivers/mtd/ubi/Kconfig"
+
+endmenu
diff --git a/roms/u-boot/drivers/mtd/Makefile b/roms/u-boot/drivers/mtd/Makefile
new file mode 100644
index 000000000..6d77ebfaa
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2000-2007
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
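+# Kbuild links every object collected in $(mtd-y) below into the single
+# composite module mtd.o, so the build-stage rules at the bottom of this
+# file can pull the whole MTD core in or out with one obj- entry.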
+mtd-$(CONFIG_MTD) += mtdcore.o mtd_uboot.o
+mtd-$(CONFIG_DM_MTD) += mtd-uclass.o
+mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+mtd-$(CONFIG_MTD_CONCAT) += mtdconcat.o
+mtd-$(CONFIG_ALTERA_QSPI) += altera_qspi.o
+mtd-$(CONFIG_FLASH_CFI_DRIVER) += cfi_flash.o
+mtd-$(CONFIG_FLASH_CFI_MTD) += cfi_mtd.o
+mtd-$(CONFIG_FLASH_CFI_LEGACY) += jedec_flash.o
+mtd-$(CONFIG_FLASH_PIC32) += pic32_flash.o
+mtd-$(CONFIG_ST_SMI) += st_smi.o
+mtd-$(CONFIG_STM32_FLASH) += stm32_flash.o
+mtd-$(CONFIG_RENESAS_RPC_HF) += renesas_rpc_hf.o
+mtd-$(CONFIG_HBMC_AM654) += hbmc-am654.o
+
+# U-Boot build
+ifeq ($(CONFIG_SPL_BUILD)$(CONFIG_TPL_BUILD),)
+
+ifneq ($(mtd-y),)
+obj-y += mtd.o
+endif
+obj-y += nand/
+obj-y += onenand/
+obj-y += spi/
+obj-$(CONFIG_MTD_UBI) += ubi/
+
+# SPL/TPL build
+else
+
+ifneq ($(mtd-y),)
+obj-$(CONFIG_SPL_MTD_SUPPORT) += mtd.o
+endif
+obj-$(CONFIG_$(SPL_TPL_)NAND_SUPPORT) += nand/
+obj-$(CONFIG_SPL_ONENAND_SUPPORT) += onenand/
+obj-$(CONFIG_$(SPL_TPL_)SPI_FLASH_SUPPORT) += spi/
+obj-$(CONFIG_SPL_UBI) += ubispl/
+
+endif
diff --git a/roms/u-boot/drivers/mtd/altera_qspi.c b/roms/u-boot/drivers/mtd/altera_qspi.c
new file mode 100644
index 000000000..7bac599a5
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/altera_qspi.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2015 Thomas Chou <thomas@wytron.com.tw>
+ */
+
+#include <common.h>
+#include <console.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <flash.h>
+#include <log.h>
+#include <mtd.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* The STATUS register */
+#define QUADSPI_SR_BP0 BIT(2)
+#define QUADSPI_SR_BP1 BIT(3)
+#define QUADSPI_SR_BP2 BIT(4)
+#define QUADSPI_SR_BP2_0 GENMASK(4, 2)
+#define QUADSPI_SR_BP3 BIT(6)
+#define QUADSPI_SR_TB BIT(5)
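+/*
+ * As decoded in altera_qspi_get_locked_range() below, BP3..BP0 encode the
+ * size of the protected region as a power of two (2^(BP-1) sectors when
+ * BP is non-zero) and TB selects whether the region starts at the bottom
+ * or the top of the array.
+ */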
+
+/*
+ * The QUADSPI_MEM_OP register is used to perform memory protect and erase operations
+ */
+#define QUADSPI_MEM_OP_BULK_ERASE 0x00000001
+#define QUADSPI_MEM_OP_SECTOR_ERASE 0x00000002
+#define QUADSPI_MEM_OP_SECTOR_PROTECT 0x00000003
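+/*
+ * The operation code occupies the low byte of QUADSPI_MEM_OP;
+ * altera_qspi_erase() places the sector index at bit 8 and up, and
+ * altera_qspi_lock() packs the BP bits into bits 11:8 with TB at bit 12.
+ */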
+
+/*
+ * The QUADSPI_ISR register is used to determine whether an invalid write or
+ * erase operation triggered an interrupt
+ */
+#define QUADSPI_ISR_ILLEGAL_ERASE BIT(0)
+#define QUADSPI_ISR_ILLEGAL_WRITE BIT(1)
+
+struct altera_qspi_regs {
+ u32 rd_status;
+ u32 rd_sid;
+ u32 rd_rdid;
+ u32 mem_op;
+ u32 isr;
+ u32 imr;
+ u32 chip_select;
+};
+
+struct altera_qspi_plat {
+ struct altera_qspi_regs *regs;
+ void *base;
+ unsigned long size;
+};
+
+static uint flash_verbose;
+flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS]; /* FLASH chips info */
+
+static void altera_qspi_get_locked_range(struct mtd_info *mtd, loff_t *ofs,
+ uint64_t *len);
+
+void flash_print_info(flash_info_t *info)
+{
+ struct mtd_info *mtd = info->mtd;
+ loff_t ofs;
+ u64 len;
+
+ printf("Altera QSPI flash Size: %ld MB in %d Sectors\n",
+ info->size >> 20, info->sector_count);
+ altera_qspi_get_locked_range(mtd, &ofs, &len);
+ printf(" %08lX +%lX", info->start[0], info->size);
+ if (len) {
+ printf(", protected %08llX +%llX",
+ info->start[0] + ofs, len);
+ }
+ putc('\n');
+}
+
+void flash_set_verbose(uint v)
+{
+ flash_verbose = v;
+}
+
+int flash_erase(flash_info_t *info, int s_first, int s_last)
+{
+ struct mtd_info *mtd = info->mtd;
+ struct erase_info instr;
+ int ret;
+
+ memset(&instr, 0, sizeof(instr));
+ instr.mtd = mtd;
+ instr.addr = mtd->erasesize * s_first;
+ instr.len = mtd->erasesize * (s_last + 1 - s_first);
+ flash_set_verbose(1);
+ ret = mtd_erase(mtd, &instr);
+ flash_set_verbose(0);
+ if (ret)
+ return ERR_PROTECTED;
+
+ puts(" done\n");
+ return 0;
+}
+
+int write_buff(flash_info_t *info, uchar *src, ulong addr, ulong cnt)
+{
+ struct mtd_info *mtd = info->mtd;
+ struct udevice *dev = mtd->dev;
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ ulong base = (ulong)pdata->base;
+ loff_t to = addr - base;
+ size_t retlen;
+ int ret;
+
+ ret = mtd_write(mtd, to, cnt, &retlen, src);
+ if (ret)
+ return ERR_PROTECTED;
+
+ return 0;
+}
+
+unsigned long flash_init(void)
+{
+ struct udevice *dev;
+
+ /* probe every MTD device */
+ for (uclass_first_device(UCLASS_MTD, &dev);
+ dev;
+ uclass_next_device(&dev)) {
+ }
+
+ return flash_info[0].size;
+}
+
+static int altera_qspi_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct udevice *dev = mtd->dev;
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ struct altera_qspi_regs *regs = pdata->regs;
+ size_t addr = instr->addr;
+ size_t len = instr->len;
+ size_t end = addr + len;
+ u32 sect;
+ u32 stat;
+ u32 *flash, *last;
+
+ instr->state = MTD_ERASING;
+ addr &= ~(mtd->erasesize - 1); /* get lower aligned address */
+ while (addr < end) {
+ if (ctrlc()) {
+ if (flash_verbose)
+ putc('\n');
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ instr->state = MTD_ERASE_FAILED;
+ mtd_erase_callback(instr);
+ return -EIO;
+ }
+ flash = pdata->base + addr;
+ last = pdata->base + addr + mtd->erasesize;
+ /* skip erase if sector is blank */
+ while (flash < last) {
+ if (readl(flash) != 0xffffffff)
+ break;
+ flash++;
+ }
+ if (flash < last) {
+ sect = addr / mtd->erasesize;
+ sect <<= 8;
+ sect |= QUADSPI_MEM_OP_SECTOR_ERASE;
+ debug("erase %08x\n", sect);
+ writel(sect, &regs->mem_op);
+ stat = readl(&regs->isr);
+ if (stat & QUADSPI_ISR_ILLEGAL_ERASE) {
+ /* erase failed, sector might be protected */
+ debug("erase %08x fail %x\n", sect, stat);
+ writel(stat, &regs->isr); /* clear isr */
+ instr->fail_addr = addr;
+ instr->state = MTD_ERASE_FAILED;
+ mtd_erase_callback(instr);
+ return -EIO;
+ }
+ if (flash_verbose)
+ putc('.');
+ } else {
+ if (flash_verbose)
+ putc(',');
+ }
+ addr += mtd->erasesize;
+ }
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+static int altera_qspi_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct udevice *dev = mtd->dev;
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+
+ memcpy_fromio(buf, pdata->base + from, len);
+ *retlen = len;
+
+ return 0;
+}
+
+static int altera_qspi_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct udevice *dev = mtd->dev;
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ struct altera_qspi_regs *regs = pdata->regs;
+ u32 stat;
+
+ memcpy_toio(pdata->base + to, buf, len);
+ /* check whether the write triggered an illegal write interrupt */
+ stat = readl(&regs->isr);
+ if (stat & QUADSPI_ISR_ILLEGAL_WRITE) {
+ /* write failed, sector might be protected */
+ debug("write fail %x\n", stat);
+ writel(stat, &regs->isr); /* clear isr */
+ return -EIO;
+ }
+ *retlen = len;
+
+ return 0;
+}
+
+static void altera_qspi_sync(struct mtd_info *mtd)
+{
+}
+
+static void altera_qspi_get_locked_range(struct mtd_info *mtd, loff_t *ofs,
+ uint64_t *len)
+{
+ struct udevice *dev = mtd->dev;
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ struct altera_qspi_regs *regs = pdata->regs;
+ int shift0 = ffs(QUADSPI_SR_BP2_0) - 1;
+ int shift3 = ffs(QUADSPI_SR_BP3) - 1 - 3;
+ u32 stat = readl(&regs->rd_status);
+ unsigned pow = ((stat & QUADSPI_SR_BP2_0) >> shift0) |
+ ((stat & QUADSPI_SR_BP3) >> shift3);
+
+ *ofs = 0;
+ *len = 0;
+ if (pow) {
+ *len = mtd->erasesize << (pow - 1);
+ if (*len > mtd->size)
+ *len = mtd->size;
+ if (!(stat & QUADSPI_SR_TB))
+ *ofs = mtd->size - *len;
+ }
+}
+
+static int altera_qspi_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct udevice *dev = mtd->dev;
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ struct altera_qspi_regs *regs = pdata->regs;
+ u32 sector_start, sector_end;
+ u32 num_sectors;
+ u32 mem_op;
+ u32 sr_bp;
+ u32 sr_tb;
+
+ num_sectors = mtd->size / mtd->erasesize;
+ sector_start = ofs / mtd->erasesize;
+ sector_end = (ofs + len) / mtd->erasesize;
+
+ if (sector_start >= num_sectors / 2) {
+ sr_bp = fls(num_sectors - 1 - sector_start) + 1;
+ sr_tb = 0;
+ } else if (sector_end < num_sectors / 2) {
+ sr_bp = fls(sector_end) + 1;
+ sr_tb = 1;
+ } else {
+ sr_bp = 15;
+ sr_tb = 0;
+ }
+
+ mem_op = (sr_tb << 12) | (sr_bp << 8);
+ mem_op |= QUADSPI_MEM_OP_SECTOR_PROTECT;
+ debug("lock %08x\n", mem_op);
+ writel(mem_op, &regs->mem_op);
+
+ return 0;
+}
+
+static int altera_qspi_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct udevice *dev = mtd->dev;
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ struct altera_qspi_regs *regs = pdata->regs;
+ u32 mem_op;
+
+ mem_op = QUADSPI_MEM_OP_SECTOR_PROTECT;
+ debug("unlock %08x\n", mem_op);
+ writel(mem_op, &regs->mem_op);
+
+ return 0;
+}
+
+static int altera_qspi_probe(struct udevice *dev)
+{
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ struct altera_qspi_regs *regs = pdata->regs;
+ unsigned long base = (unsigned long)pdata->base;
+ struct mtd_info *mtd;
+ flash_info_t *flash = &flash_info[0];
+ u32 rdid;
+ int i;
+
+ rdid = readl(&regs->rd_rdid);
+ debug("rdid %x\n", rdid);
+
+ mtd = dev_get_uclass_priv(dev);
+ mtd->dev = dev;
+ mtd->name = "nor0";
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
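+ /* the low byte of the RDID encodes the capacity: size = 2^(n - 6) bytes */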
+ mtd->size = 1 << ((rdid & 0xff) - 6);
+ mtd->writesize = 1;
+ mtd->writebufsize = mtd->writesize;
+ mtd->_erase = altera_qspi_erase;
+ mtd->_read = altera_qspi_read;
+ mtd->_write = altera_qspi_write;
+ mtd->_sync = altera_qspi_sync;
+ mtd->_lock = altera_qspi_lock;
+ mtd->_unlock = altera_qspi_unlock;
+ mtd->numeraseregions = 0;
+ mtd->erasesize = 0x10000;
+ if (add_mtd_device(mtd))
+ return -ENOMEM;
+
+ flash->mtd = mtd;
+ flash->size = mtd->size;
+ flash->sector_count = mtd->size / mtd->erasesize;
+ flash->flash_id = rdid;
+ flash->start[0] = base;
+ for (i = 1; i < flash->sector_count; i++)
+ flash->start[i] = flash->start[i - 1] + mtd->erasesize;
+ gd->bd->bi_flashstart = base;
+
+ return 0;
+}
+
+static int altera_qspi_of_to_plat(struct udevice *dev)
+{
+ struct altera_qspi_plat *pdata = dev_get_plat(dev);
+ void *blob = (void *)gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ const char *list, *end;
+ const fdt32_t *cell;
+ void *base;
+ unsigned long addr, size;
+ int parent, addrc, sizec;
+ int len, idx;
+
+ /*
+ * Decode regs. There are multiple reg tuples, and they need to
+ * match up with the reg-names property.
+ */
+ parent = fdt_parent_offset(blob, node);
+ fdt_support_default_count_cells(blob, parent, &addrc, &sizec);
+ list = fdt_getprop(blob, node, "reg-names", &len);
+ if (!list)
+ return -ENOENT;
+ end = list + len;
+ cell = fdt_getprop(blob, node, "reg", &len);
+ if (!cell)
+ return -ENOENT;
+ idx = 0;
+ while (list < end) {
+ addr = fdt_translate_address((void *)blob,
+ node, cell + idx);
+ size = fdt_addr_to_cpu(cell[idx + addrc]);
+ base = map_physmem(addr, size, MAP_NOCACHE);
+ len = strlen(list);
+ if (strcmp(list, "avl_csr") == 0) {
+ pdata->regs = base;
+ } else if (strcmp(list, "avl_mem") == 0) {
+ pdata->base = base;
+ pdata->size = size;
+ }
+ idx += addrc + sizec;
+ list += (len + 1);
+ }
+
+ return 0;
+}
+
+static const struct udevice_id altera_qspi_ids[] = {
+ { .compatible = "altr,quadspi-1.0" },
+ {}
+};
+
+U_BOOT_DRIVER(altera_qspi) = {
+ .name = "altera_qspi",
+ .id = UCLASS_MTD,
+ .of_match = altera_qspi_ids,
+ .of_to_plat = altera_qspi_of_to_plat,
+ .plat_auto = sizeof(struct altera_qspi_plat),
+ .probe = altera_qspi_probe,
+};
diff --git a/roms/u-boot/drivers/mtd/cfi_flash.c b/roms/u-boot/drivers/mtd/cfi_flash.c
new file mode 100644
index 000000000..9c27fea5d
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/cfi_flash.c
@@ -0,0 +1,2530 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2002-2004
+ * Brad Kemp, Seranoa Networks, Brad.Kemp@seranoa.com
+ *
+ * Copyright (C) 2003 Arabella Software Ltd.
+ * Yuli Barcohen <yuli@arabellasw.com>
+ *
+ * Copyright (C) 2004
+ * Ed Okerson
+ *
+ * Copyright (C) 2006
+ * Tolunay Orkun <listmember@orkun.us>
+ */
+
+/* The DEBUG define must be before common to enable debugging */
+/* #define DEBUG */
+
+#include <common.h>
+#include <console.h>
+#include <dm.h>
+#include <env.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <flash.h>
+#include <init.h>
+#include <irq_func.h>
+#include <log.h>
+#include <asm/global_data.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <env_internal.h>
+#include <linux/delay.h>
+#include <mtd/cfi_flash.h>
+#include <watchdog.h>
+
+/*
+ * This file implements a Common Flash Interface (CFI) driver for
+ * U-Boot.
+ *
+ * The width of the port and the width of the chips are determined at
+ * initialization. These widths are used to calculate the address for
+ * accessing CFI data structures.
+ *
+ * References
+ * JEDEC Standard JESD68 - Common Flash Interface (CFI)
+ * JEDEC Standard JEP137-A Common Flash Interface (CFI) ID Codes
+ * Intel Application Note 646 Common Flash Interface (CFI) and Command Sets
+ * Intel 290667-008 3 Volt Intel StrataFlash Memory datasheet
+ * AMD CFI Specification, Release 2.0 December 1, 2001
+ * AMD/Spansion Application Note: Migration from Single-byte to Three-byte
+ * Device IDs, Publication Number 25538 Revision A, November 8, 2001
+ *
+ * Define CONFIG_SYS_WRITE_SWAPPED_DATA if you have to swap the bytes
+ * between reading and writing (yes, such hardware exists).
+ */
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static uint flash_offset_cfi[2] = { FLASH_OFFSET_CFI, FLASH_OFFSET_CFI_ALT };
+#ifdef CONFIG_FLASH_CFI_MTD
+static uint flash_verbose = 1;
+#else
+#define flash_verbose 1
+#endif
+
+flash_info_t flash_info[CFI_MAX_FLASH_BANKS]; /* FLASH chips info */
+
+/*
+ * Check if the chip width is defined. If not, start detection with 8 bit.
+ */
+#ifndef CONFIG_SYS_FLASH_CFI_WIDTH
+#define CONFIG_SYS_FLASH_CFI_WIDTH FLASH_CFI_8BIT
+#endif
+
+#ifdef CONFIG_CFI_FLASH_USE_WEAK_ACCESSORS
+#define __maybe_weak __weak
+#else
+#define __maybe_weak static
+#endif
+
+/*
+ * 0xffff is an undefined value for the configuration register. When
+ * this value is returned, the configuration register shall not be
+ * written at all (default mode).
+ */
+static u16 cfi_flash_config_reg(int i)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_CONFIG_REGS
+ return ((u16 [])CONFIG_SYS_CFI_FLASH_CONFIG_REGS)[i];
+#else
+ return 0xffff;
+#endif
+}
+
+#if defined(CONFIG_SYS_MAX_FLASH_BANKS_DETECT)
+int cfi_flash_num_flash_banks = CONFIG_SYS_MAX_FLASH_BANKS_DETECT;
+#else
+int cfi_flash_num_flash_banks;
+#endif
+
+#ifdef CONFIG_CFI_FLASH /* for driver model */
+static void cfi_flash_init_dm(void)
+{
+ struct udevice *dev;
+
+ cfi_flash_num_flash_banks = 0;
+ /*
+ * uclass_first_device() probes the first device and
+ * uclass_next_device() probes the rest, if they exist, so that
+ * cfi_flash_probe() gets called and assigns the available base
+ * addresses.
+ */
+ for (uclass_first_device(UCLASS_MTD, &dev);
+ dev;
+ uclass_next_device(&dev)) {
+ }
+}
+
+phys_addr_t cfi_flash_bank_addr(int i)
+{
+ return flash_info[i].base;
+}
+#else
+__weak phys_addr_t cfi_flash_bank_addr(int i)
+{
+ return ((phys_addr_t [])CONFIG_SYS_FLASH_BANKS_LIST)[i];
+}
+#endif
+
+__weak unsigned long cfi_flash_bank_size(int i)
+{
+#ifdef CONFIG_SYS_FLASH_BANKS_SIZES
+ return ((unsigned long [])CONFIG_SYS_FLASH_BANKS_SIZES)[i];
+#else
+ return 0;
+#endif
+}
+
+__maybe_weak void flash_write8(u8 value, void *addr)
+{
+ __raw_writeb(value, addr);
+}
+
+__maybe_weak void flash_write16(u16 value, void *addr)
+{
+ __raw_writew(value, addr);
+}
+
+__maybe_weak void flash_write32(u32 value, void *addr)
+{
+ __raw_writel(value, addr);
+}
+
+__maybe_weak void flash_write64(u64 value, void *addr)
+{
+ /* No architectures currently implement __raw_writeq() */
+ *(volatile u64 *)addr = value;
+}
+
+__maybe_weak u8 flash_read8(void *addr)
+{
+ return __raw_readb(addr);
+}
+
+__maybe_weak u16 flash_read16(void *addr)
+{
+ return __raw_readw(addr);
+}
+
+__maybe_weak u32 flash_read32(void *addr)
+{
+ return __raw_readl(addr);
+}
+
+__maybe_weak u64 flash_read64(void *addr)
+{
+ /* No architectures currently implement __raw_readq() */
+ return *(volatile u64 *)addr;
+}
+
+/*-----------------------------------------------------------------------
+ */
+#if defined(CONFIG_ENV_IS_IN_FLASH) || defined(CONFIG_ENV_ADDR_REDUND) || \
+ (defined(CONFIG_SYS_MONITOR_BASE) && \
+ (CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE))
+static flash_info_t *flash_get_info(ulong base)
+{
+ int i;
+ flash_info_t *info;
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ info = &flash_info[i];
+ if (info->size && info->start[0] <= base &&
+ base <= info->start[0] + info->size - 1)
+ return info;
+ }
+
+ return NULL;
+}
+#endif
+
+unsigned long flash_sector_size(flash_info_t *info, flash_sect_t sect)
+{
+ if (sect != (info->sector_count - 1))
+ return info->start[sect + 1] - info->start[sect];
+ else
+ return info->start[0] + info->size - info->start[sect];
+}
+
+/*-----------------------------------------------------------------------
+ * create an address based on the offset and the port width
+ */
+static inline void *
+flash_map(flash_info_t *info, flash_sect_t sect, uint offset)
+{
+ unsigned int byte_offset = offset * info->portwidth;
+
+ return (void *)(info->start[sect] + (byte_offset << info->chip_lsb));
+}
+
+static inline void flash_unmap(flash_info_t *info, flash_sect_t sect,
+ unsigned int offset, void *addr)
+{
+}
+
+/*-----------------------------------------------------------------------
+ * make a properly sized command based on the port and chip widths
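+ * (e.g. a 32-bit port built from two 16-bit chips replicates command
+ * 0x98 into 0x00980098, so every chip on the bus sees the command)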
+ */
+static void flash_make_cmd(flash_info_t *info, u32 cmd, void *cmdbuf)
+{
+ int i;
+ int cword_offset;
+ int cp_offset;
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ u32 cmd_le = cpu_to_le32(cmd);
+#endif
+ uchar val;
+ uchar *cp = (uchar *) cmdbuf;
+
+ for (i = info->portwidth; i > 0; i--) {
+ cword_offset = (info->portwidth - i) % info->chipwidth;
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ cp_offset = info->portwidth - i;
+ val = *((uchar *)&cmd_le + cword_offset);
+#else
+ cp_offset = i - 1;
+ val = *((uchar *)&cmd + sizeof(u32) - cword_offset - 1);
+#endif
+ cp[cp_offset] = (cword_offset >= sizeof(u32)) ? 0x00 : val;
+ }
+}
+
+#ifdef DEBUG
+/*-----------------------------------------------------------------------
+ * Debug support
+ */
+static void print_longlong(char *str, unsigned long long data)
+{
+ int i;
+ char *cp;
+
+ cp = (char *)&data;
+ for (i = 0; i < 8; i++)
+ sprintf(&str[i * 2], "%2.2x", *cp++);
+}
+
+static void flash_printqry(struct cfi_qry *qry)
+{
+ u8 *p = (u8 *)qry;
+ int x, y;
+
+ for (x = 0; x < sizeof(struct cfi_qry); x += 16) {
+ debug("%02x : ", x);
+ for (y = 0; y < 16; y++)
+ debug("%2.2x ", p[x + y]);
+ debug(" ");
+ for (y = 0; y < 16; y++) {
+ unsigned char c = p[x + y];
+
+ if (c >= 0x20 && c <= 0x7e)
+ debug("%c", c);
+ else
+ debug(".");
+ }
+ debug("\n");
+ }
+}
+#endif
+
+/*-----------------------------------------------------------------------
+ * read a character at a port width address
+ */
+static inline uchar flash_read_uchar(flash_info_t *info, uint offset)
+{
+ uchar *cp;
+ uchar retval;
+
+ cp = flash_map(info, 0, offset);
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ retval = flash_read8(cp);
+#else
+ retval = flash_read8(cp + info->portwidth - 1);
+#endif
+ flash_unmap(info, 0, offset, cp);
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ * read a word at a port width address; assumes a 16-bit bus
+ */
+static inline ushort flash_read_word(flash_info_t *info, uint offset)
+{
+ ushort *addr, retval;
+
+ addr = flash_map(info, 0, offset);
+ retval = flash_read16(addr);
+ flash_unmap(info, 0, offset, addr);
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ * read a long word by picking the least significant byte of each maximum
+ * port size word. Swap for ppc format.
+ */
+static ulong flash_read_long(flash_info_t *info, flash_sect_t sect,
+ uint offset)
+{
+ uchar *addr;
+ ulong retval;
+
+#ifdef DEBUG
+ int x;
+#endif
+ addr = flash_map(info, sect, offset);
+
+#ifdef DEBUG
+ debug("long addr is at %p info->portwidth = %d\n", addr,
+ info->portwidth);
+ for (x = 0; x < 4 * info->portwidth; x++)
+ debug("addr[%x] = 0x%x\n", x, flash_read8(addr + x));
+#endif
+#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ retval = ((flash_read8(addr) << 16) |
+ (flash_read8(addr + info->portwidth) << 24) |
+ (flash_read8(addr + 2 * info->portwidth)) |
+ (flash_read8(addr + 3 * info->portwidth) << 8));
+#else
+ retval = ((flash_read8(addr + 2 * info->portwidth - 1) << 24) |
+ (flash_read8(addr + info->portwidth - 1) << 16) |
+ (flash_read8(addr + 4 * info->portwidth - 1) << 8) |
+ (flash_read8(addr + 3 * info->portwidth - 1)));
+#endif
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*
+ * Write a properly sized command to the correct address
+ */
+static void flash_write_cmd(flash_info_t *info, flash_sect_t sect,
+ uint offset, u32 cmd)
+{
+ void *addr;
+ cfiword_t cword;
+
+ addr = flash_map(info, sect, offset);
+ flash_make_cmd(info, cmd, &cword);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ debug("fwc addr %p cmd %x %x 8bit x %d bit\n", addr, cmd,
+ cword.w8, info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ flash_write8(cword.w8, addr);
+ break;
+ case FLASH_CFI_16BIT:
+ debug("fwc addr %p cmd %x %4.4x 16bit x %d bit\n", addr,
+ cmd, cword.w16,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ flash_write16(cword.w16, addr);
+ break;
+ case FLASH_CFI_32BIT:
+ debug("fwc addr %p cmd %x %8.8x 32bit x %d bit\n", addr,
+ cmd, cword.w32,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ flash_write32(cword.w32, addr);
+ break;
+ case FLASH_CFI_64BIT:
+#ifdef DEBUG
+ {
+ char str[20];
+
+ print_longlong(str, cword.w64);
+
+ debug("fwrite addr %p cmd %x %s 64 bit x %d bit\n",
+ addr, cmd, str,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ }
+#endif
+ flash_write64(cword.w64, addr);
+ break;
+ }
+
+ /* Ensure all the instructions are fully finished */
+ sync();
+
+ flash_unmap(info, sect, offset, addr);
+}
+
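+/*
+ * Issue the classic JEDEC/AMD two-cycle unlock: 0xAA to the first unlock
+ * address (typically 0x555) followed by 0x55 to the second (typically
+ * 0x2AA), as required before AMD-style program and erase commands.
+ */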
+static void flash_unlock_seq(flash_info_t *info, flash_sect_t sect)
+{
+ flash_write_cmd(info, sect, info->addr_unlock1, AMD_CMD_UNLOCK_START);
+ flash_write_cmd(info, sect, info->addr_unlock2, AMD_CMD_UNLOCK_ACK);
+}
+
+/*-----------------------------------------------------------------------
+ */
+static int flash_isequal(flash_info_t *info, flash_sect_t sect, uint offset,
+ uchar cmd)
+{
+ void *addr;
+ cfiword_t cword;
+ int retval;
+
+ addr = flash_map(info, sect, offset);
+ flash_make_cmd(info, cmd, &cword);
+
+ debug("is= cmd %x(%c) addr %p ", cmd, cmd, addr);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ debug("is= %x %x\n", flash_read8(addr), cword.w8);
+ retval = (flash_read8(addr) == cword.w8);
+ break;
+ case FLASH_CFI_16BIT:
+ debug("is= %4.4x %4.4x\n", flash_read16(addr), cword.w16);
+ retval = (flash_read16(addr) == cword.w16);
+ break;
+ case FLASH_CFI_32BIT:
+ debug("is= %8.8x %8.8x\n", flash_read32(addr), cword.w32);
+ retval = (flash_read32(addr) == cword.w32);
+ break;
+ case FLASH_CFI_64BIT:
+#ifdef DEBUG
+ {
+ char str1[20];
+ char str2[20];
+
+ print_longlong(str1, flash_read64(addr));
+ print_longlong(str2, cword.w64);
+ debug("is= %s %s\n", str1, str2);
+ }
+#endif
+ retval = (flash_read64(addr) == cword.w64);
+ break;
+ default:
+ retval = 0;
+ break;
+ }
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ */
+static int flash_isset(flash_info_t *info, flash_sect_t sect, uint offset,
+ uchar cmd)
+{
+ void *addr;
+ cfiword_t cword;
+ int retval;
+
+ addr = flash_map(info, sect, offset);
+ flash_make_cmd(info, cmd, &cword);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ retval = ((flash_read8(addr) & cword.w8) == cword.w8);
+ break;
+ case FLASH_CFI_16BIT:
+ retval = ((flash_read16(addr) & cword.w16) == cword.w16);
+ break;
+ case FLASH_CFI_32BIT:
+ retval = ((flash_read32(addr) & cword.w32) == cword.w32);
+ break;
+ case FLASH_CFI_64BIT:
+ retval = ((flash_read64(addr) & cword.w64) == cword.w64);
+ break;
+ default:
+ retval = 0;
+ break;
+ }
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
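+ * AMD-style DQ6 toggle polling: while a program or erase operation is in
+ * progress, consecutive status reads return different data, so two reads
+ * that differ mean the chip is still busy.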
+ */
+static int flash_toggle(flash_info_t *info, flash_sect_t sect, uint offset,
+ uchar cmd)
+{
+ u8 *addr;
+ cfiword_t cword;
+ int retval;
+
+ addr = flash_map(info, sect, offset);
+ flash_make_cmd(info, cmd, &cword);
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ retval = flash_read8(addr) != flash_read8(addr);
+ break;
+ case FLASH_CFI_16BIT:
+ retval = flash_read16(addr) != flash_read16(addr);
+ break;
+ case FLASH_CFI_32BIT:
+ retval = flash_read32(addr) != flash_read32(addr);
+ break;
+ case FLASH_CFI_64BIT:
+ retval = ((flash_read32(addr) != flash_read32(addr)) ||
+ (flash_read32(addr + 4) != flash_read32(addr + 4)));
+ break;
+ default:
+ retval = 0;
+ break;
+ }
+ flash_unmap(info, sect, offset, addr);
+
+ return retval;
+}
+
+/*
+ * flash_is_busy - check to see if the flash is busy
+ *
+ * This routine checks the status of the chip and returns true if the
+ * chip is busy.
+ */
+static int flash_is_busy(flash_info_t *info, flash_sect_t sect)
+{
+ int retval;
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ retval = !flash_isset(info, sect, 0, FLASH_STATUS_DONE);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+#endif
+ if (info->sr_supported) {
+ flash_write_cmd(info, sect, info->addr_unlock1,
+ FLASH_CMD_READ_STATUS);
+ retval = !flash_isset(info, sect, 0,
+ FLASH_STATUS_DONE);
+ } else {
+ retval = flash_toggle(info, sect, 0,
+ AMD_STATUS_TOGGLE);
+ }
+
+ break;
+ default:
+ retval = 0;
+ }
+ debug("%s: %d\n", __func__, retval);
+ return retval;
+}
+
+/*-----------------------------------------------------------------------
+ * Wait for XSR.7 to be set. Time out with an error if it is not set in time.
+ * This routine does not set the flash to read-array mode.
+ */
+static int flash_status_check(flash_info_t *info, flash_sect_t sector,
+ ulong tout, char *prompt)
+{
+ ulong start;
+
+#if CONFIG_SYS_HZ != 1000
+ /* Avoid overflow for large HZ */
+ if ((ulong)CONFIG_SYS_HZ > 100000)
+ tout *= (ulong)CONFIG_SYS_HZ / 1000;
+ else
+ tout = DIV_ROUND_UP(tout * (ulong)CONFIG_SYS_HZ, 1000);
+#endif
+
+ /* Wait for command completion */
+#ifdef CONFIG_SYS_LOW_RES_TIMER
+ reset_timer();
+#endif
+ start = get_timer(0);
+ WATCHDOG_RESET();
+ while (flash_is_busy(info, sector)) {
+ if (get_timer(start) > tout) {
+ printf("Flash %s timeout at address %lx data %lx\n",
+ prompt, info->start[sector],
+ flash_read_long(info, sector, 0));
+ flash_write_cmd(info, sector, 0, info->cmd_reset);
+ udelay(1);
+ return ERR_TIMEOUT;
+ }
+ udelay(1); /* also triggers watchdog */
+ }
+ return ERR_OK;
+}
+
+/*-----------------------------------------------------------------------
+ * Wait for XSR.7 to be set; if it times out, print an error, otherwise
+ * do a full status check.
+ *
+ * This routine sets the flash to read-array mode.
+ */
+static int flash_full_status_check(flash_info_t *info, flash_sect_t sector,
+ ulong tout, char *prompt)
+{
+ int retcode;
+
+ retcode = flash_status_check(info, sector, tout, prompt);
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ case CFI_CMDSET_INTEL_STANDARD:
+ if (retcode == ERR_OK &&
+ !flash_isset(info, sector, 0, FLASH_STATUS_DONE)) {
+ retcode = ERR_INVAL;
+ printf("Flash %s error at address %lx\n", prompt,
+ info->start[sector]);
+ if (flash_isset(info, sector, 0, FLASH_STATUS_ECLBS |
+ FLASH_STATUS_PSLBS)) {
+ puts("Command Sequence Error.\n");
+ } else if (flash_isset(info, sector, 0,
+ FLASH_STATUS_ECLBS)) {
+ puts("Block Erase Error.\n");
+ retcode = ERR_NOT_ERASED;
+ } else if (flash_isset(info, sector, 0,
+ FLASH_STATUS_PSLBS)) {
+ puts("Locking Error\n");
+ }
+ if (flash_isset(info, sector, 0, FLASH_STATUS_DPS)) {
+ puts("Block locked.\n");
+ retcode = ERR_PROTECTED;
+ }
+ if (flash_isset(info, sector, 0, FLASH_STATUS_VPENS))
+ puts("Vpp Low Error.\n");
+ }
+ flash_write_cmd(info, sector, 0, info->cmd_reset);
+ udelay(1);
+ break;
+ default:
+ break;
+ }
+ return retcode;
+}
+
+static int use_flash_status_poll(flash_info_t *info)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_STATUS_POLL
+ if (info->vendor == CFI_CMDSET_AMD_EXTENDED ||
+ info->vendor == CFI_CMDSET_AMD_STANDARD)
+ return 1;
+#endif
+ return 0;
+}
+
+static int flash_status_poll(flash_info_t *info, void *src, void *dst,
+ ulong tout, char *prompt)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_STATUS_POLL
+ ulong start;
+ int ready;
+
+#if CONFIG_SYS_HZ != 1000
+ /* Avoid overflow for large HZ */
+ if ((ulong)CONFIG_SYS_HZ > 100000)
+ tout *= (ulong)CONFIG_SYS_HZ / 1000;
+ else
+ tout = DIV_ROUND_UP(tout * (ulong)CONFIG_SYS_HZ, 1000);
+#endif
+
+ /* Wait for command completion */
+#ifdef CONFIG_SYS_LOW_RES_TIMER
+ reset_timer();
+#endif
+ start = get_timer(0);
+ WATCHDOG_RESET();
+ while (1) {
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ ready = flash_read8(dst) == flash_read8(src);
+ break;
+ case FLASH_CFI_16BIT:
+ ready = flash_read16(dst) == flash_read16(src);
+ break;
+ case FLASH_CFI_32BIT:
+ ready = flash_read32(dst) == flash_read32(src);
+ break;
+ case FLASH_CFI_64BIT:
+ ready = flash_read64(dst) == flash_read64(src);
+ break;
+ default:
+ ready = 0;
+ break;
+ }
+ if (ready)
+ break;
+ if (get_timer(start) > tout) {
+ printf("Flash %s timeout at address %lx data %lx\n",
+ prompt, (ulong)dst, (ulong)flash_read8(dst));
+ return ERR_TIMEOUT;
+ }
+ udelay(1); /* also triggers watchdog */
+ }
+#endif /* CONFIG_SYS_CFI_FLASH_STATUS_POLL */
+ return ERR_OK;
+}
+
+/*-----------------------------------------------------------------------
+ */
+static void flash_add_byte(flash_info_t *info, cfiword_t *cword, uchar c)
+{
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ unsigned short w;
+ unsigned int l;
+ unsigned long long ll;
+#endif
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ cword->w8 = c;
+ break;
+ case FLASH_CFI_16BIT:
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ w = c;
+ w <<= 8;
+ cword->w16 = (cword->w16 >> 8) | w;
+#else
+ cword->w16 = (cword->w16 << 8) | c;
+#endif
+ break;
+ case FLASH_CFI_32BIT:
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ l = c;
+ l <<= 24;
+ cword->w32 = (cword->w32 >> 8) | l;
+#else
+ cword->w32 = (cword->w32 << 8) | c;
+#endif
+ break;
+ case FLASH_CFI_64BIT:
+#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA)
+ ll = c;
+ ll <<= 56;
+ cword->w64 = (cword->w64 >> 8) | ll;
+#else
+ cword->w64 = (cword->w64 << 8) | c;
+#endif
+ break;
+ }
+}
+
+/*
+ * Loop through the sector table starting from the previously found sector.
+ * Searches forwards or backwards, dependent on the passed address.
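+ * Caching the previous hit in saved_sector/saved_info makes the common
+ * case of sequential accesses within one bank effectively O(1).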
+ */
+static flash_sect_t find_sector(flash_info_t *info, ulong addr)
+{
+ static flash_sect_t saved_sector; /* previously found sector */
+ static flash_info_t *saved_info; /* previously used flash bank */
+ flash_sect_t sector = saved_sector;
+
+ if (info != saved_info || sector >= info->sector_count)
+ sector = 0;
+
+ while ((sector < info->sector_count - 1) &&
+ (info->start[sector] < addr))
+ sector++;
+ while ((info->start[sector] > addr) && (sector > 0))
+ /*
+ * also decrements the sector in case of an overshoot
+ * in the first loop
+ */
+ sector--;
+
+ saved_sector = sector;
+ saved_info = info;
+ return sector;
+}
+
+/*-----------------------------------------------------------------------
+ */
+static int flash_write_cfiword(flash_info_t *info, ulong dest, cfiword_t cword)
+{
+ void *dstaddr = (void *)dest;
+ int flag;
+ flash_sect_t sect = 0;
+ char sect_found = 0;
+
+ /* Check if Flash is (sufficiently) erased */
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flag = ((flash_read8(dstaddr) & cword.w8) == cword.w8);
+ break;
+ case FLASH_CFI_16BIT:
+ flag = ((flash_read16(dstaddr) & cword.w16) == cword.w16);
+ break;
+ case FLASH_CFI_32BIT:
+ flag = ((flash_read32(dstaddr) & cword.w32) == cword.w32);
+ break;
+ case FLASH_CFI_64BIT:
+ flag = ((flash_read64(dstaddr) & cword.w64) == cword.w64);
+ break;
+ default:
+ flag = 0;
+ break;
+ }
+ if (!flag)
+ return ERR_NOT_ERASED;
+
+ /* Disable interrupts which might cause a timeout here */
+ flag = disable_interrupts();
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ case CFI_CMDSET_INTEL_STANDARD:
+ flash_write_cmd(info, 0, 0, FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_WRITE);
+ break;
+ case CFI_CMDSET_AMD_EXTENDED:
+ case CFI_CMDSET_AMD_STANDARD:
+ sect = find_sector(info, dest);
+ flash_unlock_seq(info, sect);
+ flash_write_cmd(info, sect, info->addr_unlock1, AMD_CMD_WRITE);
+ sect_found = 1;
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ sect = find_sector(info, dest);
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, 0, info->addr_unlock1, AMD_CMD_WRITE);
+ sect_found = 1;
+ break;
+#endif
+ }
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flash_write8(cword.w8, dstaddr);
+ break;
+ case FLASH_CFI_16BIT:
+ flash_write16(cword.w16, dstaddr);
+ break;
+ case FLASH_CFI_32BIT:
+ flash_write32(cword.w32, dstaddr);
+ break;
+ case FLASH_CFI_64BIT:
+ flash_write64(cword.w64, dstaddr);
+ break;
+ }
+
+ /* re-enable interrupts if necessary */
+ if (flag)
+ enable_interrupts();
+
+ if (!sect_found)
+ sect = find_sector(info, dest);
+
+ if (use_flash_status_poll(info))
+ return flash_status_poll(info, &cword, dstaddr,
+ info->write_tout, "write");
+ else
+ return flash_full_status_check(info, sect,
+ info->write_tout, "write");
+}
+
+#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE
+
+static int flash_write_cfibuffer(flash_info_t *info, ulong dest, uchar *cp,
+ int len)
+{
+ flash_sect_t sector;
+ int cnt;
+ int retcode;
+ u8 *src = cp;
+ u8 *dst = (u8 *)dest;
+ u8 *dst2 = dst;
+ int flag = 1;
+ uint offset = 0;
+ unsigned int shift;
+ uchar write_cmd;
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ shift = 0;
+ break;
+ case FLASH_CFI_16BIT:
+ shift = 1;
+ break;
+ case FLASH_CFI_32BIT:
+ shift = 2;
+ break;
+ case FLASH_CFI_64BIT:
+ shift = 3;
+ break;
+ default:
+ retcode = ERR_INVAL;
+ goto out_unmap;
+ }
+
+ cnt = len >> shift;
+
+ while ((cnt-- > 0) && (flag == 1)) {
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flag = ((flash_read8(dst2) & flash_read8(src)) ==
+ flash_read8(src));
+ src += 1, dst2 += 1;
+ break;
+ case FLASH_CFI_16BIT:
+ flag = ((flash_read16(dst2) & flash_read16(src)) ==
+ flash_read16(src));
+ src += 2, dst2 += 2;
+ break;
+ case FLASH_CFI_32BIT:
+ flag = ((flash_read32(dst2) & flash_read32(src)) ==
+ flash_read32(src));
+ src += 4, dst2 += 4;
+ break;
+ case FLASH_CFI_64BIT:
+ flag = ((flash_read64(dst2) & flash_read64(src)) ==
+ flash_read64(src));
+ src += 8, dst2 += 8;
+ break;
+ }
+ }
+ if (!flag) {
+ retcode = ERR_NOT_ERASED;
+ goto out_unmap;
+ }
+
+ src = cp;
+ sector = find_sector(info, dest);
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ write_cmd = (info->vendor == CFI_CMDSET_INTEL_PROG_REGIONS) ?
+ FLASH_CMD_WRITE_BUFFER_PROG :
+ FLASH_CMD_WRITE_TO_BUFFER;
+ flash_write_cmd(info, sector, 0, FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd(info, sector, 0, FLASH_CMD_READ_STATUS);
+ flash_write_cmd(info, sector, 0, write_cmd);
+ retcode = flash_status_check(info, sector,
+ info->buffer_write_tout,
+ "write to buffer");
+ if (retcode == ERR_OK) {
+ /* reduce the number of loops by the width of
+ * the port
+ */
+ cnt = len >> shift;
+ flash_write_cmd(info, sector, 0, cnt - 1);
+ while (cnt-- > 0) {
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ flash_write8(flash_read8(src), dst);
+ src += 1, dst += 1;
+ break;
+ case FLASH_CFI_16BIT:
+ flash_write16(flash_read16(src), dst);
+ src += 2, dst += 2;
+ break;
+ case FLASH_CFI_32BIT:
+ flash_write32(flash_read32(src), dst);
+ src += 4, dst += 4;
+ break;
+ case FLASH_CFI_64BIT:
+ flash_write64(flash_read64(src), dst);
+ src += 8, dst += 8;
+ break;
+ default:
+ retcode = ERR_INVAL;
+ goto out_unmap;
+ }
+ }
+ flash_write_cmd(info, sector, 0,
+ FLASH_CMD_WRITE_BUFFER_CONFIRM);
+ retcode = flash_full_status_check(
+ info, sector, info->buffer_write_tout,
+ "buffer write");
+ }
+
+ break;
+
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ flash_unlock_seq(info, sector);
+
+#ifdef CONFIG_FLASH_SPANSION_S29WS_N
+ offset = ((unsigned long)dst - info->start[sector]) >> shift;
+#endif
+ flash_write_cmd(info, sector, offset, AMD_CMD_WRITE_TO_BUFFER);
+ cnt = len >> shift;
+ flash_write_cmd(info, sector, offset, cnt - 1);
+
+ switch (info->portwidth) {
+ case FLASH_CFI_8BIT:
+ while (cnt-- > 0) {
+ flash_write8(flash_read8(src), dst);
+ src += 1, dst += 1;
+ }
+ break;
+ case FLASH_CFI_16BIT:
+ while (cnt-- > 0) {
+ flash_write16(flash_read16(src), dst);
+ src += 2, dst += 2;
+ }
+ break;
+ case FLASH_CFI_32BIT:
+ while (cnt-- > 0) {
+ flash_write32(flash_read32(src), dst);
+ src += 4, dst += 4;
+ }
+ break;
+ case FLASH_CFI_64BIT:
+ while (cnt-- > 0) {
+ flash_write64(flash_read64(src), dst);
+ src += 8, dst += 8;
+ }
+ break;
+ default:
+ retcode = ERR_INVAL;
+ goto out_unmap;
+ }
+
+ flash_write_cmd(info, sector, 0, AMD_CMD_WRITE_BUFFER_CONFIRM);
+ if (use_flash_status_poll(info))
+ retcode = flash_status_poll(info, src - (1 << shift),
+ dst - (1 << shift),
+ info->buffer_write_tout,
+ "buffer write");
+ else
+ retcode = flash_full_status_check(info, sector,
+ info->buffer_write_tout,
+ "buffer write");
+ break;
+
+ default:
+ debug("Unknown Command Set\n");
+ retcode = ERR_INVAL;
+ break;
+ }
+
+out_unmap:
+ return retcode;
+}
+#endif /* CONFIG_SYS_FLASH_USE_BUFFER_WRITE */
+
+/*-----------------------------------------------------------------------
+ */
+int flash_erase(flash_info_t *info, int s_first, int s_last)
+{
+ int rcode = 0;
+ int prot;
+ flash_sect_t sect;
+ int st;
+
+ if (info->flash_id != FLASH_MAN_CFI) {
+ puts("Can't erase unknown flash type - aborted\n");
+ return 1;
+ }
+ if (s_first < 0 || s_first > s_last) {
+ puts("- no sectors to erase\n");
+ return 1;
+ }
+
+ prot = 0;
+ for (sect = s_first; sect <= s_last; ++sect)
+ if (info->protect[sect])
+ prot++;
+ if (prot) {
+ printf("- Warning: %d protected sectors will not be erased!\n",
+ prot);
+ } else if (flash_verbose) {
+ putc('\n');
+ }
+
+ for (sect = s_first; sect <= s_last; sect++) {
+ if (ctrlc()) {
+ printf("\n");
+ return 1;
+ }
+
+ if (info->protect[sect] == 0) { /* not protected */
+#ifdef CONFIG_SYS_FLASH_CHECK_BLANK_BEFORE_ERASE
+ int k;
+ int size;
+ int erased;
+ u32 *flash;
+
+ /*
+ * Check if whole sector is erased
+ */
+ size = flash_sector_size(info, sect);
+ erased = 1;
+ flash = (u32 *)info->start[sect];
+ /* divide by 4 for longword access */
+ size = size >> 2;
+ for (k = 0; k < size; k++) {
+ if (flash_read32(flash++) != 0xffffffff) {
+ erased = 0;
+ break;
+ }
+ }
+ if (erased) {
+ if (flash_verbose)
+ putc(',');
+ continue;
+ }
+#endif
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ flash_write_cmd(info, sect, 0,
+ FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd(info, sect, 0,
+ FLASH_CMD_BLOCK_ERASE);
+ flash_write_cmd(info, sect, 0,
+ FLASH_CMD_ERASE_CONFIRM);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ flash_unlock_seq(info, sect);
+ flash_write_cmd(info, sect,
+ info->addr_unlock1,
+ AMD_CMD_ERASE_START);
+ flash_unlock_seq(info, sect);
+ flash_write_cmd(info, sect, 0,
+ info->cmd_erase_sector);
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, 0, info->addr_unlock1,
+ AMD_CMD_ERASE_START);
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, sect, 0,
+ AMD_CMD_ERASE_SECTOR);
+ break;
+#endif
+ default:
+ debug("Unknown flash vendor %d\n",
+ info->vendor);
+ break;
+ }
+
+ if (use_flash_status_poll(info)) {
+ cfiword_t cword;
+ void *dest;
+
+ cword.w64 = 0xffffffffffffffffULL;
+ dest = flash_map(info, sect, 0);
+ st = flash_status_poll(info, &cword, dest,
+ info->erase_blk_tout,
+ "erase");
+ flash_unmap(info, sect, 0, dest);
+ } else {
+ st = flash_full_status_check(info, sect,
+ info->erase_blk_tout,
+ "erase");
+ }
+
+ if (st)
+ rcode = 1;
+ else if (flash_verbose)
+ putc('.');
+ }
+ }
+
+ if (flash_verbose)
+ puts(" done\n");
+
+ return rcode;
+}
+
+#ifdef CONFIG_SYS_FLASH_EMPTY_INFO
+static int sector_erased(flash_info_t *info, int i)
+{
+ int k;
+ int size;
+ u32 *flash;
+
+ /*
+ * Check if whole sector is erased
+ */
+ size = flash_sector_size(info, i);
+ flash = (u32 *)info->start[i];
+ /* divide by 4 for longword access */
+ size = size >> 2;
+
+ for (k = 0; k < size; k++) {
+ if (flash_read32(flash++) != 0xffffffff)
+ return 0; /* not erased */
+ }
+
+ return 1; /* erased */
+}
+#endif /* CONFIG_SYS_FLASH_EMPTY_INFO */
+
+void flash_print_info(flash_info_t *info)
+{
+ int i;
+
+ if (info->flash_id != FLASH_MAN_CFI) {
+ puts("missing or unknown FLASH type\n");
+ return;
+ }
+
+ printf("%s flash (%d x %d)",
+ info->name,
+ (info->portwidth << 3), (info->chipwidth << 3));
+ if (info->size < 1024 * 1024)
+ printf(" Size: %ld kB in %d Sectors\n",
+ info->size >> 10, info->sector_count);
+ else
+ printf(" Size: %ld MB in %d Sectors\n",
+ info->size >> 20, info->sector_count);
+ printf(" ");
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ printf("Intel Prog Regions");
+ break;
+ case CFI_CMDSET_INTEL_STANDARD:
+ printf("Intel Standard");
+ break;
+ case CFI_CMDSET_INTEL_EXTENDED:
+ printf("Intel Extended");
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ printf("AMD Standard");
+ break;
+ case CFI_CMDSET_AMD_EXTENDED:
+ printf("AMD Extended");
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ printf("AMD Legacy");
+ break;
+#endif
+ default:
+ printf("Unknown (%d)", info->vendor);
+ break;
+ }
+ printf(" command set, Manufacturer ID: 0x%02X, Device ID: 0x",
+ info->manufacturer_id);
+ printf(info->chipwidth == FLASH_CFI_16BIT ? "%04X" : "%02X",
+ info->device_id);
+ if ((info->device_id & 0xff) == 0x7E) {
+ printf(info->chipwidth == FLASH_CFI_16BIT ? "%04X" : "%02X",
+ info->device_id2);
+ }
+ if (info->vendor == CFI_CMDSET_AMD_STANDARD && info->legacy_unlock)
+ printf("\n Advanced Sector Protection (PPB) enabled");
+ printf("\n Erase timeout: %ld ms, write timeout: %ld ms\n",
+ info->erase_blk_tout, info->write_tout);
+ if (info->buffer_size > 1) {
+ printf(" Buffer write timeout: %ld ms, ",
+ info->buffer_write_tout);
+ printf("buffer size: %d bytes\n", info->buffer_size);
+ }
+
+ puts("\n Sector Start Addresses:");
+ for (i = 0; i < info->sector_count; ++i) {
+ if (ctrlc())
+ break;
+ if ((i % 5) == 0)
+ putc('\n');
+#ifdef CONFIG_SYS_FLASH_EMPTY_INFO
+ /* print empty and read-only info */
+ printf(" %08lX %c %s ",
+ info->start[i],
+ sector_erased(info, i) ? 'E' : ' ',
+ info->protect[i] ? "RO" : " ");
+#else /* ! CONFIG_SYS_FLASH_EMPTY_INFO */
+ printf(" %08lX %s ",
+ info->start[i],
+ info->protect[i] ? "RO" : " ");
+#endif
+ }
+ putc('\n');
+}
+
+/*-----------------------------------------------------------------------
+ * This is used in a few places in write_buff() to show programming
+ * progress. Making it a function is nasty because it needs to make
+ * side-effect updates to digit and dots. Repeated code is nasty too, so
+ * we define it once here.
+ */
+#ifdef CONFIG_FLASH_SHOW_PROGRESS
+#define FLASH_SHOW_PROGRESS(scale, dots, digit, dots_sub) \
+ if (flash_verbose) { \
+ dots -= dots_sub; \
+ if (scale > 0 && dots <= 0) { \
+ if ((digit % 5) == 0) \
+ printf("%d", digit / 5); \
+ else \
+ putc('.'); \
+ digit--; \
+ dots += scale; \
+ } \
+ }
+#else
+#define FLASH_SHOW_PROGRESS(scale, dots, digit, dots_sub)
+#endif
+
+/*-----------------------------------------------------------------------
+ * Copy memory to flash, returns:
+ * 0 - OK
+ * 1 - write timeout
+ * 2 - Flash not erased
+ */
+int write_buff(flash_info_t *info, uchar *src, ulong addr, ulong cnt)
+{
+ ulong wp;
+ uchar *p;
+ int aln;
+ cfiword_t cword;
+ int i, rc;
+#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE
+ int buffered_size;
+#endif
+#ifdef CONFIG_FLASH_SHOW_PROGRESS
+ int digit = CONFIG_FLASH_SHOW_PROGRESS;
+ int scale = 0;
+ int dots = 0;
+
+ /*
+ * Suppress if there are fewer than CONFIG_FLASH_SHOW_PROGRESS writes.
+ */
+ if (cnt >= CONFIG_FLASH_SHOW_PROGRESS) {
+ scale = (int)((cnt + CONFIG_FLASH_SHOW_PROGRESS - 1) /
+ CONFIG_FLASH_SHOW_PROGRESS);
+ }
+#endif
+
+ /* get lower aligned address */
+ wp = (addr & ~(info->portwidth - 1));
+
+ /* handle unaligned start */
+ aln = addr - wp;
+ if (aln != 0) {
+ cword.w32 = 0;
+ p = (uchar *)wp;
+ for (i = 0; i < aln; ++i)
+ flash_add_byte(info, &cword, flash_read8(p + i));
+
+ for (; (i < info->portwidth) && (cnt > 0); i++) {
+ flash_add_byte(info, &cword, *src++);
+ cnt--;
+ }
+ for (; (cnt == 0) && (i < info->portwidth); ++i)
+ flash_add_byte(info, &cword, flash_read8(p + i));
+
+ rc = flash_write_cfiword(info, wp, cword);
+ if (rc != 0)
+ return rc;
+
+ wp += i;
+ FLASH_SHOW_PROGRESS(scale, dots, digit, i);
+ }
+
+ /* handle the aligned part */
+#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE
+ buffered_size = (info->portwidth / info->chipwidth);
+ buffered_size *= info->buffer_size;
+ while (cnt >= info->portwidth) {
+ /* prohibit buffer write when buffer_size is 1 */
+ if (info->buffer_size == 1) {
+ cword.w32 = 0;
+ for (i = 0; i < info->portwidth; i++)
+ flash_add_byte(info, &cword, *src++);
+ rc = flash_write_cfiword(info, wp, cword);
+ if (rc != 0)
+ return rc;
+ wp += info->portwidth;
+ cnt -= info->portwidth;
+ continue;
+ }
+
+ /* write buffer until next buffered_size aligned boundary */
+ i = buffered_size - (wp % buffered_size);
+ if (i > cnt)
+ i = cnt;
+ rc = flash_write_cfibuffer(info, wp, src, i);
+ if (rc != ERR_OK)
+ return rc;
+ i -= i & (info->portwidth - 1);
+ wp += i;
+ src += i;
+ cnt -= i;
+ FLASH_SHOW_PROGRESS(scale, dots, digit, i);
+ /* Only check every once in a while */
+ if ((cnt & 0xFFFF) < buffered_size && ctrlc())
+ return ERR_ABORTED;
+ }
+#else
+ while (cnt >= info->portwidth) {
+ cword.w32 = 0;
+ for (i = 0; i < info->portwidth; i++)
+ flash_add_byte(info, &cword, *src++);
+ rc = flash_write_cfiword(info, wp, cword);
+ if (rc != 0)
+ return rc;
+ wp += info->portwidth;
+ cnt -= info->portwidth;
+ FLASH_SHOW_PROGRESS(scale, dots, digit, info->portwidth);
+ /* Only check every once in a while */
+ if ((cnt & 0xFFFF) < info->portwidth && ctrlc())
+ return ERR_ABORTED;
+ }
+#endif /* CONFIG_SYS_FLASH_USE_BUFFER_WRITE */
+
+ if (cnt == 0)
+ return (0);
+
+ /*
+ * handle unaligned tail bytes
+ */
+ cword.w32 = 0;
+ p = (uchar *)wp;
+ for (i = 0; (i < info->portwidth) && (cnt > 0); ++i) {
+ flash_add_byte(info, &cword, *src++);
+ --cnt;
+ }
+ for (; i < info->portwidth; ++i)
+ flash_add_byte(info, &cword, flash_read8(p + i));
+
+ return flash_write_cfiword(info, wp, cword);
+}
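+
+/*
+ * Example usage (an illustrative sketch, not part of this file):
+ *
+ * flash_info_t *info = &flash_info[0];
+ * int rc = write_buff(info, (uchar *)ram_buf,
+ * info->start[0] + offset, len);
+ * if (rc != 0)
+ * flash_perror(rc);
+ *
+ * ram_buf, offset and len are hypothetical; flash_perror() decodes
+ * the ERR_* return codes.
+ */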
+
+static inline int manufact_match(flash_info_t *info, u32 manu)
+{
+ return info->manufacturer_id == ((manu & FLASH_VENDMASK) >> 16);
+}
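+
+/*
+ * For example (assuming the usual U-Boot encoding, where vendor ids
+ * such as AMD_MANUFACT are 0x00010001 and FLASH_VENDMASK extracts
+ * bits 23..16): manufact_match(info, AMD_MANUFACT) reduces to
+ * info->manufacturer_id == 0x01.
+ */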
+
+/*-----------------------------------------------------------------------
+ */
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+
+static int cfi_protect_bugfix(flash_info_t *info, long sector, int prot)
+{
+ if (manufact_match(info, INTEL_MANUFACT) &&
+ info->device_id == NUMONYX_256MBIT) {
+ /*
+ * see errata called
+ * "Numonyx Axcell P33/P30 Specification Update" :)
+ */
+ flash_write_cmd(info, sector, 0, FLASH_CMD_READ_ID);
+ if (!flash_isequal(info, sector, FLASH_OFFSET_PROTECT,
+ prot)) {
+ /*
+ * The set/clear command must follow FLASH_CMD_PROTECT within 20 us.
+ * Disable interrupts, which might otherwise cause a timeout here.
+ */
+ int flag = disable_interrupts();
+ unsigned short cmd;
+
+ if (prot)
+ cmd = FLASH_CMD_PROTECT_SET;
+ else
+ cmd = FLASH_CMD_PROTECT_CLEAR;
+
+ flash_write_cmd(info, sector, 0, FLASH_CMD_PROTECT);
+ flash_write_cmd(info, sector, 0, cmd);
+ /* re-enable interrupts if necessary */
+ if (flag)
+ enable_interrupts();
+ }
+ return 1;
+ }
+ return 0;
+}
+
+int flash_real_protect(flash_info_t *info, long sector, int prot)
+{
+ int retcode = 0;
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ if (!cfi_protect_bugfix(info, sector, prot)) {
+ flash_write_cmd(info, sector, 0,
+ FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd(info, sector, 0,
+ FLASH_CMD_PROTECT);
+ if (prot)
+ flash_write_cmd(info, sector, 0,
+ FLASH_CMD_PROTECT_SET);
+ else
+ flash_write_cmd(info, sector, 0,
+ FLASH_CMD_PROTECT_CLEAR);
+ }
+ break;
+ case CFI_CMDSET_AMD_EXTENDED:
+ case CFI_CMDSET_AMD_STANDARD:
+ /* U-Boot only checks the first byte */
+ if (manufact_match(info, ATM_MANUFACT)) {
+ if (prot) {
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, 0,
+ info->addr_unlock1,
+ ATM_CMD_SOFTLOCK_START);
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, sector, 0,
+ ATM_CMD_LOCK_SECT);
+ } else {
+ flash_write_cmd(info, 0,
+ info->addr_unlock1,
+ AMD_CMD_UNLOCK_START);
+ if (info->device_id == ATM_ID_BV6416)
+ flash_write_cmd(info, sector,
+ 0, ATM_CMD_UNLOCK_SECT);
+ }
+ }
+ if (info->legacy_unlock) {
+ int flag = disable_interrupts();
+ int lock_flag;
+
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, 0, info->addr_unlock1,
+ AMD_CMD_SET_PPB_ENTRY);
+ lock_flag = flash_isset(info, sector, 0, 0x01);
+ if (prot) {
+ if (lock_flag) {
+ flash_write_cmd(info, sector, 0,
+ AMD_CMD_PPB_LOCK_BC1);
+ flash_write_cmd(info, sector, 0,
+ AMD_CMD_PPB_LOCK_BC2);
+ }
+ debug("sector %ld %slocked\n", sector,
+ lock_flag ? "" : "already ");
+ } else {
+ if (!lock_flag) {
+ debug("unlock %ld\n", sector);
+ flash_write_cmd(info, 0, 0,
+ AMD_CMD_PPB_UNLOCK_BC1);
+ flash_write_cmd(info, 0, 0,
+ AMD_CMD_PPB_UNLOCK_BC2);
+ }
+ debug("sector %ld %sunlocked\n", sector,
+ !lock_flag ? "" : "already ");
+ }
+ if (flag)
+ enable_interrupts();
+
+ if (flash_status_check(info, sector,
+ info->erase_blk_tout,
+ prot ? "protect" : "unprotect"))
+ printf("status check error\n");
+
+ flash_write_cmd(info, 0, 0,
+ AMD_CMD_SET_PPB_EXIT_BC1);
+ flash_write_cmd(info, 0, 0,
+ AMD_CMD_SET_PPB_EXIT_BC2);
+ }
+ break;
+#ifdef CONFIG_FLASH_CFI_LEGACY
+ case CFI_CMDSET_AMD_LEGACY:
+ flash_write_cmd(info, sector, 0, FLASH_CMD_CLEAR_STATUS);
+ flash_write_cmd(info, sector, 0, FLASH_CMD_PROTECT);
+ if (prot)
+ flash_write_cmd(info, sector, 0,
+ FLASH_CMD_PROTECT_SET);
+ else
+ flash_write_cmd(info, sector, 0,
+ FLASH_CMD_PROTECT_CLEAR);
+#endif
+ }
+
+ /*
+ * Flash needs to be in status register read mode for
+ * flash_full_status_check() to work correctly
+ */
+ flash_write_cmd(info, sector, 0, FLASH_CMD_READ_STATUS);
+ retcode = flash_full_status_check(info, sector, info->erase_blk_tout,
+ prot ? "protect" : "unprotect");
+ if (retcode == 0) {
+ info->protect[sector] = prot;
+
+ /*
+ * On some of Intel's flash chips (marked via legacy_unlock)
+ * unprotect unprotects all locking.
+ */
+ if (prot == 0 && info->legacy_unlock) {
+ flash_sect_t i;
+
+ for (i = 0; i < info->sector_count; i++) {
+ if (info->protect[i])
+ flash_real_protect(info, i, 1);
+ }
+ }
+ }
+ return retcode;
+}
+
+/*-----------------------------------------------------------------------
+ * flash_read_user_serial - read the one-time programming (OTP) cells
+ */
+void flash_read_user_serial(flash_info_t *info, void *buffer, int offset,
+ int len)
+{
+ uchar *src;
+ uchar *dst;
+
+ dst = buffer;
+ src = flash_map(info, 0, FLASH_OFFSET_USER_PROTECTION);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_READ_ID);
+ memcpy(dst, src + offset, len);
+ flash_write_cmd(info, 0, 0, info->cmd_reset);
+ udelay(1);
+ flash_unmap(info, 0, FLASH_OFFSET_USER_PROTECTION, src);
+}
+
+/*
+ * flash_read_factory_serial - read the device Id from the protection area
+ */
+void flash_read_factory_serial(flash_info_t *info, void *buffer, int offset,
+ int len)
+{
+ uchar *src;
+
+ src = flash_map(info, 0, FLASH_OFFSET_INTEL_PROTECTION);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_READ_ID);
+ memcpy(buffer, src + offset, len);
+ flash_write_cmd(info, 0, 0, info->cmd_reset);
+ udelay(1);
+ flash_unmap(info, 0, FLASH_OFFSET_INTEL_PROTECTION, src);
+}
+
+#endif /* CONFIG_SYS_FLASH_PROTECTION */
+
+/*-----------------------------------------------------------------------
+ * Reverse the order of the erase regions in the CFI QRY structure.
+ * This is needed for chips that are either a) correctly detected as
+ * top-boot, or b) buggy.
+ */
+static void cfi_reverse_geometry(struct cfi_qry *qry)
+{
+ unsigned int i, j;
+ u32 tmp;
+
+ for (i = 0, j = qry->num_erase_regions - 1; i < j; i++, j--) {
+ tmp = get_unaligned(&qry->erase_region_info[i]);
+ put_unaligned(get_unaligned(&qry->erase_region_info[j]),
+ &qry->erase_region_info[i]);
+ put_unaligned(tmp, &qry->erase_region_info[j]);
+ }
+}
+
+/*-----------------------------------------------------------------------
+ * Read JEDEC ids from the device and set the corresponding fields in the
+ * info struct.
+ *
+ * Note: assumes info->vendor, info->portwidth and info->chipwidth are correct
+ *
+ */
+static void cmdset_intel_read_jedec_ids(flash_info_t *info)
+{
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+ udelay(1);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_READ_ID);
+ udelay(1000); /* some flashes are slow to respond */
+ info->manufacturer_id = flash_read_uchar(info,
+ FLASH_OFFSET_MANUFACTURER_ID);
+ info->device_id = (info->chipwidth == FLASH_CFI_16BIT) ?
+ flash_read_word(info, FLASH_OFFSET_DEVICE_ID) :
+ flash_read_uchar(info, FLASH_OFFSET_DEVICE_ID);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+}
+
+static int cmdset_intel_init(flash_info_t *info, struct cfi_qry *qry)
+{
+ info->cmd_reset = FLASH_CMD_RESET;
+
+ cmdset_intel_read_jedec_ids(info);
+ flash_write_cmd(info, 0, info->cfi_offset, FLASH_CMD_CFI);
+
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ /* read legacy lock/unlock bit from intel flash */
+ if (info->ext_addr) {
+ info->legacy_unlock =
+ flash_read_uchar(info, info->ext_addr + 5) & 0x08;
+ }
+#endif
+
+ return 0;
+}
+
+static void cmdset_amd_read_jedec_ids(flash_info_t *info)
+{
+ ushort bank_id = 0;
+ uchar manu_id;
+ uchar feature;
+
+ flash_write_cmd(info, 0, 0, AMD_CMD_RESET);
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, 0, info->addr_unlock1, FLASH_CMD_READ_ID);
+ udelay(1000); /* some flashes are slow to respond */
+
+ manu_id = flash_read_uchar(info, FLASH_OFFSET_MANUFACTURER_ID);
+ /* JEDEC JEP106Z specifies ID codes up to bank 7 */
+ while (manu_id == FLASH_CONTINUATION_CODE && bank_id < 0x800) {
+ bank_id += 0x100;
+ manu_id = flash_read_uchar(info,
+ bank_id | FLASH_OFFSET_MANUFACTURER_ID);
+ }
+ info->manufacturer_id = manu_id;
+
+ debug("info->ext_addr = 0x%x, cfi_version = 0x%x\n",
+ info->ext_addr, info->cfi_version);
+ if (info->ext_addr && info->cfi_version >= 0x3134) {
+ /* read software feature (at 0x53) */
+ feature = flash_read_uchar(info, info->ext_addr + 0x13);
+ debug("feature = 0x%x\n", feature);
+ info->sr_supported = feature & 0x1;
+ }
+
+ switch (info->chipwidth) {
+ case FLASH_CFI_8BIT:
+ info->device_id = flash_read_uchar(info,
+ FLASH_OFFSET_DEVICE_ID);
+ if (info->device_id == 0x7E) {
+ /* AMD 3-byte (expanded) device ids */
+ info->device_id2 = flash_read_uchar(info,
+ FLASH_OFFSET_DEVICE_ID2);
+ info->device_id2 <<= 8;
+ info->device_id2 |= flash_read_uchar(info,
+ FLASH_OFFSET_DEVICE_ID3);
+ }
+ break;
+ case FLASH_CFI_16BIT:
+ info->device_id = flash_read_word(info,
+ FLASH_OFFSET_DEVICE_ID);
+ if ((info->device_id & 0xff) == 0x7E) {
+ /* AMD 3-byte (expanded) device ids */
+ info->device_id2 = flash_read_uchar(info,
+ FLASH_OFFSET_DEVICE_ID2);
+ info->device_id2 <<= 8;
+ info->device_id2 |= flash_read_uchar(info,
+ FLASH_OFFSET_DEVICE_ID3);
+ }
+ break;
+ default:
+ break;
+ }
+ flash_write_cmd(info, 0, 0, AMD_CMD_RESET);
+ udelay(1);
+}
+
+static int cmdset_amd_init(flash_info_t *info, struct cfi_qry *qry)
+{
+ info->cmd_reset = AMD_CMD_RESET;
+ info->cmd_erase_sector = AMD_CMD_ERASE_SECTOR;
+
+ cmdset_amd_read_jedec_ids(info);
+ flash_write_cmd(info, 0, info->cfi_offset, FLASH_CMD_CFI);
+
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ if (info->ext_addr) {
+ /* read sector protect/unprotect scheme (at 0x49) */
+ if (flash_read_uchar(info, info->ext_addr + 9) == 0x8)
+ info->legacy_unlock = 1;
+ }
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_FLASH_CFI_LEGACY
+static void flash_read_jedec_ids(flash_info_t *info)
+{
+ info->manufacturer_id = 0;
+ info->device_id = 0;
+ info->device_id2 = 0;
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ cmdset_intel_read_jedec_ids(info);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ cmdset_amd_read_jedec_ids(info);
+ break;
+ default:
+ break;
+ }
+}
+
+/*-----------------------------------------------------------------------
+ * Call board code to request info about non-CFI flash.
+ * board_flash_get_legacy needs to fill in at least:
+ * info->portwidth, info->chipwidth and info->interface for JEDEC probing.
+ */
+static int flash_detect_legacy(phys_addr_t base, int banknum)
+{
+ flash_info_t *info = &flash_info[banknum];
+
+ if (board_flash_get_legacy(base, banknum, info)) {
+ /* board code may have filled info completely. If not, we
+ * use JEDEC ID probing.
+ */
+ if (!info->vendor) {
+ int modes[] = {
+ CFI_CMDSET_AMD_STANDARD,
+ CFI_CMDSET_INTEL_STANDARD
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); i++) {
+ info->vendor = modes[i];
+ info->start[0] =
+ (ulong)map_physmem(base,
+ info->portwidth,
+ MAP_NOCACHE);
+ if (info->portwidth == FLASH_CFI_8BIT &&
+ info->interface == FLASH_CFI_X8X16) {
+ info->addr_unlock1 = 0x2AAA;
+ info->addr_unlock2 = 0x5555;
+ } else {
+ info->addr_unlock1 = 0x5555;
+ info->addr_unlock2 = 0x2AAA;
+ }
+ flash_read_jedec_ids(info);
+ debug("JEDEC PROBE: ID %x %x %x\n",
+ info->manufacturer_id,
+ info->device_id,
+ info->device_id2);
+ if (jedec_flash_match(info, info->start[0]))
+ break;
+
+ unmap_physmem((void *)info->start[0],
+ info->portwidth);
+ }
+ }
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ info->cmd_reset = FLASH_CMD_RESET;
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ case CFI_CMDSET_AMD_LEGACY:
+ info->cmd_reset = AMD_CMD_RESET;
+ break;
+ }
+ info->flash_id = FLASH_MAN_CFI;
+ return 1;
+ }
+ return 0; /* use CFI */
+}
+#else
+static inline int flash_detect_legacy(phys_addr_t base, int banknum)
+{
+ return 0; /* use CFI */
+}
+#endif
+
+/*-----------------------------------------------------------------------
+ * detect if flash is compatible with the Common Flash Interface (CFI)
+ * http://www.jedec.org/download/search/jesd68.pdf
+ */
+static void flash_read_cfi(flash_info_t *info, void *buf, unsigned int start,
+ size_t len)
+{
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = flash_read_uchar(info, start + i);
+}
+
+static void __flash_cmd_reset(flash_info_t *info)
+{
+ /*
+ * We do not yet know what kind of command set to use, so we issue
+ * the reset command in both Intel and AMD variants, in the hope
+ * that AMD flash ROMs ignore the Intel command.
+ */
+ flash_write_cmd(info, 0, 0, AMD_CMD_RESET);
+ udelay(1);
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+}
+
+void flash_cmd_reset(flash_info_t *info)
+ __attribute__((weak, alias("__flash_cmd_reset")));
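+
+/*
+ * Boards with special reset requirements can override the weak alias
+ * above. A minimal sketch (hypothetical board code):
+ *
+ * void flash_cmd_reset(flash_info_t *info)
+ * {
+ * flash_write_cmd(info, 0, 0, AMD_CMD_RESET);
+ * udelay(1);
+ * flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+ * }
+ */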
+
+static int __flash_detect_cfi(flash_info_t *info, struct cfi_qry *qry)
+{
+ int cfi_offset;
+
+ /* Issue FLASH reset command */
+ flash_cmd_reset(info);
+
+ for (cfi_offset = 0; cfi_offset < ARRAY_SIZE(flash_offset_cfi);
+ cfi_offset++) {
+ flash_write_cmd(info, 0, flash_offset_cfi[cfi_offset],
+ FLASH_CMD_CFI);
+ if (flash_isequal(info, 0, FLASH_OFFSET_CFI_RESP, 'Q') &&
+ flash_isequal(info, 0, FLASH_OFFSET_CFI_RESP + 1, 'R') &&
+ flash_isequal(info, 0, FLASH_OFFSET_CFI_RESP + 2, 'Y')) {
+ flash_read_cfi(info, qry, FLASH_OFFSET_CFI_RESP,
+ sizeof(struct cfi_qry));
+ info->interface = le16_to_cpu(qry->interface_desc);
+ /* Some flash chips can support multiple bus widths.
+ * In this case, override the interface width and
+ * limit it to the port width.
+ */
+ if ((info->interface == FLASH_CFI_X8X16) &&
+ (info->portwidth == FLASH_CFI_8BIT)) {
+ debug("Overriding 16-bit interface width to"
+ " 8-bit port width\n");
+ info->interface = FLASH_CFI_X8;
+ } else if ((info->interface == FLASH_CFI_X16X32) &&
+ (info->portwidth == FLASH_CFI_16BIT)) {
+ debug("Overriding 16-bit interface width to"
+ " 16-bit port width\n");
+ info->interface = FLASH_CFI_X16;
+ }
+
+ info->cfi_offset = flash_offset_cfi[cfi_offset];
+ debug("device interface is %d\n",
+ info->interface);
+ debug("found port %d chip %d chip_lsb %d ",
+ info->portwidth, info->chipwidth, info->chip_lsb);
+ debug("port %d bits chip %d bits\n",
+ info->portwidth << CFI_FLASH_SHIFT_WIDTH,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+
+ /* calculate command offsets as in the Linux driver */
+ info->addr_unlock1 = 0x555;
+ info->addr_unlock2 = 0x2aa;
+
+ /*
+ * modify the unlock address if we are
+ * in compatibility mode
+ */
+ if (/* x8/x16 in x8 mode */
+ (info->chipwidth == FLASH_CFI_BY8 &&
+ info->interface == FLASH_CFI_X8X16) ||
+ /* x16/x32 in x16 mode */
+ (info->chipwidth == FLASH_CFI_BY16 &&
+ info->interface == FLASH_CFI_X16X32)) {
+ info->addr_unlock1 = 0xaaa;
+ info->addr_unlock2 = 0x555;
+ }
+
+ info->name = "CFI conformant";
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int flash_detect_cfi(flash_info_t *info, struct cfi_qry *qry)
+{
+ debug("flash detect cfi\n");
+
+ for (info->portwidth = CONFIG_SYS_FLASH_CFI_WIDTH;
+ info->portwidth <= FLASH_CFI_64BIT; info->portwidth <<= 1) {
+ for (info->chipwidth = FLASH_CFI_BY8;
+ info->chipwidth <= info->portwidth;
+ info->chipwidth <<= 1) {
+ /*
+ * First, try detection without shifting the addresses
+ * for 8bit devices (16bit wide connection)
+ */
+ info->chip_lsb = 0;
+ if (__flash_detect_cfi(info, qry))
+ return 1;
+
+ /*
+ * Not detected, so let's try with shifting
+ * for 8bit devices
+ */
+ info->chip_lsb = 1;
+ if (__flash_detect_cfi(info, qry))
+ return 1;
+ }
+ }
+ debug("not found\n");
+ return 0;
+}
+
+/*
+ * Manufacturer-specific quirks. Add workarounds for geometry
+ * reversal, etc. here.
+ */
+static void flash_fixup_amd(flash_info_t *info, struct cfi_qry *qry)
+{
+ /* check if flash geometry needs reversal */
+ if (qry->num_erase_regions > 1) {
+ /* reverse geometry if top boot part */
+ if (info->cfi_version < 0x3131) {
+ /* CFI < 1.1, try to guess from device id */
+ if ((info->device_id & 0x80) != 0)
+ cfi_reverse_geometry(qry);
+ } else if (flash_read_uchar(info, info->ext_addr + 0xf) == 3) {
+ /* CFI >= 1.1, deduce from top/bottom flag */
+ /* note: ext_addr is valid since cfi_version > 0 */
+ cfi_reverse_geometry(qry);
+ }
+ }
+}
+
+static void flash_fixup_atmel(flash_info_t *info, struct cfi_qry *qry)
+{
+ int reverse_geometry = 0;
+
+ /* Check the "top boot" bit in the PRI */
+ if (info->ext_addr && !(flash_read_uchar(info, info->ext_addr + 6) & 1))
+ reverse_geometry = 1;
+
+ /* The AT49BV6416(T) lists the erase regions in the wrong order.
+ * However, its device ID is identical to that of the non-broken
+ * AT49BV642D; the two differ only in the high byte.
+ */
+ if (info->device_id == 0xd6 || info->device_id == 0xd2)
+ reverse_geometry = !reverse_geometry;
+
+ if (reverse_geometry)
+ cfi_reverse_geometry(qry);
+}
+
+static void flash_fixup_stm(flash_info_t *info, struct cfi_qry *qry)
+{
+ /* check if flash geometry needs reversal */
+ if (qry->num_erase_regions > 1) {
+ /* reverse geometry if top boot part */
+ if (info->cfi_version < 0x3131) {
+ /* CFI < 1.1, guess by device id */
+ if (info->device_id == 0x22CA || /* M29W320DT */
+ info->device_id == 0x2256 || /* M29W320ET */
+ info->device_id == 0x22D7) { /* M29W800DT */
+ cfi_reverse_geometry(qry);
+ }
+ } else if (flash_read_uchar(info, info->ext_addr + 0xf) == 3) {
+ /* CFI >= 1.1, deduce from top/bottom flag */
+ /* note: ext_addr is valid since cfi_version > 0 */
+ cfi_reverse_geometry(qry);
+ }
+ }
+}
+
+static void flash_fixup_sst(flash_info_t *info, struct cfi_qry *qry)
+{
+ /*
+ * Many recent SST parallel NOR flashes claim to be
+ * CFI-conformant. This is not quite true: the qry struct
+ * reports a standard AMD command set (0x0002), while SST parts
+ * can erase two different sector sizes on the same memory.
+ * A 64KB sector (SST calls it a block) needs 0x30 to be erased.
+ * A 4KB sector (SST calls it a sector) needs 0x50 to be erased.
+ * Since the CFI query reports the number of 4KB sectors, users
+ * expect a sector granularity of 4KB, so it is set here.
+ */
+ if (info->device_id == 0x5D23 || /* SST39VF3201B */
+ info->device_id == 0x5C23) { /* SST39VF3202B */
+ /* set sector granularity to 4KB */
+ info->cmd_erase_sector = 0x50;
+ }
+}
+
+static void flash_fixup_num(flash_info_t *info, struct cfi_qry *qry)
+{
+ /*
+ * The M29EW devices seem to report the CFI information wrong
+ * when they are in 8-bit mode.
+ * There's an app note from Numonyx on this issue, so adjust
+ * the buffer size for the M29EW family in 8-bit mode.
+ */
+ if (qry->max_buf_write_size > 0x8 &&
+ info->device_id == 0x7E &&
+ (info->device_id2 == 0x2201 ||
+ info->device_id2 == 0x2301 ||
+ info->device_id2 == 0x2801 ||
+ info->device_id2 == 0x4801)) {
+ debug("Adjusted buffer size on Numonyx flash");
+ debug(" M29EW family in 8 bit mode\n");
+ qry->max_buf_write_size = 0x8;
+ }
+}
+
+/*
+ * The following code cannot be run from FLASH!
+ *
+ */
+ulong flash_get_size(phys_addr_t base, int banknum)
+{
+ flash_info_t *info = &flash_info[banknum];
+ int i, j;
+ flash_sect_t sect_cnt;
+ phys_addr_t sector;
+ unsigned long tmp;
+ int size_ratio;
+ uchar num_erase_regions;
+ int erase_region_size;
+ int erase_region_count;
+ struct cfi_qry qry;
+ unsigned long max_size;
+
+ memset(&qry, 0, sizeof(qry));
+
+ info->ext_addr = 0;
+ info->cfi_version = 0;
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ info->legacy_unlock = 0;
+#endif
+
+ info->start[0] = (ulong)map_physmem(base, info->portwidth, MAP_NOCACHE);
+
+ if (flash_detect_cfi(info, &qry)) {
+ info->vendor = le16_to_cpu(get_unaligned(&qry.p_id));
+ info->ext_addr = le16_to_cpu(get_unaligned(&qry.p_adr));
+ num_erase_regions = qry.num_erase_regions;
+
+ if (info->ext_addr) {
+ info->cfi_version = (ushort)flash_read_uchar(info,
+ info->ext_addr + 3) << 8;
+ info->cfi_version |= (ushort)flash_read_uchar(info,
+ info->ext_addr + 4);
+ }
+
+#ifdef DEBUG
+ flash_printqry(&qry);
+#endif
+
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_STANDARD:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ cmdset_intel_init(info, &qry);
+ break;
+ case CFI_CMDSET_AMD_STANDARD:
+ case CFI_CMDSET_AMD_EXTENDED:
+ cmdset_amd_init(info, &qry);
+ break;
+ default:
+ printf("CFI: Unknown command set 0x%x\n",
+ info->vendor);
+ /*
+ * Unfortunately, this means we don't know how
+ * to get the chip back to Read mode. Might
+ * as well try an Intel-style reset...
+ */
+ flash_write_cmd(info, 0, 0, FLASH_CMD_RESET);
+ return 0;
+ }
+
+ /* Do manufacturer-specific fixups */
+ switch (info->manufacturer_id) {
+ case 0x0001: /* AMD */
+ case 0x0037: /* AMIC */
+ flash_fixup_amd(info, &qry);
+ break;
+ case 0x001f:
+ flash_fixup_atmel(info, &qry);
+ break;
+ case 0x0020:
+ flash_fixup_stm(info, &qry);
+ break;
+ case 0x00bf: /* SST */
+ flash_fixup_sst(info, &qry);
+ break;
+ case 0x0089: /* Numonyx */
+ flash_fixup_num(info, &qry);
+ break;
+ }
+
+ debug("manufacturer is %d\n", info->vendor);
+ debug("manufacturer id is 0x%x\n", info->manufacturer_id);
+ debug("device id is 0x%x\n", info->device_id);
+ debug("device id2 is 0x%x\n", info->device_id2);
+ debug("cfi version is 0x%04x\n", info->cfi_version);
+
+ size_ratio = info->portwidth / info->chipwidth;
+ /* if the chip is x8/x16 reduce the ratio by half */
+ if (info->interface == FLASH_CFI_X8X16 &&
+ info->chipwidth == FLASH_CFI_BY8) {
+ size_ratio >>= 1;
+ }
+ debug("size_ratio %d port %d bits chip %d bits\n",
+ size_ratio, info->portwidth << CFI_FLASH_SHIFT_WIDTH,
+ info->chipwidth << CFI_FLASH_SHIFT_WIDTH);
+ info->size = 1 << qry.dev_size;
+ /* multiply the size by the number of chips */
+ info->size *= size_ratio;
+ max_size = cfi_flash_bank_size(banknum);
+ if (max_size && info->size > max_size) {
+ debug("[truncated from %ldMiB]", info->size >> 20);
+ info->size = max_size;
+ }
+ debug("found %d erase regions\n", num_erase_regions);
+ sect_cnt = 0;
+ sector = base;
+ for (i = 0; i < num_erase_regions; i++) {
+ if (i > NUM_ERASE_REGIONS) {
+ printf("%d erase regions found, only %d used\n",
+ num_erase_regions, NUM_ERASE_REGIONS);
+ break;
+ }
+
+ tmp = le32_to_cpu(get_unaligned(
+ &qry.erase_region_info[i]));
+ debug("erase region %u: 0x%08lx\n", i, tmp);
+
+ erase_region_count = (tmp & 0xffff) + 1;
+ tmp >>= 16;
+ erase_region_size =
+ (tmp & 0xffff) ? ((tmp & 0xffff) * 256) : 128;
+ debug("erase_region_count = %d ", erase_region_count);
+ debug("erase_region_size = %d\n", erase_region_size);
+ for (j = 0; j < erase_region_count; j++) {
+ if (sector - base >= info->size)
+ break;
+ if (sect_cnt >= CONFIG_SYS_MAX_FLASH_SECT) {
+ printf("ERROR: too many flash sectors\n");
+ break;
+ }
+ info->start[sect_cnt] =
+ (ulong)map_physmem(sector,
+ info->portwidth,
+ MAP_NOCACHE);
+ sector += (erase_region_size * size_ratio);
+
+ /*
+ * Only read protection status from
+ * supported devices (intel...)
+ */
+ switch (info->vendor) {
+ case CFI_CMDSET_INTEL_PROG_REGIONS:
+ case CFI_CMDSET_INTEL_EXTENDED:
+ case CFI_CMDSET_INTEL_STANDARD:
+ /*
+ * Set flash to read-id mode. Otherwise
+ * reading protected status is not
+ * guaranteed.
+ */
+ flash_write_cmd(info, sect_cnt, 0,
+ FLASH_CMD_READ_ID);
+ info->protect[sect_cnt] =
+ flash_isset(info, sect_cnt,
+ FLASH_OFFSET_PROTECT,
+ FLASH_STATUS_PROTECT);
+ flash_write_cmd(info, sect_cnt, 0,
+ FLASH_CMD_RESET);
+ break;
+ case CFI_CMDSET_AMD_EXTENDED:
+ case CFI_CMDSET_AMD_STANDARD:
+ if (!info->legacy_unlock) {
+ /* default: not protected */
+ info->protect[sect_cnt] = 0;
+ break;
+ }
+
+ /* Read protection (PPB) from sector */
+ flash_write_cmd(info, 0, 0,
+ info->cmd_reset);
+ flash_unlock_seq(info, 0);
+ flash_write_cmd(info, 0,
+ info->addr_unlock1,
+ AMD_CMD_SET_PPB_ENTRY);
+ info->protect[sect_cnt] =
+ !flash_isset(info, sect_cnt,
+ 0, 0x01);
+ flash_write_cmd(info, 0, 0,
+ info->cmd_reset);
+ break;
+ default:
+ /* default: not protected */
+ info->protect[sect_cnt] = 0;
+ }
+
+ sect_cnt++;
+ }
+ }
+
+ info->sector_count = sect_cnt;
+ info->buffer_size = 1 << le16_to_cpu(qry.max_buf_write_size);
+ tmp = 1 << qry.block_erase_timeout_typ;
+ info->erase_blk_tout = tmp *
+ (1 << qry.block_erase_timeout_max);
+ tmp = (1 << qry.buf_write_timeout_typ) *
+ (1 << qry.buf_write_timeout_max);
+
+ /* round up when converting to ms */
+ info->buffer_write_tout = (tmp + 999) / 1000;
+ tmp = (1 << qry.word_write_timeout_typ) *
+ (1 << qry.word_write_timeout_max);
+ /* round up when converting to ms */
+ info->write_tout = (tmp + 999) / 1000;
+ info->flash_id = FLASH_MAN_CFI;
+ if (info->interface == FLASH_CFI_X8X16 &&
+ info->chipwidth == FLASH_CFI_BY8) {
+ /* XXX - Need to test on x8/x16 in parallel. */
+ info->portwidth >>= 1;
+ }
+
+ flash_write_cmd(info, 0, 0, info->cmd_reset);
+ }
+
+ return (info->size);
+}
+
+#ifdef CONFIG_FLASH_CFI_MTD
+void flash_set_verbose(uint v)
+{
+ flash_verbose = v;
+}
+#endif
+
+static void cfi_flash_set_config_reg(u32 base, u16 val)
+{
+#ifdef CONFIG_SYS_CFI_FLASH_CONFIG_REGS
+ /*
+ * Only set this config register if really defined
+ * to a valid value (0xffff is invalid)
+ */
+ if (val == 0xffff)
+ return;
+
+ /*
+ * Set configuration register. Data is "encrypted" in the 16 lower
+ * address bits.
+ */
+ flash_write16(FLASH_CMD_SETUP, (void *)(base + (val << 1)));
+ flash_write16(FLASH_CMD_SET_CR_CONFIRM, (void *)(base + (val << 1)));
+
+ /*
+ * Finally issue reset-command to bring device back to
+ * read-array mode
+ */
+ flash_write16(FLASH_CMD_RESET, (void *)base);
+#endif
+}
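+
+/*
+ * Worked example (illustrative): with base = 0xfc000000 and
+ * val = 0xbdcf, both writes go to 0xfc000000 + (0xbdcf << 1) =
+ * 0xfc017b9e, so the register value is carried on the address
+ * lines of the 16-bit device.
+ */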
+
+/*-----------------------------------------------------------------------
+ */
+
+static void flash_protect_default(void)
+{
+#if defined(CONFIG_SYS_FLASH_AUTOPROTECT_LIST)
+ int i;
+ struct apl_s {
+ ulong start;
+ ulong size;
+ } apl[] = CONFIG_SYS_FLASH_AUTOPROTECT_LIST;
+#endif
+
+ /* Monitor protection ON by default */
+#if defined(CONFIG_SYS_MONITOR_BASE) && \
+ (CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE) && \
+ (!defined(CONFIG_MONITOR_IS_IN_RAM))
+ flash_protect(FLAG_PROTECT_SET,
+ CONFIG_SYS_MONITOR_BASE,
+ CONFIG_SYS_MONITOR_BASE + monitor_flash_len - 1,
+ flash_get_info(CONFIG_SYS_MONITOR_BASE));
+#endif
+
+ /* Environment protection ON by default */
+#ifdef CONFIG_ENV_IS_IN_FLASH
+ flash_protect(FLAG_PROTECT_SET,
+ CONFIG_ENV_ADDR,
+ CONFIG_ENV_ADDR + CONFIG_ENV_SECT_SIZE - 1,
+ flash_get_info(CONFIG_ENV_ADDR));
+#endif
+
+ /* Redundant environment protection ON by default */
+#ifdef CONFIG_ENV_ADDR_REDUND
+ flash_protect(FLAG_PROTECT_SET,
+ CONFIG_ENV_ADDR_REDUND,
+ CONFIG_ENV_ADDR_REDUND + CONFIG_ENV_SECT_SIZE - 1,
+ flash_get_info(CONFIG_ENV_ADDR_REDUND));
+#endif
+
+#if defined(CONFIG_SYS_FLASH_AUTOPROTECT_LIST)
+ for (i = 0; i < ARRAY_SIZE(apl); i++) {
+ debug("autoprotecting from %08lx to %08lx\n",
+ apl[i].start, apl[i].start + apl[i].size - 1);
+ flash_protect(FLAG_PROTECT_SET,
+ apl[i].start,
+ apl[i].start + apl[i].size - 1,
+ flash_get_info(apl[i].start));
+ }
+#endif
+}
+
+unsigned long flash_init(void)
+{
+ unsigned long size = 0;
+ int i;
+
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ /* read environment from EEPROM */
+ char s[64];
+
+ env_get_f("unlock", s, sizeof(s));
+#endif
+
+#ifdef CONFIG_CFI_FLASH /* for driver model */
+ cfi_flash_init_dm();
+#endif
+
+ /* Init: no FLASHes known */
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; ++i) {
+ flash_info[i].flash_id = FLASH_UNKNOWN;
+
+ /* Optionally write flash configuration register */
+ cfi_flash_set_config_reg(cfi_flash_bank_addr(i),
+ cfi_flash_config_reg(i));
+
+ if (!flash_detect_legacy(cfi_flash_bank_addr(i), i))
+ flash_get_size(cfi_flash_bank_addr(i), i);
+ size += flash_info[i].size;
+ if (flash_info[i].flash_id == FLASH_UNKNOWN) {
+#ifndef CONFIG_SYS_FLASH_QUIET_TEST
+ printf("## Unknown flash on Bank %d ", i + 1);
+ printf("- Size = 0x%08lx = %ld MB\n",
+ flash_info[i].size,
+ flash_info[i].size >> 20);
+#endif /* CONFIG_SYS_FLASH_QUIET_TEST */
+ }
+#ifdef CONFIG_SYS_FLASH_PROTECTION
+ else if (strcmp(s, "yes") == 0) {
+ /*
+ * Only the U-Boot image and its environment
+ * are protected, all other sectors are
+ * unprotected (unlocked) if flash hardware
+ * protection is used (CONFIG_SYS_FLASH_PROTECTION)
+ * and the environment variable "unlock" is
+ * set to "yes".
+ */
+ if (flash_info[i].legacy_unlock) {
+ int k;
+
+ /*
+ * Disable legacy_unlock temporarily,
+ * since flash_real_protect would
+ * relock all other sectors again
+ * otherwise.
+ */
+ flash_info[i].legacy_unlock = 0;
+
+ /*
+ * Legacy unlocking (e.g. Intel J3) ->
+ * unlock only one sector. This will
+ * unlock all sectors.
+ */
+ flash_real_protect(&flash_info[i], 0, 0);
+
+ flash_info[i].legacy_unlock = 1;
+
+ /*
+ * Manually mark other sectors as
+ * unlocked (unprotected)
+ */
+ for (k = 1; k < flash_info[i].sector_count; k++)
+ flash_info[i].protect[k] = 0;
+ } else {
+ /*
+ * No legacy unlocking -> unlock all sectors
+ */
+ flash_protect(FLAG_PROTECT_CLEAR,
+ flash_info[i].start[0],
+ flash_info[i].start[0]
+ + flash_info[i].size - 1,
+ &flash_info[i]);
+ }
+ }
+#endif /* CONFIG_SYS_FLASH_PROTECTION */
+ }
+
+ flash_protect_default();
+#ifdef CONFIG_FLASH_CFI_MTD
+ cfi_mtd_init();
+#endif
+
+ return (size);
+}
+
+#ifdef CONFIG_CFI_FLASH /* for driver model */
+static int cfi_flash_probe(struct udevice *dev)
+{
+ fdt_addr_t addr;
+ int idx;
+
+ for (idx = 0; idx < CFI_MAX_FLASH_BANKS; idx++) {
+ addr = dev_read_addr_index(dev, idx);
+ if (addr == FDT_ADDR_T_NONE)
+ break;
+
+ flash_info[cfi_flash_num_flash_banks].dev = dev;
+ flash_info[cfi_flash_num_flash_banks].base = addr;
+ cfi_flash_num_flash_banks++;
+ }
+ gd->bd->bi_flashstart = flash_info[0].base;
+
+ return 0;
+}
+
+static const struct udevice_id cfi_flash_ids[] = {
+ { .compatible = "cfi-flash" },
+ { .compatible = "jedec-flash" },
+ {}
+};
+
+U_BOOT_DRIVER(cfi_flash) = {
+ .name = "cfi_flash",
+ .id = UCLASS_MTD,
+ .of_match = cfi_flash_ids,
+ .probe = cfi_flash_probe,
+};
+#endif /* CONFIG_CFI_FLASH */
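+
+/*
+ * An illustrative device tree node matched by the driver above (the
+ * address, size and bank width are hypothetical):
+ *
+ * flash@0 {
+ * compatible = "cfi-flash";
+ * reg = <0x00000000 0x02000000>;
+ * bank-width = <2>;
+ * };
+ */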
diff --git a/roms/u-boot/drivers/mtd/cfi_mtd.c b/roms/u-boot/drivers/mtd/cfi_mtd.c
new file mode 100644
index 000000000..78293caa2
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/cfi_mtd.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2008 Semihalf
+ *
+ * Written by: Piotr Ziecik <kosmo@semihalf.com>
+ */
+
+#include <common.h>
+#include <dma.h>
+#include <flash.h>
+#include <malloc.h>
+
+#include <linux/errno.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/concat.h>
+#include <mtd/cfi_flash.h>
+
+static struct mtd_info cfi_mtd_info[CFI_MAX_FLASH_BANKS];
+static char cfi_mtd_names[CFI_MAX_FLASH_BANKS][16];
+#ifdef CONFIG_MTD_CONCAT
+static char c_mtd_name[16];
+#endif
+
+static int cfi_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ flash_info_t *fi = mtd->priv;
+ size_t a_start = fi->start[0] + instr->addr;
+ size_t a_end = a_start + instr->len;
+ int s_first = -1;
+ int s_last = -1;
+ int error, sect;
+
+ for (sect = 0; sect < fi->sector_count; sect++) {
+ if (a_start == fi->start[sect])
+ s_first = sect;
+
+ if (sect < fi->sector_count - 1) {
+ if (a_end == fi->start[sect + 1]) {
+ s_last = sect;
+ break;
+ }
+ } else {
+ s_last = sect;
+ break;
+ }
+ }
+
+ if (s_first >= 0 && s_first <= s_last) {
+ instr->state = MTD_ERASING;
+
+ flash_set_verbose(0);
+ error = flash_erase(fi, s_first, s_last);
+ flash_set_verbose(1);
+
+ if (error) {
+ instr->state = MTD_ERASE_FAILED;
+ return -EIO;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cfi_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ flash_info_t *fi = mtd->priv;
+ u_char *f = (u_char*)(fi->start[0]) + from;
+
+ if (dma_memcpy(buf, f, len) < 0)
+ memcpy(buf, f, len);
+ *retlen = len;
+
+ return 0;
+}
+
+static int cfi_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ flash_info_t *fi = mtd->priv;
+ u_long t = fi->start[0] + to;
+ int error;
+
+ flash_set_verbose(0);
+ error = write_buff(fi, (u_char*)buf, t, len);
+ flash_set_verbose(1);
+
+ if (!error) {
+ *retlen = len;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static void cfi_mtd_sync(struct mtd_info *mtd)
+{
+ /*
+ * This function should wait until all pending operations
+ * finish. However, this driver is fully synchronous, so
+ * it returns immediately.
+ */
+}
+
+static int cfi_mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ flash_info_t *fi = mtd->priv;
+
+ flash_set_verbose(0);
+ flash_protect(FLAG_PROTECT_SET, fi->start[0] + ofs,
+ fi->start[0] + ofs + len - 1, fi);
+ flash_set_verbose(1);
+
+ return 0;
+}
+
+static int cfi_mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ flash_info_t *fi = mtd->priv;
+
+ flash_set_verbose(0);
+ flash_protect(FLAG_PROTECT_CLEAR, fi->start[0] + ofs,
+ fi->start[0] + ofs + len - 1, fi);
+ flash_set_verbose(1);
+
+ return 0;
+}
+
+static int cfi_mtd_set_erasesize(struct mtd_info *mtd, flash_info_t *fi)
+{
+ int sect_size = 0;
+ int sect_size_old = 0;
+ int sect;
+ int regions = 0;
+ int numblocks = 0;
+ ulong offset;
+ ulong base_addr;
+
+ /*
+ * First detect the number of eraseregions so that we can allocate
+ * the array of eraseregions correctly
+ */
+ for (sect = 0; sect < fi->sector_count; sect++) {
+ if (sect_size_old != flash_sector_size(fi, sect))
+ regions++;
+ sect_size_old = flash_sector_size(fi, sect);
+ }
+
+ switch (regions) {
+ case 0:
+ return 1;
+ case 1: /* flash has uniform erase size */
+ mtd->numeraseregions = 0;
+ mtd->erasesize = sect_size_old;
+ return 0;
+ }
+
+ mtd->numeraseregions = regions;
+ mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info) * regions);
+ if (!mtd->eraseregions)
+ return 1;
+
+ /*
+ * Now detect the largest sector and fill the eraseregions
+ */
+ regions = 0;
+ base_addr = offset = fi->start[0];
+ sect_size_old = flash_sector_size(fi, 0);
+ for (sect = 0; sect < fi->sector_count; sect++) {
+ if (sect_size_old != flash_sector_size(fi, sect)) {
+ mtd->eraseregions[regions].offset = offset - base_addr;
+ mtd->eraseregions[regions].erasesize = sect_size_old;
+ mtd->eraseregions[regions].numblocks = numblocks;
+ /* Now start counting the next eraseregions */
+ numblocks = 0;
+ regions++;
+ offset = fi->start[sect];
+ }
+ numblocks++;
+
+ /*
+ * Select the largest sector size as erasesize (e.g. for UBI)
+ */
+ if (flash_sector_size(fi, sect) > sect_size)
+ sect_size = flash_sector_size(fi, sect);
+
+ sect_size_old = flash_sector_size(fi, sect);
+ }
+
+ /*
+ * Set the last region
+ */
+ mtd->eraseregions[regions].offset = offset - base_addr;
+ mtd->eraseregions[regions].erasesize = sect_size_old;
+ mtd->eraseregions[regions].numblocks = numblocks;
+
+ mtd->erasesize = sect_size;
+
+ return 0;
+}
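+
+/*
+ * Worked example (illustrative): a bank with 8 x 8 KiB boot sectors
+ * followed by 63 x 64 KiB sectors yields two erase regions
+ * (numblocks 8 and 63) and mtd->erasesize = 64 KiB, the largest
+ * sector size.
+ */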
+
+int cfi_mtd_init(void)
+{
+ struct mtd_info *mtd;
+ flash_info_t *fi;
+ int error, i;
+#ifdef CONFIG_MTD_CONCAT
+ int devices_found = 0;
+ struct mtd_info *mtd_list[CONFIG_SYS_MAX_FLASH_BANKS];
+#endif
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ fi = &flash_info[i];
+ mtd = &cfi_mtd_info[i];
+
+ memset(mtd, 0, sizeof(struct mtd_info));
+
+ error = cfi_mtd_set_erasesize(mtd, fi);
+ if (error)
+ continue;
+
+ sprintf(cfi_mtd_names[i], "nor%d", i);
+ mtd->name = cfi_mtd_names[i];
+ mtd->type = MTD_NORFLASH;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = fi->size;
+ mtd->writesize = 1;
+ mtd->writebufsize = mtd->writesize;
+
+ mtd->_erase = cfi_mtd_erase;
+ mtd->_read = cfi_mtd_read;
+ mtd->_write = cfi_mtd_write;
+ mtd->_sync = cfi_mtd_sync;
+ mtd->_lock = cfi_mtd_lock;
+ mtd->_unlock = cfi_mtd_unlock;
+ mtd->priv = fi;
+
+ if (add_mtd_device(mtd))
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_CONCAT
+ mtd_list[devices_found++] = mtd;
+#endif
+ }
+
+#ifdef CONFIG_MTD_CONCAT
+ if (devices_found > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ sprintf(c_mtd_name, "nor%d", devices_found);
+ mtd = mtd_concat_create(mtd_list, devices_found, c_mtd_name);
+
+ if (mtd == NULL)
+ return -ENXIO;
+
+ if (add_mtd_device(mtd))
+ return -ENOMEM;
+ }
+#endif /* CONFIG_MTD_CONCAT */
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/hbmc-am654.c b/roms/u-boot/drivers/mtd/hbmc-am654.c
new file mode 100644
index 000000000..c86e504da
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/hbmc-am654.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Author: Vignesh Raghavendra <vigneshr@ti.com>
+
+#include <common.h>
+#include <asm/io.h>
+#include <dm.h>
+#include <regmap.h>
+#include <syscon.h>
+#include <dm/device_compat.h>
+
+#define FSS_SYSC_REG 0x4
+
+#define HYPERBUS_CALIB_COUNT 25
+
+struct am654_hbmc_priv {
+ void __iomem *mmiobase;
+ bool calibrated;
+};
+
+/* Calibrate by looking for "QRY" string within the CFI space */
+static int am654_hyperbus_calibrate(struct udevice *dev)
+{
+ struct am654_hbmc_priv *priv = dev_get_priv(dev);
+ int count = HYPERBUS_CALIB_COUNT;
+ int pass_count = 0;
+ u16 qry[3];
+
+ if (priv->calibrated)
+ return 0;
+
+ writew(0xF0, priv->mmiobase);
+ writew(0x98, priv->mmiobase + 0xaa);
+
+ while (count--) {
+ qry[0] = readw(priv->mmiobase + 0x20);
+ qry[1] = readw(priv->mmiobase + 0x22);
+ qry[2] = readw(priv->mmiobase + 0x24);
+
+ if (qry[0] == 'Q' && qry[1] == 'R' && qry[2] == 'Y')
+ pass_count++;
+ else
+ pass_count = 0;
+ if (pass_count == 5)
+ break;
+ }
+ writew(0xF0, priv->mmiobase);
+ writew(0xFF, priv->mmiobase);
+
+ return pass_count == 5;
+}
+
+static int am654_select_hbmc(struct udevice *dev)
+{
+ struct regmap *regmap = syscon_get_regmap(dev_get_parent(dev));
+
+ return regmap_update_bits(regmap, FSS_SYSC_REG, 0x2, 0x2);
+}
+
+static int am654_hbmc_bind(struct udevice *dev)
+{
+ return dm_scan_fdt_dev(dev);
+}
+
+static int am654_hbmc_probe(struct udevice *dev)
+{
+ struct am654_hbmc_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ priv->mmiobase = devfdt_remap_addr_index(dev, 1);
+ if (dev_read_bool(dev, "mux-controls")) {
+ ret = am654_select_hbmc(dev);
+ if (ret) {
+ dev_err(dev, "Failed to select HBMC mux\n");
+ return ret;
+ }
+ }
+
+ if (!priv->calibrated) {
+ ret = am654_hyperbus_calibrate(dev);
+ if (!ret) {
+ dev_err(dev, "Calibration Failed\n");
+ return -EIO;
+ }
+ }
+ priv->calibrated = true;
+
+ return 0;
+}
+
+static const struct udevice_id am654_hbmc_dt_ids[] = {
+ {
+ .compatible = "ti,am654-hbmc",
+ },
+ { /* end of table */ }
+};
+
+U_BOOT_DRIVER(hbmc_am654) = {
+ .name = "hbmc-am654",
+ .id = UCLASS_MTD,
+ .of_match = am654_hbmc_dt_ids,
+ .probe = am654_hbmc_probe,
+ .bind = am654_hbmc_bind,
+ .priv_auto = sizeof(struct am654_hbmc_priv),
+};
diff --git a/roms/u-boot/drivers/mtd/jedec_flash.c b/roms/u-boot/drivers/mtd/jedec_flash.c
new file mode 100644
index 000000000..859c7fd4e
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/jedec_flash.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2007
+ * Michael Schwingen, <michael@schwingen.org>
+ *
+ * based in great part on jedec_probe.c from linux kernel:
+ * (C) 2000 Red Hat. GPL'd.
+ * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
+ */
+
+/* The DEBUG define must be before common to enable debugging */
+/*#define DEBUG*/
+
+#include <common.h>
+#include <flash.h>
+#include <log.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#define P_ID_AMD_STD CFI_CMDSET_AMD_LEGACY
+
+/* AMD */
+#define AM29DL800BB 0x22CB
+#define AM29DL800BT 0x224A
+
+#define AM29F400BB 0x22AB
+#define AM29F800BB 0x2258
+#define AM29F800BT 0x22D6
+#define AM29LV400BB 0x22BA
+#define AM29LV400BT 0x22B9
+#define AM29LV800BB 0x225B
+#define AM29LV800BT 0x22DA
+#define AM29LV160DT 0x22C4
+#define AM29LV160DB 0x2249
+#define AM29F017D 0x003D
+#define AM29F016D 0x00AD
+#define AM29F080 0x00D5
+#define AM29F040 0x00A4
+#define AM29LV040B 0x004F
+#define AM29F032B 0x0041
+#define AM29F002T 0x00B0
+
+/* SST */
+#define SST39LF800 0x2781
+#define SST39LF160 0x2782
+#define SST39VF1601 0x234b
+#define SST39LF512 0x00D4
+#define SST39LF010 0x00D5
+#define SST39LF020 0x00D6
+#define SST39LF040 0x00D7
+#define SST39SF010A 0x00B5
+#define SST39SF020A 0x00B6
+
+/* STM */
+#define STM29F400BB 0x00D6
+
+/* MXIC */
+#define MX29LV040 0x004F
+
+/* WINBOND */
+#define W39L040A 0x00D6
+
+/* AMIC */
+#define A29L040 0x0092
+
+/* EON */
+#define EN29LV040A 0x004F
+
+/*
+ * Unlock address sets for AMD command sets.
+ * Intel command sets use MTD_UADDR_UNNECESSARY.
+ * Each identifier, except MTD_UADDR_UNNECESSARY and
+ * MTD_UADDR_NOT_SUPPORTED, must be defined below in unlock_addrs[].
+ * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
+ * initialization need not initialize all of the
+ * unlock addresses for all bit widths.
+ */
+enum uaddr {
+ MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
+ MTD_UADDR_0x0555_0x02AA,
+ MTD_UADDR_0x0555_0x0AAA,
+ MTD_UADDR_0x5555_0x2AAA,
+ MTD_UADDR_0x0AAA_0x0555,
+ MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
+ MTD_UADDR_UNNECESSARY, /* Does not require any address */
+};
+
+
+struct unlock_addr {
+ u32 addr1;
+ u32 addr2;
+};
+
+
+/*
+ * I don't like the fact that the first entry in unlock_addrs[]
+ * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
+ * should not be used. The problem is that structures with
+ * initializers have extra fields initialized to 0. It is _very_
+ * desirable to have the unlock address entries for unsupported
+ * data widths automatically initialized - that means that
+ * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
+ * must go unused.
+ */
+static const struct unlock_addr unlock_addrs[] = {
+ [MTD_UADDR_NOT_SUPPORTED] = {
+ .addr1 = 0xffff,
+ .addr2 = 0xffff
+ },
+
+ [MTD_UADDR_0x0555_0x02AA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x02aa
+ },
+
+ [MTD_UADDR_0x0555_0x0AAA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x0aaa
+ },
+
+ [MTD_UADDR_0x5555_0x2AAA] = {
+ .addr1 = 0x5555,
+ .addr2 = 0x2aaa
+ },
+
+ [MTD_UADDR_0x0AAA_0x0555] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0555
+ },
+
+ [MTD_UADDR_DONT_CARE] = {
+ .addr1 = 0x0000, /* Doesn't matter which address */
+ .addr2 = 0x0000 /* is used - must be last entry */
+ },
+
+ [MTD_UADDR_UNNECESSARY] = {
+ .addr1 = 0x0000,
+ .addr2 = 0x0000
+ }
+};
+
+
+struct amd_flash_info {
+ const __u16 mfr_id;
+ const __u16 dev_id;
+ const char *name;
+ const int DevSize;
+ const int NumEraseRegions;
+ const int CmdSet;
+ const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
+ const ulong regions[6];
+};
+
+#define ERASEINFO(size, blocks) (((size) << 8) | ((blocks) - 1))
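+
+/*
+ * For example, ERASEINFO(0x10000, 8) = 0x01000007: eight 64 KiB
+ * sectors. fill_info() below decodes this as size = regions[i] >> 8
+ * and count = (regions[i] & 0xff) + 1.
+ */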
+
+#define SIZE_64KiB 16
+#define SIZE_128KiB 17
+#define SIZE_256KiB 18
+#define SIZE_512KiB 19
+#define SIZE_1MiB 20
+#define SIZE_2MiB 21
+#define SIZE_4MiB 22
+#define SIZE_8MiB 23
+
+static const struct amd_flash_info jedec_table[] = {
+#ifdef CONFIG_SYS_FLASH_LEGACY_256Kx8
+ {
+ .mfr_id = (u16)SST_MANUFACT,
+ .dev_id = SST39LF020,
+ .name = "SST 39LF020",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ },
+#endif
+#ifdef CONFIG_SYS_FLASH_LEGACY_512Kx8
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29LV040B,
+ .name = "AMD AM29LV040B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ },
+ {
+ .mfr_id = (u16)SST_MANUFACT,
+ .dev_id = SST39LF040,
+ .name = "SST 39LF040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ },
+ {
+ .mfr_id = (u16)STM_MANUFACT,
+ .dev_id = STM_ID_M29W040B,
+ .name = "ST Micro M29W040B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ },
+ {
+ .mfr_id = (u16)MX_MANUFACT,
+ .dev_id = MX29LV040,
+ .name = "MXIC MX29LV040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+ {
+ .mfr_id = (u16)WINB_MANUFACT,
+ .dev_id = W39L040A,
+ .name = "WINBOND W39L040A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+ {
+ .mfr_id = (u16)AMIC_MANUFACT,
+ .dev_id = A29L040,
+ .name = "AMIC A29L040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+ {
+ .mfr_id = (u16)EON_MANUFACT,
+ .dev_id = EN29LV040A,
+ .name = "EON EN29LV040A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 8),
+ }
+ },
+#endif
+#ifdef CONFIG_SYS_FLASH_LEGACY_512Kx16
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29F400BB,
+ .name = "AMD AM29F400BB",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x10000, 7),
+ }
+ },
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29LV400BB,
+ .name = "AMD AM29LV400BB",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7),
+ }
+ },
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29LV800BB,
+ .name = "AMD AM29LV800BB",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x10000, 15),
+ }
+ },
+ {
+ .mfr_id = (u16)AMD_MANUFACT,
+ .dev_id = AM29LV800BT,
+ .name = "AMD AM29LV800BT",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x04000, 1),
+ }
+ },
+ {
+ .mfr_id = (u16)MX_MANUFACT,
+ .dev_id = AM29LV800BT,
+ .name = "MXIC MX29LV800BT",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x04000, 1),
+ }
+ },
+ {
+ .mfr_id = (u16)EON_ALT_MANU,
+ .dev_id = AM29LV800BT,
+ .name = "EON EN29LV800BT",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x04000, 1),
+ }
+ },
+ {
+ .mfr_id = (u16)STM_MANUFACT,
+ .dev_id = STM29F400BB,
+ .name = "ST Micro M29F400BB",
+ .uaddr = {
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = CFI_CMDSET_AMD_LEGACY,
+ .NumEraseRegions = 4,
+ .regions = {
+ ERASEINFO(0x04000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x10000, 7),
+ }
+ },
+#endif
+};
+
+static inline void fill_info(flash_info_t *info, const struct amd_flash_info *jedec_entry, ulong base)
+{
+ int i,j;
+ int sect_cnt;
+ int size_ratio;
+ int total_size;
+ enum uaddr uaddr_idx;
+
+ size_ratio = info->portwidth / info->chipwidth;
+
+ debug("Found JEDEC Flash: %s\n", jedec_entry->name);
+ info->vendor = jedec_entry->CmdSet;
+ /* Todo: do we need device-specific timeouts? */
+ info->erase_blk_tout = 30000;
+ info->buffer_write_tout = 1000;
+ info->write_tout = 100;
+ info->name = jedec_entry->name;
+
+ /*
+ * Copy unlock addresses from the device table to the CFI info
+ * struct. This is just here because the addresses are in the
+ * table anyway - if the flash is not detected due to wrong
+ * unlock addresses, flash_detect_legacy would have to try all
+ * of them before we even get here.
+ */
+ switch(info->chipwidth) {
+ case FLASH_CFI_8BIT:
+ uaddr_idx = jedec_entry->uaddr[0];
+ break;
+ case FLASH_CFI_16BIT:
+ uaddr_idx = jedec_entry->uaddr[1];
+ break;
+ case FLASH_CFI_32BIT:
+ uaddr_idx = jedec_entry->uaddr[2];
+ break;
+ default:
+ uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
+ break;
+ }
+
+ debug("unlock address index %d\n", uaddr_idx);
+ info->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
+ info->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
+ debug("unlock addresses are 0x%lx/0x%lx\n",
+ info->addr_unlock1, info->addr_unlock2);
+
+ sect_cnt = 0;
+ total_size = 0;
+ for (i = 0; i < jedec_entry->NumEraseRegions; i++) {
+ ulong erase_region_size = jedec_entry->regions[i] >> 8;
+ ulong erase_region_count = (jedec_entry->regions[i] & 0xff) + 1;
+
+ total_size += erase_region_size * erase_region_count;
+ debug("erase_region_count = %ld erase_region_size = %ld\n",
+ erase_region_count, erase_region_size);
+ for (j = 0; j < erase_region_count; j++) {
+ if (sect_cnt >= CONFIG_SYS_MAX_FLASH_SECT) {
+ printf("ERROR: too many flash sectors\n");
+ break;
+ }
+ info->start[sect_cnt] = base;
+ base += (erase_region_size * size_ratio);
+ sect_cnt++;
+ }
+ }
+ info->sector_count = sect_cnt;
+ info->size = total_size * size_ratio;
+}
+
+/*-----------------------------------------------------------------------
+ * Match JEDEC ids against the table. If a match is found, fill the flash_info entry.
+ */
+int jedec_flash_match(flash_info_t *info, ulong base)
+{
+ int ret = 0;
+ int i;
+ ulong mask = 0xFFFF;
+ if (info->chipwidth == 1)
+ mask = 0xFF;
+
+ for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
+ if ((jedec_table[i].mfr_id & mask) == (info->manufacturer_id & mask) &&
+ (jedec_table[i].dev_id & mask) == (info->device_id & mask)) {
+ fill_info(info, &jedec_table[i], base);
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
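+
+/*
+ * For example (illustrative): on an 8-bit chip (mask 0xFF), a probed
+ * manufacturer id of 0xBF and device id of 0xD7 match the SST 39LF040
+ * entry above when CONFIG_SYS_FLASH_LEGACY_512Kx8 is enabled.
+ */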
diff --git a/roms/u-boot/drivers/mtd/mtd-uclass.c b/roms/u-boot/drivers/mtd/mtd-uclass.c
new file mode 100644
index 000000000..9f5f672ba
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/mtd-uclass.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2015 Thomas Chou <thomas@wytron.com.tw>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dm/device-internal.h>
+#include <errno.h>
+#include <mtd.h>
+
+/**
+ * mtd_probe - Probe the device @dev if not already done
+ *
+ * @dev: U-Boot device to probe
+ *
+ * @return 0 on success, an error otherwise.
+ */
+int mtd_probe(struct udevice *dev)
+{
+ if (device_active(dev))
+ return 0;
+
+ return device_probe(dev);
+}
+
+/*
+ * Implement an MTD uclass which should include most flash drivers.
+ * The per-device private data is a struct mtd_info.
+ */
+
+UCLASS_DRIVER(mtd) = {
+ .id = UCLASS_MTD,
+ .name = "mtd",
+ .per_device_auto = sizeof(struct mtd_info),
+};
diff --git a/roms/u-boot/drivers/mtd/mtd_uboot.c b/roms/u-boot/drivers/mtd/mtd_uboot.c
new file mode 100644
index 000000000..c53ec657a
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/mtd_uboot.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2014
+ * Heiko Schocher, DENX Software Engineering, hs@denx.de.
+ */
+#include <common.h>
+#include <env.h>
+#include <log.h>
+#include <malloc.h>
+#include <dm/device.h>
+#include <dm/uclass-internal.h>
+#include <linux/err.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <asm/global_data.h>
+#include <mtd.h>
+
+#define MTD_NAME_MAX_LEN 20
+
+void board_mtdparts_default(const char **mtdids, const char **mtdparts);
+
+static const char *get_mtdids(void)
+{
+ __maybe_unused const char *mtdparts = NULL;
+ const char *mtdids = env_get("mtdids");
+
+ if (mtdids)
+ return mtdids;
+
+#if defined(CONFIG_SYS_MTDPARTS_RUNTIME)
+ board_mtdparts_default(&mtdids, &mtdparts);
+#elif defined(MTDIDS_DEFAULT)
+ mtdids = MTDIDS_DEFAULT;
+#elif defined(CONFIG_MTDIDS_DEFAULT)
+ mtdids = CONFIG_MTDIDS_DEFAULT;
+#endif
+
+ if (mtdids)
+ env_set("mtdids", mtdids);
+
+ return mtdids;
+}
+
+/**
+ * mtd_search_alternate_name - Search an alternate name for @mtdname using
+ * the legacy mtdids environment variable.
+ *
+ * The mtdids string is a list of comma-separated 'dev_id=mtd_id' tuples.
+ * Check if one of the mtd_id entries matches mtdname; in that case save
+ * dev_id in altname.
+ *
+ * @mtdname: Current MTD device name
+ * @altname: Alternate name to return
+ * @max_len: Length of the alternate name buffer
+ *
+ * @return 0 on success, an error otherwise.
+ */
+int mtd_search_alternate_name(const char *mtdname, char *altname,
+ unsigned int max_len)
+{
+ const char *mtdids, *equal, *comma, *dev_id, *mtd_id;
+ int dev_id_len, mtd_id_len;
+
+ mtdids = get_mtdids();
+ if (!mtdids)
+ return -EINVAL;
+
+ do {
+ /* Find the '=' sign */
+ dev_id = mtdids;
+ equal = strchr(dev_id, '=');
+ if (!equal)
+ break;
+ dev_id_len = equal - mtdids;
+ mtd_id = equal + 1;
+
+ /* Find the end of the tuple */
+ comma = strchr(mtdids, ',');
+ if (comma)
+ mtd_id_len = comma - mtd_id;
+ else
+ mtd_id_len = &mtdids[strlen(mtdids)] - mtd_id + 1;
+
+ if (!dev_id_len || !mtd_id_len)
+ return -EINVAL;
+
+ if (dev_id_len + 1 > max_len)
+ continue;
+
+ /* Compare the name we search with the current mtd_id */
+ if (!strncmp(mtdname, mtd_id, mtd_id_len)) {
+ strncpy(altname, dev_id, dev_id_len);
+ altname[dev_id_len] = 0;
+
+ return 0;
+ }
+
+ /* Go to the next tuple */
+ mtdids = comma + 1;
+ } while (comma);
+
+ return -EINVAL;
+}
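+
+/*
+ * For example (illustrative): with mtdids = "nor0=mynor,nand0=mynand",
+ * mtd_search_alternate_name("mynand", altname, sizeof(altname)) stores
+ * "nand0" in altname and returns 0.
+ */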
+
+#if IS_ENABLED(CONFIG_DM_MTD)
+static void mtd_probe_uclass_mtd_devs(void)
+{
+ struct udevice *dev;
+ int idx = 0;
+
+ /* Probe devices with DM compliant drivers */
+ while (!uclass_find_device(UCLASS_MTD, idx, &dev) && dev) {
+ mtd_probe(dev);
+ idx++;
+ }
+}
+#else
+static void mtd_probe_uclass_mtd_devs(void) { }
+#endif
+
+#if defined(CONFIG_MTD_PARTITIONS)
+
+#define MTDPARTS_MAXLEN 512
+
+static const char *get_mtdparts(void)
+{
+ __maybe_unused const char *mtdids = NULL;
+ static char tmp_parts[MTDPARTS_MAXLEN];
+ const char *mtdparts = NULL;
+
+ if (gd->flags & GD_FLG_ENV_READY)
+ mtdparts = env_get("mtdparts");
+ else if (env_get_f("mtdparts", tmp_parts, sizeof(tmp_parts)) != -1)
+ mtdparts = tmp_parts;
+
+ if (mtdparts)
+ return mtdparts;
+
+#if defined(CONFIG_SYS_MTDPARTS_RUNTIME)
+ board_mtdparts_default(&mtdids, &mtdparts);
+#elif defined(MTDPARTS_DEFAULT)
+ mtdparts = MTDPARTS_DEFAULT;
+#elif defined(CONFIG_MTDPARTS_DEFAULT)
+ mtdparts = CONFIG_MTDPARTS_DEFAULT;
+#endif
+
+ if (mtdparts)
+ env_set("mtdparts", mtdparts);
+
+ return mtdparts;
+}
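+
+/*
+ * Illustration only (names and sizes are hypothetical): a mtdparts string
+ * as consumed by mtd_probe_devices() below looks like
+ *
+ *   mtdparts=nor0:256k(u-boot),64k(env),-(rootfs);nand0:-(ubi)
+ *
+ * Each ';'-separated block names one MTD device, followed by ':' and a
+ * comma-separated list of <size>(<name>) partitions, '-' meaning
+ * "the remaining space".
+ */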
+
+static int mtd_del_parts(struct mtd_info *mtd, bool quiet)
+{
+ int ret;
+
+ if (!mtd_has_partitions(mtd))
+ return 0;
+
+ /* do not delete partitions if they are in use. */
+ if (mtd_partitions_used(mtd)) {
+ if (!quiet)
+ printf("\"%s\" partitions still in use, can't delete them\n",
+ mtd->name);
+ return -EACCES;
+ }
+
+ ret = del_mtd_partitions(mtd);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static bool mtd_del_all_parts_failed;
+
+static void mtd_del_all_parts(void)
+{
+ struct mtd_info *mtd;
+ int ret = 0;
+
+ mtd_del_all_parts_failed = false;
+
+ /*
+ * It is not safe to remove entries from the mtd_for_each_device loop
+ * as it uses idr indexes and the partitions removal is done in bulk
+ * (all partitions of one device at the same time), so break and
+ * iterate from the start each time partitions are found and deleted.
+ */
+ do {
+ mtd_for_each_device(mtd) {
+ ret = mtd_del_parts(mtd, false);
+ if (ret > 0)
+ break;
+ else if (ret < 0)
+ mtd_del_all_parts_failed = true;
+ }
+ } while (ret > 0);
+}
+
+int mtd_probe_devices(void)
+{
+ static char *old_mtdparts;
+ static char *old_mtdids;
+ const char *mtdparts = get_mtdparts();
+ const char *mtdids = get_mtdids();
+ const char *mtdparts_next = mtdparts;
+ struct mtd_info *mtd;
+
+ mtd_probe_uclass_mtd_devs();
+
+ /*
+ * Check if mtdparts/mtdids changed, if the MTD dev list was updated
+ * or if our previous attempt to delete existing partitions failed.
+ * In any of these cases we want to update the partitions, otherwise,
+ * everything is up-to-date and we can return 0 directly.
+ */
+ if ((!mtdparts && !old_mtdparts && !mtdids && !old_mtdids) ||
+ (mtdparts && old_mtdparts && mtdids && old_mtdids &&
+ !mtd_dev_list_updated() && !mtd_del_all_parts_failed &&
+ !strcmp(mtdparts, old_mtdparts) &&
+ !strcmp(mtdids, old_mtdids)))
+ return 0;
+
+ /* Update the local copy of mtdparts */
+ free(old_mtdparts);
+ free(old_mtdids);
+ old_mtdparts = strdup(mtdparts);
+ old_mtdids = strdup(mtdids);
+
+ /*
+ * Remove all old parts. Note that partition removal can fail in case
+ * one of the partitions is still being used by an MTD user, so this
+ * does not guarantee that all old partitions are gone.
+ */
+ mtd_del_all_parts();
+
+ /*
+ * Call mtd_dev_list_updated() to clear updates generated by our own
+ * parts removal loop.
+ */
+ mtd_dev_list_updated();
+
+ /* If either mtdparts or mtdids is empty, then exit */
+ if (!mtdparts || !mtdids)
+ return 0;
+
+ /* Start the parsing by ignoring the extra 'mtdparts=' prefix, if any */
+ if (!strncmp(mtdparts, "mtdparts=", sizeof("mtdparts=") - 1))
+ mtdparts += 9;
+
+ /* For each MTD device in mtdparts */
+ for (; mtdparts[0] != '\0'; mtdparts = mtdparts_next) {
+ char mtd_name[MTD_NAME_MAX_LEN], *colon;
+ struct mtd_partition *parts;
+ unsigned int mtd_name_len;
+ int nparts, ret;
+
+ mtdparts_next = strchr(mtdparts, ';');
+ if (!mtdparts_next)
+ mtdparts_next = mtdparts + strlen(mtdparts);
+ else
+ mtdparts_next++;
+
+ colon = strchr(mtdparts, ':');
+ if (colon > mtdparts_next)
+ colon = NULL;
+
+ if (!colon) {
+ printf("Wrong mtdparts: %s\n", mtdparts);
+ return -EINVAL;
+ }
+
+ mtd_name_len = (unsigned int)(colon - mtdparts);
+ if (mtd_name_len + 1 > sizeof(mtd_name)) {
+ printf("MTD name too long: %s\n", mtdparts);
+ return -EINVAL;
+ }
+
+ strncpy(mtd_name, mtdparts, mtd_name_len);
+ mtd_name[mtd_name_len] = '\0';
+ /* Move the pointer forward (including the ':') */
+ mtdparts += mtd_name_len + 1;
+ mtd = get_mtd_device_nm(mtd_name);
+ if (IS_ERR_OR_NULL(mtd)) {
+ char linux_name[MTD_NAME_MAX_LEN];
+
+ /*
+ * The MTD device named "mtd_name" does not exist. Try
+ * to find a correspondence with an MTD device having
+ * the same type and number as defined in the mtdids.
+ */
+ debug("No device named %s\n", mtd_name);
+ ret = mtd_search_alternate_name(mtd_name, linux_name,
+ MTD_NAME_MAX_LEN);
+ if (!ret)
+ mtd = get_mtd_device_nm(linux_name);
+
+ /*
+ * If no device could be found, move the mtdparts
+ * pointer forward until the next set of partitions.
+ */
+ if (ret || IS_ERR_OR_NULL(mtd)) {
+ printf("Could not find a valid device for %s\n",
+ mtd_name);
+ mtdparts = mtdparts_next;
+ continue;
+ }
+ }
+
+ /*
+ * Call mtd_del_parts() again, even if it's already been called
+ * in mtd_del_all_parts(). We need to know if old partitions are
+ * still around (because they are still being used by someone),
+ * and if they are, we shouldn't create new partitions, so just
+ * skip this MTD device and try the next one.
+ */
+ ret = mtd_del_parts(mtd, true);
+ if (ret < 0)
+ continue;
+
+ /*
+ * Parse the MTD device partitions. It will update the mtdparts
+ * pointer, create an array of parts (that must be freed), and
+ * return the number of partition structures in the array.
+ */
+ ret = mtd_parse_partitions(mtd, &mtdparts, &parts, &nparts);
+ if (ret) {
+ printf("Could not parse device %s\n", mtd->name);
+ put_mtd_device(mtd);
+ return -EINVAL;
+ }
+
+ if (!nparts)
+ continue;
+
+ /* Create the new MTD partitions */
+ add_mtd_partitions(mtd, parts, nparts);
+
+ /* Free the structures allocated during the parsing */
+ mtd_free_parsed_partitions(parts, nparts);
+
+ put_mtd_device(mtd);
+ }
+
+ /*
+ * Call mtd_dev_list_updated() to clear updates generated by our own
+ * parts registration loop.
+ */
+ mtd_dev_list_updated();
+
+ return 0;
+}
+#else
+int mtd_probe_devices(void)
+{
+ mtd_probe_uclass_mtd_devs();
+
+ return 0;
+}
+#endif /* defined(CONFIG_MTD_PARTITIONS) */
diff --git a/roms/u-boot/drivers/mtd/mtdconcat.c b/roms/u-boot/drivers/mtd/mtdconcat.c
new file mode 100644
index 000000000..684bc9499
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/mtdconcat.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MTD device concatenation layer
+ *
+ * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
+ * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * NAND support by Christian Gan <cgan@iders.ca>
+ *
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/backing-dev.h>
+#include <asm/div64.h>
+#else
+#include <div64.h>
+#include <linux/bug.h>
+#include <linux/compat.h>
+#endif
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/concat.h>
+
+#include <ubi_uboot.h>
+
+/*
+ * Our storage structure:
+ * subdev points to an array of pointers to struct mtd_info objects,
+ * which is allocated along with this structure.
+ */
+struct mtd_concat {
+ struct mtd_info mtd;
+ int num_subdev;
+ struct mtd_info **subdev;
+};
+
+/*
+ * how to calculate the size required for the above structure,
+ * including the pointer array subdev points to:
+ */
+#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
+ ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
+
+/*
+ * Given a pointer to the MTD object in the mtd_concat structure,
+ * we can retrieve the pointer to that structure with this macro.
+ */
+#define CONCAT(x) ((struct mtd_concat *)(x))
+
+/*
+ * MTD methods which look up the relevant subdevice, translate the
+ * effective address and pass through to the subdevice.
+ */
+
+static int
+concat_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int ret = 0, err;
+ int i;
+
+#ifdef __UBOOT__
+ *retlen = 0;
+#endif
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
+ from -= subdev->size;
+ continue;
+ }
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
+ else
+ /* Entire transaction goes into this subdev */
+ size = len;
+
+ err = mtd_read(subdev, from, size, &retsize, buf);
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (mtd_is_eccerr(err)) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (mtd_is_bitflip(err)) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ return ret;
+
+ buf += size;
+ from = 0;
+ }
+ return -EINVAL;
+}
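+
+/*
+ * Worked example (sizes hypothetical): with two 8 MiB subdevices, a 4 MiB
+ * read at offset 6 MiB takes 2 MiB from the end of subdev[0], then 'from'
+ * is reset to 0 and the remaining 2 MiB come from the start of subdev[1].
+ */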
+
+static int
+concat_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+#ifdef __UBOOT__
+ *retlen = 0;
+#endif
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ size = 0;
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ err = mtd_write(subdev, to, size, &retsize, buf);
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+#ifndef __UBOOT__
+static int
+concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t * retlen)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct kvec *vecs_copy;
+ unsigned long entry_low, entry_high;
+ size_t total_len = 0;
+ int i;
+ int err = -EINVAL;
+
+ /* Calculate total length of data */
+ for (i = 0; i < count; i++)
+ total_len += vecs[i].iov_len;
+
+ /* Check alignment */
+ if (mtd->writesize > 1) {
+ uint64_t __to = to;
+ if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
+ return -EINVAL;
+ }
+
+ /* make a copy of vecs */
+ vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
+ if (!vecs_copy)
+ return -ENOMEM;
+
+ entry_low = 0;
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, wsize, retsize, old_iov_len;
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ size = min_t(uint64_t, total_len, subdev->size - to);
+ wsize = size; /* store for future use */
+
+ entry_high = entry_low;
+ while (entry_high < count) {
+ if (size <= vecs_copy[entry_high].iov_len)
+ break;
+ size -= vecs_copy[entry_high++].iov_len;
+ }
+
+ old_iov_len = vecs_copy[entry_high].iov_len;
+ vecs_copy[entry_high].iov_len = size;
+
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to, &retsize);
+
+ vecs_copy[entry_high].iov_len = old_iov_len - size;
+ vecs_copy[entry_high].iov_base += size;
+
+ entry_low = entry_high;
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ total_len -= wsize;
+
+ if (total_len == 0)
+ break;
+
+ err = -EINVAL;
+ to = 0;
+ }
+
+ kfree(vecs_copy);
+ return err;
+}
+#endif
+
+static int
+concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err, ret = 0;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (from >= subdev->size) {
+ from -= subdev->size;
+ continue;
+ }
+
+ /* partial read ? */
+ if (from + devops.len > subdev->size)
+ devops.len = subdev->size - from;
+
+ err = mtd_read_oob(subdev, from, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+
+ /* Save information about bitflips! */
+ if (unlikely(err)) {
+ if (mtd_is_eccerr(err)) {
+ mtd->ecc_stats.failed++;
+ ret = err;
+ } else if (mtd_is_bitflip(err)) {
+ mtd->ecc_stats.corrected++;
+ /* Do not overwrite -EBADMSG !! */
+ if (!ret)
+ ret = err;
+ } else
+ return err;
+ }
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return ret;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return ret;
+ devops.oobbuf += ops->oobretlen;
+ }
+
+ from = 0;
+ }
+ return -EINVAL;
+}
+
+static int
+concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_oob_ops devops = *ops;
+ int i, err;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (to >= subdev->size) {
+ to -= subdev->size;
+ continue;
+ }
+
+ /* partial write ? */
+ if (to + devops.len > subdev->size)
+ devops.len = subdev->size - to;
+
+ err = mtd_write_oob(subdev, to, &devops);
+ ops->retlen += devops.retlen;
+ ops->oobretlen += devops.oobretlen;
+ if (err)
+ return err;
+
+ if (devops.datbuf) {
+ devops.len = ops->len - ops->retlen;
+ if (!devops.len)
+ return 0;
+ devops.datbuf += devops.retlen;
+ }
+ if (devops.oobbuf) {
+ devops.ooblen = ops->ooblen - ops->oobretlen;
+ if (!devops.ooblen)
+ return 0;
+ devops.oobbuf += devops.oobretlen;
+ }
+ to = 0;
+ }
+ return -EINVAL;
+}
+
+static void concat_erase_callback(struct erase_info *instr)
+{
+ /* Nothing to do here in U-Boot */
+#ifndef __UBOOT__
+ wake_up((wait_queue_head_t *) instr->priv);
+#endif
+}
+
+static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+ int err;
+ wait_queue_head_t waitq;
+ DECLARE_WAITQUEUE(wait, current);
+
+ /*
+ * This code was stol^H^H^H^Hinspired by mtdchar.c
+ */
+ init_waitqueue_head(&waitq);
+
+ erase->mtd = mtd;
+ erase->callback = concat_erase_callback;
+ erase->priv = (unsigned long) &waitq;
+
+ /*
+ * FIXME: Allow INTERRUPTIBLE. Which means
+ * not having the wait_queue head on the stack.
+ */
+ err = mtd_erase(mtd, erase);
+ if (!err) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&waitq, &wait);
+ if (erase->state != MTD_ERASE_DONE
+ && erase->state != MTD_ERASE_FAILED)
+ schedule();
+ remove_wait_queue(&waitq, &wait);
+ set_current_state(TASK_RUNNING);
+
+ err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
+ }
+ return err;
+}
+
+static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ struct mtd_info *subdev;
+ int i, err;
+ uint64_t length, offset = 0;
+ struct erase_info *erase;
+
+ /*
+ * Check for proper erase block alignment of the to-be-erased area.
+ * It is easier to do this based on the super device's erase
+ * region info rather than looking at each particular sub-device
+ * in turn.
+ */
+ if (!concat->mtd.numeraseregions) {
+ /* the easy case: device has uniform erase block size */
+ if (instr->addr & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ if (instr->len & (concat->mtd.erasesize - 1))
+ return -EINVAL;
+ } else {
+ /* device has variable erase size */
+ struct mtd_erase_region_info *erase_regions =
+ concat->mtd.eraseregions;
+
+ /*
+ * Find the erase region where the to-be-erased area begins:
+ */
+ for (i = 0; i < concat->mtd.numeraseregions &&
+ instr->addr >= erase_regions[i].offset; i++) ;
+ --i;
+
+ /*
+ * Now erase_regions[i] is the region in which the
+ * to-be-erased area begins. Verify that the starting
+ * offset is aligned to this region's erase size:
+ */
+ if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
+ return -EINVAL;
+
+ /*
+ * now find the erase region where the to-be-erased area ends:
+ */
+ for (; i < concat->mtd.numeraseregions &&
+ (instr->addr + instr->len) >= erase_regions[i].offset;
+ ++i) ;
+ --i;
+ /*
+ * check if the ending offset is aligned to this region's erase size
+ */
+ if (i < 0 || ((instr->addr + instr->len) &
+ (erase_regions[i].erasesize - 1)))
+ return -EINVAL;
+ }
+
+ /* make a local copy of instr to avoid modifying the caller's struct */
+ erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
+
+ if (!erase)
+ return -ENOMEM;
+
+ *erase = *instr;
+ length = instr->len;
+
+ /*
+ * find the subdevice where the to-be-erased area begins, adjust
+ * starting offset to be relative to the subdevice start
+ */
+ for (i = 0; i < concat->num_subdev; i++) {
+ subdev = concat->subdev[i];
+ if (subdev->size <= erase->addr) {
+ erase->addr -= subdev->size;
+ offset += subdev->size;
+ } else {
+ break;
+ }
+ }
+
+ /* must never happen since size limit has been verified above */
+ BUG_ON(i >= concat->num_subdev);
+
+ /* now do the erase: */
+ err = 0;
+ for (; length > 0; i++) {
+ /* loop for all subdevices affected by this request */
+ subdev = concat->subdev[i]; /* get current subdevice */
+
+ /* limit length to subdevice's size: */
+ if (erase->addr + length > subdev->size)
+ erase->len = subdev->size - erase->addr;
+ else
+ erase->len = length;
+
+ length -= erase->len;
+ if ((err = concat_dev_erase(subdev, erase))) {
+ /* sanity check: should never happen since
+ * block alignment has been checked above */
+ BUG_ON(err == -EINVAL);
+ if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr = erase->fail_addr + offset;
+ break;
+ }
+ /*
+ * erase->addr specifies the offset of the area to be
+ * erased *within the current subdevice*. It can be
+ * non-zero only the first time through this loop, i.e.
+ * for the first subdevice where blocks need to be erased.
+ * All the following erases must begin at the start of the
+ * current subdevice, i.e. at offset zero.
+ */
+ erase->addr = 0;
+ offset += subdev->size;
+ }
+ instr->state = erase->state;
+ kfree(erase);
+ if (err)
+ return err;
+
+ if (instr->callback)
+ instr->callback(instr);
+ return 0;
+}
+
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = mtd_lock(subdev, ofs, size);
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ uint64_t size;
+
+ if (ofs >= subdev->size) {
+ size = 0;
+ ofs -= subdev->size;
+ continue;
+ }
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
+ else
+ size = len;
+
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
+
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ ofs = 0;
+ }
+
+ return err;
+}
+
+static void concat_sync(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ mtd_sync(subdev);
+ }
+}
+
+#ifndef __UBOOT__
+static int concat_suspend(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, rc = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ if ((rc = mtd_suspend(subdev)) < 0)
+ return rc;
+ }
+ return rc;
+}
+
+static void concat_resume(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ mtd_resume(subdev);
+ }
+}
+#endif
+
+static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, res = 0;
+
+ if (!mtd_can_have_bb(concat->subdev[0]))
+ return res;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ res = mtd_block_isbad(subdev, ofs);
+ break;
+ }
+
+ return res;
+}
+
+static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i, err = -EINVAL;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (ofs >= subdev->size) {
+ ofs -= subdev->size;
+ continue;
+ }
+
+ err = mtd_block_markbad(subdev, ofs);
+ if (!err)
+ mtd->ecc_stats.badblocks++;
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * try to support NOMMU mmaps on concatenated devices
+ * - we don't support subdev spanning as we can't guarantee it'll work
+ */
+static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int i;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+
+ if (offset >= subdev->size) {
+ offset -= subdev->size;
+ continue;
+ }
+
+ return mtd_get_unmapped_area(subdev, len, offset, flags);
+ }
+
+ return (unsigned long) -ENOSYS;
+}
+
+/*
+ * This function constructs a virtual MTD device by concatenating
+ * num_devs MTD devices. A pointer to the new device object is
+ * stored to *new_dev upon success. This function does _not_
+ * register any devices: this is the caller's responsibility.
+ */
+struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+#ifndef __UBOOT__
+ const char *name)
+#else
+ char *name)
+#endif
+{ /* name for the new device */
+ int i;
+ size_t size;
+ struct mtd_concat *concat;
+ uint32_t max_erasesize, curr_erasesize;
+ int num_erase_region;
+ int max_writebufsize = 0;
+
+ debug("Concatenating MTD devices:\n");
+ for (i = 0; i < num_devs; i++)
+ printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
+ debug("into device \"%s\"\n", name);
+
+ /* allocate the device structure */
+ size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
+ concat = kzalloc(size, GFP_KERNEL);
+ if (!concat) {
+ printk
+ ("memory allocation error while creating concatenated device \"%s\"\n",
+ name);
+ return NULL;
+ }
+ concat->subdev = (struct mtd_info **) (concat + 1);
+
+ /*
+ * Set up the new "super" device's MTD object structure, check for
+ * incompatibilities between the subdevices.
+ */
+ concat->mtd.type = subdev[0]->type;
+ concat->mtd.flags = subdev[0]->flags;
+ concat->mtd.size = subdev[0]->size;
+ concat->mtd.erasesize = subdev[0]->erasesize;
+ concat->mtd.writesize = subdev[0]->writesize;
+
+ for (i = 0; i < num_devs; i++)
+ if (max_writebufsize < subdev[i]->writebufsize)
+ max_writebufsize = subdev[i]->writebufsize;
+ concat->mtd.writebufsize = max_writebufsize;
+
+ concat->mtd.subpage_sft = subdev[0]->subpage_sft;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.oobavail = subdev[0]->oobavail;
+#ifndef __UBOOT__
+ if (subdev[0]->_writev)
+ concat->mtd._writev = concat_writev;
+#endif
+ if (subdev[0]->_read_oob)
+ concat->mtd._read_oob = concat_read_oob;
+ if (subdev[0]->_write_oob)
+ concat->mtd._write_oob = concat_write_oob;
+ if (subdev[0]->_block_isbad)
+ concat->mtd._block_isbad = concat_block_isbad;
+ if (subdev[0]->_block_markbad)
+ concat->mtd._block_markbad = concat_block_markbad;
+
+ concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
+
+#ifndef __UBOOT__
+ concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
+#endif
+
+ concat->subdev[0] = subdev[0];
+
+ for (i = 1; i < num_devs; i++) {
+ if (concat->mtd.type != subdev[i]->type) {
+ kfree(concat);
+ printk("Incompatible device type on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ if (concat->mtd.flags != subdev[i]->flags) {
+ /*
+ * Expect all flags except MTD_WRITEABLE to be
+ * equal on all subdevices.
+ */
+ if ((concat->mtd.flags ^ subdev[i]->
+ flags) & ~MTD_WRITEABLE) {
+ kfree(concat);
+ printk("Incompatible device flags on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ } else
+ /* if writeable attribute differs,
+ make super device writeable */
+ concat->mtd.flags |=
+ subdev[i]->flags & MTD_WRITEABLE;
+ }
+
+#ifndef __UBOOT__
+ /* only permit direct mapping if the BDIs are all the same
+ * - copy-mapping is still permitted
+ */
+ if (concat->mtd.backing_dev_info !=
+ subdev[i]->backing_dev_info)
+ concat->mtd.backing_dev_info =
+ &default_backing_dev_info;
+#endif
+
+ concat->mtd.size += subdev[i]->size;
+ concat->mtd.ecc_stats.badblocks +=
+ subdev[i]->ecc_stats.badblocks;
+ if (concat->mtd.writesize != subdev[i]->writesize ||
+ concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+ !concat->mtd._read_oob != !subdev[i]->_read_oob ||
+ !concat->mtd._write_oob != !subdev[i]->_write_oob) {
+ kfree(concat);
+ printk("Incompatible OOB or ECC data on \"%s\"\n",
+ subdev[i]->name);
+ return NULL;
+ }
+ concat->subdev[i] = subdev[i];
+
+ }
+
+ concat->mtd.ecclayout = subdev[0]->ecclayout;
+
+ concat->num_subdev = num_devs;
+ concat->mtd.name = name;
+
+ concat->mtd._erase = concat_erase;
+ concat->mtd._read = concat_read;
+ concat->mtd._write = concat_write;
+ concat->mtd._sync = concat_sync;
+ concat->mtd._lock = concat_lock;
+ concat->mtd._unlock = concat_unlock;
+#ifndef __UBOOT__
+ concat->mtd._suspend = concat_suspend;
+ concat->mtd._resume = concat_resume;
+#endif
+ concat->mtd._get_unmapped_area = concat_get_unmapped_area;
+
+ /*
+ * Combine the erase block size info of the subdevices:
+ *
+ * first, walk the map of the new device and see how
+ * many changes in erase size we have
+ */
+ max_erasesize = curr_erasesize = subdev[0]->erasesize;
+ num_erase_region = 1;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /* if it differs from the last subdevice's erase size, count it */
+ ++num_erase_region;
+ curr_erasesize = subdev[i]->erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].erasesize !=
+ curr_erasesize) {
+ ++num_erase_region;
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ if (curr_erasesize > max_erasesize)
+ max_erasesize = curr_erasesize;
+ }
+ }
+ }
+ }
+
+ if (num_erase_region == 1) {
+ /*
+ * All subdevices have the same uniform erase size.
+ * This is easy:
+ */
+ concat->mtd.erasesize = curr_erasesize;
+ concat->mtd.numeraseregions = 0;
+ } else {
+ uint64_t tmp64;
+
+ /*
+ * erase block size varies across the subdevices: allocate
+ * space to store the data describing the variable erase regions
+ */
+ struct mtd_erase_region_info *erase_region_p;
+ uint64_t begin, position;
+
+ concat->mtd.erasesize = max_erasesize;
+ concat->mtd.numeraseregions = num_erase_region;
+ concat->mtd.eraseregions = erase_region_p =
+ kmalloc(num_erase_region *
+ sizeof (struct mtd_erase_region_info), GFP_KERNEL);
+ if (!erase_region_p) {
+ kfree(concat);
+ printk
+ ("memory allocation error while creating erase region list"
+ " for device \"%s\"\n", name);
+ return NULL;
+ }
+
+ /*
+ * walk the map of the new device once more and fill in
+ * the erase region info:
+ */
+ curr_erasesize = subdev[0]->erasesize;
+ begin = position = 0;
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /*
+ * fill in an mtd_erase_region_info structure for the area
+ * we have walked so far:
+ */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize = subdev[i]->erasesize;
+ ++erase_region_p;
+ }
+ position += subdev[i]->size;
+ } else {
+ /* current subdevice has variable erase size */
+ int j;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].
+ erasesize != curr_erasesize) {
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ begin = position;
+
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ ++erase_region_p;
+ }
+ position +=
+ subdev[i]->eraseregions[j].
+ numblocks * (uint64_t)curr_erasesize;
+ }
+ }
+ }
+ /* Now write the final entry */
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize = curr_erasesize;
+ tmp64 = position - begin;
+ do_div(tmp64, curr_erasesize);
+ erase_region_p->numblocks = tmp64;
+ }
+
+ return &concat->mtd;
+}
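+
+/*
+ * Usage sketch (device pointers hypothetical); registration is left to the
+ * caller, as noted above:
+ *
+ *   struct mtd_info *devs[2] = { mtd0, mtd1 };
+ *   struct mtd_info *super = mtd_concat_create(devs, 2, "concat0");
+ *
+ *   if (super)
+ *       add_mtd_device(super);
+ */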
+
+/*
+ * This function destroys an MTD object obtained from mtd_concat_create()
+ */
+
+void mtd_concat_destroy(struct mtd_info *mtd)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ if (concat->mtd.numeraseregions)
+ kfree(concat->mtd.eraseregions);
+ kfree(concat);
+}
+
+EXPORT_SYMBOL(mtd_concat_create);
+EXPORT_SYMBOL(mtd_concat_destroy);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
+MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
diff --git a/roms/u-boot/drivers/mtd/mtdcore.c b/roms/u-boot/drivers/mtd/mtdcore.c
new file mode 100644
index 000000000..0d1f94c6c
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/mtdcore.c
@@ -0,0 +1,1859 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Core registration and callback routines for MTD
+ * drivers and users.
+ *
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2006 Red Hat UK Limited
+ *
+ */
+
+#ifndef __UBOOT__
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/ioctl.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/idr.h>
+#include <linux/backing-dev.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#else
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <ubi_uboot.h>
+#endif
+
+#include <linux/log2.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include "mtdcore.h"
+
+#ifndef __UBOOT__
+/*
+ * backing device capabilities for non-mappable devices (such as NAND flash)
+ * - permits private mappings, copies are taken of the data
+ */
+static struct backing_dev_info mtd_bdi_unmappable = {
+ .capabilities = BDI_CAP_MAP_COPY,
+};
+
+/*
+ * backing device capabilities for R/O mappable devices (such as ROM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+static struct backing_dev_info mtd_bdi_ro_mappable = {
+ .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+ BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
+};
+
+/*
+ * backing device capabilities for writable mappable devices (such as RAM)
+ * - permits private mappings, copies are taken of the data
+ * - permits non-writable shared mappings
+ */
+static struct backing_dev_info mtd_bdi_rw_mappable = {
+ .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
+ BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
+ BDI_CAP_WRITE_MAP),
+};
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
+
+static struct class mtd_class = {
+ .name = "mtd",
+ .owner = THIS_MODULE,
+ .suspend = mtd_cls_suspend,
+ .resume = mtd_cls_resume,
+};
+#else
+#define MAX_IDR_ID 64
+
+struct idr_layer {
+ int used;
+ void *ptr;
+};
+
+struct idr {
+ struct idr_layer id[MAX_IDR_ID];
+ bool updated;
+};
+
+#define DEFINE_IDR(name) struct idr name;
+
+void idr_remove(struct idr *idp, int id)
+{
+ if (idp->id[id].used) {
+ idp->id[id].used = 0;
+ idp->updated = true;
+ }
+}
+
+void *idr_find(struct idr *idp, int id)
+{
+ if (idp->id[id].used)
+ return idp->id[id].ptr;
+
+ return NULL;
+}
+
+void *idr_get_next(struct idr *idp, int *next)
+{
+ void *ret;
+ int id = *next;
+
+ ret = idr_find(idp, id);
+ if (ret) {
+ id++;
+ if (!idp->id[id].used)
+ id = 0;
+ *next = id;
+ } else {
+ *next = 0;
+ }
+
+ return ret;
+}
+
+int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask)
+{
+ struct idr_layer *idl;
+ int i = 0;
+
+ while (i < MAX_IDR_ID) {
+ idl = &idp->id[i];
+ if (idl->used == 0) {
+ idl->used = 1;
+ idl->ptr = ptr;
+ idp->updated = true;
+ return i;
+ }
+ i++;
+ }
+ return -ENOSPC;
+}
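+
+/*
+ * Note on this minimal U-Boot idr: start, end and gfp_mask are accepted
+ * for API compatibility with the Linux version but are ignored; ids are
+ * handed out from the first free slot of a fixed MAX_IDR_ID table.
+ */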
+#endif
+
+static DEFINE_IDR(mtd_idr);
+
+/* These are exported solely for the purpose of mtd_blkdevs.c. You
+ should not use them for _anything_ else */
+DEFINE_MUTEX(mtd_table_mutex);
+EXPORT_SYMBOL_GPL(mtd_table_mutex);
+
+struct mtd_info *__mtd_next_device(int i)
+{
+ return idr_get_next(&mtd_idr, &i);
+}
+EXPORT_SYMBOL_GPL(__mtd_next_device);
+
+bool mtd_dev_list_updated(void)
+{
+ if (mtd_idr.updated) {
+ mtd_idr.updated = false;
+ return true;
+ }
+
+ return false;
+}
+
+#ifndef __UBOOT__
+static LIST_HEAD(mtd_notifiers);
+
+
+#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
+
+/* REVISIT once MTD uses the driver model better, whoever allocates
+ * the mtd_info will probably want to use the release() hook...
+ */
+static void mtd_release(struct device *dev)
+{
+ struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
+
+ /* remove /dev/mtdXro node if needed */
+ if (index)
+ device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return mtd ? mtd_suspend(mtd) : 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ if (mtd)
+ mtd_resume(mtd);
+ return 0;
+}
+
+static ssize_t mtd_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ char *type;
+
+ switch (mtd->type) {
+ case MTD_ABSENT:
+ type = "absent";
+ break;
+ case MTD_RAM:
+ type = "ram";
+ break;
+ case MTD_ROM:
+ type = "rom";
+ break;
+ case MTD_NORFLASH:
+ type = "nor";
+ break;
+ case MTD_NANDFLASH:
+ type = "nand";
+ break;
+ case MTD_DATAFLASH:
+ type = "dataflash";
+ break;
+ case MTD_UBIVOLUME:
+ type = "ubi";
+ break;
+ case MTD_MLCNANDFLASH:
+ type = "mlc-nand";
+ break;
+ default:
+ type = "unknown";
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", type);
+}
+static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
+
+static ssize_t mtd_flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
+
+}
+static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
+
+static ssize_t mtd_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)mtd->size);
+
+}
+static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
+
+static ssize_t mtd_erasesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
+
+}
+static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
+
+static ssize_t mtd_writesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
+
+}
+static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
+
+static ssize_t mtd_subpagesize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
+
+}
+static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
+
+static ssize_t mtd_oobsize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
+
+}
+static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
+
+static ssize_t mtd_numeraseregions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
+
+}
+static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
+ NULL);
+
+static ssize_t mtd_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
+
+}
+static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
+
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int bitflip_threshold;
+ int retval;
+
+ retval = kstrtouint(buf, 0, &bitflip_threshold);
+ if (retval)
+ return retval;
+
+ mtd->bitflip_threshold = bitflip_threshold;
+ return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+ mtd_bitflip_threshold_show,
+ mtd_bitflip_threshold_store);
+
+static ssize_t mtd_ecc_step_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
+
+}
+static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+
+static struct attribute *mtd_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_size.attr,
+ &dev_attr_erasesize.attr,
+ &dev_attr_writesize.attr,
+ &dev_attr_subpagesize.attr,
+ &dev_attr_oobsize.attr,
+ &dev_attr_numeraseregions.attr,
+ &dev_attr_name.attr,
+ &dev_attr_ecc_strength.attr,
+ &dev_attr_ecc_step_size.attr,
+ &dev_attr_bitflip_threshold.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mtd);
+
+static struct device_type mtd_devtype = {
+ .name = "mtd",
+ .groups = mtd_groups,
+ .release = mtd_release,
+};
+#endif
+
+/**
+ * add_mtd_device - register an MTD device
+ * @mtd: pointer to new MTD device info structure
+ *
+ * Add a device to the list of MTD devices present in the system, and
+ * notify each currently active MTD 'user' of its arrival. Returns
+ * zero on success or 1 on failure, which currently will only happen
+ * if there is insufficient memory or a sysfs error.
+ */
+
+int add_mtd_device(struct mtd_info *mtd)
+{
+#ifndef __UBOOT__
+ struct mtd_notifier *not;
+#endif
+ int i, error;
+
+#ifndef __UBOOT__
+ if (!mtd->backing_dev_info) {
+ switch (mtd->type) {
+ case MTD_RAM:
+ mtd->backing_dev_info = &mtd_bdi_rw_mappable;
+ break;
+ case MTD_ROM:
+ mtd->backing_dev_info = &mtd_bdi_ro_mappable;
+ break;
+ default:
+ mtd->backing_dev_info = &mtd_bdi_unmappable;
+ break;
+ }
+ }
+#endif
+
+ BUG_ON(mtd->writesize == 0);
+ mutex_lock(&mtd_table_mutex);
+
+ i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
+ if (i < 0)
+ goto fail_locked;
+
+ mtd->index = i;
+ mtd->usecount = 0;
+
+ INIT_LIST_HEAD(&mtd->partitions);
+
+ /* default value if not set by driver */
+ if (mtd->bitflip_threshold == 0)
+ mtd->bitflip_threshold = mtd->ecc_strength;
+
+ if (is_power_of_2(mtd->erasesize))
+ mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
+ else
+ mtd->erasesize_shift = 0;
+
+ if (is_power_of_2(mtd->writesize))
+ mtd->writesize_shift = ffs(mtd->writesize) - 1;
+ else
+ mtd->writesize_shift = 0;
+
+ mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
+ mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
+
+ /* Some chips always power up locked. Unlock them now */
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
+ printk(KERN_WARNING
+ "%s: unlock failed, writes may not work\n",
+ mtd->name);
+ }
+
+#ifndef __UBOOT__
+ /* Caller should have set dev.parent to match the
+ * physical device.
+ */
+ mtd->dev.type = &mtd_devtype;
+ mtd->dev.class = &mtd_class;
+ mtd->dev.devt = MTD_DEVT(i);
+ dev_set_name(&mtd->dev, "mtd%d", i);
+ dev_set_drvdata(&mtd->dev, mtd);
+ if (device_register(&mtd->dev) != 0)
+ goto fail_added;
+
+ if (MTD_DEVT(i))
+ device_create(&mtd_class, mtd->dev.parent,
+ MTD_DEVT(i) + 1,
+ NULL, "mtd%dro", i);
+
+ pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->add(mtd);
+#else
+ pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
+#endif
+
+ mutex_unlock(&mtd_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
+ return 0;
+
+#ifndef __UBOOT__
+fail_added:
+ idr_remove(&mtd_idr, i);
+#endif
+fail_locked:
+ mutex_unlock(&mtd_table_mutex);
+ return 1;
+}
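+
+/*
+ * Caller sketch (fields abbreviated): a driver fills in its mtd_info and
+ * registers it once probed; note the 0/1 (not errno) return convention:
+ *
+ *   mtd->name = "mydev";
+ *   mtd->size = ...;
+ *   mtd->writesize = ...;  // must be non-zero, see BUG_ON() above
+ *   if (add_mtd_device(mtd))
+ *       return -ENODEV;
+ */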
+
+/**
+ * del_mtd_device - unregister an MTD device
+ * @mtd: pointer to MTD device info structure
+ *
+ * Remove a device from the list of MTD devices present in the system,
+ * and notify each currently active MTD 'user' of its departure.
+ * Returns zero on success or a negative error code on failure, e.g. when
+ * the requested device is not present in the list (-ENODEV) or is still
+ * in use (-EBUSY).
+ */
+
+int del_mtd_device(struct mtd_info *mtd)
+{
+ int ret;
+#ifndef __UBOOT__
+ struct mtd_notifier *not;
+#endif
+
+ ret = del_mtd_partitions(mtd);
+ if (ret) {
+ debug("Failed to delete MTD partitions attached to %s (err %d)\n",
+ mtd->name, ret);
+ return ret;
+ }
+
+ mutex_lock(&mtd_table_mutex);
+
+ if (idr_find(&mtd_idr, mtd->index) != mtd) {
+ ret = -ENODEV;
+ goto out_error;
+ }
+
+#ifndef __UBOOT__
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each_entry(not, &mtd_notifiers, list)
+ not->remove(mtd);
+#endif
+
+ if (mtd->usecount) {
+ printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
+ mtd->index, mtd->name, mtd->usecount);
+ ret = -EBUSY;
+ } else {
+#ifndef __UBOOT__
+ device_unregister(&mtd->dev);
+#endif
+
+ idr_remove(&mtd_idr, mtd->index);
+
+ module_put(THIS_MODULE);
+ ret = 0;
+ }
+
+out_error:
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+}
+
+#ifndef __UBOOT__
+/**
+ * mtd_device_parse_register - parse partitions and register an MTD device.
+ *
+ * @mtd: the MTD device to register
+ * @types: the list of MTD partition probes to try, see
+ * 'parse_mtd_partitions()' for more information
+ * @parser_data: MTD partition parser-specific data
+ * @parts: fallback partition information to register, if parsing fails;
+ * only valid if %nr_parts > %0
+ * @nr_parts: the number of partitions in parts, if zero then the full
+ * MTD device is registered if no partition info is found
+ *
+ * This function aggregates MTD partitions parsing (done by
+ * 'parse_mtd_partitions()') and MTD device and partitions registering. It
+ * basically follows the most common pattern found in many MTD drivers:
+ *
+ * * It first tries to probe partitions on MTD device @mtd using parsers
+ * specified in @types (if @types is %NULL, then the default list of parsers
+ * is used, see 'parse_mtd_partitions()' for more information). If none are
+ * found, this function tries to fall back to the information specified in
+ * @parts/@nr_parts.
+ * * If any partitioning info was found, this function registers the found
+ * partitions.
+ * * If no partitions were found this function just registers the MTD device
+ * @mtd and exits.
+ *
+ * Returns zero in case of success and a negative error code in case of failure.
+ */
+int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *parts,
+ int nr_parts)
+{
+ int err;
+ struct mtd_partition *real_parts;
+
+ err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+ if (err <= 0 && nr_parts && parts) {
+ real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
+ GFP_KERNEL);
+ if (!real_parts)
+ err = -ENOMEM;
+ else
+ err = nr_parts;
+ }
+
+ if (err > 0) {
+ err = add_mtd_partitions(mtd, real_parts, err);
+ kfree(real_parts);
+ } else if (err == 0) {
+ err = add_mtd_device(mtd);
+ if (err == 1)
+ err = -ENODEV;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mtd_device_parse_register);
+
+/**
+ * mtd_device_unregister - unregister an existing MTD device.
+ *
+ * @master: the MTD device to unregister. This will unregister both the master
+ * and any partitions if registered.
+ */
+int mtd_device_unregister(struct mtd_info *master)
+{
+ int err;
+
+ err = del_mtd_partitions(master);
+ if (err)
+ return err;
+
+ if (!device_is_registered(&master->dev))
+ return 0;
+
+ return del_mtd_device(master);
+}
+EXPORT_SYMBOL_GPL(mtd_device_unregister);
+
+/**
+ * register_mtd_user - register a 'user' of MTD devices.
+ * @new: pointer to notifier info structure
+ *
+ * Registers a pair of callback functions to be called upon addition
+ * or removal of MTD devices. Causes the 'add' callback to be immediately
+ * invoked for each MTD device currently present in the system.
+ */
+void register_mtd_user (struct mtd_notifier *new)
+{
+ struct mtd_info *mtd;
+
+ mutex_lock(&mtd_table_mutex);
+
+ list_add(&new->list, &mtd_notifiers);
+
+ __module_get(THIS_MODULE);
+
+ mtd_for_each_device(mtd)
+ new->add(mtd);
+
+ mutex_unlock(&mtd_table_mutex);
+}
+EXPORT_SYMBOL_GPL(register_mtd_user);
+
+/**
+ * unregister_mtd_user - unregister a 'user' of MTD devices.
+ * @old: pointer to notifier info structure
+ *
+ * Removes a callback function pair from the list of 'users' to be
+ * notified upon addition or removal of MTD devices. Causes the
+ * 'remove' callback to be immediately invoked for each MTD device
+ * currently present in the system.
+ */
+int unregister_mtd_user (struct mtd_notifier *old)
+{
+ struct mtd_info *mtd;
+
+ mutex_lock(&mtd_table_mutex);
+
+ module_put(THIS_MODULE);
+
+ mtd_for_each_device(mtd)
+ old->remove(mtd);
+
+ list_del(&old->list);
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
+#endif
+
+/**
+ * get_mtd_device - obtain a validated handle for an MTD device
+ * @mtd: last known address of the required MTD device
+ * @num: internal device number of the required MTD device
+ *
+ * Given a number and NULL address, return the num'th entry in the device
+ * table, if any. Given an address and num == -1, search the device table
+ * for a device with that address and return it if it's still present. Given
+ * both, return the num'th driver only if its address matches. Return
+ * error code if not.
+ */
+struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
+{
+ struct mtd_info *ret = NULL, *other;
+ int err = -ENODEV;
+
+ mutex_lock(&mtd_table_mutex);
+
+ if (num == -1) {
+ mtd_for_each_device(other) {
+ if (other == mtd) {
+ ret = mtd;
+ break;
+ }
+ }
+ } else if (num >= 0) {
+ ret = idr_find(&mtd_idr, num);
+ if (mtd && mtd != ret)
+ ret = NULL;
+ }
+
+ if (!ret) {
+ ret = ERR_PTR(err);
+ goto out;
+ }
+
+ err = __get_mtd_device(ret);
+ if (err)
+ ret = ERR_PTR(err);
+out:
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(get_mtd_device);
+
+
+int __get_mtd_device(struct mtd_info *mtd)
+{
+ int err;
+
+ if (!try_module_get(mtd->owner))
+ return -ENODEV;
+
+ if (mtd->_get_device) {
+ err = mtd->_get_device(mtd);
+
+ if (err) {
+ module_put(mtd->owner);
+ return err;
+ }
+ }
+ mtd->usecount++;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
+
+/**
+ * get_mtd_device_nm - obtain a validated handle for an MTD device by
+ * device name
+ * @name: MTD device name to open
+ *
+ * This function returns MTD device description structure in case of
+ * success and an error code in case of failure.
+ */
+struct mtd_info *get_mtd_device_nm(const char *name)
+{
+ int err = -ENODEV;
+ struct mtd_info *mtd = NULL, *other;
+
+ mutex_lock(&mtd_table_mutex);
+
+ mtd_for_each_device(other) {
+ if (!strcmp(name, other->name)) {
+ mtd = other;
+ break;
+ }
+ }
+
+ if (!mtd)
+ goto out_unlock;
+
+ err = __get_mtd_device(mtd);
+ if (err)
+ goto out_unlock;
+
+ mutex_unlock(&mtd_table_mutex);
+ return mtd;
+
+out_unlock:
+ mutex_unlock(&mtd_table_mutex);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
+
+#if defined(CONFIG_CMD_MTDPARTS_SPREAD)
+/**
+ * mtd_get_len_incl_bad
+ *
+ * Check if length including bad blocks fits into device.
+ *
+ * @param mtd an MTD device
+ * @param offset offset in flash
+ * @param length image length
+ * @return image length including bad blocks, stored in *len_incl_bad; whether
+ * the returned length was truncated is stored in *truncated
+ */
+void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
+ const uint64_t length, uint64_t *len_incl_bad,
+ int *truncated)
+{
+ *truncated = 0;
+ *len_incl_bad = 0;
+
+ if (!mtd->_block_isbad) {
+ *len_incl_bad = length;
+ return;
+ }
+
+ uint64_t len_excl_bad = 0;
+ uint64_t block_len;
+
+ while (len_excl_bad < length) {
+ if (offset >= mtd->size) {
+ *truncated = 1;
+ return;
+ }
+
+ block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));
+
+ if (!mtd->_block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
+ len_excl_bad += block_len;
+
+ *len_incl_bad += block_len;
+ offset += block_len;
+ }
+}
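+
+/*
+ * Worked example (geometry hypothetical): with 128 KiB erase blocks, a
+ * 1 MiB image and one bad block inside the span, *len_incl_bad comes back
+ * as 1 MiB + 128 KiB: the bad block contributes no image data but still
+ * occupies flash offsets that must be skipped over.
+ */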
+#endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */
+
+void put_mtd_device(struct mtd_info *mtd)
+{
+ mutex_lock(&mtd_table_mutex);
+ __put_mtd_device(mtd);
+ mutex_unlock(&mtd_table_mutex);
+
+}
+EXPORT_SYMBOL_GPL(put_mtd_device);
+
+void __put_mtd_device(struct mtd_info *mtd)
+{
+ --mtd->usecount;
+ BUG_ON(mtd->usecount < 0);
+
+ if (mtd->_put_device)
+ mtd->_put_device(mtd);
+
+ module_put(mtd->owner);
+}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
+
+/*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ if (!instr->len) {
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+ return 0;
+ }
+ return mtd->_erase(mtd, instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
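+
+/*
+ * Minimal usage sketch (assumes a probed mtd and erase-aligned bounds):
+ *
+ *   struct erase_info ei = {
+ *       .mtd = mtd,
+ *       .addr = 0,
+ *       .len = mtd->erasesize,
+ *   };
+ *
+ *   int err = mtd_erase(mtd, &ei);
+ */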
+
+#ifndef __UBOOT__
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ void **virt, resource_size_t *phys)
+{
+ *retlen = 0;
+ *virt = NULL;
+ if (phys)
+ *phys = 0;
+ if (!mtd->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ if (!mtd->_point)
+ return -EOPNOTSUPP;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+#endif
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+ unsigned long offset, unsigned long flags)
+{
+ if (!mtd->_get_unmapped_area)
+ return -EOPNOTSUPP;
+ if (offset > mtd->size || len > mtd->size - offset)
+ return -EINVAL;
+ return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ u_char *buf)
+{
+ int ret_code;
+ *retlen = 0;
+ if (from < 0 || from > mtd->size || len > mtd->size - from)
+ return -EINVAL;
+ if (!len)
+ return 0;
+
+ /*
+ * In the absence of an error, drivers return a non-negative integer
+ * representing the maximum number of bitflips that were corrected on
+ * any one ecc region (if applicable; zero otherwise).
+ */
+ if (mtd->_read) {
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ } else if (mtd->_read_oob) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+
+ ret_code = mtd->_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ } else {
+ return -ENOTSUPP;
+ }
+
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read);
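+
+/*
+ * Caller sketch (not from this tree): -EUCLEAN means the data in buf is
+ * valid but the corrected bitflips reached mtd->bitflip_threshold, so it
+ * should be treated as a soft error:
+ *
+ *   err = mtd_read(mtd, off, len, &retlen, buf);
+ *   if (err && !mtd_is_bitflip(err))
+ *       return err;           // hard failure
+ *   // data is usable; on -EUCLEAN consider scrubbing the block
+ */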
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (to < 0 || to > mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if ((!mtd->_write && !mtd->_write_oob) ||
+ !(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+
+ if (!mtd->_write) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
+
+ ret = mtd->_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+ }
+
+ return mtd->_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write);
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when it's
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_panic_write)
+ return -EOPNOTSUPP;
+ if (to < 0 || to > mtd->size || len > mtd->size - to)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!len)
+ return 0;
+ return mtd->_panic_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
+
+static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
+ struct mtd_oob_ops *ops)
+{
+ /*
+ * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
+ * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
+ * this case.
+ */
+ if (!ops->datbuf)
+ ops->len = 0;
+
+ if (!ops->oobbuf)
+ ops->ooblen = 0;
+
+ if (offs < 0 || offs + ops->len > mtd->size)
+ return -EINVAL;
+
+ if (ops->ooblen) {
+ size_t maxooblen;
+
+ if (ops->ooboffs >= mtd_oobavail(mtd, ops))
+ return -EINVAL;
+
+ maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
+ mtd_div_by_ws(offs, mtd)) *
+ mtd_oobavail(mtd, ops)) - ops->ooboffs;
+ if (ops->ooblen > maxooblen)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
+{
+ int ret_code;
+ ops->retlen = ops->oobretlen = 0;
+
+ ret_code = mtd_check_oob_ops(mtd, from, ops);
+ if (ret_code)
+ return ret_code;
+
+ /* Check the validity of a potential fallback on mtd->_read */
+ if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->_read_oob)
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ else
+ ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ /*
+ * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
+ * similar to mtd->_read(), returning a non-negative integer
+ * representing max bitflips. In other cases, mtd->_read_oob() may
+ * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
+ */
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+}
+EXPORT_SYMBOL_GPL(mtd_read_oob);
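+
+/*
+ * Usage sketch (buffers hypothetical): read one page of data plus its
+ * free OOB bytes in a single call:
+ *
+ *   struct mtd_oob_ops ops = {
+ *       .mode = MTD_OPS_AUTO_OOB,
+ *       .len = mtd->writesize,
+ *       .ooblen = mtd->oobavail,
+ *       .datbuf = databuf,
+ *       .oobbuf = oobbuf,
+ *   };
+ *
+ *   err = mtd_read_oob(mtd, page_offset, &ops);
+ */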
+
+int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ ret = mtd_check_oob_ops(mtd, to, ops);
+ if (ret)
+ return ret;
+
+ /* Check the validity of a potential fallback on mtd->_write */
+ if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->_write_oob)
+ return mtd->_write_oob(mtd, to, ops);
+ else
+ return mtd->_write(mtd, to, ops->len, &ops->retlen,
+ ops->datbuf);
+}
+EXPORT_SYMBOL_GPL(mtd_write_oob);
+
+/**
+ * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
+ * @mtd: MTD device structure
+ * @section: ECC section. Depending on the layout you may have all the ECC
+ * bytes stored in a single contiguous section, or one section
+ * per ECC chunk (and sometimes several sections for a single
+ * ECC chunk)
+ * @oobecc: OOB region struct filled with the appropriate ECC position
+ * information
+ *
+ * This function returns ECC section information in the OOB area. If you want
+ * to get all the ECC bytes information, then you should call
+ * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc)
+{
+ memset(oobecc, 0, sizeof(*oobecc));
+
+ if (!mtd || section < 0)
+ return -EINVAL;
+
+ if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+ return -ENOTSUPP;
+
+ return mtd->ooblayout->ecc(mtd, section, oobecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
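+
+/*
+ * Example (sketch): enumerating every ECC region of a layout by iterating
+ * until -ERANGE, as the kernel-doc above suggests:
+ *
+ *	struct mtd_oob_region region;
+ *	int section = 0;
+ *
+ *	while (!mtd_ooblayout_ecc(mtd, section++, &region))
+ *		pr_debug("ecc region: offset %u, length %u\n",
+ *			 region.offset, region.length);
+ */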
+
+/**
+ * mtd_ooblayout_free - Get the OOB region definition of a specific free
+ * section
+ * @mtd: MTD device structure
+ * @section: Free section you are interested in. Depending on the layout
+ * you may have all the free bytes stored in a single contiguous
+ * section, or one section per ECC chunk plus an extra section
+ * for the remaining bytes (or other funky layout).
+ * @oobfree: OOB region struct filled with the appropriate free position
+ * information
+ *
+ * This function returns free bytes position in the OOB area. If you want
+ * to get all the free bytes information, then you should call
+ * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree)
+{
+ memset(oobfree, 0, sizeof(*oobfree));
+
+ if (!mtd || section < 0)
+ return -EINVAL;
+
+ if (!mtd->ooblayout || !mtd->ooblayout->rfree)
+ return -ENOTSUPP;
+
+ return mtd->ooblayout->rfree(mtd, section, oobfree);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
+
+/**
+ * mtd_ooblayout_find_region - Find the region attached to a specific byte
+ * @mtd: mtd info structure
+ * @byte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: used to retrieve the ECC position
+ * @iter: iterator function. Should be either mtd_ooblayout_free or
+ * mtd_ooblayout_ecc depending on the region type you're searching for
+ *
+ * This function returns the section id and oobregion information of a
+ * specific byte. For example, say you want to know where the 4th ECC byte is
+ * stored, you'll use:
+ *
+ * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
+ int *sectionp, struct mtd_oob_region *oobregion,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ int pos = 0, ret, section = 0;
+
+ memset(oobregion, 0, sizeof(*oobregion));
+
+ while (1) {
+ ret = iter(mtd, section, oobregion);
+ if (ret)
+ return ret;
+
+ if (pos + oobregion->length > byte)
+ break;
+
+ pos += oobregion->length;
+ section++;
+ }
+
+ /*
+ * Adjust region info to make it start at the beginning of the
+ * 'start' byte.
+ */
+ oobregion->offset += byte - pos;
+ oobregion->length -= byte - pos;
+ *sectionp = section;
+
+ return 0;
+}
+
+/**
+ * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
+ * ECC byte
+ * @mtd: mtd info structure
+ * @eccbyte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: OOB region information
+ *
+ * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
+ * byte.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+ int *section,
+ struct mtd_oob_region *oobregion)
+{
+ return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
+
+/**
+ * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @buf: destination buffer to store OOB bytes
+ * @oobbuf: OOB buffer
+ * @start: first byte to retrieve
+ * @nbytes: number of bytes to retrieve
+ * @iter: section iterator
+ *
+ * Extract bytes attached to a specific category (ECC or free)
+ * from the OOB buffer and copy them into buf.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
+ const u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = min_t(int, nbytes, oobregion.length);
+ memcpy(buf, oobbuf + oobregion.offset, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @buf: source buffer to get OOB bytes from
+ * @oobbuf: OOB buffer
+ * @start: first OOB byte to set
+ * @nbytes: number of OOB bytes to set
+ * @iter: section iterator
+ *
+ * Fill the OOB buffer with data provided in buf. The category (ECC or free)
+ * is selected by passing the appropriate iterator.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
+ u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = min_t(int, nbytes, oobregion.length);
+ memcpy(oobbuf + oobregion.offset, buf, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
+ * @mtd: mtd info structure
+ * @iter: category iterator
+ *
+ * Count the number of bytes in a given category.
+ *
+ * Returns a positive value on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section = 0, ret, nbytes = 0;
+
+ while (1) {
+ ret = iter(mtd, section++, &oobregion);
+ if (ret) {
+ if (ret == -ERANGE)
+ ret = nbytes;
+ break;
+ }
+
+ nbytes += oobregion.length;
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: destination buffer to store ECC bytes
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to retrieve
+ * @nbytes: number of ECC bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
+
+/**
+ * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: source buffer to get ECC bytes from
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to set
+ * @nbytes: number of ECC bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
+
+/**
+ * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: destination buffer to store data bytes
+ * @oobbuf: OOB buffer
+ * @start: first free byte to retrieve
+ * @nbytes: number of free bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
+
+/**
+ * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: source buffer to get data bytes from
+ * @oobbuf: OOB buffer
+ * @start: first free byte to set
+ * @nbytes: number of free bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
+
+/**
+ * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
+ *
+ * Returns a positive value on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
+
+/**
+ * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
+ *
+ * Returns a positive value on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
+
+/*
+ * Method to access the protection register area, present in some flash
+ * devices. The user data is one-time programmable (OTP) but the factory
+ * data is read-only.
+ */
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ if (!mtd->_get_fact_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
+
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_fact_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
+
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf)
+{
+ if (!mtd->_get_user_prot_info)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_get_user_prot_info(mtd, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
+
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ *retlen = 0;
+ if (!mtd->_read_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
+
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ int ret;
+
+ *retlen = 0;
+ if (!mtd->_write_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
+ if (ret)
+ return ret;
+
+ /*
+ * If no data could be written at all, we are out of OTP space and
+ * must return -ENOSPC.
+ */
+ return (*retlen) ? 0 : -ENOSPC;
+}
+EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
+
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ if (!mtd->_lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ if (!len)
+ return 0;
+ return mtd->_lock_user_prot_reg(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_lock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_lock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_unlock)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_unlock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ if (!mtd->_is_locked)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+ return -EINVAL;
+ if (!len)
+ return 0;
+ return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
+
+int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ if (!mtd->_block_isreserved)
+ return 0;
+ return mtd->_block_isreserved(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isreserved);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ if (!mtd->_block_isbad)
+ return 0;
+ return mtd->_block_isbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ if (!mtd->_block_markbad)
+ return -EOPNOTSUPP;
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ return mtd->_block_markbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
+
+#ifndef __UBOOT__
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
+ }
+ *retlen = totlen;
+ return ret;
+}
+
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ *retlen = 0;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (!mtd->_writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+ return mtd->_writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
+/**
+ * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
+ * to the actual allocation size on success.
+ *
+ * This routine attempts to allocate a contiguous kernel buffer up to
+ * the specified size, backing off the size of the request exponentially
+ * until the request succeeds or until the allocation size falls below
+ * the system page size. This attempts to make sure it does not adversely
+ * impact system performance, so when allocating more than one page, we
+ * ask the memory allocator to avoid re-trying, swapping, writing back
+ * or performing I/O.
+ *
+ * Note, this function also makes sure that the allocated buffer is aligned to
+ * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
+ *
+ * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
+ * to handle smaller (i.e. degraded) buffer allocations under low- or
+ * fragmented-memory situations where such reduced allocations, from a
+ * requested ideal, are allowed.
+ *
+ * Returns a pointer to the allocated buffer on success; otherwise, NULL.
+ */
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
+{
+ gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
+ __GFP_NORETRY | __GFP_NO_KSWAPD;
+ size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
+ void *kbuf;
+
+ *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
+
+ while (*size > min_alloc) {
+ kbuf = kmalloc(*size, flags);
+ if (kbuf)
+ return kbuf;
+
+ *size >>= 1;
+ *size = ALIGN(*size, mtd->writesize);
+ }
+
+ /*
+ * For the last resort allocation allow 'kmalloc()' to do all sorts of
+ * things (write-back, dropping caches, etc) by using GFP_KERNEL.
+ */
+ return kmalloc(*size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
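+
+/*
+ * Usage sketch (illustrative): ask for one eraseblock and accept whatever
+ * smaller multiple of mtd->writesize the allocator can provide:
+ *
+ *	size_t size = mtd->erasesize;
+ *	void *buf = mtd_kmalloc_up_to(mtd, &size);
+ *
+ * On success 'size' holds the length actually allocated and the caller then
+ * works in chunks of that size.
+ */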
+#endif
+
+#ifdef CONFIG_PROC_FS
+
+/*====================================================================*/
+/* Support for /proc/mtd */
+
+static int mtd_proc_show(struct seq_file *m, void *v)
+{
+ struct mtd_info *mtd;
+
+ seq_puts(m, "dev: size erasesize name\n");
+ mutex_lock(&mtd_table_mutex);
+ mtd_for_each_device(mtd) {
+ seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
+ mtd->index, (unsigned long long)mtd->size,
+ mtd->erasesize, mtd->name);
+ }
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
+}
+
+static int mtd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtd_proc_show, NULL);
+}
+
+static const struct file_operations mtd_proc_ops = {
+ .open = mtd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_PROC_FS */
+
+/*====================================================================*/
+/* Init code */
+
+#ifndef __UBOOT__
+static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
+{
+ int ret;
+
+ ret = bdi_init(bdi);
+ if (!ret)
+ ret = bdi_register(bdi, NULL, "%s", name);
+
+ if (ret)
+ bdi_destroy(bdi);
+
+ return ret;
+}
+
+static struct proc_dir_entry *proc_mtd;
+
+static int __init init_mtd(void)
+{
+ int ret;
+
+ ret = class_register(&mtd_class);
+ if (ret)
+ goto err_reg;
+
+ ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
+ if (ret)
+ goto err_bdi1;
+
+ ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
+ if (ret)
+ goto err_bdi2;
+
+ ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
+ if (ret)
+ goto err_bdi3;
+
+ proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
+
+ ret = init_mtdchar();
+ if (ret)
+ goto out_procfs;
+
+ return 0;
+
+out_procfs:
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
+err_bdi3:
+ bdi_destroy(&mtd_bdi_ro_mappable);
+err_bdi2:
+ bdi_destroy(&mtd_bdi_unmappable);
+err_bdi1:
+ class_unregister(&mtd_class);
+err_reg:
+ pr_err("Error registering mtd class or bdi: %d\n", ret);
+ return ret;
+}
+
+static void __exit cleanup_mtd(void)
+{
+ cleanup_mtdchar();
+ if (proc_mtd)
+ remove_proc_entry("mtd", NULL);
+ class_unregister(&mtd_class);
+ bdi_destroy(&mtd_bdi_unmappable);
+ bdi_destroy(&mtd_bdi_ro_mappable);
+ bdi_destroy(&mtd_bdi_rw_mappable);
+}
+
+module_init(init_mtd);
+module_exit(cleanup_mtd);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Core MTD registration and access routines");
diff --git a/roms/u-boot/drivers/mtd/mtdcore.h b/roms/u-boot/drivers/mtd/mtdcore.h
new file mode 100644
index 000000000..1d181a104
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/mtdcore.h
@@ -0,0 +1,17 @@
+/*
+ * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c.
+ * You should not use them for _anything_ else.
+ */
+
+extern struct mutex mtd_table_mutex;
+
+int add_mtd_device(struct mtd_info *mtd);
+int del_mtd_device(struct mtd_info *mtd);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
+int del_mtd_partitions(struct mtd_info *);
+int parse_mtd_partitions(struct mtd_info *master, const char * const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data);
+
+int __init init_mtdchar(void);
+void __exit cleanup_mtdchar(void);
diff --git a/roms/u-boot/drivers/mtd/mtdpart.c b/roms/u-boot/drivers/mtd/mtdpart.c
new file mode 100644
index 000000000..d064ac304
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/mtdpart.c
@@ -0,0 +1,1000 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Simple MTD partitioning layer
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
+ * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kmod.h>
+#endif
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/compat.h>
+#include <ubi_uboot.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/err.h>
+#include <linux/sizes.h>
+
+#include "mtdcore.h"
+
+#ifndef __UBOOT__
+static DEFINE_MUTEX(mtd_partitions_mutex);
+#else
+DEFINE_MUTEX(mtd_partitions_mutex);
+#endif
+
+#ifdef __UBOOT__
+/* from mm/util.c */
+
+/**
+ * kstrdup - allocate space for and copy an existing string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ */
+char *kstrdup(const char *s, gfp_t gfp)
+{
+ size_t len;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ len = strlen(s) + 1;
+ buf = kmalloc(len, gfp);
+ if (buf)
+ memcpy(buf, s, len);
+ return buf;
+}
+#endif
+
+#define MTD_SIZE_REMAINING (~0LLU)
+#define MTD_OFFSET_NOT_SPECIFIED (~0LLU)
+
+bool mtd_partitions_used(struct mtd_info *master)
+{
+ struct mtd_info *slave;
+
+ list_for_each_entry(slave, &master->partitions, node) {
+ if (slave->usecount)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * mtd_parse_partition - Parse @mtdparts partition definition, fill @partition
+ * with it and update the @mtdparts string pointer.
+ *
+ * The partition name is allocated and must be freed by the caller.
+ *
+ * This function is largely inspired by part_parse() (mtdparts.c).
+ *
+ * @mtdparts: String describing the partition with mtdparts command syntax
+ * @partition: MTD partition structure to fill
+ *
+ * @return 0 on success, an error otherwise.
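+ *
+ * Example (illustrative): "512k(spl)ro" describes a 512KiB read-only
+ * partition named "spl", while "-(rootfs)" assigns all the remaining
+ * space to a partition named "rootfs".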
+ */
+static int mtd_parse_partition(const char **_mtdparts,
+ struct mtd_partition *partition)
+{
+ const char *mtdparts = *_mtdparts;
+ const char *name = NULL;
+ int name_len;
+ char *buf;
+
+ /* Ensure the partition structure is empty */
+ memset(partition, 0, sizeof(struct mtd_partition));
+
+ /* Fetch the partition size */
+ if (*mtdparts == '-') {
+ /* Assign all remaining space to this partition */
+ partition->size = MTD_SIZE_REMAINING;
+ mtdparts++;
+ } else {
+ partition->size = ustrtoull(mtdparts, (char **)&mtdparts, 0);
+ if (partition->size < SZ_4K) {
+ printf("Minimum partition size 4kiB, %lldB requested\n",
+ partition->size);
+ return -EINVAL;
+ }
+ }
+
+ /* Check for the offset */
+ partition->offset = MTD_OFFSET_NOT_SPECIFIED;
+ if (*mtdparts == '@') {
+ mtdparts++;
+ partition->offset = ustrtoull(mtdparts, (char **)&mtdparts, 0);
+ }
+
+ /* Now look for the name */
+ if (*mtdparts == '(') {
+ name = ++mtdparts;
+ mtdparts = strchr(name, ')');
+ if (!mtdparts) {
+ printf("No closing ')' found in partition name\n");
+ return -EINVAL;
+ }
+ name_len = mtdparts - name + 1;
+ if ((name_len - 1) == 0) {
+ printf("Empty partition name\n");
+ return -EINVAL;
+ }
+ mtdparts++;
+ } else {
+ /* Name will be of the form size@offset */
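+ /* "0x" + 8 hex digits + "@" + "0x" + 8 hex digits + '\0' = 22 */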
+ name_len = 22;
+ }
+
+ /* Check if the partition is read-only */
+ if (strncmp(mtdparts, "ro", 2) == 0) {
+ partition->mask_flags |= MTD_WRITEABLE;
+ mtdparts += 2;
+ }
+
+ /* Check for a potential next partition definition */
+ if (*mtdparts == ',') {
+ if (partition->size == MTD_SIZE_REMAINING) {
+ printf("No partitions allowed after a fill-up\n");
+ return -EINVAL;
+ }
+ ++mtdparts;
+ } else if ((*mtdparts == ';') || (*mtdparts == '\0')) {
+ /* NOP */
+ } else {
+ printf("Unexpected character '%c' in mtdparts\n", *mtdparts);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a buffer for the name and either copy the provided name or
+ * auto-generate it with the form 'size@offset'.
+ */
+ buf = malloc(name_len);
+ if (!buf)
+ return -ENOMEM;
+
+ if (name)
+ strncpy(buf, name, name_len - 1);
+ else
+ snprintf(buf, name_len, "0x%08llx@0x%08llx",
+ partition->size, partition->offset);
+
+ buf[name_len - 1] = '\0';
+ partition->name = buf;
+
+ *_mtdparts = mtdparts;
+
+ return 0;
+}
+
+/**
+ * mtd_parse_partitions - Create a partition array from an mtdparts definition
+ *
+ * Stateless function that takes a @parent MTD device, a string @_mtdparts
+ * describing the partitions (with the "mtdparts" command syntax) and creates
+ * the corresponding MTD partition structure array @_parts. Both the names and
+ * the partition structures themselves must be freed; the caller may use
+ * @mtd_free_parsed_partitions() for this purpose.
+ *
+ * @parent: MTD device which contains the partitions
+ * @_mtdparts: Pointer to a string describing the partitions with "mtdparts"
+ * command syntax.
+ * @_parts: Allocated array containing the partitions, must be freed by the
+ * caller.
+ * @_nparts: Size of @_parts array.
+ *
+ * @return 0 on success, an error otherwise.
+ */
+int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts,
+ struct mtd_partition **_parts, int *_nparts)
+{
+ struct mtd_partition partition = {}, *parts;
+ const char *mtdparts = *_mtdparts;
+ uint64_t cur_off = 0, cur_sz = 0;
+ int nparts = 0;
+ int ret, idx;
+ u64 sz;
+
+ /* First, iterate over the partitions until we know their number */
+ while (mtdparts[0] != '\0' && mtdparts[0] != ';') {
+ ret = mtd_parse_partition(&mtdparts, &partition);
+ if (ret)
+ return ret;
+
+ free((char *)partition.name);
+ nparts++;
+ }
+
+ /* Allocate an array of partitions to give back to the caller */
+ parts = malloc(sizeof(*parts) * nparts);
+ if (!parts) {
+ printf("Not enough space to save partitions meta-data\n");
+ return -ENOMEM;
+ }
+
+ /* Iterate again over each partition to save the data in our array */
+ for (idx = 0; idx < nparts; idx++) {
+ ret = mtd_parse_partition(_mtdparts, &parts[idx]);
+ if (ret)
+ return ret;
+
+ if (parts[idx].size == MTD_SIZE_REMAINING)
+ parts[idx].size = parent->size - cur_sz;
+ cur_sz += parts[idx].size;
+
+ sz = parts[idx].size;
+ if (sz < parent->writesize || do_div(sz, parent->writesize)) {
+ printf("Partition size must be a multiple of %d\n",
+ parent->writesize);
+ return -EINVAL;
+ }
+
+ if (parts[idx].offset == MTD_OFFSET_NOT_SPECIFIED)
+ parts[idx].offset = cur_off;
+ cur_off += parts[idx].size;
+
+ parts[idx].ecclayout = parent->ecclayout;
+ }
+
+ /* Offset by one mtdparts to point to the next device if any */
+ if (*_mtdparts[0] == ';')
+ (*_mtdparts)++;
+
+ *_parts = parts;
+ *_nparts = nparts;
+
+ return 0;
+}
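+
+/*
+ * Usage sketch (illustrative): parsing a definition and registering the
+ * resulting partitions with the helpers declared in mtdcore.h:
+ *
+ *	const char *mtdparts = "512k(spl)ro,-(rootfs)";
+ *	struct mtd_partition *parts;
+ *	int nparts;
+ *
+ *	if (!mtd_parse_partitions(master, &mtdparts, &parts, &nparts)) {
+ *		add_mtd_partitions(master, parts, nparts);
+ *		mtd_free_parsed_partitions(parts, nparts);
+ *	}
+ */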
+
+/**
+ * mtd_free_parsed_partitions - Free dynamically allocated partitions
+ *
+ * Each successful call to @mtd_parse_partitions must be followed by a call to
+ * @mtd_free_parsed_partitions to free any allocated array during the parsing
+ * process.
+ *
+ * @parts: Array containing the partitions that will be freed.
+ * @nparts: Size of @parts array.
+ */
+void mtd_free_parsed_partitions(struct mtd_partition *parts,
+ unsigned int nparts)
+{
+ int i;
+
+ for (i = 0; i < nparts; i++)
+ free((char *)parts[i].name);
+
+ free(parts);
+}
+
+/*
+ * MTD methods which simply translate the effective address and pass through
+ * to the _real_ device.
+ */
+
+static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_ecc_stats stats;
+ int res;
+
+ stats = mtd->parent->ecc_stats;
+ res = mtd->parent->_read(mtd->parent, from + mtd->offset, len,
+ retlen, buf);
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ mtd->parent->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ mtd->parent->ecc_stats.corrected - stats.corrected;
+ return res;
+}
+
+#ifndef __UBOOT__
+static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys)
+{
+ return mtd->parent->_point(mtd->parent, from + mtd->offset, len,
+ retlen, virt, phys);
+}
+
+static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+ return mtd->parent->_unpoint(mtd->parent, from + mtd->offset, len);
+}
+#endif
+
+static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags)
+{
+ offset += mtd->offset;
+ return mtd->parent->_get_unmapped_area(mtd->parent, len, offset, flags);
+}
+
+static int part_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int res;
+
+ if (from >= mtd->size)
+ return -EINVAL;
+ if (ops->datbuf && from + ops->len > mtd->size)
+ return -EINVAL;
+
+ /*
+ * If OOB is also requested, make sure that we do not read past the end
+ * of this partition.
+ */
+ if (ops->oobbuf) {
+ size_t len, pages;
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ len = mtd->oobavail;
+ else
+ len = mtd->oobsize;
+ pages = mtd_div_by_ws(mtd->size, mtd);
+ pages -= mtd_div_by_ws(from, mtd);
+ if (ops->ooboffs + ops->ooblen > pages * len)
+ return -EINVAL;
+ }
+
+ res = mtd->parent->_read_oob(mtd->parent, from + mtd->offset, ops);
+ if (unlikely(res)) {
+ if (mtd_is_bitflip(res))
+ mtd->ecc_stats.corrected++;
+ if (mtd_is_eccerr(res))
+ mtd->ecc_stats.failed++;
+ }
+ return res;
+}
+
+static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return mtd->parent->_read_user_prot_reg(mtd->parent, from, len,
+ retlen, buf);
+}
+
+static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return mtd->parent->_get_user_prot_info(mtd->parent, len, retlen,
+ buf);
+}
+
+static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return mtd->parent->_read_fact_prot_reg(mtd->parent, from, len,
+ retlen, buf);
+}
+
+static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return mtd->parent->_get_fact_prot_info(mtd->parent, len, retlen,
+ buf);
+}
+
+static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ return mtd->parent->_write(mtd->parent, to + mtd->offset, len,
+ retlen, buf);
+}
+
+static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ return mtd->parent->_panic_write(mtd->parent, to + mtd->offset, len,
+ retlen, buf);
+}
+
+static int part_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ if (to >= mtd->size)
+ return -EINVAL;
+ if (ops->datbuf && to + ops->len > mtd->size)
+ return -EINVAL;
+ return mtd->parent->_write_oob(mtd->parent, to + mtd->offset, ops);
+}
+
+static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf)
+{
+ return mtd->parent->_write_user_prot_reg(mtd->parent, from, len,
+ retlen, buf);
+}
+
+static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ return mtd->parent->_lock_user_prot_reg(mtd->parent, from, len);
+}
+
+#ifndef __UBOOT__
+static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ return mtd->parent->_writev(mtd->parent, vecs, count,
+ to + mtd->offset, retlen);
+}
+#endif
+
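+/*
+ * Erase through a partition: translate the request into the parent's
+ * address space, and on failure translate the addresses back so the
+ * caller sees partition-relative offsets.
+ */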
+static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ int ret;
+
+ instr->addr += mtd->offset;
+ ret = mtd->parent->_erase(mtd->parent, instr);
+ if (ret) {
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= mtd->offset;
+ instr->addr -= mtd->offset;
+ }
+ return ret;
+}
+
+void mtd_erase_callback(struct erase_info *instr)
+{
+ if (instr->mtd->_erase == part_erase) {
+ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
+ instr->fail_addr -= instr->mtd->offset;
+ instr->addr -= instr->mtd->offset;
+ }
+ if (instr->callback)
+ instr->callback(instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase_callback);
+
+static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return mtd->parent->_lock(mtd->parent, ofs + mtd->offset, len);
+}
+
+static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return mtd->parent->_unlock(mtd->parent, ofs + mtd->offset, len);
+}
+
+static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ return mtd->parent->_is_locked(mtd->parent, ofs + mtd->offset, len);
+}
+
+static void part_sync(struct mtd_info *mtd)
+{
+ mtd->parent->_sync(mtd->parent);
+}
+
+#ifndef __UBOOT__
+static int part_suspend(struct mtd_info *mtd)
+{
+ return mtd->parent->_suspend(mtd->parent);
+}
+
+static void part_resume(struct mtd_info *mtd)
+{
+ mtd->parent->_resume(mtd->parent);
+}
+#endif
+
+static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ ofs += mtd->offset;
+ return mtd->parent->_block_isreserved(mtd->parent, ofs);
+}
+
+static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ ofs += mtd->offset;
+ return mtd->parent->_block_isbad(mtd->parent, ofs);
+}
+
+static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int res;
+
+ ofs += mtd->offset;
+ res = mtd->parent->_block_markbad(mtd->parent, ofs);
+ if (!res)
+ mtd->ecc_stats.badblocks++;
+ return res;
+}
+
+static inline void free_partition(struct mtd_info *p)
+{
+ kfree(p->name);
+ kfree(p);
+}
+
+/*
+ * This function unregisters and destroys all slave MTD objects which are
+ * attached to the given master MTD object, recursively.
+ */
+static int do_del_mtd_partitions(struct mtd_info *master)
+{
+ struct mtd_info *slave, *next;
+ int ret, err = 0;
+
+ list_for_each_entry_safe(slave, next, &master->partitions, node) {
+ if (mtd_has_partitions(slave))
+ del_mtd_partitions(slave);
+
+ debug("Deleting %s MTD partition\n", slave->name);
+ ret = del_mtd_device(slave);
+ if (ret < 0) {
+ printf("Error when deleting partition \"%s\" (%d)\n",
+ slave->name, ret);
+ err = ret;
+ continue;
+ }
+
+ list_del(&slave->node);
+ free_partition(slave);
+ }
+
+ return err;
+}
+
+int del_mtd_partitions(struct mtd_info *master)
+{
+ int ret;
+
+ debug("Deleting MTD partitions on \"%s\":\n", master->name);
+
+ mutex_lock(&mtd_partitions_mutex);
+ ret = do_del_mtd_partitions(master);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return ret;
+}
+
+static struct mtd_info *allocate_partition(struct mtd_info *master,
+ const struct mtd_partition *part,
+ int partno, uint64_t cur_offset)
+{
+ struct mtd_info *slave;
+ char *name;
+
+ /* allocate the partition structure */
+ slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+ name = kstrdup(part->name, GFP_KERNEL);
+ if (!name || !slave) {
+ printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
+ master->name);
+ kfree(name);
+ kfree(slave);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* set up the MTD object for this partition */
+ slave->type = master->type;
+ slave->flags = master->flags & ~part->mask_flags;
+ slave->size = part->size;
+ slave->writesize = master->writesize;
+ slave->writebufsize = master->writebufsize;
+ slave->oobsize = master->oobsize;
+ slave->oobavail = master->oobavail;
+ slave->subpage_sft = master->subpage_sft;
+
+ slave->name = name;
+ slave->owner = master->owner;
+#ifndef __UBOOT__
+ slave->backing_dev_info = master->backing_dev_info;
+
+ /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
+ * to have the same data be in two different partitions.
+ */
+ slave->dev.parent = master->dev.parent;
+#endif
+
+ if (master->_read)
+ slave->_read = part_read;
+ if (master->_write)
+ slave->_write = part_write;
+
+ if (master->_panic_write)
+ slave->_panic_write = part_panic_write;
+
+#ifndef __UBOOT__
+ if (master->_point && master->_unpoint) {
+ slave->_point = part_point;
+ slave->_unpoint = part_unpoint;
+ }
+#endif
+
+ if (master->_get_unmapped_area)
+ slave->_get_unmapped_area = part_get_unmapped_area;
+ if (master->_read_oob)
+ slave->_read_oob = part_read_oob;
+ if (master->_write_oob)
+ slave->_write_oob = part_write_oob;
+ if (master->_read_user_prot_reg)
+ slave->_read_user_prot_reg = part_read_user_prot_reg;
+ if (master->_read_fact_prot_reg)
+ slave->_read_fact_prot_reg = part_read_fact_prot_reg;
+ if (master->_write_user_prot_reg)
+ slave->_write_user_prot_reg = part_write_user_prot_reg;
+ if (master->_lock_user_prot_reg)
+ slave->_lock_user_prot_reg = part_lock_user_prot_reg;
+ if (master->_get_user_prot_info)
+ slave->_get_user_prot_info = part_get_user_prot_info;
+ if (master->_get_fact_prot_info)
+ slave->_get_fact_prot_info = part_get_fact_prot_info;
+ if (master->_sync)
+ slave->_sync = part_sync;
+#ifndef __UBOOT__
+ if (!partno && !master->dev.class && master->_suspend &&
+ master->_resume) {
+ slave->_suspend = part_suspend;
+ slave->_resume = part_resume;
+ }
+ if (master->_writev)
+ slave->_writev = part_writev;
+#endif
+ if (master->_lock)
+ slave->_lock = part_lock;
+ if (master->_unlock)
+ slave->_unlock = part_unlock;
+ if (master->_is_locked)
+ slave->_is_locked = part_is_locked;
+ if (master->_block_isreserved)
+ slave->_block_isreserved = part_block_isreserved;
+ if (master->_block_isbad)
+ slave->_block_isbad = part_block_isbad;
+ if (master->_block_markbad)
+ slave->_block_markbad = part_block_markbad;
+ slave->_erase = part_erase;
+ slave->parent = master;
+ slave->offset = part->offset;
+ INIT_LIST_HEAD(&slave->partitions);
+ INIT_LIST_HEAD(&slave->node);
+
+ if (slave->offset == MTDPART_OFS_APPEND)
+ slave->offset = cur_offset;
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
+ slave->offset = cur_offset;
+ if (mtd_mod_by_eb(cur_offset, master) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
+ debug("Moving partition %d: "
+ "0x%012llx -> 0x%012llx\n", partno,
+ (unsigned long long)cur_offset, (unsigned long long)slave->offset);
+ }
+ }
+ if (slave->offset == MTDPART_OFS_RETAIN) {
+ slave->offset = cur_offset;
+ if (master->size - slave->offset >= slave->size) {
+ slave->size = master->size - slave->offset
+ - slave->size;
+ } else {
+ debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
+ part->name, master->size - slave->offset,
+ slave->size);
+ /* register to preserve ordering */
+ goto out_register;
+ }
+ }
+ if (slave->size == MTDPART_SIZ_FULL)
+ slave->size = master->size - slave->offset;
+
+ debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
+ (unsigned long long)(slave->offset + slave->size), slave->name);
+
+ /* let's do some sanity checks */
+ if (slave->offset >= master->size) {
+ /* let's register it anyway to preserve ordering */
+ slave->offset = 0;
+ slave->size = 0;
+ printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ goto out_register;
+ }
+ if (slave->offset + slave->size > master->size) {
+ slave->size = master->size - slave->offset;
+ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
+ part->name, master->name, slave->size);
+ }
+ if (master->numeraseregions > 1) {
+ /* Deal with variable erase size stuff */
+ int i, max = master->numeraseregions;
+ u64 end = slave->offset + slave->size;
+ struct mtd_erase_region_info *regions = master->eraseregions;
+
+ /* Find the first erase region which is part of this
+ * partition. */
+ for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
+ ;
+ /* The loop searched for the region _behind_ the first one */
+ if (i > 0)
+ i--;
+
+ /* Pick biggest erasesize */
+ for (; i < max && regions[i].offset < end; i++) {
+ if (slave->erasesize < regions[i].erasesize)
+ slave->erasesize = regions[i].erasesize;
+ }
+ WARN_ON(slave->erasesize == 0);
+ } else {
+ /* Single erase size */
+ slave->erasesize = master->erasesize;
+ }
+
+ if ((slave->flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->offset, slave)) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of
+ * _minor_ erase size though */
+ slave->flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ part->name);
+ }
+ if ((slave->flags & MTD_WRITEABLE) &&
+ mtd_mod_by_eb(slave->size, slave)) {
+ slave->flags &= ~MTD_WRITEABLE;
+ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ part->name);
+ }
+
+ slave->ecclayout = master->ecclayout;
+ slave->ecc_step_size = master->ecc_step_size;
+ slave->ecc_strength = master->ecc_strength;
+ slave->bitflip_threshold = master->bitflip_threshold;
+
+ if (master->_block_isbad) {
+ uint64_t offs = 0;
+
+ while (offs < slave->size) {
+ if (mtd_block_isbad(master, offs + slave->offset))
+ slave->ecc_stats.badblocks++;
+ offs += slave->erasesize;
+ }
+ }
+
+out_register:
+ return slave;
+}
+
+#ifndef __UBOOT__
+int mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length)
+{
+ struct mtd_partition part;
+ struct mtd_info *p, *new;
+ uint64_t start, end;
+ int ret = 0;
+
+ /* the direct offset is expected */
+ if (offset == MTDPART_OFS_APPEND ||
+ offset == MTDPART_OFS_NXTBLK)
+ return -EINVAL;
+
+ if (length == MTDPART_SIZ_FULL)
+ length = master->size - offset;
+
+ if (length <= 0)
+ return -EINVAL;
+
+ part.name = name;
+ part.size = length;
+ part.offset = offset;
+ part.mask_flags = 0;
+ part.ecclayout = NULL;
+
+ new = allocate_partition(master, &part, -1, offset);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ start = offset;
+ end = offset + length;
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry(p, &master->partitions, node) {
+ if (start >= p->offset &&
+ (start < (p->offset + p->size)))
+ goto err_inv;
+
+ if (end >= p->offset &&
+ (end < (p->offset + p->size)))
+ goto err_inv;
+ }
+
+ list_add_tail(&new->node, &master->partitions);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(new);
+
+ return ret;
+err_inv:
+ mutex_unlock(&mtd_partitions_mutex);
+ free_partition(new);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mtd_add_partition);
+
+int mtd_del_partition(struct mtd_info *master, int partno)
+{
+ struct mtd_info *slave, *next;
+ int ret = -EINVAL;
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_for_each_entry_safe(slave, next, &master->partitions, node)
+ if (slave->index == partno) {
+ ret = del_mtd_device(slave);
+ if (ret < 0)
+ break;
+
+ list_del(&slave->node);
+ free_partition(slave);
+ break;
+ }
+ mutex_unlock(&mtd_partitions_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mtd_del_partition);
+#endif
+
+/*
+ * This function, given a master MTD object and a partition table, creates
+ * and registers slave MTD objects which are bound to the master according to
+ * the partition definitions.
+ *
+ * We don't register the master, or expect the caller to have done so,
+ * for reasons of data integrity.
+ */
+
+int add_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition *parts,
+ int nbparts)
+{
+ struct mtd_info *slave;
+ uint64_t cur_offset = 0;
+ int i;
+
+ debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
+
+ for (i = 0; i < nbparts; i++) {
+ slave = allocate_partition(master, parts + i, i, cur_offset);
+ if (IS_ERR(slave))
+ return PTR_ERR(slave);
+
+ mutex_lock(&mtd_partitions_mutex);
+ list_add_tail(&slave->node, &master->partitions);
+ mutex_unlock(&mtd_partitions_mutex);
+
+ add_mtd_device(slave);
+
+ cur_offset = slave->offset + slave->size;
+ }
+
+ return 0;
+}
+
+#ifndef __UBOOT__
+static DEFINE_SPINLOCK(part_parser_lock);
+static LIST_HEAD(part_parsers);
+
+static struct mtd_part_parser *get_partition_parser(const char *name)
+{
+ struct mtd_part_parser *p, *ret = NULL;
+
+ spin_lock(&part_parser_lock);
+
+ list_for_each_entry(p, &part_parsers, list)
+ if (!strcmp(p->name, name) && try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
+#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+
+void register_mtd_parser(struct mtd_part_parser *p)
+{
+ spin_lock(&part_parser_lock);
+ list_add(&p->list, &part_parsers);
+ spin_unlock(&part_parser_lock);
+}
+EXPORT_SYMBOL_GPL(register_mtd_parser);
+
+void deregister_mtd_parser(struct mtd_part_parser *p)
+{
+ spin_lock(&part_parser_lock);
+ list_del(&p->list);
+ spin_unlock(&part_parser_lock);
+}
+EXPORT_SYMBOL_GPL(deregister_mtd_parser);
+
+/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+static const char * const default_mtd_part_types[] = {
+ "cmdlinepart",
+ "ofpart",
+ NULL
+};
+
+/**
+ * parse_mtd_partitions - parse MTD partitions
+ * @master: the master partition (describes whole MTD device)
+ * @types: names of partition parsers to try or %NULL
+ * @pparts: array of partitions found is returned here
+ * @data: MTD partition parser-specific data
+ *
+ * This function tries to find partitions on MTD device @master. It uses MTD
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
+ * Note: If there is more than one parser in @types, the kernel only takes the
+ * partitions parsed out by the first parser.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+ * o zero if no partitions were found
+ * o a positive number of found partitions, in which case on exit @pparts will
+ * point to an array containing this number of &struct mtd_info objects.
+ */
+int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ struct mtd_part_parser *parser;
+ int ret = 0;
+
+ if (!types)
+ types = default_mtd_part_types;
+
+ for ( ; ret <= 0 && *types; types++) {
+ parser = get_partition_parser(*types);
+ if (!parser && !request_module("%s", *types))
+ parser = get_partition_parser(*types);
+ if (!parser)
+ continue;
+ ret = (*parser->parse_fn)(master, pparts, data);
+ put_partition_parser(parser);
+ if (ret > 0) {
+ printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
+ break;
+ }
+ }
+ return ret;
+}
+#endif
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+ if (mtd_is_partition(mtd))
+ return mtd->parent->size;
+
+ return mtd->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/roms/u-boot/drivers/mtd/nand/Kconfig b/roms/u-boot/drivers/mtd/nand/Kconfig
new file mode 100644
index 000000000..78ae04bdc
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/Kconfig
@@ -0,0 +1,6 @@
+config MTD_NAND_CORE
+ tristate
+
+source "drivers/mtd/nand/raw/Kconfig"
+
+source "drivers/mtd/nand/spi/Kconfig"
diff --git a/roms/u-boot/drivers/mtd/nand/Makefile b/roms/u-boot/drivers/mtd/nand/Makefile
new file mode 100644
index 000000000..96e186600
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+ifeq ($(CONFIG_SPL_BUILD)$(CONFIG_TPL_BUILD),)
+nandcore-objs := core.o bbt.o
+obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
+obj-$(CONFIG_MTD_RAW_NAND) += raw/
+obj-$(CONFIG_MTD_SPI_NAND) += spi/
+else
+obj-$(CONFIG_$(SPL_TPL_)NAND_SUPPORT) += raw/
+endif
diff --git a/roms/u-boot/drivers/mtd/nand/bbt.c b/roms/u-boot/drivers/mtd/nand/bbt.c
new file mode 100644
index 000000000..294daee7b
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/bbt.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#define pr_fmt(fmt) "nand-bbt: " fmt
+
+#include <common.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/mtd/nand.h>
+#ifndef __UBOOT__
+#include <linux/slab.h>
+#endif
+
+/**
+ * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
+ * @nand: NAND device
+ *
+ * Initialize the in-memory BBT.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_bbt_init(struct nand_device *nand)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned int nblocks = nanddev_neraseblocks(nand);
+ unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
+ BITS_PER_LONG);
+
+ nand->bbt.cache = kzalloc(nwords * sizeof(*nand->bbt.cache), GFP_KERNEL);
+ if (!nand->bbt.cache)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_init);
+
+/**
+ * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
+ * @nand: NAND device
+ *
+ * Undoes what has been done in nanddev_bbt_init()
+ */
+void nanddev_bbt_cleanup(struct nand_device *nand)
+{
+ kfree(nand->bbt.cache);
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
+
+/**
+ * nanddev_bbt_update() - Update a BBT
+ * @nand: nand device
+ *
+ * Update the BBT. Currently a NOP function since on-flash bbt is not yet
+ * supported.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_bbt_update(struct nand_device *nand)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_update);
+
+/**
+ * nanddev_bbt_get_block_status() - Return the status of an eraseblock
+ * @nand: nand device
+ * @entry: the BBT entry
+ *
+ * Return: a positive nand_bbt_block_status value, or -%ERANGE if @entry
+ * is bigger than the BBT size.
+ */
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned long *pos = nand->bbt.cache +
+ ((entry * bits_per_block) / BITS_PER_LONG);
+ unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
+ unsigned long status;
+
+ if (entry >= nanddev_neraseblocks(nand))
+ return -ERANGE;
+
+ status = pos[0] >> offs;
+ if (bits_per_block + offs > BITS_PER_LONG)
+ status |= pos[1] << (BITS_PER_LONG - offs);
+
+ return status & GENMASK(bits_per_block - 1, 0);
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);
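+
+/*
+ * Worked example (illustrative, assuming bits_per_block = 3 and
+ * BITS_PER_LONG = 64): entry 21 starts at bit 63 (21 * 3 = 63), so
+ * pos = cache + 0 and offs = 63. Since offs + bits_per_block exceeds
+ * BITS_PER_LONG, the entry straddles two words and pos[1] supplies the
+ * upper bits, which is the cross-word case handled above.
+ */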
+
+/**
+ * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
+ * in-memory BBT
+ * @nand: nand device
+ * @entry: the BBT entry to update
+ * @status: the new status
+ *
+ * Update an entry of the in-memory BBT. If you want to push the updated BBT
+ * to the NAND you should call nanddev_bbt_update().
+ *
+ * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
+ * size.
+ */
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned long *pos = nand->bbt.cache +
+ ((entry * bits_per_block) / BITS_PER_LONG);
+ unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
+ unsigned long val = status & GENMASK(bits_per_block - 1, 0);
+
+ if (entry >= nanddev_neraseblocks(nand))
+ return -ERANGE;
+
+ pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
+ pos[0] |= val << offs;
+
+ if (bits_per_block + offs > BITS_PER_LONG) {
+ unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;
+
+ pos[1] &= ~GENMASK(rbits - 1, 0);
+ pos[1] |= val >> (bits_per_block - rbits);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
diff --git a/roms/u-boot/drivers/mtd/nand/core.c b/roms/u-boot/drivers/mtd/nand/core.c
new file mode 100644
index 000000000..090834a49
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/core.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#define pr_fmt(fmt) "nand: " fmt
+
+#include <common.h>
+#include <watchdog.h>
+#ifndef __UBOOT__
+#include <linux/compat.h>
+#include <linux/module.h>
+#endif
+#include <linux/bitops.h>
+#include <linux/mtd/nand.h>
+
+/**
+ * nanddev_isbad() - Check if a block is bad
+ * @nand: NAND device
+ * @pos: position pointing to the block we want to check
+ *
+ * Return: true if the block is bad, false otherwise.
+ */
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ if (nanddev_bbt_is_initialized(nand)) {
+ unsigned int entry;
+ int status;
+
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ status = nanddev_bbt_get_block_status(nand, entry);
+ /* Lazy block status retrieval */
+ if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
+ if (nand->ops->isbad(nand, pos))
+ status = NAND_BBT_BLOCK_FACTORY_BAD;
+ else
+ status = NAND_BBT_BLOCK_GOOD;
+
+ nanddev_bbt_set_block_status(nand, entry, status);
+ }
+
+ if (status == NAND_BBT_BLOCK_WORN ||
+ status == NAND_BBT_BLOCK_FACTORY_BAD)
+ return true;
+
+ return false;
+ }
+
+ return nand->ops->isbad(nand, pos);
+}
+EXPORT_SYMBOL_GPL(nanddev_isbad);
+
+/**
+ * nanddev_markbad() - Mark a block as bad
+ * @nand: NAND device
+ * @pos: position of the block to mark bad
+ *
+ * Mark a block bad. This function updates the BBT if available and calls
+ * the low-level markbad hook (nand->ops->markbad()).
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ unsigned int entry;
+ int ret = 0;
+
+ if (nanddev_isbad(nand, pos))
+ return 0;
+
+ ret = nand->ops->markbad(nand, pos);
+ if (ret)
+ pr_warn("failed to write BBM to block @%llx (err = %d)\n",
+ nanddev_pos_to_offs(nand, pos), ret);
+
+ if (!nanddev_bbt_is_initialized(nand))
+ goto out;
+
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
+ if (ret)
+ goto out;
+
+ ret = nanddev_bbt_update(nand);
+
+out:
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nanddev_markbad);
+
+/**
+ * nanddev_isreserved() - Check whether an eraseblock is reserved or not
+ * @nand: NAND device
+ * @pos: NAND position to test
+ *
+ * Checks whether the eraseblock pointed to by @pos is reserved or not.
+ *
+ * Return: true if the eraseblock is reserved, false otherwise.
+ */
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
+{
+ unsigned int entry;
+ int status;
+
+ if (!nanddev_bbt_is_initialized(nand))
+ return false;
+
+ /* Return info from the table */
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ status = nanddev_bbt_get_block_status(nand, entry);
+ return status == NAND_BBT_BLOCK_RESERVED;
+}
+EXPORT_SYMBOL_GPL(nanddev_isreserved);
+
+/**
+ * nanddev_erase() - Erase a NAND portion
+ * @nand: NAND device
+ * @pos: position of the block to erase
+ *
+ * Erases the block if it's not bad.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ unsigned int entry;
+
+ if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
+ pr_warn("attempt to erase a bad/reserved block @%llx\n",
+ nanddev_pos_to_offs(nand, pos));
+ if (nanddev_isreserved(nand, pos))
+ return -EIO;
+
+ /* remove bad block from BBT */
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ nanddev_bbt_set_block_status(nand, entry,
+ NAND_BBT_BLOCK_STATUS_UNKNOWN);
+ }
+
+ return nand->ops->erase(nand, pos);
+}
+EXPORT_SYMBOL_GPL(nanddev_erase);
+
+/**
+ * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
+ * @mtd: MTD device
+ * @einfo: erase request
+ *
+ * This is a simple mtd->_erase() implementation iterating over all blocks
+ * concerned by @einfo and calling nand->ops->erase() on each of them.
+ *
+ * Note that mtd->_erase should not be directly assigned to this helper,
+ * because there's no locking here. NAND specialized layers should instead
+ * implement their own wrapper around nanddev_mtd_erase() taking the
+ * appropriate lock before calling nanddev_mtd_erase().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos, last;
+ int ret;
+
+ nanddev_offs_to_pos(nand, einfo->addr, &pos);
+ nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
+ while (nanddev_pos_cmp(&pos, &last) <= 0) {
+ WATCHDOG_RESET();
+ ret = nanddev_erase(nand, &pos);
+ if (ret) {
+ einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
+
+ return ret;
+ }
+
+ nanddev_pos_next_eraseblock(nand, &pos);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
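+/*
+ * Illustrative sketch of the locked wrapper the comment above asks
+ * specialized layers to provide; the Linux SPI-NAND layer follows this
+ * pattern. The spinand_device type and its lock are assumptions taken
+ * from that layer, not definitions made in this file.
+ *
+ *	static int spinand_mtd_erase(struct mtd_info *mtd,
+ *				     struct erase_info *einfo)
+ *	{
+ *		struct spinand_device *spinand = mtd_to_spinand(mtd);
+ *		int ret;
+ *
+ *		mutex_lock(&spinand->lock);
+ *		ret = nanddev_mtd_erase(mtd, einfo);
+ *		mutex_unlock(&spinand->lock);
+ *
+ *		return ret;
+ *	}
+ */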
+
+/**
+ * nanddev_init() - Initialize a NAND device
+ * @nand: NAND device
+ * @ops: NAND device operations
+ * @owner: NAND device owner
+ *
+ * Initializes a NAND device object. Consistency checks are done on @ops and
+ * @nand->memorg. Also takes care of initializing the BBT.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
+
+ if (!nand || !ops)
+ return -EINVAL;
+
+ if (!ops->erase || !ops->markbad || !ops->isbad)
+ return -EINVAL;
+
+ if (!memorg->bits_per_cell || !memorg->pagesize ||
+ !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
+ !memorg->planes_per_lun || !memorg->luns_per_target ||
+ !memorg->ntargets)
+ return -EINVAL;
+
+ nand->rowconv.eraseblock_addr_shift =
+ fls(memorg->pages_per_eraseblock - 1);
+ nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
+ nand->rowconv.eraseblock_addr_shift;
+
+ nand->ops = ops;
+
+ mtd->type = memorg->bits_per_cell == 1 ?
+ MTD_NANDFLASH : MTD_MLCNANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
+ mtd->writesize = memorg->pagesize;
+ mtd->writebufsize = memorg->pagesize;
+ mtd->oobsize = memorg->oobsize;
+ mtd->size = nanddev_size(nand);
+ mtd->owner = owner;
+
+ return nanddev_bbt_init(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_init);
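+/*
+ * Illustrative sketch (hypothetical geometry and ops): filling in the
+ * memory organization that nanddev_init() validates, here for a
+ * single-die SLC chip with 2KiB pages, then registering the device.
+ * "my_nand_ops" is a made-up name for a driver's struct nand_ops.
+ *
+ *	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
+ *
+ *	memorg->bits_per_cell = 1;		/- SLC -/
+ *	memorg->pagesize = 2048;
+ *	memorg->oobsize = 64;
+ *	memorg->pages_per_eraseblock = 64;	/- 128KiB eraseblocks -/
+ *	memorg->eraseblocks_per_lun = 1024;	/- 128MiB per LUN -/
+ *	memorg->planes_per_lun = 1;
+ *	memorg->luns_per_target = 1;
+ *	memorg->ntargets = 1;
+ *
+ *	ret = nanddev_init(nand, &my_nand_ops, THIS_MODULE);
+ */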
+
+/**
+ * nanddev_cleanup() - Release resources allocated in nanddev_init()
+ * @nand: NAND device
+ *
+ * Basically undoes what has been done in nanddev_init().
+ */
+void nanddev_cleanup(struct nand_device *nand)
+{
+ if (nanddev_bbt_is_initialized(nand))
+ nanddev_bbt_cleanup(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_cleanup);
+
+MODULE_DESCRIPTION("Generic NAND framework");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/roms/u-boot/drivers/mtd/nand/raw/Kconfig b/roms/u-boot/drivers/mtd/nand/raw/Kconfig
new file mode 100644
index 000000000..ed151ee0a
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/Kconfig
@@ -0,0 +1,446 @@
+
+menuconfig MTD_RAW_NAND
+ bool "Raw NAND Device Support"
+if MTD_RAW_NAND
+
+config SYS_NAND_SELF_INIT
+ bool
+ help
+ This option, if enabled, provides a more flexible, Linux-like
+ NAND initialization process.
+
+config SYS_NAND_DRIVER_ECC_LAYOUT
+ bool
+ help
+ Omit standard ECC layouts to save space. Select this if your driver
+ is known to provide its own ECC layout.
+
+config SYS_NAND_USE_FLASH_BBT
+ bool "Enable BBT (Bad Block Table) support"
+ help
+ Enable the BBT (Bad Block Table) usage.
+
+config NAND_ATMEL
+ bool "Support Atmel NAND controller"
+ imply SYS_NAND_USE_FLASH_BBT
+ help
+ Enable this driver for NAND flash platforms using an Atmel NAND
+ controller.
+
+if NAND_ATMEL
+
+config ATMEL_NAND_HWECC
+ bool "Atmel Hardware ECC"
+ default n
+
+config ATMEL_NAND_HW_PMECC
+ bool "Atmel Programmable Multibit ECC (PMECC)"
+ select ATMEL_NAND_HWECC
+ default n
+ help
+ The Programmable Multibit ECC (PMECC) controller is a programmable
+ binary BCH (Bose, Chaudhuri and Hocquenghem) encoder and decoder.
+
+config PMECC_CAP
+ int "PMECC Correctable ECC Bits"
+ depends on ATMEL_NAND_HW_PMECC
+ default 2
+ help
+ Correctable ECC bits, can be 2, 4, 8, 12, and 24.
+
+config PMECC_SECTOR_SIZE
+ int "PMECC Sector Size"
+ depends on ATMEL_NAND_HW_PMECC
+ default 512
+ help
+ Sector size, in bytes, can be 512 or 1024.
+
+config SPL_GENERATE_ATMEL_PMECC_HEADER
+ bool "Atmel PMECC Header Generation"
+ select ATMEL_NAND_HWECC
+ select ATMEL_NAND_HW_PMECC
+ default n
+ help
+ Generate Programmable Multibit ECC (PMECC) header for SPL image.
+
+endif
+
+config NAND_BRCMNAND
+ bool "Support Broadcom NAND controller"
+ depends on OF_CONTROL && DM && DM_MTD
+ help
+ Enable the driver for NAND flash on platforms using a Broadcom NAND
+ controller.
+
+config NAND_BRCMNAND_6368
+ bool "Support Broadcom NAND controller on bcm6368"
+ depends on NAND_BRCMNAND && ARCH_BMIPS
+ help
+ Enable support for the Broadcom NAND driver on bcm6368.
+
+config NAND_BRCMNAND_68360
+ bool "Support Broadcom NAND controller on bcm68360"
+ depends on NAND_BRCMNAND && ARCH_BCM68360
+ help
+ Enable support for the Broadcom NAND driver on bcm68360.
+
+config NAND_BRCMNAND_6838
+ bool "Support Broadcom NAND controller on bcm6838"
+ depends on NAND_BRCMNAND && ARCH_BMIPS && SOC_BMIPS_BCM6838
+ help
+ Enable support for the Broadcom NAND driver on bcm6838.
+
+config NAND_BRCMNAND_6858
+ bool "Support Broadcom NAND controller on bcm6858"
+ depends on NAND_BRCMNAND && ARCH_BCM6858
+ help
+ Enable support for the Broadcom NAND driver on bcm6858.
+
+config NAND_BRCMNAND_63158
+ bool "Support Broadcom NAND controller on bcm63158"
+ depends on NAND_BRCMNAND && ARCH_BCM63158
+ help
+ Enable support for the Broadcom NAND driver on bcm63158.
+
+config NAND_DAVINCI
+ bool "Support TI Davinci NAND controller"
+ help
+ Enable this driver for NAND flash controllers available in TI Davinci
+ and Keystone2 platforms
+
+config NAND_DENALI
+ bool
+ select SYS_NAND_SELF_INIT
+ imply CMD_NAND
+
+config NAND_DENALI_DT
+ bool "Support Denali NAND controller as a DT device"
+ select NAND_DENALI
+ depends on OF_CONTROL && DM_MTD
+ help
+ Enable the driver for NAND flash on platforms using a Denali NAND
+ controller as a DT device.
+
+config NAND_LPC32XX_SLC
+ bool "Support LPC32XX_SLC controller"
+ help
+ Enable the LPC32XX SLC NAND controller.
+
+config NAND_OMAP_GPMC
+ bool "Support OMAP GPMC NAND controller"
+ depends on ARCH_OMAP2PLUS
+ help
+ Enables omap_gpmc.c driver for OMAPx and AMxxxx platforms.
+ GPMC controller is used for parallel NAND flash devices, and can
+ do ECC calculation (not ECC error detection) for HAM1, BCH4, BCH8
+ and BCH16 ECC algorithms.
+
+config NAND_OMAP_GPMC_PREFETCH
+ bool "Enable GPMC Prefetch"
+ depends on NAND_OMAP_GPMC
+ default y
+ help
+ On OMAP platforms that use the GPMC controller
+ (CONFIG_NAND_OMAP_GPMC), this option enables the code that
+ uses the prefetch mode to speed up read operations.
+
+config NAND_OMAP_ELM
+ bool "Enable ELM driver for OMAPxx and AMxx platforms."
+ depends on NAND_OMAP_GPMC && !OMAP34XX
+ help
+ ELM controller is used for ECC error detection (not ECC calculation)
+ of BCH4, BCH8 and BCH16 ECC algorithms.
+ Some legacy platforms like OMAP3xx do not have an in-built ELM h/w
+ engine, so such SoCs need to depend on a software library for ECC
+ error detection. However, ECC calculation on such platforms would
+ still be done by the GPMC controller.
+
+config NAND_VF610_NFC
+ bool "Support for Freescale NFC for VF610"
+ select SYS_NAND_SELF_INIT
+ select SYS_NAND_DRIVER_ECC_LAYOUT
+ imply CMD_NAND
+ help
+ Enables support for NAND Flash Controller on some Freescale
+ processors like the VF610, MCF54418 or Kinetis K70.
+ The driver supports a maximum 2k page size. The driver
+ currently does not support hardware ECC.
+
+if NAND_VF610_NFC
+
+config NAND_VF610_NFC_DT
+ bool "Support Vybrid's vf610 NAND controller as a DT device"
+ depends on OF_CONTROL && DM_MTD
+ help
+ Enable the driver for Vybrid's vf610 NAND flash on platforms
+ using device tree.
+
+choice
+ prompt "Hardware ECC strength"
+ depends on NAND_VF610_NFC
+ default SYS_NAND_VF610_NFC_45_ECC_BYTES
+ help
+ Select the ECC strength used in the hardware BCH ECC block.
+
+config SYS_NAND_VF610_NFC_45_ECC_BYTES
+ bool "24-error correction (45 ECC bytes)"
+
+config SYS_NAND_VF610_NFC_60_ECC_BYTES
+ bool "32-error correction (60 ECC bytes)"
+
+endchoice
+
+endif
+
+config NAND_PXA3XX
+ bool "Support for NAND on PXA3xx and Armada 370/XP/38x"
+ select SYS_NAND_SELF_INIT
+ select DM_MTD
+ select REGMAP
+ select SYSCON
+ imply CMD_NAND
+ help
+ This enables the driver for the NAND flash device found on
+ PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+
+config NAND_SUNXI
+ bool "Support for NAND on Allwinner SoCs"
+ default ARCH_SUNXI
+ depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I || MACH_SUN8I
+ select SYS_NAND_SELF_INIT
+ select SYS_NAND_U_BOOT_LOCATIONS
+ select SPL_NAND_SUPPORT
+ imply CMD_NAND
+ help
+ Enable support for NAND. This option enables the standard and
+ SPL drivers.
+ The SPL driver only supports reading from the NAND using DMA
+ transfers.
+
+if NAND_SUNXI
+
+config NAND_SUNXI_SPL_ECC_STRENGTH
+ int "Allwinner NAND SPL ECC Strength"
+ default 64
+
+config NAND_SUNXI_SPL_ECC_SIZE
+ int "Allwinner NAND SPL ECC Step Size"
+ default 1024
+
+config NAND_SUNXI_SPL_USABLE_PAGE_SIZE
+ int "Allwinner NAND SPL Usable Page Size"
+ default 1024
+
+endif
+
+config NAND_ARASAN
+ bool "Configure Arasan Nand"
+ select SYS_NAND_SELF_INIT
+ depends on DM_MTD
+ imply CMD_NAND
+ help
+ This enables NAND driver support for the Arasan NAND flash
+ controller. It uses hardware ECC for read and
+ write operations.
+
+config NAND_MXC
+ bool "MXC NAND support"
+ depends on CPU_ARM926EJS || CPU_ARM1136 || MX5
+ imply CMD_NAND
+ help
+ This enables the NAND driver for the NAND flash controller on the
+ i.MX27 / i.MX31 / i.MX5 processors.
+
+config NAND_MXS
+ bool "MXS NAND support"
+ depends on MX23 || MX28 || MX6 || MX7 || IMX8 || IMX8M
+ select SYS_NAND_SELF_INIT
+ imply CMD_NAND
+ select APBH_DMA
+ select APBH_DMA_BURST if ARCH_MX6 || ARCH_MX7 || ARCH_IMX8 || ARCH_IMX8M
+ select APBH_DMA_BURST8 if ARCH_MX6 || ARCH_MX7 || ARCH_IMX8 || ARCH_IMX8M
+ help
+ This enables NAND driver for the NAND flash controller on the
+ MXS processors.
+
+if NAND_MXS
+
+config NAND_MXS_DT
+ bool "Support MXS NAND controller as a DT device"
+ depends on OF_CONTROL && DM_MTD
+ help
+ Enable the driver for MXS NAND flash on platforms using
+ device tree.
+
+config NAND_MXS_USE_MINIMUM_ECC
+ bool "Use minimum ECC strength supported by the controller"
+ default n
+
+endif
+
+config NAND_ZYNQ
+ bool "Support for Zynq Nand controller"
+ select SYS_NAND_SELF_INIT
+ select DM_MTD
+ imply CMD_NAND
+ help
+ This enables NAND driver support for the NAND flash controller
+ found on the Zynq SoC.
+
+config NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
+ bool "Enable use of 1st stage bootloader timing for NAND"
+ depends on NAND_ZYNQ
+ help
+ This flag prevents U-Boot from reconfiguring the NAND flash controller
+ and reuses the NAND timings from the 1st stage bootloader.
+
+config NAND_OCTEONTX
+ bool "Support for OcteonTX NAND controller"
+ select SYS_NAND_SELF_INIT
+ imply CMD_NAND
+ help
+ This enables the NAND flash controller hardware found on the OcteonTX
+ processors.
+
+config NAND_OCTEONTX_HW_ECC
+ bool "Support Hardware ECC for OcteonTX NAND controller"
+ depends on NAND_OCTEONTX
+ default y
+ help
+ This enables the hardware BCH engine found on the OcteonTX processors
+ to provide ECC support for the NAND flash controller.
+
+config NAND_STM32_FMC2
+ bool "Support for NAND controller on STM32MP SoCs"
+ depends on ARCH_STM32MP
+ select SYS_NAND_SELF_INIT
+ imply CMD_NAND
+ help
+ Enables support for NAND Flash chips on SoCs containing the FMC2
+ NAND controller. This controller is found on STM32MP SoCs.
+ The controller supports a maximum 8k page size and a maximum of
+ 8-bit error correction per sector of 512 bytes.
+
+config CORTINA_NAND
+ bool "Support for NAND controller on Cortina-Access SoCs"
+ depends on CORTINA_PLATFORM
+ select SYS_NAND_SELF_INIT
+ select DM_MTD
+ imply CMD_NAND
+ help
+ Enables support for NAND Flash chips on Cortina-Access SoC platforms.
+ This controller is found on Presidio/Venus SoCs.
+ The controller supports a maximum 8k page size and a maximum of
+ 40-bit error correction per sector of 1024 bytes.
+
+comment "Generic NAND options"
+
+config SYS_NAND_BLOCK_SIZE
+ hex "NAND chip eraseblock size"
+ depends on ARCH_SUNXI
+ help
+ Number of data bytes in one eraseblock for the NAND chip on the
+ board. This is the product of NAND_PAGE_SIZE and the number of
+ pages per eraseblock.
+
+config SYS_NAND_PAGE_SIZE
+ hex "NAND chip page size"
+ depends on ARCH_SUNXI
+ help
+ Number of data bytes in one page for the NAND chip on the
+ board, not including the OOB area.
+
+config SYS_NAND_OOBSIZE
+ hex "NAND chip OOB size"
+ depends on ARCH_SUNXI
+ help
+ Number of bytes in the Out-Of-Band area for the NAND chip on
+ the board.
+
+# Enhance depends when converting drivers to Kconfig which use this config
+# option (mxc_nand, ndfc, omap_gpmc).
+config SYS_NAND_BUSWIDTH_16BIT
+ bool "Use 16-bit NAND interface"
+ depends on NAND_VF610_NFC || NAND_OMAP_GPMC || NAND_MXC || ARCH_DAVINCI
+ help
+ Indicates that the NAND device has a 16-bit wide data-bus. In the
+ absence of this config, the bus-width of the NAND device is assumed
+ to be 8-bit and is later determined by reading ONFI params.
+ This config is useful when the NAND device's bus-width information
+ cannot be determined from on-chip ONFI params, as in the following
+ scenarios:
+ - SPL boot does not support reading of ONFI parameters. This is done to
+ keep SPL code foot-print small.
+ - In current U-Boot flow using nand_init(), driver initialization
+ happens in board_nand_init() which is called before any device probe
+ (nand_scan_ident + nand_scan_tail), thus device's ONFI parameters are
+ not available while configuring controller. So a static CONFIG_NAND_xx
+ is needed to know the device's bus-width in advance.
+
+config SYS_NAND_MAX_CHIPS
+ int "NAND max chips"
+ default 1
+ depends on NAND_ARASAN
+ help
+ The maximum number of NAND chips per device to be supported.
+
+if SPL
+
+config SYS_NAND_U_BOOT_LOCATIONS
+ bool "Define U-boot binaries locations in NAND"
+ help
+ Enable CONFIG_SYS_NAND_U_BOOT_OFFS though Kconfig.
+ This option should not be enabled when compiling U-boot for boards
+ defining CONFIG_SYS_NAND_U_BOOT_OFFS in their include/configs/<board>.h
+ file.
+
+config SYS_NAND_U_BOOT_OFFS
+ hex "Location in NAND to read U-Boot from"
+ default 0x800000 if NAND_SUNXI
+ depends on SYS_NAND_U_BOOT_LOCATIONS
+ help
+ Set the offset from the start of the NAND from which U-Boot should
+ be loaded.
+
+config SYS_NAND_U_BOOT_OFFS_REDUND
+ hex "Location in NAND to read U-Boot from"
+ default SYS_NAND_U_BOOT_OFFS
+ depends on SYS_NAND_U_BOOT_LOCATIONS
+ help
+ Set the offset from the start of the NAND from which the redundant
+ U-Boot should be loaded.
+
+config SPL_NAND_AM33XX_BCH
+ bool "Enables SPL-NAND driver which supports ELM based"
+ depends on NAND_OMAP_GPMC && !OMAP34XX
+ default y
+ help
+ This enables hardware ECC correction in the SPL-NAND driver. It is
+ useful for platforms which have an ELM hardware engine and use the
+ NAND boot mode.
+ Some legacy platforms like OMAP3xx do not have an in-built ELM h/w
+ engine, so those platforms should use CONFIG_SPL_NAND_SIMPLE to enable
+ the SPL-NAND driver with software ECC correction support.
+
+config SPL_NAND_DENALI
+ bool "Support Denali NAND controller for SPL"
+ help
+ This is a small implementation of the Denali NAND controller driver
+ for use in SPL.
+
+config NAND_DENALI_SPARE_AREA_SKIP_BYTES
+ int "Number of bytes skipped in OOB area"
+ depends on SPL_NAND_DENALI
+ range 0 63
+ help
+ This option specifies the number of bytes to skip from the beginning
+ of OOB area before last ECC sector data starts. This is potentially
+ used to preserve the bad block marker in the OOB area.
+
+config SPL_NAND_SIMPLE
+ bool "Use simple SPL NAND driver"
+ depends on !SPL_NAND_AM33XX_BCH
+ help
+ Support for NAND boot using simple NAND drivers that
+ expose the cmd_ctrl() interface.
+endif
+
+endif # if MTD_RAW_NAND
diff --git a/roms/u-boot/drivers/mtd/nand/raw/Makefile b/roms/u-boot/drivers/mtd/nand/raw/Makefile
new file mode 100644
index 000000000..f3f0e15a1
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/Makefile
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+ifdef CONFIG_SPL_BUILD
+
+ifdef CONFIG_SPL_NAND_DRIVERS
+NORMAL_DRIVERS=y
+endif
+
+obj-$(CONFIG_SPL_NAND_AM33XX_BCH) += am335x_spl_bch.o
+obj-$(CONFIG_SPL_NAND_DENALI) += denali_spl.o
+obj-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o
+obj-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o
+obj-$(CONFIG_SPL_NAND_ECC) += nand_ecc.o
+obj-$(CONFIG_SPL_NAND_BASE) += nand_base.o
+obj-$(CONFIG_SPL_NAND_IDENT) += nand_ids.o nand_timings.o
+obj-$(CONFIG_SPL_NAND_INIT) += nand.o
+ifeq ($(CONFIG_SPL_ENV_SUPPORT),y)
+obj-$(CONFIG_ENV_IS_IN_NAND) += nand_util.o
+endif
+
+else # not spl
+
+NORMAL_DRIVERS=y
+
+obj-y += nand.o
+obj-y += nand_bbt.o
+obj-y += nand_ids.o
+obj-y += nand_util.o
+obj-y += nand_ecc.o
+obj-y += nand_base.o
+obj-y += nand_timings.o
+
+endif # not spl
+
+ifdef NORMAL_DRIVERS
+
+obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o
+
+obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o
+obj-$(CONFIG_NAND_ARASAN) += arasan_nfc.o
+obj-$(CONFIG_NAND_BRCMNAND) += brcmnand/
+obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o
+obj-$(CONFIG_NAND_DENALI) += denali.o
+obj-$(CONFIG_NAND_DENALI_DT) += denali_dt.o
+obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o
+obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o
+obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o
+obj-$(CONFIG_NAND_FSMC) += fsmc_nand.o
+obj-$(CONFIG_NAND_KB9202) += kb9202_nand.o
+obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o
+obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o
+obj-$(CONFIG_NAND_LPC32XX_MLC) += lpc32xx_nand_mlc.o
+obj-$(CONFIG_NAND_LPC32XX_SLC) += lpc32xx_nand_slc.o
+obj-$(CONFIG_NAND_VF610_NFC) += vf610_nfc.o
+obj-$(CONFIG_NAND_MXC) += mxc_nand.o
+obj-$(CONFIG_NAND_MXS) += mxs_nand.o
+obj-$(CONFIG_NAND_MXS_DT) += mxs_nand_dt.o
+obj-$(CONFIG_NAND_OCTEONTX) += octeontx_nand.o
+obj-$(CONFIG_NAND_OCTEONTX_HW_ECC) += octeontx_bch.o
+obj-$(CONFIG_NAND_PXA3XX) += pxa3xx_nand.o
+obj-$(CONFIG_NAND_SPEAR) += spr_nand.o
+obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o
+obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o
+obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o
+obj-$(CONFIG_NAND_PLAT) += nand_plat.o
+obj-$(CONFIG_NAND_SUNXI) += sunxi_nand.o
+obj-$(CONFIG_NAND_ZYNQ) += zynq_nand.o
+obj-$(CONFIG_NAND_STM32_FMC2) += stm32_fmc2_nand.o
+obj-$(CONFIG_CORTINA_NAND) += cortina_nand.o
+
+else # minimal SPL drivers
+
+obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_spl.o
+obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_spl.o
+obj-$(CONFIG_NAND_MXC) += mxc_nand_spl.o
+obj-$(CONFIG_NAND_MXS) += mxs_nand_spl.o mxs_nand.o
+obj-$(CONFIG_NAND_SUNXI) += sunxi_nand_spl.o
+
+endif # drivers
diff --git a/roms/u-boot/drivers/mtd/nand/raw/am335x_spl_bch.c b/roms/u-boot/drivers/mtd/nand/raw/am335x_spl_bch.c
new file mode 100644
index 000000000..b6fc5f29c
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/am335x_spl_bch.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2012
+ * Konstantin Kozhevnikov, Cogent Embedded
+ *
+ * based on nand_spl_simple code
+ *
+ * (C) Copyright 2006-2008
+ * Stefan Roese, DENX Software Engineering, sr@denx.de.
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/mtd/nand_ecc.h>
+
+static int nand_ecc_pos[] = CONFIG_SYS_NAND_ECCPOS;
+static struct mtd_info *mtd;
+static struct nand_chip nand_chip;
+
+#define ECCSTEPS (CONFIG_SYS_NAND_PAGE_SIZE / \
+ CONFIG_SYS_NAND_ECCSIZE)
+#define ECCTOTAL (ECCSTEPS * CONFIG_SYS_NAND_ECCBYTES)
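+/*
+ * Worked example with hypothetical board values: for a 2048-byte page,
+ * a CONFIG_SYS_NAND_ECCSIZE of 512 and a CONFIG_SYS_NAND_ECCBYTES of 14
+ * (a typical BCH8 layout), ECCSTEPS = 2048 / 512 = 4 and
+ * ECCTOTAL = 4 * 14 = 56 ECC bytes per page.
+ */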
+
+
+/*
+ * NAND command for large page NAND devices (2k)
+ */
+static int nand_command(int block, int page, uint32_t offs,
+ u8 cmd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT;
+ void (*hwctrl)(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl) = this->cmd_ctrl;
+
+ while (!this->dev_ready(mtd))
+ ;
+
+ /* Emulate NAND_CMD_READOOB */
+ if (cmd == NAND_CMD_READOOB) {
+ offs += CONFIG_SYS_NAND_PAGE_SIZE;
+ cmd = NAND_CMD_READ0;
+ }
+
+ /* Begin command latch cycle */
+ hwctrl(mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+
+ if (cmd == NAND_CMD_RESET) {
+ hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Apply this short delay always to ensure that we do wait
+ * tWB in any case on any machine.
+ */
+ ndelay(150);
+
+ while (!this->dev_ready(mtd))
+ ;
+ return 0;
+ }
+
+ /* Shift the offset from byte addressing to word addressing. */
+ if ((this->options & NAND_BUSWIDTH_16) && !nand_opcode_8bits(cmd))
+ offs >>= 1;
+
+ /* Set ALE and clear CLE to start address cycle */
+ /* Column address */
+ hwctrl(mtd, offs & 0xff,
+ NAND_CTRL_ALE | NAND_CTRL_CHANGE); /* A[7:0] */
+ hwctrl(mtd, (offs >> 8) & 0xff, NAND_CTRL_ALE); /* A[11:8] */
+ /* Row address */
+ if (cmd != NAND_CMD_RNDOUT) {
+ hwctrl(mtd, (page_addr & 0xff),
+ NAND_CTRL_ALE); /* A[19:12] */
+ hwctrl(mtd, ((page_addr >> 8) & 0xff),
+ NAND_CTRL_ALE); /* A[27:20] */
+#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE
+ /* One more address cycle for devices > 128MiB */
+ hwctrl(mtd, (page_addr >> 16) & 0x0f,
+ NAND_CTRL_ALE); /* A[31:28] */
+#endif
+ }
+
+ hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+
+ /*
+ * Program and erase have their own busy handlers; status and
+ * sequential in need no delay.
+ */
+ switch (cmd) {
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ return 0;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ hwctrl(mtd, NAND_CMD_RNDOUTSTART, NAND_CTRL_CLE |
+ NAND_CTRL_CHANGE);
+ hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ return 0;
+
+ case NAND_CMD_READ0:
+ /* Latch in address */
+ hwctrl(mtd, NAND_CMD_READSTART,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(150);
+
+ while (!this->dev_ready(mtd))
+ ;
+
+ return 0;
+}
+
+static int nand_is_bad_block(int block)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ nand_command(block, 0, CONFIG_SYS_NAND_BAD_BLOCK_POS,
+ NAND_CMD_READOOB);
+
+ /*
+ * Read one byte (or two if it's a 16 bit chip).
+ */
+ if (this->options & NAND_BUSWIDTH_16) {
+ if (readw(this->IO_ADDR_R) != 0xffff)
+ return 1;
+ } else {
+ if (readb(this->IO_ADDR_R) != 0xff)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int nand_read_page(int block, int page, void *dst)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u_char ecc_calc[ECCTOTAL];
+ u_char ecc_code[ECCTOTAL];
+ u_char oob_data[CONFIG_SYS_NAND_OOBSIZE];
+ int i;
+ int eccsize = CONFIG_SYS_NAND_ECCSIZE;
+ int eccbytes = CONFIG_SYS_NAND_ECCBYTES;
+ int eccsteps = ECCSTEPS;
+ uint8_t *p = dst;
+ uint32_t data_pos = 0;
+ uint8_t *oob = &oob_data[0] + nand_ecc_pos[0];
+ uint32_t oob_pos = eccsize * eccsteps + nand_ecc_pos[0];
+
+ nand_command(block, page, 0, NAND_CMD_READ0);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ this->ecc.hwctl(mtd, NAND_ECC_READ);
+ nand_command(block, page, data_pos, NAND_CMD_RNDOUT);
+
+ this->read_buf(mtd, p, eccsize);
+
+ nand_command(block, page, oob_pos, NAND_CMD_RNDOUT);
+
+ this->read_buf(mtd, oob, eccbytes);
+ this->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ data_pos += eccsize;
+ oob_pos += eccbytes;
+ oob += eccbytes;
+ }
+
+ /* Pick the ECC bytes out of the oob data */
+ for (i = 0; i < ECCTOTAL; i++)
+ ecc_code[i] = oob_data[nand_ecc_pos[i]];
+
+ eccsteps = ECCSTEPS;
+ p = dst;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ /*
+ * There is nothing we can do about a potential error reported
+ * by correct_data(); we just hope that all possible errors are
+ * corrected by this routine.
+ */
+ this->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ }
+
+ return 0;
+}
+
+/* nand_init() - initialize data to make nand usable by SPL */
+void nand_init(void)
+{
+ /*
+ * Init board specific nand support
+ */
+ mtd = nand_to_mtd(&nand_chip);
+ nand_chip.IO_ADDR_R = nand_chip.IO_ADDR_W =
+ (void __iomem *)CONFIG_SYS_NAND_BASE;
+ board_nand_init(&nand_chip);
+
+ if (nand_chip.select_chip)
+ nand_chip.select_chip(mtd, 0);
+
+ /* NAND chip may require reset after power-on */
+ nand_command(0, 0, 0, NAND_CMD_RESET);
+}
+
+/* Unselect after operation */
+void nand_deselect(void)
+{
+ if (nand_chip.select_chip)
+ nand_chip.select_chip(mtd, -1);
+}
+
+#include "nand_spl_loaders.c"
diff --git a/roms/u-boot/drivers/mtd/nand/raw/arasan_nfc.c b/roms/u-boot/drivers/mtd/nand/raw/arasan_nfc.c
new file mode 100644
index 000000000..4621bfb03
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/arasan_nfc.c
@@ -0,0 +1,1325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Arasan NAND Flash Controller Driver
+ *
+ * Copyright (C) 2014 - 2015 Xilinx, Inc.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/sys_proto.h>
+#include <dm.h>
+#include <nand.h>
+
+struct nand_config {
+ u32 page;
+ bool on_die_ecc_enabled;
+};
+
+struct nand_drv {
+ struct nand_regs *reg;
+ struct nand_config config;
+};
+
+struct arasan_nand_info {
+ struct udevice *dev;
+ struct nand_drv nand_ctrl;
+ struct nand_chip nand_chip;
+};
+
+struct nand_regs {
+ u32 pkt_reg;
+ u32 memadr_reg1;
+ u32 memadr_reg2;
+ u32 cmd_reg;
+ u32 pgm_reg;
+ u32 intsts_enr;
+ u32 intsig_enr;
+ u32 intsts_reg;
+ u32 rdy_busy;
+ u32 cms_sysadr_reg;
+ u32 flash_sts_reg;
+ u32 tmg_reg;
+ u32 buf_dataport;
+ u32 ecc_reg;
+ u32 ecc_errcnt_reg;
+ u32 ecc_sprcmd_reg;
+ u32 errcnt_1bitreg;
+ u32 errcnt_2bitreg;
+ u32 errcnt_3bitreg;
+ u32 errcnt_4bitreg;
+ u32 dma_sysadr0_reg;
+ u32 dma_bufbdry_reg;
+ u32 cpu_rls_reg;
+ u32 errcnt_5bitreg;
+ u32 errcnt_6bitreg;
+ u32 errcnt_7bitreg;
+ u32 errcnt_8bitreg;
+ u32 data_if_reg;
+};
+
+struct arasan_nand_command_format {
+ u8 cmd1;
+ u8 cmd2;
+ u8 addr_cycles;
+ u32 pgm;
+};
+
+#define ONDIE_ECC_FEATURE_ADDR 0x90
+#define ENABLE_ONDIE_ECC 0x08
+
+#define ARASAN_PROG_RD_MASK 0x00000001
+#define ARASAN_PROG_BLK_ERS_MASK 0x00000004
+#define ARASAN_PROG_RD_ID_MASK 0x00000040
+#define ARASAN_PROG_RD_STS_MASK 0x00000008
+#define ARASAN_PROG_PG_PROG_MASK 0x00000010
+#define ARASAN_PROG_RD_PARAM_PG_MASK 0x00000080
+#define ARASAN_PROG_RST_MASK 0x00000100
+#define ARASAN_PROG_GET_FTRS_MASK 0x00000200
+#define ARASAN_PROG_SET_FTRS_MASK 0x00000400
+#define ARASAN_PROG_CHNG_ROWADR_END_MASK 0x00400000
+
+#define ARASAN_NAND_CMD_ECC_ON_MASK 0x80000000
+#define ARASAN_NAND_CMD_CMD12_MASK 0xFFFF
+#define ARASAN_NAND_CMD_PG_SIZE_MASK 0x3800000
+#define ARASAN_NAND_CMD_PG_SIZE_SHIFT 23
+#define ARASAN_NAND_CMD_CMD2_SHIFT 8
+#define ARASAN_NAND_CMD_ADDR_CYCL_MASK 0x70000000
+#define ARASAN_NAND_CMD_ADDR_CYCL_SHIFT 28
+
+#define ARASAN_NAND_MEM_ADDR1_PAGE_MASK 0xFFFF0000
+#define ARASAN_NAND_MEM_ADDR1_COL_MASK 0xFFFF
+#define ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT 16
+#define ARASAN_NAND_MEM_ADDR2_PAGE_MASK 0xFF
+#define ARASAN_NAND_MEM_ADDR2_CS_MASK 0xC0000000
+#define ARASAN_NAND_MEM_ADDR2_CS0_MASK (0x3 << 30)
+#define ARASAN_NAND_MEM_ADDR2_CS1_MASK (0x1 << 30)
+#define ARASAN_NAND_MEM_ADDR2_BCH_MASK 0xE000000
+#define ARASAN_NAND_MEM_ADDR2_BCH_SHIFT 25
+
+#define ARASAN_NAND_INT_STS_ERR_EN_MASK 0x10
+#define ARASAN_NAND_INT_STS_MUL_BIT_ERR_MASK 0x08
+#define ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK 0x02
+#define ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK 0x01
+#define ARASAN_NAND_INT_STS_XFR_CMPLT_MASK 0x04
+
+#define ARASAN_NAND_PKT_REG_PKT_CNT_MASK 0xFFF000
+#define ARASAN_NAND_PKT_REG_PKT_SIZE_MASK 0x7FF
+#define ARASAN_NAND_PKT_REG_PKT_CNT_SHFT 12
+
+#define ARASAN_NAND_ROW_ADDR_CYCL_MASK 0x0F
+#define ARASAN_NAND_COL_ADDR_CYCL_MASK 0xF0
+#define ARASAN_NAND_COL_ADDR_CYCL_SHIFT 4
+
+#define ARASAN_NAND_ECC_SIZE_SHIFT 16
+#define ARASAN_NAND_ECC_BCH_SHIFT 27
+
+#define ARASAN_NAND_PKTSIZE_1K 1024
+#define ARASAN_NAND_PKTSIZE_512 512
+
+#define ARASAN_NAND_POLL_TIMEOUT 1000000
+#define ARASAN_NAND_INVALID_ADDR_CYCL 0xFF
+
+#define ERR_ADDR_CYCLE -1
+#define READ_BUFF_SIZE 0x4000
+
+static struct arasan_nand_command_format *curr_cmd;
+
+enum addr_cycles {
+ NAND_ADDR_CYCL_NONE,
+ NAND_ADDR_CYCL_ONE,
+ NAND_ADDR_CYCL_ROW,
+ NAND_ADDR_CYCL_COL,
+ NAND_ADDR_CYCL_BOTH,
+};
+
+static struct arasan_nand_command_format arasan_nand_commands[] = {
+ {NAND_CMD_READ0, NAND_CMD_READSTART, NAND_ADDR_CYCL_BOTH,
+ ARASAN_PROG_RD_MASK},
+ {NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART, NAND_ADDR_CYCL_COL,
+ ARASAN_PROG_RD_MASK},
+ {NAND_CMD_READID, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
+ ARASAN_PROG_RD_ID_MASK},
+ {NAND_CMD_STATUS, NAND_CMD_NONE, NAND_ADDR_CYCL_NONE,
+ ARASAN_PROG_RD_STS_MASK},
+ {NAND_CMD_SEQIN, NAND_CMD_PAGEPROG, NAND_ADDR_CYCL_BOTH,
+ ARASAN_PROG_PG_PROG_MASK},
+ {NAND_CMD_RNDIN, NAND_CMD_NONE, NAND_ADDR_CYCL_COL,
+ ARASAN_PROG_CHNG_ROWADR_END_MASK},
+ {NAND_CMD_ERASE1, NAND_CMD_ERASE2, NAND_ADDR_CYCL_ROW,
+ ARASAN_PROG_BLK_ERS_MASK},
+ {NAND_CMD_RESET, NAND_CMD_NONE, NAND_ADDR_CYCL_NONE,
+ ARASAN_PROG_RST_MASK},
+ {NAND_CMD_PARAM, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
+ ARASAN_PROG_RD_PARAM_PG_MASK},
+ {NAND_CMD_GET_FEATURES, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
+ ARASAN_PROG_GET_FTRS_MASK},
+ {NAND_CMD_SET_FEATURES, NAND_CMD_NONE, NAND_ADDR_CYCL_ONE,
+ ARASAN_PROG_SET_FTRS_MASK},
+ {NAND_CMD_NONE, NAND_CMD_NONE, NAND_ADDR_CYCL_NONE, 0},
+};
+
+struct arasan_ecc_matrix {
+ u32 pagesize;
+ u32 ecc_codeword_size;
+ u8 eccbits;
+ u8 bch;
+ u8 bchval;
+ u16 eccaddr;
+ u16 eccsize;
+};
+
+static const struct arasan_ecc_matrix ecc_matrix[] = {
+ {512, 512, 1, 0, 0, 0x20D, 0x3},
+ {512, 512, 4, 1, 3, 0x209, 0x7},
+ {512, 512, 8, 1, 2, 0x203, 0xD},
+ /*
+ * 2K byte page
+ */
+ {2048, 512, 1, 0, 0, 0x834, 0xC},
+ {2048, 512, 4, 1, 3, 0x826, 0x1A},
+ {2048, 512, 8, 1, 2, 0x80c, 0x34},
+ {2048, 512, 12, 1, 1, 0x822, 0x4E},
+ {2048, 512, 16, 1, 0, 0x808, 0x68},
+ {2048, 1024, 24, 1, 4, 0x81c, 0x54},
+ /*
+ * 4K byte page
+ */
+ {4096, 512, 1, 0, 0, 0x1068, 0x18},
+ {4096, 512, 4, 1, 3, 0x104c, 0x34},
+ {4096, 512, 8, 1, 2, 0x1018, 0x68},
+ {4096, 512, 12, 1, 1, 0x1044, 0x9C},
+ {4096, 512, 16, 1, 0, 0x1010, 0xD0},
+ {4096, 1024, 24, 1, 4, 0x1038, 0xA8},
+ /*
+ * 8K byte page
+ */
+ {8192, 512, 1, 0, 0, 0x20d0, 0x30},
+ {8192, 512, 4, 1, 3, 0x2098, 0x68},
+ {8192, 512, 8, 1, 2, 0x2030, 0xD0},
+ {8192, 512, 12, 1, 1, 0x2088, 0x138},
+ {8192, 512, 16, 1, 0, 0x2020, 0x1A0},
+ {8192, 1024, 24, 1, 4, 0x2070, 0x150},
+ /*
+ * 16K byte page
+ */
+ {16384, 512, 1, 0, 0, 0x4460, 0x60},
+ {16384, 512, 4, 1, 3, 0x43f0, 0xD0},
+ {16384, 512, 8, 1, 2, 0x4320, 0x1A0},
+ {16384, 512, 12, 1, 1, 0x4250, 0x270},
+ {16384, 512, 16, 1, 0, 0x4180, 0x340},
+ {16384, 1024, 24, 1, 4, 0x4220, 0x2A0}
+};
+
+static struct nand_ecclayout ondie_nand_oob_64 = {
+ .eccbytes = 32,
+
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 56, 57, 58, 59, 60, 61, 62, 63
+ },
+
+ .oobfree = {
+ { .offset = 4, .length = 4 },
+ { .offset = 20, .length = 4 },
+ { .offset = 36, .length = 4 },
+ { .offset = 52, .length = 4 }
+ }
+};
+
+/*
+ * BBT descriptors for chips with on-die ECC and
+ * chips with 64-byte OOB
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+static u8 buf_data[READ_BUFF_SIZE];
+static u32 buf_index;
+
+static struct nand_ecclayout nand_oob;
+
+static void arasan_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(nand_chip);
+ u32 reg_val;
+
+ reg_val = readl(&info->reg->memadr_reg2);
+ if (chip == 0) {
+ reg_val &= ~ARASAN_NAND_MEM_ADDR2_CS0_MASK;
+ writel(reg_val, &info->reg->memadr_reg2);
+ } else if (chip == 1) {
+ reg_val |= ARASAN_NAND_MEM_ADDR2_CS1_MASK;
+ writel(reg_val, &info->reg->memadr_reg2);
+ }
+}
+
+static void arasan_nand_enable_ecc(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 reg_val;
+
+ reg_val = readl(&info->reg->cmd_reg);
+ reg_val |= ARASAN_NAND_CMD_ECC_ON_MASK;
+
+ writel(reg_val, &info->reg->cmd_reg);
+}
+
+static u8 arasan_nand_get_addrcycle(struct mtd_info *mtd)
+{
+ u8 addrcycles;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ switch (curr_cmd->addr_cycles) {
+ case NAND_ADDR_CYCL_NONE:
+ addrcycles = 0;
+ break;
+ case NAND_ADDR_CYCL_ONE:
+ addrcycles = 1;
+ break;
+ case NAND_ADDR_CYCL_ROW:
+ addrcycles = chip->onfi_params.addr_cycles &
+ ARASAN_NAND_ROW_ADDR_CYCL_MASK;
+ break;
+ case NAND_ADDR_CYCL_COL:
+ addrcycles = (chip->onfi_params.addr_cycles &
+ ARASAN_NAND_COL_ADDR_CYCL_MASK) >>
+ ARASAN_NAND_COL_ADDR_CYCL_SHIFT;
+ break;
+ case NAND_ADDR_CYCL_BOTH:
+ addrcycles = chip->onfi_params.addr_cycles &
+ ARASAN_NAND_ROW_ADDR_CYCL_MASK;
+ addrcycles += (chip->onfi_params.addr_cycles &
+ ARASAN_NAND_COL_ADDR_CYCL_MASK) >>
+ ARASAN_NAND_COL_ADDR_CYCL_SHIFT;
+ break;
+ default:
+ addrcycles = ARASAN_NAND_INVALID_ADDR_CYCL;
+ break;
+ }
+ return addrcycles;
+}
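+/*
+ * Worked example: ONFI packs both address-cycle counts into one byte,
+ * with the row count in the low nibble and the column count in the high
+ * nibble. For the common value 0x23, NAND_ADDR_CYCL_ROW yields 3,
+ * NAND_ADDR_CYCL_COL yields 2, and NAND_ADDR_CYCL_BOTH yields
+ * 3 + 2 = 5 address cycles.
+ */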
+
+static int arasan_nand_read_page(struct mtd_info *mtd, u8 *buf, u32 size)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ struct nand_config *nand = &info->config;
+ u32 reg_val, i, pktsize, pktnum;
+ u32 *bufptr = (u32 *)buf;
+ u32 timeout;
+ u32 rdcount = 0;
+ u8 addr_cycles;
+
+ if (chip->ecc_step_ds >= ARASAN_NAND_PKTSIZE_1K)
+ pktsize = ARASAN_NAND_PKTSIZE_1K;
+ else
+ pktsize = ARASAN_NAND_PKTSIZE_512;
+
+ if (size % pktsize)
+ pktnum = size/pktsize + 1;
+ else
+ pktnum = size/pktsize;
+
+ reg_val = readl(&info->reg->intsts_enr);
+ reg_val |= ARASAN_NAND_INT_STS_ERR_EN_MASK |
+ ARASAN_NAND_INT_STS_MUL_BIT_ERR_MASK;
+ writel(reg_val, &info->reg->intsts_enr);
+
+ reg_val = readl(&info->reg->pkt_reg);
+ reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
+ ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
+ reg_val |= (pktnum << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) |
+ pktsize;
+ writel(reg_val, &info->reg->pkt_reg);
+
+ if (!nand->on_die_ecc_enabled) {
+ arasan_nand_enable_ecc(mtd);
+ addr_cycles = arasan_nand_get_addrcycle(mtd);
+ if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
+ return ERR_ADDR_CYCLE;
+
+ writel((NAND_CMD_RNDOUTSTART << ARASAN_NAND_CMD_CMD2_SHIFT) |
+ NAND_CMD_RNDOUT | (addr_cycles <<
+ ARASAN_NAND_CMD_ADDR_CYCL_SHIFT),
+ &info->reg->ecc_sprcmd_reg);
+ }
+ writel(curr_cmd->pgm, &info->reg->pgm_reg);
+
+ while (rdcount < pktnum) {
+ timeout = ARASAN_NAND_POLL_TIMEOUT;
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ puts("arasan_read_page: timedout:Buff RDY\n");
+ return -ETIMEDOUT;
+ }
+
+ rdcount++;
+
+ if (pktnum == rdcount) {
+ reg_val = readl(&info->reg->intsts_enr);
+ reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
+ writel(reg_val, &info->reg->intsts_enr);
+ } else {
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
+ &info->reg->intsts_enr);
+ }
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
+ &info->reg->intsts_reg);
+
+ for (i = 0; i < pktsize/4; i++)
+ bufptr[i] = readl(&info->reg->buf_dataport);
+
+
+ bufptr += pktsize/4;
+
+ if (rdcount >= pktnum)
+ break;
+
+ writel(ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
+ &info->reg->intsts_enr);
+ }
+
+ timeout = ARASAN_NAND_POLL_TIMEOUT;
+
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ puts("arasan rd_page timedout:Xfer CMPLT\n");
+ return -ETIMEDOUT;
+ }
+
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_reg);
+
+ if (!nand->on_die_ecc_enabled) {
+ if (readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_MUL_BIT_ERR_MASK) {
+ printf("arasan rd_page:sbiterror\n");
+ return -1;
+ }
+
+ if (readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_ERR_EN_MASK) {
+ mtd->ecc_stats.failed++;
+ printf("arasan rd_page:multibiterror\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
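+/*
+ * Worked example: reading a 2048-byte page on a device with
+ * ecc_step_ds = 512 uses pktsize = 512 and pktnum = 2048 / 512 = 4,
+ * so pkt_reg is programmed with (4 << 12) | 512.
+ */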
+
+static int arasan_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf, int oob_required, int page)
+{
+ int status;
+
+ status = arasan_nand_read_page(mtd, buf, (mtd->writesize));
+
+ if (oob_required)
+ chip->ecc.read_oob(mtd, chip, page);
+
+ return status;
+}
+
+static void arasan_nand_fill_tx(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 __iomem *nand = &info->reg->buf_dataport;
+
+ if (((unsigned long)buf & 0x3) != 0) {
+ if (((unsigned long)buf & 0x1) != 0) {
+ if (len) {
+ writeb(*buf, nand);
+ buf += 1;
+ len--;
+ }
+ }
+
+ if (((unsigned long)buf & 0x3) != 0) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+ }
+ }
+
+ while (len >= 4) {
+ writel(*(u32 *)buf, nand);
+ buf += 4;
+ len -= 4;
+ }
+
+ if (len) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len)
+ writeb(*buf, nand);
+ }
+}
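+/*
+ * Worked example: for a buffer starting at an address ending in ...1
+ * with len = 9, the head alignment code above issues one writeb()
+ * (len 8, buffer now half-word aligned) and one writew() (len 6,
+ * buffer now word aligned), the main loop issues one writel() (len 2),
+ * and the tail issues a final writew(): 1 + 2 + 4 + 2 = 9 bytes.
+ */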
+
+static int arasan_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf, int oob_required,
+ int page)
+{
+ struct nand_drv *info = nand_get_controller_data(chip);
+ struct nand_config *nand = &info->config;
+ u32 reg_val, i, pktsize, pktnum;
+ const u32 *bufptr = (const u32 *)buf;
+ u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
+ u32 size = mtd->writesize;
+ u32 rdcount = 0;
+ u8 column_addr_cycles;
+
+ if (chip->ecc_step_ds >= ARASAN_NAND_PKTSIZE_1K)
+ pktsize = ARASAN_NAND_PKTSIZE_1K;
+ else
+ pktsize = ARASAN_NAND_PKTSIZE_512;
+
+ if (size % pktsize)
+ pktnum = size/pktsize + 1;
+ else
+ pktnum = size/pktsize;
+
+ reg_val = readl(&info->reg->pkt_reg);
+ reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
+ ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
+ reg_val |= (pktnum << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | pktsize;
+ writel(reg_val, &info->reg->pkt_reg);
+
+ if (!nand->on_die_ecc_enabled) {
+ arasan_nand_enable_ecc(mtd);
+ column_addr_cycles = (chip->onfi_params.addr_cycles &
+ ARASAN_NAND_COL_ADDR_CYCL_MASK) >>
+ ARASAN_NAND_COL_ADDR_CYCL_SHIFT;
+ writel((NAND_CMD_RNDIN | (column_addr_cycles << 28)),
+ &info->reg->ecc_sprcmd_reg);
+ }
+ writel(curr_cmd->pgm, &info->reg->pgm_reg);
+
+ while (rdcount < pktnum) {
+ timeout = ARASAN_NAND_POLL_TIMEOUT;
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ puts("arasan_write_page: timedout:Buff RDY\n");
+ return -ETIMEDOUT;
+ }
+
+ rdcount++;
+
+ if (pktnum == rdcount) {
+ reg_val = readl(&info->reg->intsts_enr);
+ reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
+ writel(reg_val, &info->reg->intsts_enr);
+ } else {
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
+ &info->reg->intsts_enr);
+ }
+
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
+ &info->reg->intsts_reg);
+
+ for (i = 0; i < pktsize/4; i++)
+ writel(bufptr[i], &info->reg->buf_dataport);
+
+ bufptr += pktsize/4;
+
+ if (rdcount >= pktnum)
+ break;
+
+ writel(ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
+ &info->reg->intsts_enr);
+ }
+
+ timeout = ARASAN_NAND_POLL_TIMEOUT;
+
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ puts("arasan write_page timedout:Xfer CMPLT\n");
+ return -ETIMEDOUT;
+ }
+
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_reg);
+
+ if (oob_required)
+ chip->ecc.write_oob(mtd, chip, nand->page);
+
+ return 0;
+}
+
+static int arasan_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, (mtd->oobsize));
+
+ return 0;
+}
+
+static int arasan_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int status = 0;
+ const u8 *buf = chip->oob_poi;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, buf, mtd->oobsize);
+
+ return status;
+}
+
+static int arasan_nand_reset(struct mtd_info *mtd,
+ struct arasan_nand_command_format *curr_cmd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
+ u32 cmd_reg = 0;
+
+ writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ cmd_reg = readl(&info->reg->cmd_reg);
+ cmd_reg &= ~ARASAN_NAND_CMD_CMD12_MASK;
+
+ cmd_reg |= curr_cmd->cmd1 |
+ (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
+ writel(cmd_reg, &info->reg->cmd_reg);
+ writel(curr_cmd->pgm, &info->reg->pgm_reg);
+
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ printf("ERROR:%s timedout\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+
+ writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_reg);
+
+ return 0;
+}
+
+static u8 arasan_nand_page(struct mtd_info *mtd)
+{
+ u8 page_val = 0;
+
+ switch (mtd->writesize) {
+ case 512:
+ page_val = 0;
+ break;
+ case 2048:
+ page_val = 1;
+ break;
+ case 4096:
+ page_val = 2;
+ break;
+ case 8192:
+ page_val = 3;
+ break;
+ case 16384:
+ page_val = 4;
+ break;
+ case 1024:
+ page_val = 5;
+ break;
+ default:
+ printf("%s:Pagesize>16K\n", __func__);
+ break;
+ }
+
+ return page_val;
+}
+
+static int arasan_nand_send_wrcmd(struct arasan_nand_command_format *curr_cmd,
+ int column, int page_addr, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 reg_val, page;
+ u8 page_val, addr_cycles;
+
+ writel(ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->cmd_reg);
+ reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
+ reg_val |= curr_cmd->cmd1 |
+ (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
+ if (curr_cmd->cmd1 == NAND_CMD_SEQIN) {
+ reg_val &= ~ARASAN_NAND_CMD_PG_SIZE_MASK;
+ page_val = arasan_nand_page(mtd);
+ reg_val |= (page_val << ARASAN_NAND_CMD_PG_SIZE_SHIFT);
+ }
+
+ reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;
+ addr_cycles = arasan_nand_get_addrcycle(mtd);
+
+ if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
+ return ERR_ADDR_CYCLE;
+
+ reg_val |= (addr_cycles <<
+ ARASAN_NAND_CMD_ADDR_CYCL_SHIFT);
+ writel(reg_val, &info->reg->cmd_reg);
+
+ if (page_addr == -1)
+ page_addr = 0;
+
+ page = (page_addr << ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT) &
+ ARASAN_NAND_MEM_ADDR1_PAGE_MASK;
+ column &= ARASAN_NAND_MEM_ADDR1_COL_MASK;
+ writel(page | column, &info->reg->memadr_reg1);
+
+ reg_val = readl(&info->reg->memadr_reg2);
+ reg_val &= ~ARASAN_NAND_MEM_ADDR2_PAGE_MASK;
+ reg_val |= (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT);
+ writel(reg_val, &info->reg->memadr_reg2);
+
+ return 0;
+}
+
+static void arasan_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 reg_val;
+ u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
+
+ reg_val = readl(&info->reg->pkt_reg);
+ reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
+ ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
+
+ reg_val |= (1 << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | len;
+ writel(reg_val, &info->reg->pkt_reg);
+ writel(curr_cmd->pgm, &info->reg->pgm_reg);
+
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+
+ if (!timeout)
+ puts("ERROR:arasan_nand_write_buf timedout:Buff RDY\n");
+
+ reg_val = readl(&info->reg->intsts_enr);
+ reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
+ writel(reg_val, &info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_WR_RDY_MASK,
+ &info->reg->intsts_reg);
+
+ arasan_nand_fill_tx(mtd, buf, len);
+
+ timeout = ARASAN_NAND_POLL_TIMEOUT;
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+ if (!timeout)
+ puts("ERROR:arasan_nand_write_buf timedout:Xfer CMPLT\n");
+
+ writel(readl(&info->reg->intsts_enr) |
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ writel(readl(&info->reg->intsts_reg) |
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_reg);
+}
+
+static int arasan_nand_erase(struct arasan_nand_command_format *curr_cmd,
+ int column, int page_addr, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 reg_val, page;
+ u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
+ u8 row_addr_cycles;
+
+ writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->cmd_reg);
+ reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
+ reg_val |= curr_cmd->cmd1 |
+ (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
+ row_addr_cycles = arasan_nand_get_addrcycle(mtd);
+
+ if (row_addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
+ return ERR_ADDR_CYCLE;
+
+ reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;
+ reg_val |= (row_addr_cycles <<
+ ARASAN_NAND_CMD_ADDR_CYCL_SHIFT);
+
+ writel(reg_val, &info->reg->cmd_reg);
+
+ page = (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT) &
+ ARASAN_NAND_MEM_ADDR1_COL_MASK;
+ column = page_addr & ARASAN_NAND_MEM_ADDR1_COL_MASK;
+ writel(column | (page << ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT),
+ &info->reg->memadr_reg1);
+
+ reg_val = readl(&info->reg->memadr_reg2);
+ reg_val &= ~ARASAN_NAND_MEM_ADDR2_PAGE_MASK;
+ reg_val |= (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT);
+ writel(reg_val, &info->reg->memadr_reg2);
+ writel(curr_cmd->pgm, &info->reg->pgm_reg);
+
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+ if (!timeout) {
+ printf("ERROR:%s timedout:Xfer CMPLT\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_reg);
+
+ return 0;
+}
+
+static int arasan_nand_read_status(struct arasan_nand_command_format *curr_cmd,
+ int column, int page_addr, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 reg_val;
+ u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
+ u8 addr_cycles;
+
+ writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->cmd_reg);
+ reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
+ reg_val |= curr_cmd->cmd1 |
+ (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
+ addr_cycles = arasan_nand_get_addrcycle(mtd);
+
+ if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
+ return ERR_ADDR_CYCLE;
+
+ reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;
+ reg_val |= (addr_cycles <<
+ ARASAN_NAND_CMD_ADDR_CYCL_SHIFT);
+
+ writel(reg_val, &info->reg->cmd_reg);
+
+ reg_val = readl(&info->reg->pkt_reg);
+ reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
+ ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
+ reg_val |= (1 << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | 1;
+ writel(reg_val, &info->reg->pkt_reg);
+
+ writel(curr_cmd->pgm, &info->reg->pgm_reg);
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+
+ if (!timeout) {
+ printf("ERROR:%s: timedout:Xfer CMPLT\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_reg);
+
+ return 0;
+}
+
+static int arasan_nand_send_rdcmd(struct arasan_nand_command_format *curr_cmd,
+ int column, int page_addr, struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 reg_val, addr_cycles, page;
+ u8 page_val;
+
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
+ &info->reg->intsts_enr);
+
+ reg_val = readl(&info->reg->cmd_reg);
+ reg_val &= ~ARASAN_NAND_CMD_CMD12_MASK;
+ reg_val |= curr_cmd->cmd1 |
+ (curr_cmd->cmd2 << ARASAN_NAND_CMD_CMD2_SHIFT);
+
+ if (curr_cmd->cmd1 == NAND_CMD_RNDOUT ||
+ curr_cmd->cmd1 == NAND_CMD_READ0) {
+ reg_val &= ~ARASAN_NAND_CMD_PG_SIZE_MASK;
+ page_val = arasan_nand_page(mtd);
+ reg_val |= (page_val << ARASAN_NAND_CMD_PG_SIZE_SHIFT);
+ }
+
+ reg_val &= ~ARASAN_NAND_CMD_ECC_ON_MASK;
+
+ reg_val &= ~ARASAN_NAND_CMD_ADDR_CYCL_MASK;
+
+ addr_cycles = arasan_nand_get_addrcycle(mtd);
+
+ if (addr_cycles == ARASAN_NAND_INVALID_ADDR_CYCL)
+ return ERR_ADDR_CYCLE;
+
+ reg_val |= (addr_cycles << 28);
+ writel(reg_val, &info->reg->cmd_reg);
+
+ if (page_addr == -1)
+ page_addr = 0;
+
+ page = (page_addr << ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT) &
+ ARASAN_NAND_MEM_ADDR1_PAGE_MASK;
+ column &= ARASAN_NAND_MEM_ADDR1_COL_MASK;
+ writel(page | column, &info->reg->memadr_reg1);
+
+ reg_val = readl(&info->reg->memadr_reg2);
+ reg_val &= ~ARASAN_NAND_MEM_ADDR2_PAGE_MASK;
+ reg_val |= (page_addr >> ARASAN_NAND_MEM_ADDR1_PAGE_SHIFT);
+ writel(reg_val, &info->reg->memadr_reg2);
+
+ buf_index = 0;
+
+ return 0;
+}
+
+static void arasan_nand_read_buf(struct mtd_info *mtd, u8 *buf, int size)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 reg_val, i;
+ u32 *bufptr = (u32 *)buf;
+ u32 timeout = ARASAN_NAND_POLL_TIMEOUT;
+
+ reg_val = readl(&info->reg->pkt_reg);
+ reg_val &= ~(ARASAN_NAND_PKT_REG_PKT_CNT_MASK |
+ ARASAN_NAND_PKT_REG_PKT_SIZE_MASK);
+ reg_val |= (1 << ARASAN_NAND_PKT_REG_PKT_CNT_SHFT) | size;
+ writel(reg_val, &info->reg->pkt_reg);
+
+ writel(curr_cmd->pgm, &info->reg->pgm_reg);
+
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+
+ if (!timeout)
+ puts("ERROR:arasan_nand_read_buf timedout:Buff RDY\n");
+
+ reg_val = readl(&info->reg->intsts_enr);
+ reg_val |= ARASAN_NAND_INT_STS_XFR_CMPLT_MASK;
+ writel(reg_val, &info->reg->intsts_enr);
+
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_BUF_RD_RDY_MASK,
+ &info->reg->intsts_reg);
+
+ buf_index = 0;
+ for (i = 0; i < size / 4; i++)
+ bufptr[i] = readl(&info->reg->buf_dataport);
+
+ if (size & 0x03)
+ bufptr[i] = readl(&info->reg->buf_dataport);
+
+ timeout = ARASAN_NAND_POLL_TIMEOUT;
+
+ while (!(readl(&info->reg->intsts_reg) &
+ ARASAN_NAND_INT_STS_XFR_CMPLT_MASK) && timeout) {
+ udelay(1);
+ timeout--;
+ }
+
+ if (!timeout)
+ puts("ERROR:arasan_nand_read_buf timedout:Xfer CMPLT\n");
+
+ reg_val = readl(&info->reg->intsts_enr);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+ reg_val = readl(&info->reg->intsts_reg);
+ writel(reg_val | ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_reg);
+}
+
+static u8 arasan_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ u32 size;
+ u8 val;
+ struct nand_onfi_params *p;
+
+ if (buf_index == 0) {
+ p = &chip->onfi_params;
+ if (curr_cmd->cmd1 == NAND_CMD_READID)
+ size = 4;
+ else if (curr_cmd->cmd1 == NAND_CMD_PARAM)
+ size = sizeof(struct nand_onfi_params);
+ else if (curr_cmd->cmd1 == NAND_CMD_RNDOUT)
+ size = le16_to_cpu(p->ext_param_page_length) * 16;
+ else if (curr_cmd->cmd1 == NAND_CMD_GET_FEATURES)
+ size = 4;
+ else if (curr_cmd->cmd1 == NAND_CMD_STATUS)
+ return readb(&info->reg->flash_sts_reg);
+ else
+ size = 8;
+ chip->read_buf(mtd, &buf_data[0], size);
+ }
+
+ val = *(&buf_data[0] + buf_index);
+ buf_index++;
+
+ return val;
+}
+
+static void arasan_nand_cmd_function(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(chip);
+ struct nand_config *nand = &info->config;
+ u32 i, ret = 0;
+
+ curr_cmd = NULL;
+ writel(ARASAN_NAND_INT_STS_XFR_CMPLT_MASK,
+ &info->reg->intsts_enr);
+
+ if ((command == NAND_CMD_READOOB) &&
+ (mtd->writesize > 512)) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Get the command format */
+ for (i = 0; (arasan_nand_commands[i].cmd1 != NAND_CMD_NONE ||
+ arasan_nand_commands[i].cmd2 != NAND_CMD_NONE); i++) {
+ if (command == arasan_nand_commands[i].cmd1) {
+ curr_cmd = &arasan_nand_commands[i];
+ break;
+ }
+ }
+
+ if (curr_cmd == NULL) {
+ printf("Unsupported Command; 0x%x\n", command);
+ return;
+ }
+
+ if (curr_cmd->cmd1 == NAND_CMD_RESET)
+ ret = arasan_nand_reset(mtd, curr_cmd);
+
+ if ((curr_cmd->cmd1 == NAND_CMD_READID) ||
+ (curr_cmd->cmd1 == NAND_CMD_PARAM) ||
+ (curr_cmd->cmd1 == NAND_CMD_RNDOUT) ||
+ (curr_cmd->cmd1 == NAND_CMD_GET_FEATURES) ||
+ (curr_cmd->cmd1 == NAND_CMD_READ0))
+ ret = arasan_nand_send_rdcmd(curr_cmd, column, page_addr, mtd);
+
+ if ((curr_cmd->cmd1 == NAND_CMD_SET_FEATURES) ||
+ (curr_cmd->cmd1 == NAND_CMD_SEQIN)) {
+ nand->page = page_addr;
+ ret = arasan_nand_send_wrcmd(curr_cmd, column, page_addr, mtd);
+ }
+
+ if (curr_cmd->cmd1 == NAND_CMD_ERASE1)
+ ret = arasan_nand_erase(curr_cmd, column, page_addr, mtd);
+
+ if (curr_cmd->cmd1 == NAND_CMD_STATUS)
+ ret = arasan_nand_read_status(curr_cmd, column, page_addr, mtd);
+
+ if (ret != 0)
+ printf("ERROR:%s:command:0x%x\n", __func__, curr_cmd->cmd1);
+}
+
+static void arasan_check_ondie(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(nand_chip);
+ struct nand_config *nand = &info->config;
+ u8 maf_id, dev_id;
+ u8 get_feature[4];
+ u8 set_feature[4] = {ENABLE_ONDIE_ECC, 0x00, 0x00, 0x00};
+ u32 i;
+
+ nand_chip->select_chip(mtd, 0);
+
+ /* Send the command for reading device ID */
+ nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0, -1);
+
+ /* Read manufacturer and device IDs */
+ maf_id = nand_chip->read_byte(mtd);
+ dev_id = nand_chip->read_byte(mtd);
+
+ if ((maf_id == NAND_MFR_MICRON) &&
+ ((dev_id == 0xf1) || (dev_id == 0xa1) || (dev_id == 0xb1) ||
+ (dev_id == 0xaa) || (dev_id == 0xba) || (dev_id == 0xda) ||
+ (dev_id == 0xca) || (dev_id == 0xac) || (dev_id == 0xbc) ||
+ (dev_id == 0xdc) || (dev_id == 0xcc) || (dev_id == 0xa3) ||
+ (dev_id == 0xb3) || (dev_id == 0xd3) || (dev_id == 0xc3))) {
+ nand_chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES,
+ ONDIE_ECC_FEATURE_ADDR, -1);
+
+ nand_chip->write_buf(mtd, &set_feature[0], 4);
+ nand_chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES,
+ ONDIE_ECC_FEATURE_ADDR, -1);
+
+ for (i = 0; i < 4; i++)
+ get_feature[i] = nand_chip->read_byte(mtd);
+
+ if (get_feature[0] & ENABLE_ONDIE_ECC) {
+ nand->on_die_ecc_enabled = true;
+ printf("On-DIE ECC Enabled\n");
+ } else {
+ printf("%s: Unable to enable OnDie ECC\n", __func__);
+ }
+
+ /* Use the BBT pattern descriptors */
+ nand_chip->bbt_td = &bbt_main_descr;
+ nand_chip->bbt_md = &bbt_mirror_descr;
+ }
+}
+
+static int arasan_nand_ecc_init(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct nand_drv *info = nand_get_controller_data(nand_chip);
+ int found = -1;
+ u32 regval, eccpos_start, i, eccaddr;
+
+ for (i = 0; i < ARRAY_SIZE(ecc_matrix); i++) {
+ if ((ecc_matrix[i].pagesize == mtd->writesize) &&
+ (ecc_matrix[i].ecc_codeword_size >=
+ nand_chip->ecc_step_ds)) {
+ if (ecc_matrix[i].eccbits >=
+ nand_chip->ecc_strength_ds) {
+ found = i;
+ break;
+ }
+ found = i;
+ }
+ }
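+ /*
+ * If no ecc_matrix entry meets the chip's required strength, the
+ * last matching geometry above is kept as a best-effort fallback.
+ */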
+
+ if (found < 0)
+ return 1;
+
+ eccaddr = mtd->writesize + mtd->oobsize -
+ ecc_matrix[found].eccsize;
+
+ regval = eccaddr |
+ (ecc_matrix[found].eccsize << ARASAN_NAND_ECC_SIZE_SHIFT) |
+ (ecc_matrix[found].bch << ARASAN_NAND_ECC_BCH_SHIFT);
+ writel(regval, &info->reg->ecc_reg);
+
+ if (ecc_matrix[found].bch) {
+ regval = readl(&info->reg->memadr_reg2);
+ regval &= ~ARASAN_NAND_MEM_ADDR2_BCH_MASK;
+ regval |= (ecc_matrix[found].bchval <<
+ ARASAN_NAND_MEM_ADDR2_BCH_SHIFT);
+ writel(regval, &info->reg->memadr_reg2);
+ }
+
+ nand_oob.eccbytes = ecc_matrix[found].eccsize;
+ eccpos_start = mtd->oobsize - nand_oob.eccbytes;
+
+ for (i = 0; i < nand_oob.eccbytes; i++)
+ nand_oob.eccpos[i] = eccpos_start + i;
+
+ nand_oob.oobfree[0].offset = 2;
+ nand_oob.oobfree[0].length = eccpos_start - 2;
+
+ nand_chip->ecc.size = ecc_matrix[found].ecc_codeword_size;
+ nand_chip->ecc.strength = ecc_matrix[found].eccbits;
+ nand_chip->ecc.bytes = ecc_matrix[found].eccsize;
+ nand_chip->ecc.layout = &nand_oob;
+
+ return 0;
+}
+
+static int arasan_probe(struct udevice *dev)
+{
+ struct arasan_nand_info *arasan = dev_get_priv(dev);
+ struct nand_chip *nand_chip = &arasan->nand_chip;
+ struct nand_drv *info = &arasan->nand_ctrl;
+ struct nand_config *nand = &info->config;
+ struct mtd_info *mtd;
+ int err = -1;
+
+ info->reg = (struct nand_regs *)dev_read_addr(dev);
+ mtd = nand_to_mtd(nand_chip);
+ nand_set_controller_data(nand_chip, &arasan->nand_ctrl);
+
+#ifdef CONFIG_SYS_NAND_NO_SUBPAGE_WRITE
+ nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
+#endif
+
+ /* Set the driver entry points for MTD */
+ nand_chip->cmdfunc = arasan_nand_cmd_function;
+ nand_chip->select_chip = arasan_nand_select_chip;
+ nand_chip->read_byte = arasan_nand_read_byte;
+
+ /* Buffer read/write routines */
+ nand_chip->read_buf = arasan_nand_read_buf;
+ nand_chip->write_buf = arasan_nand_write_buf;
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ writel(0x0, &info->reg->cmd_reg);
+ writel(0x0, &info->reg->pgm_reg);
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, CONFIG_SYS_NAND_MAX_CHIPS, NULL)) {
+ printf("%s: nand_scan_ident failed\n", __func__);
+ goto fail;
+ }
+
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.hwctl = NULL;
+ nand_chip->ecc.read_page = arasan_nand_read_page_hwecc;
+ nand_chip->ecc.write_page = arasan_nand_write_page_hwecc;
+ nand_chip->ecc.read_oob = arasan_nand_read_oob;
+ nand_chip->ecc.write_oob = arasan_nand_write_oob;
+
+ arasan_check_ondie(mtd);
+
+ /*
+ * If on-die ECC is supported, give it priority and use it instead
+ * of the controller ECC.
+ */
+ if (nand->on_die_ecc_enabled) {
+ nand_chip->ecc.strength = 1;
+ nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.bytes = 0;
+ nand_chip->ecc.layout = &ondie_nand_oob_64;
+ } else {
+ if (arasan_nand_ecc_init(mtd)) {
+ printf("%s: nand_ecc_init failed\n", __func__);
+ goto fail;
+ }
+ }
+
+ if (nand_scan_tail(mtd)) {
+ printf("%s: nand_scan_tail failed\n", __func__);
+ goto fail;
+ }
+
+ if (nand_register(0, mtd)) {
+ printf("Nand Register Fail\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ free(nand);
+ return err;
+}
+
+static const struct udevice_id arasan_nand_dt_ids[] = {
+ {.compatible = "arasan,nfc-v3p10",},
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(arasan_nand) = {
+ .name = "arasan_nand",
+ .id = UCLASS_MTD,
+ .of_match = arasan_nand_dt_ids,
+ .probe = arasan_probe,
+ .priv_auto = sizeof(struct arasan_nand_info),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(arasan_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name, ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/atmel_nand.c b/roms/u-boot/drivers/mtd/nand/raw/atmel_nand.c
new file mode 100644
index 000000000..abc432c86
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/atmel_nand.c
@@ -0,0 +1,1526 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2007-2008
+ * Stelian Pop <stelian@popies.net>
+ * Lead Tech Design <www.leadtechdesign.com>
+ *
+ * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ * Add Programmable Multibit ECC support for various AT91 SoC
+ * (C) Copyright 2012 ATMEL, Hong Xu
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/gpio.h>
+#include <asm/arch/gpio.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+
+#include <malloc.h>
+#include <nand.h>
+#include <watchdog.h>
+#include <linux/mtd/nand_ecc.h>
+
+#ifdef CONFIG_ATMEL_NAND_HWECC
+
+/* Register access macros */
+#define ecc_readl(add, reg) \
+ readl(add + ATMEL_ECC_##reg)
+#define ecc_writel(add, reg, value) \
+ writel((value), add + ATMEL_ECC_##reg)
+
+#include "atmel_nand_ecc.h" /* Hardware ECC registers */
+
+#ifdef CONFIG_ATMEL_NAND_HW_PMECC
+
+#ifdef CONFIG_SPL_BUILD
+#undef CONFIG_SYS_NAND_ONFI_DETECTION
+#endif
+
+struct atmel_nand_host {
+ struct pmecc_regs __iomem *pmecc;
+ struct pmecc_errloc_regs __iomem *pmerrloc;
+ void __iomem *pmecc_rom_base;
+
+ u8 pmecc_corr_cap;
+ u16 pmecc_sector_size;
+ u32 pmecc_index_table_offset;
+ u32 pmecc_version;
+
+ int pmecc_bytes_per_sector;
+ int pmecc_sector_number;
+ int pmecc_degree; /* Degree of remainders */
+ int pmecc_cw_len; /* Length of codeword */
+
+ /* lookup table for alpha_to and index_of */
+ void __iomem *pmecc_alpha_to;
+ void __iomem *pmecc_index_of;
+
+ /* data for pmecc computation */
+ int16_t *pmecc_smu;
+ int16_t *pmecc_partial_syn;
+ int16_t *pmecc_si;
+ int16_t *pmecc_lmu; /* polynomial order */
+ int *pmecc_mu;
+ int *pmecc_dmu;
+ int *pmecc_delta;
+};
+
+static struct atmel_nand_host pmecc_host;
+static struct nand_ecclayout atmel_pmecc_oobinfo;
+
+/*
+ * Return number of ecc bytes per sector according to sector size and
+ * correction capability
+ *
+ * The following table shows what the AT91 PMECC supports:
+ * Correction Capability Sector_512_bytes Sector_1024_bytes
+ * ===================== ================ =================
+ * 2-bits 4-bytes 4-bytes
+ * 4-bits 7-bytes 7-bytes
+ * 8-bits 13-bytes 14-bytes
+ * 12-bits 20-bytes 21-bytes
+ * 24-bits 39-bytes 42-bytes
+ * 32-bits 52-bytes 56-bytes
+ */
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
+{
+ int m = 12 + sector_size / 512;
+ return (m * cap + 7) / 8;
+}
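+
+/*
+ * Worked example (values match the table above): 8-bit correction over a
+ * 512-byte sector gives m = 12 + 512/512 = 13, so the sector needs
+ * (13 * 8 + 7) / 8 = 13 bytes of ECC; for a 1024-byte sector m = 14 and
+ * the result is 14 bytes.
+ */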
+
+static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
+ int oobsize, int ecc_len)
+{
+ int i;
+
+ layout->eccbytes = ecc_len;
+
+ /* ECC will occupy the last ecc_len bytes continuously */
+ for (i = 0; i < ecc_len; i++)
+ layout->eccpos[i] = oobsize - ecc_len + i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length =
+ oobsize - ecc_len - layout->oobfree[0].offset;
+}
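+
+/*
+ * Example: with a 64-byte OOB and ecc_len = 13, eccpos covers bytes
+ * 51..63 and oobfree is offset 2, length 49 (bytes 2..50).
+ */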
+
+static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+{
+ int table_size;
+
+ table_size = host->pmecc_sector_size == 512 ?
+ PMECC_INDEX_TABLE_SIZE_512 : PMECC_INDEX_TABLE_SIZE_1024;
+
+ /* the ALPHA lookup table is right behind the INDEX lookup table. */
+ return host->pmecc_rom_base + host->pmecc_index_table_offset +
+ table_size * sizeof(int16_t);
+}
+
+static void pmecc_data_free(struct atmel_nand_host *host)
+{
+ free(host->pmecc_partial_syn);
+ free(host->pmecc_si);
+ free(host->pmecc_lmu);
+ free(host->pmecc_smu);
+ free(host->pmecc_mu);
+ free(host->pmecc_dmu);
+ free(host->pmecc_delta);
+}
+
+static int pmecc_data_alloc(struct atmel_nand_host *host)
+{
+ const int cap = host->pmecc_corr_cap;
+ int size;
+
+ size = (2 * cap + 1) * sizeof(int16_t);
+ host->pmecc_partial_syn = malloc(size);
+ host->pmecc_si = malloc(size);
+ host->pmecc_lmu = malloc((cap + 1) * sizeof(int16_t));
+ host->pmecc_smu = malloc((cap + 2) * size);
+
+ size = (cap + 1) * sizeof(int);
+ host->pmecc_mu = malloc(size);
+ host->pmecc_dmu = malloc(size);
+ host->pmecc_delta = malloc(size);
+
+ if (host->pmecc_partial_syn &&
+ host->pmecc_si &&
+ host->pmecc_lmu &&
+ host->pmecc_smu &&
+ host->pmecc_mu &&
+ host->pmecc_dmu &&
+ host->pmecc_delta)
+ return 0;
+
+ /* error happened */
+ pmecc_data_free(host);
+ return -ENOMEM;
+}
+
+static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+ int i;
+ uint32_t value;
+
+ /* Fill odd syndromes */
+ for (i = 0; i < host->pmecc_corr_cap; i++) {
+ value = pmecc_readl(host->pmecc, rem_port[sector].rem[i / 2]);
+ if (i & 1)
+ value >>= 16;
+ value &= 0xffff;
+ host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
+ }
+}
+
+static void pmecc_substitute(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t *partial_syn = host->pmecc_partial_syn;
+ const int cap = host->pmecc_corr_cap;
+ int16_t *si;
+ int i, j;
+
+ /* si[] holds the current syndrome values; each element belongs to
+ * the Galois field.
+ */
+ si = host->pmecc_si;
+
+ memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
+
+ /* Compute the 2t syndromes based on S(x) */
+ /* Odd syndromes */
+ for (i = 1; i < 2 * cap; i += 2) {
+ for (j = 0; j < host->pmecc_degree; j++) {
+ if (partial_syn[i] & (0x1 << j))
+ si[i] = readw(alpha_to + i * j) ^ si[i];
+ }
+ }
+ /* Even syndrome = (Odd syndrome) ** 2 */
+ for (i = 2, j = 1; j <= cap; i = ++j << 1) {
+ if (si[j] == 0) {
+ si[i] = 0;
+ } else {
+ int16_t tmp;
+
+ tmp = readw(index_of + si[j]);
+ tmp = (tmp * 2) % host->pmecc_cw_len;
+ si[i] = readw(alpha_to + tmp);
+ }
+ }
+}
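+
+/*
+ * The even-syndrome step above uses the GF(2^m) identity S_2j = (S_j)^2,
+ * computed through the log/antilog tables as
+ * alpha_to[(2 * index_of[S_j]) % cw_len].
+ */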
+
+/*
+ * This function defines a Berlekamp iterative procedure for
+ * finding the value of the error location polynomial.
+ * The input is si[], initialized by pmecc_substitute().
+ * The output is smu[][].
+ *
+ * This function is written according to chip datasheet Chapter:
+ * Find the Error Location Polynomial Sigma(x) of Section:
+ * Programmable Multibit ECC Control (PMECC).
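+ *
+ * Storage note (derived from the code below): smu is a flat
+ * (cap + 2) x (2 * cap + 1) table stored row by row; smu[mu * num + k],
+ * with num = 2 * cap + 1, is coefficient k of sigma at iteration mu.
+ * The final polynomial lands in row cap + 1.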
+ */
+static void pmecc_get_sigma(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+ int16_t *lmu = host->pmecc_lmu;
+ int16_t *si = host->pmecc_si;
+ int *mu = host->pmecc_mu;
+ int *dmu = host->pmecc_dmu; /* Discrepancy */
+ int *delta = host->pmecc_delta; /* Delta order */
+ int cw_len = host->pmecc_cw_len;
+ const int16_t cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int16_t __iomem *index_of = host->pmecc_index_of;
+ int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+ int i, j, k;
+ uint32_t dmu_0_count, tmp;
+ int16_t *smu = host->pmecc_smu;
+
+ /* index of largest delta */
+ int ro;
+ int largest;
+ int diff;
+
+ /* Init the Sigma(x) */
+ memset(smu, 0, sizeof(int16_t) * num * (cap + 2));
+
+ dmu_0_count = 0;
+
+ /* First Row */
+
+ /* Mu */
+ mu[0] = -1;
+
+ smu[0] = 1;
+
+ /* discrepancy set to 1 */
+ dmu[0] = 1;
+ /* polynomial order set to 0 */
+ lmu[0] = 0;
+ /* delta[0] = (mu[0] * 2 - lmu[0]) >> 1; */
+ delta[0] = -1;
+
+ /* Second Row */
+
+ /* Mu */
+ mu[1] = 0;
+ /* Sigma(x) set to 1 */
+ smu[num] = 1;
+
+ /* discrepancy set to S1 */
+ dmu[1] = si[1];
+
+ /* polynomial order set to 0 */
+ lmu[1] = 0;
+
+ /* delta[1] = (mu[1] * 2 - lmu[1]) >> 1; */
+ delta[1] = 0;
+
+ for (i = 1; i <= cap; i++) {
+ mu[i + 1] = i << 1;
+ /* Begin Computing Sigma (Mu+1) and L(mu) */
+ /* check if discrepancy is set to 0 */
+ if (dmu[i] == 0) {
+ dmu_0_count++;
+
+ tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
+ if ((cap - (lmu[i] >> 1) - 1) & 0x1)
+ tmp += 2;
+ else
+ tmp += 1;
+
+ if (dmu_0_count == tmp) {
+ for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+ smu[(cap + 1) * num + j] =
+ smu[i * num + j];
+
+ lmu[cap + 1] = lmu[i];
+ return;
+ }
+
+ /* copy polynomial */
+ for (j = 0; j <= lmu[i] >> 1; j++)
+ smu[(i + 1) * num + j] = smu[i * num + j];
+
+ /* copy previous polynomial order to the next */
+ lmu[i + 1] = lmu[i];
+ } else {
+ ro = 0;
+ largest = -1;
+ /* find largest delta with dmu != 0 */
+ for (j = 0; j < i; j++) {
+ if ((dmu[j]) && (delta[j] > largest)) {
+ largest = delta[j];
+ ro = j;
+ }
+ }
+
+ /* compute difference */
+ diff = (mu[i] - mu[ro]);
+
+ /* Compute degree of the new smu polynomial */
+ if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+ lmu[i + 1] = lmu[i];
+ else
+ lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+ /* Init smu[i+1] with 0 */
+ for (k = 0; k < num; k++)
+ smu[(i + 1) * num + k] = 0;
+
+ /* Compute smu[i+1] */
+ for (k = 0; k <= lmu[ro] >> 1; k++) {
+ int16_t a, b, c;
+
+ if (!(smu[ro * num + k] && dmu[i]))
+ continue;
+ a = readw(index_of + dmu[i]);
+ b = readw(index_of + dmu[ro]);
+ c = readw(index_of + smu[ro * num + k]);
+ tmp = a + (cw_len - b) + c;
+ a = readw(alpha_to + tmp % cw_len);
+ smu[(i + 1) * num + (k + diff)] = a;
+ }
+
+ for (k = 0; k <= lmu[i] >> 1; k++)
+ smu[(i + 1) * num + k] ^= smu[i * num + k];
+ }
+
+ /* End Computing Sigma (Mu+1) and L(mu) */
+ /* In either case compute delta */
+ delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+ /* Do not compute discrepancy for the last iteration */
+ if (i >= cap)
+ continue;
+
+ for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+ tmp = 2 * (i - 1);
+ if (k == 0) {
+ dmu[i + 1] = si[tmp + 3];
+ } else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+ int16_t a, b, c;
+ a = readw(index_of +
+ smu[(i + 1) * num + k]);
+ b = si[2 * (i - 1) + 3 - k];
+ c = readw(index_of + b);
+ tmp = a + c;
+ tmp %= cw_len;
+ dmu[i + 1] = readw(alpha_to + tmp) ^
+ dmu[i + 1];
+ }
+ }
+ }
+}
+
+static int pmecc_err_location(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+ const int cap = host->pmecc_corr_cap;
+ const int num = 2 * cap + 1;
+ int sector_size = host->pmecc_sector_size;
+ int err_nbr = 0; /* number of errors */
+ int roots_nbr; /* number of roots */
+ int i;
+ uint32_t val;
+ int16_t *smu = host->pmecc_smu;
+ int timeout = PMECC_MAX_TIMEOUT_US;
+
+ pmecc_writel(host->pmerrloc, eldis, PMERRLOC_DISABLE);
+
+ for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
+ pmecc_writel(host->pmerrloc, sigma[i],
+ smu[(cap + 1) * num + i]);
+ err_nbr++;
+ }
+
+ val = PMERRLOC_ELCFG_NUM_ERRORS(err_nbr - 1);
+ if (sector_size == 1024)
+ val |= PMERRLOC_ELCFG_SECTOR_1024;
+
+ pmecc_writel(host->pmerrloc, elcfg, val);
+ pmecc_writel(host->pmerrloc, elen,
+ sector_size * 8 + host->pmecc_degree * cap);
+
+ while (--timeout) {
+ if (pmecc_readl(host->pmerrloc, elisr) & PMERRLOC_CALC_DONE)
+ break;
+ WATCHDOG_RESET();
+ udelay(1);
+ }
+
+ if (!timeout) {
+ dev_err(mtd->dev,
+ "Timeout to calculate PMECC error location\n");
+ return -1;
+ }
+
+ roots_nbr = (pmecc_readl(host->pmerrloc, elisr) & PMERRLOC_ERR_NUM_MASK)
+ >> 8;
+ /* Number of roots == degree of smu hence <= cap */
+ if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
+ return err_nbr - 1;
+
+ /* Number of roots does not match the degree of smu;
+ * unable to correct errors */
+ return -1;
+}
+
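+/*
+ * On success pmecc_err_location() above returns the degree of sigma, i.e.
+ * the number of errors found; pmecc_correct_data() below then reads the
+ * error positions back from the PMERRLOC el[] registers and flips the
+ * offending bits.
+ */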
+static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
+ int sector_num, int extra_bytes, int err_nbr)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+ int i = 0;
+ int byte_pos, bit_pos, sector_size, pos;
+ uint32_t tmp;
+ uint8_t err_byte;
+
+ sector_size = host->pmecc_sector_size;
+
+ while (err_nbr) {
+ tmp = pmecc_readl(host->pmerrloc, el[i]) - 1;
+ byte_pos = tmp / 8;
+ bit_pos = tmp % 8;
+
+ if (byte_pos >= (sector_size + extra_bytes))
+ BUG(); /* should never happen */
+
+ if (byte_pos < sector_size) {
+ err_byte = *(buf + byte_pos);
+ *(buf + byte_pos) ^= (1 << bit_pos);
+
+ pos = sector_num * host->pmecc_sector_size + byte_pos;
+ dev_dbg(mtd->dev,
+ "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, *(buf + byte_pos));
+ } else {
+ /* Bit flip in OOB area */
+ tmp = sector_num * host->pmecc_bytes_per_sector
+ + (byte_pos - sector_size);
+ err_byte = ecc[tmp];
+ ecc[tmp] ^= (1 << bit_pos);
+
+ pos = tmp + nand_chip->ecc.layout->eccpos[0];
+ dev_dbg(mtd->dev,
+ "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+ pos, bit_pos, err_byte, ecc[tmp]);
+ }
+
+ i++;
+ err_nbr--;
+ }
+}
+
+static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
+ u8 *ecc)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+ int i, err_nbr, eccbytes;
+ uint8_t *buf_pos;
+
+ /* From SAMA5D4 on, the PMECC IP can correct an all-0xff (erased) page */
+ if (host->pmecc_version >= PMECC_VERSION_SAMA5D4)
+ goto normal_check;
+
+ eccbytes = nand_chip->ecc.bytes;
+ for (i = 0; i < eccbytes; i++)
+ if (ecc[i] != 0xff)
+ goto normal_check;
+ /* Erased page, return OK */
+ return 0;
+
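+ /*
+ * Each set bit i of pmecc_stat (the PMECC ISR value) flags sector i
+ * as containing errors; only flagged sectors are run through the
+ * error-location sequence below.
+ */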
+normal_check:
+ for (i = 0; i < host->pmecc_sector_number; i++) {
+ err_nbr = 0;
+ if (pmecc_stat & 0x1) {
+ buf_pos = buf + i * host->pmecc_sector_size;
+
+ pmecc_gen_syndrome(mtd, i);
+ pmecc_substitute(mtd);
+ pmecc_get_sigma(mtd);
+
+ err_nbr = pmecc_err_location(mtd);
+ if (err_nbr == -1) {
+ dev_err(mtd->dev, "PMECC: Too many errors\n");
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ } else {
+ pmecc_correct_data(mtd, buf_pos, ecc, i,
+ host->pmecc_bytes_per_sector, err_nbr);
+ mtd->ecc_stats.corrected += err_nbr;
+ }
+ }
+ pmecc_stat >>= 1;
+ }
+
+ return 0;
+}
+
+static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ struct atmel_nand_host *host = nand_get_controller_data(chip);
+ int eccsize = chip->ecc.size;
+ uint8_t *oob = chip->oob_poi;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t stat;
+ int timeout = PMECC_MAX_TIMEOUT_US;
+
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_RST);
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DISABLE);
+ pmecc_writel(host->pmecc, cfg, ((pmecc_readl(host->pmecc, cfg))
+ & ~PMECC_CFG_WRITE_OP) | PMECC_CFG_AUTO_ENABLE);
+
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_ENABLE);
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DATA);
+
+ chip->read_buf(mtd, buf, eccsize);
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ while (--timeout) {
+ if (!(pmecc_readl(host->pmecc, sr) & PMECC_SR_BUSY))
+ break;
+ WATCHDOG_RESET();
+ udelay(1);
+ }
+
+ if (!timeout) {
+ dev_err(mtd->dev, "Timeout to read PMECC page\n");
+ return -1;
+ }
+
+ stat = pmecc_readl(host->pmecc, isr);
+ if (stat && pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]))
+ return -EBADMSG;
+
+ return 0;
+}
+
+static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct atmel_nand_host *host = nand_get_controller_data(chip);
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ int i, j;
+ int timeout = PMECC_MAX_TIMEOUT_US;
+
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_RST);
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DISABLE);
+
+ pmecc_writel(host->pmecc, cfg, (pmecc_readl(host->pmecc, cfg) |
+ PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE);
+
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_ENABLE);
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DATA);
+
+ chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+
+ while (--timeout) {
+ if (!(pmecc_readl(host->pmecc, sr) & PMECC_SR_BUSY))
+ break;
+ WATCHDOG_RESET();
+ udelay(1);
+ }
+
+ if (!timeout) {
+ dev_err(mtd->dev,
+ "Timeout to read PMECC status, fail to write PMECC in oob\n");
+ goto out;
+ }
+
+ for (i = 0; i < host->pmecc_sector_number; i++) {
+ for (j = 0; j < host->pmecc_bytes_per_sector; j++) {
+ int pos;
+
+ pos = i * host->pmecc_bytes_per_sector + j;
+ chip->oob_poi[eccpos[pos]] =
+ pmecc_readb(host->pmecc, ecc_port[i].ecc[j]);
+ }
+ }
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+out:
+ return 0;
+}
+
+static void atmel_pmecc_core_init(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+ uint32_t val = 0;
+ struct nand_ecclayout *ecc_layout;
+
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_RST);
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DISABLE);
+
+ switch (host->pmecc_corr_cap) {
+ case 2:
+ val = PMECC_CFG_BCH_ERR2;
+ break;
+ case 4:
+ val = PMECC_CFG_BCH_ERR4;
+ break;
+ case 8:
+ val = PMECC_CFG_BCH_ERR8;
+ break;
+ case 12:
+ val = PMECC_CFG_BCH_ERR12;
+ break;
+ case 24:
+ val = PMECC_CFG_BCH_ERR24;
+ break;
+ case 32:
+ val = PMECC_CFG_BCH_ERR32;
+ break;
+ }
+
+ if (host->pmecc_sector_size == 512)
+ val |= PMECC_CFG_SECTOR512;
+ else if (host->pmecc_sector_size == 1024)
+ val |= PMECC_CFG_SECTOR1024;
+
+ switch (host->pmecc_sector_number) {
+ case 1:
+ val |= PMECC_CFG_PAGE_1SECTOR;
+ break;
+ case 2:
+ val |= PMECC_CFG_PAGE_2SECTORS;
+ break;
+ case 4:
+ val |= PMECC_CFG_PAGE_4SECTORS;
+ break;
+ case 8:
+ val |= PMECC_CFG_PAGE_8SECTORS;
+ break;
+ }
+
+ val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
+ | PMECC_CFG_AUTO_DISABLE);
+ pmecc_writel(host->pmecc, cfg, val);
+
+ ecc_layout = nand_chip->ecc.layout;
+ pmecc_writel(host->pmecc, sarea, mtd->oobsize - 1);
+ pmecc_writel(host->pmecc, saddr, ecc_layout->eccpos[0]);
+ pmecc_writel(host->pmecc, eaddr,
+ ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
+ /* See datasheet about PMECC Clock Control Register */
+ pmecc_writel(host->pmecc, clk, PMECC_CLK_133MHZ);
+ pmecc_writel(host->pmecc, idr, 0xff);
+ pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_ENABLE);
+}
+
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+/*
+ * pmecc_choose_ecc - get the ECC requirement from the ONFI parameters.
+ * If pmecc_corr_cap or pmecc_sector_size is 0, fill it in from the
+ * ONFI ECC parameters.
+ * @host: pointer to an atmel_nand_host structure.
+ * if host->pmecc_corr_cap is 0, set it from the ONFI ecc_bits.
+ * if host->pmecc_sector_size is 0, set it from the ONFI sector_size.
+ * @chip: pointer to a nand_chip structure.
+ * @cap: stores the ONFI ECC correction capability in bits.
+ * @sector_size: the number of bytes over which ONFI requires @cap bits
+ * of correction.
+ *
+ * Return 0 on success, otherwise a negative error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+ struct nand_chip *chip,
+ int *cap, int *sector_size)
+{
+ /* Get ECC requirement from ONFI parameters */
+ *cap = *sector_size = 0;
+ if (chip->onfi_version) {
+ *cap = chip->ecc_strength_ds;
+ *sector_size = chip->ecc_step_ds;
+ pr_debug("ONFI params, minimum required ECC: %d bits in %d bytes\n",
+ *cap, *sector_size);
+ }
+
+ if (*cap == 0 && *sector_size == 0) {
+ /* Non-ONFI compliant */
+ dev_info(chip->mtd.dev,
+ "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes\n");
+ *cap = 2;
+ *sector_size = 512;
+ }
+
+ /* If the board config header doesn't specify them, use the ONFI parameters */
+ if (host->pmecc_corr_cap == 0) {
+ /* use the closest supported ECC strength (rounding up) */
+ if (*cap <= 2)
+ host->pmecc_corr_cap = 2;
+ else if (*cap <= 4)
+ host->pmecc_corr_cap = 4;
+ else if (*cap <= 8)
+ host->pmecc_corr_cap = 8;
+ else if (*cap <= 12)
+ host->pmecc_corr_cap = 12;
+ else if (*cap <= 24)
+ host->pmecc_corr_cap = 24;
+ else
+#ifdef CONFIG_SAMA5D2
+ host->pmecc_corr_cap = 32;
+#else
+ host->pmecc_corr_cap = 24;
+#endif
+ }
+ if (host->pmecc_sector_size == 0) {
+ /* use the closest supported sector size (rounding down) */
+ if (*sector_size >= 1024)
+ host->pmecc_sector_size = 1024;
+ else if (*sector_size >= 512)
+ host->pmecc_sector_size = 512;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
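+
+/*
+ * Example (a sketch; actual values depend on the flash): an ONFI chip
+ * reporting a 4-bit per 512-byte requirement ends up with
+ * pmecc_corr_cap = 4 and pmecc_sector_size = 512, unless CONFIG_PMECC_CAP
+ * or CONFIG_PMECC_SECTOR_SIZE overrides them.
+ */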
+#endif
+
+#if defined(NO_GALOIS_TABLE_IN_ROM)
+static uint16_t *pmecc_galois_table;
+static inline int deg(unsigned int poly)
+{
+ /* polynomial degree is the most-significant bit index */
+ return fls(poly) - 1;
+}
+
+static int build_gf_tables(int mm, unsigned int poly,
+ int16_t *index_of, int16_t *alpha_to)
+{
+ unsigned int i, x = 1;
+ const unsigned int k = 1 << deg(poly);
+ unsigned int nn = (1 << mm) - 1;
+
+ /* primitive polynomial must be of degree m */
+ if (k != (1u << mm))
+ return -EINVAL;
+
+ for (i = 0; i < nn; i++) {
+ alpha_to[i] = x;
+ index_of[x] = i;
+ if (i && (x == 1))
+ /* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+ return -EINVAL;
+ x <<= 1;
+ if (x & k)
+ x ^= poly;
+ }
+
+ alpha_to[nn] = 1;
+ index_of[0] = 0;
+
+ return 0;
+}
+
+static uint16_t *create_lookup_table(int sector_size)
+{
+ int degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 :
+ PMECC_GF_DIMENSION_14;
+ unsigned int poly = (sector_size == 512) ?
+ PMECC_GF_13_PRIMITIVE_POLY :
+ PMECC_GF_14_PRIMITIVE_POLY;
+ int table_size = (sector_size == 512) ?
+ PMECC_INDEX_TABLE_SIZE_512 :
+ PMECC_INDEX_TABLE_SIZE_1024;
+
+ int16_t *addr = kzalloc(2 * table_size * sizeof(uint16_t), GFP_KERNEL);
+ if (addr && build_gf_tables(degree, poly, addr, addr + table_size))
+ return NULL;
+
+ return (uint16_t *)addr;
+}
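+
+/*
+ * For the 512-byte sector case this builds GF(2^13) with primitive
+ * polynomial 0x201b: index_of and alpha_to each hold 2^13 = 8192 entries
+ * (PMECC_INDEX_TABLE_SIZE_512), index table first, mirroring the layout
+ * of the tables in ROM.
+ */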
+#endif
+
+static int atmel_pmecc_nand_init_params(struct nand_chip *nand,
+ struct mtd_info *mtd)
+{
+ struct atmel_nand_host *host;
+ int cap, sector_size;
+
+ host = &pmecc_host;
+ nand_set_controller_data(nand, host);
+
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.calculate = NULL;
+ nand->ecc.correct = NULL;
+ nand->ecc.hwctl = NULL;
+
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+ host->pmecc_corr_cap = host->pmecc_sector_size = 0;
+
+#ifdef CONFIG_PMECC_CAP
+ host->pmecc_corr_cap = CONFIG_PMECC_CAP;
+#endif
+#ifdef CONFIG_PMECC_SECTOR_SIZE
+ host->pmecc_sector_size = CONFIG_PMECC_SECTOR_SIZE;
+#endif
+ /* Get the ECC requirement from the ONFI parameters. If CONFIG_PMECC_CAP
+ * or CONFIG_PMECC_SECTOR_SIZE is not defined, use the ecc_bits and
+ * sector_size reported by ONFI.
+ */
+ if (pmecc_choose_ecc(host, nand, &cap, &sector_size)) {
+ dev_err(mtd->dev,
+ "Required ECC %d bits in %d bytes not supported!\n",
+ cap, sector_size);
+ return -EINVAL;
+ }
+
+ if (cap > host->pmecc_corr_cap)
+ dev_info(mtd->dev,
+ "WARNING: Using different ecc correct bits(%d bit) from Nand ONFI ECC reqirement (%d bit).\n",
+ host->pmecc_corr_cap, cap);
+ if (sector_size < host->pmecc_sector_size)
+ dev_info(mtd->dev,
+ "WARNING: Using different ecc correct sector size (%d bytes) from Nand ONFI ECC reqirement (%d bytes).\n",
+ host->pmecc_sector_size, sector_size);
+#else /* CONFIG_SYS_NAND_ONFI_DETECTION */
+ host->pmecc_corr_cap = CONFIG_PMECC_CAP;
+ host->pmecc_sector_size = CONFIG_PMECC_SECTOR_SIZE;
+#endif
+
+ cap = host->pmecc_corr_cap;
+ sector_size = host->pmecc_sector_size;
+
+ /* TODO: check whether cap and sector_size are valid */
+#if defined(NO_GALOIS_TABLE_IN_ROM)
+ /*
+ * pmecc_rom_base points at the beginning of the Galois field table,
+ * so the index table offset is simply 0.
+ */
+ host->pmecc_index_table_offset = 0;
+#else
+ if (host->pmecc_sector_size == 512)
+ host->pmecc_index_table_offset = ATMEL_PMECC_INDEX_OFFSET_512;
+ else
+ host->pmecc_index_table_offset = ATMEL_PMECC_INDEX_OFFSET_1024;
+#endif
+
+ pr_debug("Initialize PMECC params, cap: %d, sector: %d\n",
+ cap, sector_size);
+
+ host->pmecc = (struct pmecc_regs __iomem *) ATMEL_BASE_PMECC;
+ host->pmerrloc = (struct pmecc_errloc_regs __iomem *)
+ ATMEL_BASE_PMERRLOC;
+#if defined(NO_GALOIS_TABLE_IN_ROM)
+ pmecc_galois_table = create_lookup_table(host->pmecc_sector_size);
+ if (!pmecc_galois_table) {
+ dev_err(mtd->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ host->pmecc_rom_base = (void __iomem *)pmecc_galois_table;
+#else
+ host->pmecc_rom_base = (void __iomem *) ATMEL_BASE_ROM;
+#endif
+
+ /* ECC is calculated for the whole page (1 step) */
+ nand->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 2048:
+ case 4096:
+ case 8192:
+ host->pmecc_degree = (sector_size == 512) ?
+ PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
+ host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
+ host->pmecc_sector_number = mtd->writesize / sector_size;
+ host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes(
+ cap, sector_size);
+ host->pmecc_alpha_to = pmecc_get_alpha_to(host);
+ host->pmecc_index_of = host->pmecc_rom_base +
+ host->pmecc_index_table_offset;
+
+ nand->ecc.steps = 1;
+ nand->ecc.bytes = host->pmecc_bytes_per_sector *
+ host->pmecc_sector_number;
+
+ if (nand->ecc.bytes > MTD_MAX_ECCPOS_ENTRIES_LARGE) {
+ dev_err(mtd->dev,
+ "too large eccpos entries. max support ecc.bytes is %d\n",
+ MTD_MAX_ECCPOS_ENTRIES_LARGE);
+ return -EINVAL;
+ }
+
+ if (nand->ecc.bytes > mtd->oobsize - PMECC_OOB_RESERVED_BYTES) {
+ dev_err(mtd->dev, "No room for ECC bytes\n");
+ return -EINVAL;
+ }
+ pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
+ mtd->oobsize,
+ nand->ecc.bytes);
+ nand->ecc.layout = &atmel_pmecc_oobinfo;
+ break;
+ case 512:
+ case 1024:
+ /* TODO */
+ dev_err(mtd->dev,
+ "Unsupported page size for PMECC, use Software ECC\n");
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->ecc.read_page = NULL;
+ nand->ecc.postpad = 0;
+ nand->ecc.prepad = 0;
+ nand->ecc.bytes = 0;
+ return 0;
+ }
+
+ /* Allocate data for PMECC computation */
+ if (pmecc_data_alloc(host)) {
+ dev_err(mtd->dev,
+ "Cannot allocate memory for PMECC computation!\n");
+ return -ENOMEM;
+ }
+
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+ nand->ecc.read_page = atmel_nand_pmecc_read_page;
+ nand->ecc.write_page = atmel_nand_pmecc_write_page;
+ nand->ecc.strength = cap;
+
+ /* Check the PMECC ip version */
+ host->pmecc_version = pmecc_readl(host->pmerrloc, version);
+ dev_dbg(mtd->dev, "PMECC IP version is: %x\n", host->pmecc_version);
+
+ atmel_pmecc_core_init(mtd);
+
+ return 0;
+}
+
+#else
+
+/* oob layout for large page size
+ * bad block info is on bytes 0 and 1
+ * the bytes have to be consecutive to avoid issuing
+ * several NAND_CMD_RNDOUT commands during read
+ */
+static struct nand_ecclayout atmel_oobinfo_large = {
+ .eccbytes = 4,
+ .eccpos = {60, 61, 62, 63},
+ .oobfree = {
+ {2, 58}
+ },
+};
+
+/* oob layout for small page size
+ * bad block info is on bytes 4 and 5
+ * the bytes have to be consecutive to avoid issuing
+ * several NAND_CMD_RNDOUT commands during read
+ */
+static struct nand_ecclayout atmel_oobinfo_small = {
+ .eccbytes = 4,
+ .eccpos = {0, 1, 2, 3},
+ .oobfree = {
+ {6, 10}
+ },
+};
+
+/*
+ * Calculate HW ECC
+ *
+ * function called after a write
+ *
+ * mtd: MTD block structure
+ * dat: raw data (unused)
+ * ecc_code: buffer for ECC
+ */
+static int atmel_nand_calculate(struct mtd_info *mtd,
+ const u_char *dat, unsigned char *ecc_code)
+{
+ unsigned int ecc_value;
+
+ /* get the first 2 ECC bytes */
+ ecc_value = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR);
+
+ ecc_code[0] = ecc_value & 0xFF;
+ ecc_code[1] = (ecc_value >> 8) & 0xFF;
+
+ /* get the last 2 ECC bytes */
+ ecc_value = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, NPR) & ATMEL_ECC_NPARITY;
+
+ ecc_code[2] = ecc_value & 0xFF;
+ ecc_code[3] = (ecc_value >> 8) & 0xFF;
+
+ return 0;
+}
+
+/*
+ * HW ECC read page function
+ *
+ * mtd: mtd info structure
+ * chip: nand chip info structure
+ * buf: buffer to store read data
+ * oob_required: caller expects OOB data read to chip->oob_poi
+ */
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ uint8_t *ecc_pos;
+ int stat;
+
+ /* read the page */
+ chip->read_buf(mtd, p, eccsize);
+
+ /* move to ECC position if needed */
+ if (eccpos[0] != 0) {
+ /* This only works on large pages because the ECC
+ * controller waits for NAND_CMD_RNDOUTSTART after
+ * the NAND_CMD_RNDOUT.
+ * For small pages eccpos[0] == 0 anyway.
+ */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + eccpos[0], -1);
+ }
+
+ /* the ECC controller needs to read the ECC just after the data */
+ ecc_pos = oob + eccpos[0];
+ chip->read_buf(mtd, ecc_pos, eccbytes);
+
+ /* check if there's an error */
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+
+ /* get back to oob start (end of page) */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+
+ /* read the oob */
+ chip->read_buf(mtd, oob, mtd->oobsize);
+
+ return 0;
+}
+
+/*
+ * HW ECC Correction
+ *
+ * function called after a read
+ *
+ * mtd: MTD block structure
+ * dat: raw data read from the chip
+ * read_ecc: ECC from the chip (unused)
+ * isnull: unused
+ *
+ * Detect and correct a 1 bit error for a page
+ */
+static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ unsigned int ecc_status;
+ unsigned int ecc_word, ecc_bit;
+
+ /* get the status from the Status Register */
+ ecc_status = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, SR);
+
+ /* if there's no error */
+ if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
+ return 0;
+
+ /* get error bit offset (4 bits) */
+ ecc_bit = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR) & ATMEL_ECC_BITADDR;
+ /* get word address (12 bits) */
+ ecc_word = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR) & ATMEL_ECC_WORDADDR;
+ ecc_word >>= 4;
+
+ /* if there are multiple errors */
+ if (ecc_status & ATMEL_ECC_MULERR) {
+ /* check if it is a freshly erased block
+ * (filled with 0xff) */
+ if ((ecc_bit == ATMEL_ECC_BITADDR)
+ && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
+ /* the block has just been erased, return OK */
+ return 0;
+ }
+ /* it doesn't seem to be a freshly
+ * erased block.
+ * We can't correct so many errors */
+ dev_warn(mtd->dev,
+ "multiple errors detected. Unable to correct.\n");
+ return -EBADMSG;
+ }
+
+ /* if there's a single bit error: we can correct it */
+ if (ecc_status & ATMEL_ECC_ECCERR) {
+ /* there's nothing much to do here.
+ * the bit error is on the ECC itself.
+ */
+ dev_warn(mtd->dev,
+ "one bit error on ECC code. Nothing to correct\n");
+ return 0;
+ }
+
+ dev_warn(mtd->dev,
+ "one bit error on data. (word offset in the page : 0x%x bit offset : 0x%x)\n",
+ ecc_word, ecc_bit);
+ /* correct the error */
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* 16 bits words */
+ ((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
+ } else {
+ /* 8 bits words */
+ dat[ecc_word] ^= (1 << ecc_bit);
+ }
+ dev_warn(mtd->dev, "error corrected\n");
+ return 1;
+}
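+
+/*
+ * Worked example (hypothetical register value): PR = 0x0153 gives
+ * ecc_bit = 0x3 and ecc_word = 0x15, so bit 3 of data word 0x15 is
+ * flipped back.
+ */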
+
+/*
+ * Enable HW ECC : unused on most chips
+ */
+static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+}
+
+int atmel_hwecc_nand_init_param(struct nand_chip *nand, struct mtd_info *mtd)
+{
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.calculate = atmel_nand_calculate;
+ nand->ecc.correct = atmel_nand_correct;
+ nand->ecc.hwctl = atmel_nand_hwctl;
+ nand->ecc.read_page = atmel_nand_read_page;
+ nand->ecc.bytes = 4;
+ nand->ecc.strength = 4;
+
+ if (nand->ecc.mode == NAND_ECC_HW) {
+ /* ECC is calculated for the whole page (1 step) */
+ nand->ecc.size = mtd->writesize;
+
+ /* set ECC page size and oob layout */
+ switch (mtd->writesize) {
+ case 512:
+ nand->ecc.layout = &atmel_oobinfo_small;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR,
+ ATMEL_ECC_PAGESIZE_528);
+ break;
+ case 1024:
+ nand->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR,
+ ATMEL_ECC_PAGESIZE_1056);
+ break;
+ case 2048:
+ nand->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR,
+ ATMEL_ECC_PAGESIZE_2112);
+ break;
+ case 4096:
+ nand->ecc.layout = &atmel_oobinfo_large;
+ ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR,
+ ATMEL_ECC_PAGESIZE_4224);
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->ecc.calculate = NULL;
+ nand->ecc.correct = NULL;
+ nand->ecc.hwctl = NULL;
+ nand->ecc.read_page = NULL;
+ nand->ecc.postpad = 0;
+ nand->ecc.prepad = 0;
+ nand->ecc.bytes = 0;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_ATMEL_NAND_HW_PMECC */
+
+#endif /* CONFIG_ATMEL_NAND_HWECC */
+
+static void at91_nand_hwcontrol(struct mtd_info *mtd,
+ int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ ulong IO_ADDR_W = (ulong) this->IO_ADDR_W;
+ IO_ADDR_W &= ~(CONFIG_SYS_NAND_MASK_ALE
+ | CONFIG_SYS_NAND_MASK_CLE);
+
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_MASK_CLE;
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_MASK_ALE;
+
+#ifdef CONFIG_SYS_NAND_ENABLE_PIN
+ at91_set_gpio_value(CONFIG_SYS_NAND_ENABLE_PIN,
+ !(ctrl & NAND_NCE));
+#endif
+ this->IO_ADDR_W = (void *) IO_ADDR_W;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+#ifdef CONFIG_SYS_NAND_READY_PIN
+static int at91_nand_ready(struct mtd_info *mtd)
+{
+ return at91_get_gpio_value(CONFIG_SYS_NAND_READY_PIN);
+}
+#endif
+
+#ifdef CONFIG_SPL_BUILD
+/* The following code is for SPL */
+static struct mtd_info *mtd;
+static struct nand_chip nand_chip;
+
+static int nand_command(int block, int page, uint32_t offs, u8 cmd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT;
+ void (*hwctrl)(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl) = this->cmd_ctrl;
+
+ while (!this->dev_ready(mtd))
+ ;
+
+ if (cmd == NAND_CMD_READOOB) {
+ offs += CONFIG_SYS_NAND_PAGE_SIZE;
+ cmd = NAND_CMD_READ0;
+ }
+
+ hwctrl(mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+
+ if ((this->options & NAND_BUSWIDTH_16) && !nand_opcode_8bits(cmd))
+ offs >>= 1;
+
+ hwctrl(mtd, offs & 0xff, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ hwctrl(mtd, (offs >> 8) & 0xff, NAND_CTRL_ALE);
+ hwctrl(mtd, (page_addr & 0xff), NAND_CTRL_ALE);
+ hwctrl(mtd, ((page_addr >> 8) & 0xff), NAND_CTRL_ALE);
+#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE
+ hwctrl(mtd, (page_addr >> 16) & 0x0f, NAND_CTRL_ALE);
+#endif
+ hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ hwctrl(mtd, NAND_CMD_READSTART, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ while (!this->dev_ready(mtd))
+ ;
+
+ return 0;
+}
+
+static int nand_is_bad_block(int block)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ nand_command(block, 0, CONFIG_SYS_NAND_BAD_BLOCK_POS, NAND_CMD_READOOB);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ if (readw(this->IO_ADDR_R) != 0xffff)
+ return 1;
+ } else {
+ if (readb(this->IO_ADDR_R) != 0xff)
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_SPL_NAND_ECC
+static int nand_ecc_pos[] = CONFIG_SYS_NAND_ECCPOS;
+#define ECCSTEPS (CONFIG_SYS_NAND_PAGE_SIZE / \
+ CONFIG_SYS_NAND_ECCSIZE)
+#define ECCTOTAL (ECCSTEPS * CONFIG_SYS_NAND_ECCBYTES)
+
+static int nand_read_page(int block, int page, void *dst)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u_char ecc_calc[ECCTOTAL];
+ u_char ecc_code[ECCTOTAL];
+ u_char oob_data[CONFIG_SYS_NAND_OOBSIZE];
+ int eccsize = CONFIG_SYS_NAND_ECCSIZE;
+ int eccbytes = CONFIG_SYS_NAND_ECCBYTES;
+ int eccsteps = ECCSTEPS;
+ int i;
+ uint8_t *p = dst;
+
+ nand_command(block, page, 0, NAND_CMD_READ0);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ if (this->ecc.mode != NAND_ECC_SOFT)
+ this->ecc.hwctl(mtd, NAND_ECC_READ);
+ this->read_buf(mtd, p, eccsize);
+ this->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+ this->read_buf(mtd, oob_data, CONFIG_SYS_NAND_OOBSIZE);
+
+ for (i = 0; i < ECCTOTAL; i++)
+ ecc_code[i] = oob_data[nand_ecc_pos[i]];
+
+ eccsteps = ECCSTEPS;
+ p = dst;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ this->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+
+ return 0;
+}
+
+int spl_nand_erase_one(int block, int page)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ void (*hwctrl)(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl) = this->cmd_ctrl;
+ int page_addr;
+
+ if (nand_chip.select_chip)
+ nand_chip.select_chip(mtd, 0);
+
+ page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT;
+ hwctrl(mtd, NAND_CMD_ERASE1, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ /* Row address */
+ hwctrl(mtd, (page_addr & 0xff), NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ hwctrl(mtd, ((page_addr >> 8) & 0xff),
+ NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE
+ /* One more address cycle for devices > 128MiB */
+ hwctrl(mtd, (page_addr >> 16) & 0x0f,
+ NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+#endif
+ hwctrl(mtd, NAND_CMD_ERASE2, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+
+ while (!this->dev_ready(mtd))
+ ;
+
+ nand_deselect();
+
+ return 0;
+}
+#else
+static int nand_read_page(int block, int page, void *dst)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ nand_command(block, page, 0, NAND_CMD_READ0);
+ atmel_nand_pmecc_read_page(mtd, this, dst, 0, page);
+
+ return 0;
+}
+#endif /* CONFIG_SPL_NAND_ECC */
+
+int at91_nand_wait_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ udelay(this->chip_delay);
+
+ return 1;
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+ int ret = 0;
+
+ nand->ecc.mode = NAND_ECC_SOFT;
+#ifdef CONFIG_SYS_NAND_DBW_16
+ nand->options = NAND_BUSWIDTH_16;
+ nand->read_buf = nand_read_buf16;
+#else
+ nand->read_buf = nand_read_buf;
+#endif
+ nand->cmd_ctrl = at91_nand_hwcontrol;
+#ifdef CONFIG_SYS_NAND_READY_PIN
+ nand->dev_ready = at91_nand_ready;
+#else
+ nand->dev_ready = at91_nand_wait_ready;
+#endif
+ nand->chip_delay = 20;
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+ nand->bbt_options |= NAND_BBT_USE_FLASH;
+#endif
+
+#ifdef CONFIG_ATMEL_NAND_HWECC
+#ifdef CONFIG_ATMEL_NAND_HW_PMECC
+ ret = atmel_pmecc_nand_init_params(nand, mtd);
+#endif
+#endif
+
+ return ret;
+}
+
+void nand_init(void)
+{
+ mtd = nand_to_mtd(&nand_chip);
+ mtd->writesize = CONFIG_SYS_NAND_PAGE_SIZE;
+ mtd->oobsize = CONFIG_SYS_NAND_OOBSIZE;
+ nand_chip.IO_ADDR_R = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ nand_chip.IO_ADDR_W = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ board_nand_init(&nand_chip);
+
+#ifdef CONFIG_SPL_NAND_ECC
+ if (nand_chip.ecc.mode == NAND_ECC_SOFT) {
+ nand_chip.ecc.calculate = nand_calculate_ecc;
+ nand_chip.ecc.correct = nand_correct_data;
+ }
+#endif
+
+ if (nand_chip.select_chip)
+ nand_chip.select_chip(mtd, 0);
+}
+
+void nand_deselect(void)
+{
+ if (nand_chip.select_chip)
+ nand_chip.select_chip(mtd, -1);
+}
+
+#include "nand_spl_loaders.c"
+
+#else
+
+#ifndef CONFIG_SYS_NAND_BASE_LIST
+#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE }
+#endif
+static struct nand_chip nand_chip[CONFIG_SYS_MAX_NAND_DEVICE];
+static ulong base_addr[CONFIG_SYS_MAX_NAND_DEVICE] = CONFIG_SYS_NAND_BASE_LIST;
+
+int atmel_nand_chip_init(int devnum, ulong base_addr)
+{
+ int ret;
+ struct nand_chip *nand = &nand_chip[devnum];
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
+ nand->IO_ADDR_R = nand->IO_ADDR_W = (void __iomem *)base_addr;
+
+#ifdef CONFIG_NAND_ECC_BCH
+ nand->ecc.mode = NAND_ECC_SOFT_BCH;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif
+#ifdef CONFIG_SYS_NAND_DBW_16
+ nand->options = NAND_BUSWIDTH_16;
+#endif
+ nand->cmd_ctrl = at91_nand_hwcontrol;
+#ifdef CONFIG_SYS_NAND_READY_PIN
+ nand->dev_ready = at91_nand_ready;
+#endif
+ nand->chip_delay = 75;
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+ nand->bbt_options |= NAND_BBT_USE_FLASH;
+#endif
+
+ ret = nand_scan_ident(mtd, CONFIG_SYS_NAND_MAX_CHIPS, NULL);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_ATMEL_NAND_HWECC
+#ifdef CONFIG_ATMEL_NAND_HW_PMECC
+ ret = atmel_pmecc_nand_init_params(nand, mtd);
+#else
+ ret = atmel_hwecc_nand_init_param(nand, mtd);
+#endif
+ if (ret)
+ return ret;
+#endif
+
+ ret = nand_scan_tail(mtd);
+ if (!ret)
+ nand_register(devnum, mtd);
+
+ return ret;
+}
+
+void board_nand_init(void)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++)
+ if (atmel_nand_chip_init(i, base_addr[i]))
+ log_err("atmel_nand: failed to initialize chip #%d\n", i);
+}
+#endif /* CONFIG_SPL_BUILD */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/atmel_nand_ecc.h b/roms/u-boot/drivers/mtd/nand/raw/atmel_nand_ecc.h
new file mode 100644
index 000000000..05eeedb3f
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/atmel_nand_ecc.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Error Corrected Code Controller (ECC) - System peripherals registers.
+ * Based on AT91SAM9260 datasheet revision B.
+ */
+
+#ifndef ATMEL_NAND_ECC_H
+#define ATMEL_NAND_ECC_H
+
+#define ATMEL_ECC_CR 0x00 /* Control register */
+#define ATMEL_ECC_RST (1 << 0) /* Reset parity */
+
+#define ATMEL_ECC_MR 0x04 /* Mode register */
+#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */
+#define ATMEL_ECC_PAGESIZE_528 (0)
+#define ATMEL_ECC_PAGESIZE_1056 (1)
+#define ATMEL_ECC_PAGESIZE_2112 (2)
+#define ATMEL_ECC_PAGESIZE_4224 (3)
+
+#define ATMEL_ECC_SR 0x08 /* Status register */
+#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */
+#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */
+#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */
+
+#define ATMEL_ECC_PR 0x0c /* Parity register */
+#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */
+#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */
+
+#define ATMEL_ECC_NPR 0x10 /* NParity register */
+#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
+
+/* Register access macros for PMECC */
+#define pmecc_readl(addr, reg) \
+ readl(&addr->reg)
+
+#define pmecc_readb(addr, reg) \
+ readb(&addr->reg)
+
+#define pmecc_writel(addr, reg, value) \
+ writel((value), &addr->reg)
+
+/* PMECC Register Definitions */
+#define PMECC_MAX_SECTOR_NUM 8
+struct pmecc_regs {
+ u32 cfg; /* 0x00 PMECC Configuration Register */
+ u32 sarea; /* 0x04 PMECC Spare Area Size Register */
+ u32 saddr; /* 0x08 PMECC Start Address Register */
+ u32 eaddr; /* 0x0C PMECC End Address Register */
+ u32 clk; /* 0x10 PMECC Clock Control Register */
+ u32 ctrl; /* 0x14 PMECC Control Register */
+ u32 sr; /* 0x18 PMECC Status Register */
+ u32 ier; /* 0x1C PMECC Interrupt Enable Register */
+ u32 idr; /* 0x20 PMECC Interrupt Disable Register */
+ u32 imr; /* 0x24 PMECC Interrupt Mask Register */
+ u32 isr; /* 0x28 PMECC Interrupt Status Register */
+ u32 reserved0[5]; /* 0x2C-0x3C Reserved */
+
+ /* 0x40 + sector_num * (0x40), Redundancy Registers */
+ struct {
+#ifdef CONFIG_SAMA5D2
+ u8 ecc[56]; /* PMECC Generated Redundancy Byte Per Sector */
+ u32 reserved1[2];
+#else
+ u8 ecc[44]; /* PMECC Generated Redundancy Byte Per Sector */
+ u32 reserved1[5];
+#endif
+ } ecc_port[PMECC_MAX_SECTOR_NUM];
+
+ /* 0x240 + sector_num * (0x40) Remainder Registers */
+ struct {
+#ifdef CONFIG_SAMA5D2
+ u32 rem[16];
+#else
+ u32 rem[12];
+ u32 reserved2[4];
+#endif
+ } rem_port[PMECC_MAX_SECTOR_NUM];
+ u32 reserved3[16]; /* 0x440-0x47C Reserved */
+};
+
+/* For PMECC Configuration Register */
+#define PMECC_CFG_BCH_ERR2 (0 << 0)
+#define PMECC_CFG_BCH_ERR4 (1 << 0)
+#define PMECC_CFG_BCH_ERR8 (2 << 0)
+#define PMECC_CFG_BCH_ERR12 (3 << 0)
+#define PMECC_CFG_BCH_ERR24 (4 << 0)
+#define PMECC_CFG_BCH_ERR32 (5 << 0)
+
+#define PMECC_CFG_SECTOR512 (0 << 4)
+#define PMECC_CFG_SECTOR1024 (1 << 4)
+
+#define PMECC_CFG_PAGE_1SECTOR (0 << 8)
+#define PMECC_CFG_PAGE_2SECTORS (1 << 8)
+#define PMECC_CFG_PAGE_4SECTORS (2 << 8)
+#define PMECC_CFG_PAGE_8SECTORS (3 << 8)
+
+#define PMECC_CFG_READ_OP (0 << 12)
+#define PMECC_CFG_WRITE_OP (1 << 12)
+
+#define PMECC_CFG_SPARE_ENABLE (1 << 16)
+#define PMECC_CFG_SPARE_DISABLE (0 << 16)
+
+#define PMECC_CFG_AUTO_ENABLE (1 << 20)
+#define PMECC_CFG_AUTO_DISABLE (0 << 20)
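+
+/*
+ * Example cfg value (matching what atmel_pmecc_core_init programs for
+ * 8-bit BCH over 512-byte sectors, 4 sectors per page, read mode, spare
+ * and auto mode disabled):
+ * PMECC_CFG_BCH_ERR8 | PMECC_CFG_SECTOR512 | PMECC_CFG_PAGE_4SECTORS |
+ * PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE | PMECC_CFG_AUTO_DISABLE
+ */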
+
+/* For PMECC Clock Control Register */
+#define PMECC_CLK_133MHZ (2 << 0)
+
+/* For PMECC Control Register */
+#define PMECC_CTRL_RST (1 << 0)
+#define PMECC_CTRL_DATA (1 << 1)
+#define PMECC_CTRL_USER (1 << 2)
+#define PMECC_CTRL_ENABLE (1 << 4)
+#define PMECC_CTRL_DISABLE (1 << 5)
+
+/* For PMECC Status Register */
+#define PMECC_SR_BUSY (1 << 0)
+#define PMECC_SR_ENABLE (1 << 4)
+
+/* PMERRLOC Register Definitions */
+struct pmecc_errloc_regs {
+ u32 elcfg; /* 0x00 Error Location Configuration Register */
+ u32 elprim; /* 0x04 Error Location Primitive Register */
+ u32 elen; /* 0x08 Error Location Enable Register */
+ u32 eldis; /* 0x0C Error Location Disable Register */
+ u32 elsr; /* 0x10 Error Location Status Register */
+ u32 elier; /* 0x14 Error Location Interrupt Enable Register */
+ u32 elidr; /* 0x18 Error Location Interrupt Disable Register */
+ u32 elimr; /* 0x1C Error Location Interrupt Mask Register */
+ u32 elisr; /* 0x20 Error Location Interrupt Status Register */
+ u32 reserved0; /* 0x24 Reserved */
+#ifdef CONFIG_SAMA5D2
+ u32 sigma[33]; /* 0x28-0xA8 Error Location Sigma Registers */
+ u32 el[32]; /* 0xAC-0x128 Error Location Registers */
+
+ /*
+ * 0x12C-0x1FC:
+ * Reserved for SAMA5D2.
+ */
+ u32 reserved1[53];
+#else
+ u32 sigma[25]; /* 0x28-0x88 Error Location Sigma Registers */
+ u32 el[24]; /* 0x8C-0xE8 Error Location Registers */
+ u32 reserved1[5]; /* 0xEC-0xFC Reserved */
+#endif
+
+ /*
+ * On SAMA5 chips the HSMC registers start here; on the 9X5 chips this
+ * area is simply reserved.
+ *
+ * Offset 0x00-0xF8:
+ */
+ u32 reserved2[63];
+
+ /*
+ * Offset 0xFC:
+ * PMECC version for AT91SAM9X5, AT91SAM9N12.
+ * HSMC version for SAMA5D3, SAMA5D4; it can be treated as the PMECC
+ * version.
+ */
+ u32 version;
+};
+
+/* For Error Location Configuration Register */
+#define PMERRLOC_ELCFG_SECTOR_512 (0 << 0)
+#define PMERRLOC_ELCFG_SECTOR_1024 (1 << 0)
+#define PMERRLOC_ELCFG_NUM_ERRORS(n) ((n) << 16)
+
+/* For Error Location Disable Register */
+#define PMERRLOC_DISABLE (1 << 0)
+
+/* For Error Location Interrupt Status Register */
+#ifdef CONFIG_SAMA5D2
+#define PMERRLOC_ERR_NUM_MASK (0x3f << 8)
+#else
+#define PMERRLOC_ERR_NUM_MASK (0x1f << 8)
+#endif
+
+#define PMERRLOC_CALC_DONE (1 << 0)
+
+/* PMECC IP version */
+#define PMECC_VERSION_SAMA5D2 0x210
+#define PMECC_VERSION_SAMA5D4 0x113
+#define PMECC_VERSION_SAMA5D3 0x112
+#define PMECC_VERSION_AT91SAM9N12 0x102
+#define PMECC_VERSION_AT91SAM9X5 0x101
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13 13
+#define PMECC_GF_DIMENSION_14 14
+
+/* Primitive Polynomial used by PMECC */
+#define PMECC_GF_13_PRIMITIVE_POLY 0x201b
+#define PMECC_GF_14_PRIMITIVE_POLY 0x4443
+
+#define PMECC_INDEX_TABLE_SIZE_512 0x2000
+#define PMECC_INDEX_TABLE_SIZE_1024 0x4000
+
+#define PMECC_MAX_TIMEOUT_US (100 * 1000)
+
+/* Reserved bytes in oob area */
+#define PMECC_OOB_RESERVED_BYTES 2
+
+#endif
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/Makefile b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/Makefile
new file mode 100644
index 000000000..5d9e7e3f3
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+obj-$(CONFIG_NAND_BRCMNAND_6368) += bcm6368_nand.o
+obj-$(CONFIG_NAND_BRCMNAND_63158) += bcm63158_nand.o
+obj-$(CONFIG_NAND_BRCMNAND_68360) += bcm68360_nand.o
+obj-$(CONFIG_NAND_BRCMNAND_6838) += bcm6838_nand.o
+obj-$(CONFIG_NAND_BRCMNAND_6858) += bcm6858_nand.o
+obj-$(CONFIG_NAND_BRCMNAND) += brcmnand.o
+obj-$(CONFIG_NAND_BRCMNAND) += brcmnand_compat.o
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm63158_nand.c b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm63158_nand.c
new file mode 100644
index 000000000..aa095c439
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm63158_nand.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <dm.h>
+
+#include "brcmnand.h"
+
+struct bcm63158_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM63158_NAND_INT 0x00
+#define BCM63158_NAND_STATUS_SHIFT 0
+#define BCM63158_NAND_STATUS_MASK (0xfff << BCM63158_NAND_STATUS_SHIFT)
+
+#define BCM63158_NAND_INT_EN 0x04
+#define BCM63158_NAND_ENABLE_SHIFT 0
+#define BCM63158_NAND_ENABLE_MASK (0xffff << BCM63158_NAND_ENABLE_SHIFT)
+
+enum {
+ BCM63158_NP_READ = BIT(0),
+ BCM63158_BLOCK_ERASE = BIT(1),
+ BCM63158_COPY_BACK = BIT(2),
+ BCM63158_PAGE_PGM = BIT(3),
+ BCM63158_CTRL_READY = BIT(4),
+ BCM63158_DEV_RBPIN = BIT(5),
+ BCM63158_ECC_ERR_UNC = BIT(6),
+ BCM63158_ECC_ERR_CORR = BIT(7),
+};
+
+static bool bcm63158_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm63158_nand_soc *priv =
+ container_of(soc, struct bcm63158_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63158_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & (BCM63158_CTRL_READY << BCM63158_NAND_STATUS_SHIFT)) {
+ /* Ack interrupt */
+ val &= ~BCM63158_NAND_STATUS_MASK;
+ val |= BCM63158_CTRL_READY << BCM63158_NAND_STATUS_SHIFT;
+ brcmnand_writel(val, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm63158_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm63158_nand_soc *priv =
+ container_of(soc, struct bcm63158_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM63158_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ /* Don't ack any interrupts */
+ val &= ~BCM63158_NAND_STATUS_MASK;
+
+ if (en)
+ val |= BCM63158_CTRL_READY << BCM63158_NAND_ENABLE_SHIFT;
+ else
+ val &= ~(BCM63158_CTRL_READY << BCM63158_NAND_ENABLE_SHIFT);
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm63158_nand_probe(struct udevice *dev)
+{
+ struct udevice *pdev = dev;
+ struct bcm63158_nand_soc *priv = dev_get_priv(dev);
+ struct brcmnand_soc *soc;
+ struct resource res;
+
+ soc = &priv->soc;
+
+ dev_read_resource_byname(pdev, "nand-int-base", &res);
+ priv->base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm63158_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm63158_nand_intc_set;
+
+ /* Disable and ack all interrupts */
+ brcmnand_writel(0, priv->base + BCM63158_NAND_INT_EN);
+ brcmnand_writel(0, priv->base + BCM63158_NAND_INT);
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct udevice_id bcm63158_nand_dt_ids[] = {
+ {
+ .compatible = "brcm,nand-bcm63158",
+ },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(bcm63158_nand) = {
+ .name = "bcm63158-nand",
+ .id = UCLASS_MTD,
+ .of_match = bcm63158_nand_dt_ids,
+ .probe = bcm63158_nand_probe,
+ .priv_auto = sizeof(struct bcm63158_nand_soc),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(bcm63158_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name,
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
new file mode 100644
index 000000000..e4bf19368
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6368_nand.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <dm.h>
+
+#include "brcmnand.h"
+
+struct bcm6368_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define soc_to_priv(_soc) container_of(_soc, struct bcm6368_nand_soc, soc)
+
+#define BCM6368_NAND_INT 0x00
+#define BCM6368_NAND_STATUS_SHIFT 0
+#define BCM6368_NAND_STATUS_MASK (0xfff << BCM6368_NAND_STATUS_SHIFT)
+#define BCM6368_NAND_ENABLE_SHIFT 16
+#define BCM6368_NAND_ENABLE_MASK (0xffff << BCM6368_NAND_ENABLE_SHIFT)
+
+enum {
+ BCM6368_NP_READ = BIT(0),
+ BCM6368_BLOCK_ERASE = BIT(1),
+ BCM6368_COPY_BACK = BIT(2),
+ BCM6368_PAGE_PGM = BIT(3),
+ BCM6368_CTRL_READY = BIT(4),
+ BCM6368_DEV_RBPIN = BIT(5),
+ BCM6368_ECC_ERR_UNC = BIT(6),
+ BCM6368_ECC_ERR_CORR = BIT(7),
+};
+
+static bool bcm6368_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm6368_nand_soc *priv = soc_to_priv(soc);
+ void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & (BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT)) {
+ /* Ack interrupt */
+ val &= ~BCM6368_NAND_STATUS_MASK;
+ val |= BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT;
+ brcmnand_writel(val, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm6368_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm6368_nand_soc *priv = soc_to_priv(soc);
+ void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ /* Don't ack any interrupts */
+ val &= ~BCM6368_NAND_STATUS_MASK;
+
+ if (en)
+ val |= BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT;
+ else
+ val &= ~(BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT);
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm6368_nand_probe(struct udevice *dev)
+{
+ struct bcm6368_nand_soc *priv = dev_get_priv(dev);
+ struct brcmnand_soc *soc = &priv->soc;
+
+ priv->base = dev_remap_addr_name(dev, "nand-int-base");
+ if (!priv->base)
+ return -EINVAL;
+
+ soc->ctlrdy_ack = bcm6368_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm6368_nand_intc_set;
+
+ /* Disable and ack all interrupts */
+ brcmnand_writel(0, priv->base + BCM6368_NAND_INT);
+ brcmnand_writel(BCM6368_NAND_STATUS_MASK,
+ priv->base + BCM6368_NAND_INT);
+
+ return brcmnand_probe(dev, soc);
+}
+
+static const struct udevice_id bcm6368_nand_dt_ids[] = {
+ {
+ .compatible = "brcm,nand-bcm6368",
+ },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(bcm6368_nand) = {
+ .name = "bcm6368-nand",
+ .id = UCLASS_MTD,
+ .of_match = bcm6368_nand_dt_ids,
+ .probe = bcm6368_nand_probe,
+ .priv_auto = sizeof(struct bcm6368_nand_soc),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(bcm6368_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name,
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm68360_nand.c b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm68360_nand.c
new file mode 100644
index 000000000..586ea3d8f
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm68360_nand.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <dm.h>
+
+#include "brcmnand.h"
+
+struct bcm68360_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM68360_NAND_INT 0x00
+#define BCM68360_NAND_STATUS_SHIFT 0
+#define BCM68360_NAND_STATUS_MASK (0xfff << BCM68360_NAND_STATUS_SHIFT)
+
+#define BCM68360_NAND_INT_EN 0x04
+#define BCM68360_NAND_ENABLE_SHIFT 0
+#define BCM68360_NAND_ENABLE_MASK (0xffff << BCM68360_NAND_ENABLE_SHIFT)
+
+enum {
+ BCM68360_NP_READ = BIT(0),
+ BCM68360_BLOCK_ERASE = BIT(1),
+ BCM68360_COPY_BACK = BIT(2),
+ BCM68360_PAGE_PGM = BIT(3),
+ BCM68360_CTRL_READY = BIT(4),
+ BCM68360_DEV_RBPIN = BIT(5),
+ BCM68360_ECC_ERR_UNC = BIT(6),
+ BCM68360_ECC_ERR_CORR = BIT(7),
+};
+
+static bool bcm68360_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm68360_nand_soc *priv =
+ container_of(soc, struct bcm68360_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM68360_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & (BCM68360_CTRL_READY << BCM68360_NAND_STATUS_SHIFT)) {
+ /* Ack interrupt */
+ val &= ~BCM68360_NAND_STATUS_MASK;
+ val |= BCM68360_CTRL_READY << BCM68360_NAND_STATUS_SHIFT;
+ brcmnand_writel(val, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm68360_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm68360_nand_soc *priv =
+ container_of(soc, struct bcm68360_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM68360_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ /* Don't ack any interrupts */
+ val &= ~BCM68360_NAND_STATUS_MASK;
+
+ if (en)
+ val |= BCM68360_CTRL_READY << BCM68360_NAND_ENABLE_SHIFT;
+ else
+ val &= ~(BCM68360_CTRL_READY << BCM68360_NAND_ENABLE_SHIFT);
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm68360_nand_probe(struct udevice *dev)
+{
+ struct udevice *pdev = dev;
+ struct bcm68360_nand_soc *priv = dev_get_priv(dev);
+ struct brcmnand_soc *soc;
+ struct resource res;
+
+ soc = &priv->soc;
+
+ dev_read_resource_byname(pdev, "nand-int-base", &res);
+ priv->base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm68360_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm68360_nand_intc_set;
+
+ /* Disable and ack all interrupts */
+ brcmnand_writel(0, priv->base + BCM68360_NAND_INT_EN);
+ brcmnand_writel(0, priv->base + BCM68360_NAND_INT);
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct udevice_id bcm68360_nand_dt_ids[] = {
+ {
+ .compatible = "brcm,nand-bcm68360",
+ },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(bcm68360_nand) = {
+ .name = "bcm68360-nand",
+ .id = UCLASS_MTD,
+ .of_match = bcm68360_nand_dt_ids,
+ .probe = bcm68360_nand_probe,
+ .priv_auto = sizeof(struct bcm68360_nand_soc),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(bcm68360_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name,
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6838_nand.c b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6838_nand.c
new file mode 100644
index 000000000..85f318bd7
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6838_nand.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <dm.h>
+
+#include "brcmnand.h"
+
+struct bcm6838_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM6838_NAND_INT 0x00
+#define BCM6838_NAND_STATUS_SHIFT 0
+#define BCM6838_NAND_STATUS_MASK (0xfff << BCM6838_NAND_STATUS_SHIFT)
+#define BCM6838_NAND_ENABLE_SHIFT 16
+#define BCM6838_NAND_ENABLE_MASK (0xffff << BCM6838_NAND_ENABLE_SHIFT)
+
+enum {
+ BCM6838_NP_READ = BIT(0),
+ BCM6838_BLOCK_ERASE = BIT(1),
+ BCM6838_COPY_BACK = BIT(2),
+ BCM6838_PAGE_PGM = BIT(3),
+ BCM6838_CTRL_READY = BIT(4),
+ BCM6838_DEV_RBPIN = BIT(5),
+ BCM6838_ECC_ERR_UNC = BIT(6),
+ BCM6838_ECC_ERR_CORR = BIT(7),
+};
+
+static bool bcm6838_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm6838_nand_soc *priv =
+ container_of(soc, struct bcm6838_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6838_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & (BCM6838_CTRL_READY << BCM6838_NAND_STATUS_SHIFT)) {
+ /* Ack interrupt */
+ val &= ~BCM6838_NAND_STATUS_MASK;
+ val |= BCM6838_CTRL_READY << BCM6838_NAND_STATUS_SHIFT;
+ brcmnand_writel(val, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm6838_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm6838_nand_soc *priv =
+ container_of(soc, struct bcm6838_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6838_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ /* Don't ack any interrupts */
+ val &= ~BCM6838_NAND_STATUS_MASK;
+
+ if (en)
+ val |= BCM6838_CTRL_READY << BCM6838_NAND_ENABLE_SHIFT;
+ else
+ val &= ~(BCM6838_CTRL_READY << BCM6838_NAND_ENABLE_SHIFT);
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm6838_nand_probe(struct udevice *dev)
+{
+ struct udevice *pdev = dev;
+ struct bcm6838_nand_soc *priv = dev_get_priv(dev);
+ struct brcmnand_soc *soc;
+ struct resource res;
+
+ soc = &priv->soc;
+
+ dev_read_resource_byname(pdev, "nand-int-base", &res);
+ priv->base = ioremap(res.start, resource_size(&res));
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm6838_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm6838_nand_intc_set;
+
+ /* Disable and ack all interrupts */
+ brcmnand_writel(0, priv->base + BCM6838_NAND_INT);
+ brcmnand_writel(BCM6838_NAND_STATUS_MASK,
+ priv->base + BCM6838_NAND_INT);
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct udevice_id bcm6838_nand_dt_ids[] = {
+ {
+ .compatible = "brcm,nand-bcm6838",
+ },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(bcm6838_nand) = {
+ .name = "bcm6838-nand",
+ .id = UCLASS_MTD,
+ .of_match = bcm6838_nand_dt_ids,
+ .probe = bcm6838_nand_probe,
+ .priv_auto = sizeof(struct bcm6838_nand_soc),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(bcm6838_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name,
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6858_nand.c b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6858_nand.c
new file mode 100644
index 000000000..a5e159ad5
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/bcm6858_nand.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <dm.h>
+
+#include "brcmnand.h"
+
+struct bcm6858_nand_soc {
+ struct brcmnand_soc soc;
+ void __iomem *base;
+};
+
+#define BCM6858_NAND_INT 0x00
+#define BCM6858_NAND_STATUS_SHIFT 0
+#define BCM6858_NAND_STATUS_MASK (0xfff << BCM6858_NAND_STATUS_SHIFT)
+
+#define BCM6858_NAND_INT_EN 0x04
+#define BCM6858_NAND_ENABLE_SHIFT 0
+#define BCM6858_NAND_ENABLE_MASK (0xffff << BCM6858_NAND_ENABLE_SHIFT)
+
+enum {
+ BCM6858_NP_READ = BIT(0),
+ BCM6858_BLOCK_ERASE = BIT(1),
+ BCM6858_COPY_BACK = BIT(2),
+ BCM6858_PAGE_PGM = BIT(3),
+ BCM6858_CTRL_READY = BIT(4),
+ BCM6858_DEV_RBPIN = BIT(5),
+ BCM6858_ECC_ERR_UNC = BIT(6),
+ BCM6858_ECC_ERR_CORR = BIT(7),
+};
+
+static bool bcm6858_nand_intc_ack(struct brcmnand_soc *soc)
+{
+ struct bcm6858_nand_soc *priv =
+ container_of(soc, struct bcm6858_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6858_NAND_INT;
+ u32 val = brcmnand_readl(mmio);
+
+ if (val & (BCM6858_CTRL_READY << BCM6858_NAND_STATUS_SHIFT)) {
+ /* Ack interrupt */
+ val &= ~BCM6858_NAND_STATUS_MASK;
+ val |= BCM6858_CTRL_READY << BCM6858_NAND_STATUS_SHIFT;
+ brcmnand_writel(val, mmio);
+ return true;
+ }
+
+ return false;
+}
+
+static void bcm6858_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+ struct bcm6858_nand_soc *priv =
+ container_of(soc, struct bcm6858_nand_soc, soc);
+ void __iomem *mmio = priv->base + BCM6858_NAND_INT_EN;
+ u32 val = brcmnand_readl(mmio);
+
+ /* Don't ack any interrupts */
+ val &= ~BCM6858_NAND_STATUS_MASK;
+
+ if (en)
+ val |= BCM6858_CTRL_READY << BCM6858_NAND_ENABLE_SHIFT;
+ else
+ val &= ~(BCM6858_CTRL_READY << BCM6858_NAND_ENABLE_SHIFT);
+
+ brcmnand_writel(val, mmio);
+}
+
+static int bcm6858_nand_probe(struct udevice *dev)
+{
+ struct udevice *pdev = dev;
+ struct bcm6858_nand_soc *priv = dev_get_priv(dev);
+ struct brcmnand_soc *soc;
+ struct resource res;
+
+ soc = &priv->soc;
+
+ dev_read_resource_byname(pdev, "nand-int-base", &res);
+ priv->base = devm_ioremap(dev, res.start, resource_size(&res));
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ soc->ctlrdy_ack = bcm6858_nand_intc_ack;
+ soc->ctlrdy_set_enabled = bcm6858_nand_intc_set;
+
+ /* Disable and ack all interrupts */
+ brcmnand_writel(0, priv->base + BCM6858_NAND_INT_EN);
+ brcmnand_writel(0, priv->base + BCM6858_NAND_INT);
+
+ return brcmnand_probe(pdev, soc);
+}
+
+static const struct udevice_id bcm6858_nand_dt_ids[] = {
+ {
+ .compatible = "brcm,nand-bcm6858",
+ },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(bcm6858_nand) = {
+ .name = "bcm6858-nand",
+ .id = UCLASS_MTD,
+ .of_match = bcm6858_nand_dt_ids,
+ .probe = bcm6858_nand_probe,
+ .priv_auto = sizeof(struct bcm6858_nand_soc),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(bcm6858_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name,
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.c
new file mode 100644
index 000000000..99a1c2e6e
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -0,0 +1,2754 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright © 2010-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <clk.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <asm/processor.h>
+#include <dm.h>
+
+#include "brcmnand.h"
+#include "brcmnand_compat.h"
+
+/*
+ * This flag controls whether WP stays on between erase/write commands to
+ * mitigate flash corruption due to power glitches. Values:
+ * 0: NAND_WP is not used or not available
+ * 1: NAND_WP is set by default, cleared for erase/write operations
+ * 2: NAND_WP is always cleared
+ */
+static int wp_on = 1;
+module_param(wp_on, int, 0444);
+
+/***********************************************************************
+ * Definitions
+ ***********************************************************************/
+
+#define DRV_NAME "brcmnand"
+
+#define CMD_NULL 0x00
+#define CMD_PAGE_READ 0x01
+#define CMD_SPARE_AREA_READ 0x02
+#define CMD_STATUS_READ 0x03
+#define CMD_PROGRAM_PAGE 0x04
+#define CMD_PROGRAM_SPARE_AREA 0x05
+#define CMD_COPY_BACK 0x06
+#define CMD_DEVICE_ID_READ 0x07
+#define CMD_BLOCK_ERASE 0x08
+#define CMD_FLASH_RESET 0x09
+#define CMD_BLOCKS_LOCK 0x0a
+#define CMD_BLOCKS_LOCK_DOWN 0x0b
+#define CMD_BLOCKS_UNLOCK 0x0c
+#define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
+#define CMD_PARAMETER_READ 0x0e
+#define CMD_PARAMETER_CHANGE_COL 0x0f
+#define CMD_LOW_LEVEL_OP 0x10
+
+struct brcm_nand_dma_desc {
+ u32 next_desc;
+ u32 next_desc_ext;
+ u32 cmd_irq;
+ u32 dram_addr;
+ u32 dram_addr_ext;
+ u32 tfr_len;
+ u32 total_len;
+ u32 flash_addr;
+ u32 flash_addr_ext;
+ u32 cs;
+ u32 pad2[5];
+ u32 status_valid;
+} __packed;
+
+/* Bitfields for brcm_nand_dma_desc::status_valid */
+#define FLASH_DMA_ECC_ERROR (1 << 8)
+#define FLASH_DMA_CORR_ERROR (1 << 9)
+
+/* 512B flash cache in the NAND controller HW */
+#define FC_SHIFT 9U
+#define FC_BYTES 512U
+#define FC_WORDS (FC_BYTES >> 2)
+
+#define BRCMNAND_MIN_PAGESIZE 512
+#define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
+#define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
+
+#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
+#define NAND_POLL_STATUS_TIMEOUT_MS 100
+
+/* Controller feature flags */
+enum {
+ BRCMNAND_HAS_1K_SECTORS = BIT(0),
+ BRCMNAND_HAS_PREFETCH = BIT(1),
+ BRCMNAND_HAS_CACHE_MODE = BIT(2),
+ BRCMNAND_HAS_WP = BIT(3),
+};
+
+struct brcmnand_controller {
+#ifndef __UBOOT__
+ struct device *dev;
+#else
+ struct udevice *dev;
+#endif /* __UBOOT__ */
+ struct nand_hw_control controller;
+ void __iomem *nand_base;
+ void __iomem *nand_fc; /* flash cache */
+ void __iomem *flash_dma_base;
+ unsigned int irq;
+ unsigned int dma_irq;
+ int nand_version;
+ int parameter_page_big_endian;
+
+ /* Some SoCs provide custom interrupt status register(s) */
+ struct brcmnand_soc *soc;
+
+ /* Some SoCs have a gateable clock for the controller */
+ struct clk *clk;
+
+ int cmd_pending;
+ bool dma_pending;
+ struct completion done;
+ struct completion dma_done;
+
+ /* List of NAND hosts (one for each chip-select) */
+ struct list_head host_list;
+
+ struct brcm_nand_dma_desc *dma_desc;
+ dma_addr_t dma_pa;
+
+ /* in-memory cache of the FLASH_CACHE, used only for some commands */
+ u8 flash_cache[FC_BYTES];
+
+ /* Controller revision details */
+ const u16 *reg_offsets;
+ unsigned int reg_spacing; /* between CS1, CS2, ... regs */
+ const u8 *cs_offsets; /* within each chip-select */
+ const u8 *cs0_offsets; /* within CS0, if different */
+ unsigned int max_block_size;
+ const unsigned int *block_sizes;
+ unsigned int max_page_size;
+ const unsigned int *page_sizes;
+ unsigned int max_oob;
+ u32 features;
+
+ /* for low-power standby/resume only */
+ u32 nand_cs_nand_select;
+ u32 nand_cs_nand_xor;
+ u32 corr_stat_threshold;
+ u32 flash_dma_mode;
+};
+
+struct brcmnand_cfg {
+ u64 device_size;
+ unsigned int block_size;
+ unsigned int page_size;
+ unsigned int spare_area_size;
+ unsigned int device_width;
+ unsigned int col_adr_bytes;
+ unsigned int blk_adr_bytes;
+ unsigned int ful_adr_bytes;
+ unsigned int sector_size_1k;
+ unsigned int ecc_level;
+ /* used for low-power standby/resume only */
+ u32 acc_control;
+ u32 config;
+ u32 config_ext;
+ u32 timing_1;
+ u32 timing_2;
+};
+
+struct brcmnand_host {
+ struct list_head node;
+
+ struct nand_chip chip;
+#ifndef __UBOOT__
+ struct platform_device *pdev;
+#else
+ struct udevice *pdev;
+#endif /* __UBOOT__ */
+ int cs;
+
+ unsigned int last_cmd;
+ unsigned int last_byte;
+ u64 last_addr;
+ struct brcmnand_cfg hwcfg;
+ struct brcmnand_controller *ctrl;
+};
+
+enum brcmnand_reg {
+ BRCMNAND_CMD_START = 0,
+ BRCMNAND_CMD_EXT_ADDRESS,
+ BRCMNAND_CMD_ADDRESS,
+ BRCMNAND_INTFC_STATUS,
+ BRCMNAND_CS_SELECT,
+ BRCMNAND_CS_XOR,
+ BRCMNAND_LL_OP,
+ BRCMNAND_CS0_BASE,
+ BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
+ BRCMNAND_CORR_THRESHOLD,
+ BRCMNAND_CORR_THRESHOLD_EXT,
+ BRCMNAND_UNCORR_COUNT,
+ BRCMNAND_CORR_COUNT,
+ BRCMNAND_CORR_EXT_ADDR,
+ BRCMNAND_CORR_ADDR,
+ BRCMNAND_UNCORR_EXT_ADDR,
+ BRCMNAND_UNCORR_ADDR,
+ BRCMNAND_SEMAPHORE,
+ BRCMNAND_ID,
+ BRCMNAND_ID_EXT,
+ BRCMNAND_LL_RDATA,
+ BRCMNAND_OOB_READ_BASE,
+ BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_OOB_WRITE_BASE,
+ BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
+ BRCMNAND_FC_BASE,
+};
+
+/* BRCMNAND v4.0 */
+static const u16 brcmnand_regs_v40[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v5.0 */
+static const u16 brcmnand_regs_v50[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x6c,
+ [BRCMNAND_CS_SELECT] = 0x14,
+ [BRCMNAND_CS_XOR] = 0x18,
+ [BRCMNAND_LL_OP] = 0x178,
+ [BRCMNAND_CS0_BASE] = 0x40,
+ [BRCMNAND_CS1_BASE] = 0xd0,
+ [BRCMNAND_CORR_THRESHOLD] = 0x84,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
+ [BRCMNAND_UNCORR_COUNT] = 0,
+ [BRCMNAND_CORR_COUNT] = 0,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x70,
+ [BRCMNAND_CORR_ADDR] = 0x74,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
+ [BRCMNAND_UNCORR_ADDR] = 0x7c,
+ [BRCMNAND_SEMAPHORE] = 0x58,
+ [BRCMNAND_ID] = 0x60,
+ [BRCMNAND_ID_EXT] = 0x64,
+ [BRCMNAND_LL_RDATA] = 0x17c,
+ [BRCMNAND_OOB_READ_BASE] = 0x20,
+ [BRCMNAND_OOB_READ_10_BASE] = 0x130,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x30,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
+ [BRCMNAND_FC_BASE] = 0x200,
+};
+
+/* BRCMNAND v6.0 - v7.1 */
+static const u16 brcmnand_regs_v60[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xc0,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x280,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x400,
+};
+
+/* BRCMNAND v7.2 */
+static const u16 brcmnand_regs_v72[] = {
+ [BRCMNAND_CMD_START] = 0x04,
+ [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
+ [BRCMNAND_CMD_ADDRESS] = 0x0c,
+ [BRCMNAND_INTFC_STATUS] = 0x14,
+ [BRCMNAND_CS_SELECT] = 0x18,
+ [BRCMNAND_CS_XOR] = 0x1c,
+ [BRCMNAND_LL_OP] = 0x20,
+ [BRCMNAND_CS0_BASE] = 0x50,
+ [BRCMNAND_CS1_BASE] = 0,
+ [BRCMNAND_CORR_THRESHOLD] = 0xdc,
+ [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
+ [BRCMNAND_UNCORR_COUNT] = 0xfc,
+ [BRCMNAND_CORR_COUNT] = 0x100,
+ [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
+ [BRCMNAND_CORR_ADDR] = 0x110,
+ [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
+ [BRCMNAND_UNCORR_ADDR] = 0x118,
+ [BRCMNAND_SEMAPHORE] = 0x150,
+ [BRCMNAND_ID] = 0x194,
+ [BRCMNAND_ID_EXT] = 0x198,
+ [BRCMNAND_LL_RDATA] = 0x19c,
+ [BRCMNAND_OOB_READ_BASE] = 0x200,
+ [BRCMNAND_OOB_READ_10_BASE] = 0,
+ [BRCMNAND_OOB_WRITE_BASE] = 0x400,
+ [BRCMNAND_OOB_WRITE_10_BASE] = 0,
+ [BRCMNAND_FC_BASE] = 0x600,
+};
+
+enum brcmnand_cs_reg {
+ BRCMNAND_CS_CFG_EXT = 0,
+ BRCMNAND_CS_CFG,
+ BRCMNAND_CS_ACC_CONTROL,
+ BRCMNAND_CS_TIMING1,
+ BRCMNAND_CS_TIMING2,
+};
+
+/* Per chip-select offsets for v7.1 */
+static const u8 brcmnand_cs_offsets_v71[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x0c,
+ [BRCMNAND_CS_TIMING2] = 0x10,
+};
+
+/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
+static const u8 brcmnand_cs_offsets[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x04,
+ [BRCMNAND_CS_CFG] = 0x04,
+ [BRCMNAND_CS_TIMING1] = 0x08,
+ [BRCMNAND_CS_TIMING2] = 0x0c,
+};
+
+/* Per chip-select offset for <= v5.0 on CS0 only */
+static const u8 brcmnand_cs_offsets_cs0[] = {
+ [BRCMNAND_CS_ACC_CONTROL] = 0x00,
+ [BRCMNAND_CS_CFG_EXT] = 0x08,
+ [BRCMNAND_CS_CFG] = 0x08,
+ [BRCMNAND_CS_TIMING1] = 0x10,
+ [BRCMNAND_CS_TIMING2] = 0x14,
+};
+
+/*
+ * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
+ * one config register, but once the bitfields overflowed, newer controllers
+ * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
+ */
+enum {
+ CFG_BLK_ADR_BYTES_SHIFT = 8,
+ CFG_COL_ADR_BYTES_SHIFT = 12,
+ CFG_FUL_ADR_BYTES_SHIFT = 16,
+ CFG_BUS_WIDTH_SHIFT = 23,
+ CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
+ CFG_DEVICE_SIZE_SHIFT = 24,
+
+ /* Only for pre-v7.1 (with no CFG_EXT register) */
+ CFG_PAGE_SIZE_SHIFT = 20,
+ CFG_BLK_SIZE_SHIFT = 28,
+
+ /* Only for v7.1+ (with CFG_EXT register) */
+ CFG_EXT_PAGE_SIZE_SHIFT = 0,
+ CFG_EXT_BLK_SIZE_SHIFT = 4,
+};
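+
+/*
+ * Editor's sketch (illustrative, not part of this patch): how the same
+ * geometry is packed per the comment above. The encoded field values
+ * (page_enc, blk_enc, address-byte counts) are made up for illustration;
+ * the driver's real encoder is brcmnand_set_cfg() later in this file.
+ */
+static inline void sketch_pack_cfg(bool has_cfg_ext, u32 page_enc,
+ u32 blk_enc, u32 *cfg, u32 *cfg_ext)
+{
+ *cfg = (5 << CFG_BLK_ADR_BYTES_SHIFT) |
+ (2 << CFG_COL_ADR_BYTES_SHIFT) |
+ (5 << CFG_FUL_ADR_BYTES_SHIFT);
+
+ if (has_cfg_ext) {
+ /* v7.1+: the page/block size encodings moved out to CFG_EXT */
+ *cfg_ext = (page_enc << CFG_EXT_PAGE_SIZE_SHIFT) |
+ (blk_enc << CFG_EXT_BLK_SIZE_SHIFT);
+ } else {
+ /* pre-v7.1: everything lives in the single CFG word */
+ *cfg |= (page_enc << CFG_PAGE_SIZE_SHIFT) |
+ (blk_enc << CFG_BLK_SIZE_SHIFT);
+ *cfg_ext = 0;
+ }
+}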
+
+/* BRCMNAND_INTFC_STATUS */
+enum {
+ INTFC_FLASH_STATUS = GENMASK(7, 0),
+
+ INTFC_ERASED = BIT(27),
+ INTFC_OOB_VALID = BIT(28),
+ INTFC_CACHE_VALID = BIT(29),
+ INTFC_FLASH_READY = BIT(30),
+ INTFC_CTLR_READY = BIT(31),
+};
+
+static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
+{
+ return brcmnand_readl(ctrl->nand_base + offs);
+}
+
+static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
+ u32 val)
+{
+ brcmnand_writel(val, ctrl->nand_base + offs);
+}
+
+static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+{
+ static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
+ static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
+ static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
+
+ ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
+
+ /* Only v4.0+ controllers are supported */
+ if (ctrl->nand_version < 0x0400) {
+ dev_err(ctrl->dev, "version %#x not supported\n",
+ ctrl->nand_version);
+ return -ENODEV;
+ }
+
+ /* Register offsets */
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->reg_offsets = brcmnand_regs_v72;
+ else if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_offsets = brcmnand_regs_v71;
+ else if (ctrl->nand_version >= 0x0600)
+ ctrl->reg_offsets = brcmnand_regs_v60;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->reg_offsets = brcmnand_regs_v50;
+ else if (ctrl->nand_version >= 0x0400)
+ ctrl->reg_offsets = brcmnand_regs_v40;
+
+ /* Chip-select stride */
+ if (ctrl->nand_version >= 0x0701)
+ ctrl->reg_spacing = 0x14;
+ else
+ ctrl->reg_spacing = 0x10;
+
+ /* Per chip-select registers */
+ if (ctrl->nand_version >= 0x0701) {
+ ctrl->cs_offsets = brcmnand_cs_offsets_v71;
+ } else {
+ ctrl->cs_offsets = brcmnand_cs_offsets;
+
+ /* v5.0 and earlier has a different CS0 offset layout */
+ if (ctrl->nand_version <= 0x0500)
+ ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
+ }
+
+ /* Page / block sizes */
+ if (ctrl->nand_version >= 0x0701) {
+ /* >= v7.1 use nice power-of-2 values! */
+ ctrl->max_page_size = 16 * 1024;
+ ctrl->max_block_size = 2 * 1024 * 1024;
+ } else {
+ ctrl->page_sizes = page_sizes;
+ if (ctrl->nand_version >= 0x0600)
+ ctrl->block_sizes = block_sizes_v6;
+ else
+ ctrl->block_sizes = block_sizes_v4;
+
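+ /* Unreachable in practice: versions < 0x0400 bailed out with -ENODEV above */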
+ if (ctrl->nand_version < 0x0400) {
+ ctrl->max_page_size = 4096;
+ ctrl->max_block_size = 512 * 1024;
+ }
+ }
+
+ /* Maximum spare area sector size (per 512B) */
+ if (ctrl->nand_version >= 0x0702)
+ ctrl->max_oob = 128;
+ else if (ctrl->nand_version >= 0x0600)
+ ctrl->max_oob = 64;
+ else if (ctrl->nand_version >= 0x0500)
+ ctrl->max_oob = 32;
+ else
+ ctrl->max_oob = 16;
+
+ /* v6.0 and newer (except v6.1) have prefetch support */
+ if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
+ ctrl->features |= BRCMNAND_HAS_PREFETCH;
+
+ /*
+ * v6.x has cache mode, but it's implemented differently. Ignore it for
+ * now.
+ */
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
+
+ if (ctrl->nand_version >= 0x0500)
+ ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
+
+ if (ctrl->nand_version >= 0x0700)
+ ctrl->features |= BRCMNAND_HAS_WP;
+#ifndef __UBOOT__
+ else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
+#else
+ else if (dev_read_bool(ctrl->dev, "brcm,nand-has-wp"))
+#endif /* __UBOOT__ */
+ ctrl->features |= BRCMNAND_HAS_WP;
+
+ return 0;
+}
+
+static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ return nand_readreg(ctrl, offs);
+ else
+ return 0;
+}
+
+static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 val)
+{
+ u16 offs = ctrl->reg_offsets[reg];
+
+ if (offs)
+ nand_writereg(ctrl, offs, val);
+}
+
+static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
+ enum brcmnand_reg reg, u32 mask, unsigned
+ int shift, u32 val)
+{
+ u32 tmp = brcmnand_read_reg(ctrl, reg);
+
+ tmp &= ~mask;
+ tmp |= val << shift;
+ brcmnand_write_reg(ctrl, reg, tmp);
+}
+
+static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
+{
+ return __raw_readl(ctrl->nand_fc + word * 4);
+}
+
+static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
+ int word, u32 val)
+{
+ __raw_writel(val, ctrl->nand_fc + word * 4);
+}
+
+static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
+ enum brcmnand_cs_reg reg)
+{
+ u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
+ u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
+ u8 cs_offs;
+
+ if (cs == 0 && ctrl->cs0_offsets)
+ cs_offs = ctrl->cs0_offsets[reg];
+ else
+ cs_offs = ctrl->cs_offsets[reg];
+
+ if (cs && offs_cs1)
+ return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
+
+ return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
+}
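+
+/*
+ * Editor's worked example (illustrative): on v4.0, CS0_BASE = 0x40,
+ * CS1_BASE = 0xd0 and reg_spacing = 0x10, so with the offset tables above:
+ * brcmnand_cs_offset(ctrl, 0, BRCMNAND_CS_TIMING1) = 0x40 + 0x10 = 0x50
+ * (CS0 uses the special <= v5.0 layout), while
+ * brcmnand_cs_offset(ctrl, 2, BRCMNAND_CS_ACC_CONTROL) = 0xd0 + 0x10 = 0xe0.
+ */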
+
+static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version < 0x0600)
+ return 1;
+ return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
+}
+
+static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int shift = 0, bits;
+ enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
+ int cs = host->cs;
+
+ if (ctrl->nand_version >= 0x0702)
+ bits = 7;
+ else if (ctrl->nand_version >= 0x0600)
+ bits = 6;
+ else if (ctrl->nand_version >= 0x0500)
+ bits = 5;
+ else
+ bits = 4;
+
+ if (ctrl->nand_version >= 0x0702) {
+ if (cs >= 4)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 4) * bits;
+ } else if (ctrl->nand_version >= 0x0600) {
+ if (cs >= 5)
+ reg = BRCMNAND_CORR_THRESHOLD_EXT;
+ shift = (cs % 5) * bits;
+ }
+ brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
+}
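+
+/*
+ * Editor's note (illustrative): on v7.2 (bits = 7), cs = 5 selects
+ * BRCMNAND_CORR_THRESHOLD_EXT with shift = (5 % 4) * 7 = 7.
+ */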
+
+static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version < 0x0602)
+ return 24;
+ return 0;
+}
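+
+/*
+ * Editor's note (illustrative): brcmnand_send_cmd() applies this shift, so
+ * on pre-v6.2 controllers CMD_PAGE_READ (0x01) is written to CMD_START as
+ * 0x01 << 24 = 0x01000000; v6.2 and newer take the opcode at bit 0.
+ */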
+
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant across hardware revisions, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version >= 0x0702)
+ return GENMASK(7, 0);
+ else if (ctrl->nand_version >= 0x0600)
+ return GENMASK(6, 0);
+ else
+ return GENMASK(5, 0);
+}
+
+#define NAND_ACC_CONTROL_ECC_SHIFT 16
+#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
+
+static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
+{
+ u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
+
+ mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+
+ /* v7.2 includes additional ECC levels */
+ if (ctrl->nand_version >= 0x0702)
+ mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+
+ return mask;
+}
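+
+/*
+ * Editor's note (illustrative): on v7.2 this evaluates to
+ * (0x1f << 16) | (0x7 << 13) = 0x001fe000.
+ */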
+
+static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ u32 acc_control = nand_readreg(ctrl, offs);
+ u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
+
+ if (en) {
+ acc_control |= ecc_flags; /* enable RD/WR ECC */
+ acc_control |= host->hwcfg.ecc_level
+ << NAND_ACC_CONTROL_ECC_SHIFT;
+ } else {
+ acc_control &= ~ecc_flags; /* disable RD/WR ECC */
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ }
+
+ nand_writereg(ctrl, offs, acc_control);
+}
+
+static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
+{
+ if (ctrl->nand_version >= 0x0702)
+ return 9;
+ else if (ctrl->nand_version >= 0x0600)
+ return 7;
+ else if (ctrl->nand_version >= 0x0500)
+ return 6;
+ else
+ return -1;
+}
+
+static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+
+ if (shift < 0)
+ return 0;
+
+ return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
+}
+
+static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int shift = brcmnand_sector_1k_shift(ctrl);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u32 tmp;
+
+ if (shift < 0)
+ return;
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~(1 << shift);
+ tmp |= (!!val) << shift;
+ nand_writereg(ctrl, acc_control_offs, tmp);
+}
+
+/***********************************************************************
+ * CS_NAND_SELECT
+ ***********************************************************************/
+
+enum {
+ CS_SELECT_NAND_WP = BIT(29),
+ CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
+};
+
+static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
+ u32 mask, u32 expected_val,
+ unsigned long timeout_ms)
+{
+#ifndef __UBOOT__
+ unsigned long limit;
+ u32 val;
+
+ if (!timeout_ms)
+ timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
+
+ limit = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (time_after(limit, jiffies));
+#else
+ unsigned long base, limit;
+ u32 val;
+
+ if (!timeout_ms)
+ timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
+
+ base = get_timer(0);
+ limit = CONFIG_SYS_HZ * timeout_ms / 1000;
+ do {
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
+ cpu_relax();
+ } while (get_timer(base) < limit);
+#endif /* __UBOOT__ */
+
+ dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
+ expected_val, val & mask);
+
+ return -ETIMEDOUT;
+}
+
+static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
+{
+ u32 val = en ? CS_SELECT_NAND_WP : 0;
+
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
+}
+
+/***********************************************************************
+ * Flash DMA
+ ***********************************************************************/
+
+enum flash_dma_reg {
+ FLASH_DMA_REVISION = 0x00,
+ FLASH_DMA_FIRST_DESC = 0x04,
+ FLASH_DMA_FIRST_DESC_EXT = 0x08,
+ FLASH_DMA_CTRL = 0x0c,
+ FLASH_DMA_MODE = 0x10,
+ FLASH_DMA_STATUS = 0x14,
+ FLASH_DMA_INTERRUPT_DESC = 0x18,
+ FLASH_DMA_INTERRUPT_DESC_EXT = 0x1c,
+ FLASH_DMA_ERROR_STATUS = 0x20,
+ FLASH_DMA_CURRENT_DESC = 0x24,
+ FLASH_DMA_CURRENT_DESC_EXT = 0x28,
+};
+
+static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
+{
+ return ctrl->flash_dma_base;
+}
+
+static inline bool flash_dma_buf_ok(const void *buf)
+{
+#ifndef __UBOOT__
+ return buf && !is_vmalloc_addr(buf) &&
+ likely(IS_ALIGNED((uintptr_t)buf, 4));
+#else
+ return buf && likely(IS_ALIGNED((uintptr_t)buf, 4));
+#endif /* __UBOOT__ */
+}
+
+static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
+ u32 val)
+{
+ brcmnand_writel(val, ctrl->flash_dma_base + offs);
+}
+
+static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
+{
+ return brcmnand_readl(ctrl->flash_dma_base + offs);
+}
+
+/* Low-level operation types: command, address, write, or read */
+enum brcmnand_llop_type {
+ LL_OP_CMD,
+ LL_OP_ADDR,
+ LL_OP_WR,
+ LL_OP_RD,
+};
+
+/***********************************************************************
+ * Internal support functions
+ ***********************************************************************/
+
+static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
+ struct brcmnand_cfg *cfg)
+{
+ if (ctrl->nand_version <= 0x0701)
+ return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15;
+ else
+ return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
+ cfg->ecc_level == 15) ||
+ (cfg->spare_area_size == 28 && cfg->ecc_level == 16));
+}
+
+/*
+ * Returns a nand_ecclayout structure for the given layout/configuration.
+ * Returns NULL on failure.
+ */
+static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
+ struct brcmnand_host *host)
+{
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int i, j;
+ struct nand_ecclayout *layout;
+ int req;
+ int sectors;
+ int sas;
+ int idx1, idx2;
+
+#ifndef __UBOOT__
+ layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL);
+#else
+ layout = devm_kzalloc(host->pdev, sizeof(*layout), GFP_KERNEL);
+#endif
+ if (!layout)
+ return NULL;
+
+ sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+ sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+ /* Hamming */
+ if (is_hamming_ecc(host->ctrl, cfg)) {
+ for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
+ /* First sector of each page may have BBI */
+ if (i == 0) {
+ layout->oobfree[idx2].offset = i * sas + 1;
+ /* Small-page NAND use byte 6 for BBI */
+ if (cfg->page_size == 512)
+ layout->oobfree[idx2].offset--;
+ layout->oobfree[idx2].length = 5;
+ } else {
+ layout->oobfree[idx2].offset = i * sas;
+ layout->oobfree[idx2].length = 6;
+ }
+ idx2++;
+ layout->eccpos[idx1++] = i * sas + 6;
+ layout->eccpos[idx1++] = i * sas + 7;
+ layout->eccpos[idx1++] = i * sas + 8;
+ layout->oobfree[idx2].offset = i * sas + 9;
+ layout->oobfree[idx2].length = 7;
+ idx2++;
+ /* Leave zero-terminated entry for OOBFREE */
+ if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
+ idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
+ break;
+ }
+
+ return layout;
+ }
+
+ /*
+ * CONTROLLER_VERSION:
+ * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
+ * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
+ * But we will just be conservative.
+ */
+ req = DIV_ROUND_UP(ecc_level * 14, 8);
+ if (req >= sas) {
+ dev_err(host->pdev,
+ "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
+ req, sas);
+ return NULL;
+ }
+
+ layout->eccbytes = req * sectors;
+ for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
+ for (j = sas - req; j < sas && idx1 <
+ MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
+ layout->eccpos[idx1] = i * sas + j;
+
+ /* First sector of each page may have BBI */
+ if (i == 0) {
+ if (cfg->page_size == 512 && (sas - req >= 6)) {
+ /* Small-page NAND use byte 6 for BBI */
+ layout->oobfree[idx2].offset = 0;
+ layout->oobfree[idx2].length = 5;
+ idx2++;
+ if (sas - req > 6) {
+ layout->oobfree[idx2].offset = 6;
+ layout->oobfree[idx2].length =
+ sas - req - 6;
+ idx2++;
+ }
+ } else if (sas > req + 1) {
+ layout->oobfree[idx2].offset = i * sas + 1;
+ layout->oobfree[idx2].length = sas - req - 1;
+ idx2++;
+ }
+ } else if (sas > req) {
+ layout->oobfree[idx2].offset = i * sas;
+ layout->oobfree[idx2].length = sas - req;
+ idx2++;
+ }
+ /* Leave zero-terminated entry for OOBFREE */
+ if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
+ idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
+ break;
+ }
+
+ return layout;
+}
+
+static struct nand_ecclayout *brcmstb_choose_ecc_layout(
+ struct brcmnand_host *host)
+{
+ struct nand_ecclayout *layout;
+ struct brcmnand_cfg *p = &host->hwcfg;
+ unsigned int ecc_level = p->ecc_level;
+
+ if (p->sector_size_1k)
+ ecc_level <<= 1;
+
+ layout = brcmnand_create_layout(ecc_level, host);
+ if (!layout) {
+ dev_err(host->pdev,
+ "no proper ecc_layout for this NAND cfg\n");
+ return NULL;
+ }
+
+ return layout;
+}
+
+static void brcmnand_wp(struct mtd_info *mtd, int wp)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+
+ if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
+ static int old_wp = -1;
+ int ret;
+
+ if (old_wp != wp) {
+ dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
+ old_wp = wp;
+ }
+
+ /*
+ * make sure ctrl/flash are ready before and after
+ * changing the state of the #WP pin
+ */
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
+ NAND_STATUS_READY,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY, 0);
+ if (ret)
+ return;
+
+ brcmnand_set_wp(ctrl, wp);
+ nand_status_op(chip, NULL);
+ /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
+ ret = bcmnand_ctrl_poll_status(ctrl,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ NAND_STATUS_WP,
+ NAND_CTRL_RDY |
+ NAND_STATUS_READY |
+ (wp ? 0 : NAND_STATUS_WP), 0);
+ if (ret)
+ dev_err(host->pdev, "nand #WP expected %s\n",
+ wp ? "on" : "off");
+ }
+}
+
+/* Helper functions for reading and writing OOB registers */
+static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return 0x77;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
+ return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
+}
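+
+/*
+ * Editor's worked example (illustrative): OOB bytes are packed big-endian
+ * within each 32-bit register, so offs = 5 reads the word at base + 4 and
+ * shifts by 24 - ((5 & 3) << 3) = 16, i.e. byte 5 sits in bits [23:16].
+ */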
+
+static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
+ u32 data)
+{
+ u16 offset0, offset10, reg_offs;
+
+ offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
+ offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
+
+ if (offs >= ctrl->max_oob)
+ return;
+
+ if (offs >= 16 && offset10)
+ reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+ else
+ reg_offs = offset0 + (offs & ~0x03);
+
+ nand_writereg(ctrl, reg_offs, data);
+}
+
+/*
+ * read_oob_from_regs - read data from OOB registers
+ * @ctrl: NAND controller
+ * @i: sub-page sector index
+ * @oob: buffer to read to
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
+ int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+ for (j = 0; j < tbytes; j++)
+ oob[j] = oob_reg_read(ctrl, j);
+ return tbytes;
+}
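+
+/*
+ * Editor's worked example (illustrative): with sas = 16 and sector_1k = 1,
+ * tbytes = 32; on a v6.0+ controller (max_oob = 64) the odd half-sector
+ * contributes max(0, 32 - 64) = 0 bytes, so the whole 32-byte OOB comes
+ * back with the even half-sector.
+ */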
+
+/*
+ * write_oob_to_regs - write data to OOB registers
+ * @i: sub-page sector index
+ * @oob: buffer to write from
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
+ const u8 *oob, int sas, int sector_1k)
+{
+ int tbytes = sas << sector_1k;
+ int j;
+
+ /* Adjust OOB values for 1K sector size */
+ if (sector_1k && (i & 0x01))
+ tbytes = max(0, tbytes - (int)ctrl->max_oob);
+ tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+ for (j = 0; j < tbytes; j += 4)
+ oob_reg_write(ctrl, j,
+ (oob[j + 0] << 24) |
+ (oob[j + 1] << 16) |
+ (oob[j + 2] << 8) |
+ (oob[j + 3] << 0));
+ return tbytes;
+}
+
+#ifndef __UBOOT__
+static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ /* Discard all NAND_CTLRDY interrupts during DMA */
+ if (ctrl->dma_pending)
+ return IRQ_HANDLED;
+
+ complete(&ctrl->done);
+ return IRQ_HANDLED;
+}
+
+/* Handle SoC-specific interrupt hardware */
+static irqreturn_t brcmnand_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ if (ctrl->soc->ctlrdy_ack(ctrl->soc))
+ return brcmnand_ctlrdy_irq(irq, data);
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t brcmnand_dma_irq(int irq, void *data)
+{
+ struct brcmnand_controller *ctrl = data;
+
+ complete(&ctrl->dma_done);
+
+ return IRQ_HANDLED;
+}
+#endif /* __UBOOT__ */
+
+static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int ret;
+
+ dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
+ brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
+ BUG_ON(ctrl->cmd_pending != 0);
+ ctrl->cmd_pending = cmd;
+
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
+ WARN_ON(ret);
+
+ mb(); /* flush previous writes */
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
+ cmd << brcmnand_cmd_shift(ctrl));
+}
+
+/***********************************************************************
+ * NAND MTD API: read/program/erase
+ ***********************************************************************/
+
+static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+ /* intentionally left blank */
+}
+
+static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+
+#ifndef __UBOOT__
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
+ if (ctrl->cmd_pending &&
+ wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
+ u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
+ >> brcmnand_cmd_shift(ctrl);
+
+ dev_err_ratelimited(ctrl->dev,
+ "timeout waiting for command %#02x\n", cmd);
+ dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
+ brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+ }
+#else
+ unsigned long timeo = 100; /* 100 msec */
+ int ret;
+
+ dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
+
+ ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, timeo);
+ WARN_ON(ret);
+#endif /* __UBOOT__ */
+
+ ctrl->cmd_pending = 0;
+ return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+}
+
+enum {
+ LLOP_RE = BIT(16),
+ LLOP_WE = BIT(17),
+ LLOP_ALE = BIT(18),
+ LLOP_CLE = BIT(19),
+ LLOP_RETURN_IDLE = BIT(31),
+
+ LLOP_DATA_MASK = GENMASK(15, 0),
+};
+
+static int brcmnand_low_level_op(struct brcmnand_host *host,
+ enum brcmnand_llop_type type, u32 data,
+ bool last_op)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ struct nand_chip *chip = &host->chip;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u32 tmp;
+
+ tmp = data & LLOP_DATA_MASK;
+ switch (type) {
+ case LL_OP_CMD:
+ tmp |= LLOP_WE | LLOP_CLE;
+ break;
+ case LL_OP_ADDR:
+ /* WE | ALE */
+ tmp |= LLOP_WE | LLOP_ALE;
+ break;
+ case LL_OP_WR:
+ /* WE */
+ tmp |= LLOP_WE;
+ break;
+ case LL_OP_RD:
+ /* RE */
+ tmp |= LLOP_RE;
+ break;
+ }
+ if (last_op)
+ /* RETURN_IDLE */
+ tmp |= LLOP_RETURN_IDLE;
+
+ dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
+
+ brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
+
+ brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
+ return brcmnand_waitfunc(mtd, chip);
+}
+
+static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 addr = (u64)page_addr << chip->page_shift;
+ int native_cmd = 0;
+
+ if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
+ command == NAND_CMD_RNDOUT)
+ addr = (u64)column;
+ /* Avoid propagating a negative, don't-care address */
+ else if (page_addr < 0)
+ addr = 0;
+
+ dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
+ (unsigned long long)addr);
+
+ host->last_cmd = command;
+ host->last_byte = 0;
+ host->last_addr = addr;
+
+ switch (command) {
+ case NAND_CMD_RESET:
+ native_cmd = CMD_FLASH_RESET;
+ break;
+ case NAND_CMD_STATUS:
+ native_cmd = CMD_STATUS_READ;
+ break;
+ case NAND_CMD_READID:
+ native_cmd = CMD_DEVICE_ID_READ;
+ break;
+ case NAND_CMD_READOOB:
+ native_cmd = CMD_SPARE_AREA_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ native_cmd = CMD_BLOCK_ERASE;
+ brcmnand_wp(mtd, 0);
+ break;
+ case NAND_CMD_PARAM:
+ native_cmd = CMD_PARAMETER_READ;
+ break;
+ case NAND_CMD_SET_FEATURES:
+ case NAND_CMD_GET_FEATURES:
+ brcmnand_low_level_op(host, LL_OP_CMD, command, false);
+ brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
+ break;
+ case NAND_CMD_RNDOUT:
+ native_cmd = CMD_PARAMETER_CHANGE_COL;
+ addr &= ~((u64)(FC_BYTES - 1));
+ /*
+ * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
+ * NB: hwcfg.sector_size_1k may not be initialized yet
+ */
+ if (brcmnand_get_sector_size_1k(host)) {
+ host->hwcfg.sector_size_1k =
+ brcmnand_get_sector_size_1k(host);
+ brcmnand_set_sector_size_1k(host, 0);
+ }
+ break;
+ }
+
+ if (!native_cmd)
+ return;
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+ brcmnand_send_cmd(host, native_cmd);
+ brcmnand_waitfunc(mtd, chip);
+
+ if (native_cmd == CMD_PARAMETER_READ ||
+ native_cmd == CMD_PARAMETER_CHANGE_COL) {
+ /* Copy flash cache word-wise */
+ u32 *flash_cache = (u32 *)ctrl->flash_cache;
+ int i;
+
+ brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+ /*
+ * Must cache the FLASH_CACHE now, since changes in
+ * SECTOR_SIZE_1K may invalidate it
+ */
+ for (i = 0; i < FC_WORDS; i++) {
+ u32 fc;
+
+ fc = brcmnand_read_fc(ctrl, i);
+
+ /*
+ * Flash cache is big endian for parameter pages, at
+ * least on STB SoCs
+ */
+ if (ctrl->parameter_page_big_endian)
+ flash_cache[i] = be32_to_cpu(fc);
+ else
+ flash_cache[i] = le32_to_cpu(fc);
+ }
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+
+ /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
+ if (host->hwcfg.sector_size_1k)
+ brcmnand_set_sector_size_1k(host,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* Re-enabling protection is necessary only after erase */
+ if (command == NAND_CMD_ERASE1)
+ brcmnand_wp(mtd, 1);
+}
+
+static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ uint8_t ret = 0;
+ int addr, offs;
+
+ switch (host->last_cmd) {
+ case NAND_CMD_READID:
+ if (host->last_byte < 4)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
+ (24 - (host->last_byte << 3));
+ else if (host->last_byte < 8)
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
+ (56 - (host->last_byte << 3));
+ break;
+
+ case NAND_CMD_READOOB:
+ ret = oob_reg_read(ctrl, host->last_byte);
+ break;
+
+ case NAND_CMD_STATUS:
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_FLASH_STATUS;
+ if (wp_on) /* hide WP status */
+ ret |= NAND_STATUS_WP;
+ break;
+
+ case NAND_CMD_PARAM:
+ case NAND_CMD_RNDOUT:
+ addr = host->last_addr + host->last_byte;
+ offs = addr & (FC_BYTES - 1);
+
+ /* At FC_BYTES boundary, switch to next column */
+ if (host->last_byte > 0 && offs == 0)
+ nand_change_read_column_op(chip, addr, NULL, 0, false);
+
+ ret = ctrl->flash_cache[offs];
+ break;
+ case NAND_CMD_GET_FEATURES:
+ if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
+ ret = 0;
+ } else {
+ bool last = host->last_byte ==
+ ONFI_SUBFEATURE_PARAM_LEN - 1;
+ brcmnand_low_level_op(host, LL_OP_RD, 0, last);
+ ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
+ }
+ }
+
+ dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
+ host->last_byte++;
+
+ return ret;
+}
+
+static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++, buf++)
+ *buf = brcmnand_read_byte(mtd);
+}
+
+static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ int i;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+
+ switch (host->last_cmd) {
+ case NAND_CMD_SET_FEATURES:
+ for (i = 0; i < len; i++)
+ brcmnand_low_level_op(host, LL_OP_WR, buf[i],
+ (i + 1) == len);
+ break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+/**
+ * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
+ * following ahead of time:
+ * - Is this descriptor the beginning or end of a linked list?
+ * - What is the (DMA) address of the next descriptor in the linked list?
+ */
+#ifndef __UBOOT__
+static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
+ struct brcm_nand_dma_desc *desc, u64 addr,
+ dma_addr_t buf, u32 len, u8 dma_cmd,
+ bool begin, bool end,
+ dma_addr_t next_desc)
+{
+ memset(desc, 0, sizeof(*desc));
+ /* Descriptors are written in native byte order (wordwise) */
+ desc->next_desc = lower_32_bits(next_desc);
+ desc->next_desc_ext = upper_32_bits(next_desc);
+ desc->cmd_irq = (dma_cmd << 24) |
+ (end ? (0x03 << 8) : 0) | /* IRQ | STOP */
+ (!!begin) | ((!!end) << 1); /* head, tail */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ desc->cmd_irq |= 0x01 << 12;
+#endif
+ desc->dram_addr = lower_32_bits(buf);
+ desc->dram_addr_ext = upper_32_bits(buf);
+ desc->tfr_len = len;
+ desc->total_len = len;
+ desc->flash_addr = lower_32_bits(addr);
+ desc->flash_addr_ext = upper_32_bits(addr);
+ desc->cs = host->cs;
+ desc->status_valid = 0x01;
+ return 0;
+}
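+
+/*
+ * Editor's sketch (illustrative, not part of this patch): chaining two
+ * descriptors per the rules in the comment above. 'desc' and 'desc_pa'
+ * are assumed to be a coherent two-entry descriptor array and its DMA
+ * address; only the tail descriptor gets end=true (IRQ | STOP).
+ */
+static int sketch_fill_dma_chain(struct brcmnand_host *host,
+ struct brcm_nand_dma_desc *desc, dma_addr_t desc_pa,
+ u64 addr, dma_addr_t buf, u32 len, u8 dma_cmd)
+{
+ u32 half = len / 2;
+
+ /* head: begin=true, next_desc points at the second descriptor */
+ brcmnand_fill_dma_desc(host, &desc[0], addr, buf, half, dma_cmd,
+ true, false, desc_pa + sizeof(desc[0]));
+ /* tail: end=true, next_desc unused */
+ return brcmnand_fill_dma_desc(host, &desc[1], addr + half, buf + half,
+ len - half, dma_cmd, false, true, 0);
+}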
+
+/**
+ * Kick the FLASH_DMA engine, with a given DMA descriptor
+ */
+static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned long timeo = msecs_to_jiffies(100);
+
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
+ flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
+ (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+
+ /* Start FLASH_DMA engine */
+ ctrl->dma_pending = true;
+ mb(); /* flush previous writes */
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
+
+ if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
+ dev_err(ctrl->dev,
+ "timeout waiting for DMA; status %#x, error status %#x\n",
+ flash_dma_readl(ctrl, FLASH_DMA_STATUS),
+ flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
+ }
+ ctrl->dma_pending = false;
+ flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
+}
+
+static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+ u32 len, u8 dma_cmd)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ dma_addr_t buf_pa;
+ int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
+ if (dma_mapping_error(ctrl->dev, buf_pa)) {
+ dev_err(ctrl->dev, "unable to map buffer for DMA\n");
+ return -ENOMEM;
+ }
+
+ brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
+ dma_cmd, true, true, 0);
+
+ brcmnand_dma_run(host, ctrl->dma_pa);
+
+ dma_unmap_single(ctrl->dev, buf_pa, len, dir);
+
+ if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
+ return -EBADMSG;
+ else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
+ return -EUCLEAN;
+
+ return 0;
+}
+#endif /* __UBOOT__ */
+
+/*
+ * Assumes proper CS is already set
+ */
+static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf,
+ u8 *oob, u64 *err_addr)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ int i, j, ret = 0;
+
+ /* Clear error addresses */
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+ /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
+ brcmnand_send_cmd(host, CMD_PAGE_READ);
+ brcmnand_waitfunc(mtd, chip);
+
+ if (likely(buf)) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ *buf = brcmnand_read_fc(ctrl, j);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+ }
+
+ if (oob)
+ oob += read_oob_from_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+
+ if (!ret) {
+ *err_addr = brcmnand_read_reg(ctrl,
+ BRCMNAND_UNCORR_ADDR) |
+ ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_UNCORR_EXT_ADDR)
+ & 0xffff) << 32);
+ if (*err_addr)
+ ret = -EBADMSG;
+ }
+
+ if (!ret) {
+ *err_addr = brcmnand_read_reg(ctrl,
+ BRCMNAND_CORR_ADDR) |
+ ((u64)(brcmnand_read_reg(ctrl,
+ BRCMNAND_CORR_EXT_ADDR)
+ & 0xffff) << 32);
+ if (*err_addr)
+ ret = -EUCLEAN;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
+ * error
+ *
+ * Because the HW ECC signals an ECC error if an erased page has even a single
+ * bitflip, we must check each ECC error to see if it is actually an erased
+ * page with bitflips, not a truly corrupted page.
+ *
+ * On a real error, return a negative error code (-EBADMSG for ECC error), and
+ * buf will contain raw data.
+ * Otherwise, buf is filled with 0xffs and the maximum number of
+ * bitflips-per-ECC-sector is returned to the caller.
+ */
+static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
+ struct nand_chip *chip, void *buf, u64 addr)
+{
+ int i, sas;
+ void *oob = chip->oob_poi;
+ int bitflips = 0;
+ int page = addr >> chip->page_shift;
+ int ret;
+
+ if (!buf) {
+#ifndef __UBOOT__
+ buf = chip->data_buf;
+#else
+ buf = chip->buffers->databuf;
+#endif
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ }
+
+ sas = mtd->oobsize / chip->ecc.steps;
+
+ /* read without ecc for verification */
+ ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+ ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
+ oob, sas, NULL, 0,
+ chip->ecc.strength);
+ if (ret < 0)
+ return ret;
+
+ bitflips = max(bitflips, ret);
+ }
+
+ return bitflips;
+}
+
+static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, unsigned int trans, u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u64 err_addr = 0;
+ int err;
+ bool retry = true;
+
+ dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+
+try_dmaread:
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
+
+#ifndef __UBOOT__
+ if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
+ CMD_PAGE_READ);
+ if (err) {
+ if (mtd_is_bitflip_or_eccerr(err))
+ err_addr = addr;
+ else
+ return -EIO;
+ }
+ } else {
+ if (oob)
+ memset(oob, 0x99, mtd->oobsize);
+
+ err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+ oob, &err_addr);
+ }
+#else
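+	/* The U-Boot build compiles out the FLASH_DMA path: always read by PIO */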
+ if (oob)
+ memset(oob, 0x99, mtd->oobsize);
+
+ err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+ oob, &err_addr);
+#endif /* __UBOOT__ */
+
+ if (mtd_is_eccerr(err)) {
+		/*
+		 * On controller versions 7.0 and 7.1, a DMA read issued after
+		 * a PIO read that reported an uncorrectable error can capture
+		 * that stale error; it is cleared only by a subsequent DMA
+		 * read. Retry once to clear a possible false error reported
+		 * for the current DMA read.
+		 */
+ if ((ctrl->nand_version == 0x0700) ||
+ (ctrl->nand_version == 0x0701)) {
+ if (retry) {
+ retry = false;
+ goto try_dmaread;
+ }
+ }
+
+ /*
+	 * Controllers from version 7.2 on detect erased-page bitflips in
+	 * hardware; apply sw verification for older controllers only.
+ */
+ if (ctrl->nand_version < 0x0702) {
+ err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
+ addr);
+ /* erased page bitflips corrected */
+ if (err >= 0)
+ return err;
+ }
+
+ dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.failed++;
+ /* NAND layer expects zero on ECC errors */
+ return 0;
+ }
+
+ if (mtd_is_bitflip(err)) {
+ unsigned int corrected = brcmnand_count_corrected(ctrl);
+
+ dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.corrected += corrected;
+ /* Always exceed the software-imposed threshold */
+ return max(mtd->bitflip_threshold, corrected);
+ }
+
+ return 0;
+}
+
+static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
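+	/* one transfer per FC_BYTES-sized (512B) flash-cache chunk */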
+ return brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+}
+
+static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+ int ret;
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_read(mtd, chip, host->last_addr,
+ mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+ return ret;
+}
+
+static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+}
+
+static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+ mtd->writesize >> FC_SHIFT,
+ NULL, (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+ return 0;
+}
+
+static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+ u64 addr, const u32 *buf, u8 *oob)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_controller *ctrl = host->ctrl;
+ unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
+ int status, ret = 0;
+
+ dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
+
+ if (unlikely((unsigned long)buf & 0x03)) {
+ dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
+ buf = (u32 *)((unsigned long)buf & ~0x03);
+ }
+
+ brcmnand_wp(mtd, 0);
+
+ for (i = 0; i < ctrl->max_oob; i += 4)
+ oob_reg_write(ctrl, i, 0xffffffff);
+
+#ifndef __UBOOT__
+ if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+ if (brcmnand_dma_trans(host, addr, (u32 *)buf,
+ mtd->writesize, CMD_PROGRAM_PAGE))
+ ret = -EIO;
+ goto out;
+ }
+#endif /* __UBOOT__ */
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+ (host->cs << 16) | ((addr >> 32) & 0xffff));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+
+ for (i = 0; i < trans; i++, addr += FC_BYTES) {
+ /* full address MUST be set before populating FC */
+ brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+ lower_32_bits(addr));
+ (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+ if (buf) {
+ brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+ for (j = 0; j < FC_WORDS; j++, buf++)
+ brcmnand_write_fc(ctrl, j, *buf);
+
+ brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+ } else if (oob) {
+ for (j = 0; j < FC_WORDS; j++)
+ brcmnand_write_fc(ctrl, j, 0xffffffff);
+ }
+
+ if (oob) {
+ oob += write_oob_to_regs(ctrl, i, oob,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+ }
+
+ /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
+ brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
+ status = brcmnand_waitfunc(mtd, chip);
+
+ if (status & NAND_STATUS_FAIL) {
+ dev_info(ctrl->dev, "program failed at %llx\n",
+ (unsigned long long)addr);
+ ret = -EIO;
+ goto out;
+ }
+ }
+out:
+ brcmnand_wp(mtd, 1);
+ return ret;
+}
+
+static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int brcmnand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ void *oob = oob_required ? chip->oob_poi : NULL;
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ brcmnand_set_ecc_enabled(host, 0);
+ brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+ brcmnand_set_ecc_enabled(host, 1);
+
+ return nand_prog_page_end_op(chip);
+}
+
+static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
+ NULL, chip->oob_poi);
+}
+
+static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ int ret;
+
+ brcmnand_set_ecc_enabled(host, 0);
+ ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
+ (u8 *)chip->oob_poi);
+ brcmnand_set_ecc_enabled(host, 1);
+
+ return ret;
+}
+
+/***********************************************************************
+ * Per-CS setup (1 NAND device)
+ ***********************************************************************/
+
+static int brcmnand_set_cfg(struct brcmnand_host *host,
+ struct brcmnand_cfg *cfg)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct nand_chip *chip = &host->chip;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u8 block_size = 0, page_size = 0, device_size = 0;
+ u32 tmp;
+
+ if (ctrl->block_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
+ if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
+ block_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ return -EINVAL;
+ }
+ } else {
+ block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
+ }
+
+ if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
+ cfg->block_size > ctrl->max_block_size)) {
+ dev_warn(ctrl->dev, "invalid block size %u\n",
+ cfg->block_size);
+ block_size = 0;
+ }
+
+ if (ctrl->page_sizes) {
+ int i, found;
+
+ for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
+ if (ctrl->page_sizes[i] == cfg->page_size) {
+ page_size = i;
+ found = 1;
+ }
+ if (!found) {
+ dev_warn(ctrl->dev, "invalid page size %u\n",
+ cfg->page_size);
+ return -EINVAL;
+ }
+ } else {
+ page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
+ }
+
+ if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
+ cfg->page_size > ctrl->max_page_size)) {
+ dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
+ return -EINVAL;
+ }
+
+ if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
+ dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
+ (unsigned long long)cfg->device_size);
+ return -EINVAL;
+ }
+ device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
+
+ tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
+ (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
+ (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
+ (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
+ (device_size << CFG_DEVICE_SIZE_SHIFT);
+ if (cfg_offs == cfg_ext_offs) {
+ tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
+ (block_size << CFG_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_offs, tmp);
+ } else {
+ nand_writereg(ctrl, cfg_offs, tmp);
+ tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
+ (block_size << CFG_EXT_BLK_SIZE_SHIFT);
+ nand_writereg(ctrl, cfg_ext_offs, tmp);
+ }
+
+ tmp = nand_readreg(ctrl, acc_control_offs);
+ tmp &= ~brcmnand_ecc_level_mask(ctrl);
+ tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp &= ~brcmnand_spare_area_mask(ctrl);
+ tmp |= cfg->spare_area_size;
+ nand_writereg(ctrl, acc_control_offs, tmp);
+
+ brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
+
+ /* threshold = ceil(BCH-level * 0.75) */
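+	/* e.g., BCH-8 (strength 8): DIV_ROUND_UP(8 * 3, 4) == 6 */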
+ brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
+
+ return 0;
+}
+
+static void brcmnand_print_cfg(struct brcmnand_host *host,
+ char *buf, struct brcmnand_cfg *cfg)
+{
+ buf += sprintf(buf,
+ "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
+ (unsigned long long)cfg->device_size >> 20,
+ cfg->block_size >> 10,
+ cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
+ cfg->page_size >= 1024 ? "KiB" : "B",
+ cfg->spare_area_size, cfg->device_width);
+
+ /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
+ if (is_hamming_ecc(host->ctrl, cfg))
+ sprintf(buf, ", Hamming ECC");
+ else if (cfg->sector_size_1k)
+ sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
+ else
+ sprintf(buf, ", BCH-%u", cfg->ecc_level);
+}
+
+/*
+ * Minimum number of bytes to address a page. Calculated as:
+ * roundup(log2(size / page-size) / 8)
+ *
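+ * e.g., a 1GiB device with 2KiB pages needs log2(2^30 / 2^11) = 19 address
+ * bits, which rounds up to 3 bytes.
+ *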
+ * NB: the following does not "round up" for non-power-of-2 'size'; but this is
+ * OK because many other things will break if 'size' is irregular...
+ */
+static inline int get_blk_adr_bytes(u64 size, u32 writesize)
+{
+ return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
+}
+
+static int brcmnand_setup_dev(struct brcmnand_host *host)
+{
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ struct nand_chip *chip = &host->chip;
+ struct brcmnand_controller *ctrl = host->ctrl;
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ char msg[128];
+ u32 offs, tmp, oob_sector;
+ int ret;
+
+ memset(cfg, 0, sizeof(*cfg));
+
+#ifndef __UBOOT__
+ ret = of_property_read_u32(nand_get_flash_node(chip),
+ "brcm,nand-oob-sector-size",
+ &oob_sector);
+#else
+ ret = ofnode_read_u32(nand_get_flash_node(chip),
+ "brcm,nand-oob-sector-size",
+ &oob_sector);
+#endif /* __UBOOT__ */
+ if (ret) {
+ /* Use detected size */
+ cfg->spare_area_size = mtd->oobsize /
+ (mtd->writesize >> FC_SHIFT);
+ } else {
+ cfg->spare_area_size = oob_sector;
+ }
+ if (cfg->spare_area_size > ctrl->max_oob)
+ cfg->spare_area_size = ctrl->max_oob;
+ /*
+ * Set oobsize to be consistent with controller's spare_area_size, as
+ * the rest is inaccessible.
+ */
+ mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+
+ cfg->device_size = mtd->size;
+ cfg->block_size = mtd->erasesize;
+ cfg->page_size = mtd->writesize;
+ cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
+ cfg->col_adr_bytes = 2;
+ cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
+
+ if (chip->ecc.mode != NAND_ECC_HW) {
+ dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
+ chip->ecc.mode);
+ return -EINVAL;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+ if (chip->ecc.strength == 1 && chip->ecc.size == 512)
+ /* Default to Hamming for 1-bit ECC, if unspecified */
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ else
+ /* Otherwise, BCH */
+ chip->ecc.algo = NAND_ECC_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
+ chip->ecc.size != 512)) {
+ dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
+ chip->ecc.strength, chip->ecc.size);
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.size) {
+ case 512:
+ if (chip->ecc.algo == NAND_ECC_HAMMING)
+ cfg->ecc_level = 15;
+ else
+ cfg->ecc_level = chip->ecc.strength;
+ cfg->sector_size_1k = 0;
+ break;
+ case 1024:
+ if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
+ dev_err(ctrl->dev, "1KB sectors not supported\n");
+ return -EINVAL;
+ }
+ if (chip->ecc.strength & 0x1) {
+ dev_err(ctrl->dev,
+ "odd ECC not supported with 1KB sectors\n");
+ return -EINVAL;
+ }
+
+ cfg->ecc_level = chip->ecc.strength >> 1;
+ cfg->sector_size_1k = 1;
+ break;
+ default:
+ dev_err(ctrl->dev, "unsupported ECC size: %d\n",
+ chip->ecc.size);
+ return -EINVAL;
+ }
+
+ cfg->ful_adr_bytes = cfg->blk_adr_bytes;
+ if (mtd->writesize > 512)
+ cfg->ful_adr_bytes += cfg->col_adr_bytes;
+ else
+ cfg->ful_adr_bytes += 1;
+
+ ret = brcmnand_set_cfg(host, cfg);
+ if (ret)
+ return ret;
+
+ brcmnand_set_ecc_enabled(host, 1);
+
+ brcmnand_print_cfg(host, msg, cfg);
+ dev_info(ctrl->dev, "detected %s\n", msg);
+
+ /* Configure ACC_CONTROL */
+ offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+ tmp = nand_readreg(ctrl, offs);
+ tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
+ tmp &= ~ACC_CONTROL_RD_ERASED;
+
+	/* We need to turn on reads of erased pages protected by ECC */
+ if (ctrl->nand_version >= 0x0702)
+ tmp |= ACC_CONTROL_RD_ERASED;
+ tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
+ if (ctrl->features & BRCMNAND_HAS_PREFETCH)
+ tmp &= ~ACC_CONTROL_PREFETCH;
+
+ nand_writereg(ctrl, offs, tmp);
+
+ return 0;
+}
+
+#ifndef __UBOOT__
+static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
+#else
+static int brcmnand_init_cs(struct brcmnand_host *host, ofnode dn)
+#endif
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+#ifndef __UBOOT__
+ struct platform_device *pdev = host->pdev;
+#else
+ struct udevice *pdev = host->pdev;
+#endif /* __UBOOT__ */
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ int ret;
+ u16 cfg_offs;
+
+#ifndef __UBOOT__
+ ret = of_property_read_u32(dn, "reg", &host->cs);
+#else
+ ret = ofnode_read_s32(dn, "reg", &host->cs);
+#endif
+ if (ret) {
+ dev_err(pdev, "can't get chip-select\n");
+ return -ENXIO;
+ }
+
+ mtd = nand_to_mtd(&host->chip);
+ chip = &host->chip;
+
+ nand_set_flash_node(chip, dn);
+ nand_set_controller_data(chip, host);
+#ifndef __UBOOT__
+ mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
+ host->cs);
+#else
+ mtd->name = devm_kasprintf(pdev, GFP_KERNEL, "brcmnand.%d",
+ host->cs);
+#endif /* __UBOOT__ */
+ if (!mtd->name)
+ return -ENOMEM;
+
+ mtd->owner = THIS_MODULE;
+#ifndef __UBOOT__
+ mtd->dev.parent = &pdev->dev;
+#else
+ mtd->dev->parent = pdev;
+#endif /* __UBOOT__ */
+
+ chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
+ chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
+
+ chip->cmd_ctrl = brcmnand_cmd_ctrl;
+ chip->cmdfunc = brcmnand_cmdfunc;
+ chip->waitfunc = brcmnand_waitfunc;
+ chip->read_byte = brcmnand_read_byte;
+ chip->read_buf = brcmnand_read_buf;
+ chip->write_buf = brcmnand_write_buf;
+
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.read_page = brcmnand_read_page;
+ chip->ecc.write_page = brcmnand_write_page;
+ chip->ecc.read_page_raw = brcmnand_read_page_raw;
+ chip->ecc.write_page_raw = brcmnand_write_page_raw;
+ chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
+ chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
+ chip->ecc.read_oob = brcmnand_read_oob;
+ chip->ecc.write_oob = brcmnand_write_oob;
+
+ chip->controller = &ctrl->controller;
+
+ /*
+	 * The bootloader might have configured 16-bit mode, but the
+	 * NAND READID command only works in 8-bit mode. Force 8-bit
+	 * mode here to ensure that the READID command works.
+ */
+ cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ nand_writereg(ctrl, cfg_offs,
+ nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ /*
+ * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
+ * to/from, and have nand_base pass us a bounce buffer instead, as
+ * needed.
+ */
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (brcmnand_setup_dev(host))
+ return -ENXIO;
+
+ chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
+ /* only use our internal HW threshold */
+ mtd->bitflip_threshold = 1;
+
+ chip->ecc.layout = brcmstb_choose_ecc_layout(host);
+ if (!chip->ecc.layout)
+ return -ENXIO;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return ret;
+
+#ifndef __UBOOT__
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret)
+ nand_cleanup(chip);
+#else
+ ret = nand_register(0, mtd);
+#endif /* __UBOOT__ */
+
+ return ret;
+}
+
+#ifndef __UBOOT__
+static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
+ int restore)
+{
+ struct brcmnand_controller *ctrl = host->ctrl;
+ u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+ u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_CFG_EXT);
+ u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+ BRCMNAND_CS_ACC_CONTROL);
+ u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
+ u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
+
+ if (restore) {
+ nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
+ if (cfg_offs != cfg_ext_offs)
+ nand_writereg(ctrl, cfg_ext_offs,
+ host->hwcfg.config_ext);
+ nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
+ nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
+ nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
+ } else {
+ host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
+ if (cfg_offs != cfg_ext_offs)
+ host->hwcfg.config_ext =
+ nand_readreg(ctrl, cfg_ext_offs);
+ host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
+ host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
+ host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
+ }
+}
+
+static int brcmnand_suspend(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ list_for_each_entry(host, &ctrl->host_list, node)
+ brcmnand_save_restore_cs_config(host, 0);
+
+ ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
+ ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
+ ctrl->corr_stat_threshold =
+ brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
+
+ if (has_flash_dma(ctrl))
+ ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+
+ return 0;
+}
+
+static int brcmnand_resume(struct device *dev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+ struct brcmnand_host *host;
+
+ if (has_flash_dma(ctrl)) {
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+ }
+
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
+ brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
+ ctrl->corr_stat_threshold);
+ if (ctrl->soc) {
+ /* Clear/re-enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ }
+
+ list_for_each_entry(host, &ctrl->host_list, node) {
+ struct nand_chip *chip = &host->chip;
+
+ brcmnand_save_restore_cs_config(host, 1);
+
+ /* Reset the chip, required by some chips after power-up */
+ nand_reset_op(chip);
+ }
+
+ return 0;
+}
+
+const struct dev_pm_ops brcmnand_pm_ops = {
+ .suspend = brcmnand_suspend,
+ .resume = brcmnand_resume,
+};
+EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
+
+static const struct of_device_id brcmnand_of_match[] = {
+ { .compatible = "brcm,brcmnand-v4.0" },
+ { .compatible = "brcm,brcmnand-v5.0" },
+ { .compatible = "brcm,brcmnand-v6.0" },
+ { .compatible = "brcm,brcmnand-v6.1" },
+ { .compatible = "brcm,brcmnand-v6.2" },
+ { .compatible = "brcm,brcmnand-v7.0" },
+ { .compatible = "brcm,brcmnand-v7.1" },
+ { .compatible = "brcm,brcmnand-v7.2" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcmnand_of_match);
+#endif /* __UBOOT__ */
+
+/***********************************************************************
+ * Platform driver setup (per controller)
+ ***********************************************************************/
+
+#ifndef __UBOOT__
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
+#else
+int brcmnand_probe(struct udevice *dev, struct brcmnand_soc *soc)
+#endif /* __UBOOT__ */
+{
+#ifndef __UBOOT__
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node, *child;
+#else
+ ofnode child;
+ struct udevice *pdev = dev;
+#endif /* __UBOOT__ */
+ struct brcmnand_controller *ctrl;
+#ifndef __UBOOT__
+ struct resource *res;
+#else
+ struct resource res;
+#endif /* __UBOOT__ */
+ int ret;
+
+#ifndef __UBOOT__
+ /* We only support device-tree instantiation */
+ if (!dn)
+ return -ENODEV;
+
+ if (!of_match_node(brcmnand_of_match, dn))
+ return -ENODEV;
+#endif /* __UBOOT__ */
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+#ifndef __UBOOT__
+ dev_set_drvdata(dev, ctrl);
+#else
+ /*
+	 * In U-Boot, the driver data is allocated before probing, so to
+	 * keep a reference to ctrl we store it in the soc structure
+ */
+ soc->ctrl = ctrl;
+#endif /* __UBOOT__ */
+ ctrl->dev = dev;
+
+ init_completion(&ctrl->done);
+ init_completion(&ctrl->dma_done);
+ nand_hw_control_init(&ctrl->controller);
+ INIT_LIST_HEAD(&ctrl->host_list);
+
+	/* Is the parameter page big endian? */
+ ctrl->parameter_page_big_endian =
+ dev_read_u32_default(dev, "parameter-page-big-endian", 1);
+
+ /* NAND register range */
+#ifndef __UBOOT__
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctrl->nand_base = devm_ioremap_resource(dev, res);
+#else
+ dev_read_resource(pdev, 0, &res);
+ ctrl->nand_base = devm_ioremap(pdev, res.start, resource_size(&res));
+#endif
+ if (IS_ERR(ctrl->nand_base))
+ return PTR_ERR(ctrl->nand_base);
+
+ /* Enable clock before using NAND registers */
+ ctrl->clk = devm_clk_get(dev, "nand");
+ if (!IS_ERR(ctrl->clk)) {
+ ret = clk_prepare_enable(ctrl->clk);
+ if (ret)
+ return ret;
+ } else {
+ /* Ignore PTR_ERR(ctrl->clk) */
+ ctrl->clk = NULL;
+ }
+
+ /* Initialize NAND revision */
+ ret = brcmnand_revision_init(ctrl);
+ if (ret)
+ goto err;
+
+ /*
+	 * Most chips have this cache at a fixed offset within the 'nand' block.
+ * Some must specify this region separately.
+ */
+#ifndef __UBOOT__
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
+ if (res) {
+ ctrl->nand_fc = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->nand_fc)) {
+ ret = PTR_ERR(ctrl->nand_fc);
+ goto err;
+ }
+ } else {
+ ctrl->nand_fc = ctrl->nand_base +
+ ctrl->reg_offsets[BRCMNAND_FC_BASE];
+ }
+#else
+ if (!dev_read_resource_byname(pdev, "nand-cache", &res)) {
+ ctrl->nand_fc = devm_ioremap(dev, res.start,
+ resource_size(&res));
+ if (IS_ERR(ctrl->nand_fc)) {
+ ret = PTR_ERR(ctrl->nand_fc);
+ goto err;
+ }
+ } else {
+ ctrl->nand_fc = ctrl->nand_base +
+ ctrl->reg_offsets[BRCMNAND_FC_BASE];
+ }
+#endif
+
+#ifndef __UBOOT__
+ /* FLASH_DMA */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
+ if (res) {
+ ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctrl->flash_dma_base)) {
+ ret = PTR_ERR(ctrl->flash_dma_base);
+ goto err;
+ }
+
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
+ flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+
+ /* Allocate descriptor(s) */
+ ctrl->dma_desc = dmam_alloc_coherent(dev,
+ sizeof(*ctrl->dma_desc),
+ &ctrl->dma_pa, GFP_KERNEL);
+ if (!ctrl->dma_desc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ctrl->dma_irq = platform_get_irq(pdev, 1);
+ if ((int)ctrl->dma_irq < 0) {
+ dev_err(dev, "missing FLASH_DMA IRQ\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = devm_request_irq(dev, ctrl->dma_irq,
+ brcmnand_dma_irq, 0, DRV_NAME,
+ ctrl);
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->dma_irq, ret);
+ goto err;
+ }
+
+ dev_info(dev, "enabling FLASH_DMA\n");
+ }
+#endif /* __UBOOT__ */
+
+ /* Disable automatic device ID config, direct addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
+ CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
+ /* Disable XOR addressing */
+ brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+
+ /* Read the write-protect configuration in the device tree */
+ wp_on = dev_read_u32_default(dev, "write-protect", wp_on);
+
+ if (ctrl->features & BRCMNAND_HAS_WP) {
+ /* Permanently disable write protection */
+ if (wp_on == 2)
+ brcmnand_set_wp(ctrl, false);
+ } else {
+ wp_on = 0;
+ }
+
+#ifndef __UBOOT__
+ /* IRQ */
+ ctrl->irq = platform_get_irq(pdev, 0);
+ if ((int)ctrl->irq < 0) {
+ dev_err(dev, "no IRQ defined\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /*
+ * Some SoCs integrate this controller (e.g., its interrupt bits) in
+ * interesting ways
+ */
+ if (soc) {
+ ctrl->soc = soc;
+
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
+ DRV_NAME, ctrl);
+
+ /* Enable interrupt */
+ ctrl->soc->ctlrdy_ack(ctrl->soc);
+ ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+ } else {
+ /* Use standard interrupt infrastructure */
+ ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
+ DRV_NAME, ctrl);
+ }
+ if (ret < 0) {
+ dev_err(dev, "can't allocate IRQ %d: error %d\n",
+ ctrl->irq, ret);
+ goto err;
+ }
+#endif /* __UBOOT__ */
+
+#ifndef __UBOOT__
+ for_each_available_child_of_node(dn, child) {
+ if (of_device_is_compatible(child, "brcm,nandcs")) {
+ struct brcmnand_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ of_node_put(child);
+ ret = -ENOMEM;
+ goto err;
+ }
+ host->pdev = pdev;
+ host->ctrl = ctrl;
+
+ ret = brcmnand_init_cs(host, child);
+ if (ret) {
+ devm_kfree(dev, host);
+ continue; /* Try all chip-selects */
+ }
+
+ list_add_tail(&host->node, &ctrl->host_list);
+ }
+ }
+#else
+ ofnode_for_each_subnode(child, dev_ofnode(dev)) {
+ if (ofnode_device_is_compatible(child, "brcm,nandcs")) {
+ struct brcmnand_host *host;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ host->pdev = pdev;
+ host->ctrl = ctrl;
+
+ ret = brcmnand_init_cs(host, child);
+ if (ret) {
+ devm_kfree(dev, host);
+ continue; /* Try all chip-selects */
+ }
+
+ list_add_tail(&host->node, &ctrl->host_list);
+ }
+ }
+#endif /* __UBOOT__ */
+
+ /* No chip-selects could initialize properly */
+ if (list_empty(&ctrl->host_list)) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ return 0;
+
+err:
+#ifndef __UBOOT__
+ clk_disable_unprepare(ctrl->clk);
+#else
+ if (ctrl->clk)
+ clk_disable(ctrl->clk);
+#endif /* __UBOOT__ */
+ return ret;
+}
+EXPORT_SYMBOL_GPL(brcmnand_probe);
+
+#ifndef __UBOOT__
+int brcmnand_remove(struct platform_device *pdev)
+{
+ struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+ struct brcmnand_host *host;
+
+ list_for_each_entry(host, &ctrl->host_list, node)
+ nand_release(nand_to_mtd(&host->chip));
+
+ clk_disable_unprepare(ctrl->clk);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+#else
+int brcmnand_remove(struct udevice *pdev)
+{
+ return 0;
+}
+#endif /* __UBOOT__ */
+EXPORT_SYMBOL_GPL(brcmnand_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kevin Cernekee");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom chips");
+MODULE_ALIAS("platform:brcmnand");
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.h b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.h
new file mode 100644
index 000000000..6946a62b0
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __BRCMNAND_H__
+#define __BRCMNAND_H__
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+struct brcmnand_soc {
+ bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
+ void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
+ void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
+ bool is_param);
+ void *ctrl;
+};
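+
+/*
+ * A SoC glue driver (e.g., the bcm63xx shims in this directory) fills in
+ * these hooks and hands the structure to brcmnand_probe(); prepare_data_bus
+ * may be left NULL, in which case the helpers below are no-ops.
+ */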
+
+static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
+ bool is_param)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, true, is_param);
+}
+
+static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc,
+ bool is_param)
+{
+ if (soc && soc->prepare_data_bus)
+ soc->prepare_data_bus(soc, false, is_param);
+}
+
+static inline u32 brcmnand_readl(void __iomem *addr)
+{
+ /*
+ * MIPS endianness is configured by boot strap, which also reverses all
+ * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+ * endian I/O).
+ *
+ * Other architectures (e.g., ARM) either do not support big endian, or
+ * else leave I/O in little endian mode.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_SYS_BIG_ENDIAN))
+ return __raw_readl(addr);
+ else
+ return readl_relaxed(addr);
+}
+
+static inline void brcmnand_writel(u32 val, void __iomem *addr)
+{
+ /* See brcmnand_readl() comments */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_SYS_BIG_ENDIAN))
+ __raw_writel(val, addr);
+ else
+ writel_relaxed(val, addr);
+}
+
+int brcmnand_probe(struct udevice *dev, struct brcmnand_soc *soc);
+int brcmnand_remove(struct udevice *dev);
+
+#ifndef __UBOOT__
+extern const struct dev_pm_ops brcmnand_pm_ops;
+#endif /* __UBOOT__ */
+
+#endif /* __BRCMNAND_H__ */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.c b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.c
new file mode 100644
index 000000000..a6acf556b
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <common.h>
+#include <dm.h>
+#include <malloc.h>
+#include <dm/devres.h>
+#include "brcmnand_compat.h"
+
+static char *devm_kvasprintf(struct udevice *dev, gfp_t gfp, const char *fmt,
+ va_list ap)
+{
+ unsigned int len;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ len = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ p = devm_kmalloc(dev, len + 1, gfp);
+ if (!p)
+ return NULL;
+
+ vsnprintf(p, len + 1, fmt, ap);
+
+ return p;
+}
+
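+/*
+ * devm-managed kasprintf(); typical use, as in brcmnand_init_cs():
+ *   mtd->name = devm_kasprintf(pdev, GFP_KERNEL, "brcmnand.%d", host->cs);
+ */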
+char *devm_kasprintf(struct udevice *dev, gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = devm_kvasprintf(dev, gfp, fmt, ap);
+ va_end(ap);
+
+ return p;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.h b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.h
new file mode 100644
index 000000000..52711d497
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/brcmnand/brcmnand_compat.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __BRCMNAND_COMPAT_H
+#define __BRCMNAND_COMPAT_H
+
+struct clk;
+struct udevice;
+
+char *devm_kasprintf(struct udevice *dev, gfp_t gfp, const char *fmt, ...);
+
+#endif /* __BRCMNAND_COMPAT_H */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/cortina_nand.c b/roms/u-boot/drivers/mtd/nand/raw/cortina_nand.c
new file mode 100644
index 000000000..81fa8788a
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/cortina_nand.c
@@ -0,0 +1,1390 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2020, Cortina Access Inc.
+ */
+
+#include <common.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/sizes.h>
+#include <log.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <dm/device_compat.h>
+#include <linux/bug.h>
+#include <linux/iopoll.h>
+#include <linux/errno.h>
+#include <asm/gpio.h>
+#include <fdtdec.h>
+#include <bouncebuf.h>
+#include <dm.h>
+#include "cortina_nand.h"
+
+static unsigned int *pread, *pwrite;
+
+static const struct udevice_id cortina_nand_dt_ids[] = {
+ {
+ .compatible = "cortina,ca-nand",
+ },
+ { /* sentinel */ }
+};
+
+static struct nand_ecclayout eccoob;
+
+/* Information about an attached NAND chip */
+struct fdt_nand {
+ int enabled; /* 1 to enable, 0 to disable */
+ s32 width; /* bit width, must be 8 */
+ u32 nand_ecc_strength;
+};
+
+struct nand_drv {
+ u32 fifo_index;
+ struct nand_ctlr *reg;
+ struct dma_global *dma_glb;
+ struct dma_ssp *dma_nand;
+ struct tx_descriptor_t *tx_desc;
+ struct rx_descriptor_t *rx_desc;
+ struct fdt_nand config;
+ unsigned int flash_base;
+};
+
+struct ca_nand_info {
+ struct udevice *dev;
+ struct nand_drv nand_ctrl;
+ struct nand_chip nand_chip;
+};
+
+/**
+ * Wait for command completion
+ *
+ * @param reg nand_ctlr structure
+ * @return
+ * 1 - Command completed
+ * 0 - Timeout
+ */
+static int nand_waitfor_cmd_completion(struct nand_ctlr *reg, unsigned int mask)
+{
+ unsigned int reg_v = 0;
+
+ if (readl_poll_timeout(&reg->flash_flash_access_start, reg_v,
+ !(reg_v & mask), (FLASH_LONG_DELAY << 2))) {
+ pr_err("Nand CMD timeout!\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * Read one byte from the chip
+ *
+ * @param mtd MTD device structure
+ * @return data byte
+ *
+ * Read function for 8bit bus-width
+ */
+static uint8_t read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info;
+ u8 ret_v;
+
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+
+ clrsetbits_le32(&info->reg->flash_flash_access_start, GENMASK(31, 0),
+ NFLASH_GO | NFLASH_RD);
+
+ if (!nand_waitfor_cmd_completion(info->reg, NFLASH_GO))
+ printf("%s: Command timeout\n", __func__);
+
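+	/* flash_nf_data is a 32-bit FIFO register; consume one byte per call */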
+ ret_v = readl(&info->reg->flash_nf_data) >> (8 * info->fifo_index++);
+ info->fifo_index %= 4;
+
+ return (uint8_t)ret_v;
+}
+
+/**
+ * Read len bytes from the chip into a buffer
+ *
+ * @param mtd MTD device structure
+ * @param buf buffer to store data to
+ * @param len number of bytes to read
+ *
+ * Read function for 8bit bus-width
+ */
+static void read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+ unsigned int reg;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+
+ for (i = 0; i < len; i++) {
+ clrsetbits_le32(&info->reg->flash_flash_access_start,
+ GENMASK(31, 0), NFLASH_GO | NFLASH_RD);
+
+ if (!nand_waitfor_cmd_completion(info->reg, NFLASH_GO))
+ printf("%s: Command timeout\n", __func__);
+
+ reg = readl(&info->reg->flash_nf_data) >>
+ (8 * info->fifo_index++);
+ memcpy(buf + i, &reg, 1);
+ info->fifo_index %= 4;
+ }
+}
+
+/**
+ * Check READY pin status to see if it is ready or not
+ *
+ * @param mtd MTD device structure
+ * @return
+ * 1 - ready
+ * 0 - not ready
+ */
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int reg_val;
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+
+ reg_val = readl(&info->reg->flash_status);
+ if (reg_val & NFLASH_READY)
+ return 1;
+ else
+ return 0;
+}
+
+/* Dummy implementation: we don't support multiple chips */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ switch (chipnr) {
+ case -1:
+ case 0:
+ break;
+
+ default:
+ WARN_ON(chipnr);
+ }
+}
+
+int init_nand_dma(struct nand_chip *nand)
+{
+ int i;
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(nand);
+
+ setbits_le32(&info->dma_glb->dma_glb_dma_lso_ctrl, TX_DMA_ENABLE);
+ setbits_le32(&info->dma_glb->dma_glb_dma_ssp_rx_ctrl,
+ TX_DMA_ENABLE | DMA_CHECK_OWNER);
+ setbits_le32(&info->dma_glb->dma_glb_dma_ssp_tx_ctrl,
+ RX_DMA_ENABLE | DMA_CHECK_OWNER);
+
+ info->tx_desc = malloc_cache_aligned((sizeof(struct tx_descriptor_t) *
+ CA_DMA_DESC_NUM));
+ info->rx_desc = malloc_cache_aligned((sizeof(struct rx_descriptor_t) *
+ CA_DMA_DESC_NUM));
+
+	if (!info->tx_desc || !info->rx_desc) {
+		printf("Failed to alloc DMA descriptors!\n");
+		/* kfree(NULL) is a no-op, so free both unconditionally */
+		kfree(info->tx_desc);
+		kfree(info->rx_desc);
+		return -ENOMEM;
+	}
+
+ /* set RX DMA base address and depth */
+ clrsetbits_le32(&info->dma_nand->dma_q_rxq_base_depth,
+ GENMASK(31, 4), (uintptr_t)info->rx_desc);
+ clrsetbits_le32(&info->dma_nand->dma_q_rxq_base_depth,
+ GENMASK(3, 0), CA_DMA_DEPTH);
+
+ /* set TX DMA base address and depth */
+ clrsetbits_le32(&info->dma_nand->dma_q_txq_base_depth,
+ GENMASK(31, 4), (uintptr_t)info->tx_desc);
+ clrsetbits_le32(&info->dma_nand->dma_q_txq_base_depth,
+ GENMASK(3, 0), CA_DMA_DEPTH);
+
+ memset((unsigned char *)info->tx_desc, 0,
+ (sizeof(struct tx_descriptor_t) * CA_DMA_DESC_NUM));
+ memset((unsigned char *)info->rx_desc, 0,
+ (sizeof(struct rx_descriptor_t) * CA_DMA_DESC_NUM));
+
+ for (i = 0; i < CA_DMA_DESC_NUM; i++) {
+ /* set owner bit as SW */
+ info->tx_desc[i].own = 1;
+ /* enable Scatter-Gather memory copy */
+ info->tx_desc[i].sgm = 0x1;
+ }
+
+ return 0;
+}
+
+/**
+ * Send command to NAND device
+ *
+ * @param mtd MTD device structure
+ * @param command the command to be sent
+ * @param column the column address for this command, -1 if none
+ * @param page_addr the page address for this command, -1 if none
+ */
+static void ca_nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info;
+ unsigned int reg_v = 0;
+ u32 cmd = 0, cnt = 0, addr1 = 0, addr2 = 0;
+ int ret;
+
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+ /*
+ * Write out the command to the device.
+ *
+	 * Only the NAND_CMD_RESET and NAND_CMD_READID commands can come
+	 * here before mtd->writesize is initialized.
+ */
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ assert(mtd->writesize != 0);
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+	/* Reset FIFO before issuing a new command */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ ECC_RESET_ALL);
+ ret =
+ readl_poll_timeout(&info->reg->flash_nf_ecc_reset, reg_v,
+ !(reg_v & RESET_NFLASH_FIFO), FLASH_SHORT_DELAY);
+ if (ret) {
+ printf("FIFO reset timeout\n");
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ ECC_RESET_ALL);
+ udelay(10);
+ }
+
+	/*
+	 * Reset the FIFO index; the next read starts from flash_nf_data[0]
+	 */
+ info->fifo_index = 0;
+
+ clrsetbits_le32(&info->reg->flash_nf_access, GENMASK(11, 10),
+ NFLASH_REG_WIDTH_8);
+
+ /*
+	 * Program and erase have their own busy handlers;
+	 * status and sequential-in need no delay.
+ */
+ switch (command) {
+ case NAND_CMD_READID:
+ /* Command */
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ NAND_CMD_READID);
+ /* 1 byte CMD cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(1, 0),
+ REG_CMD_COUNT_1TOGO);
+		/* 1 byte ADDR cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(6, 4),
+ REG_ADDR_COUNT_1);
+ /* Data cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(21, 8),
+ REG_DATA_COUNT_DATA_4);
+ /* 0 OOB cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(31, 22),
+ REG_OOB_COUNT_EMPTY);
+
+ /* addresses */
+ clrsetbits_le32(&info->reg->flash_nf_address_1, GENMASK(31, 0),
+ column & ADDR1_MASK2);
+ clrsetbits_le32(&info->reg->flash_nf_address_2, GENMASK(31, 0),
+ 0);
+
+ /* clear FLASH_NF_ACCESS */
+ clrsetbits_le32(&info->reg->flash_nf_access, GENMASK(31, 0),
+ DISABLE_AUTO_RESET);
+
+ break;
+ case NAND_CMD_PARAM:
+ /* Command */
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ NAND_CMD_PARAM);
+ /* 1 byte CMD cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(1, 0),
+ REG_CMD_COUNT_1TOGO);
+ /* 1 byte ADDR cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(6, 4),
+ REG_ADDR_COUNT_1);
+ /* Data cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(21, 8),
+ (SZ_4K - 1) << 8);
+ /* 0 OOB cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(31, 22),
+ REG_OOB_COUNT_EMPTY);
+
+ /* addresses */
+ clrsetbits_le32(&info->reg->flash_nf_address_1, GENMASK(31, 0),
+ column & ADDR1_MASK2);
+ clrsetbits_le32(&info->reg->flash_nf_address_2, GENMASK(31, 0),
+ 0);
+
+ break;
+ case NAND_CMD_READ0:
+ if (chip->chipsize < SZ_32M) {
+ cmd = NAND_CMD_READ0;
+ cnt = REG_CMD_COUNT_1TOGO | REG_ADDR_COUNT_3;
+ addr1 = (((page_addr & ADDR1_MASK0) << 8));
+ addr2 = ((page_addr & ADDR2_MASK0) >> 24);
+ } else if (chip->chipsize >= SZ_32M &&
+ (chip->chipsize <= SZ_128M)) {
+ cmd = NAND_CMD_READ0;
+ cnt = REG_ADDR_COUNT_4;
+ if (mtd->writesize > (REG_DATA_COUNT_512_DATA >> 8)) {
+ cmd |= (NAND_CMD_READSTART << 8);
+ cnt |= REG_CMD_COUNT_2TOGO;
+ } else {
+ cnt |= REG_CMD_COUNT_1TOGO;
+ }
+ addr1 = ((page_addr << 16) | (column & ADDR1_MASK1));
+ addr2 = (page_addr >> 16);
+ } else {
+ cmd = NAND_CMD_READ0 | (NAND_CMD_READSTART << 8);
+ cnt = REG_CMD_COUNT_2TOGO | REG_ADDR_COUNT_5;
+ addr1 = ((page_addr << 16) | (column & ADDR1_MASK1));
+ addr2 = (page_addr >> 16);
+ }
+
+ /* Command */
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ cmd);
+ /* CMD & ADDR cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(7, 0), cnt);
+ /* Data cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(21, 8),
+ (mtd->writesize - 1) << 8);
+ /* OOB cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(31, 22),
+ (mtd->oobsize - 1) << 22);
+
+ /* addresses */
+ clrsetbits_le32(&info->reg->flash_nf_address_1, GENMASK(31, 0),
+ addr1);
+ clrsetbits_le32(&info->reg->flash_nf_address_2, GENMASK(31, 0),
+ addr2);
+
+ return;
+ case NAND_CMD_SEQIN:
+ if (chip->chipsize < SZ_32M) {
+ cnt = REG_CMD_COUNT_2TOGO | REG_ADDR_COUNT_3;
+ addr1 = (((page_addr & ADDR1_MASK0) << 8));
+ addr2 = ((page_addr & ADDR2_MASK0) >> 24);
+ } else if (chip->chipsize >= SZ_32M &&
+ (chip->chipsize <= SZ_128M)) {
+ cnt = REG_CMD_COUNT_2TOGO | REG_ADDR_COUNT_4;
+ addr1 = ((page_addr << 16) | (column & ADDR1_MASK1));
+ addr2 = (page_addr >> 16);
+ } else {
+ cnt = REG_CMD_COUNT_2TOGO | REG_ADDR_COUNT_5;
+ addr1 = ((page_addr << 16) | (column & ADDR1_MASK1));
+ addr2 = (page_addr >> 16);
+ }
+
+ /* Command */
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ NAND_CMD_SEQIN | (NAND_CMD_PAGEPROG << 8));
+ /* CMD cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(7, 0), cnt);
+ /* Data cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(21, 8),
+ (mtd->writesize - 1) << 8);
+ /* OOB cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(31, 22),
+ (mtd->oobsize - 1) << 22);
+
+ /* addresses */
+ clrsetbits_le32(&info->reg->flash_nf_address_1, GENMASK(31, 0),
+ addr1);
+ clrsetbits_le32(&info->reg->flash_nf_address_2, GENMASK(31, 0),
+ addr2);
+
+ return;
+ case NAND_CMD_PAGEPROG:
+ return;
+ case NAND_CMD_ERASE1:
+ /* Command */
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8));
+ /* 2 byte CMD cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(1, 0),
+ REG_CMD_COUNT_2TOGO);
+ /* 3 byte ADDR cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(6, 4),
+ REG_ADDR_COUNT_3);
+ /* 0 Data cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(21, 8),
+ REG_DATA_COUNT_EMPTY);
+ /* 0 OOB cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(31, 22),
+ REG_OOB_COUNT_EMPTY);
+
+ /* addresses */
+ clrsetbits_le32(&info->reg->flash_nf_address_1, GENMASK(31, 0),
+ page_addr);
+ clrsetbits_le32(&info->reg->flash_nf_address_2, GENMASK(31, 0),
+ 0);
+
+ /* Issue command */
+ clrsetbits_le32(&info->reg->flash_flash_access_start,
+ GENMASK(31, 0), NFLASH_GO | NFLASH_RD);
+ break;
+ case NAND_CMD_ERASE2:
+ return;
+ case NAND_CMD_STATUS:
+ /* Command */
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ NAND_CMD_STATUS);
+ /* 1 byte CMD cycle */
+ clrbits_le32(&info->reg->flash_nf_count, GENMASK(1, 0));
+ /* 0 byte Addr cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(6, 4),
+ REG_ADDR_COUNT_EMPTY);
+ /* 1 Data cycle */
+ clrbits_le32(&info->reg->flash_nf_count, GENMASK(21, 8));
+ /* 0 OOB cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(31, 22),
+ REG_OOB_COUNT_EMPTY);
+
+ break;
+ case NAND_CMD_RESET:
+ /* Command */
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ NAND_CMD_RESET);
+ /* 1 byte CMD cycle */
+ clrbits_le32(&info->reg->flash_nf_count, GENMASK(1, 0));
+ /* 0 byte Addr cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(6, 4),
+ REG_ADDR_COUNT_EMPTY);
+ /* 0 Data cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(21, 8),
+ REG_DATA_COUNT_EMPTY);
+ /* 0 OOB cycle */
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(31, 22),
+ REG_OOB_COUNT_EMPTY);
+
+ /* addresses */
+ clrsetbits_le32(&info->reg->flash_nf_address_1, GENMASK(31, 0),
+ column & ADDR1_MASK2);
+ clrsetbits_le32(&info->reg->flash_nf_address_2, GENMASK(31, 0),
+ 0);
+
+ /* Issue command */
+ clrsetbits_le32(&info->reg->flash_flash_access_start,
+ GENMASK(31, 0), NFLASH_GO | NFLASH_WT);
+
+ break;
+ case NAND_CMD_RNDOUT:
+ default:
+ printf("%s: Unsupported command %d\n", __func__, command);
+ return;
+ }
+
+ if (!nand_waitfor_cmd_completion(info->reg, NFLASH_GO))
+ printf("Command 0x%02X timeout\n", command);
+}
+
+/**
+ * Set up NAND bus width and page size
+ *
+ * @param mtd MTD device structure
+ * @return 0 if ok, -1 on error
+ */
+static int set_bus_width_page_size(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+
+ if (info->config.width == SZ_8) {
+ clrsetbits_le32(&info->reg->flash_nf_access, GENMASK(31, 0),
+ NFLASH_REG_WIDTH_8);
+ } else if (info->config.width == SZ_16) {
+ clrsetbits_le32(&info->reg->flash_nf_access, GENMASK(31, 0),
+ NFLASH_REG_WIDTH_16);
+ } else {
+ debug("%s: Unsupported bus width %d\n", __func__,
+ info->config.width);
+ return -1;
+ }
+
+ if (mtd->writesize == SZ_512) {
+ setbits_le32(&info->reg->flash_type, FLASH_TYPE_512);
+ } else if (mtd->writesize == SZ_2K) {
+ setbits_le32(&info->reg->flash_type, FLASH_TYPE_2K);
+ } else if (mtd->writesize == SZ_4K) {
+ setbits_le32(&info->reg->flash_type, FLASH_TYPE_4K);
+ } else if (mtd->writesize == SZ_8K) {
+ setbits_le32(&info->reg->flash_type, FLASH_TYPE_8K);
+ } else {
+ debug("%s: Unsupported page size %d\n", __func__,
+ mtd->writesize);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ca_do_bch_correction(struct nand_chip *chip,
+ unsigned int err_num, u8 *buff_ptr, int i)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ unsigned int reg_v, err_loc0, err_loc1;
+ int k, max_bitflips = 0;
+
+ for (k = 0; k < (err_num + 1) / 2; k++) {
+ reg_v = readl(&info->reg->flash_nf_bch_error_loc01 + k);
+ err_loc0 = reg_v & BCH_ERR_LOC_MASK;
+ err_loc1 = (reg_v >> 16) & BCH_ERR_LOC_MASK;
+
+ if (err_loc0 / 8 < BCH_DATA_UNIT) {
+ printf("pdata[%x]:%x =>", ((i / chip->ecc.bytes) *
+ chip->ecc.size + ((reg_v & 0x1fff) >> 3)),
+ buff_ptr[(reg_v & 0x1fff) >> 3]);
+
+ buff_ptr[err_loc0 / 8] ^=
+ (1 << (reg_v & BCH_CORRECT_LOC_MASK));
+
+ printf("%x\n", buff_ptr[(reg_v & 0x1fff) >> 3]);
+
+ max_bitflips++;
+ }
+
+ if (((k + 1) * 2) <= err_num && ((err_loc1 / 8) <
+ BCH_DATA_UNIT)) {
+ printf("pdata[%x]:%x =>", ((i / chip->ecc.bytes) *
+ chip->ecc.size + (((reg_v >> 16) & 0x1fff) >>
+ 3)), buff_ptr[((reg_v >> 16) & 0x1fff) >> 3]);
+
+ buff_ptr[err_loc1 / 8] ^= (1 << ((reg_v >> 16) &
+ BCH_CORRECT_LOC_MASK));
+
+ printf("%x\n", buff_ptr[((reg_v >> 16) & 0x1fff) >> 3]);
+
+ max_bitflips++;
+ }
+ }
+
+ return max_bitflips;
+}
+
+static int ca_do_bch_decode(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, unsigned int addr)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ unsigned int reg_v, err_num;
+ unsigned char *ecc_code = chip->buffers->ecccode;
+ unsigned char *ecc_end_pos;
+ int ret, i, j, k, n, step, eccsteps, max_bitflips = 0;
+ u8 *buff_ptr = (u8 *)buf;
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccoob.eccpos[i]];
+
+ for (i = 0, eccsteps = chip->ecc.steps; eccsteps;
+ i += chip->ecc.bytes, eccsteps--) {
+ ecc_end_pos = ecc_code + chip->ecc.bytes;
+
+ for (j = 0, k = 0; j < chip->ecc.bytes; j += 4, k++) {
+ reg_v = 0;
+ for (n = 0; n < 4 && ecc_code != ecc_end_pos;
+ ++n, ++ecc_code) {
+ reg_v |= *ecc_code << (8 * n);
+ }
+ clrsetbits_le32(&info->reg->flash_nf_bch_oob0 + k,
+ GENMASK(31, 0), reg_v);
+ }
+
+ /* Clear ECC buffer */
+ setbits_le32(&info->reg->flash_nf_ecc_reset, RESET_NFLASH_ECC);
+ ret = readl_poll_timeout(&info->reg->flash_nf_ecc_reset, reg_v,
+ !(reg_v & RESET_NFLASH_ECC),
+ FLASH_SHORT_DELAY);
+ if (ret)
+ pr_err("Reset ECC buffer fail\n");
+
+ clrsetbits_le32(&info->reg->flash_nf_bch_control, GENMASK(8, 8),
+ BCH_DISABLE);
+
+ /* Start BCH */
+ step = i / chip->ecc.bytes;
+ clrsetbits_le32(&info->reg->flash_nf_bch_control,
+ GENMASK(6, 4), step << 4);
+ setbits_le32(&info->reg->flash_nf_bch_control, BCH_ENABLE);
+ udelay(10);
+ setbits_le32(&info->reg->flash_nf_bch_control, BCH_COMPARE);
+
+ ret = readl_poll_timeout(&info->reg->flash_nf_bch_status, reg_v,
+ (reg_v & BCH_DECO_DONE),
+ FLASH_SHORT_DELAY);
+ if (ret)
+ pr_err("ECC Decode timeout\n");
+
+ /* Stop compare */
+ clrbits_le32(&info->reg->flash_nf_bch_control, BCH_COMPARE);
+
+ reg_v = readl(&info->reg->flash_nf_bch_status);
+ err_num = (reg_v >> 8) & BCH_ERR_NUM_MASK;
+ reg_v &= BCH_ERR_MASK;
+
+ /* Uncorrectable */
+ if (reg_v == BCH_UNCORRECTABLE) {
+ max_bitflips =
+ nand_check_erased_ecc_chunk(buff_ptr,
+ chip->ecc.size,
+ &chip->buffers->ecccode[i],
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+
+ if (max_bitflips) {
+ mtd->ecc_stats.failed++;
+ pr_err("Uncorrectable error\n");
+ pr_err(" Page:%x step:%d\n", page, step);
+
+ return -1;
+ }
+ } else if (reg_v == BCH_CORRECTABLE_ERR) {
+ printf("Correctable error(%x)!! addr:%lx\n",
+ err_num, (unsigned long)addr - mtd->writesize);
+ printf("Dst buf: %p [ColSel:%x ]\n",
+ buff_ptr + reg_v * BCH_DATA_UNIT, step);
+
+ max_bitflips =
+ ca_do_bch_correction(chip, err_num, buff_ptr, i);
+ }
+
+ buff_ptr += BCH_DATA_UNIT;
+ }
+
+ /* Disable BCH */
+ clrsetbits_le32(&info->reg->flash_nf_bch_control, GENMASK(31, 0),
+ BCH_DISABLE);
+
+ return max_bitflips;
+}
+
+static int ca_do_bch_encode(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct nand_drv *info;
+ unsigned int reg_v;
+ int i, j, n, eccsteps, gen_index;
+
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+
+ for (i = 0, n = 0, eccsteps = chip->ecc.steps; eccsteps;
+ i += chip->ecc.bytes, eccsteps--, n++) {
+ gen_index = 0;
+ for (j = 0; j < chip->ecc.bytes; j += 4, gen_index++) {
+ reg_v =
+ readl(&info->reg->flash_nf_bch_gen0_0 + gen_index +
+ 18 * n);
+ chip->oob_poi[eccoob.eccpos[i + j]] = reg_v & OOB_MASK;
+ chip->oob_poi[eccoob.eccpos[i + j + 1]] =
+ (reg_v >> 8) & OOB_MASK;
+ chip->oob_poi[eccoob.eccpos[i + j + 2]] =
+ (reg_v >> 16) & OOB_MASK;
+ chip->oob_poi[eccoob.eccpos[i + j + 3]] =
+ (reg_v >> 24) & OOB_MASK;
+ }
+ }
+
+ /* Disable BCH */
+ clrsetbits_le32(&info->reg->flash_nf_bch_control, GENMASK(8, 8),
+ BCH_DISABLE);
+
+ return 0;
+}
+
+/**
+ * Page read/write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf data buffer
+ * @param page page number
+ * @param with_ecc 1 to enable ECC, 0 to disable ECC
+ * @param is_writing 0 for read, 1 for write
+ * @return 0 when successfully completed
+ * -ETIMEDOUT when command timeout
+ */
+static int nand_rw_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int page, int with_ecc, int is_writing)
+{
+ unsigned int reg_v, ext_addr, addr, dma_index;
+ struct tx_descriptor_t *tx_desc;
+ struct rx_descriptor_t *rx_desc;
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ int ret;
+
+ /* reset ecc control */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ RESET_NFLASH_ECC);
+
+ /* flash interrupt */
+ clrsetbits_le32(&info->reg->flash_flash_interrupt, GENMASK(0, 0),
+ REGIRQ_CLEAR);
+
+ /* reset ecc control */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ RESET_NFLASH_ECC);
+
+ /* Disable TXQ */
+ clrbits_le32(&info->dma_nand->dma_q_txq_control, GENMASK(0, 0));
+
+ /* Clear interrupt */
+ setbits_le32(&info->dma_nand->dma_q_rxq_coal_interrupt, GENMASK(0, 0));
+ setbits_le32(&info->dma_nand->dma_q_txq_coal_interrupt, GENMASK(0, 0));
+
+ if (with_ecc == 1) {
+ switch (info->config.nand_ecc_strength) {
+ case ECC_STRENGTH_8:
+ reg_v = BCH_ERR_CAP_8;
+ break;
+ case ECC_STRENGTH_16:
+ reg_v = BCH_ERR_CAP_16;
+ break;
+ case ECC_STRENGTH_24:
+ reg_v = BCH_ERR_CAP_24;
+ break;
+ case ECC_STRENGTH_40:
+ reg_v = BCH_ERR_CAP_40;
+ break;
+ default:
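+			/* fall back to 16-bit correction for unknown strengths */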
+ reg_v = BCH_ERR_CAP_16;
+ break;
+ }
+ reg_v |= BCH_ENABLE;
+
+ /* BCH decode for flash read */
+ if (is_writing == 0)
+ reg_v |= BCH_DECODE;
+ clrsetbits_le32(&info->reg->flash_nf_bch_control,
+ GENMASK(31, 0), reg_v);
+ } else {
+ clrsetbits_le32(&info->reg->flash_nf_bch_control,
+ GENMASK(31, 0), 0);
+ }
+
+ /* Fill Extend address */
+ ext_addr = ((page << chip->page_shift) / EXT_ADDR_MASK);
+
+ clrsetbits_le32(&info->reg->flash_nf_access,
+ GENMASK(7, 0), (uintptr_t)ext_addr);
+
+ addr = (uintptr_t)((page << chip->page_shift) % EXT_ADDR_MASK);
+ addr = (uintptr_t)(addr + info->flash_base);
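+	/* i.e., flash offset = ext_addr * EXT_ADDR_MASK + (addr - flash_base) */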
+
+ dma_index = readl(&info->dma_nand->dma_q_txq_wptr) & CA_DMA_Q_PTR_MASK;
+
+ tx_desc = info->tx_desc;
+ rx_desc = info->rx_desc;
+
+ /* TX/RX descriptor for page data */
+ tx_desc[dma_index].own = OWN_DMA;
+ tx_desc[dma_index].buf_len = mtd->writesize;
+ rx_desc[dma_index].own = OWN_DMA;
+ rx_desc[dma_index].buf_len = mtd->writesize;
+ if (is_writing == 0) {
+ tx_desc[dma_index].buf_adr = (uintptr_t)addr;
+ rx_desc[dma_index].buf_adr = (uintptr_t)(buf);
+ } else {
+ tx_desc[dma_index].buf_adr = (uintptr_t)buf;
+ rx_desc[dma_index].buf_adr = (uintptr_t)(addr);
+ }
+
+ dma_index++;
+ dma_index %= CA_DMA_DESC_NUM;
+
+ /* TX/RX descriptor for OOB area */
+ addr = (uintptr_t)(addr + mtd->writesize);
+ tx_desc[dma_index].own = OWN_DMA;
+ tx_desc[dma_index].buf_len = mtd->oobsize;
+ rx_desc[dma_index].own = OWN_DMA;
+ rx_desc[dma_index].buf_len = mtd->oobsize;
+ if (is_writing) {
+ tx_desc[dma_index].buf_adr = (uintptr_t)(chip->oob_poi);
+ rx_desc[dma_index].buf_adr = (uintptr_t)addr;
+ } else {
+ tx_desc[dma_index].buf_adr = (uintptr_t)addr;
+ rx_desc[dma_index].buf_adr = (uintptr_t)(chip->oob_poi);
+ dma_index++;
+ dma_index %= CA_DMA_DESC_NUM;
+ }
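+
+ /*
+ * Note the asymmetry: on reads both descriptors are queued now,
+ * so the index already points past the OOB slot; on writes the
+ * OOB descriptor is kicked only later, after the BCH parity has
+ * been generated and copied into oob_poi (second DMA kick below).
+ */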
+
+ if (is_writing == 1) {
+ clrsetbits_le32(&info->reg->flash_fifo_control, GENMASK(1, 0),
+ FIFO_WRITE);
+ } else {
+ clrsetbits_le32(&info->reg->flash_fifo_control, GENMASK(1, 0),
+ FIFO_READ);
+ }
+
+ /* Start FIFO request */
+ clrsetbits_le32(&info->reg->flash_flash_access_start, GENMASK(2, 2),
+ NFLASH_FIFO_REQ);
+
+ /* Update DMA write pointer */
+ clrsetbits_le32(&info->dma_nand->dma_q_txq_wptr, GENMASK(12, 0),
+ dma_index);
+
+ /* Start DMA */
+ clrsetbits_le32(&info->dma_nand->dma_q_txq_control, GENMASK(0, 0),
+ TX_DMA_ENABLE);
+
+ /* Wait TX DMA done */
+ ret = readl_poll_timeout(&info->dma_nand->dma_q_txq_coal_interrupt,
+ reg_v, (reg_v & 1), FLASH_LONG_DELAY);
+ if (ret) {
+ pr_err("TX DMA timeout\n");
+ return -ETIMEDOUT;
+ }
+ /* clear tx interrupt */
+ setbits_le32(&info->dma_nand->dma_q_txq_coal_interrupt, 1);
+
+ /* Wait RX DMA done */
+ ret = readl_poll_timeout(&info->dma_nand->dma_q_rxq_coal_interrupt,
+ reg_v, (reg_v & 1), FLASH_LONG_DELAY);
+ if (ret) {
+ pr_err("RX DMA timeout\n");
+ return -ETIMEDOUT;
+ }
+ /* clear rx interrupt */
+ setbits_le32(&info->dma_nand->dma_q_rxq_coal_interrupt, 1);
+
+ /* wait NAND CMD done */
+ if (is_writing == 0) {
+ if (!nand_waitfor_cmd_completion(info->reg, NFLASH_FIFO_REQ))
+ printf("%s: Command timeout\n", __func__);
+ }
+
+ /* Update DMA read pointer */
+ clrsetbits_le32(&info->dma_nand->dma_q_rxq_rptr, GENMASK(12, 0),
+ dma_index);
+
+ /* ECC correction */
+ if (with_ecc == 1) {
+ ret = readl_poll_timeout(&info->reg->flash_nf_bch_status, reg_v,
+ (reg_v & BCH_GEN_DONE), FLASH_LONG_DELAY);
+
+ if (ret) {
+ pr_err("BCH_GEN timeout! flash_nf_bch_status=[0x%x]\n",
+ reg_v);
+ return -ETIMEDOUT;
+ }
+
+ if (is_writing == 0)
+ ca_do_bch_decode(mtd, chip, buf, page, addr);
+ else
+ ca_do_bch_encode(mtd, chip, page);
+ }
+
+ if (is_writing) {
+ dma_index++;
+ dma_index %= CA_DMA_DESC_NUM;
+
+ /* Update DMA R/W pointer */
+ clrsetbits_le32(&info->dma_nand->dma_q_txq_wptr, GENMASK(12, 0),
+ dma_index);
+
+ /* Wait TX DMA done */
+ ret = readl_poll_timeout(&info->dma_nand->dma_q_txq_coal_interrupt,
+ reg_v, (reg_v & 1), FLASH_LONG_DELAY);
+ if (ret) {
+ pr_err("TX DMA timeout\n");
+ return -ETIMEDOUT;
+ }
+ /* clear tx interrupt */
+ setbits_le32(&info->dma_nand->dma_q_txq_coal_interrupt, 1);
+
+ /* Wait RX DMA done */
+ ret = readl_poll_timeout(&info->dma_nand->dma_q_rxq_coal_interrupt,
+ reg_v, (reg_v & 1), FLASH_LONG_DELAY);
+ if (ret) {
+ pr_err("RX DMA timeout\n");
+ return -ETIMEDOUT;
+ }
+ /* clear rx interrupt */
+ setbits_le32(&info->dma_nand->dma_q_rxq_coal_interrupt, 1);
+
+ /* wait NAND CMD done */
+ if (!nand_waitfor_cmd_completion(info->reg, NFLASH_FIFO_REQ))
+ printf("%s: Command timeout\n", __func__);
+
+ /* Update DMA R/W pointer */
+ clrsetbits_le32(&info->dma_nand->dma_q_rxq_rptr, GENMASK(12, 0),
+ dma_index);
+ }
+
+ return 0;
+}
+
+/**
+ * Hardware ECC based page read function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf buffer to store read data
+ * @param oob_required caller requires OOB data read to chip->oob_poi
+ * @param page page number to read
+ * @return 0 when successfully completed
+ * -ETIMEDOUT when a command times out
+ */
+static int nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ int ret;
+
+ ret = nand_rw_page(mtd, chip, buf, page, 1, 0);
+ if (ret)
+ return ret;
+
+ /* Reset FIFO */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ ECC_RESET_ALL);
+
+ return 0;
+}
+
+/**
+ * Hardware ECC based page write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf data buffer
+ * @param oob_required must write chip->oob_poi to OOB
+ * @param page page number to write
+ * @return 0 when successfully completed
+ * -ETIMEDOUT when a command times out
+ */
+static int nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ int ret;
+
+ ret = nand_rw_page(mtd, chip, (uint8_t *)buf, page, 1, 1);
+ if (ret)
+ return ret;
+
+ /* Reset FIFO */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ ECC_RESET_ALL);
+
+ return 0;
+}
+
+/**
+ * Read raw page data without ECC
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf buffer to store read data
+ * @param oob_required caller requires OOB data read to chip->oob_poi
+ * @param page page number to read
+ * @return 0 when successfully completed
+ * -ETIMEDOUT when a command times out
+ */
+static int nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ int ret;
+
+ ret = nand_rw_page(mtd, chip, buf, page, 0, 0);
+ if (ret)
+ return ret;
+
+ /* Reset FIFO */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ ECC_RESET_ALL);
+
+ return 0;
+}
+
+/**
+ * Raw page write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf data buffer
+ * @param oob_required must write chip->oob_poi to OOB
+ * @param page page number to write
+ * @return 0 when successfully completed
+ * -ETIMEDOUT when a command times out
+ */
+static int nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ int ret;
+
+ ret = nand_rw_page(mtd, chip, buf, page, 0, 1);
+ if (ret)
+ return ret;
+
+ /* Reset FIFO */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset, GENMASK(31, 0),
+ ECC_RESET_ALL);
+
+ return 0;
+}
+
+/**
+ * OOB data read/write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param page page number to access
+ * @param with_ecc 1 to enable ECC, 0 to disable ECC
+ * @param is_writing 0 for read, 1 for write
+ * @return 0 when successfully completed
+ * -ETIMEDOUT when a command times out
+ */
+static int nand_rw_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int with_ecc, int is_writing)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ u32 reg_val;
+ int rw_index;
+
+ if (is_writing) {
+ reg_val = NFLASH_GO | NFLASH_WT;
+ pwrite = (unsigned int *)chip->oob_poi;
+ } else {
+ reg_val = NFLASH_GO | NFLASH_RD;
+ pread = (unsigned int *)chip->oob_poi;
+ }
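+
+ /*
+ * Unlike the page-data path, the OOB area is moved by PIO here:
+ * each loop iteration transfers one 32-bit word through
+ * flash_nf_data instead of going through the DMA engine.
+ */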
+
+ for (rw_index = 0; rw_index < mtd->oobsize / 4; rw_index++) {
+ clrsetbits_le32(&info->reg->flash_nf_access, GENMASK(31, 0),
+ NFLASH_REG_WIDTH_32);
+ if (is_writing)
+ clrsetbits_le32(&info->reg->flash_nf_data,
+ GENMASK(31, 0), pwrite[rw_index]);
+
+ clrsetbits_le32(&info->reg->flash_flash_access_start,
+ GENMASK(11, 10), reg_val);
+
+ if (!nand_waitfor_cmd_completion(info->reg, NFLASH_GO))
+ printf("%s: Command timeout\n", __func__);
+
+ if (!is_writing)
+ pread[rw_index] = readl(&info->reg->flash_nf_data);
+ }
+ return 0;
+}
+
+/**
+ * OOB data read function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param page page number to read
+ * @return 0 when successfully completed
+ */
+static int nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ int ret;
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ if (mtd->writesize <= (REG_DATA_COUNT_512_DATA >> 8))
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(7, 0),
+ NAND_CMD_READOOB);
+ ret = nand_rw_oob(mtd, chip, page, 0, 0);
+
+ /* Reset FIFO */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset,
+ GENMASK(31, 0), ECC_RESET_ALL);
+
+ return ret;
+}
+
+/**
+ * OOB data write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param page page number to write
+ * @return 0 when successfully completed
+ * -ETIMEDOUT when a command times out
+ */
+static int nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(chip);
+ int ret;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ if (mtd->writesize <= (REG_DATA_COUNT_512_DATA >> 8)) {
+ clrsetbits_le32(&info->reg->flash_nf_command, GENMASK(31, 0),
+ NAND_CMD_READOOB | (NAND_CMD_SEQIN << 8) |
+ (NAND_CMD_PAGEPROG << 16));
+ clrsetbits_le32(&info->reg->flash_nf_count, GENMASK(1, 0),
+ REG_CMD_COUNT_3TOGO);
+ }
+ ret = nand_rw_oob(mtd, chip, page, 1, 1);
+
+ /* Reset FIFO */
+ clrsetbits_le32(&info->reg->flash_nf_ecc_reset,
+ GENMASK(31, 0), ECC_RESET_ALL);
+
+ return ret;
+}
+
+/**
+ * Decode NAND parameters from the device tree
+ *
+ * @param dev Driver model device
+ * @param info NAND driver data; its device tree configuration
+ * (info->config) is filled in here
+ * @return 0 when successfully completed
+ */
+static int fdt_decode_nand(struct udevice *dev, struct nand_drv *info)
+{
+ int ecc_strength;
+
+ info->reg = (struct nand_ctlr *)dev_read_addr(dev);
+ info->dma_glb = (struct dma_global *)dev_read_addr_index(dev, 1);
+ info->dma_nand = (struct dma_ssp *)dev_read_addr_index(dev, 2);
+ info->config.enabled = dev_read_enabled(dev);
+ ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 16);
+ info->flash_base =
+ dev_read_u32_default(dev, "nand_flash_base_addr", NAND_BASE_ADDR);
+
+ switch (ecc_strength) {
+ case ECC_STRENGTH_8:
+ info->config.nand_ecc_strength = ECC_STRENGTH_8;
+ break;
+ case ECC_STRENGTH_16:
+ info->config.nand_ecc_strength = ECC_STRENGTH_16;
+ break;
+ case ECC_STRENGTH_24:
+ info->config.nand_ecc_strength = ECC_STRENGTH_24;
+ break;
+ case ECC_STRENGTH_40:
+ info->config.nand_ecc_strength = ECC_STRENGTH_40;
+ break;
+ default:
+ info->config.nand_ecc_strength = ECC_STRENGTH_16;
+ }
+
+ return 0;
+}
+
+/**
+ * Configure flash type
+ *
+ * @param nand nand chip info structure
+ */
+static void nand_config_flash_type(struct nand_chip *nand)
+{
+ struct nand_drv *info =
+ (struct nand_drv *)nand_get_controller_data(nand);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
+ switch (mtd->writesize) {
+ case WRITE_SIZE_512:
+ clrsetbits_le32(&info->reg->flash_type, GENMASK(31, 0),
+ FLASH_PIN | FLASH_TYPE_512);
+ break;
+ case WRITE_SIZE_2048:
+ clrsetbits_le32(&info->reg->flash_type, GENMASK(31, 0),
+ FLASH_PIN | FLASH_TYPE_2K);
+ break;
+ case WRITE_SIZE_4096:
+ clrsetbits_le32(&info->reg->flash_type, GENMASK(31, 0),
+ FLASH_PIN | FLASH_TYPE_4K);
+ break;
+ case WRITE_SIZE_8192:
+ clrsetbits_le32(&info->reg->flash_type, GENMASK(31, 0),
+ FLASH_PIN | FLASH_TYPE_8K);
+ break;
+ default:
+ pr_err("Unsupported page size(0x%x)!", nand->ecc.size);
+ }
+}
+
+/**
+ * Configure OOB layout
+ *
+ * @param nand nand chip info structure
+ * @return 0 when successfully completed
+ * -EINVAL when ECC bytes exceed OOB size
+ */
+static int nand_config_oob_layout(struct nand_chip *nand)
+{
+ int i, ecc_start_offset;
+ struct mtd_info *mtd = nand_to_mtd(nand);
+
+ /* Calculate byte count for ECC */
+ eccoob.eccbytes = mtd->writesize / nand->ecc.size * nand->ecc.bytes;
+
+ if (mtd->oobsize < eccoob.eccbytes) {
+ pr_err("Spare area(%d) too small for BCH%d\n", nand->ecc.bytes,
+ nand->ecc.strength / 8);
+ pr_err("page_sz: %d\n", nand->ecc.size);
+ pr_err("oob_sz: %d\n", nand->ecc.bytes);
+ return -EINVAL;
+ }
+
+ /* Update OOB layout */
+ ecc_start_offset = mtd->oobsize - eccoob.eccbytes;
+ memset(eccoob.eccpos, 0, sizeof(eccoob.eccpos));
+ for (i = 0; i < eccoob.eccbytes; ++i)
+ eccoob.eccpos[i] = i + ecc_start_offset;
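+
+ /*
+ * Worked example: a 2048-byte page with 1024-byte ECC steps and
+ * BCH16 (28 parity bytes per step) gives eccbytes = 2 * 28 = 56,
+ * so the parity occupies the last 56 bytes of the OOB area.
+ */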
+
+ /*
+ * Unused spare area:
+ * OOB[0] is the bad block marker, and two extra bytes are
+ * reserved as an erase marker immediately before the ECC code.
+ */
+ eccoob.oobavail = mtd->oobsize - eccoob.eccbytes - 2;
+ eccoob.oobfree[0].offset = 2;
+ eccoob.oobfree[0].length =
+ mtd->oobsize - eccoob.eccbytes - eccoob.oobfree[0].offset - 1;
+
+ return 0;
+}
+
+static int ca_nand_probe(struct udevice *dev)
+{
+ struct ca_nand_info *ca_nand = dev_get_priv(dev);
+ struct nand_chip *nand = &ca_nand->nand_chip;
+ struct nand_drv *info = &ca_nand->nand_ctrl;
+ struct fdt_nand *config = &info->config;
+ struct mtd_info *our_mtd;
+ int ret;
+
+ if (fdt_decode_nand(dev, info)) {
+ printf("Could not decode nand-flash in device tree\n");
+ return -1;
+ }
+ if (!config->enabled)
+ return -1;
+
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.layout = &eccoob;
+
+ nand->cmdfunc = ca_nand_command;
+ nand->read_byte = read_byte;
+ nand->read_buf = read_buf;
+ nand->ecc.read_page = nand_read_page_hwecc;
+ nand->ecc.write_page = nand_write_page_hwecc;
+ nand->ecc.read_page_raw = nand_read_page_raw;
+ nand->ecc.write_page_raw = nand_write_page_raw;
+ nand->ecc.read_oob = nand_read_oob;
+ nand->ecc.write_oob = nand_write_oob;
+ nand->ecc.strength = config->nand_ecc_strength;
+ nand->select_chip = nand_select_chip;
+ nand->dev_ready = nand_dev_ready;
+ nand_set_controller_data(nand, &ca_nand->nand_ctrl);
+
+ /* Disable subpage writes as we do not provide ecc->hwctl */
+ nand->options |= NAND_NO_SUBPAGE_WRITE | NAND_SKIP_BBTSCAN;
+
+ /* Configure flash type as P-NAND */
+ clrsetbits_le32(&info->reg->flash_type, FLASH_PIN,
+ FLASH_TYPE_4K | FLASH_SIZE_436OOB);
+ config->width = FLASH_WIDTH;
+
+ our_mtd = nand_to_mtd(nand);
+ ret = nand_scan_ident(our_mtd, CONFIG_SYS_NAND_MAX_CHIPS, NULL);
+ if (ret)
+ return ret;
+
+ nand->ecc.size = BCH_DATA_UNIT;
+ nand->ecc.bytes = BCH_GF_PARAM_M * (nand->ecc.strength / 8);
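+ /*
+ * With BCH over GF(2^14), each bit of correction strength costs
+ * 14 parity bits: e.g. strength 16 needs 14 * 16 = 224 bits
+ * = 28 bytes per 1024-byte step, which is what the formula above
+ * computes.
+ */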
+
+ /* Reconfig flash type according to ONFI */
+ nand_config_flash_type(nand);
+
+ ret = set_bus_width_page_size(our_mtd);
+ if (ret)
+ return ret;
+
+ /* Set the bad block position */
+ nand->badblockpos = our_mtd->writesize > 512 ?
+ NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+
+ /* Arrange OOB layout */
+ ret = nand_config_oob_layout(nand);
+ if (ret)
+ return ret;
+
+ /* Init DMA descriptor ring */
+ ret = init_nand_dma(nand);
+ if (ret)
+ return ret;
+
+ ret = nand_scan_tail(our_mtd);
+ if (ret)
+ return ret;
+
+ ret = nand_register(0, our_mtd);
+ if (ret) {
+ dev_err(dev, "Failed to register MTD: %d\n", ret);
+ return ret;
+ }
+
+ ret = set_bus_width_page_size(our_mtd);
+ if (ret)
+ return ret;
+
+ printf("P-NAND : %s\n", our_mtd->name);
+ printf("Chip Size: %lldMB\n", nand->chipsize / (1024 * 1024));
+ printf("Block Size: %dKB\n", our_mtd->erasesize / 1024);
+ printf("Page Size: %dB\n", our_mtd->writesize);
+ printf("OOB Size: %dB\n", our_mtd->oobsize);
+
+ return 0;
+}
+
+U_BOOT_DRIVER(cortina_nand) = {
+ .name = "CA-PNAND",
+ .id = UCLASS_MTD,
+ .of_match = cortina_nand_dt_ids,
+ .probe = ca_nand_probe,
+ .priv_auto = sizeof(struct ca_nand_info),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(cortina_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name, ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/cortina_nand.h b/roms/u-boot/drivers/mtd/nand/raw/cortina_nand.h
new file mode 100644
index 000000000..1e3e3bfd0
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/cortina_nand.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2020 Cortina Access Inc.
+ */
+
+/* Cortina NAND definition */
+#define NAND_BASE_ADDR 0xE0000000
+#define BCH_GF_PARAM_M 14
+#define BCH_DATA_UNIT 1024
+#define FLASH_SHORT_DELAY 100
+#define FLASH_LONG_DELAY 1000
+#define FLASH_WIDTH 16
+#define BBT_PAGE_MASK 0xffffff3f
+#define WRITE_SIZE_512 512
+#define WRITE_SIZE_2048 2048
+#define WRITE_SIZE_4096 4096
+#define WRITE_SIZE_8192 8192
+#define ECC_STRENGTH_8 8
+#define ECC_STRENGTH_16 16
+#define ECC_STRENGTH_24 24
+#define ECC_STRENGTH_40 40
+#define EMPTY_PAGE 0xff
+#define ADDR1_MASK0 0x00ffffff
+#define ADDR2_MASK0 0xff000000
+#define ADDR1_MASK1 0xffff
+#define ADDR1_MASK2 0xff
+#define OOB_MASK 0xff
+#define EXT_ADDR_MASK 0x8000000
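+/*
+ * Used as a divisor/modulus rather than a bitmask: nand_rw_page()
+ * splits the flash byte offset into a 128 MiB (0x8000000) window
+ * index written to flash_nf_access and a remainder added to
+ * flash_base.
+ */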
+
+/* Status bits */
+#define NAND_STATUS_FAIL 0x01
+#define NAND_STATUS_FAIL_N1 0x02
+#define NAND_STATUS_TRUE_READY 0x20
+#define NAND_STATUS_READY 0x40
+#define NAND_STATUS_WP 0x80
+
+/* Bit field in FLASH_TYPE */
+#define FLASH_PIN BIT(15)
+#define FLASH_TYPE_512 0x4000
+#define FLASH_TYPE_2K 0x5000
+#define FLASH_TYPE_4K 0x6000
+#define FLASH_TYPE_8K 0x7000
+#define FLASH_SIZE_CONFIGURABLEOOB (0x0 << 9)
+#define FLASH_SIZE_400OOB (0x1 << 9)
+#define FLASH_SIZE_436OOB (0x2 << 9)
+#define FLASH_SIZE_640OOB (0x3 << 9)
+
+/* Bit field in FLASH_STATUS */
+#define NFLASH_READY BIT(26)
+
+/* Bit field in FLASH_NF_ACCESS */
+#define NFLASH_ENABLE_ALTERNATIVE (0x0 << 15)
+#define AUTO_RESET BIT(16)
+#define DISABLE_AUTO_RESET (0x0 << 16)
+#define NFLASH_REG_WIDTH_RESERVED (0x3 << 10)
+#define NFLASH_REG_WIDTH_32 (0x2 << 10)
+#define NFLASH_REG_WIDTH_16 (0x1 << 10)
+#define NFLASH_REG_WIDTH_8 (0x0 << 10)
+
+/* Bit field in FLASH_NF_COUNT */
+#define REG_CMD_COUNT_EMPTY 0x3
+#define REG_CMD_COUNT_3TOGO 0x2
+#define REG_CMD_COUNT_2TOGO 0x1
+#define REG_CMD_COUNT_1TOGO 0x0
+#define REG_ADDR_COUNT_EMPTY (0x7 << 4)
+#define REG_ADDR_COUNT_5 (0x4 << 4)
+#define REG_ADDR_COUNT_4 (0x3 << 4)
+#define REG_ADDR_COUNT_3 (0x2 << 4)
+#define REG_ADDR_COUNT_2 (0x1 << 4)
+#define REG_ADDR_COUNT_1 (0x0 << 4)
+#define REG_DATA_COUNT_EMPTY (0x3fff << 8)
+#define REG_DATA_COUNT_512_DATA (0x1FF << 8)
+#define REG_DATA_COUNT_2k_DATA (0x7FF << 8)
+#define REG_DATA_COUNT_4k_DATA (0xFFF << 8)
+#define REG_DATA_COUNT_DATA_1 (0x0 << 8)
+#define REG_DATA_COUNT_DATA_2 (0x1 << 8)
+#define REG_DATA_COUNT_DATA_3 (0x2 << 8)
+#define REG_DATA_COUNT_DATA_4 (0x3 << 8)
+#define REG_DATA_COUNT_DATA_5 (0x4 << 8)
+#define REG_DATA_COUNT_DATA_6 (0x5 << 8)
+#define REG_DATA_COUNT_DATA_7 (0x6 << 8)
+#define REG_DATA_COUNT_DATA_8 (0x7 << 8)
+#define REG_OOB_COUNT_EMPTY (0x3ff << 22)
+
+/* Bit field in FLASH_FLASH_ACCESS_START */
+#define NFLASH_GO BIT(0)
+#define NFLASH_FIFO_REQ BIT(2)
+#define NFLASH_RD BIT(13)
+#define NFLASH_WT (BIT(12) | BIT(13))
+
+/* Bit field in FLASH_NF_ECC_RESET */
+#define RESET_NFLASH_RESET BIT(2)
+#define RESET_NFLASH_FIFO BIT(1)
+#define RESET_NFLASH_ECC BIT(0)
+#define ECC_RESET_ALL \
+ (RESET_NFLASH_RESET | RESET_NFLASH_FIFO | RESET_NFLASH_ECC)
+
+/* Bit field in FLASH_NF_ECC_CONTROL */
+#define ENABLE_ECC_GENERATION BIT(8)
+#define DISABLE_ECC_GENERATION (0 << 8)
+
+/* Flash FIFO control */
+#define FIFO_READ 2
+#define FIFO_WRITE 3
+
+/* NFLASH INTERRUPT */
+#define REGIRQ_CLEAR BIT(0)
+#define F_ADDR_ERR 2
+
+/* BCH ECC field definition */
+#define BCH_COMPARE BIT(0)
+#define BCH_ENABLE BIT(8)
+#define BCH_DISABLE (0 << 8)
+#define BCH_DECODE BIT(1)
+#define BCH_ENCODE (0 << 1)
+#define BCH_DECO_DONE BIT(30)
+#define BCH_GEN_DONE BIT(31)
+#define BCH_UNCORRECTABLE 0x3
+#define BCH_CORRECTABLE_ERR 0x2
+#define BCH_NO_ERR 0x1
+#define BCH_BUSY 0x0
+#define BCH_ERR_MASK 0x3
+#define BCH_ERR_NUM_MASK 0x3F
+#define BCH_ERR_LOC_MASK 0x3FFF
+#define BCH_CORRECT_LOC_MASK 0x7
+#define BCH_ERR_CAP_8 (0x0 << 9)
+#define BCH_ERR_CAP_16 (0x1 << 9)
+#define BCH_ERR_CAP_24 (0x2 << 9)
+#define BCH_ERR_CAP_40 (0x3 << 9)
+
+struct nand_ctlr {
+ /* Cortina NAND controller register */
+ u32 flash_id;
+ u32 flash_timeout;
+ u32 flash_status;
+ u32 flash_type;
+ u32 flash_flash_access_start;
+ u32 flash_flash_interrupt;
+ u32 flash_flash_mask;
+ u32 flash_fifo_control;
+ u32 flash_fifo_status;
+ u32 flash_fifo_address;
+ u32 flash_fifo_match_address;
+ u32 flash_fifo_data;
+ u32 flash_sf_access;
+ u32 flash_sf_ext_access;
+ u32 flash_sf_address;
+ u32 flash_sf_data;
+ u32 flash_sf_timing;
+ u32 resv[3];
+ u32 flash_pf_access; // offset 0x050
+ u32 flash_pf_timing;
+ u32 resv1[2];
+ u32 flash_nf_access; // offset 0x060
+ u32 flash_nf_count;
+ u32 flash_nf_command;
+ u32 flash_nf_address_1;
+ u32 flash_nf_address_2;
+ u32 flash_nf_data;
+ u32 flash_nf_timing;
+ u32 flash_nf_ecc_status;
+ u32 flash_nf_ecc_control;
+ u32 flash_nf_ecc_oob;
+ u32 flash_nf_ecc_gen0;
+ u32 resv3[15];
+ u32 flash_nf_ecc_reset; // offset 0x0c8
+ u32 flash_nf_bch_control;
+ u32 flash_nf_bch_status;
+ u32 flash_nf_bch_error_loc01;
+ u32 resv4[19];
+ u32 flash_nf_bch_oob0; // offset 0x124
+ u32 resv5[17];
+ u32 flash_nf_bch_gen0_0; // offset 0x16c
+};
+
+/* Definition for DMA bitfield */
+#define TX_DMA_ENABLE BIT(0)
+#define RX_DMA_ENABLE BIT(0)
+#define DMA_CHECK_OWNER BIT(1)
+#define OWN_DMA 0
+#define OWN_CPU 1
+
+#define CA_DMA_DEPTH 3
+#define CA_DMA_DESC_NUM (BIT(0) << CA_DMA_DEPTH)
+#define CA_DMA_Q_PTR_MASK 0x1fff
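+
+/*
+ * The descriptor rings hold BIT(0) << CA_DMA_DEPTH = 8 entries; the
+ * driver advances its ring indices modulo CA_DMA_DESC_NUM and masks
+ * hardware queue pointers with CA_DMA_Q_PTR_MASK.
+ */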
+
+struct dma_q_base_depth_t {
+ u32 depth : 4 ; /* bits 3:0 */
+ u32 base : 28 ; /* bits 31:4 */
+};
+
+struct tx_descriptor_t {
+ unsigned int buf_adr; /* Buff addr */
+ unsigned int buf_adr_hi : 8 ; /* bits 7:0 */
+ unsigned int buf_len : 16 ; /* bits 23:8 */
+ unsigned int sgm : 1 ; /* bit 24 */
+ unsigned int rsrvd : 6 ; /* bits 30:25 */
+ unsigned int own : 1 ; /* bit 31 */
+};
+
+struct rx_descriptor_t {
+ unsigned int buf_adr; /* Buff addr */
+ unsigned int buf_adr_hi : 8 ; /* bits 7:0 */
+ unsigned int buf_len : 16 ; /* bits 23:8 */
+ unsigned int rsrvd : 7 ; /* bits 30:24 */
+ unsigned int own : 1 ; /* bit 31 */
+};
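+
+/*
+ * Both descriptor layouts extend the 32-bit buf_adr with an 8-bit
+ * buf_adr_hi, giving a 40-bit buffer address, while the 16-bit
+ * buf_len field limits a single descriptor to 64 KiB - 1 bytes.
+ */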
+
+struct dma_global {
+ u32 dma_glb_dma_lso_ctrl;
+ u32 dma_glb_lso_interrupt;
+ u32 dma_glb_lso_intenable;
+ u32 dma_glb_dma_lso_vlan_tag_type0;
+ u32 dma_glb_dma_lso_vlan_tag_type1;
+ u32 dma_glb_dma_lso_axi_user_sel0;
+ u32 dma_glb_axi_user_pat0;
+ u32 dma_glb_axi_user_pat1;
+ u32 dma_glb_axi_user_pat2;
+ u32 dma_glb_axi_user_pat3;
+ u32 dma_glb_fast_reg_pe0;
+ u32 dma_glb_fast_reg_pe1;
+ u32 dma_glb_dma_lso_tx_fdes_addr0;
+ u32 dma_glb_dma_lso_tx_fdes_addr1;
+ u32 dma_glb_dma_lso_tx_cdes_addr0;
+ u32 dma_glb_dma_lso_tx_cdes_addr1;
+ u32 dma_glb_dma_lso_tx_des_word0;
+ u32 dma_glb_dma_lso_tx_des_word1;
+ u32 dma_glb_dma_lso_lso_para_word0;
+ u32 dma_glb_dma_lso_lso_para_word1;
+ u32 dma_glb_dma_lso_debug0;
+ u32 dma_glb_dma_lso_debug1;
+ u32 dma_glb_dma_lso_debug2;
+ u32 dma_glb_dma_lso_spare0;
+ u32 dma_glb_dma_lso_spare1;
+ u32 dma_glb_dma_ssp_rx_ctrl;
+ u32 dma_glb_dma_ssp_tx_ctrl;
+ u32 dma_glb_dma_ssp_axi_user_sel0;
+ u32 dma_glb_dma_ssp_axi_user_sel1;
+ u32 dma_glb_dma_ssp_rx_fdes_addr0;
+ u32 dma_glb_dma_ssp_rx_fdes_addr1;
+ u32 dma_glb_dma_ssp_rx_cdes_addr0;
+ u32 dma_glb_dma_ssp_rx_cdes_addr1;
+ u32 dma_glb_dma_ssp_rx_des_word0;
+ u32 dma_glb_dma_ssp_rx_des_word1;
+ u32 dma_glb_dma_ssp_tx_fdes_addr0;
+ u32 dma_glb_dma_ssp_tx_fdes_addr1;
+ u32 dma_glb_dma_ssp_tx_cdes_addr0;
+ u32 dma_glb_dma_ssp_tx_cdes_addr1;
+ u32 dma_glb_dma_ssp_tx_des_word0;
+ u32 dma_glb_dma_ssp_tx_des_word1;
+ u32 dma_glb_dma_ssp_debug0;
+ u32 dma_glb_dma_ssp_debug1;
+ u32 dma_glb_dma_ssp_debug2;
+ u32 dma_glb_dma_ssp_spare0;
+ u32 dma_glb_dma_ssp_spare1;
+};
+
+struct dma_ssp {
+ u32 dma_q_rxq_control;
+ u32 dma_q_rxq_base_depth;
+ u32 dma_q_rxq_base;
+ u32 dma_q_rxq_wptr;
+ u32 dma_q_rxq_rptr;
+ u32 dma_q_rxq_pktcnt;
+ u32 dma_q_txq_control;
+ u32 dma_q_txq_base_depth;
+ u32 dma_q_txq_base;
+ u32 dma_q_txq_wptr;
+ u32 dma_q_txq_rptr;
+ u32 dma_q_txq_pktcnt;
+ u32 dma_q_rxq_interrupt;
+ u32 dma_q_rxq_intenable;
+ u32 dma_q_txq_interrupt;
+ u32 dma_q_txq_intenable;
+ u32 dma_q_rxq_misc_interrupt;
+ u32 dma_q_rxq_misc_intenable;
+ u32 dma_q_txq_misc_interrupt;
+ u32 dma_q_txq_misc_intenable;
+ u32 dma_q_rxq_coal_interrupt;
+ u32 dma_q_rxq_coal_intenable;
+ u32 dma_q_txq_coal_interrupt;
+ u32 dma_q_txq_coal_intenable;
+ u32 dma_q_rxq_frag_buff_addr0;
+ u32 dma_q_rxq_frag_buff_addr1;
+ u32 dma_q_rxq_frag_buff_size;
+ u32 dma_q_txq_frag_buff_addr0;
+ u32 dma_q_txq_frag_buff_addr1;
+ u32 dma_q_txq_frag_buff_size;
+ u32 dma_q_dma_spare_0;
+ u32 dma_q_dma_spare_1;
+};
diff --git a/roms/u-boot/drivers/mtd/nand/raw/davinci_nand.c b/roms/u-boot/drivers/mtd/nand/raw/davinci_nand.c
new file mode 100644
index 000000000..9ad3a5769
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/davinci_nand.c
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NAND driver for TI DaVinci based boards.
+ *
+ * Copyright (C) 2007 Sergey Kubushyn <ksi@koi8.net>
+ *
+ * Based on Linux DaVinci NAND driver by TI. Original copyright follows:
+ */
+
+/*
+ *
+ * linux/drivers/mtd/nand/raw/nand_davinci.c
+ *
+ * NAND Flash Driver
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * DaVinci board which utilizes the Samsung k9k2g08 part.
+ *
+ *
+ * Modifications:
+ *   ver. 1.0: Feb 2005, Vinod/Sudhakar
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/io.h>
+#include <nand.h>
+#include <dm/uclass.h>
+#include <asm/ti-common/davinci_nand.h>
+
+/* Definitions for 4-bit hardware ECC */
+#define NAND_TIMEOUT 10240
+#define NAND_ECC_BUSY 0xC
+#define NAND_4BITECC_MASK 0x03FF03FF
+#define EMIF_NANDFSR_ECC_STATE_MASK 0x00000F00
+#define ECC_STATE_NO_ERR 0x0
+#define ECC_STATE_TOO_MANY_ERRS 0x1
+#define ECC_STATE_ERR_CORR_COMP_P 0x2
+#define ECC_STATE_ERR_CORR_COMP_N 0x3
+
+/*
+ * Exploit the little endianness of the ARM to do multi-byte transfers
+ * per device read. This can perform over twice as quickly as individual
+ * byte transfers when buffer alignment is conducive.
+ *
+ * NOTE: This only works if the NAND is not connected to the 2 LSBs of
+ * the address bus. On Davinci EVM platforms this has always been true.
+ */
+static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const u32 *nand = chip->IO_ADDR_R;
+
+ /* Make sure that buf is 32 bit aligned */
+ if (((int)buf & 0x3) != 0) {
+ if (((int)buf & 0x1) != 0) {
+ if (len) {
+ *buf = readb(nand);
+ buf += 1;
+ len--;
+ }
+ }
+
+ if (((int)buf & 0x3) != 0) {
+ if (len >= 2) {
+ *(u16 *)buf = readw(nand);
+ buf += 2;
+ len -= 2;
+ }
+ }
+ }
+
+ /* copy aligned data */
+ while (len >= 4) {
+ *(u32 *)buf = __raw_readl(nand);
+ buf += 4;
+ len -= 4;
+ }
+
+ /* mop up any remaining bytes */
+ if (len) {
+ if (len >= 2) {
+ *(u16 *)buf = readw(nand);
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len)
+ *buf = readb(nand);
+ }
+}
+
+static void nand_davinci_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const u32 *nand = chip->IO_ADDR_W;
+
+ /* Make sure that buf is 32 bit aligned */
+ if (((int)buf & 0x3) != 0) {
+ if (((int)buf & 0x1) != 0) {
+ if (len) {
+ writeb(*buf, nand);
+ buf += 1;
+ len--;
+ }
+ }
+
+ if (((int)buf & 0x3) != 0) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+ }
+ }
+
+ /* copy aligned data */
+ while (len >= 4) {
+ __raw_writel(*(u32 *)buf, nand);
+ buf += 4;
+ len -= 4;
+ }
+
+ /* mop up any remaining bytes */
+ if (len) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len)
+ writeb(*buf, nand);
+ }
+}
+
+static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u_int32_t IO_ADDR_W = (u_int32_t)this->IO_ADDR_W;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ IO_ADDR_W &= ~(MASK_ALE|MASK_CLE);
+
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= MASK_CLE;
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= MASK_ALE;
+ this->IO_ADDR_W = (void __iomem *) IO_ADDR_W;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, IO_ADDR_W);
+}
+
+#ifdef CONFIG_SYS_NAND_HW_ECC
+
+static u_int32_t nand_davinci_readecc(struct mtd_info *mtd)
+{
+ u_int32_t ecc = 0;
+
+ ecc = __raw_readl(&(davinci_emif_regs->nandfecc[
+ CONFIG_SYS_NAND_CS - 2]));
+
+ return ecc;
+}
+
+static void nand_davinci_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ u_int32_t val;
+
+ /* reading the ECC result register resets the ECC calculation */
+ nand_davinci_readecc(mtd);
+
+ val = __raw_readl(&davinci_emif_regs->nandfcr);
+ val |= DAVINCI_NANDFCR_NAND_ENABLE(CONFIG_SYS_NAND_CS);
+ val |= DAVINCI_NANDFCR_1BIT_ECC_START(CONFIG_SYS_NAND_CS);
+ __raw_writel(val, &davinci_emif_regs->nandfcr);
+}
+
+static int nand_davinci_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ u_int32_t tmp;
+
+ tmp = nand_davinci_readecc(mtd);
+
+ /*
+ * Squeeze the 4-byte ECC into 3 bytes by removing the RESERVED
+ * bits and shifting. RESERVED bits are 31 to 28 and 15 to 12.
+ */
+ tmp = (tmp & 0x00000fff) | ((tmp & 0x0fff0000) >> 4);
+
+ /* Invert so that erased block ECC is correct */
+ tmp = ~tmp;
+
+ *ecc_code++ = tmp;
+ *ecc_code++ = tmp >> 8;
+ *ecc_code++ = tmp >> 16;
+
+ /* NOTE: the above code matches mainline Linux:
+ * .PQR.stu ==> ~PQRstu
+ *
+ * MontaVista/TI kernels encode those bytes differently, use
+ * complicated (and allegedly sometimes-wrong) correction code,
+ * and usually shipped with U-Boot that uses software ECC:
+ * .PQR.stu ==> PsQRtu
+ *
+ * If you need MV/TI compatible NAND I/O in U-Boot, it should
+ * be possible to (a) change the mangling above, (b) reverse
+ * that mangling in nand_davinci_correct_data() below.
+ */
+
+ return 0;
+}
+
+static int nand_davinci_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u_int32_t ecc_nand = read_ecc[0] | (read_ecc[1] << 8) |
+ (read_ecc[2] << 16);
+ u_int32_t ecc_calc = calc_ecc[0] | (calc_ecc[1] << 8) |
+ (calc_ecc[2] << 16);
+ u_int32_t diff = ecc_calc ^ ecc_nand;
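+
+ /*
+ * For this 1-bit Hamming-style code, a single flipped data bit
+ * makes the upper 12 bits of diff the exact complement of the
+ * lower 12 bits, which the test below exploits; the error location
+ * is then byte = diff >> 15 and bit index = (diff >> 12) & 7.
+ */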
+
+ if (diff) {
+ if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
+ /* Correctable error */
+ if ((diff >> (12 + 3)) < this->ecc.size) {
+ uint8_t find_bit = 1 << ((diff >> 12) & 7);
+ uint32_t find_byte = diff >> (12 + 3);
+
+ dat[find_byte] ^= find_bit;
+ pr_debug("Correcting single "
+ "bit ECC error at offset: %d, bit: "
+ "%d\n", find_byte, find_bit);
+ return 1;
+ } else {
+ return -EBADMSG;
+ }
+ } else if (!(diff & (diff - 1))) {
+ /*
+ * Single bit ECC error in the ECC itself,
+ * nothing to fix
+ */
+ pr_debug("Single bit ECC error in ECC.\n");
+ return 1;
+ } else {
+ /* Uncorrectable error */
+ pr_debug("ECC UNCORRECTED_ERROR 1\n");
+ return -EBADMSG;
+ }
+ }
+ return 0;
+}
+#endif /* CONFIG_SYS_NAND_HW_ECC */
+
+#ifdef CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST
+static struct nand_ecclayout nand_davinci_4bit_layout_oobfirst = {
+#if defined(CONFIG_SYS_NAND_PAGE_2K)
+ .eccbytes = 40,
+#ifdef CONFIG_NAND_6BYTES_OOB_FREE_10BYTES_ECC
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ {2, 4}, {16, 6}, {32, 6}, {48, 6},
+ },
+#else
+ .eccpos = {
+ 24, 25, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 22, },
+ },
+#endif /* #ifdef CONFIG_NAND_6BYTES_OOB_FREE_10BYTES_ECC */
+#elif defined(CONFIG_SYS_NAND_PAGE_4K)
+ .eccbytes = 80,
+ .eccpos = {
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+ 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+ 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 46, },
+ },
+#endif
+};
+
+#if defined CONFIG_KEYSTONE_RBL_NAND
+static struct nand_ecclayout nand_keystone_rbl_4bit_layout_oobfirst = {
+#if defined(CONFIG_SYS_NAND_PAGE_2K)
+ .eccbytes = 40,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4, },
+ {.offset = 16, .length = 6, },
+ {.offset = 32, .length = 6, },
+ {.offset = 48, .length = 6, },
+ },
+#elif defined(CONFIG_SYS_NAND_PAGE_4K)
+ .eccbytes = 80,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+ },
+ .oobfree = {
+ {.offset = 2, .length = 4, },
+ {.offset = 16, .length = 6, },
+ {.offset = 32, .length = 6, },
+ {.offset = 48, .length = 6, },
+ {.offset = 64, .length = 6, },
+ {.offset = 80, .length = 6, },
+ {.offset = 96, .length = 6, },
+ {.offset = 112, .length = 6, },
+ },
+#endif
+};
+
+#ifdef CONFIG_SYS_NAND_PAGE_2K
+#define CONFIG_KEYSTONE_NAND_MAX_RBL_PAGE (CONFIG_KEYSTONE_NAND_MAX_RBL_SIZE >> 11)
+#elif defined(CONFIG_SYS_NAND_PAGE_4K)
+#define CONFIG_KEYSTONE_NAND_MAX_RBL_PAGE (CONFIG_KEYSTONE_NAND_MAX_RBL_SIZE >> 12)
+#endif
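+
+/*
+ * The shift converts CONFIG_KEYSTONE_NAND_MAX_RBL_SIZE in bytes into a
+ * page count: e.g. with 2 KiB pages (shift 11), a hypothetical 1 MiB
+ * RBL region gives 0x100000 >> 11 = 512 pages using the RBL ECC layout.
+ */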
+
+/**
+ * nand_davinci_write_page - write one page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @offset: column offset within the page
+ * @data_len: number of data bytes to write
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @raw: use _raw version of write_page
+ */
+static int nand_davinci_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len,
+ const uint8_t *buf, int oob_required,
+ int page, int raw)
+{
+ int status;
+ int ret = 0;
+ struct nand_ecclayout *saved_ecc_layout;
+
+ /* save current ECC layout and assign Keystone RBL ECC layout */
+ if (page < CONFIG_KEYSTONE_NAND_MAX_RBL_PAGE) {
+ saved_ecc_layout = chip->ecc.layout;
+ chip->ecc.layout = &nand_keystone_rbl_4bit_layout_oobfirst;
+ mtd->oobavail = chip->ecc.layout->oobavail;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ if (unlikely(raw)) {
+ status = chip->ecc.write_page_raw(mtd, chip, buf,
+ oob_required, page);
+ } else {
+ status = chip->ecc.write_page(mtd, chip, buf,
+ oob_required, page);
+ }
+
+ if (status < 0) {
+ ret = status;
+ goto err;
+ }
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ if (status & NAND_STATUS_FAIL) {
+ ret = -EIO;
+ goto err;
+ }
+
+err:
+ /* restore ECC layout */
+ if (page < CONFIG_KEYSTONE_NAND_MAX_RBL_PAGE) {
+ chip->ecc.layout = saved_ecc_layout;
+ mtd->oobavail = saved_ecc_layout->oobavail;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_davinci_read_page_hwecc - hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_davinci_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint32_t *eccpos;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ struct nand_ecclayout *saved_ecc_layout = chip->ecc.layout;
+
+ /* save current ECC layout and assign Keystone RBL ECC layout */
+ if (page < CONFIG_KEYSTONE_NAND_MAX_RBL_PAGE) {
+ chip->ecc.layout = &nand_keystone_rbl_4bit_layout_oobfirst;
+ mtd->oobavail = chip->ecc.layout->oobavail;
+ }
+
+ eccpos = chip->ecc.layout->eccpos;
+
+ /* Read the OOB area first */
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+
+ /* restore ECC layout */
+ if (page < CONFIG_KEYSTONE_NAND_MAX_RBL_PAGE) {
+ chip->ecc.layout = saved_ecc_layout;
+ mtd->oobavail = saved_ecc_layout->oobavail;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_KEYSTONE_RBL_NAND */
+
+static void nand_davinci_4bit_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ u32 val;
+
+ switch (mode) {
+ case NAND_ECC_WRITE:
+ case NAND_ECC_READ:
+ /*
+ * Start a new ECC calculation for reading or writing 512 bytes
+ * of data.
+ */
+ val = __raw_readl(&davinci_emif_regs->nandfcr);
+ val &= ~DAVINCI_NANDFCR_4BIT_ECC_SEL_MASK;
+ val |= DAVINCI_NANDFCR_NAND_ENABLE(CONFIG_SYS_NAND_CS);
+ val |= DAVINCI_NANDFCR_4BIT_ECC_SEL(CONFIG_SYS_NAND_CS);
+ val |= DAVINCI_NANDFCR_4BIT_ECC_START;
+ __raw_writel(val, &davinci_emif_regs->nandfcr);
+ break;
+ case NAND_ECC_READSYN:
+ val = __raw_readl(&davinci_emif_regs->nand4bitecc[0]);
+ break;
+ default:
+ break;
+ }
+}
+
+static u32 nand_davinci_4bit_readecc(struct mtd_info *mtd, unsigned int ecc[4])
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ ecc[i] = __raw_readl(&davinci_emif_regs->nand4bitecc[i]) &
+ NAND_4BITECC_MASK;
+ }
+
+ return 0;
+}
+
+static int nand_davinci_4bit_calculate_ecc(struct mtd_info *mtd,
+ const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ unsigned int hw_4ecc[4];
+ unsigned int i;
+
+ nand_davinci_4bit_readecc(mtd, hw_4ecc);
+
+ /*
+ * Convert the 10-bit ECC values to 8-bit bytes: each syndrome
+ * register holds two 10-bit parity values (bits 9:0 and 25:16),
+ * so 8 values * 10 bits = 80 bits pack into the 10 ECC bytes.
+ */
+ for (i = 0; i < 2; i++) {
+ unsigned int hw_ecc_low = hw_4ecc[i * 2];
+ unsigned int hw_ecc_hi = hw_4ecc[(i * 2) + 1];
+
+ /* Take first 8 bits from val1 (count1=0) or val5 (count1=1) */
+ *ecc_code++ = hw_ecc_low & 0xFF;
+
+ /*
+ * Take 2 bits as LSB bits from val1 (count1=0) or val5
+ * (count1=1) and 6 bits from val2 (count1=0) or
+ * val5 (count1=1)
+ */
+ *ecc_code++ =
+ ((hw_ecc_low >> 8) & 0x3) | ((hw_ecc_low >> 14) & 0xFC);
+
+ /*
+ * Take 4 bits from val2 (count1=0) or val5 (count1=1) and
+ * 4 bits from val3 (count1=0) or val6 (count1=1)
+ */
+ *ecc_code++ =
+ ((hw_ecc_low >> 22) & 0xF) | ((hw_ecc_hi << 4) & 0xF0);
+
+ /*
+ * Take 6 bits from val3(count1=0) or val6 (count1=1) and
+ * 2 bits from val4 (count1=0) or val7 (count1=1)
+ */
+ *ecc_code++ =
+ ((hw_ecc_hi >> 4) & 0x3F) | ((hw_ecc_hi >> 10) & 0xC0);
+
+ /* Take 8 bits from val4 (count1=0) or val7 (count1=1) */
+ *ecc_code++ = (hw_ecc_hi >> 18) & 0xFF;
+ }
+
+ return 0;
+}
+
+static int nand_davinci_4bit_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ int i;
+ unsigned int hw_4ecc[4];
+ unsigned int iserror;
+ unsigned short *ecc16;
+ unsigned int numerrors, erroraddress, errorvalue;
+ u32 val;
+
+ /*
+ * Check for an ECC where all bytes are 0xFF. If this is the case, we
+ * will assume we are looking at an erased page and we should ignore
+ * the ECC.
+ */
+ for (i = 0; i < 10; i++) {
+ if (read_ecc[i] != 0xFF)
+ break;
+ }
+ if (i == 10)
+ return 0;
+
+ /* Convert the 8-bit ECC bytes back into 10-bit values */
+ ecc16 = (unsigned short *)&read_ecc[0];
+
+ /*
+ * Write the parity values in the NAND Flash 4-bit ECC Load register.
+ * Write each parity value one at a time starting from 4bit_ecc_val8
+ * to 4bit_ecc_val1.
+ */
+
+ /* Take 2 bits from 8th byte and 8 bits from 9th byte */
+ __raw_writel(((ecc16[4]) >> 6) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 4 bits from 7th byte and 6 bits from 8th byte */
+ __raw_writel((((ecc16[3]) >> 12) & 0xF) | ((((ecc16[4])) << 4) & 0x3F0),
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 6 bits from 6th byte and 4 bits from 7th byte */
+ __raw_writel((ecc16[3] >> 2) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 8 bits from 5th byte and 2 bits from 6th byte */
+ __raw_writel(((ecc16[2]) >> 8) | ((((ecc16[3])) << 8) & 0x300),
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 2 bits from 3rd byte and 8 bits from 4th byte */
+ __raw_writel((((ecc16[1]) >> 14) & 0x3) | ((((ecc16[2])) << 2) & 0x3FC),
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 4 bits from 2nd byte and 6 bits from 3rd byte */
+ __raw_writel(((ecc16[1]) >> 4) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 6 bits from 1st byte and 4 bits from 2nd byte */
+ __raw_writel((((ecc16[0]) >> 10) & 0x3F) | (((ecc16[1]) << 6) & 0x3C0),
+ &davinci_emif_regs->nand4biteccload);
+
+ /* Take 10 bits from 0th and 1st bytes */
+ __raw_writel((ecc16[0]) & 0x3FF,
+ &davinci_emif_regs->nand4biteccload);
+
+ /*
+ * Perform a dummy read to the EMIF Revision Code and Status register.
+ * This is required to ensure time for syndrome calculation after
+ * writing the ECC values in previous step.
+ */
+
+ val = __raw_readl(&davinci_emif_regs->nandfsr);
+
+ /*
+ * Read the syndrome from the NAND Flash 4-Bit ECC 1-4 registers.
+ * A syndrome value of 0 means no bit errors. If the syndrome is
+ * non-zero then go further otherwise return.
+ */
+ nand_davinci_4bit_readecc(mtd, hw_4ecc);
+
+ if (!(hw_4ecc[0] | hw_4ecc[1] | hw_4ecc[2] | hw_4ecc[3]))
+ return 0;
+
+ /*
+ * Clear any previous address calculation by doing a dummy read of an
+ * error address register.
+ */
+ val = __raw_readl(&davinci_emif_regs->nanderradd1);
+
+ /*
+ * Set the addr_calc_st bit(bit no 13) in the NAND Flash Control
+ * register to 1.
+ */
+ __raw_writel(DAVINCI_NANDFCR_4BIT_CALC_START,
+ &davinci_emif_regs->nandfcr);
+
+ /*
+ * Wait for the corr_state field (bits 8 to 11) in the
+ * NAND Flash Status register to be not equal to 0x0, 0x1, 0x2, or 0x3.
+ * Otherwise ECC calculation has not even begun and the next loop might
+ * fail because of a false positive!
+ */
+ i = NAND_TIMEOUT;
+ do {
+ val = __raw_readl(&davinci_emif_regs->nandfsr);
+ val &= 0xc00;
+ i--;
+ } while ((i > 0) && !val);
+
+ /*
+ * Wait for the corr_state field (bits 8 to 11) in the
+ * NAND Flash Status register to be equal to 0x0, 0x1, 0x2, or 0x3.
+ */
+ i = NAND_TIMEOUT;
+ do {
+ val = __raw_readl(&davinci_emif_regs->nandfsr);
+ val &= 0xc00;
+ i--;
+ } while ((i > 0) && val);
+
+ iserror = __raw_readl(&davinci_emif_regs->nandfsr);
+ iserror &= EMIF_NANDFSR_ECC_STATE_MASK;
+ iserror = iserror >> 8;
+
+ /*
+ * ECC_STATE_TOO_MANY_ERRS (0x1) means errors cannot be
+ * corrected (five or more errors). The number of errors
+ * calculated (err_num field) differs from the number of errors
+ * searched. ECC_STATE_ERR_CORR_COMP_P (0x2) means error
+ * correction complete (errors on bit 8 or 9).
+ * ECC_STATE_ERR_CORR_COMP_N (0x3) means error correction
+ * complete (error exists).
+ */
+
+ if (iserror == ECC_STATE_NO_ERR) {
+ val = __raw_readl(&davinci_emif_regs->nanderrval1);
+ return 0;
+ } else if (iserror == ECC_STATE_TOO_MANY_ERRS) {
+ val = __raw_readl(&davinci_emif_regs->nanderrval1);
+ return -EBADMSG;
+ }
+
+ numerrors = ((__raw_readl(&davinci_emif_regs->nandfsr) >> 16)
+ & 0x3) + 1;
+
+ /* Read the error address, error value and correct */
+ for (i = 0; i < numerrors; i++) {
+ if (i > 1) {
+ erroraddress =
+ ((__raw_readl(&davinci_emif_regs->nanderradd2) >>
+ (16 * (i & 1))) & 0x3FF);
+ erroraddress = ((512 + 7) - erroraddress);
+ errorvalue =
+ ((__raw_readl(&davinci_emif_regs->nanderrval2) >>
+ (16 * (i & 1))) & 0xFF);
+ } else {
+ erroraddress =
+ ((__raw_readl(&davinci_emif_regs->nanderradd1) >>
+ (16 * (i & 1))) & 0x3FF);
+ erroraddress = ((512 + 7) - erroraddress);
+ errorvalue =
+ ((__raw_readl(&davinci_emif_regs->nanderrval1) >>
+ (16 * (i & 1))) & 0xFF);
+ }
+ /* xor the corrupt data with error value */
+ if (erroraddress < 512)
+ dat[erroraddress] ^= errorvalue;
+ }
+
+ return numerrors;
+}
+#endif /* CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST */
+
+static int nand_davinci_dev_ready(struct mtd_info *mtd)
+{
+ return __raw_readl(&davinci_emif_regs->nandfsr) & 0x1;
+}
+
+static void davinci_nand_init(struct nand_chip *nand)
+{
+#if defined CONFIG_KEYSTONE_RBL_NAND
+ int i;
+ struct nand_ecclayout *layout;
+
+ layout = &nand_keystone_rbl_4bit_layout_oobfirst;
+ layout->oobavail = 0;
+ for (i = 0; i < ARRAY_SIZE(layout->oobfree) &&
+ layout->oobfree[i].length; i++)
+ layout->oobavail += layout->oobfree[i].length;
+
+ nand->write_page = nand_davinci_write_page;
+ nand->ecc.read_page = nand_davinci_read_page_hwecc;
+#endif
+ nand->chip_delay = 0;
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+ nand->bbt_options |= NAND_BBT_USE_FLASH;
+#endif
+#ifdef CONFIG_SYS_NAND_NO_SUBPAGE_WRITE
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+#endif
+#ifdef CONFIG_SYS_NAND_BUSWIDTH_16BIT
+ nand->options |= NAND_BUSWIDTH_16;
+#endif
+#ifdef CONFIG_SYS_NAND_HW_ECC
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ nand->ecc.calculate = nand_davinci_calculate_ecc;
+ nand->ecc.correct = nand_davinci_correct_data;
+ nand->ecc.hwctl = nand_davinci_enable_hwecc;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif /* CONFIG_SYS_NAND_HW_ECC */
+#ifdef CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST
+ nand->ecc.mode = NAND_ECC_HW_OOB_FIRST;
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 10;
+ nand->ecc.strength = 4;
+ nand->ecc.calculate = nand_davinci_4bit_calculate_ecc;
+ nand->ecc.correct = nand_davinci_4bit_correct_data;
+ nand->ecc.hwctl = nand_davinci_4bit_enable_hwecc;
+ nand->ecc.layout = &nand_davinci_4bit_layout_oobfirst;
+#endif
+ /* Set address of hardware control function */
+ nand->cmd_ctrl = nand_davinci_hwcontrol;
+
+ nand->read_buf = nand_davinci_read_buf;
+ nand->write_buf = nand_davinci_write_buf;
+
+ nand->dev_ready = nand_davinci_dev_ready;
+}
+
+#ifdef CONFIG_SYS_NAND_SELF_INIT
+static int davinci_nand_probe(struct udevice *dev)
+{
+ struct nand_chip *nand = dev_get_priv(dev);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ int ret;
+
+ nand->IO_ADDR_R = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ nand->IO_ADDR_W = (void __iomem *)CONFIG_SYS_NAND_BASE;
+
+ davinci_nand_init(nand);
+
+ ret = nand_scan(mtd, CONFIG_SYS_NAND_MAX_CHIPS);
+ if (ret)
+ return ret;
+
+ return nand_register(0, mtd);
+}
+
+static const struct udevice_id davinci_nand_ids[] = {
+ { .compatible = "ti,davinci-nand" },
+ { }
+};
+
+U_BOOT_DRIVER(davinci_nand) = {
+ .name = "davinci-nand",
+ .id = UCLASS_MTD,
+ .of_match = davinci_nand_ids,
+ .probe = davinci_nand_probe,
+ .priv_auto = sizeof(struct nand_chip),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(davinci_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s: %d\n", dev->name, ret);
+}
+#else
+int board_nand_init(struct nand_chip *chip) __attribute__((weak));
+int board_nand_init(struct nand_chip *chip)
+{
+ davinci_nand_init(chip);
+ return 0;
+}
+#endif /* CONFIG_SYS_NAND_SELF_INIT */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/denali.c b/roms/u-boot/drivers/mtd/nand/raw/denali.c
new file mode 100644
index 000000000..ab91db854
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/denali.c
@@ -0,0 +1,1376 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014 Panasonic Corporation
+ * Copyright (C) 2013-2014, Altera Corporation <www.altera.com>
+ * Copyright (C) 2009-2010, Intel Corporation and its suppliers.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <malloc.h>
+#include <nand.h>
+#include <asm/cache.h>
+#include <asm/dma-mapping.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME "denali-nand"
+
+/* for Indexed Addressing */
+#define DENALI_INDEXED_CTRL 0x00
+#define DENALI_INDEXED_DATA 0x10
+
+#define DENALI_MAP00 (0 << 26) /* direct access to buffer */
+#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
+#define DENALI_MAP10 (2 << 26) /* high-level control plane */
+#define DENALI_MAP11 (3 << 26) /* direct controller access */
+
+/* MAP11 access cycle type */
+#define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */
+#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */
+#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */
+
+/* MAP10 commands */
+#define DENALI_ERASE 0x01
+
+#define DENALI_BANK(denali) ((denali)->active_bank << 24)
+
+#define DENALI_INVALID_BANK -1
+#define DENALI_NR_BANKS 4
+
+static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
+}
+
+/*
+ * Direct Addressing - the slave address forms the control information (command
+ * type, bank, block, and page address). The slave data is the actual data to
+ * be transferred. This mode requires a 28-bit address region to be allocated.
+ */
+static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
+{
+ return ioread32(denali->host + addr);
+}
+
+static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(data, denali->host + addr);
+}
+
+/*
+ * Indexed Addressing - address translation module intervenes in passing the
+ * control information. This mode reduces the required address range. The
+ * control information and transferred data are latched by the registers in
+ * the translation module.
+ */
+static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ return ioread32(denali->host + DENALI_INDEXED_DATA);
+}
+
+static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
+ u32 data)
+{
+ iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
+ iowrite32(data, denali->host + DENALI_INDEXED_DATA);
+}
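+
+/*
+ * The host_read/host_write hooks used throughout this driver are
+ * presumably bound to one of the two access-mode pairs above when the
+ * controller is probed, depending on how it is integrated into the SoC.
+ */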
+
+/*
+ * Use the configuration feature register to determine the maximum number of
+ * banks that the hardware supports.
+ */
+static void denali_detect_max_banks(struct denali_nand_info *denali)
+{
+ uint32_t features = ioread32(denali->reg + FEATURES);
+
+ denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
+
+ /* the encoding changed from rev 5.0 to 5.1 */
+ if (denali->revision < 0x0501)
+ denali->max_banks <<= 1;
+}
+
+static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali)
+{
+ int i;
+
+ for (i = 0; i < DENALI_NR_BANKS; i++)
+ iowrite32(U32_MAX, denali->reg + INTR_EN(i));
+ iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE);
+}
+
+static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali)
+{
+ int i;
+
+ for (i = 0; i < DENALI_NR_BANKS; i++)
+ iowrite32(0, denali->reg + INTR_EN(i));
+ iowrite32(0, denali->reg + GLOBAL_INT_ENABLE);
+}
+
+static void denali_clear_irq(struct denali_nand_info *denali,
+ int bank, uint32_t irq_status)
+{
+ /* write one to clear bits */
+ iowrite32(irq_status, denali->reg + INTR_STATUS(bank));
+}
+
+static void denali_clear_irq_all(struct denali_nand_info *denali)
+{
+ int i;
+
+ for (i = 0; i < DENALI_NR_BANKS; i++)
+ denali_clear_irq(denali, i, U32_MAX);
+}
+
+static void __denali_check_irq(struct denali_nand_info *denali)
+{
+ uint32_t irq_status;
+ int i;
+
+ for (i = 0; i < DENALI_NR_BANKS; i++) {
+ irq_status = ioread32(denali->reg + INTR_STATUS(i));
+ denali_clear_irq(denali, i, irq_status);
+
+ if (i != denali->active_bank)
+ continue;
+
+ denali->irq_status |= irq_status;
+ }
+}
+
+static void denali_reset_irq(struct denali_nand_info *denali)
+{
+ denali->irq_status = 0;
+ denali->irq_mask = 0;
+}
+
+static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
+ uint32_t irq_mask)
+{
+ unsigned long time_left = 1000000;
+
+ while (time_left) {
+ __denali_check_irq(denali);
+
+ if (irq_mask & denali->irq_status)
+ return denali->irq_status;
+ udelay(1);
+ time_left--;
+ }
+
+ if (!time_left) {
+ dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
+ irq_mask);
+ return 0;
+ }
+
+ return denali->irq_status;
+}
+
+static uint32_t denali_check_irq(struct denali_nand_info *denali)
+{
+ __denali_check_irq(denali);
+
+ return denali->irq_status;
+}
+
+static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = denali->host_read(denali, addr);
+}
+
+static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
+ int i;
+
+ for (i = 0; i < len; i++)
+ denali->host_write(denali, addr, buf[i]);
+}
+
+static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
+ uint16_t *buf16 = (uint16_t *)buf;
+ int i;
+
+ for (i = 0; i < len / 2; i++)
+ buf16[i] = denali->host_read(denali, addr);
+}
+
+static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
+ const uint16_t *buf16 = (const uint16_t *)buf;
+ int i;
+
+ for (i = 0; i < len / 2; i++)
+ denali->host_write(denali, addr, buf16[i]);
+}
+
+static uint8_t denali_read_byte(struct mtd_info *mtd)
+{
+ uint8_t byte;
+
+ denali_read_buf(mtd, &byte, 1);
+
+ return byte;
+}
+
+static void denali_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+ denali_write_buf(mtd, &byte, 1);
+}
+
+static uint16_t denali_read_word(struct mtd_info *mtd)
+{
+ uint16_t word;
+
+ denali_read_buf16(mtd, (uint8_t *)&word, 2);
+
+ return word;
+}
+
+static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t type;
+
+ if (ctrl & NAND_CLE)
+ type = DENALI_MAP11_CMD;
+ else if (ctrl & NAND_ALE)
+ type = DENALI_MAP11_ADDR;
+ else
+ return;
+
+ /*
+ * Some commands are followed by chip->dev_ready or chip->waitfunc.
+ * irq_status must be cleared here to catch the R/B# interrupt later.
+ */
+ if (ctrl & NAND_CTRL_CHANGE)
+ denali_reset_irq(denali);
+
+ denali->host_write(denali, DENALI_BANK(denali) | type, dat);
+}
+
+static int denali_dev_ready(struct mtd_info *mtd)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ return !!(denali_check_irq(denali) & INTR__INT_ACT);
+}
+
+static int denali_check_erased_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ unsigned long uncor_ecc_flags,
+ unsigned int max_bitflips)
+{
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int i, ret, stat;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ecc_steps; i++) {
+ if (!(uncor_ecc_flags & BIT(i)))
+ continue;
+
+ stat = nand_check_erased_ecc_chunk(buf, ecc_size,
+ ecc_code, ecc_bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ buf += ecc_size;
+ ecc_code += ecc_bytes;
+ }
+
+ return max_bitflips;
+}
+
+static int denali_hw_ecc_fixup(struct mtd_info *mtd,
+ struct denali_nand_info *denali,
+ unsigned long *uncor_ecc_flags)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int bank = denali->active_bank;
+ uint32_t ecc_cor;
+ unsigned int max_bitflips;
+
+ ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank));
+ ecc_cor >>= ECC_COR_INFO__SHIFT(bank);
+
+ if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) {
+ /*
+ * This flag is set when an uncorrectable error occurs in at
+ * least one ECC sector. We cannot know how many sectors or
+ * which sector(s) are affected, so run the erased-page check
+ * for all sectors.
+ */
+ *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0);
+ return 0;
+ }
+
+ max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
+
+ /*
+ * The register holds the maximum of per-sector corrected bitflips.
+ * This is suitable for the return value of the ->read_page() callback.
+ * Unfortunately, we cannot know the total number of corrected bits in
+ * the page. Increase the stats by max_bitflips as a compromise.
+ */
+ mtd->ecc_stats.corrected += max_bitflips;
+
+ return max_bitflips;
+}
+
+static int denali_sw_ecc_fixup(struct mtd_info *mtd,
+ struct denali_nand_info *denali,
+ unsigned long *uncor_ecc_flags, uint8_t *buf)
+{
+ unsigned int ecc_size = denali->nand.ecc.size;
+ unsigned int bitflips = 0;
+ unsigned int max_bitflips = 0;
+ uint32_t err_addr, err_cor_info;
+ unsigned int err_byte, err_sector, err_device;
+ uint8_t err_cor_value;
+ unsigned int prev_sector = 0;
+ uint32_t irq_status;
+
+ denali_reset_irq(denali);
+
+ do {
+ err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
+ err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
+ err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
+
+ err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
+ err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
+ err_cor_info);
+ err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
+ err_cor_info);
+
+ /* reset the bitflip counter when crossing an ECC sector boundary */
+ if (err_sector != prev_sector)
+ bitflips = 0;
+
+ if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
+ /*
+ * Check later if this is a real ECC error, or
+ * an erased sector.
+ */
+ *uncor_ecc_flags |= BIT(err_sector);
+ } else if (err_byte < ecc_size) {
+ /*
+ * If err_byte is larger than ecc_size, the error
+ * happened in the OOB area, so we ignore it; there is
+ * no need to correct it. err_device identifies which
+ * NAND device the error bits belong to when more than
+ * one NAND chip is connected.
+ */
+ int offset;
+ unsigned int flips_in_byte;
+
+ offset = (err_sector * ecc_size + err_byte) *
+ denali->devs_per_cs + err_device;
+
+ /* correct the ECC error */
+ flips_in_byte = hweight8(buf[offset] ^ err_cor_value);
+ buf[offset] ^= err_cor_value;
+ mtd->ecc_stats.corrected += flips_in_byte;
+ bitflips += flips_in_byte;
+
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+
+ prev_sector = err_sector;
+ } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
+
+ /*
+ * Once all ECC errors have been handled, the controller triggers
+ * an ECC_TRANSACTION_DONE interrupt.
+ */
+ irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
+ if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
+ return -EIO;
+
+ return max_bitflips;
+}
+
+static void denali_setup_dma64(struct denali_nand_info *denali,
+ dma_addr_t dma_addr, int page, int write)
+{
+ uint32_t mode;
+ const int page_count = 1;
+
+ mode = DENALI_MAP10 | DENALI_BANK(denali) | page;
+
+ /* DMA is a three-step process */
+
+ /*
+ * 1. setup transfer type, interrupt when complete,
+ * burst len = 64 bytes, the number of pages
+ */
+ denali->host_write(denali, mode,
+ 0x01002000 | (64 << 16) | (write << 8) | page_count);
+
+ /* 2. set memory low address */
+ denali->host_write(denali, mode, lower_32_bits(dma_addr));
+
+ /* 3. set memory high address */
+ denali->host_write(denali, mode, upper_32_bits(dma_addr));
+}
+
+static void denali_setup_dma32(struct denali_nand_info *denali,
+ dma_addr_t dma_addr, int page, int write)
+{
+ uint32_t mode;
+ const int page_count = 1;
+
+ mode = DENALI_MAP10 | DENALI_BANK(denali);
+
+ /* DMA is a four-step process */
+
+ /* 1. setup transfer type and # of pages */
+ denali->host_write(denali, mode | page,
+ 0x2000 | (write << 8) | page_count);
+
+ /* 2. set memory high address bits 23:8 */
+ denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
+
+ /* 3. set memory low address bits 23:8 */
+ denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
+
+ /* 4. interrupt when complete, burst len = 64 bytes */
+ denali->host_write(denali, mode | 0x14000, 0x2400);
+}
+
+static int denali_pio_read(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw)
+{
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
+ uint32_t *buf32 = (uint32_t *)buf;
+ uint32_t irq_status, ecc_err_mask;
+ int i;
+
+ if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ else
+ ecc_err_mask = INTR__ECC_ERR;
+
+ denali_reset_irq(denali);
+
+ for (i = 0; i < size / 4; i++)
+ *buf32++ = denali->host_read(denali, addr);
+
+ irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
+ if (!(irq_status & INTR__PAGE_XFER_INC))
+ return -EIO;
+
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
+
+ return irq_status & ecc_err_mask ? -EBADMSG : 0;
+}
+
+static int denali_pio_write(struct denali_nand_info *denali,
+ const void *buf, size_t size, int page, int raw)
+{
+ u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
+ const uint32_t *buf32 = (uint32_t *)buf;
+ uint32_t irq_status;
+ int i;
+
+ denali_reset_irq(denali);
+
+ for (i = 0; i < size / 4; i++)
+ denali->host_write(denali, addr, *buf32++);
+
+ irq_status = denali_wait_for_irq(denali,
+ INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
+ if (!(irq_status & INTR__PROGRAM_COMP))
+ return -EIO;
+
+ return 0;
+}
+
+static int denali_pio_xfer(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw, int write)
+{
+ if (write)
+ return denali_pio_write(denali, buf, size, page, raw);
+ else
+ return denali_pio_read(denali, buf, size, page, raw);
+}
+
+static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw, int write)
+{
+ dma_addr_t dma_addr;
+ uint32_t irq_mask, irq_status, ecc_err_mask;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ dma_addr = dma_map_single(buf, size, dir);
+ if (dma_mapping_error(denali->dev, dma_addr)) {
+ dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n");
+ return denali_pio_xfer(denali, buf, size, page, raw, write);
+ }
+
+ if (write) {
+ /*
+ * INTR__PROGRAM_COMP is never asserted for the DMA transfer.
+ * We can use INTR__DMA_CMD_COMP instead. This flag is asserted
+ * when the page program is completed.
+ */
+ irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL;
+ ecc_err_mask = 0;
+ } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_UNCOR_ERR;
+ } else {
+ irq_mask = INTR__DMA_CMD_COMP;
+ ecc_err_mask = INTR__ECC_ERR;
+ }
+
+ iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+ /*
+ * The ->setup_dma() hook kicks DMA by using the data/command
+ * interface, which belongs to a different AXI port from the
+ * register interface. Read back the register to avoid a race.
+ */
+ ioread32(denali->reg + DMA_ENABLE);
+
+ denali_reset_irq(denali);
+ denali->setup_dma(denali, dma_addr, page, write);
+
+ irq_status = denali_wait_for_irq(denali, irq_mask);
+ if (!(irq_status & INTR__DMA_CMD_COMP))
+ ret = -EIO;
+ else if (irq_status & ecc_err_mask)
+ ret = -EBADMSG;
+
+ iowrite32(0, denali->reg + DMA_ENABLE);
+
+ dma_unmap_single(dma_addr, size, dir);
+
+ if (irq_status & INTR__ERASED_PAGE)
+ memset(buf, 0xff, size);
+
+ return ret;
+}
+
+static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
+ size_t size, int page, int raw, int write)
+{
+ iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
+ iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
+ denali->reg + TRANSFER_SPARE_REG);
+
+ if (denali->dma_avail)
+ return denali_dma_xfer(denali, buf, size, page, raw, write);
+ else
+ return denali_pio_xfer(denali, buf, size, page, raw, write);
+}
+
+static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int write)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0;
+ unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT;
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ uint8_t *bufpoi = chip->oob_poi;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int oob_skip = denali->oob_skip_bytes;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+
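+ /*
+ * A sketch of the controller's raw page view, assuming hypothetical
+ * ecc_size = 512 and ecc_bytes = 14: the page is a stream of
+ * (data + ECC) chunks, and the oob_skip_bytes reserved for the BBM
+ * sit at the writesize boundary, splitting whichever chunk crosses it:
+ *
+ * | 512 data | 14 ECC | 512 data | ... | BBM | ...chunks | OOB free |
+ */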
+ /* BBM at the beginning of the OOB area */
+ chip->cmdfunc(mtd, start_cmd, writesize, page);
+ if (write)
+ chip->write_buf(mtd, bufpoi, oob_skip);
+ else
+ chip->read_buf(mtd, bufpoi, oob_skip);
+ bufpoi += oob_skip;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ chip->cmdfunc(mtd, rnd_cmd, pos, -1);
+ if (write)
+ chip->write_buf(mtd, bufpoi, len);
+ else
+ chip->read_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1);
+ if (write)
+ chip->write_buf(mtd, bufpoi, len);
+ else
+ chip->read_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ }
+ }
+
+ /* OOB free */
+ len = oobsize - (bufpoi - chip->oob_poi);
+ chip->cmdfunc(mtd, rnd_cmd, size - len, -1);
+ if (write)
+ chip->write_buf(mtd, bufpoi, len);
+ else
+ chip->read_buf(mtd, bufpoi, len);
+}
+
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = denali->buf;
+ int oob_skip = denali->oob_skip_bytes;
+ size_t size = writesize + oobsize;
+ int ret, i, pos, len;
+
+ ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0);
+ if (ret)
+ return ret;
+
+ /* Arrange the buffer for syndrome payload/ecc layout */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(buf, tmp_buf + pos, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(buf, tmp_buf + writesize + oob_skip,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ uint8_t *oob = chip->oob_poi;
+
+ /* BBM at the beginning of the OOB area */
+ memcpy(oob, tmp_buf + writesize, oob_skip);
+ oob += oob_skip;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(oob, tmp_buf + pos, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(oob, tmp_buf + writesize + oob_skip,
+ len);
+ oob += len;
+ }
+ }
+
+ /* OOB free */
+ len = oobsize - (oob - chip->oob_poi);
+ memcpy(oob, tmp_buf + size - len, len);
+ }
+
+ return 0;
+}
+
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ denali_oob_xfer(mtd, chip, page, 0);
+
+ return 0;
+}
+
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int status;
+
+ denali_reset_irq(denali);
+
+ denali_oob_xfer(mtd, chip, page, 1);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ unsigned long uncor_ecc_flags = 0;
+ int stat = 0;
+ int ret;
+
+ ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0);
+ if (ret && ret != -EBADMSG)
+ return ret;
+
+ if (denali->caps & DENALI_CAP_HW_ECC_FIXUP)
+ stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags);
+ else if (ret == -EBADMSG)
+ stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf);
+
+ if (stat < 0)
+ return stat;
+
+ if (uncor_ecc_flags) {
+ ret = denali_read_oob(mtd, chip, page);
+ if (ret)
+ return ret;
+
+ stat = denali_check_erased_page(mtd, chip, buf,
+ uncor_ecc_flags, stat);
+ }
+
+ return stat;
+}
+
+static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ int writesize = mtd->writesize;
+ int oobsize = mtd->oobsize;
+ int ecc_steps = chip->ecc.steps;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ void *tmp_buf = denali->buf;
+ int oob_skip = denali->oob_skip_bytes;
+ size_t size = writesize + oobsize;
+ int i, pos, len;
+
+ /*
+ * Unless this is a full-page transfer, fill the buffer with 0xff
+ * first; this simplifies the logic.
+ */
+ if (!buf || !oob_required)
+ memset(tmp_buf, 0xff, size);
+
+ /* Arrange the buffer for syndrome payload/ecc layout */
+ if (buf) {
+ for (i = 0; i < ecc_steps; i++) {
+ pos = i * (ecc_size + ecc_bytes);
+ len = ecc_size;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, buf, len);
+ buf += len;
+ if (len < ecc_size) {
+ len = ecc_size - len;
+ memcpy(tmp_buf + writesize + oob_skip, buf,
+ len);
+ buf += len;
+ }
+ }
+ }
+
+ if (oob_required) {
+ const uint8_t *oob = chip->oob_poi;
+
+ /* BBM at the beginning of the OOB area */
+ memcpy(tmp_buf + writesize, oob, oob_skip);
+ oob += oob_skip;
+
+ /* OOB ECC */
+ for (i = 0; i < ecc_steps; i++) {
+ pos = ecc_size + i * (ecc_size + ecc_bytes);
+ len = ecc_bytes;
+
+ if (pos >= writesize)
+ pos += oob_skip;
+ else if (pos + len > writesize)
+ len = writesize - pos;
+
+ memcpy(tmp_buf + pos, oob, len);
+ oob += len;
+ if (len < ecc_bytes) {
+ len = ecc_bytes - len;
+ memcpy(tmp_buf + writesize + oob_skip, oob,
+ len);
+ oob += len;
+ }
+ }
+
+ /* OOB free */
+ len = oobsize - (oob - chip->oob_poi);
+ memcpy(tmp_buf + size - len, oob, len);
+ }
+
+ return denali_data_xfer(denali, tmp_buf, size, page, 1, 1);
+}
+
+static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ return denali_data_xfer(denali, (void *)buf, mtd->writesize,
+ page, 0, 1);
+}
+
+static void denali_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+ denali->active_bank = chip;
+}
+
+static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status;
+
+ /* R/B# pin transitioned from low to high? */
+ irq_status = denali_wait_for_irq(denali, INTR__INT_ACT);
+
+ return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL;
+}
+
+static int denali_erase(struct mtd_info *mtd, int page)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ uint32_t irq_status;
+
+ denali_reset_irq(denali);
+
+ denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
+ DENALI_ERASE);
+
+ /* wait for erase to complete or failure to occur */
+ irq_status = denali_wait_for_irq(denali,
+ INTR__ERASE_COMP | INTR__ERASE_FAIL);
+
+ return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL;
+}
+
+static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ const struct nand_sdr_timings *timings;
+ unsigned long t_x, mult_x;
+ int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data;
+ int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup;
+ int addr_2_data_mask;
+ uint32_t tmp;
+
+ timings = nand_get_sdr_timings(conf);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ /* clk_x period in picoseconds */
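+ /*
+ * For example, a hypothetical clk_x_rate of 200 MHz gives
+ * t_x = 10^12 / 200000000 = 5000 ps per clk_x cycle.
+ */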
+ t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate);
+ if (!t_x)
+ return -EINVAL;
+
+ /*
+ * The bus interface clock, clk_x, is phase-aligned with the core clock.
+ * clk_x runs at an integer multiple N of the core clock; N is fixed at
+ * IP delivery time and can be 4, 5, or 6.
+ */
+ mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate);
+ if (mult_x < 4 || mult_x > 6)
+ return -EINVAL;
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ /* tREA -> ACC_CLKS */
+ acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x);
+ acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE);
+
+ tmp = ioread32(denali->reg + ACC_CLKS);
+ tmp &= ~ACC_CLKS__VALUE;
+ tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
+ iowrite32(tmp, denali->reg + ACC_CLKS);
+
+ /* tRHW -> RE_2_WE */
+ re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x);
+ re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_WE);
+ tmp &= ~RE_2_WE__VALUE;
+ tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
+ iowrite32(tmp, denali->reg + RE_2_WE);
+
+ /* tRHZ -> RE_2_RE */
+ re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x);
+ re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE);
+
+ tmp = ioread32(denali->reg + RE_2_RE);
+ tmp &= ~RE_2_RE__VALUE;
+ tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
+ iowrite32(tmp, denali->reg + RE_2_RE);
+
+ /*
+ * tCCS, tWHR -> WE_2_RE
+ *
+ * With WE_2_RE properly set, the Denali controller automatically takes
+ * care of the delay; the driver need not set NAND_WAIT_TCCS.
+ */
+ we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x);
+ we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
+
+ tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
+ tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
+ tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
+ iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
+
+ /* tADL -> ADDR_2_DATA */
+
+ /* for older versions, ADDR_2_DATA is only 6 bits wide */
+ addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ if (denali->revision < 0x0501)
+ addr_2_data_mask >>= 1;
+
+ addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x);
+ addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
+
+ tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
+ tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
+ tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
+ iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
+
+ /* tREH, tWH -> RDWR_EN_HI_CNT */
+ rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min),
+ t_x);
+ rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
+ tmp &= ~RDWR_EN_HI_CNT__VALUE;
+ tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
+ iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
+
+ /* tRP, tWP -> RDWR_EN_LO_CNT */
+ rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x);
+ rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min),
+ t_x);
+ rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x);
+ rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi);
+ rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE);
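+ /*
+ * Worked example with hypothetical values: t_x = 5000 ps,
+ * tRP_min = 12 ns -> rdwr_en_lo = 3; tRC_min = 25 ns and mult_x = 4
+ * -> rdwr_en_lo_hi = max(5, 4) = 5; with rdwr_en_hi = 2, the final
+ * rdwr_en_lo is max(3, 5 - 2) = 3 cycles.
+ */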
+
+ tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
+ tmp &= ~RDWR_EN_LO_CNT__VALUE;
+ tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
+ iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
+
+ /* tCS, tCEA -> CS_SETUP_CNT */
+ cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo,
+ (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks,
+ 0);
+ cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE);
+
+ tmp = ioread32(denali->reg + CS_SETUP_CNT);
+ tmp &= ~CS_SETUP_CNT__VALUE;
+ tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
+ iowrite32(tmp, denali->reg + CS_SETUP_CNT);
+
+ return 0;
+}
+
+static void denali_reset_banks(struct denali_nand_info *denali)
+{
+ u32 irq_status;
+ int i;
+
+ for (i = 0; i < denali->max_banks; i++) {
+ denali->active_bank = i;
+
+ denali_reset_irq(denali);
+
+ iowrite32(DEVICE_RESET__BANK(i),
+ denali->reg + DEVICE_RESET);
+
+ irq_status = denali_wait_for_irq(denali,
+ INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
+ if (!(irq_status & INTR__INT_ACT))
+ break;
+ }
+
+ dev_dbg(denali->dev, "%d chips connected\n", i);
+ denali->max_banks = i;
+}
+
+static void denali_hw_init(struct denali_nand_info *denali)
+{
+ /*
+ * The REVISION register may not be reliable. Platforms are allowed to
+ * override it.
+ */
+ if (!denali->revision)
+ denali->revision = swab16(ioread32(denali->reg + REVISION));
+
+ /*
+ * Set how many bytes should be skipped before writing data to the
+ * OOB area. If the platform requests a non-zero value, write it to
+ * the register. Otherwise, read the value back, expecting that it
+ * has already been set up by firmware.
+ */
+ if (denali->oob_skip_bytes)
+ iowrite32(denali->oob_skip_bytes,
+ denali->reg + SPARE_AREA_SKIP_BYTES);
+ else
+ denali->oob_skip_bytes = ioread32(denali->reg +
+ SPARE_AREA_SKIP_BYTES);
+
+ denali_detect_max_banks(denali);
+ iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
+ iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
+
+ iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
+ iowrite32(WRITE_PROTECT__FLAG, denali->reg + WRITE_PROTECT);
+}
+
+int denali_calc_ecc_bytes(int step_size, int strength)
+{
+ /* BCH code. Denali requires ecc.bytes to be a multiple of 2 */
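+ /*
+ * E.g. for step_size = 512, strength = 8 (hypothetical values):
+ * fls(4096) = 13, so DIV_ROUND_UP(8 * 13, 16) * 2 = 14 ECC bytes
+ * per 512-byte step.
+ */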
+ return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
+}
+EXPORT_SYMBOL(denali_calc_ecc_bytes);
+
+static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip,
+ struct denali_nand_info *denali)
+{
+ int oobavail = mtd->oobsize - denali->oob_skip_bytes;
+ int ret;
+
+ /*
+ * If .size and .strength are already set (usually by DT),
+ * check if they are supported by this controller.
+ */
+ if (chip->ecc.size && chip->ecc.strength)
+ return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail);
+
+ /*
+ * We want .size and .strength to match the chip's requirement as
+ * closely as possible, unless NAND_ECC_MAXIMIZE is requested.
+ */
+ if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
+ ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail);
+ if (!ret)
+ return 0;
+ }
+
+ /* Falling back to the maximum ECC strength is the last resort */
+ return nand_maximize_ecc(chip, denali->ecc_caps, oobavail);
+}
+
+static struct nand_ecclayout nand_oob;
+
+static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = denali->oob_skip_bytes;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int denali_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total + denali->oob_skip_bytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
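+/*
+ * The resulting layout, assuming hypothetical oob_skip_bytes = 2 and
+ * chip->ecc.total = 112 on a 224-byte OOB: bytes [0, 2) hold the BBM,
+ * [2, 114) the ECC, and [114, 224) are free.
+ */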
+static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
+ .ecc = denali_ooblayout_ecc,
+ .rfree = denali_ooblayout_free,
+};
+
+static int denali_multidev_fixup(struct denali_nand_info *denali)
+{
+ struct nand_chip *chip = &denali->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * Support for multiple devices:
+ * When the IP configuration is x16 capable and two x8 chips are
+ * connected in parallel, DEVICES_CONNECTED should be set to 2.
+ * The core framework knows nothing about this fact, so we must
+ * tell it the _logical_ pagesize and whatever else is necessary.
+ */
+ denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED);
+
+ /*
+ * On some SoCs, DEVICES_CONNECTED is not auto-detected and is left
+ * at 0. In that case, set it to 1.
+ */
+ if (denali->devs_per_cs == 0) {
+ denali->devs_per_cs = 1;
+ iowrite32(1, denali->reg + DEVICES_CONNECTED);
+ }
+
+ if (denali->devs_per_cs == 1)
+ return 0;
+
+ if (denali->devs_per_cs != 2) {
+ dev_err(denali->dev, "unsupported number of devices %d\n",
+ denali->devs_per_cs);
+ return -EINVAL;
+ }
+
+ /* 2 chips in parallel */
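+ /*
+ * E.g. two x8 chips with hypothetical 2 KiB pages and 64-byte OOB
+ * appear to the core as a single chip with 4 KiB logical pages and
+ * a 128-byte OOB.
+ */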
+ mtd->size <<= 1;
+ mtd->erasesize <<= 1;
+ mtd->writesize <<= 1;
+ mtd->oobsize <<= 1;
+ chip->chipsize <<= 1;
+ chip->page_shift += 1;
+ chip->phys_erase_shift += 1;
+ chip->bbt_erase_shift += 1;
+ chip->chip_shift += 1;
+ chip->pagemask <<= 1;
+ chip->ecc.size <<= 1;
+ chip->ecc.bytes <<= 1;
+ chip->ecc.strength <<= 1;
+ denali->oob_skip_bytes <<= 1;
+
+ return 0;
+}
+
+int denali_wait_reset_complete(struct denali_nand_info *denali)
+{
+ u32 irq_status;
+
+ irq_status = denali_wait_for_irq(denali, INTR__RST_COMP);
+ if (!(irq_status & INTR__RST_COMP))
+ return -EIO;
+
+ return 0;
+}
+
+int denali_init(struct denali_nand_info *denali)
+{
+ struct nand_chip *chip = &denali->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 features = ioread32(denali->reg + FEATURES);
+ int ret;
+
+ denali_hw_init(denali);
+
+ denali_clear_irq_all(denali);
+
+ denali_reset_banks(denali);
+
+ denali->active_bank = DENALI_INVALID_BANK;
+
+ chip->flash_node = dev_of_offset(denali->dev);
+ /* Fall back to the default name if the DT did not provide a "label" property */
+ if (!mtd->name)
+ mtd->name = "denali-nand";
+
+ chip->select_chip = denali_select_chip;
+ chip->read_byte = denali_read_byte;
+ chip->write_byte = denali_write_byte;
+ chip->read_word = denali_read_word;
+ chip->cmd_ctrl = denali_cmd_ctrl;
+ chip->dev_ready = denali_dev_ready;
+ chip->waitfunc = denali_waitfunc;
+
+ if (features & FEATURES__INDEX_ADDR) {
+ denali->host_read = denali_indexed_read;
+ denali->host_write = denali_indexed_write;
+ } else {
+ denali->host_read = denali_direct_read;
+ denali->host_write = denali_direct_write;
+ }
+
+ /* clk rate info is needed for setup_data_interface */
+ if (denali->clk_x_rate)
+ chip->setup_data_interface = denali_setup_data_interface;
+
+ ret = nand_scan_ident(mtd, denali->max_banks, NULL);
+ if (ret)
+ return ret;
+
+ if (ioread32(denali->reg + FEATURES) & FEATURES__DMA)
+ denali->dma_avail = 1;
+
+ if (denali->dma_avail) {
+ chip->buf_align = ARCH_DMA_MINALIGN;
+ if (denali->caps & DENALI_CAP_DMA_64BIT)
+ denali->setup_dma = denali_setup_dma64;
+ else
+ denali->setup_dma = denali_setup_dma32;
+ } else {
+ chip->buf_align = 4;
+ }
+
+ chip->options |= NAND_USE_BOUNCE_BUFFER;
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+
+ /* no subpage writes on denali */
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ ret = denali_ecc_setup(mtd, chip, denali);
+ if (ret) {
+ dev_err(denali->dev, "Failed to setup ECC settings.\n");
+ return ret;
+ }
+
+ dev_dbg(denali->dev,
+ "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
+ chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
+
+ iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
+ FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
+ denali->reg + ECC_CORRECTION);
+ iowrite32(mtd->erasesize / mtd->writesize,
+ denali->reg + PAGES_PER_BLOCK);
+ iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
+ denali->reg + DEVICE_WIDTH);
+ iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
+ denali->reg + TWO_ROW_ADDR_CYCLES);
+ iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
+ iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
+
+ iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE);
+ iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE);
+ /* chip->ecc.steps is set by nand_scan_tail(); not available here */
+ iowrite32(mtd->writesize / chip->ecc.size,
+ denali->reg + CFG_NUM_DATA_BLOCKS);
+
+ mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+
+ nand_oob.eccbytes = denali->nand.ecc.bytes;
+ denali->nand.ecc.layout = &nand_oob;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->read_buf = denali_read_buf16;
+ chip->write_buf = denali_write_buf16;
+ } else {
+ chip->read_buf = denali_read_buf;
+ chip->write_buf = denali_write_buf;
+ }
+ chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
+ chip->ecc.read_page = denali_read_page;
+ chip->ecc.read_page_raw = denali_read_page_raw;
+ chip->ecc.write_page = denali_write_page;
+ chip->ecc.write_page_raw = denali_write_page_raw;
+ chip->ecc.read_oob = denali_read_oob;
+ chip->ecc.write_oob = denali_write_oob;
+ chip->erase = denali_erase;
+
+ ret = denali_multidev_fixup(denali);
+ if (ret)
+ return ret;
+
+ /*
+ * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not
+ * use devm_kmalloc() because the memory allocated by devm_ does not
+ * guarantee DMA-safe alignment.
+ */
+ denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!denali->buf)
+ return -ENOMEM;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ goto free_buf;
+
+ ret = nand_register(0, mtd);
+ if (ret) {
+ dev_err(denali->dev, "Failed to register MTD: %d\n", ret);
+ goto free_buf;
+ }
+ return 0;
+
+free_buf:
+ kfree(denali->buf);
+
+ return ret;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/denali.h b/roms/u-boot/drivers/mtd/nand/raw/denali.h
new file mode 100644
index 000000000..6cd02b2e2
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/denali.h
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2013-2014 Altera Corporation <www.altera.com>
+ * Copyright (C) 2009-2010, Intel Corporation and its suppliers.
+ */
+
+#ifndef __DENALI_H__
+#define __DENALI_H__
+
+#include <linux/bitops.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/types.h>
+
+#define DEVICE_RESET 0x0
+#define DEVICE_RESET__BANK(bank) BIT(bank)
+
+#define TRANSFER_SPARE_REG 0x10
+#define TRANSFER_SPARE_REG__FLAG BIT(0)
+
+#define LOAD_WAIT_CNT 0x20
+#define LOAD_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define PROGRAM_WAIT_CNT 0x30
+#define PROGRAM_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define ERASE_WAIT_CNT 0x40
+#define ERASE_WAIT_CNT__VALUE GENMASK(15, 0)
+
+#define INT_MON_CYCCNT 0x50
+#define INT_MON_CYCCNT__VALUE GENMASK(15, 0)
+
+#define RB_PIN_ENABLED 0x60
+#define RB_PIN_ENABLED__BANK(bank) BIT(bank)
+
+#define MULTIPLANE_OPERATION 0x70
+#define MULTIPLANE_OPERATION__FLAG BIT(0)
+
+#define MULTIPLANE_READ_ENABLE 0x80
+#define MULTIPLANE_READ_ENABLE__FLAG BIT(0)
+
+#define COPYBACK_DISABLE 0x90
+#define COPYBACK_DISABLE__FLAG BIT(0)
+
+#define CACHE_WRITE_ENABLE 0xa0
+#define CACHE_WRITE_ENABLE__FLAG BIT(0)
+
+#define CACHE_READ_ENABLE 0xb0
+#define CACHE_READ_ENABLE__FLAG BIT(0)
+
+#define PREFETCH_MODE 0xc0
+#define PREFETCH_MODE__PREFETCH_EN BIT(0)
+#define PREFETCH_MODE__PREFETCH_BURST_LENGTH GENMASK(15, 4)
+
+#define CHIP_ENABLE_DONT_CARE 0xd0
+#define CHIP_EN_DONT_CARE__FLAG BIT(0)
+
+#define ECC_ENABLE 0xe0
+#define ECC_ENABLE__FLAG BIT(0)
+
+#define GLOBAL_INT_ENABLE 0xf0
+#define GLOBAL_INT_EN_FLAG BIT(0)
+
+#define TWHR2_AND_WE_2_RE 0x100
+#define TWHR2_AND_WE_2_RE__WE_2_RE GENMASK(5, 0)
+#define TWHR2_AND_WE_2_RE__TWHR2 GENMASK(13, 8)
+
+#define TCWAW_AND_ADDR_2_DATA 0x110
+/* The width of ADDR_2_DATA is 6 bits for old IP, 7 bits for new IP */
+#define TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA GENMASK(6, 0)
+#define TCWAW_AND_ADDR_2_DATA__TCWAW GENMASK(13, 8)
+
+#define RE_2_WE 0x120
+#define RE_2_WE__VALUE GENMASK(5, 0)
+
+#define ACC_CLKS 0x130
+#define ACC_CLKS__VALUE GENMASK(3, 0)
+
+#define NUMBER_OF_PLANES 0x140
+#define NUMBER_OF_PLANES__VALUE GENMASK(2, 0)
+
+#define PAGES_PER_BLOCK 0x150
+#define PAGES_PER_BLOCK__VALUE GENMASK(15, 0)
+
+#define DEVICE_WIDTH 0x160
+#define DEVICE_WIDTH__VALUE GENMASK(1, 0)
+
+#define DEVICE_MAIN_AREA_SIZE 0x170
+#define DEVICE_MAIN_AREA_SIZE__VALUE GENMASK(15, 0)
+
+#define DEVICE_SPARE_AREA_SIZE 0x180
+#define DEVICE_SPARE_AREA_SIZE__VALUE GENMASK(15, 0)
+
+#define TWO_ROW_ADDR_CYCLES 0x190
+#define TWO_ROW_ADDR_CYCLES__FLAG BIT(0)
+
+#define MULTIPLANE_ADDR_RESTRICT 0x1a0
+#define MULTIPLANE_ADDR_RESTRICT__FLAG BIT(0)
+
+#define ECC_CORRECTION 0x1b0
+#define ECC_CORRECTION__VALUE GENMASK(4, 0)
+#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
+
+#define READ_MODE 0x1c0
+#define READ_MODE__VALUE GENMASK(3, 0)
+
+#define WRITE_MODE 0x1d0
+#define WRITE_MODE__VALUE GENMASK(3, 0)
+
+#define COPYBACK_MODE 0x1e0
+#define COPYBACK_MODE__VALUE GENMASK(3, 0)
+
+#define RDWR_EN_LO_CNT 0x1f0
+#define RDWR_EN_LO_CNT__VALUE GENMASK(4, 0)
+
+#define RDWR_EN_HI_CNT 0x200
+#define RDWR_EN_HI_CNT__VALUE GENMASK(4, 0)
+
+#define MAX_RD_DELAY 0x210
+#define MAX_RD_DELAY__VALUE GENMASK(3, 0)
+
+#define CS_SETUP_CNT 0x220
+#define CS_SETUP_CNT__VALUE GENMASK(4, 0)
+#define CS_SETUP_CNT__TWB GENMASK(17, 12)
+
+#define SPARE_AREA_SKIP_BYTES 0x230
+#define SPARE_AREA_SKIP_BYTES__VALUE GENMASK(5, 0)
+
+#define SPARE_AREA_MARKER 0x240
+#define SPARE_AREA_MARKER__VALUE GENMASK(15, 0)
+
+#define DEVICES_CONNECTED 0x250
+#define DEVICES_CONNECTED__VALUE GENMASK(2, 0)
+
+#define DIE_MASK 0x260
+#define DIE_MASK__VALUE GENMASK(7, 0)
+
+#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
+#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE GENMASK(15, 0)
+
+#define WRITE_PROTECT 0x280
+#define WRITE_PROTECT__FLAG BIT(0)
+
+#define RE_2_RE 0x290
+#define RE_2_RE__VALUE GENMASK(5, 0)
+
+#define MANUFACTURER_ID 0x300
+#define MANUFACTURER_ID__VALUE GENMASK(7, 0)
+
+#define DEVICE_ID 0x310
+#define DEVICE_ID__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_0 0x320
+#define DEVICE_PARAM_0__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_1 0x330
+#define DEVICE_PARAM_1__VALUE GENMASK(7, 0)
+
+#define DEVICE_PARAM_2 0x340
+#define DEVICE_PARAM_2__VALUE GENMASK(7, 0)
+
+#define LOGICAL_PAGE_DATA_SIZE 0x350
+#define LOGICAL_PAGE_DATA_SIZE__VALUE GENMASK(15, 0)
+
+#define LOGICAL_PAGE_SPARE_SIZE 0x360
+#define LOGICAL_PAGE_SPARE_SIZE__VALUE GENMASK(15, 0)
+
+#define REVISION 0x370
+#define REVISION__VALUE GENMASK(15, 0)
+
+#define ONFI_DEVICE_FEATURES 0x380
+#define ONFI_DEVICE_FEATURES__VALUE GENMASK(5, 0)
+
+#define ONFI_OPTIONAL_COMMANDS 0x390
+#define ONFI_OPTIONAL_COMMANDS__VALUE GENMASK(5, 0)
+
+#define ONFI_TIMING_MODE 0x3a0
+#define ONFI_TIMING_MODE__VALUE GENMASK(5, 0)
+
+#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
+#define ONFI_PGM_CACHE_TIMING_MODE__VALUE GENMASK(5, 0)
+
+#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
+#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS GENMASK(7, 0)
+#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE BIT(8)
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE GENMASK(15, 0)
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE GENMASK(15, 0)
+
+#define FEATURES 0x3f0
+#define FEATURES__N_BANKS GENMASK(1, 0)
+#define FEATURES__ECC_MAX_ERR GENMASK(5, 2)
+#define FEATURES__DMA BIT(6)
+#define FEATURES__CMD_DMA BIT(7)
+#define FEATURES__PARTITION BIT(8)
+#define FEATURES__XDMA_SIDEBAND BIT(9)
+#define FEATURES__GPREG BIT(10)
+#define FEATURES__INDEX_ADDR BIT(11)
+
+#define TRANSFER_MODE 0x400
+#define TRANSFER_MODE__VALUE GENMASK(1, 0)
+
+#define INTR_STATUS(bank) (0x410 + (bank) * 0x50)
+#define INTR_EN(bank) (0x420 + (bank) * 0x50)
+/* bit[1:0] is used differently depending on IP version */
+#define INTR__ECC_UNCOR_ERR BIT(0) /* new IP */
+#define INTR__ECC_TRANSACTION_DONE BIT(0) /* old IP */
+#define INTR__ECC_ERR BIT(1) /* old IP */
+#define INTR__DMA_CMD_COMP BIT(2)
+#define INTR__TIME_OUT BIT(3)
+#define INTR__PROGRAM_FAIL BIT(4)
+#define INTR__ERASE_FAIL BIT(5)
+#define INTR__LOAD_COMP BIT(6)
+#define INTR__PROGRAM_COMP BIT(7)
+#define INTR__ERASE_COMP BIT(8)
+#define INTR__PIPE_CPYBCK_CMD_COMP BIT(9)
+#define INTR__LOCKED_BLK BIT(10)
+#define INTR__UNSUP_CMD BIT(11)
+#define INTR__INT_ACT BIT(12)
+#define INTR__RST_COMP BIT(13)
+#define INTR__PIPE_CMD_ERR BIT(14)
+#define INTR__PAGE_XFER_INC BIT(15)
+#define INTR__ERASED_PAGE BIT(16)
+
+#define PAGE_CNT(bank) (0x430 + (bank) * 0x50)
+#define ERR_PAGE_ADDR(bank) (0x440 + (bank) * 0x50)
+#define ERR_BLOCK_ADDR(bank) (0x450 + (bank) * 0x50)
+
+#define ECC_THRESHOLD 0x600
+#define ECC_THRESHOLD__VALUE GENMASK(9, 0)
+
+#define ECC_ERROR_BLOCK_ADDRESS 0x610
+#define ECC_ERROR_BLOCK_ADDRESS__VALUE GENMASK(15, 0)
+
+#define ECC_ERROR_PAGE_ADDRESS 0x620
+#define ECC_ERROR_PAGE_ADDRESS__VALUE GENMASK(11, 0)
+#define ECC_ERROR_PAGE_ADDRESS__BANK GENMASK(15, 12)
+
+#define ECC_ERROR_ADDRESS 0x630
+#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
+#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
+
+#define ERR_CORRECTION_INFO 0x640
+#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
+#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
+#define ERR_CORRECTION_INFO__UNCOR BIT(14)
+#define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
+
+#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
+#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
+#define ECC_COR_INFO__MAX_ERRORS GENMASK(6, 0)
+#define ECC_COR_INFO__UNCOR_ERR BIT(7)
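+/*
+ * Two banks share one ECC_COR_INFO register: e.g. banks 2 and 3 both
+ * map to 0x660, with bank 2 in bits [7:0] and bank 3 in bits [15:8].
+ */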
+
+#define CFG_DATA_BLOCK_SIZE 0x6b0
+
+#define CFG_LAST_DATA_BLOCK_SIZE 0x6c0
+
+#define CFG_NUM_DATA_BLOCKS 0x6d0
+
+#define CFG_META_DATA_SIZE 0x6e0
+
+#define DMA_ENABLE 0x700
+#define DMA_ENABLE__FLAG BIT(0)
+
+#define IGNORE_ECC_DONE 0x710
+#define IGNORE_ECC_DONE__FLAG BIT(0)
+
+#define DMA_INTR 0x720
+#define DMA_INTR_EN 0x730
+#define DMA_INTR__TARGET_ERROR BIT(0)
+#define DMA_INTR__DESC_COMP_CHANNEL0 BIT(1)
+#define DMA_INTR__DESC_COMP_CHANNEL1 BIT(2)
+#define DMA_INTR__DESC_COMP_CHANNEL2 BIT(3)
+#define DMA_INTR__DESC_COMP_CHANNEL3 BIT(4)
+#define DMA_INTR__MEMCOPY_DESC_COMP BIT(5)
+
+#define TARGET_ERR_ADDR_LO 0x740
+#define TARGET_ERR_ADDR_LO__VALUE GENMASK(15, 0)
+
+#define TARGET_ERR_ADDR_HI 0x750
+#define TARGET_ERR_ADDR_HI__VALUE GENMASK(15, 0)
+
+#define CHNL_ACTIVE 0x760
+#define CHNL_ACTIVE__CHANNEL0 BIT(0)
+#define CHNL_ACTIVE__CHANNEL1 BIT(1)
+#define CHNL_ACTIVE__CHANNEL2 BIT(2)
+#define CHNL_ACTIVE__CHANNEL3 BIT(3)
+
+struct udevice;
+
+struct denali_nand_info {
+ struct nand_chip nand;
+ unsigned long clk_rate; /* core clock rate */
+ unsigned long clk_x_rate; /* bus interface clock rate */
+ int active_bank; /* currently selected bank */
+ struct udevice *dev;
+ uint32_t page;
+ void __iomem *reg; /* Register Interface */
+ void __iomem *host; /* Host Data/Command Interface */
+ u32 irq_mask; /* interrupts we are waiting for */
+ u32 irq_status; /* interrupts that have happened */
+ int irq;
+ void *buf; /* for syndrome layout conversion */
+ dma_addr_t dma_addr;
+ int dma_avail; /* can support DMA? */
+ int devs_per_cs; /* devices connected in parallel */
+ int oob_skip_bytes; /* number of bytes reserved for BBM */
+ int max_banks;
+ unsigned int revision; /* IP revision */
+ unsigned int caps; /* IP capability (or quirk) */
+ const struct nand_ecc_caps *ecc_caps;
+ u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
+ void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
+ void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
+ int page, int write);
+};
+
+#define DENALI_CAP_HW_ECC_FIXUP BIT(0)
+#define DENALI_CAP_DMA_64BIT BIT(1)
+
+int denali_calc_ecc_bytes(int step_size, int strength);
+int denali_wait_reset_complete(struct denali_nand_info *denali);
+int denali_init(struct denali_nand_info *denali);
+
+#endif /* __DENALI_H__ */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/denali_dt.c b/roms/u-boot/drivers/mtd/nand/raw/denali_dt.c
new file mode 100644
index 000000000..cf4df0168
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/denali_dt.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Socionext Inc.
+ * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/printk.h>
+#include <reset.h>
+
+#include "denali.h"
+
+struct denali_dt_data {
+ unsigned int revision;
+ unsigned int caps;
+ unsigned int oob_skip_bytes;
+ const struct nand_ecc_caps *ecc_caps;
+};
+
+NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes,
+ 512, 8, 15);
+static const struct denali_dt_data denali_socfpga_data = {
+ .caps = DENALI_CAP_HW_ECC_FIXUP,
+ .oob_skip_bytes = 2,
+ .ecc_caps = &denali_socfpga_ecc_caps,
+};
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16, 24);
+static const struct denali_dt_data denali_uniphier_v5a_data = {
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
+ .ecc_caps = &denali_uniphier_v5a_ecc_caps,
+};
+
+NAND_ECC_CAPS_SINGLE(denali_uniphier_v5b_ecc_caps, denali_calc_ecc_bytes,
+ 1024, 8, 16);
+static const struct denali_dt_data denali_uniphier_v5b_data = {
+ .revision = 0x0501,
+ .caps = DENALI_CAP_HW_ECC_FIXUP |
+ DENALI_CAP_DMA_64BIT,
+ .oob_skip_bytes = 8,
+ .ecc_caps = &denali_uniphier_v5b_ecc_caps,
+};
+
+static const struct udevice_id denali_nand_dt_ids[] = {
+ {
+ .compatible = "altr,socfpga-denali-nand",
+ .data = (unsigned long)&denali_socfpga_data,
+ },
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5a",
+ .data = (unsigned long)&denali_uniphier_v5a_data,
+ },
+ {
+ .compatible = "socionext,uniphier-denali-nand-v5b",
+ .data = (unsigned long)&denali_uniphier_v5b_data,
+ },
+ { /* sentinel */ }
+};
+
+static int denali_dt_probe(struct udevice *dev)
+{
+ struct denali_nand_info *denali = dev_get_priv(dev);
+ const struct denali_dt_data *data;
+ struct clk clk, clk_x, clk_ecc;
+ struct reset_ctl_bulk resets;
+ struct resource res;
+ int ret;
+
+ data = (void *)dev_get_driver_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ denali->revision = data->revision;
+ denali->caps = data->caps;
+ denali->oob_skip_bytes = data->oob_skip_bytes;
+ denali->ecc_caps = data->ecc_caps;
+
+ denali->dev = dev;
+
+ ret = dev_read_resource_byname(dev, "denali_reg", &res);
+ if (ret)
+ return ret;
+
+ denali->reg = devm_ioremap(dev, res.start, resource_size(&res));
+
+ ret = dev_read_resource_byname(dev, "nand_data", &res);
+ if (ret)
+ return ret;
+
+ denali->host = devm_ioremap(dev, res.start, resource_size(&res));
+
+ ret = clk_get_by_name(dev, "nand", &clk);
+ if (ret)
+ ret = clk_get_by_index(dev, 0, &clk);
+ if (ret)
+ clk.dev = NULL;
+
+ ret = clk_get_by_name(dev, "nand_x", &clk_x);
+ if (ret)
+ clk_x.dev = NULL;
+
+ ret = clk_get_by_name(dev, "ecc", &clk_ecc);
+ if (ret)
+ clk_ecc.dev = NULL;
+
+ if (clk.dev) {
+ ret = clk_enable(&clk);
+ if (ret)
+ return ret;
+ }
+
+ if (clk_x.dev) {
+ ret = clk_enable(&clk_x);
+ if (ret)
+ return ret;
+ }
+
+ if (clk_ecc.dev) {
+ ret = clk_enable(&clk_ecc);
+ if (ret)
+ return ret;
+ }
+
+ if (clk_x.dev) {
+ denali->clk_rate = clk_get_rate(&clk);
+ denali->clk_x_rate = clk_get_rate(&clk_x);
+ } else {
+ /*
+ * Hardcode the clock rates for backward compatibility.
+ * This works for both SOCFPGA and UniPhier.
+ */
+ dev_notice(dev,
+ "necessary clock is missing. default clock rates are used.\n");
+ denali->clk_rate = 50000000;
+ denali->clk_x_rate = 200000000;
+ }
+
+ ret = reset_get_bulk(dev, &resets);
+ if (ret) {
+ dev_warn(dev, "Can't get reset: %d\n", ret);
+ } else {
+ reset_assert_bulk(&resets);
+ udelay(2);
+ reset_deassert_bulk(&resets);
+
+ /*
+ * When the reset is deasserted, the initialization sequence
+ * (bootstrap process) is kicked off. The driver must wait until
+ * it finishes; otherwise, the behavior is unpredictable.
+ */
+ ret = denali_wait_reset_complete(denali);
+ if (ret) {
+ dev_err(denali->dev, "reset not completed.\n");
+ return ret;
+ }
+ }
+
+ return denali_init(denali);
+}
+
+U_BOOT_DRIVER(denali_nand_dt) = {
+ .name = "denali-nand-dt",
+ .id = UCLASS_MTD,
+ .of_match = denali_nand_dt_ids,
+ .probe = denali_dt_probe,
+ .priv_auto = sizeof(struct denali_nand_info),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(denali_nand_dt),
+ &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize Denali NAND controller. (error %d)\n",
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/denali_spl.c b/roms/u-boot/drivers/mtd/nand/raw/denali_spl.c
new file mode 100644
index 000000000..f72142817
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/denali_spl.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014 Panasonic Corporation
+ * Copyright (C) 2014-2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/mtd/rawnand.h>
+#include "denali.h"
+
+#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
+#define DENALI_MAP10 (2 << 26) /* high-level control plane */
+
+#define INDEX_CTRL_REG 0x0
+#define INDEX_DATA_REG 0x10
+
+#define SPARE_ACCESS 0x41
+#define MAIN_ACCESS 0x42
+#define PIPELINE_ACCESS 0x2000
+
+#define BANK(x) ((x) << 24)
+
+static void __iomem *denali_flash_mem =
+ (void __iomem *)CONFIG_SYS_NAND_DATA_BASE;
+static void __iomem *denali_flash_reg =
+ (void __iomem *)CONFIG_SYS_NAND_REGS_BASE;
+
+static const int flash_bank;
+static int page_size, oob_size, pages_per_block;
+
+static void index_addr(uint32_t address, uint32_t data)
+{
+ writel(address, denali_flash_mem + INDEX_CTRL_REG);
+ writel(data, denali_flash_mem + INDEX_DATA_REG);
+}
+
+static int wait_for_irq(uint32_t irq_mask)
+{
+ unsigned long timeout = 1000000;
+ uint32_t intr_status;
+
+ do {
+ intr_status = readl(denali_flash_reg + INTR_STATUS(flash_bank));
+
+ if (intr_status & INTR__ECC_UNCOR_ERR) {
+ debug("Uncorrected ECC detected\n");
+ return -EBADMSG;
+ }
+
+ if (intr_status & irq_mask)
+ break;
+
+ udelay(1);
+ timeout--;
+ } while (timeout);
+
+ if (!timeout) {
+ debug("Timeout with interrupt status %08x\n", intr_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void read_data_from_flash_mem(uint8_t *buf, int len)
+{
+ int i;
+ uint32_t *buf32;
+
+ /* transfer the data from the flash */
+ buf32 = (uint32_t *)buf;
+
+ /*
+ * Handle unaligned access even though it rarely happens.
+ * Avoid put_unaligned() in the common case since it causes a
+ * slight performance regression.
+ */
+ if ((unsigned long)buf32 % 4) {
+ for (i = 0; i < len / 4; i++)
+ put_unaligned(readl(denali_flash_mem + INDEX_DATA_REG),
+ buf32++);
+ } else {
+ for (i = 0; i < len / 4; i++)
+ *buf32++ = readl(denali_flash_mem + INDEX_DATA_REG);
+ }
+
+ if (len % 4) {
+ u32 tmp;
+
+ tmp = cpu_to_le32(readl(denali_flash_mem + INDEX_DATA_REG));
+ buf = (uint8_t *)buf32;
+ for (i = 0; i < len % 4; i++) {
+ *buf++ = tmp;
+ tmp >>= 8;
+ }
+ }
+}
+
+int denali_send_pipeline_cmd(int page, int ecc_en, int access_type)
+{
+ uint32_t addr, cmd;
+ static uint32_t page_count = 1;
+
+ writel(ecc_en, denali_flash_reg + ECC_ENABLE);
+
+ /* clear all bits of intr_status. */
+ writel(0xffff, denali_flash_reg + INTR_STATUS(flash_bank));
+
+ addr = BANK(flash_bank) | page;
+
+ /* setup the access type */
+ cmd = DENALI_MAP10 | addr;
+ index_addr(cmd, access_type);
+
+ /* setup the pipeline command */
+ index_addr(cmd, PIPELINE_ACCESS | page_count);
+
+ cmd = DENALI_MAP01 | addr;
+ writel(cmd, denali_flash_mem + INDEX_CTRL_REG);
+
+ return wait_for_irq(INTR__LOAD_COMP);
+}
+
+static int nand_read_oob(void *buf, int page)
+{
+ int ret;
+
+ ret = denali_send_pipeline_cmd(page, 0, SPARE_ACCESS);
+ if (ret < 0)
+ return ret;
+
+ read_data_from_flash_mem(buf, oob_size);
+
+ return 0;
+}
+
+static int nand_read_page(void *buf, int page)
+{
+ int ret;
+
+ ret = denali_send_pipeline_cmd(page, 1, MAIN_ACCESS);
+ if (ret < 0)
+ return ret;
+
+ read_data_from_flash_mem(buf, page_size);
+
+ return 0;
+}
+
+static int nand_block_isbad(void *buf, int block)
+{
+ int ret;
+
+ ret = nand_read_oob(buf, block * pages_per_block);
+ if (ret < 0)
+ return ret;
+
+ return *((uint8_t *)buf + CONFIG_SYS_NAND_BAD_BLOCK_POS) != 0xff;
+}
+
+/* nand_init() - initialize data to make nand usable by SPL */
+void nand_init(void)
+{
+ /* access to main area */
+ writel(0, denali_flash_reg + TRANSFER_SPARE_REG);
+
+ /*
+ * These registers are expected to be already set by the hardware
+ * or earlier boot code. So we read these values out.
+ */
+ page_size = readl(denali_flash_reg + DEVICE_MAIN_AREA_SIZE);
+ oob_size = readl(denali_flash_reg + DEVICE_SPARE_AREA_SIZE);
+ pages_per_block = readl(denali_flash_reg + PAGES_PER_BLOCK);
+
+ /* Do as denali_hw_init() does. */
+ writel(CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES,
+ denali_flash_reg + SPARE_AREA_SKIP_BYTES);
+ writel(0x0F, denali_flash_reg + RB_PIN_ENABLED);
+ writel(CHIP_EN_DONT_CARE__FLAG, denali_flash_reg + CHIP_ENABLE_DONT_CARE);
+ writel(0xffff, denali_flash_reg + SPARE_AREA_MARKER);
+}
+
+int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
+{
+ int block, page, column, readlen;
+ int ret;
+ int force_bad_block_check = 1;
+
+ page = offs / page_size;
+ column = offs % page_size;
+
+ block = page / pages_per_block;
+ page = page % pages_per_block;
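+ /*
+ * E.g. with hypothetical page_size = 2048 and pages_per_block = 64,
+ * offs = 0x21800 is absolute page 67, i.e. block 1, page 3, column 0.
+ */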
+
+ while (size) {
+ if (force_bad_block_check || page == 0) {
+ ret = nand_block_isbad(dst, block);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ block++;
+ continue;
+ }
+ }
+
+ force_bad_block_check = 0;
+
+ ret = nand_read_page(dst, block * pages_per_block + page);
+ if (ret < 0)
+ return ret;
+
+ readlen = min(page_size - column, (int)size);
+
+ if (unlikely(column)) {
+ /* Partial page read */
+ memmove(dst, dst + column, readlen);
+ column = 0;
+ }
+
+ size -= readlen;
+ dst += readlen;
+ page++;
+ if (page == pages_per_block) {
+ block++;
+ page = 0;
+ }
+ }
+
+ return 0;
+}
+
+void nand_deselect(void) {}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_nand.c b/roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_nand.c
new file mode 100644
index 000000000..0c1bd7b47
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -0,0 +1,812 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Freescale Enhanced Local Bus Controller FCM NAND driver
+ *
+ * Copyright (c) 2006-2008 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ */
+
+#include <common.h>
+#include <command.h>
+#include <malloc.h>
+#include <nand.h>
+#include <dm/devres.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+
+#include <asm/io.h>
+#include <linux/errno.h>
+
+#ifdef VERBOSE_DEBUG
+#define DEBUG_ELBC
+#define vdbg(format, arg...) printf("DEBUG: " format, ##arg)
+#else
+#define vdbg(format, arg...) do {} while (0)
+#endif
+
+/* Can't use plain old DEBUG because the linux mtd
+ * headers define it as a macro.
+ */
+#ifdef DEBUG_ELBC
+#define dbg(format, arg...) printf("DEBUG: " format, ##arg)
+#else
+#define dbg(format, arg...) do {} while (0)
+#endif
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+
+#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+
+struct fsl_elbc_ctrl;
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+ struct nand_chip chip;
+ struct fsl_elbc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+ int page_size; /* NAND page size (0=512, 1=2048) */
+ unsigned int fmr; /* FCM Flash Mode Register value */
+};
+
+/* overview of the fsl elbc controller */
+
+struct fsl_elbc_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+ /* device info */
+ fsl_lbc_t *regs;
+ u8 __iomem *addr; /* Address of assigned FCM buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from LTESR after last op */
+ unsigned int mdr; /* UPM/FCM Data Register value */
+ unsigned int use_mdr; /* Non zero if the MDR is to be set */
+ unsigned int oob; /* Non zero if operating on OOB data */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+/* Small Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
+ .eccbytes = 3,
+ .eccpos = {6, 7, 8},
+ .oobfree = { {0, 5}, {9, 7} },
+};
+
+/* Small Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
+ .eccbytes = 3,
+ .eccpos = {8, 9, 10},
+ .oobfree = { {0, 5}, {6, 2}, {11, 5} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
+ .eccbytes = 12,
+ .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
+ .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
+};
+
+/* Large Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
+ .eccbytes = 12,
+ .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
+ .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+};
+
+/*
+ * fsl_elbc_oob_lp_eccm* specify that the LP NAND's OOB free area starts
+ * at offset 1, so we have to adjust the bad block pattern. This pattern
+ * should be used for x8 chips only; so far the hardware does not support
+ * x16 chips anyway.
+ */
+static u8 scan_ff_pattern[] = { 0xff, };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 1,
+ .pattern = scan_ff_pattern,
+};
+
+/*
+ * ELBC may use HW ECC, so the OOB offsets that the NAND core uses for
+ * the BBT can interfere with the ECC positions; that is why we implement
+ * our own descriptors. OOB region {11, 5} works for both SP and LP chips,
+ * with ECCM = 1 and ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 11,
+ .len = 4,
+ .veroffs = 15,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+ int buf_num;
+
+ ctrl->page = page_addr;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fbar, page_addr >> 6);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+ (oob ? FPAR_LP_MS : 0) | column);
+ buf_num = (page_addr & 1) << 2;
+ } else {
+ out_be32(&lbc->fbar, page_addr >> 5);
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+ (oob ? FPAR_SP_MS : 0) | column);
+ buf_num = page_addr & 7;
+ }
+
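+ /*
+ * Each FCM buffer is 1 KiB (hence the * 1024). For a large-page
+ * chip, buf_num = (page_addr & 1) << 2, so even pages land in
+ * buffer 0 (vbase) and odd pages in buffer 4 (vbase + 0x1000).
+ */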
+ ctrl->addr = priv->vbase + buf_num * 1024;
+ ctrl->index = column;
+
+ /* for OOB data, point to the second half of the buffer */
+ if (oob)
+ ctrl->index += priv->page_size ? 2048 : 512;
+
+ vdbg("set_addr: bank=%d, ctrl->addr=0x%p (0x%p), "
+ "index %x, pes %d ps %d\n",
+ buf_num, ctrl->addr, priv->vbase, ctrl->index,
+ chip->phys_erase_shift, chip->page_shift);
+}
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+ u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
+ u32 time_start;
+ u32 ltesr;
+
+ /* Setup the FMR[OP] to execute without write protection */
+ out_be32(&lbc->fmr, priv->fmr | 3);
+ if (ctrl->use_mdr)
+ out_be32(&lbc->mdr, ctrl->mdr);
+
+ vdbg("fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+ in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+ vdbg("fsl_elbc_run_command: fbar=%08x fpar=%08x "
+ "fbcr=%08x bank=%d\n",
+ in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+ in_be32(&lbc->fbcr), priv->bank);
+
+ /* execute special operation */
+ out_be32(&lbc->lsor, priv->bank);
+
+ /* wait for FCM complete flag or timeout */
+ time_start = get_timer(0);
+
+ ltesr = 0;
+ while (get_timer(time_start) < timeo) {
+ ltesr = in_be32(&lbc->ltesr);
+ if (ltesr & LTESR_CC)
+ break;
+ }
+
+ ctrl->status = ltesr & LTESR_NAND_MASK;
+ out_be32(&lbc->ltesr, ctrl->status);
+ out_be32(&lbc->lteatr, 0);
+
+ /* store mdr value in case it was needed */
+ if (ctrl->use_mdr)
+ ctrl->mdr = in_be32(&lbc->mdr);
+
+ ctrl->use_mdr = 0;
+
+ vdbg("fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n",
+ ctrl->status, ctrl->mdr, in_be32(&lbc->fmr));
+
+ /* returns 0 on success, otherwise non-zero */
+ return ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+ if (oob)
+ out_be32(&lbc->fcr,
+ NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+ else
+ out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ }
+}
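+
+/*
+ * Rough glossary for the FIR opcodes used above: CW0/CW1 wait for the
+ * chip and issue the command from FCR[CMD0]/FCR[CMD1], CA issues the
+ * column address, PA the page address, and RBW reads FBCR bytes into
+ * the buffer.  Large-page chips need the second command cycle
+ * (READSTART); small-page chips do not.
+ */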
+
+/* cmdfunc send commands to the FCM */
+static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+
+ ctrl->use_mdr = 0;
+
+ /* clear the read buffer */
+ ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 and READ1 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ1:
+ column += 256;
+
+ /* fall-through */
+ case NAND_CMD_READ0:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+ set_addr(mtd, 0, page_addr, 0);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ctrl->index += column;
+
+ fsl_elbc_do_read(chip, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+
+ return;
+
+ /* READID must read all 5 possible bytes while CEB is active */
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD 0x%x.\n", command);
+
+ out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_UA << FIR_OP1_SHIFT) |
+ (FIR_OP_RBW << FIR_OP2_SHIFT));
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+ /*
+ * Although READID currently needs only 8 bytes, always read
+ * the maximum of 256 bytes (needed for PARAM).
+ */
+ out_be32(&lbc->fbcr, 256);
+ ctrl->read_bytes = 256;
+ ctrl->use_mdr = 1;
+ ctrl->mdr = column;
+ set_addr(mtd, 0, 0, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+ "page_addr: 0x%x.\n", page_addr);
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_PA << FIR_OP1_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP2_SHIFT));
+
+ out_be32(&lbc->fcr,
+ (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT));
+
+ out_be32(&lbc->fbcr, 0);
+ ctrl->read_bytes = 0;
+
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 fcr;
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ "page_addr: 0x%x, column: 0x%x.\n",
+ page_addr, column);
+
+ ctrl->column = column;
+ ctrl->oob = 0;
+
+ if (priv->page_size) {
+ fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) |
+ (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT);
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_WB << FIR_OP3_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP4_SHIFT));
+ } else {
+ fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+ (FIR_OP_CA << FIR_OP2_SHIFT) |
+ (FIR_OP_PA << FIR_OP3_SHIFT) |
+ (FIR_OP_WB << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT));
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+ ctrl->oob = 1;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ } else {
+ /* Second 256 bytes --> READ1 */
+ fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
+ }
+ }
+
+ out_be32(&lbc->fcr, fcr);
+ set_addr(mtd, column, page_addr, ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ vdbg("fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+ "writing %d bytes.\n", ctrl->index);
+
+ /* if the write did not start at 0 or is not a full page
+ * then set the exact length, otherwise use a full page
+ * write so the HW generates the ECC.
+ */
+ if (ctrl->oob || ctrl->column != 0 ||
+ ctrl->index != mtd->writesize + mtd->oobsize)
+ out_be32(&lbc->fbcr, ctrl->index);
+ else
+ out_be32(&lbc->fbcr, 0);
+
+ fsl_elbc_run_command(mtd);
+
+ return;
+ }
+
+ /* CMD_STATUS must read the status byte while CEB is active */
+ /* Note - it does not wait for the ready line */
+ case NAND_CMD_STATUS:
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP);
+ return;
+
+ /* RESET without waiting for the ready line */
+ case NAND_CMD_RESET:
+ dbg("fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+ out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ default:
+ printf("fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+ command);
+ }
+}
+
+static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ printf("write_buf of %d bytes\n", len);
+ ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ctrl->index) {
+ printf("write_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, bufsize - ctrl->index);
+ len = bufsize - ctrl->index;
+ }
+
+ memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
+ /*
+ * This is a workaround for the weird eLBC hangs during NAND writes.
+ * Scott Wood says: "...perhaps difference in how long it takes a
+ * write to make it through the localbus compared to a write to IMMR
+ * is causing problems, and sync isn't helping for some reason."
+ * Reading back the last byte helps, though.
+ */
+ in_8(&ctrl->addr[ctrl->index] + len - 1);
+
+ ctrl->index += len;
+}
+
+/*
+ * Read a byte from the FCM hardware buffer if it has any data left;
+ * otherwise report an error, since the buffer is exhausted.
+ */
+static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+ /* If there are still bytes in the FCM, then use the next byte. */
+ if (ctrl->index < ctrl->read_bytes)
+ return in_8(&ctrl->addr[ctrl->index++]);
+
+ printf("read_byte beyond end of buffer\n");
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index);
+ memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail);
+ ctrl->index += avail;
+
+ if (len > avail)
+ printf("read_buf beyond end of buffer "
+ "(%d requested, %d available)\n",
+ len, avail);
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ fsl_lbc_t *lbc = ctrl->regs;
+
+ if (ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ ctrl->use_mdr = 0;
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ if (ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP);
+ return fsl_elbc_read_byte(mtd);
+}
+
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static struct fsl_elbc_ctrl *elbc_ctrl;
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, uint32_t data_len,
+ const uint8_t *buf, int oob_required, int page)
+{
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void fsl_elbc_ctrl_init(void)
+{
+ elbc_ctrl = kzalloc(sizeof(*elbc_ctrl), GFP_KERNEL);
+ if (!elbc_ctrl)
+ return;
+
+ elbc_ctrl->regs = LBC_BASE_ADDR;
+
+ /* clear event registers */
+ out_be32(&elbc_ctrl->regs->ltesr, LTESR_NAND_MASK);
+ out_be32(&elbc_ctrl->regs->lteatr, 0);
+
+ /* Enable interrupts for any detected events */
+ out_be32(&elbc_ctrl->regs->lteir, LTESR_NAND_MASK);
+
+ elbc_ctrl->read_bytes = 0;
+ elbc_ctrl->index = 0;
+ elbc_ctrl->addr = NULL;
+}
+
+static int fsl_elbc_chip_init(int devnum, u8 *addr)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct fsl_elbc_mtd *priv;
+ uint32_t br = 0, or = 0;
+ int ret;
+
+ if (!elbc_ctrl) {
+ fsl_elbc_ctrl_init();
+ if (!elbc_ctrl)
+ return -1;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ctrl = elbc_ctrl;
+ priv->vbase = addr;
+
+ /* Find which chip select it is connected to. It'd be nice
+ * if we could pass more than one datum to the NAND driver...
+ */
+ for (priv->bank = 0; priv->bank < MAX_BANKS; priv->bank++) {
+ phys_addr_t phys_addr = virt_to_phys(addr);
+
+ br = in_be32(&elbc_ctrl->regs->bank[priv->bank].br);
+ or = in_be32(&elbc_ctrl->regs->bank[priv->bank].or);
+
+ if ((br & BR_V) && (br & BR_MSEL) == BR_MS_FCM &&
+ (br & or & BR_BA) == BR_PHYS_ADDR(phys_addr))
+ break;
+ }
+
+ if (priv->bank >= MAX_BANKS) {
+ printf("fsl_elbc_nand: address did not match any "
+ "chip selects\n");
+ kfree(priv);
+ return -ENODEV;
+ }
+
+ nand = &priv->chip;
+ mtd = nand_to_mtd(nand);
+
+ elbc_ctrl->chips[priv->bank] = priv;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ nand->read_byte = fsl_elbc_read_byte;
+ nand->write_buf = fsl_elbc_write_buf;
+ nand->read_buf = fsl_elbc_read_buf;
+ nand->select_chip = fsl_elbc_select_chip;
+ nand->cmdfunc = fsl_elbc_cmdfunc;
+ nand->waitfunc = fsl_elbc_wait;
+
+ /* set up bad block table descriptors */
+ nand->bbt_td = &bbt_main_descr;
+ nand->bbt_md = &bbt_mirror_descr;
+
+ /* set up nand options */
+ nand->options = NAND_NO_SUBPAGE_WRITE;
+ nand->bbt_options = NAND_BBT_USE_FLASH;
+
+ nand->controller = &elbc_ctrl->controller;
+ nand_set_controller_data(nand, priv);
+
+ nand->ecc.read_page = fsl_elbc_read_page;
+ nand->ecc.write_page = fsl_elbc_write_page;
+ nand->ecc.write_subpage = fsl_elbc_write_subpage;
+
+ priv->fmr = (15 << FMR_CWTO_SHIFT) | (2 << FMR_AL_SHIFT);
+
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((br & BR_DECC) == BR_DECC_CHK_GEN) {
+ nand->ecc.mode = NAND_ECC_HW;
+
+ nand->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_sp_eccm1 :
+ &fsl_elbc_oob_sp_eccm0;
+
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 3;
+ nand->ecc.steps = 1;
+ nand->ecc.strength = 1;
+ } else {
+ /* otherwise fall back to software ECC */
+#if defined(CONFIG_NAND_ECC_BCH)
+ nand->ecc.mode = NAND_ECC_SOFT_BCH;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif
+ }
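+
+ /*
+ * With hardware ECC the FCM generates 3 ECC bytes per 512-byte
+ * sector (single-bit correction, hence strength 1); the eccm0 and
+ * eccm1 layouts differ only in where those bytes sit in the OOB.
+ */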
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ /* Large-page-specific setup */
+ if (mtd->writesize == 2048) {
+ setbits_be32(&elbc_ctrl->regs->bank[priv->bank].or,
+ OR_FCM_PGS);
+ in_be32(&elbc_ctrl->regs->bank[priv->bank].or);
+
+ priv->page_size = 1;
+ nand->badblock_pattern = &largepage_memorybased;
+
+ /*
+ * The hardware expects ECCM0 for small-page and ECCM1 for
+ * large-page chips when booting from NAND, and we follow that
+ * even when not booting from NAND.
+ */
+ priv->fmr |= FMR_ECCM;
+
+ /* adjust ecc setup if needed */
+ if ((br & BR_DECC) == BR_DECC_CHK_GEN) {
+ nand->ecc.steps = 4;
+ nand->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_lp_eccm1 :
+ &fsl_elbc_oob_lp_eccm0;
+ }
+ } else if (mtd->writesize == 512) {
+ clrbits_be32(&elbc_ctrl->regs->bank[priv->bank].or,
+ OR_FCM_PGS);
+ in_be32(&elbc_ctrl->regs->bank[priv->bank].or);
+ } else {
+ return -ENODEV;
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return ret;
+
+ ret = nand_register(devnum, mtd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#ifndef CONFIG_SYS_NAND_BASE_LIST
+#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE }
+#endif
+
+static unsigned long base_address[CONFIG_SYS_MAX_NAND_DEVICE] =
+ CONFIG_SYS_NAND_BASE_LIST;
+
+void board_nand_init(void)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++)
+ fsl_elbc_chip_init(i, (u8 *)base_address[i]);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_spl.c b/roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_spl.c
new file mode 100644
index 000000000..a62ab69ee
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/fsl_elbc_spl.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NAND boot for Freescale Enhanced Local Bus Controller, Flash Control Machine
+ *
+ * (C) Copyright 2006-2008
+ * Stefan Roese, DENX Software Engineering, sr@denx.de.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ * Author: Scott Wood <scottwood@freescale.com>
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <asm/io.h>
+#include <asm/fsl_lbc.h>
+#include <nand.h>
+
+#ifdef CONFIG_MPC83xx
+#include "../../../arch/powerpc/cpu/mpc83xx/elbc/elbc.h"
+#endif
+
+#define WINDOW_SIZE 8192
+
+static void nand_wait(void)
+{
+ fsl_lbc_t *regs = LBC_BASE_ADDR;
+
+ for (;;) {
+ uint32_t status = in_be32(&regs->ltesr);
+
+ if (status == 1)
+ return;
+
+ if (status & 1) {
+ puts("read failed (ltesr)\n");
+ for (;;);
+ }
+ }
+}
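+
+/*
+ * The check above assumes LTESR_CC is bit 0: a status of exactly 1
+ * means the transfer completed cleanly, while bit 0 plus any error
+ * bit means the read failed.
+ */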
+
+#ifdef CONFIG_TPL_BUILD
+int nand_spl_load_image(uint32_t offs, unsigned int uboot_size, void *vdst)
+#else
+static int nand_load_image(uint32_t offs, unsigned int uboot_size, void *vdst)
+#endif
+{
+ fsl_lbc_t *regs = LBC_BASE_ADDR;
+ uchar *buf = (uchar *)CONFIG_SYS_NAND_BASE;
+ const int large = CONFIG_SYS_NAND_OR_PRELIM & OR_FCM_PGS;
+ const int block_shift = large ? 17 : 14;
+ const int block_size = 1 << block_shift;
+ const int page_size = large ? 2048 : 512;
+ const int bad_marker = large ? page_size + 0 : page_size + 5;
+ int fmr = (15 << FMR_CWTO_SHIFT) | (2 << FMR_AL_SHIFT) | 2;
+ int pos = 0;
+ char *dst = vdst;
+
+ if (offs & (block_size - 1)) {
+ puts("bad offset\n");
+ for (;;);
+ }
+
+ if (large) {
+ fmr |= FMR_ECCM;
+ out_be32(&regs->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ out_be32(&regs->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+ } else {
+ out_be32(&regs->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ out_be32(&regs->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+ }
+
+ out_be32(&regs->fbcr, 0);
+ clrsetbits_be32(&regs->bank[0].br, BR_DECC, BR_DECC_CHK_GEN);
+
+ while (pos < uboot_size) {
+ int i = 0;
+ out_be32(&regs->fbar, offs >> block_shift);
+
+ do {
+ int j;
+ unsigned int page_offs = (offs & (block_size - 1)) << 1;
+
+ out_be32(&regs->ltesr, ~0);
+ out_be32(&regs->lteatr, 0);
+ out_be32(&regs->fpar, page_offs);
+ out_be32(&regs->fmr, fmr);
+ out_be32(&regs->lsor, 0);
+ nand_wait();
+
+ page_offs %= WINDOW_SIZE;
+
+ /*
+ * If either of the first two pages is marked bad,
+ * continue to the next block.
+ */
+ if (i++ < 2 && buf[page_offs + bad_marker] != 0xff) {
+ puts("skipping\n");
+ offs = (offs + block_size) & ~(block_size - 1);
+ pos &= ~(block_size - 1);
+ break;
+ }
+
+ for (j = 0; j < page_size; j++)
+ dst[pos + j] = buf[page_offs + j];
+
+ pos += page_size;
+ offs += page_size;
+ } while ((offs & (block_size - 1)) && (pos < uboot_size));
+ }
+
+ return 0;
+}
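+
+/*
+ * Worked example of the window arithmetic above (illustrative numbers):
+ * each page occupies twice its size in the FCM buffer space (main area
+ * plus spare), so page_offs doubles the in-block byte offset.  With
+ * 2KiB pages, page 3 of a block maps to 3 * 4096 = 12KiB, which wraps
+ * to 4KiB when reduced modulo the 8KiB window.
+ */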
+
+/*
+ * nand_load_image() is kept static here because a non-static definition
+ * makes the code too large for certain SPLs (minimal SPL, maximum size
+ * <= 4 Kbytes).
+ */
+#ifndef CONFIG_TPL_BUILD
+#define nand_spl_load_image(offs, uboot_size, vdst) \
+ nand_load_image(offs, uboot_size, vdst)
+#endif
+
+/*
+ * The main entry for NAND booting. It's necessary that SDRAM is already
+ * configured and available since this code loads the main U-Boot image
+ * from NAND into SDRAM and starts it from there.
+ */
+void nand_boot(void)
+{
+ __attribute__((noreturn)) void (*uboot)(void);
+ /*
+ * Load U-Boot image from NAND into RAM
+ */
+ nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
+ CONFIG_SYS_NAND_U_BOOT_SIZE,
+ (void *)CONFIG_SYS_NAND_U_BOOT_DST);
+
+#ifdef CONFIG_NAND_ENV_DST
+ nand_spl_load_image(CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE,
+ (void *)CONFIG_NAND_ENV_DST);
+
+#ifdef CONFIG_ENV_OFFSET_REDUND
+ nand_spl_load_image(CONFIG_ENV_OFFSET_REDUND, CONFIG_ENV_SIZE,
+ (void *)CONFIG_NAND_ENV_DST + CONFIG_ENV_SIZE);
+#endif
+#endif
+
+#ifdef CONFIG_SPL_FLUSH_IMAGE
+ /*
+ * Clean d-cache and invalidate i-cache, to
+ * make sure that no stale data is executed.
+ */
+ flush_cache(CONFIG_SYS_NAND_U_BOOT_DST, CONFIG_SYS_NAND_U_BOOT_SIZE);
+#endif
+
+ puts("transferring control\n");
+ /*
+ * Jump to U-Boot image
+ */
+ uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START;
+ (*uboot)();
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_nand.c b/roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_nand.c
new file mode 100644
index 000000000..e5ff93787
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Integrated Flash Controller NAND Machine Driver
+ *
+ * Copyright (c) 2012 Freescale Semiconductor, Inc
+ *
+ * Authors: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ */
+
+#include <common.h>
+#include <command.h>
+#include <malloc.h>
+#include <nand.h>
+#include <dm/devres.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <fsl_ifc.h>
+
+#ifndef CONFIG_SYS_FSL_IFC_BANK_COUNT
+#define CONFIG_SYS_FSL_IFC_BANK_COUNT 4
+#endif
+
+#define MAX_BANKS CONFIG_SYS_FSL_IFC_BANK_COUNT
+#define ERR_BYTE 0xFF /* Value returned for read bytes when the read failed */
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+ struct nand_chip chip;
+ struct fsl_ifc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+};
+
+/* overview of the fsl ifc controller */
+struct fsl_ifc_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_ifc_mtd *chips[MAX_BANKS];
+
+ /* device info */
+ struct fsl_ifc regs;
+ void __iomem *addr; /* Address of assigned IFC buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from NEESR after last op */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ unsigned int eccread; /* Non zero for a full-page ECC read */
+};
+
+static struct fsl_ifc_ctrl *ifc_ctrl;
+
+/* 512-byte page with 4-bit ECC, 8-bit bus width */
+static struct nand_ecclayout oob_512_8bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {0, 5}, {6, 2} },
+};
+
+/* 512-byte page with 4-bit ECC, 16-bit bus width */
+static struct nand_ecclayout oob_512_16bit_ecc4 = {
+ .eccbytes = 8,
+ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {2, 6}, },
+};
+
+/* 2048-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_2048_ecc4 = {
+ .eccbytes = 32,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobfree = { {2, 6}, {40, 24} },
+};
+
+/* 4096-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_4096_ecc4 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ },
+ .oobfree = { {2, 6}, {72, 56} },
+};
+
+/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_4096_ecc8 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 82} },
+};
+
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+ .eccbytes = 128,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ },
+ .oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 344-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+ .eccbytes = 256,
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+ 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137, 138, 139, 140, 141, 142, 143,
+ 144, 145, 146, 147, 148, 149, 150, 151,
+ 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+ 192, 193, 194, 195, 196, 197, 198, 199,
+ 200, 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255,
+ 256, 257, 258, 259, 260, 261, 262, 263,
+ },
+ .oobfree = { {2, 6}, {264, 80} },
+};
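+
+/*
+ * Sanity check on the layouts (assuming one ECC step per 512-byte
+ * sector): an 8KiB page has 16 sectors, so 8-bit ECC at 16 bytes per
+ * sector needs 16 * 16 = 256 ECC bytes at spare offsets 8-263, leaving
+ * oobfree regions {2, 6} and {264, 80} in a 344-byte OOB.
+ */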
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 2, /* 0 on 8-bit small page */
+ .len = 4,
+ .veroffs = 6,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime *ifc = ctrl->regs.rregs;
+ int buf_num;
+
+ ctrl->page = page_addr;
+
+ /* Program ROW0/COL0 */
+ ifc_out32(&ifc->ifc_nand.row0, page_addr);
+ ifc_out32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
+
+ buf_num = page_addr & priv->bufnum_mask;
+
+ ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+ ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ctrl->index += mtd->writesize;
+}
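+
+/*
+ * Illustration: with 2KiB pages bufnum_mask is 3, so the SRAM holds
+ * four page buffers, each spanning writesize * 2 bytes (main area
+ * followed by spare); the OOB therefore starts writesize bytes into
+ * a buffer.
+ */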
+
+/* extract the per-sector ECC error count from ECCSTAT (15 = uncorrectable) */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+ u32 eccstat, unsigned int bufnum)
+{
+ return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
+}
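+
+/*
+ * Each 32-bit ECCSTAT register packs the status of four sectors, one
+ * per byte, most significant byte first; the low nibble of each byte
+ * is the error count.  E.g. bufnum 5 shifts by (3 - 5 % 4) * 8 = 16,
+ * selecting bits 23:16 of the second register.
+ */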
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static int fsl_ifc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime *ifc = ctrl->regs.rregs;
+ u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
+ u32 time_start;
+ u32 eccstat;
+ int i;
+
+ /* set the chip select for NAND Transaction */
+ ifc_out32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+
+ /* start read/write seq */
+ ifc_out32(&ifc->ifc_nand.nandseq_strt,
+ IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ /* wait for NAND Machine complete flag or timeout */
+ time_start = get_timer(0);
+
+ while (get_timer(time_start) < timeo) {
+ ctrl->status = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
+
+ if (ctrl->status & IFC_NAND_EVTER_STAT_OPC)
+ break;
+ }
+
+ ifc_out32(&ifc->ifc_nand.nand_evter_stat, ctrl->status);
+
+ if (ctrl->status & IFC_NAND_EVTER_STAT_FTOER)
+ printf("%s: Flash Time Out Error\n", __func__);
+ if (ctrl->status & IFC_NAND_EVTER_STAT_WPER)
+ printf("%s: Write Protect Error\n", __func__);
+
+ if (ctrl->eccread) {
+ int errors;
+ int bufnum = ctrl->page & priv->bufnum_mask;
+ int sector_start = bufnum * chip->ecc.steps;
+ int sector_end = sector_start + chip->ecc.steps - 1;
+ u32 *eccstat_regs;
+
+ eccstat_regs = ifc->ifc_nand.nand_eccstat;
+ eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
+
+ for (i = sector_start; i <= sector_end; i++) {
+ if ((i != sector_start) && !(i % 4))
+ eccstat = ifc_in32(&eccstat_regs[i / 4]);
+
+ errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+ if (errors == 15) {
+ /*
+ * Uncorrectable error.
+ * We'll check for blank pages later.
+ *
+ * We disable ECCER reporting due to erratum
+ * IFC-A002770 -- so report it now if we
+ * see an uncorrectable error in ECCSTAT.
+ */
+ ctrl->status |= IFC_NAND_EVTER_STAT_ECCER;
+ continue;
+ }
+
+ mtd->ecc_stats.corrected += errors;
+ }
+
+ ctrl->eccread = 0;
+ }
+
+ /* returns 0 on success, otherwise non-zero */
+ return ctrl->status == IFC_NAND_EVTER_STAT_OPC ? 0 : -EIO;
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+ int oob,
+ struct mtd_info *mtd)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime *ifc = ctrl->regs.rregs;
+
+ /* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+ if (mtd->writesize > 512) {
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fir1, 0x0);
+
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+ } else {
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
+
+ if (oob)
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
+ else
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+ }
+}
+
+/* cmdfunc send commands to the IFC NAND Machine */
+static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime *ifc = ctrl->regs.rregs;
+
+ /* clear the read buffer */
+ ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 reads the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ0: {
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, 0);
+ set_addr(mtd, 0, page_addr, 0);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ctrl->index += column;
+
+ if (chip->ecc.mode == NAND_ECC_HW)
+ ctrl->eccread = 1;
+
+ fsl_ifc_do_read(chip, 0, mtd);
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_ifc_do_read(chip, 1, mtd);
+ fsl_ifc_run_command(mtd);
+
+ return;
+
+ /* READID must read all possible bytes while CEB is active */
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM: {
+ /*
+ * For READID, read the 8 bytes that are currently used.
+ * For PARAM, read all 3 copies of the 256-byte parameter page.
+ */
+ int len = 8;
+ int timing = IFC_FIR_OP_RB;
+ if (command == NAND_CMD_PARAM) {
+ timing = IFC_FIR_OP_RBCD;
+ len = 256 * 3;
+ }
+
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ command << IFC_NAND_FCR0_CMD0_SHIFT);
+ ifc_out32(&ifc->ifc_nand.row3, column);
+
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, len);
+ ctrl->read_bytes = len;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_ifc_run_command(mtd);
+ return;
+ }
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
+
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
+
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, 0);
+ ctrl->read_bytes = 0;
+ fsl_ifc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ u32 nand_fcr0;
+ ctrl->column = column;
+ ctrl->oob = 0;
+
+ if (mtd->writesize > 512) {
+ nand_fcr0 =
+ (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
+
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD <<
+ IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fir1,
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT));
+ } else {
+ nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+ IFC_NAND_FCR0_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN <<
+ IFC_NAND_FCR0_CMD2_SHIFT) |
+ (NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD3_SHIFT));
+
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fir1,
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+ (IFC_FIR_OP_RDSTAT <<
+ IFC_NAND_FIR1_OP7_SHIFT) |
+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT));
+
+ if (column >= mtd->writesize)
+ nand_fcr0 |=
+ NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+ else
+ nand_fcr0 |=
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+ }
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ ctrl->oob = 1;
+ }
+ ifc_out32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
+ set_addr(mtd, column, page_addr, ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG:
+ if (ctrl->oob)
+ ifc_out32(&ifc->ifc_nand.nand_fbcr,
+ ctrl->index - ctrl->column);
+ else
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, 0);
+
+ fsl_ifc_run_command(mtd);
+ return;
+
+ case NAND_CMD_STATUS:
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ /*
+ * The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ ifc_out16(ctrl->addr,
+ ifc_in16(ctrl->addr) | NAND_STATUS_WP);
+ else
+ out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP);
+ return;
+
+ case NAND_CMD_RESET:
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
+ fsl_ifc_run_command(mtd);
+ return;
+
+ default:
+ printf("%s: error, unsupported command 0x%x.\n",
+ __func__, command);
+ }
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len <= 0) {
+ printf("%s of %d bytes\n", __func__, len);
+ ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ctrl->index) {
+ printf("%s beyond end of buffer "
+ "(%d requested, %u available)\n",
+ __func__, len, bufsize - ctrl->index);
+ len = bufsize - ctrl->index;
+ }
+
+ memcpy_toio(ctrl->addr + ctrl->index, buf, len);
+ ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer if it has any data left;
+ * otherwise report an error, since the buffer is exhausted.
+ */
+static u8 fsl_ifc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ unsigned int offset;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ctrl->index < ctrl->read_bytes) {
+ offset = ctrl->index++;
+ return in_8(ctrl->addr + offset);
+ }
+
+ printf("%s beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer;
+ * read function for a 16-bit bus width.
+ */
+static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ uint16_t data;
+
+ /*
+ * If there are still bytes in the IFC buffer, then use the
+ * next byte.
+ */
+ if (ctrl->index < ctrl->read_bytes) {
+ data = ifc_in16(ctrl->addr + ctrl->index);
+ ctrl->index += 2;
+ return (uint8_t)data;
+ }
+
+ printf("%s beyond end of buffer\n", __func__);
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index);
+ memcpy_fromio(buf, ctrl->addr + ctrl->index, avail);
+ ctrl->index += avail;
+
+ if (len > avail)
+ printf("%s beyond end of buffer "
+ "(%d requested, %d available)\n",
+ __func__, len, avail);
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_runtime *ifc = ctrl->regs.rregs;
+ u32 nand_fsr;
+ int status;
+
+ if (ctrl->status != IFC_NAND_EVTER_STAT_OPC)
+ return NAND_STATUS_FAIL;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
+ IFC_NAND_FCR0_CMD0_SHIFT);
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_ifc_run_command(mtd);
+
+ if (ctrl->status != IFC_NAND_EVTER_STAT_OPC)
+ return NAND_STATUS_FAIL;
+
+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
+ status = nand_fsr >> 24;
+
+ /* The chip sometimes reports write-protect even when it is not */
+ return status | NAND_STATUS_WP;
+}
+
+/*
+ * The controller does not check for bitflips in erased pages,
+ * therefore software must check instead.
+ */
+static int
+check_erased_page(struct nand_chip *chip, u8 *buf, struct mtd_info *mtd)
+{
+ u8 *ecc = chip->oob_poi;
+ const int ecc_size = chip->ecc.bytes;
+ const int pkt_size = chip->ecc.size;
+ int i, res, bitflips;
+
+ /* IFC starts ecc bytes at offset 8 in the spare area. */
+ ecc += 8;
+ bitflips = 0;
+ for (i = 0; i < chip->ecc.steps; i++) {
+ res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size,
+ NULL, 0, chip->ecc.strength);
+
+ if (res < 0) {
+ printf("fsl-ifc: NAND Flash ECC Uncorrectable Error\n");
+ mtd->ecc_stats.failed++;
+ } else if (res > 0) {
+ mtd->ecc_stats.corrected += res;
+ }
+ bitflips = max(res, bitflips);
+ buf += pkt_size;
+ ecc += ecc_size;
+ }
+
+ return bitflips;
+}
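+
+/*
+ * nand_check_erased_ecc_chunk() treats a chunk as erased when the
+ * number of zero bits across data and ECC stays within the given
+ * strength; it returns the bitflip count in that case and a negative
+ * error when the chunk is genuinely corrupt.
+ */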
+
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+
+ fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (ctrl->status & IFC_NAND_EVTER_STAT_ECCER)
+ return check_erased_page(chip, buf, mtd);
+
+ if (ctrl->status != IFC_NAND_EVTER_STAT_OPC)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+ fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static void fsl_ifc_ctrl_init(void)
+{
+ uint32_t ver = 0;
+ ifc_ctrl = kzalloc(sizeof(*ifc_ctrl), GFP_KERNEL);
+ if (!ifc_ctrl)
+ return;
+
+ ifc_ctrl->regs.gregs = IFC_FCM_BASE_ADDR;
+
+ ver = ifc_in32(&ifc_ctrl->regs.gregs->ifc_rev);
+ if (ver >= FSL_IFC_V2_0_0)
+ ifc_ctrl->regs.rregs =
+ (void *)CONFIG_SYS_IFC_ADDR + IFC_RREGS_64KOFFSET;
+ else
+ ifc_ctrl->regs.rregs =
+ (void *)CONFIG_SYS_IFC_ADDR + IFC_RREGS_4KOFFSET;
+
+ /* clear event registers */
+ ifc_out32(&ifc_ctrl->regs.rregs->ifc_nand.nand_evter_stat, ~0U);
+ ifc_out32(&ifc_ctrl->regs.rregs->ifc_nand.pgrdcmpl_evt_stat, ~0U);
+
+ /* Enable error and event reporting for any detected errors */
+ ifc_out32(&ifc_ctrl->regs.rregs->ifc_nand.nand_evter_en,
+ IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_PGRDCMPL_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN);
+
+ ifc_out32(&ifc_ctrl->regs.rregs->ifc_nand.ncfgr, 0x0);
+}
+
+static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+static int fsl_ifc_sram_init(struct fsl_ifc_mtd *priv, uint32_t ver)
+{
+ struct fsl_ifc_runtime *ifc = ifc_ctrl->regs.rregs;
+ uint32_t cs = 0, csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t ncfgr = 0;
+ u32 timeo = (CONFIG_SYS_HZ * 10) / 1000;
+ u32 time_start;
+
+ if (ver > FSL_IFC_V1_1_0) {
+ ncfgr = ifc_in32(&ifc->ifc_nand.ncfgr);
+ ifc_out32(&ifc->ifc_nand.ncfgr, ncfgr | IFC_NAND_SRAM_INIT_EN);
+
+ /* wait for SRAM_INIT bit to be clear or timeout */
+ time_start = get_timer(0);
+ while (get_timer(time_start) < timeo) {
+ ifc_ctrl->status =
+ ifc_in32(&ifc->ifc_nand.nand_evter_stat);
+
+ if (!(ifc_ctrl->status & IFC_NAND_SRAM_INIT_EN))
+ return 0;
+ }
+ printf("fsl-ifc: Failed to Initialise SRAM\n");
+ return 1;
+ }
+
+ cs = priv->bank;
+
+ /* Save CSOR and CSOR_ext */
+ csor = ifc_in32(&ifc_ctrl->regs.gregs->csor_cs[cs].csor);
+ csor_ext = ifc_in32(&ifc_ctrl->regs.gregs->csor_cs[cs].csor_ext);
+
+ /* change PageSize to 8K and SpareSize to 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ ifc_out32(&ifc_ctrl->regs.gregs->csor_cs[cs].csor, csor_8k);
+ ifc_out32(&ifc_ctrl->regs.gregs->csor_cs[cs].csor_ext, 0x0000400);
+
+ /* READID */
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
+ ifc_out32(&ifc->ifc_nand.row3, 0x0);
+
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, 0x0);
+
+ /* Program ROW0/COL0 */
+ ifc_out32(&ifc->ifc_nand.row0, 0x0);
+ ifc_out32(&ifc->ifc_nand.col0, 0x0);
+
+ /* set the chip select for NAND Transaction */
+ ifc_out32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+
+ /* start read seq */
+ ifc_out32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ time_start = get_timer(0);
+
+ while (get_timer(time_start) < timeo) {
+ ifc_ctrl->status = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
+
+ if (ifc_ctrl->status & IFC_NAND_EVTER_STAT_OPC)
+ break;
+ }
+
+ if (ifc_ctrl->status != IFC_NAND_EVTER_STAT_OPC) {
+ printf("fsl-ifc: Failed to Initialise SRAM\n");
+ return 1;
+ }
+
+ ifc_out32(&ifc->ifc_nand.nand_evter_stat, ifc_ctrl->status);
+
+ /* Restore CSOR and CSOR_ext */
+ ifc_out32(&ifc_ctrl->regs.gregs->csor_cs[cs].csor, csor);
+ ifc_out32(&ifc_ctrl->regs.gregs->csor_cs[cs].csor_ext, csor_ext);
+
+ return 0;
+}
+
+static int fsl_ifc_chip_init(int devnum, u8 *addr)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct fsl_ifc_mtd *priv;
+ struct nand_ecclayout *layout;
+ struct fsl_ifc_fcm *gregs = NULL;
+ uint32_t cspr = 0, csor = 0, ver = 0;
+ int ret = 0;
+
+ if (!ifc_ctrl) {
+ fsl_ifc_ctrl_init();
+ if (!ifc_ctrl)
+ return -1;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ctrl = ifc_ctrl;
+ priv->vbase = addr;
+ gregs = ifc_ctrl->regs.gregs;
+
+ /* Find which chip select it is connected to. */
+ for (priv->bank = 0; priv->bank < MAX_BANKS; priv->bank++) {
+ phys_addr_t phys_addr = virt_to_phys(addr);
+
+ cspr = ifc_in32(&gregs->cspr_cs[priv->bank].cspr);
+ csor = ifc_in32(&gregs->csor_cs[priv->bank].csor);
+
+ if ((cspr & CSPR_V) && (cspr & CSPR_MSEL) == CSPR_MSEL_NAND &&
+ (cspr & CSPR_BA) == CSPR_PHYS_ADDR(phys_addr))
+ break;
+ }
+
+ if (priv->bank >= MAX_BANKS) {
+ printf("%s: address did not match any "
+ "chip selects\n", __func__);
+ kfree(priv);
+ return -ENODEV;
+ }
+
+ nand = &priv->chip;
+ mtd = nand_to_mtd(nand);
+
+ ifc_ctrl->chips[priv->bank] = priv;
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+
+ nand->write_buf = fsl_ifc_write_buf;
+ nand->read_buf = fsl_ifc_read_buf;
+ nand->select_chip = fsl_ifc_select_chip;
+ nand->cmdfunc = fsl_ifc_cmdfunc;
+ nand->waitfunc = fsl_ifc_wait;
+
+ /* set up bad block table descriptors */
+ nand->bbt_td = &bbt_main_descr;
+ nand->bbt_md = &bbt_mirror_descr;
+
+ /* set up nand options */
+ nand->options = NAND_NO_SUBPAGE_WRITE;
+ nand->bbt_options = NAND_BBT_USE_FLASH;
+
+ if (cspr & CSPR_PORT_SIZE_16) {
+ nand->read_byte = fsl_ifc_read_byte16;
+ nand->options |= NAND_BUSWIDTH_16;
+ } else {
+ nand->read_byte = fsl_ifc_read_byte;
+ }
+
+ nand->controller = &ifc_ctrl->controller;
+ nand_set_controller_data(nand, priv);
+
+ nand->ecc.read_page = fsl_ifc_read_page;
+ nand->ecc.write_page = fsl_ifc_write_page;
+
+ /* Hardware generates ECC per 512 Bytes */
+ nand->ecc.size = 512;
+ nand->ecc.bytes = 8;
+
+ switch (csor & CSOR_NAND_PGS_MASK) {
+ case CSOR_NAND_PGS_512:
+ if (nand->options & NAND_BUSWIDTH_16) {
+ layout = &oob_512_16bit_ecc4;
+ } else {
+ layout = &oob_512_8bit_ecc4;
+
+ /* Avoid conflict with bad block marker */
+ bbt_main_descr.offs = 0;
+ bbt_mirror_descr.offs = 0;
+ }
+
+ nand->ecc.strength = 4;
+ priv->bufnum_mask = 15;
+ break;
+
+ case CSOR_NAND_PGS_2K:
+ layout = &oob_2048_ecc4;
+ nand->ecc.strength = 4;
+ priv->bufnum_mask = 3;
+ break;
+
+ case CSOR_NAND_PGS_4K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_4096_ecc4;
+ nand->ecc.strength = 4;
+ } else {
+ layout = &oob_4096_ecc8;
+ nand->ecc.strength = 8;
+ nand->ecc.bytes = 16;
+ }
+
+ priv->bufnum_mask = 1;
+ break;
+
+ case CSOR_NAND_PGS_8K:
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+ CSOR_NAND_ECC_MODE_4) {
+ layout = &oob_8192_ecc4;
+ nand->ecc.strength = 4;
+ } else {
+ layout = &oob_8192_ecc8;
+ nand->ecc.strength = 8;
+ nand->ecc.bytes = 16;
+ }
+
+ priv->bufnum_mask = 0;
+ break;
+
+ default:
+ printf("ifc nand: bad csor %#x: bad page size\n", csor);
+ return -ENODEV;
+ }
+
+ /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+ if (csor & CSOR_NAND_ECC_DEC_EN) {
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.layout = layout;
+ } else {
+ nand->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ ver = ifc_in32(&gregs->ifc_rev);
+ if (ver >= FSL_IFC_V1_1_0)
+ ret = fsl_ifc_sram_init(priv, ver);
+ if (ret)
+ return ret;
+
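+ /*
+ * IFC v2 appears to provide twice the buffer SRAM, so twice as
+ * many page buffers are available per chip select; widen the
+ * buffer-number mask accordingly.
+ */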
+ if (ver >= FSL_IFC_V2_0_0)
+ priv->bufnum_mask = (priv->bufnum_mask * 2) + 1;
+
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return ret;
+
+ ret = nand_register(devnum, mtd);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+#ifndef CONFIG_SYS_NAND_BASE_LIST
+#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE }
+#endif
+
+static unsigned long base_address[CONFIG_SYS_MAX_NAND_DEVICE] =
+ CONFIG_SYS_NAND_BASE_LIST;
+
+void board_nand_init(void)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++)
+ fsl_ifc_chip_init(i, (u8 *)base_address[i]);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_spl.c b/roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_spl.c
new file mode 100644
index 000000000..b7e37416a
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/fsl_ifc_spl.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NAND boot for Freescale Integrated Flash Controller, NAND FCM
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Author: Dipen Dudhat <dipen.dudhat@freescale.com>
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <asm/io.h>
+#include <fsl_ifc.h>
+#include <part.h>
+#include <linux/mtd/rawnand.h>
+#ifdef CONFIG_CHAIN_OF_TRUST
+#include <fsl_validate.h>
+#endif
+
+static inline int is_blank(uchar *addr, int page_size)
+{
+ int i;
+
+ for (i = 0; i < page_size; i++) {
+ if (__raw_readb(&addr[i]) != 0xff)
+ return 0;
+ }
+
+ /*
+ * For the SPL, don't worry about uncorrectable errors
+ * where the main area is all FFs but shouldn't be.
+ */
+ return 1;
+}
+
+/* returns nonzero if entire page is blank */
+static inline int check_read_ecc(uchar *buf, u32 *eccstat,
+ unsigned int bufnum, int page_size)
+{
+ u32 reg = eccstat[bufnum / 4];
+ int errors = (reg >> ((3 - bufnum % 4) * 8)) & 0xf;
+
+ if (errors == 0xf) { /* uncorrectable */
+ /* Blank pages fail hw ECC checks */
+ if (is_blank(buf, page_size))
+ return 1;
+
+ puts("ecc error\n");
+ for (;;)
+ ;
+ }
+
+ return 0;
+}
+
+static inline struct fsl_ifc_runtime *runtime_regs_address(void)
+{
+ struct fsl_ifc regs = {(void *)CONFIG_SYS_IFC_ADDR, NULL};
+ int ver = 0;
+
+ ver = ifc_in32(&regs.gregs->ifc_rev);
+ if (ver >= FSL_IFC_V2_0_0)
+ regs.rregs = (void *)CONFIG_SYS_IFC_ADDR + IFC_RREGS_64KOFFSET;
+ else
+ regs.rregs = (void *)CONFIG_SYS_IFC_ADDR + IFC_RREGS_4KOFFSET;
+
+ return regs.rregs;
+}
+
+static inline void nand_wait(uchar *buf, int bufnum, int page_size)
+{
+ struct fsl_ifc_runtime *ifc = runtime_regs_address();
+ u32 status;
+ u32 eccstat[8];
+ int bufperpage = page_size / 512;
+ int bufnum_end, i;
+
+ bufnum *= bufperpage;
+ bufnum_end = bufnum + bufperpage - 1;
+
+ do {
+ status = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
+ } while (!(status & IFC_NAND_EVTER_STAT_OPC));
+
+ if (status & IFC_NAND_EVTER_STAT_FTOER) {
+ puts("flash time out error\n");
+ for (;;)
+ ;
+ }
+
+ for (i = bufnum / 4; i <= bufnum_end / 4; i++)
+ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
+
+ for (i = bufnum; i <= bufnum_end; i++) {
+ if (check_read_ecc(buf, eccstat, i, page_size))
+ break;
+ }
+
+ ifc_out32(&ifc->ifc_nand.nand_evter_stat, status);
+}
+
+static inline int bad_block(uchar *marker, int port_size)
+{
+ if (port_size == 8)
+ return __raw_readb(marker) != 0xff;
+ else
+ return __raw_readw((u16 *)marker) != 0xffff;
+}
+
+int nand_spl_load_image(uint32_t offs, unsigned int uboot_size, void *vdst)
+{
+ struct fsl_ifc_fcm *gregs = (void *)CONFIG_SYS_IFC_ADDR;
+ struct fsl_ifc_runtime *ifc = NULL;
+ uchar *buf = (uchar *)CONFIG_SYS_NAND_BASE;
+ int page_size;
+ int port_size;
+ int pages_per_blk;
+ int blk_size;
+ int bad_marker = 0;
+ int bufnum_mask, bufnum, ver = 0;
+
+ int csor, cspr;
+ int pos = 0;
+ int j = 0;
+
+ int sram_addr;
+ int pg_no;
+ uchar *dst = vdst;
+
+ ifc = runtime_regs_address();
+
+ /* Get NAND Flash configuration */
+ csor = CONFIG_SYS_NAND_CSOR;
+ cspr = CONFIG_SYS_NAND_CSPR;
+
+ port_size = (cspr & CSPR_PORT_SIZE_16) ? 16 : 8;
+
+ if ((csor & CSOR_NAND_PGS_MASK) == CSOR_NAND_PGS_8K) {
+ page_size = 8192;
+ bufnum_mask = 0x0;
+ } else if ((csor & CSOR_NAND_PGS_MASK) == CSOR_NAND_PGS_4K) {
+ page_size = 4096;
+ bufnum_mask = 0x1;
+ } else if ((csor & CSOR_NAND_PGS_MASK) == CSOR_NAND_PGS_2K) {
+ page_size = 2048;
+ bufnum_mask = 0x3;
+ } else {
+ page_size = 512;
+ bufnum_mask = 0xf;
+
+ if (port_size == 8)
+ bad_marker = 5;
+ }
+
+ ver = ifc_in32(&gregs->ifc_rev);
+ if (ver >= FSL_IFC_V2_0_0)
+ bufnum_mask = (bufnum_mask * 2) + 1;
+
+ pages_per_blk =
+ 32 << ((csor & CSOR_NAND_PB_MASK) >> CSOR_NAND_PB_SHIFT);
+
+ blk_size = pages_per_blk * page_size;
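+
+ /*
+ * The PB field encodes 32 * 2^PB pages per block, e.g. PB = 2
+ * gives 128 pages, i.e. a 256KiB block with 2KiB pages
+ * (illustrative values).
+ */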
+
+ /* Open full SRAM mapping for spare area access */
+ ifc_out32(&ifc->ifc_nand.ncfgr, 0x0);
+
+ /* Clear Boot events */
+ ifc_out32(&ifc->ifc_nand.nand_evter_stat, 0xffffffff);
+
+ /* Program FIR/FCR for Large/Small page */
+ if (page_size > 512) {
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_BTRD << IFC_NAND_FIR0_OP4_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fir1, 0x0);
+
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+ } else {
+ ifc_out32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_BTRD << IFC_NAND_FIR0_OP3_SHIFT));
+ ifc_out32(&ifc->ifc_nand.nand_fir1, 0x0);
+
+ ifc_out32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+ }
+
+ /* Program FBCR = 0 for full page read */
+ ifc_out32(&ifc->ifc_nand.nand_fbcr, 0);
+
+ /*
+ * Read and copy U-Boot from the NAND device into SDRAM. In
+ * parallel, check for bad blocks: if one is found, skip it and
+ * continue reading from the next block.
+ */
+ while (pos < uboot_size) {
+ int i = 0;
+ do {
+ pg_no = offs / page_size;
+ bufnum = pg_no & bufnum_mask;
+ sram_addr = bufnum * page_size * 2;
+
+ ifc_out32(&ifc->ifc_nand.row0, pg_no);
+ ifc_out32(&ifc->ifc_nand.col0, 0);
+ /* start read */
+ ifc_out32(&ifc->ifc_nand.nandseq_strt,
+ IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ /* wait for read to complete */
+ nand_wait(&buf[sram_addr], bufnum, page_size);
+
+ /*
+ * If either of the first two pages is marked bad,
+ * continue to the next block.
+ */
+ if (i++ < 2 &&
+ bad_block(&buf[sram_addr + page_size + bad_marker],
+ port_size)) {
+ puts("skipping\n");
+ offs = (offs + blk_size) & ~(blk_size - 1);
+ pos &= ~(blk_size - 1);
+ break;
+ }
+
+ for (j = 0; j < page_size; j++)
+ dst[pos + j] = __raw_readb(&buf[sram_addr + j]);
+
+ pos += page_size;
+ offs += page_size;
+ } while ((offs & (blk_size - 1)) && (pos < uboot_size));
+ }
+
+ return 0;
+}
+
+/*
+ * Main entry point for NAND boot. It's necessary that SDRAM is already
+ * configured and available since this code loads the main U-Boot image
+ * from NAND into SDRAM and starts it from there.
+ */
+void nand_boot(void)
+{
+ __attribute__((noreturn)) void (*uboot)(void);
+ /*
+ * Load U-Boot image from NAND into RAM
+ */
+ nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
+ CONFIG_SYS_NAND_U_BOOT_SIZE,
+ (uchar *)CONFIG_SYS_NAND_U_BOOT_DST);
+
+#ifdef CONFIG_NAND_ENV_DST
+ nand_spl_load_image(CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE,
+ (uchar *)CONFIG_NAND_ENV_DST);
+
+#ifdef CONFIG_ENV_OFFSET_REDUND
+ nand_spl_load_image(CONFIG_ENV_OFFSET_REDUND, CONFIG_ENV_SIZE,
+ (uchar *)CONFIG_NAND_ENV_DST + CONFIG_ENV_SIZE);
+#endif
+#endif
+ /*
+ * Jump to U-Boot image
+ */
+#ifdef CONFIG_SPL_FLUSH_IMAGE
+ /*
+ * Clean d-cache and invalidate i-cache, to
+ * make sure that no stale data is executed.
+ */
+ flush_cache(CONFIG_SYS_NAND_U_BOOT_DST, CONFIG_SYS_NAND_U_BOOT_SIZE);
+#endif
+
+#ifdef CONFIG_CHAIN_OF_TRUST
+ /*
+ * The U-Boot header is appended at the end of the U-Boot image, so
+ * calculate the header address from the image end and the header size.
+ */
+#define CONFIG_U_BOOT_HDR_ADDR \
+ ((CONFIG_SYS_NAND_U_BOOT_START + \
+ CONFIG_SYS_NAND_U_BOOT_SIZE) - \
+ CONFIG_U_BOOT_HDR_SIZE)
+ spl_validate_uboot(CONFIG_U_BOOT_HDR_ADDR,
+ CONFIG_SYS_NAND_U_BOOT_START);
+ /*
+ * If validation fails, spl_validate_uboot() does not return in a
+ * production environment (ITS=1), so U-Boot will not start. In a
+ * development environment (ITS=0 and SB_EN=1), the function may
+ * return on non-fatal failures.
+ */
+#endif
+
+ uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START;
+ uboot();
+}
+
+#ifndef CONFIG_SPL_NAND_INIT
+void nand_init(void)
+{
+}
+
+void nand_deselect(void)
+{
+}
+#endif
diff --git a/roms/u-boot/drivers/mtd/nand/raw/fsl_upm.c b/roms/u-boot/drivers/mtd/nand/raw/fsl_upm.c
new file mode 100644
index 000000000..6c86a7e76
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/fsl_upm.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * FSL UPM NAND driver
+ *
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ */
+
+#include <config.h>
+#include <common.h>
+#include <log.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/fsl_upm.h>
+#include <nand.h>
+
+static void fsl_upm_start_pattern(struct fsl_upm *upm, u32 pat_offset)
+{
+ clrsetbits_be32(upm->mxmr, MxMR_MAD_MSK, MxMR_OP_RUNP | pat_offset);
+ (void)in_be32(upm->mxmr);
+}
+
+static void fsl_upm_end_pattern(struct fsl_upm *upm)
+{
+ clrbits_be32(upm->mxmr, MxMR_OP_RUNP);
+
+ while (in_be32(upm->mxmr) & MxMR_OP_RUNP)
+ eieio();
+}
+
+static void fsl_upm_run_pattern(struct fsl_upm *upm, int width,
+ void __iomem *io_addr, u32 mar)
+{
+ out_be32(upm->mar, mar);
+ (void)in_be32(upm->mar);
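+	/* a dummy access to the chip triggers the pre-programmed UPM pattern */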
+ switch (width) {
+ case 8:
+ out_8(io_addr, 0x0);
+ break;
+ case 16:
+ out_be16(io_addr, 0x0);
+ break;
+ case 32:
+ out_be32(io_addr, 0x0);
+ break;
+ }
+}
+
+static void fun_wait(struct fsl_upm_nand *fun)
+{
+ if (fun->dev_ready) {
+ while (!fun->dev_ready(fun->chip_nr))
+ debug("unexpected busy state\n");
+ } else {
+ /*
+ * If the R/B pin is not connected,
+ * a short delay is necessary.
+ */
+ udelay(1);
+ }
+}
+
+#if CONFIG_SYS_NAND_MAX_CHIPS > 1
+static void fun_select_chip(struct mtd_info *mtd, int chip_nr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_upm_nand *fun = nand_get_controller_data(chip);
+
+ if (chip_nr >= 0) {
+ fun->chip_nr = chip_nr;
+ chip->IO_ADDR_R = chip->IO_ADDR_W =
+ fun->upm.io_addr + fun->chip_offset * chip_nr;
+ } else if (chip_nr == -1) {
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ }
+}
+#endif
+
+static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_upm_nand *fun = nand_get_controller_data(chip);
+ void __iomem *io_addr;
+ u32 mar;
+
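+	/* end the running pattern once the ALE/CLE line that started it drops */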
+ if (!(ctrl & fun->last_ctrl)) {
+ fsl_upm_end_pattern(&fun->upm);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
+ }
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_ALE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+ else if (ctrl & NAND_CLE)
+ fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+ }
+
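+	/* align the byte with the MSBs of MAR for the configured port width */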
+ mar = cmd << (32 - fun->width);
+ io_addr = fun->upm.io_addr;
+#if CONFIG_SYS_NAND_MAX_CHIPS > 1
+ if (fun->chip_nr > 0) {
+ io_addr += fun->chip_offset * fun->chip_nr;
+ if (fun->upm_mar_chip_offset)
+ mar |= fun->upm_mar_chip_offset * fun->chip_nr;
+ }
+#endif
+ fsl_upm_run_pattern(&fun->upm, fun->width, io_addr, mar);
+
+ /*
+	 * Some boards/chips need this. At least the MPC8360E-RDK
+	 * does; probably a quirky chip, since there is no need for
+	 * it on an MPC8555E with a Samsung K9F1G08U0A. Usually there
+	 * are 0-2 unexpected busy states per block read.
+ */
+ if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
+ fun_wait(fun);
+}
+
+static u8 upm_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ return in_8(chip->IO_ADDR_R);
+}
+
+static void upm_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_upm_nand *fun = nand_get_controller_data(chip);
+
+ for (i = 0; i < len; i++) {
+ out_8(chip->IO_ADDR_W, buf[i]);
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
+ fun_wait(fun);
+ }
+
+ if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
+ fun_wait(fun);
+}
+
+static void upm_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ for (i = 0; i < len; i++)
+ buf[i] = in_8(chip->IO_ADDR_R);
+}
+
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_upm_nand *fun = nand_get_controller_data(chip);
+
+ return fun->dev_ready(fun->chip_nr);
+}
+
+int fsl_upm_nand_init(struct nand_chip *chip, struct fsl_upm_nand *fun)
+{
+ if (fun->width != 8 && fun->width != 16 && fun->width != 32)
+ return -ENOSYS;
+
+ fun->last_ctrl = NAND_CLE;
+
+ nand_set_controller_data(chip, fun);
+ chip->chip_delay = fun->chip_delay;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->cmd_ctrl = fun_cmd_ctrl;
+#if CONFIG_SYS_NAND_MAX_CHIPS > 1
+ chip->select_chip = fun_select_chip;
+#endif
+ chip->read_byte = upm_nand_read_byte;
+ chip->read_buf = upm_nand_read_buf;
+ chip->write_buf = upm_nand_write_buf;
+ if (fun->dev_ready)
+ chip->dev_ready = nand_dev_ready;
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/fsmc_nand.c b/roms/u-boot/drivers/mtd/nand/raw/fsmc_nand.c
new file mode 100644
index 000000000..1f4c74f0f
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/fsmc_nand.c
@@ -0,0 +1,518 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2010
+ * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
+ *
+ * (C) Copyright 2012
+ * Amit Virdi, ST Microelectronics, amit.virdi@st.com.
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/fsmc_nand.h>
+#include <asm/arch/hardware.h>
+
+static u32 fsmc_version;
+static struct fsmc_regs *const fsmc_regs_p = (struct fsmc_regs *)
+ CONFIG_SYS_FSMC_BASE;
+
+/*
+ * ECC4 and ECC1 provide 13 bytes and 3 bytes of ECC, respectively, for each
+ * 512 bytes of data. ECC4 can correct up to 8 bits in 512 bytes of data,
+ * while ECC1 can correct 1 bit in 512 bytes.
+ */
+
+static struct nand_ecclayout fsmc_ecc4_lp_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 1}
+ }
+};
+
+/*
+ * ECC4 layout for NAND with a 4096-byte page size and a 224-byte OOB size.
+ * 13*8 bytes of the OOB are reserved for ECC, bytes 0 and 1 are reserved
+ * for the bad block marker, and 118 bytes are free for use.
+ */
+static struct nand_ecclayout fsmc_ecc4_224_layout = {
+ .eccbytes = 104,
+ .eccpos = { 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14,
+ 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30,
+ 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46,
+ 50, 51, 52, 53, 54, 55, 56,
+ 57, 58, 59, 60, 61, 62,
+ 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75, 76, 77, 78,
+ 82, 83, 84, 85, 86, 87, 88,
+ 89, 90, 91, 92, 93, 94,
+ 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110,
+ 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126
+ },
+ .oobfree = {
+ {.offset = 15, .length = 3},
+ {.offset = 31, .length = 3},
+ {.offset = 47, .length = 3},
+ {.offset = 63, .length = 3},
+ {.offset = 79, .length = 3},
+ {.offset = 95, .length = 3},
+ {.offset = 111, .length = 3},
+ {.offset = 127, .length = 97}
+ }
+};
+
+/*
+ * ECC placement definitions in oobfree-type format.
+ * There are 13 bytes of ECC for every 512-byte block, and they have to be
+ * read consecutively, immediately after the 512-byte data block, for the
+ * hardware to generate the error bit offsets within the 512 bytes of data.
+ * Managing the ECC bytes this way makes it easier for software to read the
+ * ECC bytes consecutively with the data bytes. The scheme is similar to the
+ * oobfree structure already maintained in the U-Boot NAND driver.
+ */
+static struct fsmc_eccplace fsmc_eccpl_lp = {
+ .eccplace = {
+ {.offset = 2, .length = 13},
+ {.offset = 18, .length = 13},
+ {.offset = 34, .length = 13},
+ {.offset = 50, .length = 13},
+ {.offset = 66, .length = 13},
+ {.offset = 82, .length = 13},
+ {.offset = 98, .length = 13},
+ {.offset = 114, .length = 13}
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc4_sp_layout = {
+ .eccbytes = 13,
+ .eccpos = { 0, 1, 2, 3, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14
+ },
+ .oobfree = {
+ {.offset = 15, .length = 1},
+ }
+};
+
+static struct fsmc_eccplace fsmc_eccpl_sp = {
+ .eccplace = {
+ {.offset = 0, .length = 4},
+ {.offset = 6, .length = 9}
+ }
+};
+
+static struct nand_ecclayout fsmc_ecc1_layout = {
+ .eccbytes = 24,
+ .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
+ 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
+ .oobfree = {
+ {.offset = 8, .length = 8},
+ {.offset = 24, .length = 8},
+ {.offset = 40, .length = 8},
+ {.offset = 56, .length = 8},
+ {.offset = 72, .length = 8},
+ {.offset = 88, .length = 8},
+ {.offset = 104, .length = 8},
+ {.offset = 120, .length = 8}
+ }
+};
+
+/* Count the number of 0 bits in buff, up to a maximum of max_bits */
+static int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+ int k, written_bits = 0;
+
+ for (k = 0; k < size; k++) {
+ written_bits += hweight8(~buff[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
+
+static void fsmc_nand_hwcontrol(struct mtd_info *mtd, int cmd, uint ctrl)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ ulong IO_ADDR_W;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ IO_ADDR_W = (ulong)this->IO_ADDR_W;
+
+ IO_ADDR_W &= ~(CONFIG_SYS_NAND_CLE | CONFIG_SYS_NAND_ALE);
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_CLE;
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= CONFIG_SYS_NAND_ALE;
+
+ if (ctrl & NAND_NCE) {
+ writel(readl(&fsmc_regs_p->pc) |
+ FSMC_ENABLE, &fsmc_regs_p->pc);
+ } else {
+ writel(readl(&fsmc_regs_p->pc) &
+ ~FSMC_ENABLE, &fsmc_regs_p->pc);
+ }
+ this->IO_ADDR_W = (void *)IO_ADDR_W;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+static int fsmc_bch8_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ /* The calculated ecc is actually the correction index in data */
+ u32 err_idx[8];
+ u32 num_err, i;
+ u32 ecc1, ecc2, ecc3, ecc4;
+
+ num_err = (readl(&fsmc_regs_p->sts) >> 10) & 0xF;
+
+ if (likely(num_err == 0))
+ return 0;
+
+ if (unlikely(num_err > 8)) {
+ /*
+		 * This is a temporary erase check. Reading a newly erased
+		 * page would result in an ECC error, because the OOB data
+		 * is also erased to 0xFF and the ECC calculated for all-0xFF
+		 * data is not FF..FF.
+		 * This is a workaround to skip performing correction when
+		 * the data is all 0xFF.
+		 *
+		 * Logic:
+		 * For every page, each bit written as 0 is counted until
+		 * the count exceeds 8 (the maximum correction capability
+		 * of the FSMC for each 512 + 13 bytes).
+ */
+
+ int bits_ecc = count_written_bits(read_ecc, 13, 8);
+ int bits_data = count_written_bits(dat, 512, 8);
+
+ if ((bits_ecc + bits_data) <= 8) {
+ if (bits_data)
+ memset(dat, 0xff, 512);
+ return bits_data + bits_ecc;
+ }
+
+ return -EBADMSG;
+ }
+
+ ecc1 = readl(&fsmc_regs_p->ecc1);
+ ecc2 = readl(&fsmc_regs_p->ecc2);
+ ecc3 = readl(&fsmc_regs_p->ecc3);
+ ecc4 = readl(&fsmc_regs_p->sts);
+
+ err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+ err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+ err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+ err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+ err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+ err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+ err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+ err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
+
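+	/*
+	 * err_idx ^= 3 flips the two LSBs of each reported bit index,
+	 * presumably converting the ECC engine's bit numbering to the data
+	 * buffer's. Indices at or beyond 512 * 8 fall into the ECC bytes
+	 * themselves and need no correction in the data buffer.
+	 */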
+ i = 0;
+ while (i < num_err) {
+ err_idx[i] ^= 3;
+
+ if (err_idx[i] < 512 * 8)
+ __change_bit(err_idx[i], dat);
+
+ i++;
+ }
+
+ return num_err;
+}
+
+static int fsmc_read_hwecc(struct mtd_info *mtd,
+ const u_char *data, u_char *ecc)
+{
+ u_int ecc_tmp;
+ int timeout = CONFIG_SYS_HZ;
+ ulong start;
+
+ switch (fsmc_version) {
+ case FSMC_VER8:
+ start = get_timer(0);
+ while (get_timer(start) < timeout) {
+ /*
+ * Busy waiting for ecc computation
+ * to finish for 512 bytes
+ */
+ if (readl(&fsmc_regs_p->sts) & FSMC_CODE_RDY)
+ break;
+ }
+
+ ecc_tmp = readl(&fsmc_regs_p->ecc1);
+ ecc[0] = (u_char) (ecc_tmp >> 0);
+ ecc[1] = (u_char) (ecc_tmp >> 8);
+ ecc[2] = (u_char) (ecc_tmp >> 16);
+ ecc[3] = (u_char) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(&fsmc_regs_p->ecc2);
+ ecc[4] = (u_char) (ecc_tmp >> 0);
+ ecc[5] = (u_char) (ecc_tmp >> 8);
+ ecc[6] = (u_char) (ecc_tmp >> 16);
+ ecc[7] = (u_char) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(&fsmc_regs_p->ecc3);
+ ecc[8] = (u_char) (ecc_tmp >> 0);
+ ecc[9] = (u_char) (ecc_tmp >> 8);
+ ecc[10] = (u_char) (ecc_tmp >> 16);
+ ecc[11] = (u_char) (ecc_tmp >> 24);
+
+ ecc_tmp = readl(&fsmc_regs_p->sts);
+ ecc[12] = (u_char) (ecc_tmp >> 16);
+ break;
+
+ default:
+ ecc_tmp = readl(&fsmc_regs_p->ecc1);
+ ecc[0] = (u_char) (ecc_tmp >> 0);
+ ecc[1] = (u_char) (ecc_tmp >> 8);
+ ecc[2] = (u_char) (ecc_tmp >> 16);
+ break;
+ }
+
+ return 0;
+}
+
+void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ writel(readl(&fsmc_regs_p->pc) & ~FSMC_ECCPLEN_256,
+ &fsmc_regs_p->pc);
+ writel(readl(&fsmc_regs_p->pc) & ~FSMC_ECCEN,
+ &fsmc_regs_p->pc);
+ writel(readl(&fsmc_regs_p->pc) | FSMC_ECCEN,
+ &fsmc_regs_p->pc);
+}
+
+/*
+ * fsmc_read_page_hwecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * This routine is needed for FSMC version 8, as reading from the NAND chip
+ * has to be performed in a strict sequence:
+ * data (512 bytes) -> ecc (13 bytes)
+ * After this read, the FSMC hardware generates and reports the erroneous
+ * data bits (up to a maximum of 8 bits).
+ */
+static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ struct fsmc_eccplace *fsmc_eccpl;
+ int i, j, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ int off, len, group = 0;
+ uint8_t oob[13] __attribute__ ((aligned (2)));
+
+ /* Differentiate between small and large page ecc place definitions */
+ if (mtd->writesize == 512)
+ fsmc_eccpl = &fsmc_eccpl_sp;
+ else
+ fsmc_eccpl = &fsmc_eccpl_lp;
+
+ for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ chip->read_buf(mtd, p, eccsize);
+
+ for (j = 0; j < eccbytes;) {
+ off = fsmc_eccpl->eccplace[group].offset;
+ len = fsmc_eccpl->eccplace[group].length;
+ group++;
+
+ /*
+			 * the length is intentionally rounded up to a
+			 * multiple of 2 to read at least 13 bytes even on
+			 * 16-bit NAND devices
+ */
+ if (chip->options & NAND_BUSWIDTH_16)
+ len = roundup(len, 2);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
+ chip->read_buf(mtd, oob + j, len);
+ j += len;
+ }
+
+ memcpy(&ecc_code[i], oob, 13);
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i],
+ &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+
+ return 0;
+}
+
+#ifndef CONFIG_SPL_BUILD
+/*
+ * fsmc_nand_switch_ecc - switch the ECC operation between different engines
+ *
+ * @eccstrength - the number of bits that could be corrected
+ * (1 - HW, 4 - SW BCH4)
+ */
+int fsmc_nand_switch_ecc(uint32_t eccstrength)
+{
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int err;
+
+	/*
+	 * This function is only called on SPEAr600 platforms, which support
+	 * 1-bit HW ECC. The BCH8 HW ECC (FSMC_VER8) of the ST-Ericsson
+	 * Nomadik SoC is currently not supported by fsmc_nand_switch_ecc(),
+	 * as it does not need to switch to a different ECC layout.
+	 */
+ mtd = get_nand_dev_by_index(nand_curr_device);
+ nand = mtd_to_nand(mtd);
+
+ /* Setup the ecc configurations again */
+ if (eccstrength == 1) {
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ nand->ecc.layout = &fsmc_ecc1_layout;
+ nand->ecc.calculate = fsmc_read_hwecc;
+ nand->ecc.correct = nand_correct_data;
+ } else if (eccstrength == 4) {
+ /*
+ * .calculate .correct and .bytes will be set in
+ * nand_scan_tail()
+ */
+ nand->ecc.mode = NAND_ECC_SOFT_BCH;
+ nand->ecc.strength = 4;
+ nand->ecc.layout = NULL;
+	} else {
+		printf("Error: ECC strength %d not supported!\n", eccstrength);
+		return -EINVAL;
+	}
+
+ /* Update NAND handling after ECC mode switch */
+ err = nand_scan_tail(mtd);
+
+ return err;
+}
+#endif /* CONFIG_SPL_BUILD */
+
+int fsmc_nand_init(struct nand_chip *nand)
+{
+ static int chip_nr;
+ struct mtd_info *mtd;
+ u32 peripid2 = readl(&fsmc_regs_p->peripid2);
+
+ fsmc_version = (peripid2 >> FSMC_REVISION_SHFT) &
+ FSMC_REVISION_MSK;
+
+ writel(readl(&fsmc_regs_p->ctrl) | FSMC_WP, &fsmc_regs_p->ctrl);
+
+#if defined(CONFIG_SYS_FSMC_NAND_16BIT)
+ writel(FSMC_DEVWID_16 | FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON,
+ &fsmc_regs_p->pc);
+#elif defined(CONFIG_SYS_FSMC_NAND_8BIT)
+ writel(FSMC_DEVWID_8 | FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON,
+ &fsmc_regs_p->pc);
+#else
+#error Please define CONFIG_SYS_FSMC_NAND_16BIT or CONFIG_SYS_FSMC_NAND_8BIT
+#endif
+ writel(readl(&fsmc_regs_p->pc) | FSMC_TCLR_1 | FSMC_TAR_1,
+ &fsmc_regs_p->pc);
+ writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
+ &fsmc_regs_p->comm);
+ writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
+ &fsmc_regs_p->attrib);
+
+ nand->options = 0;
+#if defined(CONFIG_SYS_FSMC_NAND_16BIT)
+ nand->options |= NAND_BUSWIDTH_16;
+#endif
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = 512;
+ nand->ecc.calculate = fsmc_read_hwecc;
+ nand->ecc.hwctl = fsmc_enable_hwecc;
+ nand->cmd_ctrl = fsmc_nand_hwcontrol;
+ nand->IO_ADDR_R = nand->IO_ADDR_W =
+ (void __iomem *)CONFIG_SYS_NAND_BASE;
+ nand->badblockbits = 7;
+
+ mtd = nand_to_mtd(nand);
+
+ switch (fsmc_version) {
+ case FSMC_VER8:
+ nand->ecc.bytes = 13;
+ nand->ecc.strength = 8;
+ nand->ecc.correct = fsmc_bch8_correct_data;
+ nand->ecc.read_page = fsmc_read_page_hwecc;
+ if (mtd->writesize == 512)
+ nand->ecc.layout = &fsmc_ecc4_sp_layout;
+ else {
+ if (mtd->oobsize == 224)
+ nand->ecc.layout = &fsmc_ecc4_224_layout;
+ else
+ nand->ecc.layout = &fsmc_ecc4_lp_layout;
+ }
+
+ break;
+ default:
+ nand->ecc.bytes = 3;
+ nand->ecc.strength = 1;
+ nand->ecc.layout = &fsmc_ecc1_layout;
+ nand->ecc.correct = nand_correct_data;
+ break;
+ }
+
+ /* Detect NAND chips */
+ if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
+ return -ENXIO;
+
+ if (nand_scan_tail(mtd))
+ return -ENXIO;
+
+ if (nand_register(chip_nr++, mtd))
+ return -ENXIO;
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/kb9202_nand.c b/roms/u-boot/drivers/mtd/nand/raw/kb9202_nand.c
new file mode 100644
index 000000000..0f68f1cd8
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/kb9202_nand.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2006
+ * KwikByte <kb9200_dev@kwikbyte.com>
+ *
+ * (C) Copyright 2009
+ * Matthias Kaehlcke <matthias@kaehlcke.net>
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/AT91RM9200.h>
+#include <asm/arch/hardware.h>
+
+#include <nand.h>
+
+/*
+ * hardware specific access to control-lines
+ */
+
+#define MASK_ALE (1 << 22) /* our ALE is A22 */
+#define MASK_CLE (1 << 21) /* our CLE is A21 */
+
+#define KB9202_NAND_NCE (1 << 28) /* EN* on D28 */
+#define KB9202_NAND_BUSY (1 << 29) /* RB* on D29 */
+
+#define KB9202_SMC2_NWS (1 << 2)
+#define KB9202_SMC2_TDF (1 << 8)
+#define KB9202_SMC2_RWSETUP (1 << 24)
+#define KB9202_SMC2_RWHOLD (1 << 29)
+
+/*
+ * Board-specific function to access device control signals
+ */
+static void kb9202_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ ulong IO_ADDR_W = (ulong) this->IO_ADDR_W;
+
+ /* clear ALE and CLE bits */
+ IO_ADDR_W &= ~(MASK_ALE | MASK_CLE);
+
+ if (ctrl & NAND_CLE)
+ IO_ADDR_W |= MASK_CLE;
+
+ if (ctrl & NAND_ALE)
+ IO_ADDR_W |= MASK_ALE;
+
+ this->IO_ADDR_W = (void *) IO_ADDR_W;
+
+ if (ctrl & NAND_NCE)
+ writel(KB9202_NAND_NCE, AT91C_PIOC_CODR);
+ else
+ writel(KB9202_NAND_NCE, AT91C_PIOC_SODR);
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+
+/*
+ * Board-specific function to access the device ready signal.
+ */
+static int kb9202_nand_ready(struct mtd_info *mtd)
+{
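+	/* the R/B* line is high when the device is ready */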
+ return readl(AT91C_PIOC_PDSR) & KB9202_NAND_BUSY;
+}
+
+
+/*
+ * Board-specific NAND init. Copied from include/linux/mtd/nand.h for reference.
+ *
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the flash device
+ * @hwcontrol: [BOARDSPECIFIC] hardware-specific function for accessing control lines
+ * @dev_ready: [BOARDSPECIFIC] hardware-specific function for accessing the device ready/busy line.
+ * If set to NULL, no access to ready/busy is available and the ready/busy information
+ * is read from the chip status register.
+ * @enable_hwecc: [BOARDSPECIFIC] function to enable (reset) the hardware ECC generator. Must only
+ * be provided if hardware ECC is available.
+ * @eccmode: [BOARDSPECIFIC] mode of ECC, see defines
+ * @chip_delay: [BOARDSPECIFIC] chip-dependent delay for transferring data from array to read regs (tR)
+ * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
+ * special functionality. See the defines for further explanation
+*/
+/*
+ * This routine initializes controller and GPIOs.
+ */
+int board_nand_init(struct nand_chip *nand)
+{
+ unsigned int value;
+
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->cmd_ctrl = kb9202_nand_hwcontrol;
+ nand->dev_ready = kb9202_nand_ready;
+
+ /* in case running outside of bootloader */
+ writel(1 << AT91C_ID_PIOC, AT91C_PMC_PCER);
+
+ /* setup nand flash access (allow ample margin) */
+ /* 4 wait states, 1 setup, 1 hold, 1 float for 8-bit device */
+ writel(AT91C_SMC2_WSEN | KB9202_SMC2_NWS | KB9202_SMC2_TDF |
+ AT91C_SMC2_DBW_8 | KB9202_SMC2_RWSETUP | KB9202_SMC2_RWHOLD,
+ AT91C_SMC_CSR3);
+
+ /* enable internal NAND controller */
+ value = readl(AT91C_EBI_CSA);
+ value |= AT91C_EBI_CS3A_SMC_SmartMedia;
+ writel(value, AT91C_EBI_CSA);
+
+ /* enable SMOE/SMWE */
+ writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_ASR);
+ writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_PDR);
+ writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_OER);
+
+ /* set NCE to high */
+ writel(KB9202_NAND_NCE, AT91C_PIOC_SODR);
+
+ /* disable output on pin connected to the busy line of the NAND */
+ writel(KB9202_NAND_BUSY, AT91C_PIOC_ODR);
+
+ /* enable the PIO to control NCE and BUSY */
+ writel(KB9202_NAND_NCE | KB9202_NAND_BUSY, AT91C_PIOC_PER);
+
+ /* enable output for NCE */
+ writel(KB9202_NAND_NCE, AT91C_PIOC_OER);
+
+	return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/kirkwood_nand.c b/roms/u-boot/drivers/mtd/nand/raw/kirkwood_nand.c
new file mode 100644
index 000000000..0757fa840
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/kirkwood_nand.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2009
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/soc.h>
+#include <asm/arch/mpp.h>
+#include <nand.h>
+
+/* NAND flash SoC registers */
+struct kwnandf_registers {
+ u32 rd_params; /* 0x10418 */
+ u32 wr_param; /* 0x1041c */
+ u8 pad[0x10470 - 0x1041c - 4];
+ u32 ctrl; /* 0x10470 */
+};
+
+static struct kwnandf_registers *nf_reg =
+ (struct kwnandf_registers *)KW_NANDF_BASE;
+
+static u32 nand_mpp_backup[9] = { 0 };
+
+/*
+ * hardware specific access to control-lines/bits
+ */
+#define NAND_ACTCEBOOT_BIT 0x02
+
+static void kw_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *nc = mtd_to_nand(mtd);
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ offs = (1 << 0); /* Commands with A[1:0] == 01 */
+ else if (ctrl & NAND_ALE)
+ offs = (1 << 1); /* Addresses with A[1:0] == 10 */
+ else
+ return;
+
+ writeb(cmd, nc->IO_ADDR_W + offs);
+}
+
+void kw_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ u32 data;
+ static const u32 nand_config[] = {
+ MPP0_NF_IO2,
+ MPP1_NF_IO3,
+ MPP2_NF_IO4,
+ MPP3_NF_IO5,
+ MPP4_NF_IO6,
+ MPP5_NF_IO7,
+ MPP18_NF_IO0,
+ MPP19_NF_IO1,
+ 0
+ };
+
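+	/*
+	 * Route the shared MPP pins to the NAND interface while the chip is
+	 * selected, saving the previous setup; restore it on deselect.
+	 */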
+ if (chip >= 0)
+ kirkwood_mpp_conf(nand_config, nand_mpp_backup);
+ else
+ kirkwood_mpp_conf(nand_mpp_backup, NULL);
+
+ data = readl(&nf_reg->ctrl);
+ data |= NAND_ACTCEBOOT_BIT;
+ writel(data, &nf_reg->ctrl);
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+ nand->options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING;
+#if defined(CONFIG_SYS_NAND_NO_SUBPAGE_WRITE)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+#endif
+#if defined(CONFIG_NAND_ECC_BCH)
+ nand->ecc.mode = NAND_ECC_SOFT_BCH;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif
+ nand->cmd_ctrl = kw_nand_hwcontrol;
+ nand->chip_delay = 40;
+ nand->select_chip = kw_nand_select_chip;
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/kmeter1_nand.c b/roms/u-boot/drivers/mtd/nand/raw/kmeter1_nand.c
new file mode 100644
index 000000000..cf33f2863
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/kmeter1_nand.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2009
+ * Heiko Schocher, DENX Software Engineering, hs@denx.de
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+#define CONFIG_NAND_MODE_REG (void *)(CONFIG_SYS_NAND_BASE + 0x20000)
+#define CONFIG_NAND_DATA_REG (void *)(CONFIG_SYS_NAND_BASE + 0x30000)
+
+#define read_mode() in_8(CONFIG_NAND_MODE_REG)
+#define write_mode(val) out_8(CONFIG_NAND_MODE_REG, val)
+#define read_data() in_8(CONFIG_NAND_DATA_REG)
+#define write_data(val) out_8(CONFIG_NAND_DATA_REG, val)
+
+#define KPN_RDY2 (1 << 7)
+#define KPN_RDY1 (1 << 6)
+#define KPN_WPN (1 << 4)
+#define KPN_CE2N (1 << 3)
+#define KPN_CE1N (1 << 2)
+#define KPN_ALE (1 << 1)
+#define KPN_CLE (1 << 0)
+
+#define KPN_DEFAULT_CHIP_DELAY 50
+
+static int kpn_chip_ready(void)
+{
+ if (read_mode() & KPN_RDY1)
+ return 1;
+
+ return 0;
+}
+
+static void kpn_wait_rdy(void)
+{
+ int cnt = 1000000;
+
+ while (--cnt && !kpn_chip_ready())
+ udelay(1);
+
+ if (!cnt)
+		printf("timeout while waiting for RDY\n");
+}
+
+static void kpn_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ u8 reg_val = read_mode();
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ reg_val = reg_val & ~(KPN_ALE + KPN_CLE);
+
+ if (ctrl & NAND_CLE)
+ reg_val = reg_val | KPN_CLE;
+ if (ctrl & NAND_ALE)
+ reg_val = reg_val | KPN_ALE;
+ if (ctrl & NAND_NCE)
+ reg_val = reg_val & ~KPN_CE1N;
+ else
+ reg_val = reg_val | KPN_CE1N;
+
+ write_mode(reg_val);
+ }
+ if (cmd != NAND_CMD_NONE)
+ write_data(cmd);
+
+ /* wait until flash is ready */
+ kpn_wait_rdy();
+}
+
+static u_char kpn_nand_read_byte(struct mtd_info *mtd)
+{
+ return read_data();
+}
+
+static void kpn_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ write_data(buf[i]);
+ kpn_wait_rdy();
+ }
+}
+
+static void kpn_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = read_data();
+}
+
+static int kpn_nand_dev_ready(struct mtd_info *mtd)
+{
+ kpn_wait_rdy();
+
+ return 1;
+}
+
+int board_nand_init(struct nand_chip *nand)
+{
+#if defined(CONFIG_NAND_ECC_BCH)
+ nand->ecc.mode = NAND_ECC_SOFT_BCH;
+#else
+ nand->ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ /* Reference hardware control function */
+ nand->cmd_ctrl = kpn_nand_hwcontrol;
+ nand->read_byte = kpn_nand_read_byte;
+ nand->write_buf = kpn_nand_write_buf;
+ nand->read_buf = kpn_nand_read_buf;
+ nand->dev_ready = kpn_nand_dev_ready;
+ nand->chip_delay = KPN_DEFAULT_CHIP_DELAY;
+
+ /* reset mode register */
+ write_mode(KPN_CE1N + KPN_CE2N + KPN_WPN);
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_mlc.c b/roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_mlc.c
new file mode 100644
index 000000000..b3232ed59
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_mlc.c
@@ -0,0 +1,766 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * LPC32xx MLC NAND flash controller driver
+ *
+ * (C) Copyright 2014 3ADEV <http://3adev.com>
+ * Written by Albert ARIBAUD <albert.aribaud@3adev.fr>
+ *
+ * NOTE:
+ *
+ * The MLC NAND flash controller provides hardware Reed-Solomon ECC
+ * covering in- and out-of-band data together. Therefore, in- and out-
+ * of-band data must be written together in order to have a valid ECC.
+ *
+ * Consequently, pages with meaningful in-band data are written with
+ * blank (all-ones) out-of-band data and a valid ECC, and any later
+ * out-of-band data write will void the ECC.
+ *
+ * Therefore, code which reads such late-written out-of-band data
+ * should not rely on the ECC validity.
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <nand.h>
+#include <asm/arch/clk.h>
+#include <asm/arch/sys_proto.h>
+
+/*
+ * MLC NAND controller registers.
+ */
+struct lpc32xx_nand_mlc_registers {
+ u8 buff[32768]; /* controller's serial data buffer */
+ u8 data[32768]; /* NAND's raw data buffer */
+ u32 cmd;
+ u32 addr;
+ u32 ecc_enc_reg;
+ u32 ecc_dec_reg;
+ u32 ecc_auto_enc_reg;
+ u32 ecc_auto_dec_reg;
+ u32 rpr;
+ u32 wpr;
+ u32 rubp;
+ u32 robp;
+ u32 sw_wp_add_low;
+ u32 sw_wp_add_hig;
+ u32 icr;
+ u32 time_reg;
+ u32 irq_mr;
+ u32 irq_sr;
+ u32 lock_pr;
+ u32 isr;
+ u32 ceh;
+};
+
+/* LOCK_PR register defines */
+#define LOCK_PR_UNLOCK_KEY 0x0000A25E /* Magic unlock value */
+
+/* ICR defines */
+#define ICR_LARGE_BLOCKS 0x00000004 /* configure for 2KB blocks */
+#define ICR_ADDR4 0x00000002 /* configure for 4-word addrs */
+
+/* CEH defines */
+#define CEH_NORMAL_CE 0x00000001 /* do not force CE ON */
+
+/* ISR register defines */
+#define ISR_NAND_READY 0x00000001
+#define ISR_CONTROLLER_READY 0x00000002
+#define ISR_ECC_READY 0x00000004
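+/* bits [5:4] of the ISR hold the decoder error count minus one */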
+#define ISR_DECODER_ERRORS(s) ((((s) >> 4) & 3) + 1)
+#define ISR_DECODER_FAILURE 0x00000040
+#define ISR_DECODER_ERROR 0x00000008
+
+/* time-out for NAND chip / controller loops, in us */
+#define LPC32X_NAND_TIMEOUT 5000
+
+/*
+ * There is a single instance of the NAND MLC controller
+ */
+
+static struct lpc32xx_nand_mlc_registers __iomem *lpc32xx_nand_mlc_registers
+ = (struct lpc32xx_nand_mlc_registers __iomem *)MLC_NAND_BASE;
+
+#if !defined(CONFIG_SYS_MAX_NAND_CHIPS)
+#define CONFIG_SYS_MAX_NAND_CHIPS 1
+#endif
+
+#define clkdiv(v, w, o) (((1+(clk/v)) & w) << o)
+
+/**
+ * The OOB data in each small page consists of 6 'free' bytes followed by
+ * 10 ECC bytes. To make things easier when reading large pages, the four
+ * pages' 'free' OOB bytes are grouped in the first 24 bytes of the OOB
+ * buffer, while the four pages' ECC bytes are grouped in its last 40 bytes.
+ *
+ * The struct below represents how free vs ecc oob bytes are stored
+ * in the buffer.
+ *
+ * Note: the OOB bytes contain the bad block marker at offsets 0 and 1.
+ */
+
+struct lpc32xx_oob {
+ struct {
+ uint8_t free_oob_bytes[6];
+ } free[4];
+ struct {
+ uint8_t ecc_oob_bytes[10];
+ } ecc[4];
+};
+
+/*
+ * Initialize the controller
+ */
+
+static void lpc32xx_nand_init(void)
+{
+ unsigned int clk;
+
+ /* Configure controller for no software write protection, x8 bus
+ width, large block device, and 4 address words */
+
+ /* unlock controller registers with magic key */
+ writel(LOCK_PR_UNLOCK_KEY,
+ &lpc32xx_nand_mlc_registers->lock_pr);
+
+ /* enable large blocks and large NANDs */
+ writel(ICR_LARGE_BLOCKS | ICR_ADDR4,
+ &lpc32xx_nand_mlc_registers->icr);
+
+ /* Make sure MLC interrupts are disabled */
+ writel(0, &lpc32xx_nand_mlc_registers->irq_mr);
+
+ /* Normal chip enable operation */
+ writel(CEH_NORMAL_CE,
+ &lpc32xx_nand_mlc_registers->ceh);
+
+ /* Setup NAND timing */
+ clk = get_hclk_clk_rate();
+
+ writel(
+ clkdiv(CONFIG_LPC32XX_NAND_MLC_TCEA_DELAY, 0x03, 24) |
+ clkdiv(CONFIG_LPC32XX_NAND_MLC_BUSY_DELAY, 0x1F, 19) |
+ clkdiv(CONFIG_LPC32XX_NAND_MLC_NAND_TA, 0x07, 16) |
+ clkdiv(CONFIG_LPC32XX_NAND_MLC_RD_HIGH, 0x0F, 12) |
+ clkdiv(CONFIG_LPC32XX_NAND_MLC_RD_LOW, 0x0F, 8) |
+ clkdiv(CONFIG_LPC32XX_NAND_MLC_WR_HIGH, 0x0F, 4) |
+ clkdiv(CONFIG_LPC32XX_NAND_MLC_WR_LOW, 0x0F, 0),
+ &lpc32xx_nand_mlc_registers->time_reg);
+}
+
+#if !defined(CONFIG_SPL_BUILD)
+
+/**
+ * lpc32xx_cmd_ctrl - write a command to either the cmd or the addr register
+ */
+
+static void lpc32xx_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+	if (ctrl & NAND_CLE)
+		writeb(cmd & 0xff, &lpc32xx_nand_mlc_registers->cmd);
+	else if (ctrl & NAND_ALE)
+		writeb(cmd & 0xff, &lpc32xx_nand_mlc_registers->addr);
+}
+
+/**
+ * lpc32xx_read_byte - read a byte from the NAND
+ * @mtd: MTD device structure
+ */
+
+static uint8_t lpc32xx_read_byte(struct mtd_info *mtd)
+{
+ return readb(&lpc32xx_nand_mlc_registers->data);
+}
+
+/**
+ * lpc32xx_dev_ready - test if NAND device (actually controller) is ready
+ * @mtd: MTD device structure
+ */
+
+static int lpc32xx_dev_ready(struct mtd_info *mtd)
+{
+ /* means *controller* ready for us */
+ int status = readl(&lpc32xx_nand_mlc_registers->isr);
+ return status & ISR_CONTROLLER_READY;
+}
+
+/**
+ * ECC layout -- this is needed whatever ECC mode we are using.
+ * In a 2KB (4*512B) page, R/S codes occupy 40 (4*10) bytes.
+ * To make U-Boot's life easier, we pack 'useable' OOB at the
+ * front and R/S ECC at the back.
+ */
+
+static struct nand_ecclayout lpc32xx_largepage_ecclayout = {
+ .eccbytes = 40,
+ .eccpos = {24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+		   44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = {
+ /* bytes 0 and 1 are used for the bad block marker */
+ {
+ .offset = 2,
+ .length = 22
+ },
+ }
+};
+
+/**
+ * lpc32xx_read_page_hwecc - read in- and out-of-band data with ECC
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Use large block Auto Decode Read Mode(1) as described in User Manual
+ * section 8.6.2.1.
+ *
+ * The initial Read Mode and Read Start commands are sent by the caller.
+ *
+ * ECC will be false if out-of-band data has been updated since in-band
+ * data was initially written.
+ */
+
+static int lpc32xx_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
+{
+ unsigned int i, status, timeout, err, max_bitflips = 0;
+ struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;
+
+ /* go through all four small pages */
+ for (i = 0; i < 4; i++) {
+ /* start auto decode (reads 528 NAND bytes) */
+ writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_dec_reg);
+ /* wait for controller to return to ready state */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+ if (status & ISR_CONTROLLER_READY)
+ break;
+ udelay(1);
+ }
+ /* if decoder failed, return failure */
+ if (status & ISR_DECODER_FAILURE)
+ return -1;
+ /* keep count of maximum bitflips performed */
+ if (status & ISR_DECODER_ERROR) {
+ err = ISR_DECODER_ERRORS(status);
+ if (err > max_bitflips)
+ max_bitflips = err;
+ }
+ /* copy first 512 bytes into buffer */
+ memcpy(buf+512*i, lpc32xx_nand_mlc_registers->buff, 512);
+ /* copy next 6 bytes at front of OOB buffer */
+ memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->buff, 6);
+ /* copy last 10 bytes (R/S ECC) at back of OOB buffer */
+ memcpy(&oob->ecc[i], lpc32xx_nand_mlc_registers->buff, 10);
+ }
+ return max_bitflips;
+}
+
+/**
+ * lpc32xx_read_page_raw - read raw (in-band, out-of-band and ECC) data
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Read NAND directly; can read pages with invalid ECC.
+ */
+
+static int lpc32xx_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
+{
+ unsigned int i, status, timeout;
+ struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;
+
+ /* when we get here we've already had the Read Mode(1) */
+
+ /* go through all four small pages */
+ for (i = 0; i < 4; i++) {
+ /* wait for NAND to return to ready state */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+ if (status & ISR_NAND_READY)
+ break;
+ udelay(1);
+ }
+ /* if NAND stalled, return failure */
+ if (!(status & ISR_NAND_READY))
+ return -1;
+ /* copy first 512 bytes into buffer */
+ memcpy(buf+512*i, lpc32xx_nand_mlc_registers->data, 512);
+ /* copy next 6 bytes at front of OOB buffer */
+ memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->data, 6);
+ /* copy last 10 bytes (R/S ECC) at back of OOB buffer */
+ memcpy(&oob->ecc[i], lpc32xx_nand_mlc_registers->data, 10);
+ }
+ return 0;
+}
+
+/**
+ * lpc32xx_read_oob - read out-of-band data
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ *
+ * Read out-of-band data. User Manual section 8.6.4 suggests using Read
+ * Mode(3) which the controller will turn into a Read Mode(1) internally
+ * but nand_base.c will turn Mode(3) into Mode(0), so let's use Mode(0)
+ * directly.
+ *
+ * ECC covers in- and out-of-band data and was written when out-of-band
+ * data was blank. Therefore, if the out-of-band being read here is not
+ * blank, then the ECC will be false and the read will return bitflips,
+ * even in case of ECC failure where we will return 5 bitflips. The
+ * caller should be prepared to handle this.
+ */
+
+static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ unsigned int i, status, timeout, err, max_bitflips = 0;
+ struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;
+
+ /* No command was sent before calling read_oob() so send one */
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ /* go through all four small pages */
+ for (i = 0; i < 4; i++) {
+ /* start auto decode (reads 528 NAND bytes) */
+ writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_dec_reg);
+ /* wait for controller to return to ready state */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+ if (status & ISR_CONTROLLER_READY)
+ break;
+ udelay(1);
+ }
+ /* if decoder failure, count 'one too many' bitflips */
+ if (status & ISR_DECODER_FAILURE)
+ max_bitflips = 5;
+ /* keep count of maximum bitflips performed */
+ if (status & ISR_DECODER_ERROR) {
+ err = ISR_DECODER_ERRORS(status);
+ if (err > max_bitflips)
+ max_bitflips = err;
+ }
+ /* set read pointer to OOB area */
+ writel(0, &lpc32xx_nand_mlc_registers->robp);
+ /* copy next 6 bytes at front of OOB buffer */
+ memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->buff, 6);
+ /* copy next 10 bytes (R/S ECC) at back of OOB buffer */
+ memcpy(&oob->ecc[i], lpc32xx_nand_mlc_registers->buff, 10);
+ }
+ return max_bitflips;
+}
+
+/**
+ * lpc32xx_write_page_hwecc - write in- and out-of-band data with ECC
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Use large block Auto Encode as per User Manual section 8.6.4.
+ *
+ * The initial Write Serial Input and final Auto Program commands are
+ * sent by the caller.
+ */
+
+static int lpc32xx_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
+{
+ unsigned int i, status, timeout;
+ struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;
+
+ /* when we get here we've already had the SEQIN */
+ for (i = 0; i < 4; i++) {
+ /* start encode (expects 518 writes to buff) */
+ writel(0, &lpc32xx_nand_mlc_registers->ecc_enc_reg);
+ /* copy first 512 bytes from buffer */
+ memcpy(&lpc32xx_nand_mlc_registers->buff, buf+512*i, 512);
+ /* copy next 6 bytes from OOB buffer -- excluding ECC */
+ memcpy(&lpc32xx_nand_mlc_registers->buff, &oob->free[i], 6);
+ /* wait for ECC to return to ready state */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+ if (status & ISR_ECC_READY)
+ break;
+ udelay(1);
+ }
+ /* if ECC stalled, return failure */
+ if (!(status & ISR_ECC_READY))
+ return -1;
+ /* Trigger auto encode (writes 528 bytes to NAND) */
+ writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_enc_reg);
+ /* wait for controller to return to ready state */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+ if (status & ISR_CONTROLLER_READY)
+ break;
+ udelay(1);
+ }
+ /* if controller stalled, return error */
+ if (!(status & ISR_CONTROLLER_READY))
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * lpc32xx_write_page_raw - write raw (in-band, out-of-band and ECC) data
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Use large block write but without encode.
+ *
+ * The initial Write Serial Input and final Auto Program commands are
+ * sent by the caller.
+ *
+ * This function will write the full out-of-band data, including the
+ * ECC area. Therefore, it can write pages with valid *or* invalid ECC.
+ */
+
+static int lpc32xx_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
+{
+ unsigned int i;
+ struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;
+
+	/* when we get here we've already had the Write Serial Input (SEQIN) */
+ for (i = 0; i < 4; i++) {
+ /* copy first 512 bytes from buffer */
+ memcpy(lpc32xx_nand_mlc_registers->buff, buf+512*i, 512);
+ /* copy next 6 bytes into OOB buffer -- excluding ECC */
+ memcpy(lpc32xx_nand_mlc_registers->buff, &oob->free[i], 6);
+ /* copy next 10 bytes into OOB buffer -- that is 'ECC' */
+ memcpy(lpc32xx_nand_mlc_registers->buff, &oob->ecc[i], 10);
+ }
+ return 0;
+}
+
+/**
+ * lpc32xx_write_oob - write out-of-band data
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ *
+ * Since ECC covers in- and out-of-band data, writing out-of-band data
+ * with ECC will render the page ECC wrong -- or, if the page was blank,
+ * then it will produce a good ECC but a later in-band data write will
+ * render it wrong.
+ *
+ * Therefore, do not compute or write any ECC, and always return success.
+ *
+ * This implies that we do four writes, since non-ECC out-of-band data
+ * are not contiguous in a large page.
+ */
+
+static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ /* update oob on all 4 subpages in sequence */
+ unsigned int i, status, timeout;
+ struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;
+
+ for (i = 0; i < 4; i++) {
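+		/*
+		 * Each small page spans 0x210 (512 data + 16 spare) bytes;
+		 * column 0x200 addresses the start of its spare area.
+		 */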
+ /* start data input */
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x200+0x210*i, page);
+ /* copy 6 non-ECC out-of-band bytes directly into NAND */
+ memcpy(lpc32xx_nand_mlc_registers->data, &oob->free[i], 6);
+ /* program page */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ /* wait for NAND to return to ready state */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+ if (status & ISR_NAND_READY)
+ break;
+ udelay(1);
+ }
+ /* if NAND stalled, return error */
+ if (!(status & ISR_NAND_READY))
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * lpc32xx_waitfunc - wait until a command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ *
+ * Wait for controller and FLASH to both be ready.
+ */
+
+static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ int status;
+ unsigned int timeout;
+ /* wait until both controller and NAND are ready */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+		if ((status & (ISR_CONTROLLER_READY | ISR_NAND_READY))
+		    == (ISR_CONTROLLER_READY | ISR_NAND_READY))
+ break;
+ udelay(1);
+ }
+ /* if controller or NAND stalled, return error */
+	if ((status & (ISR_CONTROLLER_READY | ISR_NAND_READY))
+	    != (ISR_CONTROLLER_READY | ISR_NAND_READY))
+ return -1;
+ /* write NAND status command */
+ writel(NAND_CMD_STATUS, &lpc32xx_nand_mlc_registers->cmd);
+ /* read back status and return it */
+ return readb(&lpc32xx_nand_mlc_registers->data);
+}
+
+/*
+ * We are self-initializing, so we need our own chip struct
+ */
+
+static struct nand_chip lpc32xx_chip;
+
+/*
+ * Initialize the controller
+ */
+
+void board_nand_init(void)
+{
+ struct mtd_info *mtd = nand_to_mtd(&lpc32xx_chip);
+ int ret;
+
+ /* Set all BOARDSPECIFIC (actually core-specific) fields */
+
+ lpc32xx_chip.IO_ADDR_R = &lpc32xx_nand_mlc_registers->buff;
+ lpc32xx_chip.IO_ADDR_W = &lpc32xx_nand_mlc_registers->buff;
+ lpc32xx_chip.cmd_ctrl = lpc32xx_cmd_ctrl;
+ /* do not set init_size: nand_base.c will read sizes from chip */
+ lpc32xx_chip.dev_ready = lpc32xx_dev_ready;
+ /* do not set setup_read_retry: this is NAND-chip-specific */
+ /* do not set chip_delay: we have dev_ready defined. */
+ lpc32xx_chip.options |= NAND_NO_SUBPAGE_WRITE;
+
+ /* Set needed ECC fields */
+
+ lpc32xx_chip.ecc.mode = NAND_ECC_HW;
+ lpc32xx_chip.ecc.layout = &lpc32xx_largepage_ecclayout;
+ lpc32xx_chip.ecc.size = 512;
+ lpc32xx_chip.ecc.bytes = 10;
+ lpc32xx_chip.ecc.strength = 4;
+ lpc32xx_chip.ecc.read_page = lpc32xx_read_page_hwecc;
+ lpc32xx_chip.ecc.read_page_raw = lpc32xx_read_page_raw;
+ lpc32xx_chip.ecc.write_page = lpc32xx_write_page_hwecc;
+ lpc32xx_chip.ecc.write_page_raw = lpc32xx_write_page_raw;
+ lpc32xx_chip.ecc.read_oob = lpc32xx_read_oob;
+ lpc32xx_chip.ecc.write_oob = lpc32xx_write_oob;
+ lpc32xx_chip.waitfunc = lpc32xx_waitfunc;
+
+ lpc32xx_chip.read_byte = lpc32xx_read_byte; /* FIXME: NEEDED? */
+
+ /* BBT options: read from last two pages */
+ lpc32xx_chip.bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_LASTBLOCK
+ | NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE
+ | NAND_BBT_WRITE;
+
+ /* Initialize NAND interface */
+ lpc32xx_nand_init();
+
+ /* identify chip */
+ ret = nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_CHIPS, NULL);
+ if (ret) {
+ pr_err("nand_scan_ident returned %i", ret);
+ return;
+ }
+
+ /* finish scanning the chip */
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ pr_err("nand_scan_tail returned %i", ret);
+ return;
+ }
+
+ /* chip is good, register it */
+ ret = nand_register(0, mtd);
+ if (ret)
+ pr_err("nand_register returned %i", ret);
+}
+
+#else /* defined(CONFIG_SPL_BUILD) */
+
+void nand_init(void)
+{
+ /* enable NAND controller */
+ lpc32xx_mlc_nand_init();
+ /* initialize NAND controller */
+ lpc32xx_nand_init();
+}
+
+void nand_deselect(void)
+{
+ /* nothing to do, but SPL requires this function */
+}
+
+static int read_single_page(uint8_t *dest, int page,
+ struct lpc32xx_oob *oob)
+{
+ int status, i, timeout, err, max_bitflips = 0;
+
+ /* enter read mode */
+ writel(NAND_CMD_READ0, &lpc32xx_nand_mlc_registers->cmd);
+	/* send column (LSB then MSB) and page (LSB to MSB) address cycles */
+ writel(0, &lpc32xx_nand_mlc_registers->addr);
+ writel(0, &lpc32xx_nand_mlc_registers->addr);
+ writel(page & 0xff, &lpc32xx_nand_mlc_registers->addr);
+ writel((page>>8) & 0xff, &lpc32xx_nand_mlc_registers->addr);
+ writel((page>>16) & 0xff, &lpc32xx_nand_mlc_registers->addr);
+ /* start reading */
+ writel(NAND_CMD_READSTART, &lpc32xx_nand_mlc_registers->cmd);
+
+ /* large page auto decode read */
+ for (i = 0; i < 4; i++) {
+ /* start auto decode (reads 528 NAND bytes) */
+ writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_dec_reg);
+ /* wait for controller to return to ready state */
+ for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
+ status = readl(&lpc32xx_nand_mlc_registers->isr);
+ if (status & ISR_CONTROLLER_READY)
+ break;
+ udelay(1);
+ }
+ /* if controller stalled, return error */
+ if (!(status & ISR_CONTROLLER_READY))
+ return -1;
+ /* if decoder failure, return error */
+ if (status & ISR_DECODER_FAILURE)
+ return -1;
+ /* keep count of maximum bitflips performed */
+ if (status & ISR_DECODER_ERROR) {
+ err = ISR_DECODER_ERRORS(status);
+ if (err > max_bitflips)
+ max_bitflips = err;
+ }
+ /* copy first 512 bytes into buffer */
+ memcpy(dest+i*512, lpc32xx_nand_mlc_registers->buff, 512);
+		/* copy next 6 bytes into OOB buffer */
+ memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->buff, 6);
+ }
+ return max_bitflips;
+}
+
+/*
+ * Load U-Boot signed image.
+ * This loads an image from NAND, skipping bad blocks.
+ * A block is declared bad if at least one of its readable pages has
+ * a bad block marker in its OOB at position 0.
+ * If all pages in a block are unreadable, the block is considered
+ * bad (i.e., assumed not to be part of the image) and skipped.
+ *
+ * IMPORTANT NOTE:
+ *
+ * If the first block of the image is fully unreadable, it will be
+ * ignored and skipped as if it had been marked bad. If it was not
+ * actually marked bad at the time of writing the image, the resulting
+ * image loaded will lack a header and magic number. It could thus be
+ * considered as a raw, headerless, image and SPL might erroneously
+ * jump into it.
+ *
+ * In order to avoid this risk, LPC32XX-based boards which use this
+ * driver MUST define CONFIG_SPL_PANIC_ON_RAW_IMAGE.
+ */
+
+#define BYTES_PER_PAGE 2048
+#define PAGES_PER_BLOCK 64
+#define BYTES_PER_BLOCK (BYTES_PER_PAGE * PAGES_PER_BLOCK)
+#define PAGES_PER_CHIP_MAX 524288
+
+int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
+{
+ int bytes_left = size;
+ int pages_left = DIV_ROUND_UP(size, BYTES_PER_PAGE);
+ int blocks_left = DIV_ROUND_UP(size, BYTES_PER_BLOCK);
+ int block = 0;
+ int page = offs / BYTES_PER_PAGE;
+ /* perform reads block by block */
+ while (blocks_left) {
+		/* destination address for this block's pages */
+		void *block_page_dst = dst;
+ /* read at most one block, possibly less */
+ int block_bytes_left = bytes_left;
+ if (block_bytes_left > BYTES_PER_BLOCK)
+ block_bytes_left = BYTES_PER_BLOCK;
+ /* keep track of good, failed, and "bad" pages */
+ int block_pages_good = 0;
+ int block_pages_bad = 0;
+ int block_pages_err = 0;
+ /* we shall read a full block of pages, maybe less */
+ int block_pages_left = pages_left;
+ if (block_pages_left > PAGES_PER_BLOCK)
+ block_pages_left = PAGES_PER_BLOCK;
+ int block_pages = block_pages_left;
+ int block_page = page;
+ /* while pages are left and the block is not known as bad */
+ while ((block_pages > 0) && (block_pages_bad == 0)) {
+ /* we will read OOB, too, for bad block markers */
+ struct lpc32xx_oob oob;
+ /* read page */
+ int res = read_single_page(block_page_dst, block_page,
+ &oob);
+ /* count readable pages */
+ if (res >= 0) {
+ /* this page is good */
+ block_pages_good++;
+				/* check the bad block marker */
+				if ((oob.free[0].free_oob_bytes[0] != 0xff) ||
+				    (oob.free[0].free_oob_bytes[1] != 0xff))
+					block_pages_bad++;
+ } else
+ /* count errors */
+ block_pages_err++;
+ /* we're done with this page */
+ block_page++;
+ block_page_dst += BYTES_PER_PAGE;
+ if (block_pages)
+ block_pages--;
+ }
+ /* a fully unreadable block is considered bad */
+ if (block_pages_good == 0)
+ block_pages_bad = block_pages_err;
+ /* errors are fatal only in good blocks */
+ if ((block_pages_err > 0) && (block_pages_bad == 0))
+ return -1;
+ /* we keep reads only of good blocks */
+ if (block_pages_bad == 0) {
+ dst += block_bytes_left;
+ bytes_left -= block_bytes_left;
+ pages_left -= block_pages_left;
+ blocks_left--;
+ }
+ /* good or bad, we're done with this block */
+ block++;
+ page += PAGES_PER_BLOCK;
+ }
+
+ /* report success */
+ return 0;
+}
+
+#endif /* CONFIG_SPL_BUILD */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_slc.c b/roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_slc.c
new file mode 100644
index 000000000..9cca3c55c
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/lpc32xx_nand_slc.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * LPC32xx SLC NAND flash controller driver
+ *
+ * (C) Copyright 2015-2018 Vladimir Zapolskiy <vz@mleia.com>
+ * Copyright (c) 2015 Tyco Fire Protection Products.
+ *
+ * Hardware ECC support original source code
+ * Copyright (C) 2008 by NXP Semiconductors
+ * Author: Kevin Wells
+ */
+
+#include <common.h>
+#include <log.h>
+#include <nand.h>
+#include <linux/bug.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <asm/arch/config.h>
+#include <asm/arch/clk.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/cpu.h>
+
+struct lpc32xx_nand_slc_regs {
+ u32 data;
+ u32 addr;
+ u32 cmd;
+ u32 stop;
+ u32 ctrl;
+ u32 cfg;
+ u32 stat;
+ u32 int_stat;
+ u32 ien;
+ u32 isr;
+ u32 icr;
+ u32 tac;
+ u32 tc;
+ u32 ecc;
+ u32 dma_data;
+};
+
+/* CFG register */
+#define CFG_CE_LOW (1 << 5)
+#define CFG_DMA_ECC (1 << 4) /* Enable DMA ECC bit */
+#define CFG_ECC_EN (1 << 3) /* ECC enable bit */
+#define CFG_DMA_BURST (1 << 2) /* DMA burst bit */
+#define CFG_DMA_DIR (1 << 1) /* DMA write(0)/read(1) bit */
+
+/* CTRL register */
+#define CTRL_SW_RESET (1 << 2)
+#define CTRL_ECC_CLEAR (1 << 1) /* Reset ECC bit */
+#define CTRL_DMA_START (1 << 0) /* Start DMA channel bit */
+
+/* STAT register */
+#define STAT_DMA_FIFO (1 << 2) /* DMA FIFO has data bit */
+#define STAT_NAND_READY (1 << 0)
+
+/* INT_STAT register */
+#define INT_STAT_TC (1 << 1)
+#define INT_STAT_RDY (1 << 0)
+
+/* TAC register bits, be aware of overflows */
+#define TAC_W_RDY(n) (max_t(uint32_t, (n), 0xF) << 28)
+#define TAC_W_WIDTH(n) (max_t(uint32_t, (n), 0xF) << 24)
+#define TAC_W_HOLD(n) (max_t(uint32_t, (n), 0xF) << 20)
+#define TAC_W_SETUP(n) (max_t(uint32_t, (n), 0xF) << 16)
+#define TAC_R_RDY(n) (max_t(uint32_t, (n), 0xF) << 12)
+#define TAC_R_WIDTH(n) (max_t(uint32_t, (n), 0xF) << 8)
+#define TAC_R_HOLD(n) (max_t(uint32_t, (n), 0xF) << 4)
+#define TAC_R_SETUP(n) (max_t(uint32_t, (n), 0xF) << 0)
+
+/*
+ * NAND ECC layout for small page NAND devices.
+ * Note: for large page devices, the default layouts are used.
+ */
+static struct nand_ecclayout lpc32xx_nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = { 10, 11, 12, 13, 14, 15, },
+ .oobfree = {
+ { .offset = 0, .length = 4, },
+ { .offset = 6, .length = 4, },
+ }
+};
+
+#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_SPL_BUILD)
+#define ECCSTEPS (CONFIG_SYS_NAND_PAGE_SIZE / CONFIG_SYS_NAND_ECCSIZE)
+
+/*
+ * DMA Descriptors
+ * For Large Block: 17 descriptors = ((16 Data and ECC Read) + 1 Spare Area)
+ * For Small Block: 5 descriptors = ((4 Data and ECC Read) + 1 Spare Area)
+ */
+static struct lpc32xx_dmac_ll dmalist[ECCSTEPS * 2 + 1];
+static u32 ecc_buffer[8]; /* MAX ECC size */
+static unsigned int dmachan = (unsigned int)-1; /* Invalid channel */
+
+/*
+ * Helper macro for the DMA client (i.e. NAND SLC):
+ * - to write the next DMA linked list item address
+ * (see arch/include/asm/arch-lpc32xx/dma.h).
+ * - to assign the DMA data register to DMA source or destination address.
+ * - to assign the ECC register to DMA source or destination address.
+ */
+#define lpc32xx_dmac_next_lli(x) ((u32)x)
+#define lpc32xx_dmac_set_dma_data() ((u32)&lpc32xx_nand_slc_regs->dma_data)
+#define lpc32xx_dmac_set_ecc() ((u32)&lpc32xx_nand_slc_regs->ecc)
+#endif
+
+static struct lpc32xx_nand_slc_regs __iomem *lpc32xx_nand_slc_regs
+ = (struct lpc32xx_nand_slc_regs __iomem *)SLC_NAND_BASE;
+
+static void lpc32xx_nand_init(void)
+{
+ uint32_t hclk = get_hclk_clk_rate();
+
+ /* Reset SLC NAND controller */
+ writel(CTRL_SW_RESET, &lpc32xx_nand_slc_regs->ctrl);
+
+ /* 8-bit bus, no DMA, no ECC, ordinary CE signal */
+ writel(0, &lpc32xx_nand_slc_regs->cfg);
+
+ /* Interrupts disabled and cleared */
+ writel(0, &lpc32xx_nand_slc_regs->ien);
+ writel(INT_STAT_TC | INT_STAT_RDY,
+ &lpc32xx_nand_slc_regs->icr);
+
+ /* Configure NAND flash timings */
+ writel(TAC_W_RDY(CONFIG_LPC32XX_NAND_SLC_WDR_CLKS) |
+ TAC_W_WIDTH(hclk / CONFIG_LPC32XX_NAND_SLC_WWIDTH) |
+ TAC_W_HOLD(hclk / CONFIG_LPC32XX_NAND_SLC_WHOLD) |
+ TAC_W_SETUP(hclk / CONFIG_LPC32XX_NAND_SLC_WSETUP) |
+ TAC_R_RDY(CONFIG_LPC32XX_NAND_SLC_RDR_CLKS) |
+ TAC_R_WIDTH(hclk / CONFIG_LPC32XX_NAND_SLC_RWIDTH) |
+ TAC_R_HOLD(hclk / CONFIG_LPC32XX_NAND_SLC_RHOLD) |
+ TAC_R_SETUP(hclk / CONFIG_LPC32XX_NAND_SLC_RSETUP),
+ &lpc32xx_nand_slc_regs->tac);
+}
+
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd,
+ int cmd, unsigned int ctrl)
+{
+ debug("ctrl: 0x%08x, cmd: 0x%08x\n", ctrl, cmd);
+
+ if (ctrl & NAND_NCE)
+ setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_CE_LOW);
+ else
+ clrbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_CE_LOW);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ writel(cmd & 0xFF, &lpc32xx_nand_slc_regs->cmd);
+ else if (ctrl & NAND_ALE)
+ writel(cmd & 0xFF, &lpc32xx_nand_slc_regs->addr);
+}
+
+static int lpc32xx_nand_dev_ready(struct mtd_info *mtd)
+{
+ return readl(&lpc32xx_nand_slc_regs->stat) & STAT_NAND_READY;
+}
+
+#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_SPL_BUILD)
+/*
+ * Prepare DMA descriptors for NAND read/write operations.
+ * If the size is smaller than 256 bytes, the transfer is assumed to be
+ * OOB-only.
+ */
+static void lpc32xx_nand_dma_configure(struct nand_chip *chip,
+ const u8 *buffer, int size,
+ int read)
+{
+ u32 i, dmasrc, ctrl, ecc_ctrl, oob_ctrl, dmadst;
+ struct lpc32xx_dmac_ll *dmalist_cur;
+ struct lpc32xx_dmac_ll *dmalist_cur_ecc;
+
+ /*
+ * CTRL descriptor entry for reading ECC
+ * Copy Multiple times to sync DMA with Flash Controller
+ */
+ ecc_ctrl = 0x5 |
+ DMAC_CHAN_SRC_BURST_1 |
+ DMAC_CHAN_DEST_BURST_1 |
+ DMAC_CHAN_SRC_WIDTH_32 |
+ DMAC_CHAN_DEST_WIDTH_32 |
+ DMAC_CHAN_DEST_AHB1;
+
+ /* CTRL descriptor entry for reading/writing Data */
+ ctrl = (CONFIG_SYS_NAND_ECCSIZE / 4) |
+ DMAC_CHAN_SRC_BURST_4 |
+ DMAC_CHAN_DEST_BURST_4 |
+ DMAC_CHAN_SRC_WIDTH_32 |
+ DMAC_CHAN_DEST_WIDTH_32 |
+ DMAC_CHAN_DEST_AHB1;
+
+ /* CTRL descriptor entry for reading/writing Spare Area */
+ oob_ctrl = (CONFIG_SYS_NAND_OOBSIZE / 4) |
+ DMAC_CHAN_SRC_BURST_4 |
+ DMAC_CHAN_DEST_BURST_4 |
+ DMAC_CHAN_SRC_WIDTH_32 |
+ DMAC_CHAN_DEST_WIDTH_32 |
+ DMAC_CHAN_DEST_AHB1;
+
+ if (read) {
+ dmasrc = lpc32xx_dmac_set_dma_data();
+ dmadst = (u32)buffer;
+ ctrl |= DMAC_CHAN_DEST_AUTOINC;
+ } else {
+ dmadst = lpc32xx_dmac_set_dma_data();
+ dmasrc = (u32)buffer;
+ ctrl |= DMAC_CHAN_SRC_AUTOINC;
+ }
+
+ /*
+ * Write Operation Sequence for Small Block NAND
+ * ----------------------------------------------------------
+ * 1. X'fer 256 bytes of data from Memory to Flash.
+ * 2. Copy generated ECC data from Register to Spare Area
+ * 3. X'fer next 256 bytes of data from Memory to Flash.
+ * 4. Copy generated ECC data from Register to Spare Area.
+ * 5. X'fer 16 bytes of Spare area from Memory to Flash.
+ * Read Operation Sequence for Small Block NAND
+ * ----------------------------------------------------------
+ * 1. X'fer 256 bytes of data from Flash to Memory.
+ * 2. Copy generated ECC data from Register to ECC calc Buffer.
+ * 3. X'fer next 256 bytes of data from Flash to Memory.
+ * 4. Copy generated ECC data from Register to ECC calc Buffer.
+ * 5. X'fer 16 bytes of Spare area from Flash to Memory.
+ * Write Operation Sequence for Large Block NAND
+ * ----------------------------------------------------------
+ * 1. Steps (1-4) of the Write Operation repeat four times,
+ * which generates 16 DMA descriptors to X'fer 2048 bytes of
+ * data & 32 bytes of ECC data.
+ * 2. X'fer 64 bytes of Spare area from Memory to Flash.
+ * Read Operation Sequence for Large Block NAND
+ * ----------------------------------------------------------
+ * 1. Steps (1-4) of the Read Operation repeat four times,
+ * which generates 16 DMA descriptors to X'fer 2048 bytes of
+ * data & 32 bytes of ECC data.
+ * 2. X'fer 64 bytes of Spare area from Flash to Memory.
+ */
+
+ for (i = 0; i < size/CONFIG_SYS_NAND_ECCSIZE; i++) {
+ dmalist_cur = &dmalist[i * 2];
+ dmalist_cur_ecc = &dmalist[(i * 2) + 1];
+
+ dmalist_cur->dma_src = (read ? (dmasrc) : (dmasrc + (i*256)));
+ dmalist_cur->dma_dest = (read ? (dmadst + (i*256)) : dmadst);
+ dmalist_cur->next_lli = lpc32xx_dmac_next_lli(dmalist_cur_ecc);
+ dmalist_cur->next_ctrl = ctrl;
+
+ dmalist_cur_ecc->dma_src = lpc32xx_dmac_set_ecc();
+ dmalist_cur_ecc->dma_dest = (u32)&ecc_buffer[i];
+ dmalist_cur_ecc->next_lli =
+ lpc32xx_dmac_next_lli(&dmalist[(i * 2) + 2]);
+ dmalist_cur_ecc->next_ctrl = ecc_ctrl;
+ }
+
+ if (i) { /* Data only transfer */
+ dmalist_cur_ecc = &dmalist[(i * 2) - 1];
+ dmalist_cur_ecc->next_lli = 0;
+ dmalist_cur_ecc->next_ctrl |= DMAC_CHAN_INT_TC_EN;
+ return;
+ }
+
+ /* OOB only transfer */
+ if (read) {
+ dmasrc = lpc32xx_dmac_set_dma_data();
+ dmadst = (u32)buffer;
+ oob_ctrl |= DMAC_CHAN_DEST_AUTOINC;
+ } else {
+ dmadst = lpc32xx_dmac_set_dma_data();
+ dmasrc = (u32)buffer;
+ oob_ctrl |= DMAC_CHAN_SRC_AUTOINC;
+ }
+
+ /* Read/ Write Spare Area Data To/From Flash */
+ dmalist_cur = &dmalist[i * 2];
+ dmalist_cur->dma_src = dmasrc;
+ dmalist_cur->dma_dest = dmadst;
+ dmalist_cur->next_lli = 0;
+ dmalist_cur->next_ctrl = (oob_ctrl | DMAC_CHAN_INT_TC_EN);
+}
+
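+/*
+ * Run one DMA transfer between the SLC controller and memory. 'len' is
+ * the main-area length for page transfers or mtd->oobsize for OOB-only
+ * transfers; for a fresh page transfer the OOB size is added to the TC
+ * register, so a single transfer count covers both the data pass and
+ * the following OOB pass.
+ */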
+static void lpc32xx_nand_xfer(struct mtd_info *mtd, const u8 *buf,
+ int len, int read)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u32 config;
+ int ret;
+
+ /* DMA Channel Configuration */
+ config = (read ? DMAC_CHAN_FLOW_D_P2M : DMAC_CHAN_FLOW_D_M2P) |
+ (read ? DMAC_DEST_PERIP(0) : DMAC_DEST_PERIP(DMA_PERID_NAND1)) |
+ (read ? DMAC_SRC_PERIP(DMA_PERID_NAND1) : DMAC_SRC_PERIP(0)) |
+ DMAC_CHAN_ENABLE;
+
+ /* Prepare DMA descriptors */
+ lpc32xx_nand_dma_configure(chip, buf, len, read);
+
+ /* Setup SLC controller and start transfer */
+ if (read)
+ setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_DMA_DIR);
+ else /* NAND_ECC_WRITE */
+ clrbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_DMA_DIR);
+ setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_DMA_BURST);
+
+ /* Write length for new transfers */
+ if (!((readl(&lpc32xx_nand_slc_regs->stat) & STAT_DMA_FIFO) |
+ readl(&lpc32xx_nand_slc_regs->tc))) {
+ int tmp = (len != mtd->oobsize) ? mtd->oobsize : 0;
+ writel(len + tmp, &lpc32xx_nand_slc_regs->tc);
+ }
+
+ setbits_le32(&lpc32xx_nand_slc_regs->ctrl, CTRL_DMA_START);
+
+ /* Start DMA transfers */
+ ret = lpc32xx_dma_start_xfer(dmachan, dmalist, config);
+ if (unlikely(ret < 0))
+ BUG();
+
+ /* Wait for NAND to be ready */
+ while (!lpc32xx_nand_dev_ready(mtd))
+ ;
+
+ /* Wait till DMA transfer is DONE */
+ if (lpc32xx_dma_wait_status(dmachan))
+ pr_err("NAND DMA transfer error!\r\n");
+
+ /* Stop DMA & HW ECC */
+ clrbits_le32(&lpc32xx_nand_slc_regs->ctrl, CTRL_DMA_START);
+ clrbits_le32(&lpc32xx_nand_slc_regs->cfg,
+ CFG_DMA_DIR | CFG_DMA_BURST | CFG_ECC_EN | CFG_DMA_ECC);
+}
+
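+/*
+ * Convert raw SLC ECC register words into the 3-byte-per-256-byte code
+ * format consumed by nand_correct_data(). Worked example with an
+ * assumed, purely illustrative register value:
+ *
+ *   ce = 0x0015A3C6              raw parity word from the ECC register
+ *   ce = ~(ce << 2) & 0xFFFFFF   shift into place and invert
+ *   spare[i]   = bits 23..16
+ *   spare[i+1] = bits 15..8
+ *   spare[i+2] = bits 7..0
+ */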
+static u32 slc_ecc_copy_to_buffer(u8 *spare, const u32 *ecc, int count)
+{
+ int i;
+ for (i = 0; i < (count * CONFIG_SYS_NAND_ECCBYTES);
+ i += CONFIG_SYS_NAND_ECCBYTES) {
+ u32 ce = ecc[i / CONFIG_SYS_NAND_ECCBYTES];
+ ce = ~(ce << 2) & 0xFFFFFF;
+ spare[i+2] = (u8)(ce & 0xFF); ce >>= 8;
+ spare[i+1] = (u8)(ce & 0xFF); ce >>= 8;
+ spare[i] = (u8)(ce & 0xFF);
+ }
+ return 0;
+}
+
+static int lpc32xx_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ return slc_ecc_copy_to_buffer(ecc_code, ecc_buffer, ECCSTEPS);
+}
+
+/*
+ * Enables and prepares SLC NAND controller
+ * for doing data transfers with H/W ECC enabled.
+ */
+static void lpc32xx_hwecc_enable(struct mtd_info *mtd, int mode)
+{
+ /* Clear ECC */
+ writel(CTRL_ECC_CLEAR, &lpc32xx_nand_slc_regs->ctrl);
+
+ /* Setup SLC controller for H/W ECC operations */
+ setbits_le32(&lpc32xx_nand_slc_regs->cfg, CFG_ECC_EN | CFG_DMA_ECC);
+}
+
+/*
+ * lpc32xx_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * mtd: MTD block structure
+ * dat: raw data read from the chip
+ * read_ecc: ECC from the chip
+ * calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256 byte block
+ */
+int lpc32xx_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ unsigned int i;
+ int ret1, ret2 = 0;
+ u_char *r = read_ecc;
+ u_char *c = calc_ecc;
+ u16 data_offset = 0;
+
+ for (i = 0; i < ECCSTEPS; i++) {
+ ret1 = nand_correct_data(mtd, dat + data_offset, r, c);
+ if (ret1 < 0)
+ return -EBADMSG;
+
+ ret2 += ret1;
+
+ /* Advance to the next 256-byte chunk and its 3-byte ECC code */
+ r += CONFIG_SYS_NAND_ECCBYTES;
+ c += CONFIG_SYS_NAND_ECCBYTES;
+ data_offset += CONFIG_SYS_NAND_ECCSIZE;
+ }
+
+ return ret2;
+}
+
+static void lpc32xx_dma_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ lpc32xx_nand_xfer(mtd, buf, len, 1);
+}
+
+static void lpc32xx_dma_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ lpc32xx_nand_xfer(mtd, buf, len, 0);
+}
+
+/* Reuse the logic from "nand_read_page_hwecc()" */
+static int lpc32xx_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i;
+ int stat;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
+
+ /*
+ * As per the "LPC32x0 and LPC32x0/01 User manual" table 173 notes
+ * and section 9.7, the NAND SLC & DMA allow a single DMA transaction
+ * of a page size using the DMA controller's scatter/gather mode
+ * through a linked list; the ECC read is done without any software
+ * intervention.
+ */
+
+ lpc32xx_hwecc_enable(mtd, NAND_ECC_READ);
+ lpc32xx_dma_read_buf(mtd, p, chip->ecc.size * chip->ecc.steps);
+ lpc32xx_ecc_calculate(mtd, p, &ecc_calc[0]);
+ lpc32xx_dma_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[0], &ecc_calc[0]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ return max_bitflips;
+}
+
+/* Reuse the logic from "nand_write_page_hwecc()" */
+static int lpc32xx_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ int i;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /*
+ * As per the "LPC32x0 and LPC32x0/01 User manual" table 173 notes
+ * and section 9.7, the NAND SLC & DMA allow a single DMA transaction
+ * of a page size using the DMA controller's scatter/gather mode
+ * through a linked list; the ECC read is done without any software
+ * intervention.
+ */
+
+ lpc32xx_hwecc_enable(mtd, NAND_ECC_WRITE);
+ lpc32xx_dma_write_buf(mtd, p, chip->ecc.size * chip->ecc.steps);
+ lpc32xx_ecc_calculate(mtd, p, &ecc_calc[0]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ lpc32xx_dma_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+#else
+static void lpc32xx_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ while (len-- > 0)
+ *buf++ = readl(&lpc32xx_nand_slc_regs->data);
+}
+
+static void lpc32xx_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ while (len-- > 0)
+ writel(*buf++, &lpc32xx_nand_slc_regs->data);
+}
+#endif
+
+static uint8_t lpc32xx_read_byte(struct mtd_info *mtd)
+{
+ return readl(&lpc32xx_nand_slc_regs->data);
+}
+
+static void lpc32xx_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+ writel(byte, &lpc32xx_nand_slc_regs->data);
+}
+
+/*
+ * LPC32xx has only one SLC NAND controller; don't use
+ * CONFIG_SYS_NAND_SELF_INIT, so that this function can be reused
+ * in both the SPL NAND and U-Boot images.
+ */
+int board_nand_init(struct nand_chip *lpc32xx_chip)
+{
+#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_SPL_BUILD)
+ int ret;
+
+ /* Acquire a channel for our use */
+ ret = lpc32xx_dma_get_channel();
+ if (unlikely(ret < 0)) {
+ pr_info("Unable to get free DMA channel for NAND transfers\n");
+ return -1;
+ }
+ dmachan = (unsigned int)ret;
+#endif
+
+ lpc32xx_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+ lpc32xx_chip->dev_ready = lpc32xx_nand_dev_ready;
+
+ /*
+ * The implementation of these functions is quite common, but
+ * they MUST be defined, because access to data register
+ * is strictly 32-bit aligned.
+ */
+ lpc32xx_chip->read_byte = lpc32xx_read_byte;
+ lpc32xx_chip->write_byte = lpc32xx_write_byte;
+
+#if defined(CONFIG_DMA_LPC32XX) && !defined(CONFIG_SPL_BUILD)
+ /* Hardware ECC calculation is supported when DMA driver is selected */
+ lpc32xx_chip->ecc.mode = NAND_ECC_HW;
+
+ lpc32xx_chip->read_buf = lpc32xx_dma_read_buf;
+ lpc32xx_chip->write_buf = lpc32xx_dma_write_buf;
+
+ lpc32xx_chip->ecc.calculate = lpc32xx_ecc_calculate;
+ lpc32xx_chip->ecc.correct = lpc32xx_correct_data;
+ lpc32xx_chip->ecc.hwctl = lpc32xx_hwecc_enable;
+ lpc32xx_chip->chip_delay = 2000;
+
+ lpc32xx_chip->ecc.read_page = lpc32xx_read_page_hwecc;
+ lpc32xx_chip->ecc.write_page = lpc32xx_write_page_hwecc;
+ lpc32xx_chip->options |= NAND_NO_SUBPAGE_WRITE;
+#else
+ /*
+ * Hardware ECC calculation is not supported by the driver,
+ * because it requires DMA support, see LPC32x0 User Manual,
+ * note after SLC_ECC register description (UM10326, p.198)
+ */
+ lpc32xx_chip->ecc.mode = NAND_ECC_SOFT;
+
+ /*
+ * The implementation of these functions is quite common, but
+ * they MUST be defined, because access to data register
+ * is strictly 32-bit aligned.
+ */
+ lpc32xx_chip->read_buf = lpc32xx_read_buf;
+ lpc32xx_chip->write_buf = lpc32xx_write_buf;
+#endif
+
+ /*
+ * These values are predefined
+ * for both small and large page NAND flash devices.
+ */
+ lpc32xx_chip->ecc.size = CONFIG_SYS_NAND_ECCSIZE;
+ lpc32xx_chip->ecc.bytes = CONFIG_SYS_NAND_ECCBYTES;
+ lpc32xx_chip->ecc.strength = 1;
+
+ if (CONFIG_SYS_NAND_PAGE_SIZE != NAND_LARGE_BLOCK_PAGE_SIZE)
+ lpc32xx_chip->ecc.layout = &lpc32xx_nand_oob_16;
+
+#if defined(CONFIG_SYS_NAND_USE_FLASH_BBT)
+ lpc32xx_chip->bbt_options |= NAND_BBT_USE_FLASH;
+#endif
+
+ /* Initialize NAND interface */
+ lpc32xx_nand_init();
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/mxc_nand.c b/roms/u-boot/drivers/mtd/nand/raw/mxc_nand.c
new file mode 100644
index 000000000..59cef2057
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/mxc_nand.c
@@ -0,0 +1,1309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ * Copyright 2009 Ilya Yanok, <yanok@emcraft.com>
+ */
+
+#include <common.h>
+#include <log.h>
+#include <nand.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <asm/io.h>
+#if defined(CONFIG_MX25) || defined(CONFIG_MX27) || defined(CONFIG_MX35) || \
+ defined(CONFIG_MX51) || defined(CONFIG_MX53)
+#include <asm/arch/imx-regs.h>
+#endif
+#include "mxc_nand.h"
+
+#define DRIVER_NAME "mxc_nand"
+
+struct mxc_nand_host {
+ struct nand_chip *nand;
+
+ struct mxc_nand_regs __iomem *regs;
+#ifdef MXC_NFC_V3_2
+ struct mxc_nand_ip_regs __iomem *ip_regs;
+#endif
+ int spare_only;
+ int status_request;
+ int pagesize_2k;
+ int clk_act;
+ uint16_t col_addr;
+ unsigned int page_addr;
+};
+
+static struct mxc_nand_host mxc_host;
+static struct mxc_nand_host *host = &mxc_host;
+
+/* Define delays in microsec for NAND device operations */
+#define TROP_US_DELAY 2000
+/* Macros to get byte and bit positions of ECC */
+#define COLPOS(x) ((x) >> 3)
+#define BITPOS(x) ((x) & 0xf)
+
+/* Define single bit Error positions in Main & Spare area */
+#define MAIN_SINGLEBIT_ERROR 0x4
+#define SPARE_SINGLEBIT_ERROR 0x1
+
+/* OOB placement block for use with hardware ecc generation */
+#if defined(MXC_NFC_V1)
+#ifndef CONFIG_SYS_NAND_LARGEPAGE
+static struct nand_ecclayout nand_hw_eccoob = {
+ .eccbytes = 5,
+ .eccpos = {6, 7, 8, 9, 10},
+ .oobfree = { {0, 5}, {11, 5}, }
+};
+#else
+static struct nand_ecclayout nand_hw_eccoob2k = {
+ .eccbytes = 20,
+ .eccpos = {
+ 6, 7, 8, 9, 10,
+ 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42,
+ 54, 55, 56, 57, 58,
+ },
+ .oobfree = { {2, 4}, {11, 11}, {27, 11}, {43, 11}, {59, 5} },
+};
+#endif
+#elif defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2)
+#ifndef CONFIG_SYS_NAND_LARGEPAGE
+static struct nand_ecclayout nand_hw_eccoob = {
+ .eccbytes = 9,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
+ .oobfree = { {2, 5} }
+};
+#else
+static struct nand_ecclayout nand_hw_eccoob2k = {
+ .eccbytes = 36,
+ .eccpos = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ },
+ .oobfree = { {2, 5}, {16, 7}, {32, 7}, {48, 7} },
+};
+#endif
+#endif
+
+static int is_16bit_nand(void)
+{
+#if defined(CONFIG_SYS_NAND_BUSWIDTH_16BIT)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
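+/*
+ * Word-wise copy helper for the NFC RAM buffers: moves 'size' bytes as
+ * raw (unswapped) 32-bit accesses; any remainder below four bytes is
+ * silently dropped by the 'size >>= 2' truncation, so callers pass
+ * multiples of 4.
+ */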
+static uint32_t *mxc_nand_memcpy32(uint32_t *dest, uint32_t *source, size_t size)
+{
+ uint32_t *d = dest;
+
+ size >>= 2;
+ while (size--)
+ __raw_writel(__raw_readl(source++), d++);
+ return dest;
+}
+
+/*
+ * This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit.
+ */
+static void wait_op_done(struct mxc_nand_host *host, int max_retries,
+ uint16_t param)
+{
+ uint32_t tmp;
+
+ while (max_retries-- > 0) {
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ tmp = readnfc(&host->regs->config2);
+ if (tmp & NFC_V1_V2_CONFIG2_INT) {
+ tmp &= ~NFC_V1_V2_CONFIG2_INT;
+ writenfc(tmp, &host->regs->config2);
+#elif defined(MXC_NFC_V3_2)
+ tmp = readnfc(&host->ip_regs->ipc);
+ if (tmp & NFC_V3_IPC_INT) {
+ tmp &= ~NFC_V3_IPC_INT;
+ writenfc(tmp, &host->ip_regs->ipc);
+#endif
+ break;
+ }
+ udelay(1);
+ }
+ if (max_retries < 0) {
+ pr_debug("%s(%d): INT not set\n",
+ __func__, param);
+ }
+}
+
+/*
+ * This function issues the specified command to the NAND device and
+ * waits for completion.
+ */
+static void send_cmd(struct mxc_nand_host *host, uint16_t cmd)
+{
+ pr_debug("send_cmd(host, 0x%x)\n", cmd);
+
+ writenfc(cmd, &host->regs->flash_cmd);
+ writenfc(NFC_CMD, &host->regs->operation);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, cmd);
+}
+
+/*
+ * This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command.
+ */
+static void send_addr(struct mxc_nand_host *host, uint16_t addr)
+{
+ pr_debug("send_addr(host, 0x%x)\n", addr);
+
+ writenfc(addr, &host->regs->flash_addr);
+ writenfc(NFC_ADDR, &host->regs->operation);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, addr);
+}
+
+/*
+ * This function requests the NANDFC to initiate the transfer
+ * of data currently in the NANDFC RAM buffer to the NAND device.
+ */
+static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id,
+ int spare_only)
+{
+ if (spare_only)
+ pr_debug("send_prog_page (%d)\n", spare_only);
+
+ if (is_mxc_nfc_21() || is_mxc_nfc_32()) {
+ int i;
+ /*
+ * The controller copies the 64 bytes of spare data from
+ * the first 16 bytes of each of the 4 64 byte spare buffers.
+ * Copy the contiguous data starting in spare_area[0] to
+ * the four spare area buffers.
+ */
+ for (i = 1; i < 4; i++) {
+ void __iomem *src = &host->regs->spare_area[0][i * 16];
+ void __iomem *dst = &host->regs->spare_area[i][0];
+
+ mxc_nand_memcpy32(dst, src, 16);
+ }
+ }
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ writenfc(buf_id, &host->regs->buf_addr);
+#elif defined(MXC_NFC_V3_2)
+ uint32_t tmp = readnfc(&host->regs->config1);
+ tmp &= ~NFC_V3_CONFIG1_RBA_MASK;
+ tmp |= NFC_V3_CONFIG1_RBA(buf_id);
+ writenfc(tmp, &host->regs->config1);
+#endif
+
+ /* Configure spare or page+spare access */
+ if (!host->pagesize_2k) {
+ uint32_t config1 = readnfc(&host->regs->config1);
+ if (spare_only)
+ config1 |= NFC_CONFIG1_SP_EN;
+ else
+ config1 &= ~NFC_CONFIG1_SP_EN;
+ writenfc(config1, &host->regs->config1);
+ }
+
+ writenfc(NFC_INPUT, &host->regs->operation);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, spare_only);
+}
+
+/*
+ * Request the NANDFC to initiate the transfer of data from the
+ * NAND device into the NANDFC RAM buffer.
+ */
+static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id,
+ int spare_only)
+{
+ pr_debug("send_read_page (%d)\n", spare_only);
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ writenfc(buf_id, &host->regs->buf_addr);
+#elif defined(MXC_NFC_V3_2)
+ uint32_t tmp = readnfc(&host->regs->config1);
+ tmp &= ~NFC_V3_CONFIG1_RBA_MASK;
+ tmp |= NFC_V3_CONFIG1_RBA(buf_id);
+ writenfc(tmp, &host->regs->config1);
+#endif
+
+ /* Configure spare or page+spare access */
+ if (!host->pagesize_2k) {
+ uint32_t config1 = readnfc(&host->regs->config1);
+ if (spare_only)
+ config1 |= NFC_CONFIG1_SP_EN;
+ else
+ config1 &= ~NFC_CONFIG1_SP_EN;
+ writenfc(config1, &host->regs->config1);
+ }
+
+ writenfc(NFC_OUTPUT, &host->regs->operation);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, spare_only);
+
+ if (is_mxc_nfc_21() || is_mxc_nfc_32()) {
+ int i;
+
+ /*
+ * The controller copies the 64 bytes of spare data to
+ * the first 16 bytes of each of the 4 spare buffers.
+ * Make the data contiguous starting in spare_area[0].
+ */
+ for (i = 1; i < 4; i++) {
+ void __iomem *src = &host->regs->spare_area[i][0];
+ void __iomem *dst = &host->regs->spare_area[0][i * 16];
+
+ mxc_nand_memcpy32(dst, src, 16);
+ }
+ }
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id(struct mxc_nand_host *host)
+{
+ uint32_t tmp;
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ /* NANDFC buffer 0 is used for device ID output */
+ writenfc(0x0, &host->regs->buf_addr);
+#elif defined(MXC_NFC_V3_2)
+ tmp = readnfc(&host->regs->config1);
+ tmp &= ~NFC_V3_CONFIG1_RBA_MASK;
+ writenfc(tmp, &host->regs->config1);
+#endif
+
+ /* Read ID into main buffer */
+ tmp = readnfc(&host->regs->config1);
+ tmp &= ~NFC_CONFIG1_SP_EN;
+ writenfc(tmp, &host->regs->config1);
+
+ writenfc(NFC_ID, &host->regs->operation);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, 0);
+}
+
+/*
+ * This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status.
+ */
+static uint16_t get_dev_status(struct mxc_nand_host *host)
+{
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ void __iomem *main_buf = host->regs->main_area[1];
+ uint32_t store;
+#endif
+ uint32_t ret, tmp;
+ /* Issue status request to NAND device */
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ /* save the first word of main area 1; it is restored afterwards */
+ store = readl(main_buf);
+ /* NANDFC buffer 1 is used for device status */
+ writenfc(1, &host->regs->buf_addr);
+#endif
+
+ /* Read status into main buffer */
+ tmp = readnfc(&host->regs->config1);
+ tmp &= ~NFC_CONFIG1_SP_EN;
+ writenfc(tmp, &host->regs->config1);
+
+ writenfc(NFC_STATUS, &host->regs->operation);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, TROP_US_DELAY, 0);
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ /*
+ * The status is placed in the first word of the main buffer;
+ * read it out, then restore the saved area 1 data.
+ */
+ ret = readw(main_buf);
+ writel(store, main_buf);
+#elif defined(MXC_NFC_V3_2)
+ ret = readnfc(&host->regs->config1) >> 16;
+#endif
+
+ return ret;
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct mtd_info *mtd)
+{
+ /*
+ * NFC handles R/B internally. Therefore, this function
+ * always returns status as ready.
+ */
+ return 1;
+}
+
+static void _mxc_nand_enable_hwecc(struct mtd_info *mtd, int on)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ uint16_t tmp = readnfc(&host->regs->config1);
+
+ if (on)
+ tmp |= NFC_V1_V2_CONFIG1_ECC_EN;
+ else
+ tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN;
+ writenfc(tmp, &host->regs->config1);
+#elif defined(MXC_NFC_V3_2)
+ uint32_t tmp = readnfc(&host->ip_regs->config2);
+
+ if (on)
+ tmp |= NFC_V3_CONFIG2_ECC_EN;
+ else
+ tmp &= ~NFC_V3_CONFIG2_ECC_EN;
+ writenfc(tmp, &host->ip_regs->config2);
+#endif
+}
+
+#ifdef CONFIG_MXC_NAND_HWECC
+static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ /*
+ * If HW ECC is enabled, we turn it on during init. There is
+ * no need to enable again here.
+ */
+}
+
+#if defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2)
+static int mxc_nand_read_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ uint8_t *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+ int eccpitch = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *bufpoi = buf;
+ int i, toread;
+
+ pr_debug("%s: Reading OOB area of page %u to oob %p\n",
+ __func__, page, buf);
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, mtd->writesize, page);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ toread = min_t(int, length, chip->ecc.prepad);
+ if (toread) {
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ bufpoi += chip->ecc.bytes;
+ host->col_addr += chip->ecc.bytes;
+ length -= chip->ecc.bytes;
+
+ toread = min_t(int, length, chip->ecc.postpad);
+ if (toread) {
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += toread;
+ length -= toread;
+ }
+ }
+ if (length > 0)
+ chip->read_buf(mtd, bufpoi, length);
+
+ _mxc_nand_enable_hwecc(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ mtd->writesize + chip->ecc.prepad, page);
+ bufpoi = buf + chip->ecc.prepad;
+ length = mtd->oobsize - chip->ecc.prepad;
+ for (i = 0; i < chip->ecc.steps; i++) {
+ toread = min_t(int, length, chip->ecc.bytes);
+ chip->read_buf(mtd, bufpoi, toread);
+ bufpoi += eccpitch;
+ length -= eccpitch;
+ host->col_addr += chip->ecc.postpad + chip->ecc.prepad;
+ }
+ _mxc_nand_enable_hwecc(mtd, 1);
+ return 1;
+}
+
+static int mxc_nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf,
+ int oob_required,
+ int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+ int n;
+
+ _mxc_nand_enable_hwecc(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+ for (n = 0, steps = chip->ecc.steps; steps > 0; n++, steps--) {
+ host->col_addr = n * eccsize;
+ chip->read_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->read_buf(mtd, oob, size);
+ _mxc_nand_enable_hwecc(mtd, 1);
+
+ return 0;
+}
+
+static int mxc_nand_read_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf,
+ int oob_required,
+ int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int n, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ pr_debug("Reading page %u to buf %p oob %p\n",
+ page, buf, oob);
+
+ /* first read the data area and the available portion of OOB */
+ for (n = 0; eccsteps; n++, eccsteps--, p += eccsize) {
+ int stat;
+
+ host->col_addr = n * eccsize;
+
+ chip->read_buf(mtd, p, eccsize);
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+
+ if (chip->ecc.prepad) {
+ chip->read_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->read_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ n = mtd->oobsize - (oob - chip->oob_poi);
+ if (n)
+ chip->read_buf(mtd, oob, n);
+
+ /* Then switch ECC off and read the OOB area to get the ECC code */
+ _mxc_nand_enable_hwecc(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, mtd->writesize, page);
+ eccsteps = chip->ecc.steps;
+ oob = chip->oob_poi + chip->ecc.prepad;
+ for (n = 0; eccsteps; n++, eccsteps--, p += eccsize) {
+ host->col_addr = mtd->writesize +
+ n * eccpitch +
+ chip->ecc.prepad;
+ chip->read_buf(mtd, oob, eccbytes);
+ oob += eccbytes + chip->ecc.postpad;
+ }
+ _mxc_nand_enable_hwecc(mtd, 1);
+ return 0;
+}
+
+static int mxc_nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int eccpitch = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int length = mtd->oobsize;
+ int i, len, status, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ for (i = 0; i < steps; i++) {
+ len = min_t(int, length, eccpitch);
+
+ chip->write_buf(mtd, bufpoi, len);
+ bufpoi += len;
+ length -= len;
+ host->col_addr += chip->ecc.prepad + chip->ecc.postpad;
+ }
+ if (length > 0)
+ chip->write_buf(mtd, bufpoi, length);
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int mxc_nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size;
+ int n;
+
+ for (n = 0, steps = chip->ecc.steps; steps > 0; n++, steps--) {
+ host->col_addr = n * eccsize;
+ chip->write_buf(mtd, buf, eccsize);
+ buf += eccsize;
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ host->col_addr += eccbytes;
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size)
+ chip->write_buf(mtd, oob, size);
+ return 0;
+}
+
+static int mxc_nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+ int i, n, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ for (i = n = 0;
+ eccsteps;
+ n++, eccsteps--, i += eccbytes, p += eccsize) {
+ host->col_addr = n * eccsize;
+
+ chip->write_buf(mtd, p, eccsize);
+
+ host->col_addr = mtd->writesize + n * eccpitch;
+
+ if (chip->ecc.prepad) {
+ chip->write_buf(mtd, oob, chip->ecc.prepad);
+ oob += chip->ecc.prepad;
+ }
+
+ chip->write_buf(mtd, oob, eccbytes);
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ chip->write_buf(mtd, oob, chip->ecc.postpad);
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i)
+ chip->write_buf(mtd, oob, i);
+ return 0;
+}
+
+static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint32_t ecc_status = readl(&host->regs->ecc_status_result);
+ int subpages = mtd->writesize / nand_chip->subpagesize;
+ int pg2blk_shift = nand_chip->phys_erase_shift -
+ nand_chip->page_shift;
+
+ do {
+ if ((ecc_status & 0xf) > 4) {
+ static int last_bad = -1;
+
+ if (last_bad != host->page_addr >> pg2blk_shift) {
+ last_bad = host->page_addr >> pg2blk_shift;
+ printk(KERN_DEBUG
+ "MXC_NAND: HWECC uncorrectable ECC error"
+ " in block %u page %u subpage %d\n",
+ last_bad, host->page_addr,
+ mtd->writesize / nand_chip->subpagesize
+ - subpages);
+ }
+ return -EBADMSG;
+ }
+ ecc_status >>= 4;
+ subpages--;
+ } while (subpages > 0);
+
+ return 0;
+}
+#else
+#define mxc_nand_read_page_syndrome NULL
+#define mxc_nand_read_page_raw_syndrome NULL
+#define mxc_nand_read_oob_syndrome NULL
+#define mxc_nand_write_page_syndrome NULL
+#define mxc_nand_write_page_raw_syndrome NULL
+#define mxc_nand_write_oob_syndrome NULL
+
+static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ /*
+ * 1-Bit errors are automatically corrected in HW. No need for
+ * additional correction. 2-Bit errors cannot be corrected by
+ * HW ECC, so we need to return failure
+ */
+ uint16_t ecc_status = readnfc(&host->regs->ecc_status_result);
+
+ if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
+ pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+#endif
+
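+/*
+ * The NFC computes (and, where possible, corrects) ECC entirely in
+ * hardware while the page passes through the RAM buffer, so there is
+ * no software syndrome to produce here; mxc_nand_correct_data() judges
+ * the outcome from the ECC status register instead.
+ */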
+static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ return 0;
+}
+#endif
+
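+/*
+ * The NFC buffer RAM is read 16 bits at a time here, so byte access is
+ * emulated: fetch the containing word, pick the upper or lower byte
+ * via the union based on the saved column address, then advance that
+ * address (by two on 16-bit bus devices).
+ */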
+static u_char mxc_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint8_t ret = 0;
+ uint16_t col;
+ uint16_t __iomem *main_buf =
+ (uint16_t __iomem *)host->regs->main_area[0];
+ uint16_t __iomem *spare_buf =
+ (uint16_t __iomem *)host->regs->spare_area[0];
+ union {
+ uint16_t word;
+ uint8_t bytes[2];
+ } nfc_word;
+
+ /* Check for status request */
+ if (host->status_request)
+ return get_dev_status(host) & 0xFF;
+
+ /* Get column for 16-bit access */
+ col = host->col_addr >> 1;
+
+ /* If we are accessing the spare region */
+ if (host->spare_only)
+ nfc_word.word = readw(&spare_buf[col]);
+ else
+ nfc_word.word = readw(&main_buf[col]);
+
+ /* Pick upper/lower byte of word from RAM buffer */
+ ret = nfc_word.bytes[host->col_addr & 0x1];
+
+ /* Update saved column address */
+ if (nand_chip->options & NAND_BUSWIDTH_16)
+ host->col_addr += 2;
+ else
+ host->col_addr++;
+
+ return ret;
+}
+
+static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ uint16_t col, ret;
+ uint16_t __iomem *p;
+
+ pr_debug("mxc_nand_read_word(col = %d)\n", host->col_addr);
+
+ col = host->col_addr;
+ /* Adjust saved column address */
+ if (col < mtd->writesize && host->spare_only)
+ col += mtd->writesize;
+
+ if (col < mtd->writesize) {
+ p = (uint16_t __iomem *)(host->regs->main_area[0] +
+ (col >> 1));
+ } else {
+ p = (uint16_t __iomem *)(host->regs->spare_area[0] +
+ ((col - mtd->writesize) >> 1));
+ }
+
+ if (col & 1) {
+ union {
+ uint16_t word;
+ uint8_t bytes[2];
+ } nfc_word[3];
+
+ nfc_word[0].word = readw(p);
+ nfc_word[1].word = readw(p + 1);
+
+ nfc_word[2].bytes[0] = nfc_word[0].bytes[1];
+ nfc_word[2].bytes[1] = nfc_word[1].bytes[0];
+
+ ret = nfc_word[2].word;
+ } else {
+ ret = readw(p);
+ }
+
+ /* Update saved column address */
+ host->col_addr = col + 2;
+
+ return ret;
+}
+
+/*
+ * Write len bytes from buffer buf to the NAND Flash. The data is first
+ * copied to the NFC RAM buffer; the subsequent Data Input operation of
+ * the NFC then programs it into the NAND Flash.
+ */
+static void mxc_nand_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ int n, col, i = 0;
+
+ pr_debug("mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr,
+ len);
+
+ col = host->col_addr;
+
+ /* Adjust saved column address */
+ if (col < mtd->writesize && host->spare_only)
+ col += mtd->writesize;
+
+ n = mtd->writesize + mtd->oobsize - col;
+ n = min(len, n);
+
+ pr_debug("%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n);
+
+ while (n > 0) {
+ void __iomem *p;
+
+ if (col < mtd->writesize) {
+ p = host->regs->main_area[0] + (col & ~3);
+ } else {
+ p = host->regs->spare_area[0] -
+ mtd->writesize + (col & ~3);
+ }
+
+ pr_debug("%s:%d: p = %p\n", __func__,
+ __LINE__, p);
+
+ if (((col | (unsigned long)&buf[i]) & 3) || n < 4) {
+ union {
+ uint32_t word;
+ uint8_t bytes[4];
+ } nfc_word;
+
+ nfc_word.word = readl(p);
+ nfc_word.bytes[col & 3] = buf[i++];
+ n--;
+ col++;
+
+ writel(nfc_word.word, p);
+ } else {
+ int m = mtd->writesize - col;
+
+ if (col >= mtd->writesize)
+ m += mtd->oobsize;
+
+ m = min(n, m) & ~3;
+
+ pr_debug("%s:%d: n = %d, m = %d, i = %d, col = %d\n",
+ __func__, __LINE__, n, m, i, col);
+
+ mxc_nand_memcpy32(p, (uint32_t *)&buf[i], m);
+ col += m;
+ i += m;
+ n -= m;
+ }
+ }
+ /* Update saved column address */
+ host->col_addr = col;
+}
+
+/*
+ * Read data from the NAND Flash. The NFC first initiates a data output
+ * cycle, which copies the page into its RAM buffer; len bytes are then
+ * copied from there into buffer buf.
+ */
+static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+ int n, col, i = 0;
+
+ pr_debug("mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr,
+ len);
+
+ col = host->col_addr;
+
+ /* Adjust saved column address */
+ if (col < mtd->writesize && host->spare_only)
+ col += mtd->writesize;
+
+ n = mtd->writesize + mtd->oobsize - col;
+ n = min(len, n);
+
+ while (n > 0) {
+ void __iomem *p;
+
+ if (col < mtd->writesize) {
+ p = host->regs->main_area[0] + (col & ~3);
+ } else {
+ p = host->regs->spare_area[0] -
+ mtd->writesize + (col & ~3);
+ }
+
+ if (((col | (unsigned long)&buf[i]) & 3) || n < 4) {
+ union {
+ uint32_t word;
+ uint8_t bytes[4];
+ } nfc_word;
+
+ nfc_word.word = readl(p);
+ buf[i++] = nfc_word.bytes[col & 3];
+ n--;
+ col++;
+ } else {
+ int m = mtd->writesize - col;
+
+ if (col >= mtd->writesize)
+ m += mtd->oobsize;
+
+ m = min(n, m) & ~3;
+ mxc_nand_memcpy32((uint32_t *)&buf[i], p, m);
+
+ col += m;
+ i += m;
+ n -= m;
+ }
+ }
+ /* Update saved column address */
+ host->col_addr = col;
+}
+
+/*
+ * This function is used by the upper layer to select and
+ * deselect the NAND chip
+ */
+static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ switch (chip) {
+ case -1:
+ /* TODO: Disable the NFC clock */
+ if (host->clk_act)
+ host->clk_act = 0;
+ break;
+ case 0:
+ /* TODO: Enable the NFC clock */
+ if (!host->clk_act)
+ host->clk_act = 1;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Used by the upper layer to write commands to the NAND Flash for
+ * the different operations to be carried out on the NAND Flash
+ */
+void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+ pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+ command, column, page_addr);
+
+ /* Reset command state information */
+ host->status_request = false;
+
+ /* Command pre-processing step */
+ switch (command) {
+
+ case NAND_CMD_STATUS:
+ host->col_addr = 0;
+ host->status_request = true;
+ break;
+
+ case NAND_CMD_READ0:
+ host->page_addr = page_addr;
+ host->col_addr = column;
+ host->spare_only = false;
+ break;
+
+ case NAND_CMD_READOOB:
+ host->col_addr = column;
+ host->spare_only = true;
+ if (host->pagesize_2k)
+ command = NAND_CMD_READ0; /* only READ0 is valid */
+ break;
+
+ case NAND_CMD_SEQIN:
+ if (column >= mtd->writesize) {
+ /*
+ * before sending SEQIN command for partial write,
+ * we need to read one page out. FSL NFC does not support
+ * partial write. It always sends out 512+ecc+512+ecc
+ * for large page nand flash. But for small page nand
+ * flash, it does support SPARE ONLY operation.
+ */
+ if (host->pagesize_2k) {
+ /* call ourself to read a page */
+ mxc_nand_command(mtd, NAND_CMD_READ0, 0,
+ page_addr);
+ }
+
+ host->col_addr = column - mtd->writesize;
+ host->spare_only = true;
+
+ /* Set program pointer to spare region */
+ if (!host->pagesize_2k)
+ send_cmd(host, NAND_CMD_READOOB);
+ } else {
+ host->spare_only = false;
+ host->col_addr = column;
+
+ /* Set program pointer to page start */
+ if (!host->pagesize_2k)
+ send_cmd(host, NAND_CMD_READ0);
+ }
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ send_prog_page(host, 0, host->spare_only);
+
+ if (host->pagesize_2k && is_mxc_nfc_1()) {
+ /* data in 4 areas */
+ send_prog_page(host, 1, host->spare_only);
+ send_prog_page(host, 2, host->spare_only);
+ send_prog_page(host, 3, host->spare_only);
+ }
+
+ break;
+ }
+
+ /* Write out the command to the device. */
+ send_cmd(host, command);
+
+ /* Write out column address, if necessary */
+ if (column != -1) {
+ /*
+ * MXC NANDFC can only perform full page+spare or
+ * spare-only read/write. When the upper layers perform
+ * a read/write buffer operation, we will use the saved
+ * column address to index into the full page.
+ */
+ send_addr(host, 0);
+ if (host->pagesize_2k)
+ /* another col addr cycle for 2k page */
+ send_addr(host, 0);
+ }
+
+ /* Write out page address, if necessary */
+ if (page_addr != -1) {
+ u32 page_mask = nand_chip->pagemask;
+ do {
+ send_addr(host, page_addr & 0xFF);
+ page_addr >>= 8;
+ page_mask >>= 8;
+ } while (page_mask);
+ }
+
+ /* Command post-processing step */
+ switch (command) {
+
+ case NAND_CMD_RESET:
+ break;
+
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READ0:
+ if (host->pagesize_2k) {
+ /* send read confirm command */
+ send_cmd(host, NAND_CMD_READSTART);
+ /* read for each AREA */
+ send_read_page(host, 0, host->spare_only);
+ if (is_mxc_nfc_1()) {
+ send_read_page(host, 1, host->spare_only);
+ send_read_page(host, 2, host->spare_only);
+ send_read_page(host, 3, host->spare_only);
+ }
+ } else {
+ send_read_page(host, 0, host->spare_only);
+ }
+ break;
+
+ case NAND_CMD_READID:
+ host->col_addr = 0;
+ send_read_id(host);
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ break;
+
+ case NAND_CMD_STATUS:
+ break;
+
+ case NAND_CMD_ERASE2:
+ break;
+ }
+}
+
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
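+/*
+ * The two descriptors below keep the bad-block table in flash: the
+ * 4-byte signature sits at OOB offset 0 with a version byte at offset
+ * 4, two table bits are stored per block, and up to the last four
+ * blocks of each chip are searched (or used) for the main table and
+ * its mirror.
+ */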
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 0,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = 4,
+ .pattern = mirror_pattern,
+};
+
+#endif
+
+int board_nand_init(struct nand_chip *this)
+{
+ struct mtd_info *mtd;
+#if defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2)
+ uint32_t tmp;
+#endif
+
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+ this->bbt_options |= NAND_BBT_USE_FLASH;
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+#endif
+
+ /* structures must be linked */
+ mtd = &this->mtd;
+ host->nand = this;
+
+ /* 5 us command delay time */
+ this->chip_delay = 5;
+
+ nand_set_controller_data(this, host);
+ this->dev_ready = mxc_nand_dev_ready;
+ this->cmdfunc = mxc_nand_command;
+ this->select_chip = mxc_nand_select_chip;
+ this->read_byte = mxc_nand_read_byte;
+ this->read_word = mxc_nand_read_word;
+ this->write_buf = mxc_nand_write_buf;
+ this->read_buf = mxc_nand_read_buf;
+
+ host->regs = (struct mxc_nand_regs __iomem *)CONFIG_MXC_NAND_REGS_BASE;
+#ifdef MXC_NFC_V3_2
+ host->ip_regs =
+ (struct mxc_nand_ip_regs __iomem *)CONFIG_MXC_NAND_IP_REGS_BASE;
+#endif
+ host->clk_act = 1;
+
+#ifdef CONFIG_MXC_NAND_HWECC
+ this->ecc.calculate = mxc_nand_calculate_ecc;
+ this->ecc.hwctl = mxc_nand_enable_hwecc;
+ this->ecc.correct = mxc_nand_correct_data;
+ if (is_mxc_nfc_21() || is_mxc_nfc_32()) {
+ this->ecc.mode = NAND_ECC_HW_SYNDROME;
+ this->ecc.read_page = mxc_nand_read_page_syndrome;
+ this->ecc.read_page_raw = mxc_nand_read_page_raw_syndrome;
+ this->ecc.read_oob = mxc_nand_read_oob_syndrome;
+ this->ecc.write_page = mxc_nand_write_page_syndrome;
+ this->ecc.write_page_raw = mxc_nand_write_page_raw_syndrome;
+ this->ecc.write_oob = mxc_nand_write_oob_syndrome;
+ this->ecc.bytes = 9;
+ this->ecc.prepad = 7;
+ } else {
+ this->ecc.mode = NAND_ECC_HW;
+ }
+
+ if (is_mxc_nfc_1())
+ this->ecc.strength = 1;
+ else
+ this->ecc.strength = 4;
+
+ host->pagesize_2k = 0;
+
+ this->ecc.size = 512;
+ _mxc_nand_enable_hwecc(mtd, 1);
+#else
+ this->ecc.layout = &nand_soft_eccoob;
+ this->ecc.mode = NAND_ECC_SOFT;
+ _mxc_nand_enable_hwecc(mtd, 0);
+#endif
+ /* Reset NAND */
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* NAND bus width determines access functions used by upper layer */
+ if (is_16bit_nand())
+ this->options |= NAND_BUSWIDTH_16;
+
+#ifdef CONFIG_SYS_NAND_LARGEPAGE
+ host->pagesize_2k = 1;
+ this->ecc.layout = &nand_hw_eccoob2k;
+#else
+ host->pagesize_2k = 0;
+ this->ecc.layout = &nand_hw_eccoob;
+#endif
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+#ifdef MXC_NFC_V2_1
+ tmp = readnfc(&host->regs->config1);
+ tmp |= NFC_V2_CONFIG1_ONE_CYCLE;
+ tmp |= NFC_V2_CONFIG1_ECC_MODE_4;
+ writenfc(tmp, &host->regs->config1);
+ if (host->pagesize_2k)
+ writenfc(64/2, &host->regs->spare_area_size);
+ else
+ writenfc(16/2, &host->regs->spare_area_size);
+#endif
+
+ /*
+ * preset operation
+ * Unlock the internal RAM Buffer
+ */
+ writenfc(0x2, &host->regs->config);
+
+ /* Blocks to be unlocked */
+ writenfc(0x0, &host->regs->unlockstart_blkaddr);
+ /* Originally (Freescale LTIB 2.6.21) 0x4000 was written to the
+ * unlockend_blkaddr, but the magic 0x4000 does not always work
+ * when writing more than some 32 megabytes (on 2k page nands)
+ * However 0xFFFF doesn't seem to have this kind
+ * of limitation (tried it back and forth several times).
+ * The linux kernel driver sets this to 0xFFFF for the v2 controller
+ * only, but probably this was not tested there for v1.
+ * The very same limitation seems to apply to this kernel driver.
+ * This might be NAND chip specific and the i.MX31 datasheet is
+ * extremely vague about the semantics of this register.
+ */
+ writenfc(0xFFFF, &host->regs->unlockend_blkaddr);
+
+ /* Unlock Block Command for given address range */
+ writenfc(0x4, &host->regs->wrprot);
+#elif defined(MXC_NFC_V3_2)
+ writenfc(NFC_V3_CONFIG1_RBA(0), &host->regs->config1);
+ writenfc(NFC_V3_IPC_CREQ, &host->ip_regs->ipc);
+
+ /* Unlock the internal RAM Buffer */
+ writenfc(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
+ &host->ip_regs->wrprot);
+
+ /* Blocks to be unlocked */
+ for (tmp = 0; tmp < CONFIG_SYS_NAND_MAX_CHIPS; tmp++)
+ writenfc(0x0 | 0xFFFF << 16,
+ &host->ip_regs->wrprot_unlock_blkaddr[tmp]);
+
+ writenfc(0, &host->ip_regs->ipc);
+
+ tmp = readnfc(&host->ip_regs->config2);
+ tmp &= ~(NFC_V3_CONFIG2_SPAS_MASK | NFC_V3_CONFIG2_EDC_MASK |
+ NFC_V3_CONFIG2_ECC_MODE_8 | NFC_V3_CONFIG2_PS_MASK);
+ tmp |= NFC_V3_CONFIG2_ONE_CYCLE;
+
+ if (host->pagesize_2k) {
+ tmp |= NFC_V3_CONFIG2_SPAS(64/2);
+ tmp |= NFC_V3_CONFIG2_PS_2048;
+ } else {
+ tmp |= NFC_V3_CONFIG2_SPAS(16/2);
+ tmp |= NFC_V3_CONFIG2_PS_512;
+ }
+
+ writenfc(tmp, &host->ip_regs->config2);
+
+ tmp = NFC_V3_CONFIG3_NUM_OF_DEVS(0) |
+ NFC_V3_CONFIG3_NO_SDMA |
+ NFC_V3_CONFIG3_RBB_MODE |
+ NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+ NFC_V3_CONFIG3_ADD_OP(0);
+
+ if (!(this->options & NAND_BUSWIDTH_16))
+ tmp |= NFC_V3_CONFIG3_FW8;
+
+ writenfc(tmp, &host->ip_regs->config3);
+
+ writenfc(0, &host->ip_regs->delay_line);
+#endif
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/mxc_nand.h b/roms/u-boot/drivers/mtd/nand/raw/mxc_nand.h
new file mode 100644
index 000000000..1c7f3a2e2
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/mxc_nand.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (c) 2009 Magnus Lilja <lilja.magnus@gmail.com>
+ */
+
+#ifndef __MXC_NAND_H
+#define __MXC_NAND_H
+
+/*
+ * Register map and bit definitions for the Freescale NAND Flash Controller
+ * present in various i.MX devices.
+ *
+ * MX31 and MX27 have version 1, which has:
+ * 4 512-byte main buffers and
+ * 4 16-byte spare buffers
+ * to support up to 2K byte pagesize nand.
+ * Reading or writing a 2K page requires 4 FDI/FDO cycles.
+ *
+ * MX25 and MX35 have version 2.1, and MX51 and MX53 have version 3.2, which
+ * have:
+ * 8 512-byte main buffers and
+ * 8 64-byte spare buffers
+ * to support up to 4K byte pagesize nand.
+ * Reading or writing a 2K or 4K page requires only 1 FDI/FDO cycle.
+ * Also, some registers have moved and/or changed meaning, as seen below.
+ */
+#if defined(CONFIG_MX27) || defined(CONFIG_MX31)
+#define MXC_NFC_V1
+#define is_mxc_nfc_1() 1
+#define is_mxc_nfc_21() 0
+#define is_mxc_nfc_32() 0
+#elif defined(CONFIG_MX25) || defined(CONFIG_MX35)
+#define MXC_NFC_V2_1
+#define is_mxc_nfc_1() 0
+#define is_mxc_nfc_21() 1
+#define is_mxc_nfc_32() 0
+#elif defined(CONFIG_MX51) || defined(CONFIG_MX53)
+#define MXC_NFC_V3
+#define MXC_NFC_V3_2
+#define is_mxc_nfc_1() 0
+#define is_mxc_nfc_21() 0
+#define is_mxc_nfc_32() 1
+#else
+#error "MXC NFC implementation not supported"
+#endif
+#define is_mxc_nfc_3() is_mxc_nfc_32()
+
+#if defined(MXC_NFC_V1)
+#define NAND_MXC_NR_BUFS 4
+#define NAND_MXC_SPARE_BUF_SIZE 16
+#define NAND_MXC_REG_OFFSET 0xe00
+#define NAND_MXC_2K_MULTI_CYCLE
+#elif defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2)
+#define NAND_MXC_NR_BUFS 8
+#define NAND_MXC_SPARE_BUF_SIZE 64
+#define NAND_MXC_REG_OFFSET 0x1e00
+#endif
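+/*
+ * Derived v1 buffer map implied by the constants above (for reference,
+ * not a datasheet quote): 4 x 512-byte main buffers at offset 0x000,
+ * 4 x 16-byte spare buffers at 0x800, registers at 0xe00; reserved1[]
+ * in the struct below pads the 0x840-0xe00 gap.
+ */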
+
+struct mxc_nand_regs {
+ u8 main_area[NAND_MXC_NR_BUFS][0x200];
+ u8 spare_area[NAND_MXC_NR_BUFS][NAND_MXC_SPARE_BUF_SIZE];
+ /*
+ * reserved size is offset of nfc registers
+ * minus total main and spare sizes
+ */
+ u8 reserved1[NAND_MXC_REG_OFFSET
+ - NAND_MXC_NR_BUFS * (512 + NAND_MXC_SPARE_BUF_SIZE)];
+#if defined(MXC_NFC_V1)
+ u16 buf_size;
+ u16 reserved2;
+ u16 buf_addr;
+ u16 flash_addr;
+ u16 flash_cmd;
+ u16 config;
+ u16 ecc_status_result;
+ u16 rsltmain_area;
+ u16 rsltspare_area;
+ u16 wrprot;
+ u16 unlockstart_blkaddr;
+ u16 unlockend_blkaddr;
+ u16 nf_wrprst;
+ u16 config1;
+ u16 config2;
+#elif defined(MXC_NFC_V2_1)
+ u16 reserved2[2];
+ u16 buf_addr;
+ u16 flash_addr;
+ u16 flash_cmd;
+ u16 config;
+ u32 ecc_status_result;
+ u16 spare_area_size;
+ u16 wrprot;
+ u16 reserved3[2];
+ u16 nf_wrprst;
+ u16 config1;
+ u16 config2;
+ u16 reserved4;
+ u16 unlockstart_blkaddr;
+ u16 unlockend_blkaddr;
+ u16 unlockstart_blkaddr1;
+ u16 unlockend_blkaddr1;
+ u16 unlockstart_blkaddr2;
+ u16 unlockend_blkaddr2;
+ u16 unlockstart_blkaddr3;
+ u16 unlockend_blkaddr3;
+#elif defined(MXC_NFC_V3_2)
+ u32 flash_cmd;
+ u32 flash_addr[12];
+ u32 config1;
+ u32 ecc_status_result;
+ u32 status_sum;
+ u32 launch;
+#endif
+};
+
+#ifdef MXC_NFC_V3_2
+struct mxc_nand_ip_regs {
+ u32 wrprot;
+ u32 wrprot_unlock_blkaddr[8];
+ u32 config2;
+ u32 config3;
+ u32 ipc;
+ u32 err_addr;
+ u32 delay_line;
+};
+#endif
+
+/* Set FCMD to 1, rest to 0 for Command operation */
+#define NFC_CMD 0x1
+
+/* Set FADD to 1, rest to 0 for Address operation */
+#define NFC_ADDR 0x2
+
+/* Set FDI to 1, rest to 0 for Input operation */
+#define NFC_INPUT 0x4
+
+/* Set FDO to 001, rest to 0 for Data Output operation */
+#define NFC_OUTPUT 0x8
+
+/* Set FDO to 010, rest to 0 for Read ID operation */
+#define NFC_ID 0x10
+
+/* Set FDO to 100, rest to 0 for Read Status operation */
+#define NFC_STATUS 0x20
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+#define NFC_CONFIG1_SP_EN (1 << 2)
+#define NFC_CONFIG1_RST (1 << 6)
+#define NFC_CONFIG1_CE (1 << 7)
+#elif defined(MXC_NFC_V3_2)
+#define NFC_CONFIG1_SP_EN (1 << 0)
+#define NFC_CONFIG1_CE (1 << 1)
+#define NFC_CONFIG1_RST (1 << 2)
+#endif
+#define NFC_V1_V2_CONFIG1_ECC_EN (1 << 3)
+#define NFC_V1_V2_CONFIG1_INT_MSK (1 << 4)
+#define NFC_V1_V2_CONFIG1_BIG (1 << 5)
+#define NFC_V2_CONFIG1_ECC_MODE_4 (1 << 0)
+#define NFC_V2_CONFIG1_ONE_CYCLE (1 << 8)
+#define NFC_V2_CONFIG1_FP_INT (1 << 11)
+#define NFC_V3_CONFIG1_RBA_MASK (0x7 << 4)
+#define NFC_V3_CONFIG1_RBA(x) (((x) & 0x7) << 4)
+
+#define NFC_V1_V2_CONFIG2_INT (1 << 15)
+#define NFC_V3_CONFIG2_PS_MASK (0x3 << 0)
+#define NFC_V3_CONFIG2_PS_512 (0 << 0)
+#define NFC_V3_CONFIG2_PS_2048 (1 << 0)
+#define NFC_V3_CONFIG2_PS_4096 (2 << 0)
+#define NFC_V3_CONFIG2_ONE_CYCLE (1 << 2)
+#define NFC_V3_CONFIG2_ECC_EN (1 << 3)
+#define NFC_V3_CONFIG2_2CMD_PHASES (1 << 4)
+#define NFC_V3_CONFIG2_NUM_ADDR_PH0 (1 << 5)
+#define NFC_V3_CONFIG2_ECC_MODE_8 (1 << 6)
+#define NFC_V3_CONFIG2_PPB_MASK (0x3 << 7)
+#define NFC_V3_CONFIG2_PPB(x) (((x) & 0x3) << 7)
+#define NFC_V3_CONFIG2_EDC_MASK (0x7 << 9)
+#define NFC_V3_CONFIG2_EDC(x) (((x) & 0x7) << 9)
+#define NFC_V3_CONFIG2_NUM_ADDR_PH1(x) (((x) & 0x3) << 12)
+#define NFC_V3_CONFIG2_INT_MSK (1 << 15)
+#define NFC_V3_CONFIG2_SPAS_MASK (0xff << 16)
+#define NFC_V3_CONFIG2_SPAS(x) (((x) & 0xff) << 16)
+#define NFC_V3_CONFIG2_ST_CMD_MASK (0xff << 24)
+#define NFC_V3_CONFIG2_ST_CMD(x) (((x) & 0xff) << 24)
+
+#define NFC_V3_CONFIG3_ADD_OP(x) (((x) & 0x3) << 0)
+#define NFC_V3_CONFIG3_FW8 (1 << 3)
+#define NFC_V3_CONFIG3_SBB(x) (((x) & 0x7) << 8)
+#define NFC_V3_CONFIG3_NUM_OF_DEVS(x) (((x) & 0x7) << 12)
+#define NFC_V3_CONFIG3_RBB_MODE (1 << 15)
+#define NFC_V3_CONFIG3_NO_SDMA (1 << 20)
+
+#define NFC_V3_WRPROT_UNLOCK (1 << 2)
+#define NFC_V3_WRPROT_BLS_UNLOCK (2 << 6)
+
+#define NFC_V3_IPC_CREQ (1 << 0)
+#define NFC_V3_IPC_INT (1 << 31)
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+#define operation config2
+#define readnfc readw
+#define writenfc writew
+#elif defined(MXC_NFC_V3_2)
+#define operation launch
+#define readnfc readl
+#define writenfc writel
+#endif
+
+#endif /* __MXC_NAND_H */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/mxc_nand_spl.c b/roms/u-boot/drivers/mtd/nand/raw/mxc_nand_spl.c
new file mode 100644
index 000000000..2f054b60e
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/mxc_nand_spl.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2009
+ * Magnus Lilja <lilja.magnus@gmail.com>
+ *
+ * (C) Copyright 2008
+ * Maxim Artamonov, <scn1874 at yandex.ru>
+ *
+ * (C) Copyright 2006-2008
+ * Stefan Roese, DENX Software Engineering, sr at denx.de.
+ */
+
+#include <common.h>
+#include <hang.h>
+#include <nand.h>
+#include <asm/arch/imx-regs.h>
+#include <asm/io.h>
+#include "mxc_nand.h"
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+static struct mxc_nand_regs *const nfc = (void *)NFC_BASE_ADDR;
+#elif defined(MXC_NFC_V3_2)
+static struct mxc_nand_regs *const nfc = (void *)NFC_BASE_ADDR_AXI;
+static struct mxc_nand_ip_regs *const nfc_ip = (void *)NFC_BASE_ADDR;
+#endif
+
+static void nfc_wait_ready(void)
+{
+ uint32_t tmp;
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ while (!(readnfc(&nfc->config2) & NFC_V1_V2_CONFIG2_INT))
+ ;
+
+ /* Reset interrupt flag */
+ tmp = readnfc(&nfc->config2);
+ tmp &= ~NFC_V1_V2_CONFIG2_INT;
+ writenfc(tmp, &nfc->config2);
+#elif defined(MXC_NFC_V3_2)
+ while (!(readnfc(&nfc_ip->ipc) & NFC_V3_IPC_INT))
+ ;
+
+ /* Reset interrupt flag */
+ tmp = readnfc(&nfc_ip->ipc);
+ tmp &= ~NFC_V3_IPC_INT;
+ writenfc(tmp, &nfc_ip->ipc);
+#endif
+}
+
+static void nfc_nand_init(void)
+{
+#if defined(MXC_NFC_V3_2)
+ int ecc_per_page = CONFIG_SYS_NAND_PAGE_SIZE / 512;
+ int tmp;
+
+ tmp = (readnfc(&nfc_ip->config2) & ~(NFC_V3_CONFIG2_SPAS_MASK |
+ NFC_V3_CONFIG2_EDC_MASK | NFC_V3_CONFIG2_PS_MASK)) |
+ NFC_V3_CONFIG2_SPAS(CONFIG_SYS_NAND_OOBSIZE / 2) |
+ NFC_V3_CONFIG2_INT_MSK | NFC_V3_CONFIG2_ECC_EN |
+ NFC_V3_CONFIG2_ONE_CYCLE;
+ if (CONFIG_SYS_NAND_PAGE_SIZE == 4096)
+ tmp |= NFC_V3_CONFIG2_PS_4096;
+ else if (CONFIG_SYS_NAND_PAGE_SIZE == 2048)
+ tmp |= NFC_V3_CONFIG2_PS_2048;
+ else if (CONFIG_SYS_NAND_PAGE_SIZE == 512)
+ tmp |= NFC_V3_CONFIG2_PS_512;
+ /*
+ * if the spare size is larger than 16 bytes per 512-byte hunk
+ * then use 8 symbol correction instead of 4
+ */
+ if (CONFIG_SYS_NAND_OOBSIZE / ecc_per_page > 16)
+ tmp |= NFC_V3_CONFIG2_ECC_MODE_8;
+ else
+ tmp &= ~NFC_V3_CONFIG2_ECC_MODE_8;
+ writenfc(tmp, &nfc_ip->config2);
+
+ tmp = NFC_V3_CONFIG3_NUM_OF_DEVS(0) |
+ NFC_V3_CONFIG3_NO_SDMA |
+ NFC_V3_CONFIG3_RBB_MODE |
+ NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+ NFC_V3_CONFIG3_ADD_OP(0);
+#ifndef CONFIG_SYS_NAND_BUSWIDTH_16
+ tmp |= NFC_V3_CONFIG3_FW8;
+#endif
+ writenfc(tmp, &nfc_ip->config3);
+
+ writenfc(0, &nfc_ip->delay_line);
+#elif defined(MXC_NFC_V2_1)
+ int ecc_per_page = CONFIG_SYS_NAND_PAGE_SIZE / 512;
+ int config1;
+
+ writenfc(CONFIG_SYS_NAND_OOBSIZE / 2, &nfc->spare_area_size);
+
+	/* unlock the RAM buffer */
+ writenfc(0x2, &nfc->config);
+
+	/* enable hardware ECC checking and correction */
+ config1 = readnfc(&nfc->config1) | NFC_V1_V2_CONFIG1_ECC_EN |
+ NFC_V1_V2_CONFIG1_INT_MSK | NFC_V2_CONFIG1_ONE_CYCLE |
+ NFC_V2_CONFIG1_FP_INT;
+ /*
+	 * if spare size is larger than 16 bytes per 512 byte chunk
+ * then use 8 symbol correction instead of 4
+ */
+ if (CONFIG_SYS_NAND_OOBSIZE / ecc_per_page > 16)
+ config1 &= ~NFC_V2_CONFIG1_ECC_MODE_4;
+ else
+ config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+ writenfc(config1, &nfc->config1);
+#elif defined(MXC_NFC_V1)
+	/* unlock the RAM buffer */
+ writenfc(0x2, &nfc->config);
+
+	/* enable hardware ECC checking and correction */
+ writenfc(NFC_V1_V2_CONFIG1_ECC_EN | NFC_V1_V2_CONFIG1_INT_MSK,
+ &nfc->config1);
+#endif
+}
+
+static void nfc_nand_command(unsigned short command)
+{
+ writenfc(command, &nfc->flash_cmd);
+ writenfc(NFC_CMD, &nfc->operation);
+ nfc_wait_ready();
+}
+
+static void nfc_nand_address(unsigned short address)
+{
+ writenfc(address, &nfc->flash_addr);
+ writenfc(NFC_ADDR, &nfc->operation);
+ nfc_wait_ready();
+}
+
+static void nfc_nand_page_address(unsigned int page_address)
+{
+ unsigned int page_count;
+
+ nfc_nand_address(0x00);
+
+ /* code only for large page flash */
+ if (CONFIG_SYS_NAND_PAGE_SIZE > 512)
+ nfc_nand_address(0x00);
+
+ page_count = CONFIG_SYS_NAND_SIZE / CONFIG_SYS_NAND_PAGE_SIZE;
+
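+	/*
+	 * Example with assumed values: a 256 MiB device with 2 KiB pages has
+	 * 0x20000 pages, so page_count - 1 = 0x1ffff and the loop below
+	 * emits three row-address cycles.
+	 */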
+ if (page_address <= page_count) {
+ page_count--; /* transform 0x01000000 to 0x00ffffff */
+ do {
+ nfc_nand_address(page_address & 0xff);
+ page_address = page_address >> 8;
+ page_count = page_count >> 8;
+ } while (page_count);
+ }
+
+ nfc_nand_address(0x00);
+}
+
+static void nfc_nand_data_output(void)
+{
+#ifdef NAND_MXC_2K_MULTI_CYCLE
+ int i;
+#endif
+
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ writenfc(0, &nfc->buf_addr);
+#elif defined(MXC_NFC_V3_2)
+ int config1 = readnfc(&nfc->config1);
+ config1 &= ~NFC_V3_CONFIG1_RBA_MASK;
+ writenfc(config1, &nfc->config1);
+#endif
+ writenfc(NFC_OUTPUT, &nfc->operation);
+ nfc_wait_ready();
+#ifdef NAND_MXC_2K_MULTI_CYCLE
+ /*
+ * This NAND controller requires multiple input commands
+ * for pages larger than 512 bytes.
+ */
+ for (i = 1; i < CONFIG_SYS_NAND_PAGE_SIZE / 512; i++) {
+ writenfc(i, &nfc->buf_addr);
+ writenfc(NFC_OUTPUT, &nfc->operation);
+ nfc_wait_ready();
+ }
+#endif
+}
+
+static int nfc_nand_check_ecc(void)
+{
+#if defined(MXC_NFC_V1)
+ u16 ecc_status = readw(&nfc->ecc_status_result);
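+	/*
+	 * V1 performs 1-bit correction: bits 1:0 report the main-area result
+	 * and bits 3:2 the spare-area result, a value of 2 meaning a
+	 * non-correctable double-bit error (matching the check below).
+	 */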
+ return (ecc_status & 0x3) == 2 || (ecc_status >> 2) == 2;
+#elif defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2)
+ u32 ecc_status = readl(&nfc->ecc_status_result);
+ int ecc_per_page = CONFIG_SYS_NAND_PAGE_SIZE / 512;
+ int err_limit = CONFIG_SYS_NAND_OOBSIZE / ecc_per_page > 16 ? 8 : 4;
+ int subpages = CONFIG_SYS_NAND_PAGE_SIZE / 512;
+
+ do {
+ if ((ecc_status & 0xf) > err_limit)
+ return 1;
+ ecc_status >>= 4;
+ } while (--subpages);
+
+ return 0;
+#endif
+}
+
+static void nfc_nand_read_page(unsigned int page_address)
+{
+ /* read in first 0 buffer */
+#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1)
+ writenfc(0, &nfc->buf_addr);
+#elif defined(MXC_NFC_V3_2)
+ int config1 = readnfc(&nfc->config1);
+ config1 &= ~NFC_V3_CONFIG1_RBA_MASK;
+ writenfc(config1, &nfc->config1);
+#endif
+ nfc_nand_command(NAND_CMD_READ0);
+ nfc_nand_page_address(page_address);
+
+ if (CONFIG_SYS_NAND_PAGE_SIZE > 512)
+ nfc_nand_command(NAND_CMD_READSTART);
+
+ nfc_nand_data_output(); /* fill the main buffer 0 */
+}
+
+static int nfc_read_page(unsigned int page_address, unsigned char *buf)
+{
+ int i;
+ u32 *src;
+ u32 *dst;
+
+ nfc_nand_read_page(page_address);
+
+ if (nfc_nand_check_ecc())
+ return -EBADMSG;
+
+ src = (u32 *)&nfc->main_area[0][0];
+ dst = (u32 *)buf;
+
+	/* copy from the NFC buffer to SDRAM */
+ for (i = 0; i < CONFIG_SYS_NAND_PAGE_SIZE / 4; i++) {
+ writel(readl(src), dst);
+ src++;
+ dst++;
+ }
+
+ return 0;
+}
+
+static int is_badblock(int pagenumber)
+{
+ int page = pagenumber;
+ u32 badblock;
+ u32 *src;
+
+ /* Check the first two pages for bad block markers */
+ for (page = pagenumber; page < pagenumber + 2; page++) {
+ nfc_nand_read_page(page);
+
+ src = (u32 *)&nfc->spare_area[0][0];
+
+ /*
+ * IMPORTANT NOTE: The nand flash controller uses a non-
+ * standard layout for large page devices. This can
+ * affect the position of the bad block marker.
+ */
+ /* Get the bad block marker */
+ badblock = readl(&src[CONFIG_SYS_NAND_BAD_BLOCK_POS / 4]);
+ badblock >>= 8 * (CONFIG_SYS_NAND_BAD_BLOCK_POS % 4);
+ badblock &= 0xff;
+
+		/* verify the bad block marker */
+ if (badblock != 0xff)
+ return 1; /* potential bad block */
+ }
+
+ return 0;
+}
+
+int nand_spl_load_image(uint32_t from, unsigned int size, void *buf)
+{
+ int i;
+ unsigned int page;
+ unsigned int maxpages = CONFIG_SYS_NAND_SIZE /
+ CONFIG_SYS_NAND_PAGE_SIZE;
+
+ nfc_nand_init();
+
+ /* Convert to page number */
+ page = from / CONFIG_SYS_NAND_PAGE_SIZE;
+ i = 0;
+
+ size = roundup(size, CONFIG_SYS_NAND_PAGE_SIZE);
+ while (i < size / CONFIG_SYS_NAND_PAGE_SIZE) {
+ if (nfc_read_page(page, buf) < 0)
+ return -1;
+
+ page++;
+ i++;
+ buf = buf + CONFIG_SYS_NAND_PAGE_SIZE;
+
+ /*
+ * Check if we have crossed a block boundary, and if so
+ * check for bad block.
+ */
+ if (!(page % CONFIG_SYS_NAND_PAGE_COUNT)) {
+ /*
+ * Yes, new block. See if this block is good. If not,
+ * loop until we find a good block.
+ */
+ while (is_badblock(page)) {
+ page = page + CONFIG_SYS_NAND_PAGE_COUNT;
+				/* Check if we've reached the end of flash. */
+ if (page >= maxpages)
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+#ifndef CONFIG_SPL_FRAMEWORK
+/*
+ * The main entry for NAND booting. It's necessary that SDRAM is already
+ * configured and available since this code loads the main U-Boot image
+ * from NAND into SDRAM and starts it from there.
+ */
+__used void nand_boot(void)
+{
+ __attribute__((noreturn)) void (*uboot)(void);
+
+ /*
+ * CONFIG_SYS_NAND_U_BOOT_OFFS and CONFIG_SYS_NAND_U_BOOT_SIZE must
+ * be aligned to full pages
+ */
+ if (!nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
+ CONFIG_SYS_NAND_U_BOOT_SIZE,
+ (uchar *)CONFIG_SYS_NAND_U_BOOT_DST)) {
+ /* Copy from NAND successful, start U-Boot */
+ uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START;
+ uboot();
+ } else {
+ /* Unrecoverable error when copying from NAND */
+ hang();
+ }
+}
+#endif
+
+void nand_init(void) {}
+void nand_deselect(void) {}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/mxs_nand.c b/roms/u-boot/drivers/mtd/nand/raw/mxs_nand.c
new file mode 100644
index 000000000..e6bbfac4d
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/mxs_nand.c
@@ -0,0 +1,1599 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale i.MX28 NAND flash driver
+ *
+ * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
+ * on behalf of DENX Software Engineering GmbH
+ *
+ * Based on code from LTIB:
+ * Freescale GPMI NFC NAND Flash Driver
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2017-2019 NXP
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <malloc.h>
+#include <mxs_nand.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/imx-regs.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/mach-imx/regs-bch.h>
+#include <asm/mach-imx/regs-gpmi.h>
+#include <linux/errno.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define MXS_NAND_DMA_DESCRIPTOR_COUNT 4
+
+#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
+ defined(CONFIG_IMX8M)
+#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2
+#else
+#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0
+#endif
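+/*
+ * On i.MX6 and newer SoCs the BCH DATA0/DATAN size register fields are
+ * programmed in 32-bit words rather than bytes, hence the shift of 2 above.
+ */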
+#define MXS_NAND_METADATA_SIZE 10
+#define MXS_NAND_BITS_PER_ECC_LEVEL 13
+
+#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
+#define MXS_NAND_COMMAND_BUFFER_SIZE 32
+#else
+#define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE
+#endif
+
+#define MXS_NAND_BCH_TIMEOUT 10000
+
+struct nand_ecclayout fake_ecc_layout;
+
+/*
+ * Cache management functions
+ */
+#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
+static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
+{
+ uint32_t addr = (uintptr_t)info->data_buf;
+
+ flush_dcache_range(addr, addr + info->data_buf_size);
+}
+
+static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
+{
+ uint32_t addr = (uintptr_t)info->data_buf;
+
+ invalidate_dcache_range(addr, addr + info->data_buf_size);
+}
+
+static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
+{
+ uint32_t addr = (uintptr_t)info->cmd_buf;
+
+ flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
+}
+#else
+static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
+static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
+static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
+#endif
+
+static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
+{
+ struct mxs_dma_desc *desc;
+
+ if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
+ printf("MXS NAND: Too many DMA descriptors requested\n");
+ return NULL;
+ }
+
+ desc = info->desc[info->desc_index];
+ info->desc_index++;
+
+ return desc;
+}
+
+static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
+{
+ int i;
+ struct mxs_dma_desc *desc;
+
+ for (i = 0; i < info->desc_index; i++) {
+ desc = info->desc[i];
+ memset(desc, 0, sizeof(struct mxs_dma_desc));
+ desc->address = (dma_addr_t)desc;
+ }
+
+ info->desc_index = 0;
+}
+
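+/*
+ * The per-chunk ECC status bytes follow the metadata in the auxiliary
+ * buffer, aligned up to the next 32-bit boundary.
+ */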
+static uint32_t mxs_nand_aux_status_offset(void)
+{
+ return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
+}
+
+static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
+ struct mtd_info *mtd,
+ unsigned int *chunk_num)
+{
+ unsigned int i, j;
+
+ if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
+		dev_err(mtd->dev, "The size of chunk0 must equal that of chunkn\n");
+ return false;
+ }
+
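+	/*
+	 * The physical bad block marker sits at bit offset writesize * 8.
+	 * Strip the metadata, then divide by the span of one data-plus-ECC
+	 * group: i is the group index and j the bit offset within it. The
+	 * marker is only usable if it lands in the data part of a group.
+	 */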
+ i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
+ (geo->gf_len * geo->ecc_strength +
+ geo->ecc_chunkn_size * 8);
+
+ j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
+ (geo->gf_len * geo->ecc_strength +
+ geo->ecc_chunkn_size * 8) * i;
+
+ if (j < geo->ecc_chunkn_size * 8) {
+ *chunk_num = i + 1;
+ dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
+ geo->ecc_strength, *chunk_num);
+ return true;
+ }
+
+ return false;
+}
+
+static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
+ struct mtd_info *mtd,
+ unsigned int ecc_strength,
+ unsigned int ecc_step)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ unsigned int block_mark_bit_offset;
+
+ switch (ecc_step) {
+ case SZ_512:
+ geo->gf_len = 13;
+ break;
+ case SZ_1K:
+ geo->gf_len = 14;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ geo->ecc_chunk0_size = ecc_step;
+ geo->ecc_chunkn_size = ecc_step;
+ geo->ecc_strength = round_up(ecc_strength, 2);
+
+	/* Keep the chunk size (C) >= OOB size (O) */
+ if (geo->ecc_chunkn_size < mtd->oobsize)
+ return -EINVAL;
+
+ if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
+ return -EINVAL;
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
+
+ /* For bit swap. */
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + MXS_NAND_METADATA_SIZE * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+
+ return 0;
+}
+
+static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
+ struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ unsigned int block_mark_bit_offset;
+
+ /* The default for the length of Galois Field. */
+ geo->gf_len = 13;
+
+ /* The default for chunk size. */
+ geo->ecc_chunk0_size = 512;
+ geo->ecc_chunkn_size = 512;
+
+ if (geo->ecc_chunkn_size < mtd->oobsize) {
+ geo->gf_len = 14;
+ geo->ecc_chunk0_size *= 2;
+ geo->ecc_chunkn_size *= 2;
+ }
+
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
+
+ /*
+ * Determine the ECC layout with the formula:
+ * ECC bits per chunk = (total page spare data bits) /
+ * (bits per ECC level) / (chunks per page)
+ * where:
+ * total page spare data bits =
+ * (page oob size - meta data size) * (bits per byte)
+ */
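+	/*
+	 * Worked example (values assumed, not fixed by this driver): a
+	 * 2048+64 page with 512-byte chunks gives ecc_chunk_count = 4 and
+	 * (64 - 10) * 8 = 432 spare bits, so ecc_strength = 432 / (13 * 4)
+	 * = 8 (already even, and below a typical 40-bit platform limit).
+	 * The block mark then lands at bit 2048 * 8 - (8 * 13 * 3 + 10 * 8)
+	 * = 15992, i.e. byte offset 1999, bit offset 0.
+	 */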
+ geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+
+ geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
+ nand_info->max_ecc_strength_supported);
+
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+ + MXS_NAND_METADATA_SIZE * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+
+ return 0;
+}
+
+static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
+ struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ unsigned int block_mark_bit_offset;
+ unsigned int max_ecc;
+ unsigned int bbm_chunk;
+ unsigned int i;
+
+	/* sanity check for the minimum ECC strength the NAND requires */
+ if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+ return -EINVAL;
+ geo->ecc_strength = chip->ecc_strength_ds;
+
+	/* calculate the maximum ECC strength the platform can support */
+ geo->gf_len = 14;
+ geo->ecc_chunk0_size = 1024;
+ geo->ecc_chunkn_size = 1024;
+ geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
+ max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
+ / (geo->gf_len * geo->ecc_chunk_count);
+ max_ecc = min(round_down(max_ecc, 2),
+ nand_info->max_ecc_strength_supported);
+
+	/*
+	 * Search for a supported ECC strength that places the BBM inside a
+	 * data chunk.
+	 */
+ geo->ecc_strength = chip->ecc_strength_ds;
+ while (!(geo->ecc_strength > max_ecc)) {
+ if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
+ break;
+ geo->ecc_strength += 2;
+ }
+
+	/*
+	 * If none of them works, keep using the minimum ECC the NAND
+	 * requires but change the ECC page layout.
+	 */
+ if (geo->ecc_strength > max_ecc) {
+ geo->ecc_strength = chip->ecc_strength_ds;
+ /* add extra ecc for meta data */
+ geo->ecc_chunk0_size = 0;
+ geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
+ geo->ecc_for_meta = 1;
+ /* check if oob can afford this extra ecc chunk */
+ if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
+ geo->gf_len * geo->ecc_strength
+ * geo->ecc_chunk_count) {
+ printf("unsupported NAND chip with new layout\n");
+ return -EINVAL;
+ }
+
+ /* calculate in which chunk bbm located */
+ bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
+ geo->gf_len * geo->ecc_strength) /
+ (geo->gf_len * geo->ecc_strength +
+ geo->ecc_chunkn_size * 8) + 1;
+ }
+
+ /* calculate the number of ecc chunk behind the bbm */
+ i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;
+
+ block_mark_bit_offset = mtd->writesize * 8 -
+ (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
+ + MXS_NAND_METADATA_SIZE * 8);
+
+ geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+ geo->block_mark_bit_offset = block_mark_bit_offset % 8;
+
+ return 0;
+}
+
+/*
+ * Wait for BCH complete IRQ and clear the IRQ
+ */
+static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
+{
+ int timeout = MXS_NAND_BCH_TIMEOUT;
+ int ret;
+
+ ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
+ BCH_CTRL_COMPLETE_IRQ, timeout);
+
+ writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);
+
+ return ret;
+}
+
+/*
+ * This is the function that we install in the cmd_ctrl function pointer of the
+ * owning struct nand_chip. The only functions in the reference implementation
+ * that use these function pointers are cmdfunc and select_chip.
+ *
+ * In this driver, we implement our own select_chip, so this function will only
+ * be called by the reference implementation's cmdfunc. For this reason, we can
+ * ignore the chip enable bit and concentrate only on sending bytes to the NAND
+ * Flash.
+ */
+static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ struct mxs_dma_desc *d;
+ uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ int ret;
+
+ /*
+	 * If this condition is true, something is _VERY_ wrong in the MTD
+	 * subsystem!
+ */
+ if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
+ printf("MXS NAND: Command queue too long\n");
+ return;
+ }
+
+ /*
+ * Every operation begins with a command byte and a series of zero or
+ * more address bytes. These are distinguished by either the Address
+ * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
+ * asserted. When MTD is ready to execute the command, it will
+	 * deassert both latch enables.
+ *
+ * Rather than run a separate DMA operation for every single byte, we
+ * queue them up and run a single DMA operation for the entire series
+ * of command and data bytes.
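+	 *
+	 * For example, a large-page READ0 queues the 0x00 command byte plus
+	 * the column and row address bytes here one at a time; the whole
+	 * queue then goes out in a single DMA transfer once MTD deasserts
+	 * both latch enables.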
+ */
+ if (ctrl & (NAND_ALE | NAND_CLE)) {
+ if (data != NAND_CMD_NONE)
+ nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
+ return;
+ }
+
+ /*
+ * If control arrives here, MTD has deasserted both the ALE and CLE,
+ * which means it's ready to run an operation. Check if we have any
+ * bytes to send.
+ */
+ if (nand_info->cmd_queue_len == 0)
+ return;
+
+ /* Compile the DMA descriptor -- a descriptor that sends command. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
+ MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
+ MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
+ (nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);
+
+ d->cmd.address = (dma_addr_t)nand_info->cmd_buf;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_WRITE |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_CLE |
+ GPMI_CTRL0_ADDRESS_INCREMENT |
+ nand_info->cmd_queue_len;
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Flush caches */
+ mxs_nand_flush_cmd_buf(nand_info);
+
+ /* Execute the DMA chain. */
+ ret = mxs_dma_go(channel);
+ if (ret)
+ printf("MXS NAND: Error sending command\n");
+
+ mxs_nand_return_dma_descs(nand_info);
+
+ /* Reset the command queue. */
+ nand_info->cmd_queue_len = 0;
+}
+
+/*
+ * Test if the NAND flash is ready.
+ */
+static int mxs_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ uint32_t tmp;
+
+ tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
+ tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);
+
+ return tmp & 1;
+}
+
+/*
+ * Select the NAND chip.
+ */
+static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+
+ nand_info->cur_chip = chip;
+}
+
+/*
+ * Handle block mark swapping.
+ *
+ * Note that, when this function is called, it doesn't know whether it's
+ * swapping the block mark, or swapping it *back* -- but it doesn't matter
+ * because the operation is the same.
+ */
+static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
+ uint8_t *data_buf, uint8_t *oob_buf)
+{
+ uint32_t bit_offset = geo->block_mark_bit_offset;
+ uint32_t buf_offset = geo->block_mark_byte_offset;
+
+ uint32_t src;
+ uint32_t dst;
+
+ /*
+ * Get the byte from the data area that overlays the block mark. Since
+ * the ECC engine applies its own view to the bits in the page, the
+ * physical block mark won't (in general) appear on a byte boundary in
+ * the data.
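+	 *
+	 * For example, with bit_offset = 2 the code below assembles src from
+	 * the top six bits of data_buf[buf_offset] and the low two bits of
+	 * data_buf[buf_offset + 1], then writes the old OOB byte back into
+	 * those same bit positions.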
+ */
+ src = data_buf[buf_offset] >> bit_offset;
+ src |= data_buf[buf_offset + 1] << (8 - bit_offset);
+
+ dst = oob_buf[0];
+
+ oob_buf[0] = src;
+
+ data_buf[buf_offset] &= ~(0xff << bit_offset);
+ data_buf[buf_offset + 1] &= 0xff << bit_offset;
+
+ data_buf[buf_offset] |= dst << bit_offset;
+ data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
+}
+
+/*
+ * Read data from NAND.
+ */
+static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ struct mxs_dma_desc *d;
+ uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ int ret;
+
+ if (length > NAND_MAX_PAGESIZE) {
+ printf("MXS NAND: DMA buffer too big\n");
+ return;
+ }
+
+ if (!buf) {
+ printf("MXS NAND: DMA buffer is NULL\n");
+ return;
+ }
+
+ /* Compile the DMA descriptor - a descriptor that reads data. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
+ MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
+ (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
+ (length << MXS_DMA_DESC_BYTES_OFFSET);
+
+ d->cmd.address = (dma_addr_t)nand_info->data_buf;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_READ |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_DATA |
+ length;
+
+ mxs_dma_desc_append(channel, d);
+
+ /*
+ * A DMA descriptor that waits for the command to end and the chip to
+ * become ready.
+ *
+ * I think we actually should *not* be waiting for the chip to become
+ * ready because, after all, we don't care. I think the original code
+ * did that and no one has re-thought it yet.
+ */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
+ MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
+ MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
+
+ d->cmd.address = 0;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_DATA;
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Invalidate caches */
+ mxs_nand_inval_data_buf(nand_info);
+
+ /* Execute the DMA chain. */
+ ret = mxs_dma_go(channel);
+ if (ret) {
+ printf("MXS NAND: DMA read error\n");
+ goto rtn;
+ }
+
+ /* Invalidate caches */
+ mxs_nand_inval_data_buf(nand_info);
+
+ memcpy(buf, nand_info->data_buf, length);
+
+rtn:
+ mxs_nand_return_dma_descs(nand_info);
+}
+
+/*
+ * Write data to NAND.
+ */
+static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int length)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ struct mxs_dma_desc *d;
+ uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ int ret;
+
+ if (length > NAND_MAX_PAGESIZE) {
+ printf("MXS NAND: DMA buffer too big\n");
+ return;
+ }
+
+ if (!buf) {
+ printf("MXS NAND: DMA buffer is NULL\n");
+ return;
+ }
+
+ memcpy(nand_info->data_buf, buf, length);
+
+ /* Compile the DMA descriptor - a descriptor that writes data. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
+ MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
+ (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
+ (length << MXS_DMA_DESC_BYTES_OFFSET);
+
+ d->cmd.address = (dma_addr_t)nand_info->data_buf;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_WRITE |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_DATA |
+ length;
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Flush caches */
+ mxs_nand_flush_data_buf(nand_info);
+
+ /* Execute the DMA chain. */
+ ret = mxs_dma_go(channel);
+ if (ret)
+ printf("MXS NAND: DMA write error\n");
+
+ mxs_nand_return_dma_descs(nand_info);
+}
+
+/*
+ * Read a single byte from NAND.
+ */
+static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
+{
+ uint8_t buf;
+ mxs_nand_read_buf(mtd, &buf, 1);
+ return buf;
+}
+
+static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
+ u8 *buf, int chunk, int page)
+{
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ struct bch_geometry *geo = &nand_info->bch_geometry;
+ unsigned int flip_bits = 0, flip_bits_noecc = 0;
+ unsigned int threshold;
+ unsigned int base = geo->ecc_chunkn_size * chunk;
+ u32 *dma_buf = (u32 *)buf;
+ int i;
+
+ threshold = geo->gf_len / 2;
+ if (threshold > geo->ecc_strength)
+ threshold = geo->ecc_strength;
+
+ for (i = 0; i < geo->ecc_chunkn_size; i++) {
+ flip_bits += hweight8(~buf[base + i]);
+ if (flip_bits > threshold)
+ return false;
+ }
+
+ nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ nand->read_buf(mtd, buf, mtd->writesize);
+
+ for (i = 0; i < mtd->writesize / 4; i++) {
+ flip_bits_noecc += hweight32(~dma_buf[i]);
+ if (flip_bits_noecc > threshold)
+ return false;
+ }
+
+ mtd->ecc_stats.corrected += flip_bits;
+
+ memset(buf, 0xff, mtd->writesize);
+
+	printf("Page %d is an erased page (chunk %d, threshold %d, flips %d, raw flips %d)\n",
+	       page, chunk, threshold, flip_bits, flip_bits_noecc);
+
+ return true;
+}
+
+/*
+ * Read a page from NAND.
+ */
+static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ struct bch_geometry *geo = &nand_info->bch_geometry;
+ struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
+ struct mxs_dma_desc *d;
+ uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ uint32_t corrected = 0, failed = 0;
+ uint8_t *status;
+ int i, ret;
+ int flag = 0;
+
+ /* Compile the DMA descriptor - wait for ready. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
+ MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
+ (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
+
+ d->cmd.address = 0;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_DATA;
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Compile the DMA descriptor - enable the BCH block and read. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
+ MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
+
+ d->cmd.address = 0;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_READ |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_DATA |
+ (mtd->writesize + mtd->oobsize);
+ d->cmd.pio_words[1] = 0;
+ d->cmd.pio_words[2] =
+ GPMI_ECCCTRL_ENABLE_ECC |
+ GPMI_ECCCTRL_ECC_CMD_DECODE |
+ GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
+ d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
+ d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
+ d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
+
+ if (nand_info->en_randomizer) {
+ d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
+ GPMI_ECCCTRL_RANDOMIZER_TYPE2;
+ d->cmd.pio_words[3] |= (page % 256) << 16;
+ }
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Compile the DMA descriptor - disable the BCH block. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
+ MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
+ (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
+
+ d->cmd.address = 0;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_DATA |
+ (mtd->writesize + mtd->oobsize);
+ d->cmd.pio_words[1] = 0;
+ d->cmd.pio_words[2] = 0;
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
+ MXS_DMA_DESC_DEC_SEM;
+
+ d->cmd.address = 0;
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Invalidate caches */
+ mxs_nand_inval_data_buf(nand_info);
+
+ /* Execute the DMA chain. */
+ ret = mxs_dma_go(channel);
+ if (ret) {
+ printf("MXS NAND: DMA read error\n");
+ goto rtn;
+ }
+
+ ret = mxs_nand_wait_for_bch_complete(nand_info);
+ if (ret) {
+ printf("MXS NAND: BCH read timeout\n");
+ goto rtn;
+ }
+
+ mxs_nand_return_dma_descs(nand_info);
+
+ /* Invalidate caches */
+ mxs_nand_inval_data_buf(nand_info);
+
+ /* Read DMA completed, now do the mark swapping. */
+ mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
+
+ /* Loop over status bytes, accumulating ECC status. */
+ status = nand_info->oob_buf + mxs_nand_aux_status_offset();
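+	/*
+	 * Each status byte describes one chunk: 0x00 means no errors, 0xff
+	 * an erased chunk, 0xfe an uncorrectable chunk, and any other value
+	 * is the number of corrected bitflips.
+	 */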
+ for (i = 0; i < geo->ecc_chunk_count; i++) {
+ if (status[i] == 0x00)
+ continue;
+
+ if (status[i] == 0xff) {
+ if (!nand_info->en_randomizer &&
+ (is_mx6dqp() || is_mx7() || is_mx6ul() ||
+ is_imx8() || is_imx8m()))
+ if (readl(&bch_regs->hw_bch_debug1))
+ flag = 1;
+ continue;
+ }
+
+ if (status[i] == 0xfe) {
+ if (mxs_nand_erased_page(mtd, nand,
+ nand_info->data_buf, i, page))
+ break;
+ failed++;
+ continue;
+ }
+
+ corrected += status[i];
+ }
+
+ /* Propagate ECC status to the owning MTD. */
+ mtd->ecc_stats.failed += failed;
+ mtd->ecc_stats.corrected += corrected;
+
+ /*
+ * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
+ * details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the block
+ * mark to the caller's buffer. Note that, if block mark swapping was
+ * necessary, it has already been done, so we can rely on the first
+ * byte of the auxiliary buffer to contain the block mark.
+ */
+ memset(nand->oob_poi, 0xff, mtd->oobsize);
+
+ nand->oob_poi[0] = nand_info->oob_buf[0];
+
+ memcpy(buf, nand_info->data_buf, mtd->writesize);
+
+ if (flag)
+ memset(buf, 0xff, mtd->writesize);
+rtn:
+ mxs_nand_return_dma_descs(nand_info);
+
+ return ret;
+}
+
+/*
+ * Write a page to NAND.
+ */
+static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *nand, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ struct bch_geometry *geo = &nand_info->bch_geometry;
+ struct mxs_dma_desc *d;
+ uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
+ int ret;
+
+ memcpy(nand_info->data_buf, buf, mtd->writesize);
+ memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);
+
+ /* Handle block mark swapping. */
+ mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
+
+ /* Compile the DMA descriptor - write data. */
+ d = mxs_nand_get_dma_desc(nand_info);
+ d->cmd.data =
+ MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
+ MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
+ (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
+
+ d->cmd.address = 0;
+
+ d->cmd.pio_words[0] =
+ GPMI_CTRL0_COMMAND_MODE_WRITE |
+ GPMI_CTRL0_WORD_LENGTH |
+ (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
+ GPMI_CTRL0_ADDRESS_NAND_DATA;
+ d->cmd.pio_words[1] = 0;
+ d->cmd.pio_words[2] =
+ GPMI_ECCCTRL_ENABLE_ECC |
+ GPMI_ECCCTRL_ECC_CMD_ENCODE |
+ GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
+ d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
+ d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
+ d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
+
+ if (nand_info->en_randomizer) {
+ d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
+ GPMI_ECCCTRL_RANDOMIZER_TYPE2;
+ /*
+ * Write NAND page number needed to be randomized
+ * to GPMI_ECCCOUNT register.
+ *
+ * The value is between 0-255. For additional details
+ * check 9.6.6.4 of i.MX7D Applications Processor reference
+ */
+ d->cmd.pio_words[3] |= (page % 256) << 16;
+ }
+
+ mxs_dma_desc_append(channel, d);
+
+ /* Flush caches */
+ mxs_nand_flush_data_buf(nand_info);
+
+ /* Execute the DMA chain. */
+ ret = mxs_dma_go(channel);
+ if (ret) {
+ printf("MXS NAND: DMA write error\n");
+ goto rtn;
+ }
+
+ ret = mxs_nand_wait_for_bch_complete(nand_info);
+ if (ret) {
+ printf("MXS NAND: BCH write timeout\n");
+ goto rtn;
+ }
+
+rtn:
+ mxs_nand_return_dma_descs(nand_info);
+	return ret;
+}
+
+/*
+ * Read OOB from NAND.
+ *
+ * This function is a veneer that replaces the function originally installed by
+ * the NAND Flash MTD code.
+ */
+static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ int ret;
+
+ if (ops->mode == MTD_OPS_RAW)
+ nand_info->raw_oob_mode = 1;
+ else
+ nand_info->raw_oob_mode = 0;
+
+ ret = nand_info->hooked_read_oob(mtd, from, ops);
+
+ nand_info->raw_oob_mode = 0;
+
+ return ret;
+}
+
+/*
+ * Write OOB to NAND.
+ *
+ * This function is a veneer that replaces the function originally installed by
+ * the NAND Flash MTD code.
+ */
+static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ int ret;
+
+ if (ops->mode == MTD_OPS_RAW)
+ nand_info->raw_oob_mode = 1;
+ else
+ nand_info->raw_oob_mode = 0;
+
+ ret = nand_info->hooked_write_oob(mtd, to, ops);
+
+ nand_info->raw_oob_mode = 0;
+
+ return ret;
+}
+
+/*
+ * Mark a block bad in NAND.
+ *
+ * This function is a veneer that replaces the function originally installed by
+ * the NAND Flash MTD code.
+ */
+static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ int ret;
+
+ nand_info->marking_block_bad = 1;
+
+ ret = nand_info->hooked_block_markbad(mtd, ofs);
+
+ nand_info->marking_block_bad = 0;
+
+ return ret;
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark, so all
+ * write operations take measures to protect it.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ * true state of the block mark, no matter where that block mark appears in
+ * the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ * return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ * page, using the conventional definition of which bytes are data and which
+ * are OOB. This gives the caller a way to see the actual, physical bytes
+ * in the page, without the distortions applied by our ECC engine.
+ *
+ * What we do for this specific read operation depends on whether we're doing
+ * "raw" read, or an ECC-based read.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ *
+ * Since MTD assumes the OOB is not covered by ECC, there is no pair of
+ * ECC-based/raw functions for reading or writing the OOB. The fact that the
+ * caller wants an ECC-based or raw view of the page is not propagated down to
+ * this driver.
+ *
+ * Since our OOB *is* covered by ECC, we need this information. So, we hook the
+ * ecc.read_oob and ecc.write_oob function pointers in the owning
+ * struct mtd_info with our own functions. These hook functions set the
+ * raw_oob_mode field so that, when control finally arrives here, we'll know
+ * what to do.
+ */
+static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+
+ /*
+ * First, fill in the OOB buffer. If we're doing a raw read, we need to
+ * get the bytes from the physical page. If we're not doing a raw read,
+ * we need to fill the buffer with set bits.
+ */
+ if (nand_info->raw_oob_mode) {
+ /*
+ * If control arrives here, we're doing a "raw" read. Send the
+ * command to read the conventional OOB and read it.
+ */
+ nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
+ } else {
+ /*
+ * If control arrives here, we're not doing a "raw" read. Fill
+ * the OOB buffer with set bits and correct the block mark.
+ */
+ memset(nand->oob_poi, 0xff, mtd->oobsize);
+
+ nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ mxs_nand_read_buf(mtd, nand->oob_poi, 1);
+ }
+
+ return 0;
+
+}
+
+/*
+ * Write OOB data to NAND.
+ */
+static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
+ int page)
+{
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ uint8_t block_mark = 0;
+
+ /*
+ * There are fundamental incompatibilities between the i.MX GPMI NFC and
+ * the NAND Flash MTD model that make it essentially impossible to write
+ * the out-of-band bytes.
+ *
+ * We permit *ONE* exception. If the *intent* of writing the OOB is to
+ * mark a block bad, we can do that.
+ */
+
+ if (!nand_info->marking_block_bad) {
+		printf("MXS NAND: Writing OOB isn't supported\n");
+ return -EIO;
+ }
+
+ /* Write the block mark. */
+ nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ nand->write_buf(mtd, &block_mark, 1);
+ nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ /* Check if it worked. */
+ if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Claims all blocks are good.
+ *
+ * In principle, this function is *only* called when the NAND Flash MTD system
+ * isn't allowed to keep an in-memory bad block table, so it is forced to ask
+ * the driver for bad block information.
+ *
+ * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
+ * this function is *only* called when we take it away.
+ *
+ * Thus, this function is only called when we want *all* blocks to look good,
+ * so it *always* returns success.
+ */
+static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ return 0;
+}
+
+static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+
+ if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
+ printf("unsupported NAND chip, minimum ecc required %d\n"
+ , chip->ecc_strength_ds);
+ return -EINVAL;
+ }
+
+ if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
+ mtd->oobsize < 1024) || nand_info->legacy_bch_geometry) {
+ dev_warn(mtd->dev, "use legacy bch geometry\n");
+ return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
+ }
+
+ if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
+ return mxs_nand_calc_ecc_for_large_oob(geo, mtd);
+
+	return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
+		chip->ecc_strength_ds, chip->ecc_step_ds);
+}
+
+/*
+ * At this point, the physical NAND Flash chips have been identified and
+ * counted, so we know the physical geometry. This enables us to make some
+ * important configuration decisions.
+ *
+ * The return value of this function propagates directly back to this driver's
+ * board_nand_init(). Anything other than zero will cause this driver to
+ * tear everything down and declare failure.
+ */
+int mxs_nand_setup_ecc(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+ struct bch_geometry *geo = &nand_info->bch_geometry;
+ struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
+ uint32_t tmp;
+ int ret;
+
+ nand_info->en_randomizer = 0;
+ nand_info->oobsize = mtd->oobsize;
+ nand_info->writesize = mtd->writesize;
+
+ ret = mxs_nand_set_geometry(mtd, geo);
+ if (ret)
+ return ret;
+
+ /* Configure BCH and set NFC geometry */
+ mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);
+
+ /* Configure layout 0 */
+ tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
+ tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
+ tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
+ tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
+ tmp |= (geo->gf_len == 14 ? 1 : 0) <<
+ BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
+ writel(tmp, &bch_regs->hw_bch_flash0layout0);
+ nand_info->bch_flash0layout0 = tmp;
+
+ tmp = (mtd->writesize + mtd->oobsize)
+ << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
+ tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
+ tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
+ tmp |= (geo->gf_len == 14 ? 1 : 0) <<
+ BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
+ writel(tmp, &bch_regs->hw_bch_flash0layout1);
+ nand_info->bch_flash0layout1 = tmp;
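+
+	/*
+	 * Worked example (assumed geometry, continuing the 2048+64 page with
+	 * 512-byte chunks and ecc_strength 8): FLASHLAYOUT0 takes
+	 * NBLOCKS = 3, META_SIZE = 10, ECC0 = 4 and DATA0_SIZE = 512 >> 2 =
+	 * 128 on i.MX6; FLASHLAYOUT1 takes PAGE_SIZE = 2112, ECCN = 4 and
+	 * DATAN_SIZE = 128.
+	 */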
+
+	/* Set erase threshold to ecc strength for mx6qp, mx6ul, mx7 and imx8 */
+ if (is_mx6dqp() || is_mx7() ||
+ is_mx6ul() || is_imx8() || is_imx8m())
+ writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
+ &bch_regs->hw_bch_mode);
+
+ /* Set *all* chip selects to use layout 0 */
+ writel(0, &bch_regs->hw_bch_layoutselect);
+
+ /* Enable BCH complete interrupt */
+ writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);
+
+ /* Hook some operations at the MTD level. */
+ if (mtd->_read_oob != mxs_nand_hook_read_oob) {
+ nand_info->hooked_read_oob = mtd->_read_oob;
+ mtd->_read_oob = mxs_nand_hook_read_oob;
+ }
+
+ if (mtd->_write_oob != mxs_nand_hook_write_oob) {
+ nand_info->hooked_write_oob = mtd->_write_oob;
+ mtd->_write_oob = mxs_nand_hook_write_oob;
+ }
+
+ if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
+ nand_info->hooked_block_markbad = mtd->_block_markbad;
+ mtd->_block_markbad = mxs_nand_hook_block_markbad;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate DMA buffers
+ */
+int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
+{
+ uint8_t *buf;
+ const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
+
+ nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);
+
+ /* DMA buffers */
+ buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
+ if (!buf) {
+ printf("MXS NAND: Error allocating DMA buffers\n");
+ return -ENOMEM;
+ }
+
+ memset(buf, 0, nand_info->data_buf_size);
+
+ nand_info->data_buf = buf;
+ nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
+ /* Command buffers */
+ nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
+ MXS_NAND_COMMAND_BUFFER_SIZE);
+ if (!nand_info->cmd_buf) {
+ free(buf);
+ printf("MXS NAND: Error allocating command buffers\n");
+ return -ENOMEM;
+ }
+ memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
+ nand_info->cmd_queue_len = 0;
+
+ return 0;
+}
+
+/*
+ * Initialize the DMA channels and descriptors and reset the GPMI and BCH
+ * blocks.
+ */
+static int mxs_nand_init_dma(struct mxs_nand_info *info)
+{
+ int i = 0, j, ret = 0;
+
+ info->desc = malloc(sizeof(struct mxs_dma_desc *) *
+ MXS_NAND_DMA_DESCRIPTOR_COUNT);
+ if (!info->desc) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ /* Allocate the DMA descriptors. */
+ for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
+ info->desc[i] = mxs_dma_desc_alloc();
+ if (!info->desc[i]) {
+ ret = -ENOMEM;
+ goto err2;
+ }
+ }
+
+ /* Init the DMA controller. */
+ mxs_dma_init();
+ for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
+ j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
+ ret = mxs_dma_init_channel(j);
+ if (ret)
+ goto err3;
+ }
+
+ /* Reset the GPMI block. */
+ mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
+ mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);
+
+ /*
+ * Choose NAND mode, set IRQ polarity, disable write protection and
+ * select BCH ECC.
+ */
+ clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
+ GPMI_CTRL1_GPMI_MODE,
+ GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
+ GPMI_CTRL1_BCH_MODE);
+
+ return 0;
+
+err3:
+ for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
+ mxs_dma_release(j);
+err2:
+ for (--i; i >= 0; i--)
+ mxs_dma_desc_free(info->desc[i]);
+ free(info->desc);
+err1:
+ if (ret == -ENOMEM)
+ printf("MXS NAND: Unable to allocate DMA descriptors\n");
+ return ret;
+}
+
+int mxs_nand_init_spl(struct nand_chip *nand)
+{
+ struct mxs_nand_info *nand_info;
+ int err;
+
+ nand_info = malloc(sizeof(struct mxs_nand_info));
+ if (!nand_info) {
+ printf("MXS NAND: Failed to allocate private data\n");
+ return -ENOMEM;
+ }
+ memset(nand_info, 0, sizeof(struct mxs_nand_info));
+
+ nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
+ nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
+
+ if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
+ nand_info->max_ecc_strength_supported = 62;
+ else
+ nand_info->max_ecc_strength_supported = 40;
+
+ err = mxs_nand_alloc_buffers(nand_info);
+ if (err)
+ return err;
+
+ err = mxs_nand_init_dma(nand_info);
+ if (err)
+ return err;
+
+ nand_set_controller_data(nand, nand_info);
+
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ nand->cmd_ctrl = mxs_nand_cmd_ctrl;
+ nand->dev_ready = mxs_nand_device_ready;
+ nand->select_chip = mxs_nand_select_chip;
+
+ nand->read_byte = mxs_nand_read_byte;
+ nand->read_buf = mxs_nand_read_buf;
+
+ nand->ecc.read_page = mxs_nand_ecc_read_page;
+
+ nand->ecc.mode = NAND_ECC_HW;
+
+ return 0;
+}
+
+int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ int err;
+
+ nand = &nand_info->chip;
+ mtd = nand_to_mtd(nand);
+ err = mxs_nand_alloc_buffers(nand_info);
+ if (err)
+ return err;
+
+ err = mxs_nand_init_dma(nand_info);
+ if (err)
+ goto err_free_buffers;
+
+ memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));
+
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+ nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+#endif
+
+ nand_set_controller_data(nand, nand_info);
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ if (nand_info->dev)
+ nand->flash_node = dev_of_offset(nand_info->dev);
+
+ nand->cmd_ctrl = mxs_nand_cmd_ctrl;
+
+ nand->dev_ready = mxs_nand_device_ready;
+ nand->select_chip = mxs_nand_select_chip;
+ nand->block_bad = mxs_nand_block_bad;
+
+ nand->read_byte = mxs_nand_read_byte;
+
+ nand->read_buf = mxs_nand_read_buf;
+ nand->write_buf = mxs_nand_write_buf;
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
+ goto err_free_buffers;
+
+ if (mxs_nand_setup_ecc(mtd))
+ goto err_free_buffers;
+
+ nand->ecc.read_page = mxs_nand_ecc_read_page;
+ nand->ecc.write_page = mxs_nand_ecc_write_page;
+ nand->ecc.read_oob = mxs_nand_ecc_read_oob;
+ nand->ecc.write_oob = mxs_nand_ecc_write_oob;
+
+ nand->ecc.layout = &fake_ecc_layout;
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
+ nand->ecc.strength = nand_info->bch_geometry.ecc_strength;
+
+ /* second phase scan */
+ err = nand_scan_tail(mtd);
+ if (err)
+ goto err_free_buffers;
+
+ err = nand_register(0, mtd);
+ if (err)
+ goto err_free_buffers;
+
+ return 0;
+
+err_free_buffers:
+ free(nand_info->data_buf);
+ free(nand_info->cmd_buf);
+
+ return err;
+}
+
+#ifndef CONFIG_NAND_MXS_DT
+void board_nand_init(void)
+{
+ struct mxs_nand_info *nand_info;
+
+ nand_info = malloc(sizeof(struct mxs_nand_info));
+ if (!nand_info) {
+ printf("MXS NAND: Failed to allocate private data\n");
+ return;
+ }
+ memset(nand_info, 0, sizeof(struct mxs_nand_info));
+
+ nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
+ nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
+
+ /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
+ if (is_mx6sx() || is_mx7())
+ nand_info->max_ecc_strength_supported = 62;
+ else
+ nand_info->max_ecc_strength_supported = 40;
+
+#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
+ nand_info->use_minimum_ecc = true;
+#endif
+
+ if (mxs_nand_init_ctrl(nand_info) < 0)
+ goto err;
+
+ return;
+
+err:
+ free(nand_info);
+}
+#endif
+
+/*
+ * Read NAND layout for FCB block generation.
+ */
+void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
+{
+ struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
+ u32 tmp;
+
+ tmp = readl(&bch_regs->hw_bch_flash0layout0);
+ l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
+ BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
+ l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
+ BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
+
+ tmp = readl(&bch_regs->hw_bch_flash0layout1);
+ l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
+ BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
+ l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
+ BCH_FLASHLAYOUT0_ECC0_OFFSET;
+ l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
+ BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
+ l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
+ BCH_FLASHLAYOUT1_ECCN_OFFSET;
+ l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
+ BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
+}
+
+/*
+ * Set BCH to specific layout used by ROM bootloader to read FCB.
+ */
+void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
+{
+ u32 tmp;
+ struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+
+ nand_info->en_randomizer = 1;
+
+ mtd->writesize = 1024;
+ mtd->oobsize = 1862 - 1024;
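+
+	/*
+	 * Sanity of the 1862-byte total: 32 bytes of metadata + 8 * 128 data
+	 * bytes + 8 * 62 * 13 / 8 = 806 ECC bytes = 1862.
+	 */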
+
+	/* 8 ECC chunks */
+	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
+	/* 32 bytes for metadata */
+	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
+	/* 62-bit ECC: the field holds strength / 2 = 0x1F */
+	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
+	/* data0 block size: 0x20 * 4 = 128 bytes */
+	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
+ tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
+ writel(tmp, &bch_regs->hw_bch_flash0layout0);
+
+	/* 1024 bytes for data + 838 bytes for OOB */
+	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
+	/* 62-bit ECC: the field holds strength / 2 = 0x1F */
+	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
+	/* datan block size: 0x20 * 4 = 128 bytes */
+	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
+ tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
+ writel(tmp, &bch_regs->hw_bch_flash0layout1);
+}
+
+/*
+ * Set BCH to specific layout used by ROM bootloader to read FCB.
+ */
+void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
+{
+ u32 tmp;
+ struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+
+	/* no randomizer in this setting */
+ nand_info->en_randomizer = 0;
+
+ mtd->writesize = 1024;
+ mtd->oobsize = 1576 - 1024;
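+
+	/*
+	 * Sanity of the 1576-byte total: 32 bytes of metadata + 8 * 128 data
+	 * bytes + 8 * 40 * 13 / 8 = 520 ECC bytes = 1576.
+	 */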
+
+	/* 8 ECC chunks */
+	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
+	/* 32 bytes for metadata */
+	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
+	/* 40-bit ECC: the field holds strength / 2 = 0x14 */
+	tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
+	/* data0 block size: 0x20 * 4 = 128 bytes */
+	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
+ tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
+ writel(tmp, &bch_regs->hw_bch_flash0layout0);
+
+	/* 1024 bytes for data + 552 bytes for OOB */
+	tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
+	/* 40-bit ECC: the field holds strength / 2 = 0x14 */
+	tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
+	/* datan block size: 0x20 * 4 = 128 bytes */
+	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
+ tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
+ writel(tmp, &bch_regs->hw_bch_flash0layout1);
+}
+
+/*
+ * Restore BCH to normal settings.
+ */
+void mxs_nand_mode_normal(struct mtd_info *mtd)
+{
+ struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
+
+ nand_info->en_randomizer = 0;
+
+ mtd->writesize = nand_info->writesize;
+ mtd->oobsize = nand_info->oobsize;
+
+ writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
+ writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
+}
+
+uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &nand_info->bch_geometry;
+
+ return geo->block_mark_byte_offset;
+}
+
+uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &nand_info->bch_geometry;
+
+ return geo->block_mark_bit_offset;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/mxs_nand_dt.c b/roms/u-boot/drivers/mtd/nand/raw/mxs_nand_dt.c
new file mode 100644
index 000000000..878796d55
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/mxs_nand_dt.c
@@ -0,0 +1,185 @@
+/*
+ * NXP GPMI NAND flash driver (DT initialization)
+ *
+ * Copyright (C) 2018 Toradex
+ * Copyright 2019 NXP
+ *
+ * Authors:
+ * Stefan Agner <stefan.agner@toradex.com>
+ *
+ * Based on denali_dt.c
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <dm.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/printk.h>
+#include <clk.h>
+
+#include <mxs_nand.h>
+
+struct mxs_nand_dt_data {
+ unsigned int max_ecc_strength_supported;
+};
+
+static const struct mxs_nand_dt_data mxs_nand_imx6q_data = {
+ .max_ecc_strength_supported = 40,
+};
+
+static const struct mxs_nand_dt_data mxs_nand_imx6sx_data = {
+ .max_ecc_strength_supported = 62,
+};
+
+static const struct mxs_nand_dt_data mxs_nand_imx7d_data = {
+ .max_ecc_strength_supported = 62,
+};
+
+static const struct mxs_nand_dt_data mxs_nand_imx8qxp_data = {
+ .max_ecc_strength_supported = 62,
+};
+
+static const struct udevice_id mxs_nand_dt_ids[] = {
+ {
+ .compatible = "fsl,imx6q-gpmi-nand",
+ .data = (unsigned long)&mxs_nand_imx6q_data,
+ },
+ {
+ .compatible = "fsl,imx6qp-gpmi-nand",
+ .data = (unsigned long)&mxs_nand_imx6q_data,
+ },
+ {
+ .compatible = "fsl,imx6sx-gpmi-nand",
+ .data = (unsigned long)&mxs_nand_imx6sx_data,
+ },
+ {
+ .compatible = "fsl,imx7d-gpmi-nand",
+ .data = (unsigned long)&mxs_nand_imx7d_data,
+ },
+ {
+ .compatible = "fsl,imx8qxp-gpmi-nand",
+ .data = (unsigned long)&mxs_nand_imx8qxp_data,
+ },
+ { /* sentinel */ }
+};
+
+static int mxs_nand_dt_probe(struct udevice *dev)
+{
+ struct mxs_nand_info *info = dev_get_priv(dev);
+ const struct mxs_nand_dt_data *data;
+ struct resource res;
+ int ret;
+
+ data = (void *)dev_get_driver_data(dev);
+ if (data)
+ info->max_ecc_strength_supported = data->max_ecc_strength_supported;
+
+ info->dev = dev;
+
+ ret = dev_read_resource_byname(dev, "gpmi-nand", &res);
+ if (ret)
+ return ret;
+
+ info->gpmi_regs = devm_ioremap(dev, res.start, resource_size(&res));
+
+ ret = dev_read_resource_byname(dev, "bch", &res);
+ if (ret)
+ return ret;
+
+ info->bch_regs = devm_ioremap(dev, res.start, resource_size(&res));
+
+ info->use_minimum_ecc = dev_read_bool(dev, "fsl,use-minimum-ecc");
+
+ info->legacy_bch_geometry = dev_read_bool(dev, "fsl,legacy-bch-geometry");
+
+ if (IS_ENABLED(CONFIG_CLK) && IS_ENABLED(CONFIG_IMX8)) {
+ /* Assigned clock already set clock */
+ struct clk gpmi_clk;
+
+ ret = clk_get_by_name(dev, "gpmi_io", &gpmi_clk);
+ if (ret < 0) {
+ debug("Can't get gpmi io clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_enable(&gpmi_clk);
+ if (ret < 0) {
+ debug("Can't enable gpmi io clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_get_by_name(dev, "gpmi_apb", &gpmi_clk);
+ if (ret < 0) {
+ debug("Can't get gpmi_apb clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_enable(&gpmi_clk);
+ if (ret < 0) {
+ debug("Can't enable gpmi_apb clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_get_by_name(dev, "gpmi_bch", &gpmi_clk);
+ if (ret < 0) {
+ debug("Can't get gpmi_bch clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_enable(&gpmi_clk);
+ if (ret < 0) {
+ debug("Can't enable gpmi_bch clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_get_by_name(dev, "gpmi_apb_bch", &gpmi_clk);
+ if (ret < 0) {
+ debug("Can't get gpmi_apb_bch clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_enable(&gpmi_clk);
+ if (ret < 0) {
+ debug("Can't enable gpmi_apb_bch clk: %d\n", ret);
+ return ret;
+ }
+
+		/*
+		 * This clock is used by apbh_dma. Since apbh_dma does not
+		 * support DM, we optionally enable it here.
+		 */
+ ret = clk_get_by_name(dev, "gpmi_apbh_dma", &gpmi_clk);
+ if (ret < 0) {
+ debug("Can't get gpmi_apbh_dma clk: %d\n", ret);
+ } else {
+ ret = clk_enable(&gpmi_clk);
+ if (ret < 0) {
+ debug("Can't enable gpmi_apbh_dma clk: %d\n", ret);
+ }
+ }
+ }
+
+ return mxs_nand_init_ctrl(info);
+}
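+
+/*
+ * For reference, a device tree fragment that satisfies this probe routine
+ * (illustrative only; unit addresses are SoC-specific and the matching
+ * "clocks" phandles are omitted):
+ *
+ * gpmi: nand-controller@33002000 {
+ * compatible = "fsl,imx7d-gpmi-nand";
+ * reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
+ * reg-names = "gpmi-nand", "bch";
+ * clock-names = "gpmi_io", "gpmi_apb", "gpmi_bch",
+ * "gpmi_apb_bch", "gpmi_apbh_dma";
+ * fsl,use-minimum-ecc;
+ * };
+ */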
+
+U_BOOT_DRIVER(mxs_nand_dt) = {
+ .name = "mxs-nand-dt",
+ .id = UCLASS_MTD,
+ .of_match = mxs_nand_dt_ids,
+ .probe = mxs_nand_dt_probe,
+ .priv_auto = sizeof(struct mxs_nand_info),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(mxs_nand_dt),
+ &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize MXS NAND controller. (error %d)\n",
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/mxs_nand_spl.c b/roms/u-boot/drivers/mtd/nand/raw/mxs_nand_spl.c
new file mode 100644
index 000000000..17f46ae5e
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/mxs_nand_spl.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014 Gateworks Corporation
+ * Copyright 2019 NXP
+ * Author: Tim Harvey <tharvey@gateworks.com>
+ */
+#include <common.h>
+#include <log.h>
+#include <nand.h>
+#include <malloc.h>
+#include <mxs_nand.h>
+#include <asm/cache.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+static struct mtd_info *mtd;
+static struct nand_chip nand_chip;
+
+static void mxs_nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+ u32 timeo, time_start;
+
+ /* write out the command to the device */
+ chip->cmd_ctrl(mtd, command, NAND_CLE);
+
+ /* Serially input address */
+ if (column != -1) {
+ chip->cmd_ctrl(mtd, column, NAND_ALE);
+ chip->cmd_ctrl(mtd, column >> 8, NAND_ALE);
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, NAND_ALE);
+ chip->cmd_ctrl(mtd, page_addr >> 8, NAND_ALE);
+ /* One more address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20))
+ chip->cmd_ctrl(mtd, page_addr >> 16, NAND_ALE);
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0);
+
+ if (command == NAND_CMD_READ0) {
+ chip->cmd_ctrl(mtd, NAND_CMD_READSTART, NAND_CLE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0);
+ } else if (command == NAND_CMD_RNDOUT) {
+ /* No ready / busy check necessary */
+ chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE);
+ }
+
+ /* wait for nand ready */
+ ndelay(100);
+ timeo = (CONFIG_SYS_HZ * 20) / 1000;
+ time_start = get_timer(0);
+ while (get_timer(time_start) < timeo) {
+ if (chip->dev_ready(mtd))
+ break;
+ }
+}
+
+#if defined(CONFIG_SPL_NAND_IDENT)
+
+/* Trying to detect the NAND flash using ONFi, JEDEC, and (extended) IDs */
+static int mxs_flash_full_ident(struct mtd_info *mtd)
+{
+ int nand_maf_id, nand_dev_id;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_flash_dev *type;
+
+ type = nand_get_flash_type(mtd, chip, &nand_maf_id, &nand_dev_id, NULL);
+
+ if (IS_ERR(type)) {
+ chip->select_chip(mtd, -1);
+ return PTR_ERR(type);
+ }
+
+ return 0;
+}
+
+#else
+
+/* Trying to detect the NAND flash using ONFi only */
+static int mxs_flash_onfi_ident(struct mtd_info *mtd)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+ int i;
+ u8 mfg_id, dev_id;
+ u8 id_data[8];
+ struct nand_onfi_params *p = &chip->onfi_params;
+
+ /* Reset the chip */
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Send the command for reading device ID */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ mfg_id = chip->read_byte(mtd);
+ dev_id = chip->read_byte(mtd);
+
+ /* Try again to make sure */
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ for (i = 0; i < 8; i++)
+ id_data[i] = chip->read_byte(mtd);
+ if (id_data[0] != mfg_id || id_data[1] != dev_id) {
+ printf("second ID read did not match");
+ return -1;
+ }
+ debug("0x%02x:0x%02x ", mfg_id, dev_id);
+
+ /* read ONFI */
+ chip->onfi_version = 0;
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
+ if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+ chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') {
+ return -2;
+ }
+
+ /* we have ONFI, probe it */
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+ chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
+ mtd->name = p->model;
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+ mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
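+ /* e.g. a 2 KiB page gives page_shift = 11; a 128 KiB block gives 17 */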
+ /* Convert chipsize to number of pages per chip -1 */
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ chip->badblockbits = 8;
+
+ debug("erasesize=%d (>>%d)\n", mtd->erasesize, chip->phys_erase_shift);
+ debug("writesize=%d (>>%d)\n", mtd->writesize, chip->page_shift);
+ debug("oobsize=%d\n", mtd->oobsize);
+ debug("chipsize=%lld\n", chip->chipsize);
+
+ return 0;
+}
+
+#endif /* CONFIG_SPL_NAND_IDENT */
+
+static int mxs_flash_ident(struct mtd_info *mtd)
+{
+ int ret;
+#if defined(CONFIG_SPL_NAND_IDENT)
+ ret = mxs_flash_full_ident(mtd);
+#else
+ ret = mxs_flash_onfi_ident(mtd);
+#endif
+ return ret;
+}
+
+static int mxs_read_page_ecc(struct mtd_info *mtd, void *buf, unsigned int page)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x0, page);
+ ret = nand_chip.ecc.read_page(mtd, chip, buf, 1, page);
+ if (ret < 0) {
+ printf("read_page failed %d\n", ret);
+ return -1;
+ }
+ return 0;
+}
+
+static int is_badblock(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int block = offs >> chip->phys_erase_shift;
+ unsigned int page = offs >> chip->page_shift;
+
+ debug("%s offs=0x%08x block:%d page:%d\n", __func__, (int)offs, block,
+ page);
+ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+ memset(chip->oob_poi, 0, mtd->oobsize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
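+ /* The factory bad block marker is the first OOB byte; 0xff means good */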
+ return chip->oob_poi[0] != 0xff;
+}
+
+/* setup mtd and nand structs and init mxs_nand driver */
+void nand_init(void)
+{
+ /* return if already initialized */
+ if (nand_chip.numchips)
+ return;
+
+ /* init mxs nand driver */
+ mxs_nand_init_spl(&nand_chip);
+ mtd = nand_to_mtd(&nand_chip);
+ /* set mtd functions */
+ nand_chip.cmdfunc = mxs_nand_command;
+ nand_chip.scan_bbt = nand_default_bbt;
+ nand_chip.numchips = 1;
+
+ /* identify flash device */
+ if (mxs_flash_ident(mtd)) {
+ printf("Failed to identify\n");
+ nand_chip.numchips = 0; /* If fail, don't use nand */
+ return;
+ }
+
+ /* allocate and initialize buffers */
+ nand_chip.buffers = memalign(ARCH_DMA_MINALIGN,
+ sizeof(*nand_chip.buffers));
+ nand_chip.oob_poi = nand_chip.buffers->databuf + mtd->writesize;
+ /* setup flash layout (does not scan as we override that) */
+ mtd->size = nand_chip.chipsize;
+ nand_chip.scan_bbt(mtd);
+ mxs_nand_setup_ecc(mtd);
+}
+
+int nand_spl_load_image(uint32_t offs, unsigned int size, void *buf)
+{
+ struct nand_chip *chip;
+ unsigned int page;
+ unsigned int nand_page_per_block;
+ unsigned int sz = 0;
+ u8 *page_buf = NULL;
+ u32 page_off;
+
+ chip = mtd_to_nand(mtd);
+ if (!chip->numchips)
+ return -ENODEV;
+
+ page_buf = malloc(mtd->writesize);
+ if (!page_buf)
+ return -ENOMEM;
+
+ page = offs >> chip->page_shift;
+ page_off = offs & (mtd->writesize - 1);
+ nand_page_per_block = mtd->erasesize / mtd->writesize;
+
+ debug("%s offset:0x%08x len:%d page:%x\n", __func__, offs, size, page);
+
+ while (size) {
+ if (mxs_read_page_ecc(mtd, page_buf, page) < 0) {
+ free(page_buf);
+ return -1;
+ }
+
+ if (size > (mtd->writesize - page_off))
+ sz = (mtd->writesize - page_off);
+ else
+ sz = size;
+
+ memcpy(buf, page_buf + page_off, sz);
+
+ offs += mtd->writesize;
+ page++;
+ buf += (mtd->writesize - page_off);
+ page_off = 0;
+ size -= sz;
+
+ /*
+ * Check if we have crossed a block boundary, and if so
+ * check for bad block.
+ */
+ if (!(page % nand_page_per_block)) {
+ /*
+ * Yes, new block. See if this block is good. If not,
+ * loop until we find a good block.
+ */
+ while (is_badblock(mtd, offs, 1)) {
+ page = page + nand_page_per_block;
+ /* Check if we've reached the end of flash. */
+ if (page >= mtd->size >> chip->page_shift) {
+ free(page_buf);
+ return -ENOMEM;
+ }
+ }
+ }
+ }
+
+ free(page_buf);
+
+ return 0;
+}
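+
+/*
+ * Example SPL usage (sketch): a board's SPL can pull the next stage
+ * straight from NAND with the two calls below, assuming the usual
+ * CONFIG_SYS_NAND_U_BOOT_OFFS/_SIZE/_DST settings from the board config:
+ *
+ * nand_init();
+ * if (nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
+ * CONFIG_SYS_NAND_U_BOOT_SIZE,
+ * (void *)CONFIG_SYS_NAND_U_BOOT_DST))
+ * hang();
+ */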
+
+struct mtd_info *nand_get_mtd(void)
+{
+ return mtd;
+}
+
+int nand_default_bbt(struct mtd_info *mtd)
+{
+ return 0;
+}
+
+void nand_deselect(void)
+{
+}
+
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand.c b/roms/u-boot/drivers/mtd/nand/raw/nand.c
new file mode 100644
index 000000000..026419e4e
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) Copyright 2005
+ * 2N Telekomunikace, a.s. <www.2n.cz>
+ * Ladislav Michl <michl@2n.cz>
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <errno.h>
+#include <linux/mtd/concat.h>
+
+#ifndef CONFIG_SYS_NAND_BASE_LIST
+#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE }
+#endif
+
+int nand_curr_device = -1;
+
+static struct mtd_info *nand_info[CONFIG_SYS_MAX_NAND_DEVICE];
+
+#ifndef CONFIG_SYS_NAND_SELF_INIT
+static struct nand_chip nand_chip[CONFIG_SYS_MAX_NAND_DEVICE];
+static ulong base_address[CONFIG_SYS_MAX_NAND_DEVICE] = CONFIG_SYS_NAND_BASE_LIST;
+#endif
+
+static char dev_name[CONFIG_SYS_MAX_NAND_DEVICE][8];
+
+static unsigned long total_nand_size; /* in kiB */
+
+struct mtd_info *get_nand_dev_by_index(int dev)
+{
+ if (dev < 0 || dev >= CONFIG_SYS_MAX_NAND_DEVICE || !nand_info[dev] ||
+ !nand_info[dev]->name)
+ return NULL;
+
+ return nand_info[dev];
+}
+
+int nand_mtd_to_devnum(struct mtd_info *mtd)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) {
+ if (mtd && get_nand_dev_by_index(i) == mtd)
+ return i;
+ }
+
+ return -ENODEV;
+}
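+
+/*
+ * Example (sketch): iterating over every registered device, much as the
+ * "nand info" command does:
+ *
+ * for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) {
+ * struct mtd_info *m = get_nand_dev_by_index(i);
+ *
+ * if (m)
+ * printf("Device %d: %s\n", i, m->name);
+ * }
+ */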
+
+/* Register an initialized NAND mtd device with the U-Boot NAND command. */
+int nand_register(int devnum, struct mtd_info *mtd)
+{
+ if (devnum >= CONFIG_SYS_MAX_NAND_DEVICE)
+ return -EINVAL;
+
+ nand_info[devnum] = mtd;
+
+ sprintf(dev_name[devnum], "nand%d", devnum);
+ mtd->name = dev_name[devnum];
+
+#ifdef CONFIG_MTD
+ /*
+ * Add MTD device so that we can reference it later
+ * via the mtdcore infrastructure (e.g. ubi).
+ */
+ add_mtd_device(mtd);
+#endif
+
+ total_nand_size += mtd->size / 1024;
+
+ if (nand_curr_device == -1)
+ nand_curr_device = devnum;
+
+ return 0;
+}
+
+#ifndef CONFIG_SYS_NAND_SELF_INIT
+static void nand_init_chip(int i)
+{
+ struct nand_chip *nand = &nand_chip[i];
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ ulong base_addr = base_address[i];
+ int maxchips = CONFIG_SYS_NAND_MAX_CHIPS;
+
+ if (maxchips < 1)
+ maxchips = 1;
+
+ nand->IO_ADDR_R = nand->IO_ADDR_W = (void __iomem *)base_addr;
+
+ if (board_nand_init(nand))
+ return;
+
+ if (nand_scan(mtd, maxchips))
+ return;
+
+ nand_register(i, mtd);
+}
+#endif
+
+#ifdef CONFIG_MTD_CONCAT
+static void create_mtd_concat(void)
+{
+ struct mtd_info *nand_info_list[CONFIG_SYS_MAX_NAND_DEVICE];
+ int nand_devices_found = 0;
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) {
+ struct mtd_info *mtd = get_nand_dev_by_index(i);
+ if (mtd != NULL) {
+ nand_info_list[nand_devices_found] = mtd;
+ nand_devices_found++;
+ }
+ }
+ if (nand_devices_found > 1) {
+ struct mtd_info *mtd;
+ char c_mtd_name[16];
+
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ sprintf(c_mtd_name, "nand%d", nand_devices_found);
+ mtd = mtd_concat_create(nand_info_list, nand_devices_found,
+ c_mtd_name);
+
+ if (mtd == NULL)
+ return;
+
+ nand_register(nand_devices_found, mtd);
+ }
+}
+#else
+static void create_mtd_concat(void)
+{
+}
+#endif
+
+unsigned long nand_size(void)
+{
+ return total_nand_size;
+}
+
+void nand_init(void)
+{
+ static int initialized;
+
+ /*
+ * Avoid initializing NAND Flash multiple times,
+ * otherwise it will calculate a wrong total size.
+ */
+ if (initialized)
+ return;
+ initialized = 1;
+
+#ifdef CONFIG_SYS_NAND_SELF_INIT
+ board_nand_init();
+#else
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++)
+ nand_init_chip(i);
+#endif
+
+#ifdef CONFIG_SYS_NAND_SELECT_DEVICE
+ /*
+ * Select the chip in the board/cpu specific driver
+ */
+ board_nand_select_device(mtd_to_nand(get_nand_dev_by_index(nand_curr_device)),
+ nand_curr_device);
+#endif
+
+ create_mtd_concat();
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_base.c b/roms/u-boot/drivers/mtd/nand/raw/nand_base.c
new file mode 100644
index 000000000..3679ee727
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_base.c
@@ -0,0 +1,5324 @@
+/*
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/doc/nand.html
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * TODO:
+ * Enable cached programming for 2k page size chips
+ * Check, if mtd->ecctype should be set to MTD_ECC_HW
+ * if we have HW ECC support.
+ * BBT table is not serialized, has to be fixed
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <common.h>
+#if CONFIG_IS_ENABLED(OF_CONTROL)
+#include <fdtdec.h>
+#endif
+#include <log.h>
+#include <malloc.h>
+#include <watchdog.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand_bch.h>
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+#include <asm/io.h>
+#include <linux/errno.h>
+
+/* Define default oob placement schemes for large and small page devices */
+#ifndef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT
+static struct nand_ecclayout nand_oob_8 = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ {.offset = 3,
+ .length = 2},
+ {.offset = 6,
+ .length = 2} }
+};
+
+static struct nand_ecclayout nand_oob_16 = {
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 6, 7},
+ .oobfree = {
+ {.offset = 8,
+ .length = 8} }
+};
+
+static struct nand_ecclayout nand_oob_64 = {
+ .eccbytes = 24,
+ .eccpos = {
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = {
+ {.offset = 2,
+ .length = 38} }
+};
+
+static struct nand_ecclayout nand_oob_128 = {
+ .eccbytes = 48,
+ .eccpos = {
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ .oobfree = {
+ {.offset = 2,
+ .length = 78} }
+};
+#endif
+
+static int nand_get_device(struct mtd_info *mtd, int new_state);
+
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+
+/*
+ * For devices which display every fart in the system on a separate LED. Is
+ * compiled away when LED support is disabled.
+ */
+DEFINE_LED_TRIGGER(nand_led_trigger);
+
+static int check_offs_len(struct mtd_info *mtd,
+ loff_t ofs, uint64_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
+
+ /* Start address must align on block boundary */
+ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ ret = -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
+ pr_debug("%s: length not block aligned\n", __func__);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Release chip lock and wake up anyone waiting on the device.
+ */
+static void nand_release_device(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ /* De-select the NAND device */
+ chip->select_chip(mtd, -1);
+}
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 8bit buswidth
+ */
+uint8_t nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ return readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ *
+ */
+static uint8_t nand_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
+}
+
+/**
+ * nand_read_word - [DEFAULT] read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth without endianness conversion.
+ */
+static u16 nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ return readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ switch (chipnr) {
+ case -1:
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ chip->write_buf(mtd, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_onfi_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->write_buf(mtd, (uint8_t *)&word, 2);
+}
+
+static void iowrite8_rep(void *addr, const uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], addr);
+}
+
+static void ioread8_rep(void *addr, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(addr);
+}
+
+static void ioread16_rep(void *addr, void *buf, int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(addr);
+}
+
+static void iowrite16_rep(void *addr, void *buf, int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], addr);
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ iowrite8_rep(chip->IO_ADDR_W, buf, len);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ ioread8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u16 *p = (u16 *) buf;
+
+ iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u16 *p = (u16 *) buf;
+
+ ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
+}
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check, if the block is bad.
+ */
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+ int page, res = 0, i = 0;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u16 bad;
+
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+ do {
+ if (chip->options & NAND_BUSWIDTH_16) {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB,
+ chip->badblockpos & 0xFE, page);
+ bad = cpu_to_le16(chip->read_word(mtd));
+ if (chip->badblockpos & 0x1)
+ bad >>= 8;
+ else
+ bad &= 0xFF;
+ } else {
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+ page);
+ bad = chip->read_byte(mtd);
+ }
+
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ ofs += mtd->writesize;
+ page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ i++;
+ } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
+
+ return res;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_oob_ops ops;
+ uint8_t buf[2] = { 0, 0 };
+ int ret = 0, res, i = 0;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.oobbuf = buf;
+ ops.ooboffs = chip->badblockpos;
+ if (chip->options & NAND_BUSWIDTH_16) {
+ ops.ooboffs &= ~0x01;
+ ops.len = ops.ooblen = 2;
+ } else {
+ ops.len = ops.ooblen = 1;
+ }
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ /* Write to first/last page(s) if necessary */
+ if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+ ofs += mtd->erasesize - mtd->writesize;
+ do {
+ res = nand_do_write_oob(mtd, ofs, &ops);
+ if (!ret)
+ ret = res;
+
+ i++;
+ ofs += mtd->writesize;
+ } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+ return ret;
+}
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
+ * (1) erase the affected block, to allow OOB marker to be written cleanly
+ * (2) write bad block marker to OOB area of affected block (unless flag
+ * NAND_BBT_NO_OOB_BBM is present)
+ * (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * procedures, and return that first error at the end.
+ */
+static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int res, ret = 0;
+
+ if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
+ struct erase_info einfo;
+
+ /* Attempt erase before marking OOB */
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = ofs;
+ einfo.len = 1ULL << chip->phys_erase_shift;
+ nand_erase_nand(mtd, &einfo, 0);
+
+ /* Write bad block marker to OOB */
+ nand_get_device(mtd, FL_WRITING);
+ ret = chip->block_markbad(mtd, ofs);
+ nand_release_device(mtd);
+ }
+
+ /* Mark block bad in BBT */
+ if (chip->bbt) {
+ res = nand_markbad_bbt(mtd, ofs);
+ if (!ret)
+ ret = res;
+ }
+
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @mtd: MTD device structure
+ *
+ * Check, if the device is write protected. The function expects, that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u8 status;
+ int ret;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
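+ /* NAND_STATUS_WP is set when the device is NOT write protected */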
+ return status & NAND_STATUS_WP ? 0 : 1;
+}
+
+/**
+ * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check if the block is marked as reserved.
+ */
+static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->bbt)
+ return 0;
+ /* Return info from the table */
+ return nand_isreserved_bbt(mtd, ofs);
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @allowbbt: 1, if its allowed to access the bbt area
+ *
+ * Check, if the block is bad. Either by reading the bad block table or
+ * calling of the scan function.
+ */
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!(chip->options & NAND_SKIP_BBTSCAN) &&
+ !(chip->options & NAND_BBT_SCANNED)) {
+ chip->options |= NAND_BBT_SCANNED;
+ chip->scan_bbt(mtd);
+ }
+
+ if (!chip->bbt)
+ return chip->block_bad(mtd, ofs);
+
+ /* Return info from the table */
+ return nand_isbad_bbt(mtd, ofs, allowbbt);
+}
+
+/**
+ * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ *
+ * Wait for the ready pin after a command, and warn if a timeout occurs.
+ */
+void nand_wait_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ u32 timeo = (CONFIG_SYS_HZ * 400) / 1000;
+ u32 time_start;
+
+ time_start = get_timer(0);
+ /* Wait until command is processed or timeout occurs */
+ while (get_timer(time_start) < timeo) {
+ if (chip->dev_ready)
+ if (chip->dev_ready(mtd))
+ break;
+ }
+
+ if (chip->dev_ready && !chip->dev_ready(mtd))
+ pr_warn("timeout while waiting for chip to become ready\n");
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+ u32 time_start;
+ int ret;
+
+ timeo = (CONFIG_SYS_HZ * timeo) / 1000;
+ time_start = get_timer(0);
+ while (get_timer(time_start) < timeo) {
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ WATCHDOG_RESET();
+ }
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
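+ * Small page chips expose three read regions through separate commands:
+ * READ0 (bytes 0-255), READ1 (bytes 256-511) and READOOB (the spare area).
+ * Before SEQIN, the matching read command is issued first to position the
+ * internal column pointer, as done below.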
+ */
+static void nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+ /* Write out the command to the device */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->cmd_ctrl(mtd, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ chip->cmd_ctrl(mtd, command, ctrl);
+
+ /* Address cycle, when necessary */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and
+ * sequential in need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd,
+ NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ register struct nand_chip *chip = mtd_to_nand(mtd);
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->cmd_ctrl(mtd, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->cmd_ctrl(mtd, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->cmd_ctrl(mtd, page_addr, ctrl);
+ chip->cmd_ctrl(mtd, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->cmd_ctrl(mtd, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; sequential
+ * in and status need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_RNDIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->dev_ready)
+ break;
+ udelay(chip->chip_delay);
+ chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ return;
+
+ case NAND_CMD_READ0:
+ chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay.
+ */
+ if (!chip->dev_ready) {
+ udelay(chip->chip_delay);
+ return;
+ }
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip,
+ struct mtd_info *mtd, int new_state)
+{
+ /* Hardware controller shared among independent devices */
+ chip->controller->active = chip;
+ chip->state = new_state;
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int
+nand_get_device(struct mtd_info *mtd, int new_state)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ chip->state = new_state;
+ return 0;
+}
+
+/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context. May happen when in panic and trying to write
+ * an oops through mtdoops.
+ */
+static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
+ unsigned long timeo)
+{
+ int i;
+ for (i = 0; i < timeo; i++) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ int ret;
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status),
+ true);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ mdelay(1);
+ }
+}
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only.
+ */
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ unsigned long timeo = 400;
+ u8 status;
+ int ret;
+
+ led_trigger_event(nand_led_trigger, LED_FULL);
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
+ ndelay(100);
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ u32 timer = (CONFIG_SYS_HZ * timeo) / 1000;
+ u32 time_start;
+
+ time_start = get_timer(0);
+ while (get_timer(time_start) < timer) {
+ if (chip->dev_ready) {
+ if (chip->dev_ready(mtd))
+ break;
+ } else {
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ }
+ led_trigger_event(nand_led_trigger, LED_OFF);
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true);
+ if (ret)
+ return ret;
+
+ /* This can happen in case of a timeout or a buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
+ return status;
+}
+
+/**
+ * nand_reset_data_interface - Reset data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Reset the Data interface and timings to ONFI mode 0.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_data_interface *conf;
+ int ret;
+
+ if (!chip->setup_data_interface)
+ return 0;
+
+ /*
+ * The ONFI specification says:
+ * "
+ * To transition from NV-DDR or NV-DDR2 to the SDR data
+ * interface, the host shall use the Reset (FFh) command
+ * using SDR timing mode 0. A device in any timing mode is
+ * required to recognize Reset (FFh) command issued in SDR
+ * timing mode 0.
+ * "
+ *
+ * Configure the data interface in SDR mode and set the
+ * timings to timing mode 0.
+ */
+
+ conf = nand_get_default_data_interface();
+ ret = chip->setup_data_interface(mtd, chipnr, conf);
+ if (ret)
+ pr_err("Failed to configure data interface to SDR timing mode 0\n");
+
+ return ret;
+}
+
+/**
+ * nand_setup_data_interface - Setup the best data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Find and configure the best data interface and NAND timings supported by
+ * the chip and the driver.
+ * First tries to retrieve supported timing modes from ONFI information,
+ * and if the NAND chip does not support ONFI, relies on the
+ * ->onfi_timing_mode_default specified in the nand_ids table.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ if (!chip->setup_data_interface || !chip->data_interface)
+ return 0;
+
+ /*
+ * Ensure the timing mode has been changed on the chip side
+ * before changing timings on the controller side.
+ */
+ if (chip->onfi_version) {
+ u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
+ chip->onfi_timing_mode_default,
+ };
+
+ ret = chip->onfi_set_features(mtd, chip,
+ ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ if (ret)
+ goto err;
+ }
+
+ ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
+err:
+ return ret;
+}
+
+/**
+ * nand_init_data_interface - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver.
+ * First tries to retrieve supported timing modes from ONFI information,
+ * and if the NAND chip does not support ONFI, relies on the
+ * ->onfi_timing_mode_default specified in the nand_ids table. After this
+ * function nand_chip->data_interface is initialized with the best timing mode
+ * available.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_init_data_interface(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int modes, mode, ret;
+
+ if (!chip->setup_data_interface)
+ return 0;
+
+ /*
+ * First try to identify the best timings from ONFI parameters and
+ * if the NAND does not support ONFI, fallback to the default ONFI
+ * timing mode.
+ */
+ modes = onfi_get_async_timing_mode(chip);
+ if (modes == ONFI_TIMING_MODE_UNKNOWN) {
+ if (!chip->onfi_timing_mode_default)
+ return 0;
+
+ modes = GENMASK(chip->onfi_timing_mode_default, 0);
+ }
+
+ chip->data_interface = kzalloc(sizeof(*chip->data_interface),
+ GFP_KERNEL);
+ if (!chip->data_interface)
+ return -ENOMEM;
+
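+ /* Try the fastest advertised timing mode first, then fall back */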
+ for (mode = fls(modes) - 1; mode >= 0; mode--) {
+ ret = onfi_init_data_interface(chip, chip->data_interface,
+ NAND_SDR_IFACE, mode);
+ if (ret)
+ continue;
+
+ /*
+ * Pass NAND_DATA_IFACE_CHECK_ONLY to only check whether the
+ * controller supports the requested timings.
+ */
+ ret = chip->setup_data_interface(mtd,
+ NAND_DATA_IFACE_CHECK_ONLY,
+ chip->data_interface);
+ if (!ret) {
+ chip->onfi_timing_mode_default = mode;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void __maybe_unused nand_release_data_interface(struct nand_chip *chip)
+{
+ kfree(chip->data_interface);
+}
+
+/**
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
+
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *p = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
+
+/**
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->read_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
+
+/**
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
+
+/**
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
+ *
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_end_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
+
+/**
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status;
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
+ chip->write_buf(mtd, buf, len);
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
+
+/**
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->write_buf(mtd, buf, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
+
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int i;
+ u8 *id = buf;
+
+ if (len && !buf)
+ return -EINVAL;
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_readid_op);
+
+/**
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
+ *
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_status_op(struct nand_chip *chip, u8 *status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->read_byte(mtd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
+
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command to avoid reading only the status until a new read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
+
+/**
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
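+ * The block number is converted to its first page below: e.g. with 2 KiB
+ * pages in 128 KiB blocks the shift difference is 6, so block n starts at
+ * page n * 64.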
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int status;
+
+ chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+ chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status < 0)
+ return status;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_erase_op);
+
+/**
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *params = data;
+ int i, status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->write_byte(mtd, params[i]);
+
+ status = chip->waitfunc(mtd, chip);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 *params = data;
+ int i;
+
+ chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->read_byte(mtd);
+
+ return 0;
+}
+
+/**
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_reset_op(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_reset_op);
+
+/**
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ p[i] = chip->read_byte(mtd);
+ } else {
+ chip->read_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
+
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (!len || !buf)
+ return -EINVAL;
+
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ chip->write_byte(mtd, p[i]);
+ } else {
+ chip->write_buf(mtd, buf, len);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
+
+/**
+ * nand_reset - Reset and initialize a NAND device
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Returns 0 for success or negative error code otherwise
+ */
+int nand_reset(struct nand_chip *chip, int chipnr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_reset_data_interface(chip, chipnr);
+ if (ret)
+ return ret;
+
+ /*
+ * The CS line has to be released before we can apply the new NAND
+ * interface settings, hence this weird ->select_chip() dance.
+ */
+ chip->select_chip(mtd, chipnr);
+ ret = nand_reset_op(chip);
+ chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
+
+ chip->select_chip(mtd, chipnr);
+ ret = nand_setup_data_interface(chip, chipnr);
+ chip->select_chip(mtd, -1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf exits before
+ * testing the whole buffer if the number of bitflips exceeds the
+ * bitflips_threshold value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+ const unsigned char *bitmap = buf;
+ int bitflips = 0;
+ int weight;
+
+ for (; len && ((uintptr_t)bitmap) % sizeof(long);
+ len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len >= 4; len -= 4, bitmap += 4) {
+ weight = hweight32(*((u32 *)bitmap));
+ bitflips += 32 - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ for (; len > 0; len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
+
+ return bitflips;
+}
+
+/**
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ * 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a data buffer and its associated ECC and OOB data contains only
+ * 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms are working on pre-defined block sizes which are usually
+ * different from the NAND page size. When fixing bitflips, ECC engines will
+ * report the number of errors per chunk, and the NAND core infrastructure
+ * expects you to return the maximum number of bitflips for the whole page.
+ * This is why you should always use this function on a single chunk and
+ * not on the whole page. After checking each chunk you should update your
+ * max_bitflips value accordingly.
+ * 2/ When checking for bitflips in erased pages you should not only check
+ * the payload data but also their associated ECC data, because a user might
+ * have programmed almost all bits to 1 except a few. In this case, we
+ * shouldn't consider the chunk erased, and checking the ECC bytes prevents
+ * this.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ * data are protected by the ECC engine.
+ * It could also be used if you support subpages and want to attach some
+ * extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
+ */
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int bitflips_threshold)
+{
+ int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+
+ data_bitflips = nand_check_erased_buf(data, datalen,
+ bitflips_threshold);
+ if (data_bitflips < 0)
+ return data_bitflips;
+
+ bitflips_threshold -= data_bitflips;
+
+ ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+ if (ecc_bitflips < 0)
+ return ecc_bitflips;
+
+ bitflips_threshold -= ecc_bitflips;
+
+ extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+ bitflips_threshold);
+ if (extraoob_bitflips < 0)
+ return extraoob_bitflips;
+
+ if (data_bitflips)
+ memset(data, 0xff, datalen);
+
+ if (ecc_bitflips)
+ memset(ecc, 0xff, ecclen);
+
+ if (extraoob_bitflips)
+ memset(extraoob, 0xff, extraooblen);
+
+ return data_bitflips + ecc_bitflips + extraoob_bitflips;
+}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
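+
+/*
+ * Illustrative sketch, not part of the driver: the per-chunk pattern the
+ * note above describes, mirrored by the read paths below. "stat", "p",
+ * "ecc_code", "i" and "max_bitflips" stand in for a driver's local state.
+ */
+#if 0
+ if (stat == -EBADMSG)
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ &ecc_code[i], chip->ecc.bytes,
+ NULL, 0, chip->ecc.strength);
+
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+#endif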
+
+/**
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int ret;
+
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_read_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ */
+static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
+
+ chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ * @page: page number to read
+ */
+static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
+ int page)
+{
+ int start_step, end_step, num_steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ /* Column address within the page, aligned to ECC size (256 bytes) */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+ index = start_step * chip->ecc.bytes;
+
+ /* Data size aligned to the ECC step size (ecc.size) */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
+
+ data_col_addr = start_step * chip->ecc.size;
+ /* If the read is not page aligned */
+ if (data_col_addr != 0)
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+
+ p = bufpoi + data_col_addr;
+ ret = nand_read_data_op(chip, p, datafrag_len, false);
+ if (ret)
+ return ret;
+
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+
+ /*
+ * Performance is better if we fetch the ECC bytes at the offsets given
+ * by eccpos. Make sure there are no gaps in the ECC positions first.
+ */
+ for (i = 0; i < eccfrag_len - 1; i++) {
+ if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
+ gaps = 1;
+ break;
+ }
+ }
+ if (gaps) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * Send the command to read the particular ECC bytes, taking care
+ * of buswidth alignment in read_buf.
+ */
+ aligned_pos = eccpos[index] & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (eccpos[index] & (busw - 1))
+ aligned_len++;
+ if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
+ aligned_len++;
+
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < eccfrag_len; i++)
+ chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
+
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p,
+ &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ &chip->buffers->ecccode[i],
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i], eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips which requires the OOB to be read first.
+ * For this ECC mode, the write_page method is re-used from ECC_HW. These
+ * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
+ * support which, with multiple ECC steps, follows the "infix ECC" scheme and
+ * reads/writes ECC from the data area, overwriting the NAND manufacturer's
+ * bad block markings.
+ */
+static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ unsigned int max_bitflips = 0;
+ int ret;
+
+ /* Read the OOB area first */
+ ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i], eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
+}
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int ret, i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ oob - eccpadbytes,
+ eccpadbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
+
+ return max_bitflips;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, roffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Read request not from offset 0? */
+ if (unlikely(roffs)) {
+ if (roffs >= free->length) {
+ roffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + roffs;
+ bytes = min_t(size_t, len,
+ (free->length - roffs));
+ roffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(oob, chip->oob_poi + boffs, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->setup_read_retry(mtd, retry_mode);
+}
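+
+/* The read-retry loop that consumes this helper lives in nand_do_read_ops() */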
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret = 0;
+ uint32_t readlen = ops->len;
+ uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = mtd_oobavail(mtd, ops);
+
+ uint8_t *bufpoi, *oob, *buf;
+ int use_bufpoi;
+ unsigned int max_bitflips = 0;
+ int retry_mode = 0;
+ bool ecc_fail = false;
+
+ chipnr = (int)(from >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ col = (int)(from & (mtd->writesize - 1));
+
+ buf = ops->datbuf;
+ oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
+
+ while (1) {
+ unsigned int ecc_failures = mtd->ecc_stats.failed;
+
+ WATCHDOG_RESET();
+ bytes = min(mtd->writesize - col, readlen);
+ aligned = (bytes == mtd->writesize);
+
+ if (!aligned)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bufpoi = 0;
+
+ /* Is the current page in the buffer? */
+ if (realpage != chip->pagebuf || oob) {
+ bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+
+ if (use_bufpoi && aligned)
+ pr_debug("%s: using read bounce buffer for buf@%p\n",
+ __func__, buf);
+
+read_retry:
+ if (nand_standard_page_accessors(&chip->ecc)) {
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ break;
+ }
+
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
+ if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+ oob_required,
+ page);
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
+ ret = chip->ecc.read_subpage(mtd, chip,
+ col, bytes, bufpoi,
+ page);
+ else
+ ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ oob_required, page);
+ if (ret < 0) {
+ if (use_bufpoi)
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ break;
+ }
+
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
+ /* Transfer unaligned data */
+ if (use_bufpoi) {
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - ecc_failures) &&
+ (ops->mode != MTD_OPS_RAW)) {
+ chip->pagebuf = realpage;
+ chip->pagebuf_bitflips = ret;
+ } else {
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+ }
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ }
+
+ if (unlikely(oob)) {
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip,
+ oob, ops, toread);
+ oobreadlen -= toread;
+ }
+ }
+
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ if (mtd->ecc_stats.failed - ecc_failures) {
+ if (retry_mode + 1 < chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(mtd,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset failures; retry */
+ mtd->ecc_stats.failed = ecc_failures;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
+ } else {
+ memcpy(buf, chip->buffers->databuf + col, bytes);
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagebuf_bitflips);
+ }
+
+ readlen -= bytes;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(mtd, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
+ if (!readlen)
+ break;
+
+ /* For subsequent reads align to page boundary */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+ chip->select_chip(mtd, -1);
+
+ ops->retlen = ops->len - (size_t) readlen;
+ if (oob)
+ ops->oobretlen = ops->ooblen - oobreadlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (ecc_fail)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
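+
+/*
+ * Illustrative sketch, not part of the driver: a plain data read through the
+ * public MTD API eventually lands in nand_do_read_ops(). The helper name is
+ * hypothetical; the MTD layer maps the positive max_bitflips return value to
+ * 0 or -EUCLEAN depending on the configured bitflip threshold.
+ */
+#if 0
+static int example_read_one_page(struct mtd_info *mtd, loff_t from, u8 *buf)
+{
+ size_t retlen;
+
+ return mtd_read(mtd, from, mtd->writesize, &retlen, buf);
+}
+#endif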
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = chip->oob_poi;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ int ret;
+
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
+ else
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false);
+ if (ret)
+ return ret;
+
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
+ bufpoi += len;
+ length -= len;
+ }
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area.
+ */
+static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int page, realpage, chipnr;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_ecc_stats stats;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+ int ret = 0;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+
+ len = mtd_oobavail(mtd, ops);
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start read outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+ (from >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(from >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ while (1) {
+ WATCHDOG_RESET();
+
+ if (ops->mode == MTD_OPS_RAW)
+ ret = chip->ecc.read_oob_raw(mtd, chip, page);
+ else
+ ret = chip->ecc.read_oob(mtd, chip, page);
+
+ if (ret < 0)
+ break;
+
+ len = min(len, readlen);
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ if (chip->options & NAND_NEED_READRDY) {
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->dev_ready)
+ udelay(chip->chip_delay);
+ else
+ nand_wait_ready(mtd);
+ }
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+ chip->select_chip(mtd, -1);
+
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data.
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (ops->datbuf && (from + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to read beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(mtd, FL_READING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_read_oob(mtd, from, ops);
+ else
+ ret = nand_do_read_ops(mtd, from, ops);
+
+out:
+ nand_release_device(mtd);
+ return ret;
+}
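+
+/*
+ * Illustrative sketch, not part of the driver: reading only the ECC-free OOB
+ * bytes of a page through the interface above. The helper name is
+ * hypothetical; mtd_read_oob() is the public entry point.
+ */
+#if 0
+static int example_read_free_oob(struct mtd_info *mtd, loff_t from, u8 *oobbuf)
+{
+ struct mtd_oob_ops ops = { };
+
+ ops.mode = MTD_OPS_AUTO_OOB; /* pack only the ECC-free bytes */
+ ops.oobbuf = oobbuf;
+ ops.ooblen = mtd_oobavail(mtd, &ops);
+
+ return mtd_read_oob(mtd, from, &ops);
+}
+#endif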
+
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ int ret;
+
+ ret = nand_write_data_op(chip, buf, mtd->writesize, false);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Software ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ const uint8_t *p = buf;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ int ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
+{
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, i;
+ int ret;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ /* mask OOB of un-touched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+ /* copy calculated ECC for the whole page to chip->oob_poi */
+ /* this includes the masked value (0xFF) for unwritten subpages */
+ ecc_calc = chip->buffers->ecccalc;
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* write OOB buffer to NAND device */
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_write_page_syndrome(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ int ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(mtd, p, oob);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_write_page - [REPLACEABLE] write one page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int raw)
+{
+ int status, subpage;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
+
+ if (nand_standard_page_accessors(&chip->ecc)) {
+ status = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (status)
+ return status;
+ }
+
+ if (unlikely(raw))
+ status = chip->ecc.write_page_raw(mtd, chip, buf,
+ oob_required, page);
+ else if (subpage)
+ status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+ buf, oob_required, page);
+ else
+ status = chip->ecc.write_page(mtd, chip, buf, oob_required,
+ page);
+
+ if (status < 0)
+ return status;
+
+ if (nand_standard_page_accessors(&chip->ecc))
+ return nand_prog_page_end_op(chip);
+
+ return 0;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of leftover OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB: {
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ uint32_t boffs = 0, woffs = ops->ooboffs;
+ size_t bytes = 0;
+
+ for (; free->length && len; free++, len -= bytes) {
+ /* Write request not from offset 0? */
+ if (unlikely(woffs)) {
+ if (woffs >= free->length) {
+ woffs -= free->length;
+ continue;
+ }
+ boffs = free->offset + woffs;
+ bytes = min_t(size_t, len,
+ (free->length - woffs));
+ woffs = 0;
+ } else {
+ bytes = min_t(size_t, len, free->length);
+ boffs = free->offset;
+ }
+ memcpy(chip->oob_poi + boffs, oob, bytes);
+ oob += bytes;
+ }
+ return oob;
+ }
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
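+/* True when @x is not aligned to the subpage write granularity */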
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, realpage, page, column;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
+
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+ int ret;
+ int oob_required = oob ? 1 : 0;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+ /* Reject writes that are not page aligned */
+ if (NOTALIGNED(to)) {
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ column = to & (mtd->writesize - 1);
+
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* Check if it is write protected */
+ if (nand_check_wp(mtd)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ realpage = (int)(to >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ /* Invalidate the page cache when we write to the cached page */
+ if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
+ ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
+ chip->pagebuf = -1;
+
+ /* Don't allow multipage oob writes with offset */
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ while (1) {
+ int bytes = mtd->writesize;
+ uint8_t *wbuf = buf;
+ int use_bufpoi;
+ int part_pagewr = (column || writelen < mtd->writesize);
+
+ if (part_pagewr)
+ use_bufpoi = 1;
+ else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+ use_bufpoi = !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bufpoi = 0;
+
+ WATCHDOG_RESET();
+ /* Partial page write, or need to use a bounce buffer? */
+ if (use_bufpoi) {
+ pr_debug("%s: using write bounce buffer for buf@%p\n",
+ __func__, buf);
+ if (part_pagewr)
+ bytes = min_t(int, bytes - column, writelen);
+ chip->pagebuf = -1;
+ memset(chip->buffers->databuf, 0xff, mtd->writesize);
+ memcpy(&chip->buffers->databuf[column], buf, bytes);
+ wbuf = chip->buffers->databuf;
+ }
+
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(mtd, oob, len, ops);
+ oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+ ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+ oob_required, page,
+ (ops->mode == MTD_OPS_RAW));
+ if (ret)
+ break;
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+
+err_out:
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+
+/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const uint8_t *buf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_oob_ops ops;
+ int ret;
+
+ /* Wait for the device to get ready */
+ panic_nand_wait(mtd, chip, 400);
+
+ /* Grab the device */
+ panic_nand_get_device(chip, mtd, FL_WRITING);
+
+ memset(&ops, 0, sizeof(ops));
+ ops.len = len;
+ ops.datbuf = (uint8_t *)buf;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ ret = nand_do_write_ops(mtd, to, &ops);
+
+ *retlen = ops.retlen;
+ return ret;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, status, len;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ len = mtd_oobavail(mtd, ops);
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (unlikely(ops->ooboffs >= len)) {
+ pr_debug("%s: attempt to start write outside oob\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do not allow write past end of device */
+ if (unlikely(to >= mtd->size ||
+ ops->ooboffs + ops->ooblen >
+ ((mtd->size >> chip->page_shift) -
+ (to >> chip->page_shift)) * len)) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(to >> chip->chip_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ nand_reset(chip, chipnr);
+
+ chip->select_chip(mtd, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+ /* Check if it is write protected */
+ if (nand_check_wp(mtd)) {
+ chip->select_chip(mtd, -1);
+ return -EROFS;
+ }
+
+ /* Invalidate the page cache if we write to the cached page */
+ if (page == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+
+ chip->select_chip(mtd, -1);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret = -ENOTSUPP;
+
+ ops->retlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (ops->datbuf && (to + ops->len) > mtd->size) {
+ pr_debug("%s: attempt to write beyond end of device\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ nand_get_device(mtd, FL_WRITING);
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(mtd, to, ops);
+ else
+ ret = nand_do_write_ops(mtd, to, ops);
+
+out:
+ nand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * single_erase - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * Standard erase command for NAND chips. Returns NAND status.
+ */
+static int single_erase(struct mtd_info *mtd, int page)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ unsigned int eraseblock;
+
+ /* Send commands to erase a block */
+ eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
+
+ return nand_erase_op(chip, eraseblock);
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand(mtd, instr, 0);
+}
+
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+ int allowbbt)
+{
+ int page, status, pages_per_block, ret, chipnr;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ loff_t len;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (check_offs_len(mtd, instr->addr, instr->len))
+ return -EINVAL;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(mtd, FL_ERASING);
+
+ /* Shift to get first page */
+ page = (int)(instr->addr >> chip->page_shift);
+ chipnr = (int)(instr->addr >> chip->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+ /* Select the NAND device */
+ chip->select_chip(mtd, chipnr);
+
+ /* Check if it is write protected */
+ if (nand_check_wp(mtd)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ instr->state = MTD_ERASING;
+
+ while (len) {
+ WATCHDOG_RESET();
+
+ /* Check if we have a bad block; we do not erase bad blocks! */
+ if (!instr->scrub && nand_block_checkbad(mtd, ((loff_t) page) <<
+ chip->page_shift, allowbbt)) {
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr =
+ ((loff_t)page << chip->page_shift);
+ goto erase_exit;
+ }
+
+ /*
+ * Invalidate the page cache if we erase the block which
+ * contains the current cached page.
+ */
+ if (page <= chip->pagebuf && chip->pagebuf <
+ (page + pages_per_block))
+ chip->pagebuf = -1;
+
+ status = chip->erase(mtd, page & chip->pagemask);
+
+ /* See if block erase succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr =
+ ((loff_t)page << chip->page_shift);
+ goto erase_exit;
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1ULL << chip->phys_erase_shift);
+ page += pages_per_block;
+
+ /* Check if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ chip->select_chip(mtd, -1);
+ chip->select_chip(mtd, chipnr);
+ }
+ }
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+ /* Deselect and wake up anyone waiting on the device */
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ /* Invoke the callback function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /* Return more or less happy */
+ return ret;
+}
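+
+/*
+ * Illustrative sketch, not part of the driver: erasing a single block through
+ * the MTD interface. The helper name is hypothetical; block_ofs must be
+ * erase-block aligned, and we assume struct erase_info carries the mtd
+ * pointer, as the erase path above expects.
+ */
+#if 0
+static int example_erase_one_block(struct mtd_info *mtd, loff_t block_ofs)
+{
+ struct erase_info instr = { };
+
+ instr.mtd = mtd; /* assumption: erase_info still has an mtd field */
+ instr.addr = block_ofs;
+ instr.len = mtd->erasesize;
+
+ return mtd_erase(mtd, &instr);
+}
+#endif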
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait-for-chip-ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device(mtd, FL_SYNCING);
+ /* Release it and go back */
+ nand_release_device(mtd);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(offs >> chip->chip_shift);
+ int ret;
+
+ /* Select the NAND device */
+ nand_get_device(mtd, FL_READING);
+ chip->select_chip(mtd, chipnr);
+
+ ret = nand_block_checkbad(mtd, offs, 0);
+
+ chip->select_chip(mtd, -1);
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return nand_block_markbad_lowlevel(mtd, ofs);
+}
+
+/**
+ * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ */
+static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -ENOTSUPP;
+#endif
+
+ return nand_set_features_op(chip, addr, subfeature_param);
+}
+
+/**
+ * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ */
+static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
+ int addr, uint8_t *subfeature_param)
+{
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+ if (!chip->onfi_version ||
+ !(le16_to_cpu(chip->onfi_params.opt_cmd)
+ & ONFI_OPT_CMD_SET_GET_FEATURES))
+ return -ENOTSUPP;
+#endif
+
+ return nand_get_features_op(chip, addr, subfeature_param);
+}
+
+/* Set default functions */
+static void nand_set_defaults(struct nand_chip *chip, int busw)
+{
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->chip_delay)
+ chip->chip_delay = 20;
+
+ /* check if a user-supplied command function was given */
+ if (chip->cmdfunc == NULL)
+ chip->cmdfunc = nand_command;
+
+ /* check if a user-supplied wait function was given */
+ if (chip->waitfunc == NULL)
+ chip->waitfunc = nand_wait;
+
+ if (!chip->select_chip)
+ chip->select_chip = nand_select_chip;
+
+ /* set for ONFI nand */
+ if (!chip->onfi_set_features)
+ chip->onfi_set_features = nand_onfi_set_features;
+ if (!chip->onfi_get_features)
+ chip->onfi_get_features = nand_onfi_get_features;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->read_byte || chip->read_byte == nand_read_byte)
+ chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->read_word)
+ chip->read_word = nand_read_word;
+ if (!chip->block_bad)
+ chip->block_bad = nand_block_bad;
+ if (!chip->block_markbad)
+ chip->block_markbad = nand_default_block_markbad;
+ if (!chip->write_buf || chip->write_buf == nand_write_buf)
+ chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->write_byte || chip->write_byte == nand_write_byte)
+ chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!chip->read_buf || chip->read_buf == nand_read_buf)
+ chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
+ if (!chip->scan_bbt)
+ chip->scan_bbt = nand_default_bbt;
+
+ if (!chip->controller) {
+ chip->controller = &chip->hwcontrol;
+ spin_lock_init(&chip->controller->lock);
+ init_waitqueue_head(&chip->controller->wq);
+ }
+
+ if (!chip->buf_align)
+ chip->buf_align = 1;
+}
+
+/* Sanitize ONFI strings so we can safely print them */
+static void sanitize_string(char *s, size_t len)
+{
+ ssize_t i;
+
+ /* Null terminate */
+ s[len - 1] = 0;
+
+ /* Remove non printable chars */
+ for (i = 0; i < len - 1; i++) {
+ if (s[i] < ' ' || s[i] > 127)
+ s[i] = '?';
+ }
+
+ /* Remove trailing spaces */
+ strim(s);
+}
+
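+/*
+ * CRC-16 as used by ONFI parameter pages: polynomial 0x8005, MSB first,
+ * seeded with ONFI_CRC_BASE by the callers below.
+ */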
+static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
+
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+ struct nand_chip *chip, struct nand_onfi_params *p)
+{
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ /* Send our own NAND_CMD_PARAM. */
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ goto ext_out;
+
+ /* Use the Change Read Column command to skip the ONFI param pages. */
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
+
+ ret = -EINVAL;
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+ pr_debug("fail in the CRC.\n");
+ goto ext_out;
+ }
+
+ /*
+ * Check the signature.
+ * We do not strictly follow the ONFI spec here; this may change in the future.
+ */
+ if (strncmp((char *)ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+ pr_debug("We can not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
+ }
+
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
+
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+ feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+ if (le16_to_cpu(p->vendor_revision) < 1)
+ return;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->setup_read_retry = nand_setup_read_retry_micron;
+}
+
+/*
+ * Check if the NAND chip is ONFI compliant; returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ struct nand_onfi_params *p = &chip->onfi_params;
+ char id[4];
+ int i, ret, val;
+
+ /* Try ONFI for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0, NULL, 0);
+ if (ret)
+ return 0;
+
+ for (i = 0; i < 3; i++) {
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
+
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+ le16_to_cpu(p->crc)) {
+ break;
+ }
+ }
+
+ if (i == 3) {
+ pr_err("Could not find valid ONFI parameter page; aborting\n");
+ return 0;
+ }
+
+ /* Check version */
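+ /*
+ * The revision field is a bitmap: bit 1 = ONFI 1.0, bit 2 = 2.0,
+ * bit 3 = 2.1, bit 4 = 2.2, bit 5 = 2.3; the highest supported
+ * version wins.
+ */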
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 5))
+ chip->onfi_version = 23;
+ else if (val & (1 << 4))
+ chip->onfi_version = 22;
+ else if (val & (1 << 3))
+ chip->onfi_version = 21;
+ else if (val & (1 << 2))
+ chip->onfi_version = 20;
+ else if (val & (1 << 1))
+ chip->onfi_version = 10;
+
+ if (!chip->onfi_version) {
+ pr_info("unsupported ONFI version: %d\n", val);
+ return 0;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+ /* See erasesize comment */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
+ *busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ if (p->ecc_bits != 0xff) {
+ chip->ecc_strength_ds = p->ecc_bits;
+ chip->ecc_step_ds = 512;
+ } else if (chip->onfi_version >= 21 &&
+ (onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+ /*
+ * nand_flash_detect_ext_param_page() uses the Change Read
+ * Column command, which may not be supported by chip->cmdfunc,
+ * so try to update chip->cmdfunc now. We do not replace a
+ * user-supplied command function.
+ */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(mtd, chip, p))
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
+ }
+
+ if (p->jedec_id == NAND_MFR_MICRON)
+ nand_onfi_detect_micron(chip, p);
+
+ return 1;
+}
+#else
+static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Check if the NAND chip is JEDEC compliant; returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
+ int *busw)
+{
+ struct nand_jedec_params *p = &chip->jedec_params;
+ struct jedec_ecc_info *ecc;
+ char id[5];
+ int i, val, ret;
+
+ /* Try JEDEC for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
+ if (ret)
+ return 0;
+
+ for (i = 0; i < 3; i++) {
+ ret = nand_read_data_op(chip, p, sizeof(*p), true);
+ if (ret)
+ return 0;
+
+ if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
+ le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == 3) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ return 0;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ chip->jedec_version = 10;
+ else if (val & (1 << 1))
+ chip->jedec_version = 1; /* vendor specific version */
+
+ if (!chip->jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ return 0;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ if (!mtd->name)
+ mtd->name = p->model;
+
+ mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+ /* Please refer to the comment in nand_flash_detect_onfi. */
+ mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize *= mtd->writesize;
+
+ mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+ /* Please refer to the comment in nand_flash_detect_onfi. */
+ chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+ chip->bits_per_cell = p->bits_per_cell;
+
+ if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
+ *busw = NAND_BUSWIDTH_16;
+ else
+ *busw = 0;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
+ if (ecc->codeword_size >= 9) {
+ chip->ecc_strength_ds = ecc->ecc_bits;
+ chip->ecc_step_ds = 1 << ecc->codeword_size;
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+ return 1;
+}
+
+/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at
+ * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+ int i, j;
+ for (i = 0; i < period; i++)
+ for (j = i + period; j < arrlen; j += period)
+ if (id_data[i] != id_data[j])
+ return 0;
+ return 1;
+}
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
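+ *
+ * Example (hypothetical ID): {0xec, 0xd3, 0x51, 0xec, 0xd3, 0x51, 0xec,
+ * 0xd3} wraps around with a period of 3, so the reported length is 3.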
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+ int last_nonzero, period;
+
+ /* Find last non-zero byte */
+ for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+ if (id_data[last_nonzero])
+ break;
+
+ /* All zeros */
+ if (last_nonzero < 0)
+ return 0;
+
+ /* Calculate wraparound period */
+ for (period = 1; period < arrlen; period++)
+ if (nand_id_has_period(id_data, arrlen, period))
+ break;
+
+ /* There's a repeated pattern */
+ if (period < arrlen)
+ return period;
+
+ /* There are trailing zeros */
+ if (last_nonzero < arrlen - 1)
+ return last_nonzero + 1;
+
+ /* No pattern detected */
+ return arrlen;
+}
+
+/* Extract the number of bits per cell from the 3rd byte of the extended ID */
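+/*
+ * With the common definitions NAND_CI_CELLTYPE_MSK = 0x0c and
+ * NAND_CI_CELLTYPE_SHIFT = 2, a cellinfo of 0x00 decodes to 1 bit per
+ * cell (SLC) and 0x04 to 2 bits per cell (MLC).
+ */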
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
+
+/*
+ * Many newer NAND chips share similar device ID codes, which represent the
+ * chip. The rest of the parameters must be decoded according to generic or
+ * manufacturer-specific "extended ID" decoding patterns.
+ */
+static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 id_data[8], int *busw)
+{
+ int extid, id_len;
+ /* The 3rd id byte holds MLC / multichip data */
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
+ id_len = nand_id_len(id_data, 8);
+
+ /*
+ * Field definitions are in the following datasheets:
+ * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+ * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
+ * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
+ *
+ * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+ * ID to decide what to do.
+ */
+ if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+ !nand_is_slc(chip) && id_data[5] != 0x00) {
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 1:
+ mtd->oobsize = 128;
+ break;
+ case 2:
+ mtd->oobsize = 218;
+ break;
+ case 3:
+ mtd->oobsize = 400;
+ break;
+ case 4:
+ mtd->oobsize = 436;
+ break;
+ case 5:
+ mtd->oobsize = 512;
+ break;
+ case 6:
+ mtd->oobsize = 640;
+ break;
+ case 7:
+ default: /* Other cases are "reserved" (unknown) */
+ mtd->oobsize = 1024;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+ *busw = 0;
+ } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
+ !nand_is_slc(chip)) {
+ unsigned int tmp;
+
+ /* Calc pagesize */
+ mtd->writesize = 2048 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+ case 0:
+ mtd->oobsize = 128;
+ break;
+ case 1:
+ mtd->oobsize = 224;
+ break;
+ case 2:
+ mtd->oobsize = 448;
+ break;
+ case 3:
+ mtd->oobsize = 64;
+ break;
+ case 4:
+ mtd->oobsize = 32;
+ break;
+ case 5:
+ mtd->oobsize = 16;
+ break;
+ default:
+ mtd->oobsize = 640;
+ break;
+ }
+ extid >>= 2;
+ /* Calc blocksize */
+ tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
+ if (tmp < 0x03)
+ mtd->erasesize = (128 * 1024) << tmp;
+ else if (tmp == 0x03)
+ mtd->erasesize = 768 * 1024;
+ else
+ mtd->erasesize = (64 * 1024) << tmp;
+ *busw = 0;
+ } else {
+ /* Calc pagesize */
+ mtd->writesize = 1024 << (extid & 0x03);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) *
+ (mtd->writesize >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+ /*
+ * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
+ * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+ * follows:
+ * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+ * 110b -> 24nm
+ * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ */
+ if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
+ nand_is_slc(chip) &&
+ (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
+ !(id_data[4] & 0x80) /* !BENAND */) {
+ mtd->oobsize = 32 * mtd->writesize >> 9;
+ }
+
+ }
+}
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 id_data[8],
+ int *busw)
+{
+ int maf_id = id_data[0];
+
+ mtd->erasesize = type->erasesize;
+ mtd->writesize = type->pagesize;
+ mtd->oobsize = mtd->writesize / 32;
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ /* All legacy ID NAND are small-page, SLC */
+ chip->bits_per_cell = 1;
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
+ && id_data[6] == 0x00 && id_data[7] == 0x00
+ && mtd->writesize == 512) {
+ mtd->erasesize = 128 * 1024;
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+}
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to some
+ * heuristic patterns using various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 id_data[8])
+{
+ int maf_id = id_data[0];
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+ else
+ chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+
+ /*
+ * Bad block marker is stored in the last page of each block on Samsung
+ * and Hynix MLC devices; stored in first two pages of each block on
+ * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
+ * AMD/Spansion, and Macronix. All others scan only the first page.
+ */
+ if (!nand_is_slc(chip) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX))
+ chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+ else if ((nand_is_slc(chip) &&
+ (maf_id == NAND_MFR_SAMSUNG ||
+ maf_id == NAND_MFR_HYNIX ||
+ maf_id == NAND_MFR_TOSHIBA ||
+ maf_id == NAND_MFR_AMD ||
+ maf_id == NAND_MFR_MACRONIX)) ||
+ (mtd->writesize == 2048 &&
+ maf_id == NAND_MFR_MICRON))
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+}
+
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+ struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+ if (!strncmp((char *)type->id, (char *)id_data, type->id_len)) {
+ mtd->writesize = type->pagesize;
+ mtd->erasesize = type->erasesize;
+ mtd->oobsize = type->oobsize;
+
+ chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+ chip->options |= type->options;
+ chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
+ chip->ecc_step_ds = NAND_ECC_STEP(type);
+ chip->onfi_timing_mode_default =
+ type->onfi_timing_mode_default;
+
+ *busw = type->options & NAND_BUSWIDTH_16;
+
+ if (!mtd->name)
+ mtd->name = type->name;
+
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported.
+ */
+struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int *maf_id, int *dev_id,
+ struct nand_flash_dev *type)
+{
+ int busw, ret;
+ int maf_idx;
+ u8 id_data[8];
+
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up.
+ */
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Select the device */
+ chip->select_chip(mtd, 0);
+
+ /* Send the command for reading device ID */
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Read manufacturer and device IDs */
+ *maf_id = id_data[0];
+ *dev_id = id_data[1];
+
+ /*
+ * Try again to make sure, as on some systems bus-hold or other
+ * interface concerns can cause random data to appear that looks
+ * like a credible NAND flash. If the two results do not match,
+ * ignore the device completely.
+ */
+
+ /* Read entire ID string */
+ ret = nand_readid_op(chip, 0, id_data, 8);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+ *maf_id, *dev_id, id_data[0], id_data[1]);
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!type)
+ type = nand_flash_ids;
+
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+ goto ident_done;
+ } else if (*dev_id == type->dev_id) {
+ break;
+ }
+ }
+
+ chip->onfi_version = 0;
+ if (!type->name || !type->pagesize) {
+ /* Check if the chip is ONFI compliant */
+ if (nand_flash_detect_onfi(mtd, chip, &busw))
+ goto ident_done;
+
+ /* Check if the chip is JEDEC compliant */
+ if (nand_flash_detect_jedec(mtd, chip, &busw))
+ goto ident_done;
+ }
+
+ if (!type->name)
+ return ERR_PTR(-ENODEV);
+
+ if (!mtd->name)
+ mtd->name = type->name;
+
+ chip->chipsize = (uint64_t)type->chipsize << 20;
+
+ if (!type->pagesize) {
+ /* Decode parameters from extended ID */
+ nand_decode_ext_id(mtd, chip, id_data, &busw);
+ } else {
+ nand_decode_id(mtd, chip, type, id_data, &busw);
+ }
+ /* Get chip options */
+ chip->options |= type->options;
+
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+ * options for chips which do not have an extended id.
+ */
+ if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
+ chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ident_done:
+
+ /* Try to identify manufacturer */
+ for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
+ if (nand_manuf_ids[maf_idx].id == *maf_id)
+ break;
+ }
+
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(chip->options & NAND_BUSWIDTH_16);
+ chip->options |= busw;
+ nand_set_defaults(chip, busw);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check if the bus width is correct. Hardware drivers should
+ * have set the chip up correctly!
+ */
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+ pr_warn("bus width %d instead %d bit\n",
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nand_decode_bbm_options(mtd, chip, id_data);
+
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ /* Convert chipsize to number of pages per chip -1 */
+ chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (chip->chipsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+ else {
+ chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
+ chip->chip_shift += 32 - 1;
+ }
+
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
+
+ chip->badblockbits = 8;
+ chip->erase = single_erase;
+
+ /* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+ chip->cmdfunc = nand_command_lp;
+
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ *maf_id, *dev_id);
+
+#ifdef CONFIG_SYS_NAND_ONFI_DETECTION
+ if (chip->onfi_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->onfi_params.model);
+ else if (chip->jedec_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->jedec_params.model);
+ else
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ type->name);
+#else
+ if (chip->jedec_version)
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ chip->jedec_params.model);
+ else
+ pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+ type->name);
+#endif
+
+ pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
+ (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
+ return type;
+}
+EXPORT_SYMBOL(nand_get_flash_type);
+
+#if CONFIG_IS_ENABLED(OF_CONTROL)
+#include <asm/global_data.h>
+DECLARE_GLOBAL_DATA_PTR;
+
+static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip, int node)
+{
+ int ret, ecc_mode = -1, ecc_strength, ecc_step;
+ const void *blob = gd->fdt_blob;
+ const char *str;
+
+ ret = fdtdec_get_int(blob, node, "nand-bus-width", -1);
+ if (ret == 16)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (fdtdec_get_bool(blob, node, "nand-on-flash-bbt"))
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ str = fdt_getprop(blob, node, "nand-ecc-mode", NULL);
+ if (str) {
+ if (!strcmp(str, "none"))
+ ecc_mode = NAND_ECC_NONE;
+ else if (!strcmp(str, "soft"))
+ ecc_mode = NAND_ECC_SOFT;
+ else if (!strcmp(str, "hw"))
+ ecc_mode = NAND_ECC_HW;
+ else if (!strcmp(str, "hw_syndrome"))
+ ecc_mode = NAND_ECC_HW_SYNDROME;
+ else if (!strcmp(str, "hw_oob_first"))
+ ecc_mode = NAND_ECC_HW_OOB_FIRST;
+ else if (!strcmp(str, "soft_bch"))
+ ecc_mode = NAND_ECC_SOFT_BCH;
+ }
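+
+ /*
+ * Example (hypothetical) device-tree fragment providing the
+ * properties parsed here:
+ *
+ * nand@0 {
+ * nand-bus-width = <16>;
+ * nand-on-flash-bbt;
+ * nand-ecc-mode = "hw";
+ * nand-ecc-strength = <8>;
+ * nand-ecc-step-size = <512>;
+ * };
+ */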
+
+ ecc_strength = fdtdec_get_int(blob, node, "nand-ecc-strength", -1);
+ ecc_step = fdtdec_get_int(blob, node, "nand-ecc-step-size", -1);
+
+ if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
+ (!(ecc_step >= 0) && ecc_strength >= 0)) {
+ pr_err("must set both strength and step size in DT\n");
+ return -EINVAL;
+ }
+
+ if (ecc_mode >= 0)
+ chip->ecc.mode = ecc_mode;
+
+ if (ecc_strength >= 0)
+ chip->ecc.strength = ecc_strength;
+
+ if (ecc_step > 0)
+ chip->ecc.size = ecc_step;
+
+ if (fdt_getprop(blob, node, "nand-ecc-maximize", NULL))
+ chip->ecc.options |= NAND_ECC_MAXIMIZE;
+
+ return 0;
+}
+#else
+static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip, int node)
+{
+ return 0;
+}
+#endif /* CONFIG_IS_ENABLED(OF_CONTROL) */
+
+/**
+ * nand_scan_ident - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ */
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+ struct nand_flash_dev *table)
+{
+ int i, nand_maf_id, nand_dev_id;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_flash_dev *type;
+ int ret;
+
+ if (chip->flash_node) {
+ ret = nand_dt_init(mtd, chip, chip->flash_node);
+ if (ret)
+ return ret;
+ }
+
+ /* Set the default functions */
+ nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
+
+ /* Read the flash type */
+ type = nand_get_flash_type(mtd, chip, &nand_maf_id,
+ &nand_dev_id, table);
+
+ if (IS_ERR(type)) {
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ pr_warn("No NAND device found\n");
+ chip->select_chip(mtd, -1);
+ return PTR_ERR(type);
+ }
+
+ /* Initialize the ->data_interface field. */
+ ret = nand_init_data_interface(chip);
+ if (ret)
+ return ret;
+
+ /*
+ * Setup the data interface correctly on the chip and controller side.
+ * This explicit call to nand_setup_data_interface() is only required
+ * for the first die, because nand_reset() has been called before
+ * ->data_interface and ->default_onfi_timing_mode were set.
+ * For the other dies, nand_reset() will automatically switch to the
+ * best mode for us.
+ */
+ ret = nand_setup_data_interface(chip, 0);
+ if (ret)
+ return ret;
+
+ chip->select_chip(mtd, -1);
+
+ /* Check for a chip array */
+ for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
+ /* See comment in nand_get_flash_type for reset */
+ nand_reset(chip, i);
+
+ chip->select_chip(mtd, i);
+ /* Send the command for reading device ID */
+ nand_readid_op(chip, 0, id, sizeof(id));
+
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
+ chip->select_chip(mtd, -1);
+ break;
+ }
+ chip->select_chip(mtd, -1);
+ }
+
+#ifdef DEBUG
+ if (i > 1)
+ pr_info("%d chips detected\n", i);
+#endif
+
+ /* Store the number of chips and calc total size for mtd */
+ chip->numchips = i;
+ mtd->size = i * chip->chipsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_scan_ident);
+
+/**
+ * nand_check_ecc_caps - check the sanity of preset ECC settings
+ * @chip: nand chip info structure
+ * @caps: ECC caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * When ECC step size and strength are already set, check if they are supported
+ * by the controller and the calculated ECC bytes fit within the chip's OOB.
+ * On success, the calculated ECC bytes is set.
+ */
+int nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int preset_step = chip->ecc.size;
+ int preset_strength = chip->ecc.strength;
+ int nsteps, ecc_bytes;
+ int i, j;
+
+ if (WARN_ON(oobavail < 0))
+ return -EINVAL;
+
+ if (!preset_step || !preset_strength)
+ return -ENODATA;
+
+ nsteps = mtd->writesize / preset_step;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+
+ if (stepinfo->stepsize != preset_step)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ if (stepinfo->strengths[j] != preset_strength)
+ continue;
+
+ ecc_bytes = caps->calc_ecc_bytes(preset_step,
+ preset_strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ return ecc_bytes;
+
+ if (ecc_bytes * nsteps > oobavail) {
+ pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
+ preset_step, preset_strength);
+ return -ENOSPC;
+ }
+
+ chip->ecc.bytes = ecc_bytes;
+
+ return 0;
+ }
+ }
+
+ pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
+ preset_step, preset_strength);
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
+
+/**
+ * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * If a chip's ECC requirement is provided, try to meet it with the least
+ * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
+ * On success, the chosen ECC settings are set.
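+ *
+ * Example (hypothetical numbers): a chip requiring 8 bits per 512 bytes
+ * on a 4096-byte page needs 4096 / 512 * 8 = 64 correctable bits per
+ * page, so a controller setting of (step = 1024, strength = 16) gives
+ * 16 * 4 = 64 >= 64 and is accepted if its ECC bytes fit in the OOB.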
+ */
+int nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int req_step = chip->ecc_step_ds;
+ int req_strength = chip->ecc_strength_ds;
+ int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
+ int best_step, best_strength, best_ecc_bytes;
+ int best_ecc_bytes_total = INT_MAX;
+ int i, j;
+
+ if (WARN_ON(oobavail < 0))
+ return -EINVAL;
+
+ /* No information provided by the NAND chip */
+ if (!req_step || !req_strength)
+ return -ENOTSUPP;
+
+ /* number of correctable bits the chip requires in a page */
+ req_corr = mtd->writesize / req_step * req_strength;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ /*
+ * If both step size and strength are smaller than the
+ * chip's requirement, it is not easy to compare the
+ * resulted reliability.
+ */
+ if (step_size < req_step && strength < req_strength)
+ continue;
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+ ecc_bytes_total = ecc_bytes * nsteps;
+
+ if (ecc_bytes_total > oobavail ||
+ strength * nsteps < req_corr)
+ continue;
+
+ /*
+ * We assume the best is to meet the chip's requirement
+ * with the least number of ECC bytes.
+ */
+ if (ecc_bytes_total < best_ecc_bytes_total) {
+ best_ecc_bytes_total = ecc_bytes_total;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (best_ecc_bytes_total == INT_MAX)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_match_ecc_req);
+
+/**
+ * nand_maximize_ecc - choose the max ECC strength available
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the max ECC strength that is supported on the controller, and can fit
+ * within the chip's OOB. On success, the chosen ECC settings are set.
+ */
+int nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int step_size, strength, nsteps, ecc_bytes, corr;
+ int best_corr = 0;
+ int best_step = 0;
+ int best_strength, best_ecc_bytes;
+ int i, j;
+
+ if (WARN_ON(oobavail < 0))
+ return -EINVAL;
+
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
+
+ /* If chip->ecc.size is already set, respect it */
+ if (chip->ecc.size && step_size != chip->ecc.size)
+ continue;
+
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
+
+ if (mtd->writesize % step_size)
+ continue;
+
+ nsteps = mtd->writesize / step_size;
+
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ON_ONCE(ecc_bytes < 0))
+ continue;
+
+ if (ecc_bytes * nsteps > oobavail)
+ continue;
+
+ corr = strength * nsteps;
+
+ /*
+ * If the number of correctable bits is the same,
+ * bigger step_size has more reliability.
+ */
+ if (corr > best_corr ||
+ (corr == best_corr && step_size > best_step)) {
+ best_corr = corr;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
+ }
+
+ if (!best_corr)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_maximize_ecc);
+
+/*
+ * Check if the chip configuration meets the datasheet requirements.
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
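+ *
+ * Example: a configuration correcting 4 bits per 512 bytes on a
+ * 2048-byte page gives corr = 16, while a chip requiring 8 bits per
+ * 512 bytes needs ds_corr = 32, so the configuration is reported as
+ * too weak.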
+ */
+static bool nand_ecc_strength_good(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int corr, ds_corr;
+
+ if (ecc->size == 0 || chip->ecc_step_ds == 0)
+ /* Not enough information */
+ return true;
+
+ /*
+ * We get the number of corrected bits per page to compare
+ * the correction density.
+ */
+ corr = (mtd->writesize * ecc->strength) / ecc->size;
+ ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
+
+ return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
+}
+
+static bool invalid_ecc_page_accessors(struct nand_chip *chip)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (nand_standard_page_accessors(ecc))
+ return false;
+
+ /*
+ * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
+ * controller driver implements all the page accessors because
+ * default helpers are not suitable when the core does not
+ * send the READ0/PAGEPROG commands.
+ */
+ return (!ecc->read_page || !ecc->write_page ||
+ !ecc->read_page_raw || !ecc->write_page_raw ||
+ (NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
+ (NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
+ ecc->hwctl && ecc->calculate));
+}
+
+/**
+ * nand_scan_tail - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ *
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
+ */
+int nand_scan_tail(struct mtd_info *mtd)
+{
+ int i;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct nand_buffers *nbuf;
+
+ /* New bad blocks should be marked in OOB, flash-based BBT, or both */
+ BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH));
+
+ if (invalid_ecc_page_accessors(chip)) {
+ pr_err("Invalid ECC page accessors setup\n");
+ return -EINVAL;
+ }
+
+ if (!(chip->options & NAND_OWN_BUFFERS)) {
+ nbuf = kzalloc(sizeof(struct nand_buffers), GFP_KERNEL);
+ if (!nbuf)
+ return -ENOMEM;
+ chip->buffers = nbuf;
+ } else {
+ if (!chip->buffers)
+ return -ENOMEM;
+ }
+
+ /* Set the internal oob buffer location, just after the page data */
+ chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+
+ /*
+ * If no default placement scheme is given, select an appropriate one.
+ */
+ if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
+ switch (mtd->oobsize) {
+#ifndef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT
+ case 8:
+ ecc->layout = &nand_oob_8;
+ break;
+ case 16:
+ ecc->layout = &nand_oob_16;
+ break;
+ case 64:
+ ecc->layout = &nand_oob_64;
+ break;
+ case 128:
+ ecc->layout = &nand_oob_128;
+ break;
+#endif
+ default:
+ pr_warn("No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
+ }
+ }
+
+ if (!chip->write_page)
+ chip->write_page = nand_write_page;
+
+ /*
+ * Check the ECC mode; if 3-byte/512-byte hardware ECC is selected
+ * and we have a 256-byte page size, fall back to software ECC.
+ */
+
+ switch (ecc->mode) {
+ case NAND_ECC_HW_OOB_FIRST:
+ /* Similar to NAND_ECC_HW, but a separate read_page handle */
+ if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
+ pr_warn("No ECC functions supplied; hardware ECC not possible\n");
+ BUG();
+ }
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc_oob_first;
+
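+ /* fall through */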
+ case NAND_ECC_HW:
+ /* Use standard hwecc read page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
+ ecc->write_subpage = nand_write_subpage_hwecc;
+
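+ /* fall through */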
+ case NAND_ECC_HW_SYNDROME:
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
+ pr_warn("No ECC functions supplied; hardware ECC not possible\n");
+ BUG();
+ }
+ /* Use standard syndrome read/write page function? */
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+
+ if (mtd->writesize >= ecc->size) {
+ if (!ecc->strength) {
+ pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+ BUG();
+ }
+ break;
+ }
+ pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
+ ecc->size, mtd->writesize);
+ ecc->mode = NAND_ECC_SOFT;
+
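+ /* fall through */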
+ case NAND_ECC_SOFT:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
+ break;
+
+ case NAND_ECC_SOFT_BCH:
+ if (!mtd_nand_has_bch()) {
+ pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ BUG();
+ }
+ ecc->calculate = nand_bch_calculate_ecc;
+ ecc->correct = nand_bch_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ /*
+ * Board driver should supply ecc.size and ecc.strength values
+ * to select how many bits are correctable. Otherwise, default
+ * to 4 bits for large page devices.
+ */
+ if (!ecc->size && (mtd->oobsize >= 64)) {
+ ecc->size = 512;
+ ecc->strength = 4;
+ }
+
+ /* See nand_bch_init() for details. */
+ ecc->bytes = 0;
+ ecc->priv = nand_bch_init(mtd);
+ if (!ecc->priv) {
+ pr_warn("BCH ECC initialization failed!\n");
+ BUG();
+ }
+ break;
+
+ case NAND_ECC_NONE:
+ pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
+ ecc->read_page = nand_read_page_raw;
+ ecc->write_page = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->write_oob = nand_write_oob_std;
+ ecc->size = mtd->writesize;
+ ecc->bytes = 0;
+ ecc->strength = 0;
+ break;
+
+ default:
+ pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
+ BUG();
+ }
+
+ /* For many systems, the standard OOB write also works for raw */
+ if (!ecc->read_oob_raw)
+ ecc->read_oob_raw = ecc->read_oob;
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area.
+ */
+ mtd->oobavail = 0;
+ if (ecc->layout) {
+ for (i = 0; ecc->layout->oobfree[i].length; i++)
+ mtd->oobavail += ecc->layout->oobfree[i].length;
+ }
+
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_strength_good(mtd))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ mtd->name);
+
+ /*
+ * Set the number of read / write steps for one page depending on ECC
+ * mode.
+ */
+ ecc->steps = mtd->writesize / ecc->size;
+ if (ecc->steps * ecc->size != mtd->writesize) {
+ pr_warn("Invalid ECC parameters\n");
+ BUG();
+ }
+ ecc->total = ecc->steps * ecc->bytes;
+
+ /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+ switch (ecc->steps) {
+ case 2:
+ mtd->subpage_sft = 1;
+ break;
+ case 4:
+ case 8:
+ case 16:
+ mtd->subpage_sft = 2;
+ break;
+ }
+ }
+ chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
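+ /* e.g. a 2048-byte page with 4 ECC steps gets subpage_sft = 2, i.e. 512-byte subpages */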
+
+ /* Initialize state */
+ chip->state = FL_READY;
+
+ /* Invalidate the pagebuffer reference */
+ chip->pagebuf = -1;
+
+ /* Large page NAND with SOFT_ECC should support subpage reads */
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT:
+ case NAND_ECC_SOFT_BCH:
+ if (chip->page_shift > 9)
+ chip->options |= NAND_SUBPAGE_READ;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
+ mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+ MTD_CAP_NANDFLASH;
+ mtd->_erase = nand_erase;
+ mtd->_panic_write = panic_nand_write;
+ mtd->_read_oob = nand_read_oob;
+ mtd->_write_oob = nand_write_oob;
+ mtd->_sync = nand_sync;
+ mtd->_lock = NULL;
+ mtd->_unlock = NULL;
+ mtd->_block_isreserved = nand_block_isreserved;
+ mtd->_block_isbad = nand_block_isbad;
+ mtd->_block_markbad = nand_block_markbad;
+ mtd->writebufsize = mtd->writesize;
+
+ /* propagate ecc info to mtd_info */
+ mtd->ecclayout = ecc->layout;
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
+ /*
+ * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+ * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+ * properly set.
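+ * The default is 3/4 of the ECC strength, e.g. 6 for an 8-bit ECC.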
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+
+ return 0;
+}
+EXPORT_SYMBOL(nand_scan_tail);
+
+/**
+ * nand_scan - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values.
+ */
+int nand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int ret;
+
+ ret = nand_scan_ident(mtd, maxchips, NULL);
+ if (!ret)
+ ret = nand_scan_tail(mtd);
+ return ret;
+}
+EXPORT_SYMBOL(nand_scan);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_bbt.c b/roms/u-boot/drivers/mtd/nand/raw/nand_bbt.c
new file mode 100644
index 000000000..911472e91
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_bbt.c
@@ -0,0 +1,1376 @@
+/*
+ * Overview:
+ * Bad block table support for the NAND driver
+ *
+ * Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, it tries to find the bad block table
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_BBT_USE_FLASH) is specified, the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * Once a new bad block is discovered, the "factory" information is updated
+ * on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number, then the mirror BBT is used to build the memory based BBT.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the BBTs is out of date or does not exist it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer-created BBTs like the one found on M-SYS DOC devices,
+ * the BBT is searched and read but never created.
+ *
+ * The auto generated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated eventually.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * Following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+ */
+
+#include <common.h>
+#include <log.h>
+#include <malloc.h>
+#include <dm/devres.h>
+#include <linux/bug.h>
+#include <linux/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+
+#define BBT_BLOCK_GOOD 0x00
+#define BBT_BLOCK_WORN 0x01
+#define BBT_BLOCK_RESERVED 0x02
+#define BBT_BLOCK_FACTORY_BAD 0x03
+
+#define BBT_ENTRY_MASK 0x03
+#define BBT_ENTRY_SHIFT 2
+
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
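+/*
+ * The in-memory BBT packs four 2-bit entries per byte: block >> 2
+ * selects the byte and (block & 3) * 2 the bit pair within it, using
+ * the BBT_BLOCK_* encoding above.
+ */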
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+ uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+ entry >>= (block & BBT_ENTRY_MASK) * 2;
+ return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+ uint8_t mark)
+{
+ uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+ chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
+
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ if (memcmp(buf, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return check_pattern_no_oob(buf, td);
+
+ /* Compare the pattern */
+ if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+{
+ /* Compare the pattern */
+ if (memcmp(buf + td->offs, td->pattern, td->len))
+ return -1;
+ return 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+ u32 len;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ return 0;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+ return len;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: block number offset in the table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
+{
+ int res, ret = 0, i, j, act = 0;
+ struct nand_chip *this = mtd_to_nand(mtd);
+ size_t retlen, len, totlen;
+ loff_t from;
+ int bits = td->options & NAND_BBT_NRBITS_MSK;
+ uint8_t msk = (uint8_t)((1 << bits) - 1);
+ u32 marker_len;
+ int reserved_block_code = td->reserved_block_code;
+
+ totlen = (num * bits) >> 3;
+ marker_len = add_marker_len(td);
+ from = ((loff_t)page) << this->page_shift;
+
+ while (totlen) {
+ len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+ if (marker_len) {
+ /*
+ * In case the BBT marker is not in the OOB area it
+ * will be just in the first page.
+ */
+ len -= marker_len;
+ from += marker_len;
+ marker_len = 0;
+ }
+ res = mtd_read(mtd, from, len, &retlen, buf);
+ if (res < 0) {
+ if (mtd_is_eccerr(res)) {
+ pr_info("nand_bbt: ECC error in BBT at 0x%012llx\n",
+ from & ~mtd->writesize);
+ return res;
+ } else if (mtd_is_bitflip(res)) {
+ pr_info("nand_bbt: corrected error in BBT at 0x%012llx\n",
+ from & ~mtd->writesize);
+ ret = res;
+ } else {
+ pr_info("nand_bbt: error reading BBT\n");
+ return res;
+ }
+ }
+
+ /* Analyse data */
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ for (j = 0; j < 8; j += bits, act++) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code && (tmp == reserved_block_code)) {
+ pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_RESERVED);
+ mtd->ecc_stats.bbtblocks++;
+ continue;
+ }
+ /*
+ * Leave it for now; once this has matured we can move this
+ * message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+ (loff_t)(offs + act) <<
+ this->bbt_erase_shift);
+ /* Factory marked bad or worn out? */
+ if (tmp == 0)
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_FACTORY_BAD);
+ else
+ bbt_mark_entry(this, offs + act,
+ BBT_BLOCK_WORN);
+ mtd->ecc_stats.badblocks++;
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips; applies only if
+ * NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
+static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int res = 0, i;
+
+ if (td->options & NAND_BBT_PERCHIP) {
+ int offs = 0;
+ for (i = 0; i < this->numchips; i++) {
+ if (chip == -1 || chip == i)
+ res = read_bbt(mtd, buf, td->pages[i],
+ this->chipsize >> this->bbt_erase_shift,
+ td, offs);
+ if (res)
+ return res;
+ offs += this->chipsize >> this->bbt_erase_shift;
+ }
+ } else {
+ res = read_bbt(mtd, buf, td->pages[0],
+ mtd->size >> this->bbt_erase_shift, td, 0);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/* BBT marker is in the first page, no OOB */
+static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
+{
+ size_t retlen;
+ size_t len;
+
+ len = td->len;
+ if (td->options & NAND_BBT_VERSION)
+ len++;
+
+ return mtd_read(mtd, offs, len, &retlen, buf);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
+ */
+static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len)
+{
+ struct mtd_oob_ops ops;
+ int res, ret = 0;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+
+ while (len > 0) {
+ ops.datbuf = buf;
+ ops.len = min(len, (size_t)mtd->writesize);
+ ops.oobbuf = buf + ops.len;
+
+ res = mtd_read_oob(mtd, offs, &ops);
+ if (res) {
+ if (!mtd_is_bitflip_or_eccerr(res))
+ return res;
+ else if (mtd_is_eccerr(res) || !ret)
+ ret = res;
+ }
+
+ buf += mtd->oobsize + mtd->writesize;
+ len -= mtd->writesize;
+ offs += mtd->writesize;
+ }
+ return ret;
+}
+
+static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
+{
+ if (td->options & NAND_BBT_NO_OOB)
+ return scan_read_data(mtd, buf, offs, td);
+ else
+ return scan_read_oob(mtd, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+ uint8_t *buf, uint8_t *oob)
+{
+ struct mtd_oob_ops ops;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooboffs = 0;
+ ops.ooblen = mtd->oobsize;
+ ops.datbuf = buf;
+ ops.oobbuf = oob;
+ ops.len = len;
+
+ return mtd_write_oob(mtd, offs, &ops);
+}
+
+static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ u32 ver_offs = td->veroffs;
+
+ if (!(td->options & NAND_BBT_NO_OOB))
+ ver_offs += mtd->writesize;
+ return ver_offs;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ pr_info("Bad block table at page %d, version 0x%02X\n",
+ md->pages[0], md->version[0]);
+ }
+}
+
+/* Scan a given block partially */
+static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+ loff_t offs, uint8_t *buf, int numpages)
+{
+ struct mtd_oob_ops ops;
+ int j, ret;
+
+ ops.ooblen = mtd->oobsize;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ for (j = 0; j < numpages; j++) {
+ /*
+ * Read the full oob until read_oob is fixed to handle single
+ * byte reads for 16 bit buswidth.
+ */
+ ret = mtd_read_oob(mtd, offs, &ops);
+ /* Ignore ECC errors when checking for BBM */
+ if (ret && !mtd_is_bitflip_or_eccerr(ret))
+ return ret;
+
+ if (check_short_pattern(buf, bd))
+ return 1;
+
+ offs += mtd->writesize;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ * if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int i, numblocks, numpages;
+ int startblock;
+ loff_t from;
+
+ pr_info("Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ numpages = 2;
+ else
+ numpages = 1;
+
+ if (chip == -1) {
+ numblocks = mtd->size >> this->bbt_erase_shift;
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= this->numchips) {
+ pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, this->numchips);
+ return -EINVAL;
+ }
+ numblocks = this->chipsize >> this->bbt_erase_shift;
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ from = (loff_t)startblock << this->bbt_erase_shift;
+ }
+
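+ /*
+ * For NAND_BBT_SCANLASTPAGE chips the bad block marker lives in
+ * the last page(s) of each block, so start scanning there
+ * instead of at page 0.
+ */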
+ if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+ from += mtd->erasesize - (mtd->writesize * numpages);
+
+ for (i = startblock; i < numblocks; i++) {
+ int ret;
+
+ BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+ ret = scan_block_fast(mtd, bd, from, buf, numpages);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+ pr_warn("Bad eraseblock %d at 0x%012llx\n",
+ i, (unsigned long long)from);
+ mtd->ecc_stats.badblocks++;
+ }
+
+ from += (1 << this->bbt_erase_shift);
+ }
+ return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search starts always at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int i, chips;
+ int startblock, block, dir;
+ int scanlen = mtd->writesize + mtd->oobsize;
+ int bbtblocks;
+ int blocktopage = this->bbt_erase_shift - this->page_shift;
+
+ /* Search direction top -> down? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ startblock &= bbtblocks - 1;
+ } else {
+ chips = 1;
+ bbtblocks = mtd->size >> this->bbt_erase_shift;
+ }
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+
+ int actblock = startblock + dir * block;
+ loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+
+ /* Read first page */
+ scan_read(mtd, buf, offs, mtd->writesize, td);
+ if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+ td->pages[i] = actblock << blocktopage;
+ if (td->options & NAND_BBT_VERSION) {
+ offs = bbt_get_ver_offs(mtd, td);
+ td->version[i] = buf[offs];
+ }
+ break;
+ }
+ }
+ startblock += this->chipsize >> this->bbt_erase_shift;
+ }
+ /* Check, if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ pr_warn("Bad block table not found for chip %d\n", i);
+ else
+ pr_info("Bad block table found at page %d, version 0x%02X\n",
+ td->pages[i], td->version[i]);
+ }
+ return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt(mtd, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt(mtd, buf, md);
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+ int chipsel)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct erase_info einfo;
+ int i, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, pageoffs, ooboffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+ struct mtd_oob_ops ops;
+
+ ops.ooblen = mtd->oobsize;
+ ops.ooboffs = 0;
+ ops.datbuf = NULL;
+ ops.mode = MTD_OPS_PLACE_OOB;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ /* Full device write or specific chip? */
+ if (chipsel == -1) {
+ nrchips = this->numchips;
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+ /*
+		 * There was already a version of the table, so reuse that
+		 * page. This applies to absolute placement too, as we have
+		 * the page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /*
+ * Automatic placement of the bad block table. Search direction
+ * top -> down?
+ */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check, if the block is bad */
+ switch (bbt_get_entry(this, block)) {
+ case BBT_BLOCK_WORN:
+ case BBT_BLOCK_FACTORY_BAD:
+ continue;
+ }
+ page = block <<
+ (this->bbt_erase_shift - this->page_shift);
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ pr_err("No space left to write bad block table\n");
+ return -ENOSPC;
+ write:
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ msk[2] = ~rcode;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x01;
+ break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+ msk[3] = 0x03;
+ break;
+ case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+ msk[3] = 0x0f;
+ break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+ msk[3] = 0xff;
+ break;
+ default: return -EINVAL;
+ }
+
+ to = ((loff_t)page) << this->page_shift;
+
+ /* Must we save the block contents? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */
+ to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
+ len = 1 << this->bbt_erase_shift;
+ res = mtd_read(mtd, to, len, &retlen, buf);
+ if (res < 0) {
+ if (retlen != len) {
+ pr_info("nand_bbt: error reading block for writing the bad block table\n");
+ return res;
+ }
+ pr_warn("nand_bbt: ECC error while reading block for writing bad block table\n");
+ }
+ /* Read oob data */
+ ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+ ops.oobbuf = &buf[len];
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+ if (res < 0 || ops.oobretlen != ops.ooblen)
+ goto outerr;
+
+ /* Calc the byte offset in the buffer */
+ pageoffs = page - (int)(to >> this->page_shift);
+ offs = pageoffs << this->page_shift;
+ /* Preset the bbt area with 0xff */
+ memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ ooboffs = len + (pageoffs * mtd->oobsize);
+
+ } else if (td->options & NAND_BBT_NO_OOB) {
+ ooboffs = 0;
+ offs = td->len;
+ /* The version byte */
+ if (td->options & NAND_BBT_VERSION)
+ offs++;
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ len += offs;
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+ memset(buf, 0xff, len);
+			/* Pattern is located at the beginning of the first page */
+ memcpy(buf, td->pattern, td->len);
+ } else {
+ /* Calc length */
+ len = (size_t)(numblocks >> sft);
+ /* Make it page aligned! */
+ len = ALIGN(len, mtd->writesize);
+ /* Preset the buffer with 0xff */
+			memset(buf, 0xff, len +
+			       (len >> this->page_shift) * mtd->oobsize);
+ offs = 0;
+ ooboffs = len;
+ /* Pattern is located in oob area of first page */
+ memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+ }
+
+ if (td->options & NAND_BBT_VERSION)
+ buf[ooboffs + td->veroffs] = td->version[chip];
+
+ /* Walk through the memory table */
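+		/*
+		 * The flash image was preset to 0xff (all blocks good, all
+		 * bits set). Each in-memory entry clears bits via msk[]: in
+		 * the 2-bit encoding a worn block ends up as 0b10, a factory
+		 * bad block as 0b00, and a reserved block as the optional
+		 * reserved_block_code.
+		 */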
+ for (i = 0; i < numblocks; i++) {
+ uint8_t dat;
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ dat = bbt_get_entry(this, chip * numblocks + i);
+ /* Do not store the reserved bbt blocks! */
+ buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
+ }
+
+ memset(&einfo, 0, sizeof(einfo));
+ einfo.mtd = mtd;
+ einfo.addr = to;
+ einfo.len = 1 << this->bbt_erase_shift;
+ res = nand_erase_nand(mtd, &einfo, 1);
+ if (res < 0)
+ goto outerr;
+
+ res = scan_write_bbt(mtd, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ? NULL :
+ &buf[len]);
+ if (res < 0)
+ goto outerr;
+
+ pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+ (unsigned long long)to, td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+
+ outerr:
+ pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+ return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
+static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ return create_bbt(mtd, this->buffers->databuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version nr. of one table is less than the other.
+ */
+static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, create, chipsel, res, res2;
+ struct nand_chip *this = mtd_to_nand(mtd);
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = this->numchips;
+ else
+ chips = 1;
+
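+	/*
+	 * writeops is a bitmask of the tables to (re)write: bit 0 selects
+	 * the main table, bit 1 the mirror table.
+	 */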
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ create = 0;
+ rd = NULL;
+ rd2 = NULL;
+ res = res2 = 0;
+ /* Per chip or per device? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ create = 1;
+ writeops = 0x03;
+ } else if (td->pages[i] == -1) {
+ rd = md;
+ writeops = 0x01;
+ } else if (md->pages[i] == -1) {
+ rd = td;
+ writeops = 0x02;
+ } else if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ } else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
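+				/*
+				 * Signed 8-bit delta, so a version wrap
+				 * (0xff -> 0x00) still selects the newer
+				 * table.
+				 */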
+ rd = td;
+ writeops = 0x02;
+ } else {
+ rd = md;
+ writeops = 0x01;
+ }
+ } else {
+ if (td->pages[i] == -1) {
+ create = 1;
+ writeops = 0x01;
+ } else {
+ rd = td;
+ }
+ }
+
+ if (create) {
+ /* Create the bad block table by scanning the device? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+ create_bbt(mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+ }
+
+ /* Read back first? */
+ if (rd) {
+ res = read_abs_bbt(mtd, buf, rd, chipsel);
+ if (mtd_is_eccerr(res)) {
+ /* Mark table as invalid */
+ rd->pages[i] = -1;
+ rd->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+ /* If they weren't versioned, read both */
+ if (rd2) {
+ res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ if (mtd_is_eccerr(res2)) {
+ /* Mark table as invalid */
+ rd2->pages[i] = -1;
+ rd2->version[i] = 0;
+ i--;
+ continue;
+ }
+ }
+
+ /* Scrub the flash table(s)? */
+ if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+ writeops = 0x03;
+
+ /* Update version numbers before writing */
+ if (md) {
+ td->version[i] = max(td->version[i], md->version[i]);
+ md->version[i] = td->version[i];
+ }
+
+ /* Write the bad block table to the device? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as reserved (mark 0x02,
+ * BBT_BLOCK_RESERVED) to prevent accidental erasures / writes.
+ */
+static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ } else {
+ chips = 1;
+ nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ }
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1)
+ continue;
+ block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if ((oldval != BBT_BLOCK_RESERVED) &&
+ td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)block <<
+ this->bbt_erase_shift);
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = bbt_get_entry(this, block);
+ bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+ if (oldval != BBT_BLOCK_RESERVED)
+ update = 1;
+ block++;
+ }
+ /*
+ * If we want reserved blocks to be recorded to flash, and some
+ * new ones have been marked, then we need to update the stored
+ * bbts. This should only happen once.
+ */
+ if (update && td->reserved_block_code)
+ nand_update_bbt(mtd, (loff_t)(block - 1) <<
+ this->bbt_erase_shift);
+ }
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @mtd: MTD device structure
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u32 pattern_len;
+ u32 bits;
+ u32 table_size;
+
+ if (!bd)
+ return;
+
+ pattern_len = bd->len;
+ bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+ BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+ !(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!bits);
+
+ if (bd->options & NAND_BBT_VERSION)
+ pattern_len++;
+
+ if (bd->options & NAND_BBT_NO_OOB) {
+ BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+ BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+ BUG_ON(bd->offs);
+ if (bd->options & NAND_BBT_VERSION)
+ BUG_ON(bd->veroffs != bd->len);
+ BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+ }
+
+ if (bd->options & NAND_BBT_PERCHIP)
+ table_size = this->chipsize >> this->bbt_erase_shift;
+ else
+ table_size = mtd->size >> this->bbt_erase_shift;
+ table_size >>= 3;
+ table_size *= bits;
+ if (bd->options & NAND_BBT_NO_OOB)
+ table_size += pattern_len;
+ BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks whether bad block table(s) are already available. If
+ * not, it scans the device for manufacturer marked good / bad blocks and
+ * writes the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
+static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int len, res;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
+ /*
+	 * Allocate memory (2 bits per block) and clear the memory bad block
+ * table.
+ */
+ this->bbt = kzalloc(len, GFP_KERNEL);
+ if (!this->bbt)
+ return -ENOMEM;
+
+ /*
+	 * If no primary table descriptor is given, scan the device to build a
+ * memory based bad block table.
+ */
+ if (!td) {
+		res = nand_memory_bbt(mtd, bd);
+		if (res) {
+ pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
+ goto err;
+ }
+ return 0;
+ }
+ verify_bbt_descr(mtd, td);
+ verify_bbt_descr(mtd, md);
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = vmalloc(len);
+ if (!buf) {
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* Is the bbt at a given page? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ read_abs_bbts(mtd, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ search_read_bbts(mtd, buf, td, md);
+ }
+
+ res = check_create(mtd, buf, bd);
+ if (res)
+ goto err;
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region(mtd, td);
+ if (md)
+ mark_bbt_region(mtd, md);
+
+ vfree(buf);
+ return 0;
+
+err:
+ kfree(this->bbt);
+ this->bbt = NULL;
+ return res;
+}
+
+/**
+ * nand_update_bbt - update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(mtd, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(mtd, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
+/*
+ * Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Generic flash bbt descriptors */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+ | NAND_BBT_NO_OOB,
+ .len = 4,
+ .veroffs = 4,
+ .maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+ .pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int nand_create_badblock_pattern(struct nand_chip *this)
+{
+ struct nand_bbt_descr *bd;
+ if (this->badblock_pattern) {
+ pr_warn("Bad block pattern already allocated; not replacing\n");
+ return -EINVAL;
+ }
+ bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+ if (!bd)
+ return -ENOMEM;
+ bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+ bd->offs = this->badblockpos;
+ bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ bd->pattern = scan_ff_pattern;
+ bd->options |= NAND_BBT_DYNAMICSTRUCT;
+ this->badblock_pattern = bd;
+ return 0;
+}
+
+/**
+ * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
+int nand_default_bbt(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int ret;
+
+ /* Is a flash based bad block table requested? */
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ if (this->bbt_options & NAND_BBT_NO_OOB) {
+ this->bbt_td = &bbt_main_no_oob_descr;
+ this->bbt_md = &bbt_mirror_no_oob_descr;
+ } else {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ }
+
+ if (!this->badblock_pattern) {
+ ret = nand_create_badblock_pattern(this);
+ if (ret)
+ return ret;
+ }
+
+ return nand_scan_bbt(mtd, this->badblock_pattern);
+}
+
+/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ */
+int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int block;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int block, res;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ res = bbt_get_entry(this, block);
+
+ pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int)offs, block, res);
+
+ switch (res) {
+ case BBT_BLOCK_GOOD:
+ return 0;
+ case BBT_BLOCK_WORN:
+ return 1;
+ case BBT_BLOCK_RESERVED:
+ return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int block, ret = 0;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+
+ /* Mark bad block in memory */
+ bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+ /* Update flash-based bad block table */
+ if (this->bbt_options & NAND_BBT_USE_FLASH)
+ ret = nand_update_bbt(mtd, offs);
+
+ return ret;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_bch.c b/roms/u-boot/drivers/mtd/nand/raw/nand_bch.c
new file mode 100644
index 000000000..734d1c620
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_bch.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ */
+
+#include <common.h>
+#include <log.h>
+#include <dm/devres.h>
+/*#include <asm/io.h>*/
+#include <linux/types.h>
+
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/bch.h>
+#include <malloc.h>
+
+/**
+ * struct nand_bch_control - private NAND BCH control structure
+ * @bch: BCH control structure
+ * @ecclayout: private ecc layout for this BCH configuration
+ * @errloc: error location array
+ * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_bch_control {
+ struct bch_control *bch;
+ struct nand_ecclayout ecclayout;
+ unsigned int *errloc;
+ unsigned char *eccmask;
+};
+
+/**
+ * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
+ * @mtd: MTD block structure
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+ unsigned char *code)
+{
+ const struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int i;
+
+ memset(code, 0, chip->ecc.bytes);
+ encode_bch(nbc->bch, buf, chip->ecc.size, code);
+
+ /* apply mask so that an erased page is a valid codeword */
+ for (i = 0; i < chip->ecc.bytes; i++)
+ code[i] ^= nbc->eccmask[i];
+
+ return 0;
+}
+
+/**
+ * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct bit errors for a data byte block
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ const struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_bch_control *nbc = chip->ecc.priv;
+ unsigned int *errloc = nbc->errloc;
+ int i, count;
+
+ count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+ NULL, errloc);
+ if (count > 0) {
+ for (i = 0; i < count; i++) {
+ if (errloc[i] < (chip->ecc.size*8))
+ /* error is located in data, correct it */
+ buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+ /* else error in ecc, no action needed */
+
+ pr_debug("%s: corrected bitflip %u\n",
+ __func__, errloc[i]);
+ }
+ } else if (count < 0) {
+ printk(KERN_ERR "ecc unrecoverable error\n");
+ count = -EBADMSG;
+ }
+ return count;
+}
+
+/**
+ * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
+ * @mtd: MTD block structure
+ *
+ * Returns:
+ * a pointer to a new NAND BCH control structure, or NULL upon failure
+ *
+ * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
+ * are used to compute BCH parameters m (Galois field order) and t (error
+ * correction capability). @eccbytes should be equal to the number of bytes
+ * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
+ * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
+ */
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ unsigned int m, t, eccsteps, i;
+ struct nand_ecclayout *layout = nand->ecc.layout;
+ struct nand_bch_control *nbc = NULL;
+ unsigned char *erased_page;
+ unsigned int eccsize = nand->ecc.size;
+ unsigned int eccbytes = nand->ecc.bytes;
+ unsigned int eccstrength = nand->ecc.strength;
+
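+	/*
+	 * If only a strength was supplied, derive the ECC byte count: each
+	 * correctable bit error costs m = fls(8 * eccsize) bits, so t
+	 * errors need ceil(m * t / 8) bytes.
+	 */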
+ if (!eccbytes && eccstrength) {
+ eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
+ nand->ecc.bytes = eccbytes;
+ }
+
+ if (!eccsize || !eccbytes) {
+ printk(KERN_WARNING "ecc parameters not supplied\n");
+ goto fail;
+ }
+
+ m = fls(1+8*eccsize);
+ t = (eccbytes*8)/m;
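+	/*
+	 * Worked example for the common 4-bit/512-byte case: eccsize = 512
+	 * gives m = fls(1 + 4096) = 13, and eccbytes = 7 gives
+	 * t = 56 / 13 = 4 correctable bit errors per ECC step.
+	 */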
+
+ nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
+ if (!nbc)
+ goto fail;
+
+ nbc->bch = init_bch(m, t, 0);
+ if (!nbc->bch)
+ goto fail;
+
+ /* verify that eccbytes has the expected value */
+ if (nbc->bch->ecc_bytes != eccbytes) {
+ printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
+ eccbytes, nbc->bch->ecc_bytes);
+ goto fail;
+ }
+
+ eccsteps = mtd->writesize/eccsize;
+
+ /* if no ecc placement scheme was provided, build one */
+ if (!layout) {
+
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ printk(KERN_WARNING "must provide an oob scheme for "
+ "oobsize %d\n", mtd->oobsize);
+ goto fail;
+ }
+
+ layout = &nbc->ecclayout;
+ layout->eccbytes = eccsteps*eccbytes;
+
+ /* reserve 2 bytes for bad block marker */
+ if (layout->eccbytes+2 > mtd->oobsize) {
+ printk(KERN_WARNING "no suitable oob scheme available "
+ "for oobsize %d eccbytes %u\n", mtd->oobsize,
+ eccbytes);
+ goto fail;
+ }
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+
+ nand->ecc.layout = layout;
+ }
+
+ /* sanity checks */
+ if (8*(eccsize+eccbytes) >= (1 << m)) {
+ printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
+ goto fail;
+ }
+ if (layout->eccbytes != (eccsteps*eccbytes)) {
+ printk(KERN_WARNING "invalid ecc layout\n");
+ goto fail;
+ }
+
+ nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
+ nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+ if (!nbc->eccmask || !nbc->errloc)
+ goto fail;
+ /*
+ * compute and store the inverted ecc of an erased ecc block
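+	 * (data and ECC bytes all 0xff). XORing this mask into every
+	 * computed ECC (see nand_bch_calculate_ecc) makes such an erased
+	 * page decode as a valid codeword.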
+ */
+ erased_page = kmalloc(eccsize, GFP_KERNEL);
+ if (!erased_page)
+ goto fail;
+
+ memset(erased_page, 0xff, eccsize);
+ memset(nbc->eccmask, 0, eccbytes);
+ encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+ kfree(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ nbc->eccmask[i] ^= 0xff;
+
+ if (!eccstrength)
+ nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
+
+ return nbc;
+fail:
+ nand_bch_free(nbc);
+ return NULL;
+}
+
+/**
+ * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
+ * @nbc: NAND BCH control structure
+ */
+void nand_bch_free(struct nand_bch_control *nbc)
+{
+ if (nbc) {
+ free_bch(nbc->bch);
+ kfree(nbc->errloc);
+ kfree(nbc->eccmask);
+ kfree(nbc);
+ }
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_ecc.c b/roms/u-boot/drivers/mtd/nand/raw/nand_ecc.c
new file mode 100644
index 000000000..2bc329be1
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_ecc.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This file contains an ECC algorithm from Toshiba that detects and
+ * corrects 1 bit errors in a 256 byte block of data.
+ *
+ * drivers/mtd/nand/raw/nand_ecc.c
+ *
+ * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
+ * Toshiba America Electronics Components, Inc.
+ *
+ * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * As a special exception, if other files instantiate templates or use
+ * macros or inline functions from these files, or you compile these
+ * files and link them with other works to produce a work based on these
+ * files, these files do not by themselves cause the resulting work to be
+ * covered by the GNU General Public License. However the source code for
+ * these files must still be made available in accordance with section (3)
+ * of the GNU General Public License.
+ *
+ * This exception does not invalidate any other reasons why a work based on
+ * this file might be covered by the GNU General Public License.
+ */
+
+#include <common.h>
+
+#include <linux/errno.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_ecc.h>
+
+/*
+ * NAND-SPL has no software ECC for now, so don't include nand_calculate_ecc();
+ * only nand_correct_data() is needed
+ */
+
+#if !defined(CONFIG_NAND_SPL) || defined(CONFIG_SPL_NAND_SOFTECC)
+/*
+ * Pre-calculated 256-way 1 byte column parity
+ */
+static const u_char nand_ecc_precalc_table[] = {
+ 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
+ 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
+ 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
+ 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
+ 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
+ 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
+ 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
+ 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
+ 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
+ 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
+ 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
+ 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
+ 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
+ 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
+ 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
+ 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00
+};
+
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
+ * @mtd: MTD block structure
+ * @dat: raw data
+ * @ecc_code: buffer for ECC
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ uint8_t idx, reg1, reg2, reg3, tmp1, tmp2;
+ int i;
+
+ /* Initialize variables */
+ reg1 = reg2 = reg3 = 0;
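+	/*
+	 * reg1 collects the column parity bits CP0-CP5 from the lookup
+	 * table; reg2/reg3 collect the inverted and non-inverted line
+	 * parity, built from the indices of bytes with odd overall parity.
+	 */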
+
+ /* Build up column parity */
+	for (i = 0; i < 256; i++) {
+ /* Get CP0 - CP5 from table */
+ idx = nand_ecc_precalc_table[*dat++];
+ reg1 ^= (idx & 0x3f);
+
+ /* All bit XOR = 1 ? */
+ if (idx & 0x40) {
+ reg3 ^= (uint8_t) i;
+ reg2 ^= ~((uint8_t) i);
+ }
+ }
+
+ /* Create non-inverted ECC code from line parity */
+ tmp1 = (reg3 & 0x80) >> 0; /* B7 -> B7 */
+ tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */
+ tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */
+ tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */
+ tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */
+ tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */
+ tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */
+ tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */
+
+ tmp2 = (reg3 & 0x08) << 4; /* B3 -> B7 */
+ tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */
+ tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */
+ tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */
+ tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */
+ tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */
+ tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */
+	tmp2 |= (reg2 & 0x01) << 0; /* B0 -> B0 */
+
+ /* Calculate final ECC code */
+ ecc_code[0] = ~tmp1;
+ ecc_code[1] = ~tmp2;
+ ecc_code[2] = ((~reg1) << 2) | 0x03;
+
+ return 0;
+}
+#endif /* !CONFIG_NAND_SPL || CONFIG_SPL_NAND_SOFTECC */
+
+static inline int countbits(uint32_t byte)
+{
+ int res = 0;
+
+	for (; byte; byte >>= 1)
+ res += byte & 0x01;
+ return res;
+}
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @dat: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ uint8_t s0, s1, s2;
+
+ s1 = calc_ecc[0] ^ read_ecc[0];
+ s0 = calc_ecc[1] ^ read_ecc[1];
+ s2 = calc_ecc[2] ^ read_ecc[2];
+ if ((s0 | s1 | s2) == 0)
+ return 0;
+
+ /* Check for a single bit error */
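+	/*
+	 * For a genuine single-bit data error every (parity, parity') pair
+	 * in the syndrome disagrees, so each pair XORs to 1: s0/s1 must
+	 * match 0x55 on all pairs, s2 on all but its two unused low bits
+	 * (mask 0x54).
+	 */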
+	if (((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
+	    ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
+	    ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
+
+ uint32_t byteoffs, bitnum;
+
+ byteoffs = (s1 << 0) & 0x80;
+ byteoffs |= (s1 << 1) & 0x40;
+ byteoffs |= (s1 << 2) & 0x20;
+ byteoffs |= (s1 << 3) & 0x10;
+
+ byteoffs |= (s0 >> 4) & 0x08;
+ byteoffs |= (s0 >> 3) & 0x04;
+ byteoffs |= (s0 >> 2) & 0x02;
+ byteoffs |= (s0 >> 1) & 0x01;
+
+ bitnum = (s2 >> 5) & 0x04;
+ bitnum |= (s2 >> 4) & 0x02;
+ bitnum |= (s2 >> 3) & 0x01;
+
+ dat[byteoffs] ^= (1 << bitnum);
+
+ return 1;
+ }
+
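+	/*
+	 * A syndrome with exactly one set bit means the flipped bit is in
+	 * the ECC bytes themselves; the data is intact.
+	 */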
+	if (countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 << 16)) == 1)
+ return 1;
+
+ return -EBADMSG;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_ids.c b/roms/u-boot/drivers/mtd/nand/raw/nand_ids.c
new file mode 100644
index 000000000..3104f879f
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_ids.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <common.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/sizes.h>
+
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+
+/*
+ * The chip ID list:
+ * name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+#ifdef CONFIG_MTD_NAND_MUSEUM_IDS
+ LEGACY_ID_NAND("NAND 1MiB 5V 8-bit", 0x6e, 1, SZ_4K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 2MiB 5V 8-bit", 0x64, 2, SZ_4K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 1MiB 3,3V 8-bit", 0xe8, 1, SZ_4K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 1MiB 3,3V 8-bit", 0xec, 1, SZ_4K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 2MiB 3,3V 8-bit", 0xea, 2, SZ_4K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xd5, 4, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xe6, 8, SZ_8K, SP_OPTIONS),
+#endif
+ /*
+	 * Some incompatible NAND chips share device IDs and so must be
+ * listed by full ID. We list them first so that we can easily identify
+ * the most specific match.
+ */
+ {"TC58NVG0S3E 1G 3.3V 8-bit",
+ { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+ SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
+ 2 },
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG2S0H 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
+ { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG5D2 32G 3.3V 8-bit",
+ { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"TC58NVG6D2 64G 3.3V 8-bit",
+ { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNRGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+ {"H27UBG8T2BTR-BC 32G 3.3V 8-bit",
+ { .id = {0xad, 0xd7, 0x94, 0xda, 0x74, 0xc3} },
+ SZ_8K, SZ_4K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+ NAND_ECC_INFO(40, SZ_1K), 0 },
+ {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
+ { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
+ SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+ NAND_ECC_INFO(40, SZ_1K), 4 },
+ {"H27QCG8T2E5R‐BCF 64G 3.3V 8-bit",
+ { .id = {0xad, 0xde, 0x14, 0xa7, 0x42, 0x4a} },
+ SZ_16K, SZ_8K, SZ_4M, NAND_NEED_SCRAMBLING, 6, 1664,
+ NAND_ECC_INFO(56, SZ_1K), 1 },
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
+
+ /*
+ * These are the new chips with large page size. Their page size and
+ * eraseblock size are determined from the extended ID bytes.
+ */
+
+ /* 512 Megabit */
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
+
+ /* 1 Gigabit */
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
+
+ /* 2 Gigabit */
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
+
+ /* 4 Gigabit */
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
+
+ /* 8 Gigabit */
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
+
+ /* 16 Gigabit */
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
+
+ /* 32 Gigabit */
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
+
+ /* 64 Gigabit */
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
+
+ /* 128 Gigabit */
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
+
+ /* 256 Gigabit */
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
+
+ /* 512 Gigabit */
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
+
+ {NULL}
+};
+
+/* Manufacturer IDs */
+struct nand_manufacturers nand_manuf_ids[] = {
+ {NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_SAMSUNG, "Samsung"},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_HYNIX, "Hynix"},
+ {NAND_MFR_MICRON, "Micron"},
+ {NAND_MFR_AMD, "AMD/Spansion"},
+ {NAND_MFR_MACRONIX, "Macronix"},
+ {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_SANDISK, "SanDisk"},
+ {NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_ATO, "ATO"},
+ {0x0, "Unknown"}
+};
+
+EXPORT_SYMBOL(nand_manuf_ids);
+EXPORT_SYMBOL(nand_flash_ids);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_plat.c b/roms/u-boot/drivers/mtd/nand/raw/nand_plat.c
new file mode 100644
index 000000000..335c3e347
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_plat.c
@@ -0,0 +1,64 @@
+/*
+ * Genericish driver for memory mapped NAND devices
+ *
+ * Copyright (c) 2006-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+/* Your board must implement the following macros:
+ * NAND_PLAT_WRITE_CMD(chip, cmd)
+ * NAND_PLAT_WRITE_ADR(chip, cmd)
+ * NAND_PLAT_INIT()
+ *
+ * It may also implement the following:
+ * NAND_PLAT_DEV_READY(chip)
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#ifdef NAND_PLAT_GPIO_DEV_READY
+# include <asm/gpio.h>
+# define NAND_PLAT_DEV_READY(chip) gpio_get_value(NAND_PLAT_GPIO_DEV_READY)
+#endif
+
+#include <nand.h>
+
+static void plat_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ NAND_PLAT_WRITE_CMD(this, cmd);
+ else
+ NAND_PLAT_WRITE_ADR(this, cmd);
+}
+
+#ifdef NAND_PLAT_DEV_READY
+static int plat_dev_ready(struct mtd_info *mtd)
+{
+ return NAND_PLAT_DEV_READY((struct nand_chip *)mtd_to_nand(mtd));
+}
+#else
+# define plat_dev_ready NULL
+#endif
+
+int board_nand_init(struct nand_chip *nand)
+{
+#ifdef NAND_PLAT_GPIO_DEV_READY
+ gpio_request(NAND_PLAT_GPIO_DEV_READY, "nand-plat");
+ gpio_direction_input(NAND_PLAT_GPIO_DEV_READY);
+#endif
+
+#ifdef NAND_PLAT_INIT
+ NAND_PLAT_INIT();
+#endif
+
+ nand->cmd_ctrl = plat_cmd_ctrl;
+ nand->dev_ready = plat_dev_ready;
+ nand->ecc.mode = NAND_ECC_SOFT;
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_spl_load.c b/roms/u-boot/drivers/mtd/nand/raw/nand_spl_load.c
new file mode 100644
index 000000000..ecd373e05
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_spl_load.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2011
+ * Heiko Schocher, DENX Software Engineering, hs@denx.de.
+ */
+
+#include <common.h>
+#include <nand.h>
+
+/*
+ * The main entry for NAND booting. It's necessary that SDRAM is already
+ * configured and available since this code loads the main U-Boot image
+ * from NAND into SDRAM and starts it from there.
+ */
+void nand_boot(void)
+{
+ __attribute__((noreturn)) void (*uboot)(void);
+
+ /*
+ * Load U-Boot image from NAND into RAM
+ */
+ nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
+ CONFIG_SYS_NAND_U_BOOT_SIZE,
+ (void *)CONFIG_SYS_NAND_U_BOOT_DST);
+
+#ifdef CONFIG_NAND_ENV_DST
+ nand_spl_load_image(CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE,
+ (void *)CONFIG_NAND_ENV_DST);
+
+#ifdef CONFIG_ENV_OFFSET_REDUND
+ nand_spl_load_image(CONFIG_ENV_OFFSET_REDUND, CONFIG_ENV_SIZE,
+ (void *)CONFIG_NAND_ENV_DST + CONFIG_ENV_SIZE);
+#endif
+#endif
+
+ /*
+ * Jump to U-Boot image
+ */
+ uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START;
+ (*uboot)();
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_spl_loaders.c b/roms/u-boot/drivers/mtd/nand/raw/nand_spl_loaders.c
new file mode 100644
index 000000000..4befc75c0
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_spl_loaders.c
@@ -0,0 +1,132 @@
+int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
+{
+ unsigned int block, lastblock;
+ unsigned int page, page_offset;
+
+ /* offs has to be aligned to a page address! */
+ block = offs / CONFIG_SYS_NAND_BLOCK_SIZE;
+ lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE;
+ page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE;
+ page_offset = offs % CONFIG_SYS_NAND_PAGE_SIZE;
+
+ while (block <= lastblock) {
+		if (!nand_is_bad_block(block)) {
+ while (page < CONFIG_SYS_NAND_PAGE_COUNT) {
+ nand_read_page(block, page, dst);
+ /*
+				 * When offs is not page aligned, the bytes
+				 * before offs are read into dst as well.
+				 * Move the image down so that its first
+				 * byte lands at dst.
+ */
+ if (unlikely(page_offset)) {
+ memmove(dst, dst + page_offset,
+ CONFIG_SYS_NAND_PAGE_SIZE);
+ dst = (void *)((int)dst - page_offset);
+ page_offset = 0;
+ }
+ dst += CONFIG_SYS_NAND_PAGE_SIZE;
+ page++;
+ }
+
+ page = 0;
+		} else {
+			/* Skip bad blocks */
+			lastblock++;
+ }
+
+ block++;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_spl_adjust_offset - Adjust offset from a starting sector
+ * @sector: Address of the sector
+ * @offs: Offset starting from @sector
+ *
+ * If one or more bad blocks are in the address space between @sector
+ * and @sector + @offs, @offs is increased by the NAND block size for
+ * each bad block found.
+ */
+u32 nand_spl_adjust_offset(u32 sector, u32 offs)
+{
+ unsigned int block, lastblock;
+
+ block = sector / CONFIG_SYS_NAND_BLOCK_SIZE;
+ lastblock = (sector + offs) / CONFIG_SYS_NAND_BLOCK_SIZE;
+
+ while (block <= lastblock) {
+ if (nand_is_bad_block(block)) {
+ offs += CONFIG_SYS_NAND_BLOCK_SIZE;
+ lastblock++;
+ }
+
+ block++;
+ }
+
+ return offs;
+}
+
+#ifdef CONFIG_SPL_UBI
+/*
+ * Temporary storage for non NAND page aligned and non NAND page sized
+ * reads. Note: This does not support runtime detected FLASH yet, but
+ * that should be reasonably easy to fix by making the buffer large
+ * enough :)
+ */
+static u8 scratch_buf[CONFIG_SYS_NAND_PAGE_SIZE];
+
+/**
+ * nand_spl_read_block - Read data from physical eraseblock into a buffer
+ * @block: Number of the physical eraseblock
+ * @offset: Data offset from the start of @block
+ * @len: Data size to read
+ * @dst: Address of the destination buffer
+ *
+ * This could be further optimized if we'd have a subpage read
+ * function in the simple code. On NAND which allows subpage reads
+ * this would spare quite some time to readout e.g. the VID header of
+ * UBI.
+ *
+ * Notes:
+ * @offset + @len are not allowed to be larger than a physical
+ * erase block. No sanity check done for simplicity reasons.
+ *
+ * To support runtime detected flash this needs to be extended by
+ * information about the actual flash geometry, but that's beyond the
+ * scope of this effort and for most applications where fast boot is
+ * required it is not an issue anyway.
+ */
+int nand_spl_read_block(int block, int offset, int len, void *dst)
+{
+ int page, read;
+
+ /* Calculate the page number */
+ page = offset / CONFIG_SYS_NAND_PAGE_SIZE;
+
+ /* Offset to the start of a flash page */
+ offset = offset % CONFIG_SYS_NAND_PAGE_SIZE;
+
+ while (len) {
+ /*
+ * Non page aligned reads go to the scratch buffer.
+ * Page aligned reads go directly to the destination.
+ */
+ if (offset || len < CONFIG_SYS_NAND_PAGE_SIZE) {
+ nand_read_page(block, page, scratch_buf);
+ read = min(len, CONFIG_SYS_NAND_PAGE_SIZE - offset);
+ memcpy(dst, scratch_buf + offset, read);
+ offset = 0;
+ } else {
+ nand_read_page(block, page, dst);
+ read = CONFIG_SYS_NAND_PAGE_SIZE;
+ }
+ page++;
+ len -= read;
+ dst += read;
+ }
+ return 0;
+}
+#endif
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_spl_simple.c b/roms/u-boot/drivers/mtd/nand/raw/nand_spl_simple.c
new file mode 100644
index 000000000..09e053541
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_spl_simple.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2006-2008
+ * Stefan Roese, DENX Software Engineering, sr@denx.de.
+ */
+
+#include <common.h>
+#include <nand.h>
+#include <asm/io.h>
+#include <linux/mtd/nand_ecc.h>
+
+static int nand_ecc_pos[] = CONFIG_SYS_NAND_ECCPOS;
+static struct mtd_info *mtd;
+static struct nand_chip nand_chip;
+
+#define ECCSTEPS (CONFIG_SYS_NAND_PAGE_SIZE / \
+ CONFIG_SYS_NAND_ECCSIZE)
+#define ECCTOTAL (ECCSTEPS * CONFIG_SYS_NAND_ECCBYTES)
+
+
+#if (CONFIG_SYS_NAND_PAGE_SIZE <= 512)
+/*
+ * NAND command for small page NAND devices (512)
+ */
+static int nand_command(int block, int page, uint32_t offs,
+ u8 cmd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT;
+
+ while (!this->dev_ready(mtd))
+ ;
+
+ /* Begin command latch cycle */
+ this->cmd_ctrl(mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ /* Set ALE and clear CLE to start address cycle */
+ /* Column address */
+ this->cmd_ctrl(mtd, offs, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+ this->cmd_ctrl(mtd, page_addr & 0xff, NAND_CTRL_ALE); /* A[16:9] */
+ this->cmd_ctrl(mtd, (page_addr >> 8) & 0xff,
+ NAND_CTRL_ALE); /* A[24:17] */
+#ifdef CONFIG_SYS_NAND_4_ADDR_CYCLE
+ /* One more address cycle for devices > 32MiB */
+ this->cmd_ctrl(mtd, (page_addr >> 16) & 0x0f,
+ NAND_CTRL_ALE); /* A[28:25] */
+#endif
+ /* Latch in address */
+ this->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Wait a while for the data to be ready
+ */
+ while (!this->dev_ready(mtd))
+ ;
+
+ return 0;
+}
+#else
+/*
+ * NAND command for large page NAND devices (2k)
+ */
+static int nand_command(int block, int page, uint32_t offs,
+ u8 cmd)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT;
+ void (*hwctrl)(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl) = this->cmd_ctrl;
+
+ while (!this->dev_ready(mtd))
+ ;
+
+ /* Emulate NAND_CMD_READOOB */
+ if (cmd == NAND_CMD_READOOB) {
+ offs += CONFIG_SYS_NAND_PAGE_SIZE;
+ cmd = NAND_CMD_READ0;
+ }
+
+ /* Shift the offset from byte addressing to word addressing. */
+ if ((this->options & NAND_BUSWIDTH_16) && !nand_opcode_8bits(cmd))
+ offs >>= 1;
+
+ /* Begin command latch cycle */
+ hwctrl(mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ /* Set ALE and clear CLE to start address cycle */
+ /* Column address */
+ hwctrl(mtd, offs & 0xff,
+ NAND_CTRL_ALE | NAND_CTRL_CHANGE); /* A[7:0] */
+ hwctrl(mtd, (offs >> 8) & 0xff, NAND_CTRL_ALE); /* A[11:9] */
+ /* Row address */
+ hwctrl(mtd, (page_addr & 0xff), NAND_CTRL_ALE); /* A[19:12] */
+ hwctrl(mtd, ((page_addr >> 8) & 0xff),
+ NAND_CTRL_ALE); /* A[27:20] */
+#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE
+ /* One more address cycle for devices > 128MiB */
+ hwctrl(mtd, (page_addr >> 16) & 0x0f,
+ NAND_CTRL_ALE); /* A[31:28] */
+#endif
+ /* Latch in address */
+ hwctrl(mtd, NAND_CMD_READSTART,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ hwctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Wait a while for the data to be ready
+ */
+ while (!this->dev_ready(mtd))
+ ;
+
+ return 0;
+}
+#endif
+
+static int nand_is_bad_block(int block)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u_char bb_data[2];
+
+ nand_command(block, 0, CONFIG_SYS_NAND_BAD_BLOCK_POS,
+ NAND_CMD_READOOB);
+
+ /*
+ * Read one byte (or two if it's a 16 bit chip).
+ */
+ if (this->options & NAND_BUSWIDTH_16) {
+ this->read_buf(mtd, bb_data, 2);
+ if (bb_data[0] != 0xff || bb_data[1] != 0xff)
+ return 1;
+ } else {
+ this->read_buf(mtd, bb_data, 1);
+ if (bb_data[0] != 0xff)
+ return 1;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_SYS_NAND_HW_ECC_OOBFIRST)
+static int nand_read_page(int block, int page, uchar *dst)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u_char ecc_calc[ECCTOTAL];
+ u_char ecc_code[ECCTOTAL];
+ u_char oob_data[CONFIG_SYS_NAND_OOBSIZE];
+ int i;
+ int eccsize = CONFIG_SYS_NAND_ECCSIZE;
+ int eccbytes = CONFIG_SYS_NAND_ECCBYTES;
+ int eccsteps = ECCSTEPS;
+ uint8_t *p = dst;
+
+ nand_command(block, page, 0, NAND_CMD_READOOB);
+ this->read_buf(mtd, oob_data, CONFIG_SYS_NAND_OOBSIZE);
+ nand_command(block, page, 0, NAND_CMD_READ0);
+
+ /* Pick the ECC bytes out of the oob data */
+ for (i = 0; i < ECCTOTAL; i++)
+ ecc_code[i] = oob_data[nand_ecc_pos[i]];
+
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ this->ecc.hwctl(mtd, NAND_ECC_READ);
+ this->read_buf(mtd, p, eccsize);
+ this->ecc.calculate(mtd, p, &ecc_calc[i]);
+ this->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ }
+
+ return 0;
+}
+#else
+static int nand_read_page(int block, int page, void *dst)
+{
+ struct nand_chip *this = mtd_to_nand(mtd);
+ u_char ecc_calc[ECCTOTAL];
+ u_char ecc_code[ECCTOTAL];
+ u_char oob_data[CONFIG_SYS_NAND_OOBSIZE];
+ int i;
+ int eccsize = CONFIG_SYS_NAND_ECCSIZE;
+ int eccbytes = CONFIG_SYS_NAND_ECCBYTES;
+ int eccsteps = ECCSTEPS;
+ uint8_t *p = dst;
+
+ nand_command(block, page, 0, NAND_CMD_READ0);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ if (this->ecc.mode != NAND_ECC_SOFT)
+ this->ecc.hwctl(mtd, NAND_ECC_READ);
+ this->read_buf(mtd, p, eccsize);
+ this->ecc.calculate(mtd, p, &ecc_calc[i]);
+ }
+ this->read_buf(mtd, oob_data, CONFIG_SYS_NAND_OOBSIZE);
+
+ /* Pick the ECC bytes out of the oob data */
+ for (i = 0; i < ECCTOTAL; i++)
+ ecc_code[i] = oob_data[nand_ecc_pos[i]];
+
+ eccsteps = ECCSTEPS;
+ p = dst;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		/*
+		 * No chance to do something with the possible error message
+		 * from correct_data(). We just hope that all possible errors
+		 * are corrected by this routine.
+		 */
+ this->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ }
+
+ return 0;
+}
+#endif
+
+/* nand_init() - initialize data to make nand usable by SPL */
+void nand_init(void)
+{
+ /*
+ * Init board specific nand support
+ */
+ mtd = nand_to_mtd(&nand_chip);
+ nand_chip.IO_ADDR_R = nand_chip.IO_ADDR_W =
+ (void __iomem *)CONFIG_SYS_NAND_BASE;
+ board_nand_init(&nand_chip);
+
+#ifdef CONFIG_SPL_NAND_SOFTECC
+ if (nand_chip.ecc.mode == NAND_ECC_SOFT) {
+ nand_chip.ecc.calculate = nand_calculate_ecc;
+ nand_chip.ecc.correct = nand_correct_data;
+ }
+#endif
+
+ if (nand_chip.select_chip)
+ nand_chip.select_chip(mtd, 0);
+}
+
+/* Unselect after operation */
+void nand_deselect(void)
+{
+ if (nand_chip.select_chip)
+ nand_chip.select_chip(mtd, -1);
+}
+
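+/*
+ * Illustrative only: a minimal sketch of the SPL flow built from the
+ * helpers above. nand_spl_load_image() comes from the file included
+ * below; the CONFIG_SYS_NAND_U_BOOT_* macros are the usual U-Boot
+ * config symbols for the load size and destination.
+ *
+ *	nand_init();
+ *	nand_spl_load_image(0, CONFIG_SYS_NAND_U_BOOT_SIZE,
+ *			    (void *)CONFIG_SYS_NAND_U_BOOT_DST);
+ *	nand_deselect();
+ */
+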
+#include "nand_spl_loaders.c"
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_timings.c b/roms/u-boot/drivers/mtd/nand/raw/nand_timings.c
new file mode 100644
index 000000000..e6aa79039
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_timings.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <common.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mtd/rawnand.h>
+
+static const struct nand_data_interface onfi_sdr_timings[] = {
+ /* Mode 0 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tADL_min = 400000,
+ .tALH_min = 20000,
+ .tALS_min = 50000,
+ .tAR_min = 25000,
+ .tCEA_max = 100000,
+ .tCEH_min = 20000,
+ .tCH_min = 20000,
+ .tCHZ_max = 100000,
+ .tCLH_min = 20000,
+ .tCLR_min = 20000,
+ .tCLS_min = 50000,
+ .tCOH_min = 0,
+ .tCS_min = 70000,
+ .tDH_min = 20000,
+ .tDS_min = 40000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 10000,
+ .tITC_max = 1000000,
+ .tRC_min = 100000,
+ .tREA_max = 40000,
+ .tREH_min = 30000,
+ .tRHOH_min = 0,
+ .tRHW_min = 200000,
+ .tRHZ_max = 200000,
+ .tRLOH_min = 0,
+ .tRP_min = 50000,
+ .tRR_min = 40000,
+ .tRST_max = 250000000000ULL,
+ .tWB_max = 200000,
+ .tWC_min = 100000,
+ .tWH_min = 30000,
+ .tWHR_min = 120000,
+ .tWP_min = 50000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 1 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 25000,
+ .tAR_min = 10000,
+ .tCEA_max = 45000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 25000,
+ .tCOH_min = 15000,
+ .tCS_min = 35000,
+ .tDH_min = 10000,
+ .tDS_min = 20000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 50000,
+ .tREA_max = 30000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 25000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 45000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 25000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 2 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 15000,
+ .tAR_min = 10000,
+ .tCEA_max = 30000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 15000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 15000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 35000,
+ .tREA_max = 25000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+			.tRLOH_min = 0,
+			.tRP_min = 17000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+ .tWC_min = 35000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 17000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 3 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 30000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 15000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 30000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 15000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 4 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 20000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 25000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 12000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 25000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 12000,
+ .tWW_min = 100000,
+ },
+ },
+ /* Mode 5 */
+ {
+ .type = NAND_SDR_IFACE,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 5000,
+ .tDS_min = 7000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 20000,
+ .tREA_max = 16000,
+ .tREH_min = 7000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 10000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 20000,
+ .tWH_min = 7000,
+ .tWHR_min = 80000,
+ .tWP_min = 10000,
+ .tWW_min = 100000,
+ },
+ },
+};
+
+/**
+ * onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
+ * timings according to the given ONFI timing mode
+ * @mode: ONFI timing mode
+ */
+const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
+{
+ if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
+ return ERR_PTR(-EINVAL);
+
+ return &onfi_sdr_timings[mode].timings.sdr;
+}
+EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
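+
+/*
+ * Illustrative only: a minimal sketch of how a controller driver might
+ * consume these timings; apply_cycles() is a hypothetical hook, and all
+ * values in the table are picoseconds.
+ *
+ *	const struct nand_sdr_timings *t =
+ *		onfi_async_timing_mode_to_sdr_timings(mode);
+ *	if (IS_ERR(t))
+ *		return PTR_ERR(t);
+ *	apply_cycles(t->tWC_min, t->tRC_min);
+ */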
+
+/**
+ * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * given ONFI mode
+ * @chip: The NAND chip
+ * @iface: The data interface to be initialized
+ * @type: The data interface type
+ * @timing_mode: The ONFI timing mode
+ */
+int onfi_init_data_interface(struct nand_chip *chip,
+ struct nand_data_interface *iface,
+ enum nand_data_interface_type type,
+ int timing_mode)
+{
+ if (type != NAND_SDR_IFACE)
+ return -EINVAL;
+
+ if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings))
+ return -EINVAL;
+
+ *iface = onfi_sdr_timings[timing_mode];
+
+ /*
+ * Initialize timings that cannot be deduced from timing mode:
+ * tR, tPROG, tCCS, ...
+	 * This information is part of the ONFI parameter page.
+ */
+ if (chip->onfi_version) {
+ struct nand_onfi_params *params = &chip->onfi_params;
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
+ timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
+ timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(onfi_init_data_interface);
+
+/**
+ * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
+ * data interface for mode 0. This is used as default timing after
+ * reset.
+ */
+const struct nand_data_interface *nand_get_default_data_interface(void)
+{
+ return &onfi_sdr_timings[0];
+}
+EXPORT_SYMBOL(nand_get_default_data_interface);
diff --git a/roms/u-boot/drivers/mtd/nand/raw/nand_util.c b/roms/u-boot/drivers/mtd/nand/raw/nand_util.c
new file mode 100644
index 000000000..00c3c6c41
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/nand_util.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/mtd/nand/raw/nand_util.c
+ *
+ * Copyright (C) 2006 by Weiss-Electronic GmbH.
+ * All rights reserved.
+ *
+ * @author: Guido Classen <clagix@gmail.com>
+ * @descr: NAND Flash support
+ * @references: borrowed heavily from Linux mtd-utils code:
+ * flash_eraseall.c by Arcom Control System Ltd
+ * nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com)
+ * and Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Copyright (C) 2008 Nokia Corporation: drop_ffs() function by
+ * Artem Bityutskiy <dedekind1@gmail.com> from mtd-utils
+ *
+ * Copyright 2010 Freescale Semiconductor
+ */
+
+#include <common.h>
+#include <command.h>
+#include <log.h>
+#include <watchdog.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <div64.h>
+#include <asm/cache.h>
+#include <dm/devres.h>
+
+#include <linux/errno.h>
+#include <linux/mtd/mtd.h>
+#include <nand.h>
+#include <jffs2/jffs2.h>
+
+typedef struct erase_info erase_info_t;
+typedef struct mtd_info mtd_info_t;
+
+/* support only for native endian JFFS2 */
+#define cpu_to_je16(x) (x)
+#define cpu_to_je32(x) (x)
+
+/**
+ * nand_erase_opts: - erase NAND flash with support for various options
+ * (jffs2 formatting)
+ *
+ * @param mtd nand mtd instance to erase
+ * @param opts options, @see struct nand_erase_options
+ * @return 0 in case of success
+ *
+ * This code is ported from flash_eraseall.c from Linux mtd utils by
+ * Arcom Control System Ltd.
+ */
+int nand_erase_opts(struct mtd_info *mtd,
+ const nand_erase_options_t *opts)
+{
+ struct jffs2_unknown_node cleanmarker;
+ erase_info_t erase;
+ unsigned long erase_length, erased_length; /* in blocks */
+ int result;
+ int percent_complete = -1;
+ const char *mtd_device = mtd->name;
+ struct mtd_oob_ops oob_opts;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if ((opts->offset & (mtd->erasesize - 1)) != 0) {
+ printf("Attempt to erase non block-aligned data\n");
+ return -1;
+ }
+
+ memset(&erase, 0, sizeof(erase));
+ memset(&oob_opts, 0, sizeof(oob_opts));
+
+ erase.mtd = mtd;
+ erase.len = mtd->erasesize;
+ erase.addr = opts->offset;
+ erase_length = lldiv(opts->length + mtd->erasesize - 1,
+ mtd->erasesize);
+
+ cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
+ cleanmarker.totlen = cpu_to_je32(8);
+
+	/*
+	 * The scrub option allows erasing bad blocks. To keep the internal
+	 * check in the erase() method from rejecting them, disable the bad
+	 * block table while erasing.
+	 */
+ if (opts->scrub) {
+ erase.scrub = opts->scrub;
+ /*
+ * We don't need the bad block table anymore...
+ * after scrub, there are no bad blocks left!
+ */
+		kfree(chip->bbt);
+		chip->bbt = NULL;
+ chip->options &= ~NAND_BBT_SCANNED;
+ }
+
+ for (erased_length = 0;
+ erased_length < erase_length;
+ erase.addr += mtd->erasesize) {
+
+ WATCHDOG_RESET();
+
+ if (opts->lim && (erase.addr >= (opts->offset + opts->lim))) {
+ puts("Size of erase exceeds limit\n");
+ return -EFBIG;
+ }
+ if (!opts->scrub) {
+ int ret = mtd_block_isbad(mtd, erase.addr);
+ if (ret > 0) {
+ if (!opts->quiet)
+ printf("\rSkipping bad block at "
+ "0x%08llx "
+ " \n",
+ erase.addr);
+
+ if (!opts->spread)
+ erased_length++;
+
+ continue;
+
+ } else if (ret < 0) {
+ printf("\n%s: MTD get bad block failed: %d\n",
+ mtd_device,
+ ret);
+ return -1;
+ }
+ }
+
+ erased_length++;
+
+ result = mtd_erase(mtd, &erase);
+ if (result != 0) {
+ printf("\n%s: MTD Erase failure: %d\n",
+ mtd_device, result);
+ continue;
+ }
+
+ /* format for JFFS2 ? */
+ if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) {
+ struct mtd_oob_ops ops;
+ ops.ooblen = 8;
+ ops.datbuf = NULL;
+ ops.oobbuf = (uint8_t *)&cleanmarker;
+ ops.ooboffs = 0;
+ ops.mode = MTD_OPS_AUTO_OOB;
+
+ result = mtd_write_oob(mtd, erase.addr, &ops);
+ if (result != 0) {
+ printf("\n%s: MTD writeoob failure: %d\n",
+ mtd_device, result);
+ continue;
+ }
+ }
+
+ if (!opts->quiet) {
+ unsigned long long n = erased_length * 100ULL;
+ int percent;
+
+ do_div(n, erase_length);
+ percent = (int)n;
+
+ /* output progress message only at whole percent
+ * steps to reduce the number of messages printed
+ * on (slow) serial consoles
+ */
+ if (percent != percent_complete) {
+ percent_complete = percent;
+
+ printf("\rErasing at 0x%llx -- %3d%% complete.",
+ erase.addr, percent);
+
+ if (opts->jffs2 && result == 0)
+ printf(" Cleanmarker written at 0x%llx.",
+ erase.addr);
+ }
+ }
+ }
+ if (!opts->quiet)
+ printf("\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK
+
+#define NAND_CMD_LOCK_TIGHT 0x2c
+#define NAND_CMD_LOCK_STATUS 0x7a
+
+/******************************************************************************
+ * Support for locking / unlocking operations of some NAND devices
+ *****************************************************************************/
+
+/**
+ * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT
+ * state
+ *
+ * @param mtd nand mtd instance
+ * @param tight bring device in lock tight mode
+ *
+ * @return 0 on success, -1 in case of error
+ *
+ * The lock / lock-tight command only applies to the whole chip. To leave
+ * some parts of the chip locked and others unlocked, use the following
+ * sequence:
+ *
+ * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin)
+ * - Call nand_unlock() once for each consecutive area to be unlocked
+ * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1)
+ *
+ * If the device is in the lock-tight state, software can't change the
+ * current lock/unlock state of any page; nand_lock() / nand_unlock()
+ * calls will fail. It is only possible to leave the lock-tight state by
+ * a hardware signal (low pulse on the _WP pin) or by power down.
+ */
+int nand_lock(struct mtd_info *mtd, int tight)
+{
+ int ret = 0;
+ int status;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ /* select the NAND device */
+ chip->select_chip(mtd, 0);
+
+ /* check the Lock Tight Status */
+ chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, 0);
+ if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
+ printf("nand_lock: Device is locked tight!\n");
+ ret = -1;
+ goto out;
+ }
+
+ chip->cmdfunc(mtd,
+ (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK),
+ -1, -1);
+
+ /* call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+
+ /* see if device thinks it succeeded */
+ if (status & 0x01) {
+ ret = -1;
+ }
+
+ out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+ return ret;
+}
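+
+/*
+ * Illustrative only: the whole-chip sequence described above, with
+ * example offset and length values:
+ *
+ *	nand_lock(mtd, 0);			// lock every page
+ *	nand_unlock(mtd, 0x100000, 0x80000, 0);	// open one area
+ *	nand_lock(mtd, 1);			// optionally lock tight
+ */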
+
+/**
+ * nand_get_lock_status: - query current lock state from one page of NAND
+ * flash
+ *
+ * @param mtd nand mtd instance
+ * @param offset page address to query (must be page-aligned!)
+ *
+ * @return -1 in case of error
+ * >0 lock status:
+ * bitfield with the following combinations:
+ * NAND_LOCK_STATUS_TIGHT: page in tight state
+ * NAND_LOCK_STATUS_UNLOCK: page unlocked
+ *
+ */
+int nand_get_lock_status(struct mtd_info *mtd, loff_t offset)
+{
+ int ret = 0;
+ int chipnr;
+ int page;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ /* select the NAND device */
+ chipnr = (int)(offset >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ if ((offset & (mtd->writesize - 1)) != 0) {
+ printf("nand_get_lock_status: "
+ "Start address must be beginning of "
+ "nand page!\n");
+ ret = -1;
+ goto out;
+ }
+
+ /* check the Lock Status */
+ page = (int)(offset >> chip->page_shift);
+ chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
+
+ ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT
+ | NAND_LOCK_STATUS_UNLOCK);
+
+ out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+
+/**
+ * nand_unlock: - Unlock an area of NAND pages.
+ * Only one consecutive area can be unlocked at a time!
+ *
+ * @param mtd nand mtd instance
+ * @param start start byte address
+ * @param length number of bytes to unlock (must be a multiple of
+ * page size mtd->writesize)
+ * @param allexcept if set, unlock everything not selected
+ *
+ * @return 0 on success, -1 in case of error
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length,
+ int allexcept)
+{
+ int ret = 0;
+ int chipnr;
+ int status;
+ int page;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ debug("nand_unlock%s: start: %08llx, length: %zd!\n",
+ allexcept ? " (allexcept)" : "", start, length);
+
+ /* select the NAND device */
+ chipnr = (int)(start >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
+ /* check the WP bit */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) {
+ printf("nand_unlock: Device is write protected!\n");
+ ret = -1;
+ goto out;
+ }
+
+ /* check the Lock Tight Status */
+ page = (int)(start >> chip->page_shift);
+ chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
+ if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
+ printf("nand_unlock: Device is locked tight!\n");
+ ret = -1;
+ goto out;
+ }
+
+ if ((start & (mtd->erasesize - 1)) != 0) {
+ printf("nand_unlock: Start address must be beginning of "
+ "nand block!\n");
+ ret = -1;
+ goto out;
+ }
+
+ if (length == 0 || (length & (mtd->erasesize - 1)) != 0) {
+ printf("nand_unlock: Length must be a multiple of nand block "
+ "size %08x!\n", mtd->erasesize);
+ ret = -1;
+ goto out;
+ }
+
+ /*
+ * Set length so that the last address is set to the
+ * starting address of the last block
+ */
+ length -= mtd->erasesize;
+
+ /* submit address of first page to unlock */
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+ /* submit ADDRESS of LAST page to unlock */
+ page += (int)(length >> chip->page_shift);
+
+ /*
+ * Page addresses for unlocking are supposed to be block-aligned.
+ * At least some NAND chips use the low bit to indicate that the
+ * page range should be inverted.
+ */
+ if (allexcept)
+ page |= 1;
+
+ chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask);
+
+ /* call wait ready function */
+ status = chip->waitfunc(mtd, chip);
+ /* see if device thinks it succeeded */
+ if (status & 0x01) {
+ /* there was an error */
+ ret = -1;
+ goto out;
+ }
+
+ out:
+ /* de-select the NAND device */
+ chip->select_chip(mtd, -1);
+ return ret;
+}
+#endif
+
+/**
+ * check_skip_len
+ *
+ * Check if there are any bad blocks, and whether length including bad
+ * blocks fits into device
+ *
+ * @param mtd nand mtd instance
+ * @param offset offset in flash
+ * @param length image length
+ * @param used length of flash needed for the requested length
+ * @return 0 if the image fits and there are no bad blocks
+ * 1 if the image fits, but there are bad blocks
+ * -1 if the image does not fit
+ */
+static int check_skip_len(struct mtd_info *mtd, loff_t offset, size_t length,
+ size_t *used)
+{
+ size_t len_excl_bad = 0;
+ int ret = 0;
+
+ while (len_excl_bad < length) {
+ size_t block_len, block_off;
+ loff_t block_start;
+
+ if (offset >= mtd->size)
+ return -1;
+
+ block_start = offset & ~(loff_t)(mtd->erasesize - 1);
+ block_off = offset & (mtd->erasesize - 1);
+ block_len = mtd->erasesize - block_off;
+
+ if (!nand_block_isbad(mtd, block_start))
+ len_excl_bad += block_len;
+ else
+ ret = 1;
+
+ offset += block_len;
+ *used += block_len;
+ }
+
+ /* If the length is not a multiple of block_len, adjust. */
+ if (len_excl_bad > length)
+ *used -= (len_excl_bad - length);
+
+ return ret;
+}
+
+#ifdef CONFIG_CMD_NAND_TRIMFFS
+static size_t drop_ffs(const struct mtd_info *mtd, const u_char *buf,
+ const size_t *len)
+{
+ size_t l = *len;
+ ssize_t i;
+
+ for (i = l - 1; i >= 0; i--)
+ if (buf[i] != 0xFF)
+ break;
+
+ /* The resulting length must be aligned to the minimum flash I/O size */
+ l = i + 1;
+ l = (l + mtd->writesize - 1) / mtd->writesize;
+ l *= mtd->writesize;
+
+ /*
+ * since the input length may be unaligned, prevent access past the end
+ * of the buffer
+ */
+ return min(l, *len);
+}
+#endif
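+
+/*
+ * Worked example for drop_ffs() above (illustrative numbers): with a
+ * 2048-byte writesize and a 5000-byte buffer whose last non-0xFF byte
+ * sits at index 2999, l becomes 3000, rounds up to 4096, and
+ * min(4096, 5000) keeps the access inside the buffer while dropping
+ * the all-0xFF tail.
+ */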
+
+/**
+ * nand_verify_page_oob:
+ *
+ * Verify a page of NAND flash, including the OOB.
+ * Reads page of NAND and verifies the contents and OOB against the
+ * values in ops.
+ *
+ * @param mtd nand mtd instance
+ * @param ops MTD operations, including data to verify
+ * @param ofs offset in flash
+ * @return 0 in case of success
+ */
+int nand_verify_page_oob(struct mtd_info *mtd, struct mtd_oob_ops *ops,
+ loff_t ofs)
+{
+ int rval;
+ struct mtd_oob_ops vops;
+ size_t verlen = mtd->writesize + mtd->oobsize;
+
+ memcpy(&vops, ops, sizeof(vops));
+
+ vops.datbuf = memalign(ARCH_DMA_MINALIGN, verlen);
+
+ if (!vops.datbuf)
+ return -ENOMEM;
+
+ vops.oobbuf = vops.datbuf + mtd->writesize;
+
+ rval = mtd_read_oob(mtd, ofs, &vops);
+ if (!rval)
+ rval = memcmp(ops->datbuf, vops.datbuf, vops.len);
+ if (!rval)
+ rval = memcmp(ops->oobbuf, vops.oobbuf, vops.ooblen);
+
+ free(vops.datbuf);
+
+ return rval ? -EIO : 0;
+}
+
+/**
+ * nand_verify:
+ *
+ * Verify a region of NAND flash.
+ * Reads NAND in page-sized chunks and verifies the contents against
+ * the contents of a buffer. The offset into the NAND must be
+ * page-aligned, and the function doesn't handle skipping bad blocks.
+ *
+ * @param mtd nand mtd instance
+ * @param ofs offset in flash
+ * @param len buffer length
+ * @param buf buffer to read from
+ * @return 0 in case of success
+ */
+int nand_verify(struct mtd_info *mtd, loff_t ofs, size_t len, u_char *buf)
+{
+ int rval = 0;
+ size_t verofs;
+ size_t verlen = mtd->writesize;
+ uint8_t *verbuf = memalign(ARCH_DMA_MINALIGN, verlen);
+
+ if (!verbuf)
+ return -ENOMEM;
+
+ /* Read the NAND back in page-size groups to limit malloc size */
+ for (verofs = ofs; verofs < ofs + len;
+ verofs += verlen, buf += verlen) {
+ verlen = min(mtd->writesize, (uint32_t)(ofs + len - verofs));
+ rval = nand_read(mtd, verofs, &verlen, verbuf);
+ if (!rval || (rval == -EUCLEAN))
+ rval = memcmp(buf, verbuf, verlen);
+
+ if (rval)
+ break;
+ }
+
+ free(verbuf);
+
+ return rval ? -EIO : 0;
+}
+
+/**
+ * nand_write_skip_bad:
+ *
+ * Write image to NAND flash.
+ * Blocks that are marked bad are skipped and the data is written to the
+ * next block instead, as long as the image is short enough to fit even
+ * after skipping the bad blocks. Due to bad blocks we may not be able to
+ * perform the requested write. In the case where the write would
+ * extend beyond the end of the NAND device, both length and actual (if
+ * not NULL) are set to 0. In the case where the write would extend
+ * beyond the limit we are passed, length is set to 0 and actual is set
+ * to the required length.
+ *
+ * @param mtd nand mtd instance
+ * @param offset offset in flash
+ * @param length buffer length
+ * @param actual set to size required to write length worth of
+ * buffer or 0 on error, if not NULL
+ * @param lim maximum size that actual may be in order to not
+ * exceed the buffer
+ * @param buffer buffer to read from
+ * @param flags flags modifying the behaviour of the write to NAND
+ * @return 0 in case of success
+ */
+int nand_write_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
+ size_t *actual, loff_t lim, u_char *buffer, int flags)
+{
+ int rval = 0, blocksize;
+ size_t left_to_write = *length;
+ size_t used_for_write = 0;
+ u_char *p_buffer = buffer;
+ int need_skip;
+
+ if (actual)
+ *actual = 0;
+
+ blocksize = mtd->erasesize;
+
+ /*
+ * nand_write() handles unaligned, partial page writes.
+ *
+ * We allow length to be unaligned, for convenience in
+ * using the $filesize variable.
+ *
+ * However, starting at an unaligned offset makes the
+ * semantics of bad block skipping ambiguous (really,
+ * you should only start a block skipping access at a
+ * partition boundary). So don't try to handle that.
+ */
+ if ((offset & (mtd->writesize - 1)) != 0) {
+ printf("Attempt to write non page-aligned data\n");
+ *length = 0;
+ return -EINVAL;
+ }
+
+ need_skip = check_skip_len(mtd, offset, *length, &used_for_write);
+
+ if (actual)
+ *actual = used_for_write;
+
+ if (need_skip < 0) {
+ printf("Attempt to write outside the flash area\n");
+ *length = 0;
+ return -EINVAL;
+ }
+
+ if (used_for_write > lim) {
+ puts("Size of write exceeds partition or device limit\n");
+ *length = 0;
+ return -EFBIG;
+ }
+
+ if (!need_skip && !(flags & WITH_DROP_FFS)) {
+ rval = nand_write(mtd, offset, length, buffer);
+
+ if ((flags & WITH_WR_VERIFY) && !rval)
+ rval = nand_verify(mtd, offset, *length, buffer);
+
+ if (rval == 0)
+ return 0;
+
+ *length = 0;
+ printf("NAND write to offset %llx failed %d\n",
+ offset, rval);
+ return rval;
+ }
+
+ while (left_to_write > 0) {
+ loff_t block_start = offset & ~(loff_t)(mtd->erasesize - 1);
+ size_t block_offset = offset & (mtd->erasesize - 1);
+ size_t write_size, truncated_write_size;
+
+ WATCHDOG_RESET();
+
+ if (nand_block_isbad(mtd, block_start)) {
+ printf("Skip bad block 0x%08llx\n", block_start);
+ offset += mtd->erasesize - block_offset;
+ continue;
+ }
+
+ if (left_to_write < (blocksize - block_offset))
+ write_size = left_to_write;
+ else
+ write_size = blocksize - block_offset;
+
+ truncated_write_size = write_size;
+#ifdef CONFIG_CMD_NAND_TRIMFFS
+ if (flags & WITH_DROP_FFS)
+ truncated_write_size = drop_ffs(mtd, p_buffer,
+ &write_size);
+#endif
+
+ rval = nand_write(mtd, offset, &truncated_write_size,
+ p_buffer);
+
+ if ((flags & WITH_WR_VERIFY) && !rval)
+ rval = nand_verify(mtd, offset,
+ truncated_write_size, p_buffer);
+
+ offset += write_size;
+ p_buffer += write_size;
+
+ if (rval != 0) {
+ printf("NAND write to offset %llx failed %d\n",
+ offset, rval);
+ *length -= left_to_write;
+ return rval;
+ }
+
+ left_to_write -= write_size;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_read_skip_bad:
+ *
+ * Read image from NAND flash.
+ * Blocks that are marked bad are skipped and the next block is read
+ * instead as long as the image is short enough to fit even after
+ * skipping the bad blocks. Due to bad blocks we may not be able to
+ * perform the requested read. In the case where the read would extend
+ * beyond the end of the NAND device, both length and actual (if not
+ * NULL) are set to 0. In the case where the read would extend beyond
+ * the limit we are passed, length is set to 0 and actual is set to the
+ * required length.
+ *
+ * @param mtd nand mtd instance
+ * @param offset offset in flash
+ * @param length buffer length, on return holds number of read bytes
+ * @param actual set to size required to read length worth of buffer or 0
+ * on error, if not NULL
+ * @param lim maximum size that actual may be in order to not exceed the
+ * buffer
+ * @param buffer buffer to write to
+ * @return 0 in case of success
+ */
+int nand_read_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
+ size_t *actual, loff_t lim, u_char *buffer)
+{
+ int rval;
+ size_t left_to_read = *length;
+ size_t used_for_read = 0;
+ u_char *p_buffer = buffer;
+ int need_skip;
+
+ if ((offset & (mtd->writesize - 1)) != 0) {
+ printf("Attempt to read non page-aligned data\n");
+ *length = 0;
+ if (actual)
+ *actual = 0;
+ return -EINVAL;
+ }
+
+ need_skip = check_skip_len(mtd, offset, *length, &used_for_read);
+
+ if (actual)
+ *actual = used_for_read;
+
+ if (need_skip < 0) {
+ printf("Attempt to read outside the flash area\n");
+ *length = 0;
+ return -EINVAL;
+ }
+
+ if (used_for_read > lim) {
+ puts("Size of read exceeds partition or device limit\n");
+ *length = 0;
+ return -EFBIG;
+ }
+
+ if (!need_skip) {
+ rval = nand_read(mtd, offset, length, buffer);
+ if (!rval || rval == -EUCLEAN)
+ return 0;
+
+ *length = 0;
+ printf("NAND read from offset %llx failed %d\n",
+ offset, rval);
+ return rval;
+ }
+
+ while (left_to_read > 0) {
+ size_t block_offset = offset & (mtd->erasesize - 1);
+ size_t read_length;
+
+ WATCHDOG_RESET();
+
+ if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
+ printf("Skipping bad block 0x%08llx\n",
+ offset & ~(mtd->erasesize - 1));
+ offset += mtd->erasesize - block_offset;
+ continue;
+ }
+
+ if (left_to_read < (mtd->erasesize - block_offset))
+ read_length = left_to_read;
+ else
+ read_length = mtd->erasesize - block_offset;
+
+ rval = nand_read(mtd, offset, &read_length, p_buffer);
+ if (rval && rval != -EUCLEAN) {
+ printf("NAND read from offset %llx failed %d\n",
+ offset, rval);
+ *length -= left_to_read;
+ return rval;
+ }
+
+ left_to_read -= read_length;
+ offset += read_length;
+ p_buffer += read_length;
+ }
+
+ return 0;
+}
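+
+/*
+ * Illustrative only: reading an image while skipping bad blocks, with
+ * example offset, length and buffer values:
+ *
+ *	size_t len = 0x200000;
+ *	if (nand_read_skip_bad(mtd, 0x400000, &len, NULL, mtd->size, buf))
+ *		printf("NAND read failed\n");
+ */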
+
+#ifdef CONFIG_CMD_NAND_TORTURE
+
+/**
+ * check_pattern:
+ *
+ * Check if buffer contains only a certain byte pattern.
+ *
+ * @param buf buffer to check
+ * @param patt the pattern to check
+ * @param size buffer size in bytes
+ * @return 1 if there are only patt bytes in buf
+ * 0 if something else was found
+ */
+static int check_pattern(const u_char *buf, u_char patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (buf[i] != patt)
+ return 0;
+ return 1;
+}
+
+/**
+ * nand_torture:
+ *
+ * Torture a block of NAND flash.
+ * This is useful to determine if a block that caused a write error is still
+ * good or should be marked as bad.
+ *
+ * @param mtd nand mtd instance
+ * @param offset offset in flash
+ * @return 0 if the block is still good
+ */
+int nand_torture(struct mtd_info *mtd, loff_t offset)
+{
+ u_char patterns[] = {0xa5, 0x5a, 0x00};
+ struct erase_info instr = {
+ .mtd = mtd,
+ .addr = offset,
+ .len = mtd->erasesize,
+ };
+ size_t retlen;
+ int err, ret = -1, i, patt_count;
+ u_char *buf;
+
+ if ((offset & (mtd->erasesize - 1)) != 0) {
+ puts("Attempt to torture a block at a non block-aligned offset\n");
+ return -EINVAL;
+ }
+
+ if (offset + mtd->erasesize > mtd->size) {
+ puts("Attempt to torture a block outside the flash area\n");
+ return -EINVAL;
+ }
+
+ patt_count = ARRAY_SIZE(patterns);
+
+ buf = malloc_cache_aligned(mtd->erasesize);
+ if (buf == NULL) {
+ puts("Out of memory for erase block buffer\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < patt_count; i++) {
+ err = mtd_erase(mtd, &instr);
+ if (err) {
+ printf("%s: erase() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ /* Make sure the block contains only 0xff bytes */
+ err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
+ if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
+ printf("%s: read() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ err = check_pattern(buf, 0xff, mtd->erasesize);
+ if (!err) {
+ printf("Erased block at 0x%llx, but a non-0xff byte was found\n",
+ offset);
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Write a pattern and check it */
+ memset(buf, patterns[i], mtd->erasesize);
+ err = mtd_write(mtd, offset, mtd->erasesize, &retlen, buf);
+ if (err || retlen != mtd->erasesize) {
+ printf("%s: write() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
+ if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
+ printf("%s: read() failed for block at 0x%llx: %d\n",
+ mtd->name, instr.addr, err);
+ goto out;
+ }
+
+ err = check_pattern(buf, patterns[i], mtd->erasesize);
+ if (!err) {
+ printf("Pattern 0x%.2x checking failed for block at "
+ "0x%llx\n", patterns[i], offset);
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ free(buf);
+ return ret;
+}
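+
+/*
+ * Illustrative only: after a write error, a caller might torture the
+ * block and mark it bad when the test fails:
+ *
+ *	if (nand_torture(mtd, block_start))
+ *		mtd_block_markbad(mtd, block_start);
+ */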
+
+#endif
diff --git a/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.c b/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.c
new file mode 100644
index 000000000..a41772880
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/of_access.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <nand.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <time.h>
+#include <linux/bitfield.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/libfdt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/clock.h>
+#include "octeontx_bch.h"
+
+#ifdef DEBUG
+# undef CONFIG_LOGLEVEL
+# define CONFIG_LOGLEVEL 8
+#endif
+
+LIST_HEAD(octeontx_bch_devices);
+static unsigned int num_vfs = BCH_NR_VF;
+static void *bch_pf;
+static void *bch_vf;
+static void *token;
+static bool bch_pf_initialized;
+static bool bch_vf_initialized;
+
+static int pci_enable_sriov(struct udevice *dev, int nr_virtfn)
+{
+ int ret;
+
+ ret = pci_sriov_init(dev, nr_virtfn);
+ if (ret)
+ printf("%s(%s): pci_sriov_init returned %d\n", __func__,
+ dev->name, ret);
+ return ret;
+}
+
+void *octeontx_bch_getv(void)
+{
+ if (!bch_vf)
+ return NULL;
+ if (bch_vf_initialized && bch_pf_initialized)
+ return bch_vf;
+ else
+ return NULL;
+}
+
+void octeontx_bch_putv(void *token)
+{
+ bch_vf_initialized = !!token;
+ bch_vf = token;
+}
+
+void *octeontx_bch_getp(void)
+{
+ return token;
+}
+
+void octeontx_bch_putp(void *token)
+{
+ bch_pf = token;
+ bch_pf_initialized = !!token;
+}
+
+static int do_bch_init(struct bch_device *bch)
+{
+ return 0;
+}
+
+static void bch_reset(struct bch_device *bch)
+{
+ writeq(1, bch->reg_base + BCH_CTL);
+ mdelay(2);
+}
+
+static void bch_disable(struct bch_device *bch)
+{
+ writeq(~0ull, bch->reg_base + BCH_ERR_INT_ENA_W1C);
+ writeq(~0ull, bch->reg_base + BCH_ERR_INT);
+ bch_reset(bch);
+}
+
+static u32 bch_check_bist_status(struct bch_device *bch)
+{
+ return readq(bch->reg_base + BCH_BIST_RESULT);
+}
+
+static int bch_device_init(struct bch_device *bch)
+{
+ u64 bist;
+ int rc;
+
+ debug("%s: Resetting...\n", __func__);
+ /* Reset the PF when probed first */
+ bch_reset(bch);
+
+ debug("%s: Checking BIST...\n", __func__);
+ /* Check BIST status */
+ bist = (u64)bch_check_bist_status(bch);
+ if (bist) {
+		dev_err(bch->dev, "BCH BIST failed with code 0x%llx\n", bist);
+ return -ENODEV;
+ }
+
+ /* Get max VQs/VFs supported by the device */
+
+ bch->max_vfs = pci_sriov_get_totalvfs(bch->dev);
+ debug("%s: %d vfs\n", __func__, bch->max_vfs);
+ if (num_vfs > bch->max_vfs) {
+		dev_warn(bch->dev, "Num of VFs to enable %d is greater than max available. Enabling %d VFs.\n",
+ num_vfs, bch->max_vfs);
+ num_vfs = bch->max_vfs;
+ }
+ bch->vfs_enabled = bch->max_vfs;
+ /* Get number of VQs/VFs to be enabled */
+ /* TODO: Get CLK frequency */
+ /* Reset device parameters */
+
+ debug("%s: Doing initialization\n", __func__);
+ rc = do_bch_init(bch);
+
+ return rc;
+}
+
+static int bch_sriov_configure(struct udevice *dev, int numvfs)
+{
+ struct bch_device *bch = dev_get_priv(dev);
+ int ret = -EBUSY;
+
+ debug("%s(%s, %d), bch: %p, vfs_in_use: %d, enabled: %d\n", __func__,
+ dev->name, numvfs, bch, bch->vfs_in_use, bch->vfs_enabled);
+ if (bch->vfs_in_use)
+ goto exit;
+
+ ret = 0;
+
+ if (numvfs > 0) {
+ debug("%s: Enabling sriov\n", __func__);
+ ret = pci_enable_sriov(dev, numvfs);
+ if (ret == 0) {
+ bch->flags |= BCH_FLAG_SRIOV_ENABLED;
+ ret = numvfs;
+ bch->vfs_enabled = numvfs;
+ }
+ }
+
+ debug("VFs enabled: %d\n", ret);
+exit:
+ debug("%s: Returning %d\n", __func__, ret);
+ return ret;
+}
+
+static int octeontx_pci_bchpf_probe(struct udevice *dev)
+{
+ struct bch_device *bch;
+ int ret;
+
+ debug("%s(%s)\n", __func__, dev->name);
+ bch = dev_get_priv(dev);
+ if (!bch)
+ return -ENOMEM;
+
+ bch->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+ bch->dev = dev;
+
+ debug("%s: base address: %p\n", __func__, bch->reg_base);
+ ret = bch_device_init(bch);
+ if (ret) {
+ printf("%s(%s): init returned %d\n", __func__, dev->name, ret);
+ return ret;
+ }
+ INIT_LIST_HEAD(&bch->list);
+ list_add(&bch->list, &octeontx_bch_devices);
+ token = (void *)dev;
+
+ debug("%s: Configuring SRIOV\n", __func__);
+ bch_sriov_configure(dev, num_vfs);
+ debug("%s: Done.\n", __func__);
+ octeontx_bch_putp(bch);
+
+ return 0;
+}
+
+static const struct pci_device_id octeontx_bchpf_pci_id_table[] = {
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCH) },
+ {},
+};
+
+static const struct pci_device_id octeontx_bchvf_pci_id_table[] = {
+ { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BCHVF)},
+ {},
+};
+
+/**
+ * Given a data block calculate the ecc data and fill in the response
+ *
+ * @param[in] block 8-byte aligned pointer to data block to calculate ECC
+ * @param block_size Size of block in bytes, must be a multiple of two.
+ * @param bch_level Number of errors that must be corrected. The number of
+ * parity bytes is equal to ((15 * bch_level) + 7) / 8.
+ * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] ecc 8-byte aligned pointer to where ecc data should go
+ * @param[in] resp pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
+ u8 bch_level, dma_addr_t ecc, dma_addr_t resp)
+{
+ union bch_cmd cmd;
+ int rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.s.cword.ecc_gen = eg_gen;
+ cmd.s.cword.ecc_level = bch_level;
+ cmd.s.cword.size = block_size;
+
+ cmd.s.oword.ptr = ecc;
+ cmd.s.iword.ptr = block;
+ cmd.s.rword.ptr = resp;
+ rc = octeontx_cmd_queue_write(QID_BCH, 1,
+ sizeof(cmd) / sizeof(uint64_t), cmd.u);
+ if (rc)
+ return -1;
+
+ octeontx_bch_write_doorbell(1, vf);
+
+ return 0;
+}
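+
+/*
+ * Worked example for the parity-byte formula above: bch_level 8 needs
+ * ((15 * 8) + 7) / 8 = 15 parity bytes per block, and bch_level 4 needs
+ * ((15 * 4) + 7) / 8 = 8, so the ecc buffer must be sized accordingly.
+ */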
+
+/**
+ * Given a data block and ecc data correct the data block
+ *
+ * @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC
+ * data concatenated to the end to correct
+ * @param block_size Size of block in bytes, must be a multiple of
+ * two.
+ * @param bch_level Number of errors that must be corrected. The
+ * number of parity bytes is equal to
+ * ((15 * bch_level) + 7) / 8.
+ * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] block_out 8-byte aligned pointer to corrected data buffer.
+ * This should not be the same as block_ecc_in.
+ * @param[in] resp pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
+ u16 block_size, u8 bch_level,
+ dma_addr_t block_out, dma_addr_t resp)
+{
+ union bch_cmd cmd;
+ int rc;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.s.cword.ecc_gen = eg_correct;
+ cmd.s.cword.ecc_level = bch_level;
+ cmd.s.cword.size = block_size;
+
+ cmd.s.oword.ptr = block_out;
+ cmd.s.iword.ptr = block_ecc_in;
+ cmd.s.rword.ptr = resp;
+ rc = octeontx_cmd_queue_write(QID_BCH, 1,
+ sizeof(cmd) / sizeof(uint64_t), cmd.u);
+ if (rc)
+ return -1;
+
+ octeontx_bch_write_doorbell(1, vf);
+ return 0;
+}
+EXPORT_SYMBOL(octeontx_bch_decode);
+
+int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
+ dma_addr_t handle)
+{
+ ulong start = get_timer(0);
+
+ __iormb(); /* HW is updating *resp */
+ while (!resp->s.done && get_timer(start) < 10)
+ __iormb(); /* HW is updating *resp */
+
+ if (resp->s.done)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+struct bch_q octeontx_bch_q[QID_MAX];
+
+static int octeontx_cmd_queue_initialize(struct udevice *dev, int queue_id,
+ int max_depth, int fpa_pool,
+ int pool_size)
+{
+ /* some params are for later merge with CPT or cn83xx */
+ struct bch_q *q = &octeontx_bch_q[queue_id];
+ unsigned long paddr;
+ u64 *chunk_buffer;
+ int chunk = max_depth + 1;
+ int i, size;
+
+ if ((unsigned int)queue_id >= QID_MAX)
+ return -EINVAL;
+ if (max_depth & chunk) /* must be 2^N - 1 */
+ return -EINVAL;
+
+ size = NQS * chunk * sizeof(u64);
+ chunk_buffer = dma_alloc_coherent(size, &paddr);
+ if (!chunk_buffer)
+ return -ENOMEM;
+
+ q->base_paddr = paddr;
+ q->dev = dev;
+ q->index = 0;
+ q->max_depth = max_depth;
+ q->pool_size_m1 = pool_size;
+ q->base_vaddr = chunk_buffer;
+
+ for (i = 0; i < NQS; i++) {
+ u64 *ixp;
+ int inext = (i + 1) * chunk - 1;
+ int j = (i + 1) % NQS;
+ int jnext = j * chunk;
+ dma_addr_t jbase = q->base_paddr + jnext * sizeof(u64);
+
+ ixp = &chunk_buffer[inext];
+ *ixp = jbase;
+ }
+
+ return 0;
+}
+
+static int octeontx_pci_bchvf_probe(struct udevice *dev)
+{
+ struct bch_vf *vf;
+ union bch_vqx_ctl ctl;
+ union bch_vqx_cmd_buf cbuf;
+ int err;
+
+ debug("%s(%s)\n", __func__, dev->name);
+ vf = dev_get_priv(dev);
+ if (!vf)
+ return -ENOMEM;
+
+ vf->dev = dev;
+
+ /* Map PF's configuration registers */
+ vf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+ debug("%s: reg base: %p\n", __func__, vf->reg_base);
+
+ err = octeontx_cmd_queue_initialize(dev, QID_BCH, QDEPTH - 1, 0,
+ sizeof(union bch_cmd) * QDEPTH);
+ if (err) {
+ dev_err(dev, "octeontx_cmd_queue_initialize() failed\n");
+ goto release;
+ }
+
+ ctl.u = readq(vf->reg_base + BCH_VQX_CTL(0));
+
+ cbuf.u = 0;
+ cbuf.s.ldwb = 1;
+ cbuf.s.dfb = 1;
+ cbuf.s.size = QDEPTH;
+ writeq(cbuf.u, vf->reg_base + BCH_VQX_CMD_BUF(0));
+
+ writeq(ctl.u, vf->reg_base + BCH_VQX_CTL(0));
+
+ writeq(octeontx_bch_q[QID_BCH].base_paddr,
+ vf->reg_base + BCH_VQX_CMD_PTR(0));
+
+ octeontx_bch_putv(vf);
+
+ debug("%s: bch vf initialization complete\n", __func__);
+
+ if (octeontx_bch_getv())
+ return octeontx_pci_nand_deferred_probe();
+
+ return -1;
+
+release:
+ return err;
+}
+
+static int octeontx_pci_bchpf_remove(struct udevice *dev)
+{
+ struct bch_device *bch = dev_get_priv(dev);
+
+ bch_disable(bch);
+ return 0;
+}
+
+U_BOOT_DRIVER(octeontx_pci_bchpf) = {
+ .name = BCHPF_DRIVER_NAME,
+ .id = UCLASS_MISC,
+ .probe = octeontx_pci_bchpf_probe,
+ .remove = octeontx_pci_bchpf_remove,
+ .priv_auto = sizeof(struct bch_device),
+ .flags = DM_FLAG_OS_PREPARE,
+};
+
+U_BOOT_DRIVER(octeontx_pci_bchvf) = {
+ .name = BCHVF_DRIVER_NAME,
+ .id = UCLASS_MISC,
+ .probe = octeontx_pci_bchvf_probe,
+ .priv_auto = sizeof(struct bch_vf),
+};
+
+U_BOOT_PCI_DEVICE(octeontx_pci_bchpf, octeontx_bchpf_pci_id_table);
+U_BOOT_PCI_DEVICE(octeontx_pci_bchvf, octeontx_bchvf_pci_id_table);
diff --git a/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.h b/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.h
new file mode 100644
index 000000000..3aaa52c26
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __OCTEONTX_BCH_H__
+#define __OCTEONTX_BCH_H__
+
+#include "octeontx_bch_regs.h"
+
+/* flags to indicate the features supported */
+#define BCH_FLAG_SRIOV_ENABLED BIT(1)
+
+/*
+ * BCH Registers map for 81xx
+ */
+
+/* PF registers */
+#define BCH_CTL 0x0ull
+#define BCH_ERR_CFG 0x10ull
+#define BCH_BIST_RESULT 0x80ull
+#define BCH_ERR_INT 0x88ull
+#define BCH_ERR_INT_W1S 0x90ull
+#define BCH_ERR_INT_ENA_W1C 0xA0ull
+#define BCH_ERR_INT_ENA_W1S 0xA8ull
+
+/* VF registers */
+#define BCH_VQX_CTL(z) 0x0ull
+#define BCH_VQX_CMD_BUF(z) 0x8ull
+#define BCH_VQX_CMD_PTR(z) 0x20ull
+#define BCH_VQX_DOORBELL(z) 0x800ull
+
+#define BCHPF_DRIVER_NAME "octeontx-bchpf"
+#define BCHVF_DRIVER_NAME "octeontx-bchvf"
+
+struct bch_device {
+ struct list_head list;
+ u8 max_vfs;
+ u8 vfs_enabled;
+ u8 vfs_in_use;
+ u32 flags;
+ void __iomem *reg_base;
+ struct udevice *dev;
+};
+
+struct bch_vf {
+ u16 flags;
+ u8 vfid;
+ u8 node;
+ u8 priority;
+ struct udevice *dev;
+ void __iomem *reg_base;
+};
+
+struct buf_ptr {
+ u8 *vptr;
+ dma_addr_t dma_addr;
+ u16 size;
+};
+
+void *octeontx_bch_getv(void);
+void octeontx_bch_putv(void *token);
+void *octeontx_bch_getp(void);
+void octeontx_bch_putp(void *token);
+int octeontx_bch_wait(struct bch_vf *vf, union bch_resp *resp,
+ dma_addr_t handle);
+/**
+ * Given a data block calculate the ecc data and fill in the response
+ *
+ * @param[in] block 8-byte aligned pointer to data block to calculate ECC
+ * @param block_size Size of block in bytes, must be a multiple of two.
+ * @param bch_level Number of errors that must be corrected. The number of
+ * parity bytes is equal to ((15 * bch_level) + 7) / 8.
+ * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] ecc 8-byte aligned pointer to where ecc data should go
+ * @param[in] resp pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeontx_bch_encode(struct bch_vf *vf, dma_addr_t block, u16 block_size,
+ u8 bch_level, dma_addr_t ecc, dma_addr_t resp);
+
+/**
+ * Given a data block and ecc data correct the data block
+ *
+ * @param[in] block_ecc_in 8-byte aligned pointer to data block with ECC
+ * data concatenated to the end to correct
+ * @param block_size Size of block in bytes, must be a multiple of
+ * two.
+ * @param bch_level Number of errors that must be corrected. The
+ * number of parity bytes is equal to
+ * ((15 * bch_level) + 7) / 8.
+ * Must be 4, 8, 16, 24, 32, 40, 48, 56, 60 or 64.
+ * @param[out] block_out 8-byte aligned pointer to corrected data buffer.
+ * This should not be the same as block_ecc_in.
+ * @param[in] resp pointer to where responses will be written.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeontx_bch_decode(struct bch_vf *vf, dma_addr_t block_ecc_in,
+ u16 block_size, u8 bch_level,
+ dma_addr_t block_out, dma_addr_t resp);
+
+/**
+ * Ring the BCH doorbell telling it that new commands are
+ * available.
+ *
+ * @param num_commands Number of new commands
+ * @param vf virtual function handle
+ */
+static inline void octeontx_bch_write_doorbell(u64 num_commands,
+ struct bch_vf *vf)
+{
+ u64 num_words = num_commands * sizeof(union bch_cmd) / sizeof(uint64_t);
+
+ writeq(num_words, vf->reg_base + BCH_VQX_DOORBELL(0));
+}
+
+/**
+ * Since it's possible (and even likely) that the NAND device will be probed
+ * before the BCH device has been probed, we may need to defer the probing.
+ *
+ * In this case, the initial probe returns success but the actual probing
+ * is deferred until the BCH VF has been probed.
+ *
+ * @return 0 for success, otherwise error
+ */
+int octeontx_pci_nand_deferred_probe(void);
+
+#endif /* __OCTEONTX_BCH_H__ */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch_regs.h b/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch_regs.h
new file mode 100644
index 000000000..7d34438fe
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/octeontx_bch_regs.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#ifndef __OCTEONTX_BCH_REGS_H__
+#define __OCTEONTX_BCH_REGS_H__
+
+#define BCH_NR_VF 1
+
+union bch_cmd {
+ u64 u[4];
+ struct fields {
+ struct {
+ u64 size:12;
+ u64 reserved_12_31:20;
+ u64 ecc_level:4;
+ u64 reserved_36_61:26;
+ u64 ecc_gen:2;
+ } cword;
+ struct {
+ u64 ptr:49;
+ u64 reserved_49_55:7;
+ u64 nc:1;
+ u64 fw:1;
+ u64 reserved_58_63:6;
+ } oword;
+ struct {
+ u64 ptr:49;
+ u64 reserved_49_55:7;
+ u64 nc:1;
+ u64 reserved_57_63:7;
+ } iword;
+ struct {
+ u64 ptr:49;
+ u64 reserved_49_63:15;
+ } rword;
+ } s;
+};
+
+enum ecc_gen {
+ eg_correct,
+ eg_copy,
+ eg_gen,
+ eg_copy3,
+};
+
+/** Response from BCH instruction */
+union bch_resp {
+ u16 u16;
+ struct {
+ u16 num_errors:7; /** Number of errors in block */
+ u16 zero:6; /** Always zero, ignore */
+ u16 erased:1; /** Block is erased */
+ u16 uncorrectable:1;/** too many bits flipped */
+ u16 done:1; /** Block is done */
+ } s;
+};
+
+union bch_vqx_ctl {
+ u64 u;
+ struct {
+ u64 reserved_0:1;
+ u64 cmd_be:1;
+ u64 max_read:4;
+ u64 reserved_6_15:10;
+ u64 erase_disable:1;
+ u64 one_cmd:1;
+ u64 early_term:4;
+ u64 reserved_22_63:42;
+ } s;
+};
+
+union bch_vqx_cmd_buf {
+ u64 u;
+ struct {
+ u64 reserved_0_32:33;
+ u64 size:13;
+ u64 dfb:1;
+ u64 ldwb:1;
+ u64 reserved_48_63:16;
+ } s;
+};
+
+/* keep queue state indexed, even though just one supported here,
+ * for later generalization to similarly-shaped queues on other Cavium devices
+ */
+enum {
+ QID_BCH,
+ QID_MAX
+};
+
+struct bch_q {
+ struct udevice *dev;
+ int index;
+ u16 max_depth;
+ u16 pool_size_m1;
+ u64 *base_vaddr;
+ dma_addr_t base_paddr;
+};
+
+extern struct bch_q octeontx_bch_q[QID_MAX];
+
+/* with one dma-mapped area, virt<->phys conversions by +/- (vaddr-paddr) */
+static inline dma_addr_t qphys(int qid, void *v)
+{
+ struct bch_q *q = &octeontx_bch_q[qid];
+ int off = (u8 *)v - (u8 *)q->base_vaddr;
+
+ return q->base_paddr + off;
+}
+
+#define octeontx_ptr_to_phys(v) qphys(QID_BCH, (v))
+
+static inline void *qvirt(int qid, dma_addr_t p)
+{
+ struct bch_q *q = &octeontx_bch_q[qid];
+ int off = p - q->base_paddr;
+
+	/* base_vaddr is a u64 pointer, but off is in bytes, as in qphys() */
+	return (u8 *)q->base_vaddr + off;
+}
+
+#define octeontx_phys_to_ptr(p) qvirt(QID_BCH, (p))
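+
+/*
+ * Illustrative only: with a single DMA-coherent queue area the two
+ * helpers form an offset round trip ("resp" is a hypothetical pointer
+ * into that area):
+ *
+ *	dma_addr_t p = octeontx_ptr_to_phys(resp);
+ *	void *v = octeontx_phys_to_ptr(p);	// == resp
+ */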
+
+/* plenty for interleaved r/w on two planes with 16k page, ecc_size 1k */
+/* QDEPTH >= 16, as successive chunks must align on 128-byte boundaries */
+#define QDEPTH 256 /* u64s in a command queue chunk, incl next-pointer */
+#define NQS 1 /* linked chunks in the chain */
+
+/**
+ * Write an arbitrary number of command words to a command queue.
+ * This is a generic function; variants hard-coded for a fixed number
+ * of command words yield higher performance.
+ *
+ * Could merge with crypto version for FPA use on cn83xx
+ */
+static inline int octeontx_cmd_queue_write(int queue_id, bool use_locking,
+ int cmd_count, const u64 *cmds)
+{
+ int ret = 0;
+ u64 *cmd_ptr;
+ struct bch_q *qptr = &octeontx_bch_q[queue_id];
+
+ if (unlikely(cmd_count < 1 || cmd_count > 32))
+ return -EINVAL;
+ if (unlikely(!cmds))
+ return -EINVAL;
+
+ cmd_ptr = qptr->base_vaddr;
+
+ while (cmd_count > 0) {
+ int slot = qptr->index % (QDEPTH * NQS);
+
+ if (slot % QDEPTH != QDEPTH - 1) {
+ cmd_ptr[slot] = *cmds++;
+ cmd_count--;
+ }
+
+ qptr->index++;
+ }
+
+ __iowmb(); /* flush commands before ringing bell */
+
+ return ret;
+}
+
+#endif /* __OCTEONTX_BCH_REGS_H__ */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/octeontx_nand.c b/roms/u-boot/drivers/mtd/nand/raw/octeontx_nand.c
new file mode 100644
index 000000000..e0ccc7b0d
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/octeontx_nand.c
@@ -0,0 +1,2258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <dm.h>
+#include <dm/device-internal.h>
+#include <dm/devres.h>
+#include <dm/of_access.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <nand.h>
+#include <pci.h>
+#include <time.h>
+#include <linux/bitfield.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ioport.h>
+#include <linux/libfdt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/types.h>
+#include <asm/dma-mapping.h>
+#include <asm/arch/clock.h>
+#include "octeontx_bch.h"
+
+#ifdef DEBUG
+# undef CONFIG_LOGLEVEL
+# define CONFIG_LOGLEVEL 8
+#endif
+
+/*
+ * The NDF_CMD queue takes commands between 16 and 128 bits wide.
+ * All commands must be 16 bit aligned and are little endian.
+ * WAIT_STATUS commands must be 64 bit aligned.
+ * Commands are selected by the 4 bit opcode.
+ *
+ * Available Commands:
+ *
+ * 16 Bit:
+ * NOP
+ * WAIT
+ * BUS_ACQ, BUS_REL
+ * CHIP_EN, CHIP_DIS
+ *
+ * 32 Bit:
+ * CLE_CMD
+ * RD_CMD, RD_EDO_CMD
+ * WR_CMD
+ *
+ * 64 Bit:
+ * SET_TM_PAR
+ *
+ * 96 Bit:
+ * ALE_CMD
+ *
+ * 128 Bit:
+ * WAIT_STATUS, WAIT_STATUS_ALE
+ */
+
+/* NDF Register offsets */
+#define NDF_CMD 0x0
+#define NDF_MISC 0x8
+#define NDF_ECC_CNT 0x10
+#define NDF_DRBELL 0x30
+#define NDF_ST_REG 0x38 /* status */
+#define NDF_INT 0x40
+#define NDF_INT_W1S 0x48
+#define NDF_DMA_CFG 0x50
+#define NDF_DMA_ADR 0x58
+#define NDF_INT_ENA_W1C 0x60
+#define NDF_INT_ENA_W1S 0x68
+
+/* NDF command opcodes */
+#define NDF_OP_NOP 0x0
+#define NDF_OP_SET_TM_PAR 0x1
+#define NDF_OP_WAIT 0x2
+#define NDF_OP_CHIP_EN_DIS 0x3
+#define NDF_OP_CLE_CMD 0x4
+#define NDF_OP_ALE_CMD 0x5
+#define NDF_OP_WR_CMD 0x8
+#define NDF_OP_RD_CMD 0x9
+#define NDF_OP_RD_EDO_CMD 0xa
+#define NDF_OP_WAIT_STATUS 0xb /* same opcode for WAIT_STATUS_ALE */
+#define NDF_OP_BUS_ACQ_REL 0xf
+
+#define NDF_BUS_ACQUIRE 1
+#define NDF_BUS_RELEASE 0
+
+#define DBGX_EDSCR(X) (0x87A008000088 + (X) * 0x80000)
+
+struct ndf_nop_cmd {
+ u16 opcode: 4;
+ u16 nop: 12;
+};
+
+struct ndf_wait_cmd {
+ u16 opcode:4;
+ u16 r_b:1; /* wait for one cycle or PBUS_WAIT deassert */
+ u16:3;
+ u16 wlen:3; /* timing parameter select */
+ u16:5;
+};
+
+struct ndf_bus_cmd {
+ u16 opcode:4;
+ u16 direction:4; /* 1 = acquire, 0 = release */
+ u16:8;
+};
+
+struct ndf_chip_cmd {
+ u16 opcode:4;
+ u16 chip:3; /* select chip, 0 = disable */
+ u16 enable:1; /* 1 = enable, 0 = disable */
+ u16 bus_width:2; /* 10 = 16 bit, 01 = 8 bit */
+ u16:6;
+};
+
+struct ndf_cle_cmd {
+ u32 opcode:4;
+ u32:4;
+ u32 cmd_data:8; /* command sent to the PBUS AD pins */
+ u32 clen1:3; /* time between PBUS CLE and WE asserts */
+ u32 clen2:3; /* time WE remains asserted */
+ u32 clen3:3; /* time between WE deassert and CLE */
+ u32:7;
+};
+
+/* RD_EDO_CMD uses the same layout as RD_CMD */
+struct ndf_rd_cmd {
+ u32 opcode:4;
+ u32 data:16; /* data bytes */
+ u32 rlen1:3;
+ u32 rlen2:3;
+ u32 rlen3:3;
+ u32 rlen4:3;
+};
+
+struct ndf_wr_cmd {
+ u32 opcode:4;
+ u32 data:16; /* data bytes */
+ u32:4;
+ u32 wlen1:3;
+ u32 wlen2:3;
+ u32:3;
+};
+
+struct ndf_set_tm_par_cmd {
+ u64 opcode:4;
+ u64 tim_mult:4; /* multiplier for the seven parameters */
+ u64 tm_par1:8; /* --> Following are the 7 timing parameters that */
+ u64 tm_par2:8; /* specify the number of coprocessor cycles. */
+ u64 tm_par3:8; /* A value of zero means one cycle. */
+ u64 tm_par4:8; /* All values are scaled by tim_mult */
+ u64 tm_par5:8; /* using tim_par * (2 ^ tim_mult). */
+ u64 tm_par6:8;
+ u64 tm_par7:8;
+};
+
+struct ndf_ale_cmd {
+ u32 opcode:4;
+ u32:4;
+ u32 adr_byte_num:4; /* number of address bytes to be sent */
+ u32:4;
+ u32 alen1:3;
+ u32 alen2:3;
+ u32 alen3:3;
+ u32 alen4:3;
+ u32:4;
+ u8 adr_byt1;
+ u8 adr_byt2;
+ u8 adr_byt3;
+ u8 adr_byt4;
+ u8 adr_byt5;
+ u8 adr_byt6;
+ u8 adr_byt7;
+ u8 adr_byt8;
+};
+
+struct ndf_wait_status_cmd {
+ u32 opcode:4;
+ u32:4;
+ u32 data:8; /** data */
+ u32 clen1:3;
+ u32 clen2:3;
+ u32 clen3:3;
+ u32:8;
+ /** set to 5 to select WAIT_STATUS_ALE command */
+ u32 ale_ind:8;
+ /** ALE only: number of address bytes to be sent */
+ u32 adr_byte_num:4;
+ u32:4;
+ u32 alen1:3; /* ALE only */
+ u32 alen2:3; /* ALE only */
+ u32 alen3:3; /* ALE only */
+ u32 alen4:3; /* ALE only */
+ u32:4;
+ u8 adr_byt[4]; /* ALE only */
+ u32 nine:4; /* set to 9 */
+ u32 and_mask:8;
+ u32 comp_byte:8;
+ u32 rlen1:3;
+ u32 rlen2:3;
+ u32 rlen3:3;
+ u32 rlen4:3;
+};
+
+union ndf_cmd {
+ u64 val[2];
+ union {
+ struct ndf_nop_cmd nop;
+ struct ndf_wait_cmd wait;
+ struct ndf_bus_cmd bus_acq_rel;
+ struct ndf_chip_cmd chip_en_dis;
+ struct ndf_cle_cmd cle_cmd;
+ struct ndf_rd_cmd rd_cmd;
+ struct ndf_wr_cmd wr_cmd;
+ struct ndf_set_tm_par_cmd set_tm_par;
+ struct ndf_ale_cmd ale_cmd;
+ struct ndf_wait_status_cmd wait_status;
+ } u;
+};
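+
+/*
+ * Illustrative only: composing a 16-bit CHIP_EN command with the
+ * structures above (field values are examples):
+ *
+ *	union ndf_cmd cmd = {};
+ *	cmd.u.chip_en_dis.opcode = NDF_OP_CHIP_EN_DIS;
+ *	cmd.u.chip_en_dis.chip = 1;		// chip select 1
+ *	cmd.u.chip_en_dis.enable = 1;
+ *	cmd.u.chip_en_dis.bus_width = 1;	// 01 = 8-bit bus
+ */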
+
+/** Disable multi-bit error hangs */
+#define NDF_MISC_MB_DIS BIT_ULL(27)
+/** High watermark for NBR FIFO or load/store operations */
+#define NDF_MISC_NBR_HWM GENMASK_ULL(26, 24)
+/** Wait input filter count */
+#define NDF_MISC_WAIT_CNT GENMASK_ULL(23, 18)
+/** Unfilled NFD_CMD queue bytes */
+#define NDF_MISC_FR_BYTE GENMASK_ULL(17, 7)
+/** Set by HW when it reads the last 8 bytes of NDF_CMD */
+#define NDF_MISC_RD_DONE BIT_ULL(6)
+/** Set by HW when it reads. SW read of NDF_CMD clears it */
+#define NDF_MISC_RD_VAL BIT_ULL(5)
+/** Let HW read NDF_CMD queue. Cleared on SW NDF_CMD write */
+#define NDF_MISC_RD_CMD BIT_ULL(4)
+/** Boot disable */
+#define NDF_MISC_BT_DIS BIT_ULL(2)
+/** Stop command execution after completing command queue */
+#define NDF_MISC_EX_DIS BIT_ULL(1)
+/** Reset fifo */
+#define NDF_MISC_RST_FF BIT_ULL(0)
+
+/** DMA engine enable */
+#define NDF_DMA_CFG_EN BIT_ULL(63)
+/** Read or write */
+#define NDF_DMA_CFG_RW BIT_ULL(62)
+/** Terminates DMA and clears enable bit */
+#define NDF_DMA_CFG_CLR BIT_ULL(61)
+/** 32-bit swap enable */
+#define NDF_DMA_CFG_SWAP32 BIT_ULL(59)
+/** 16-bit swap enable */
+#define NDF_DMA_CFG_SWAP16 BIT_ULL(58)
+/** 8-bit swap enable */
+#define NDF_DMA_CFG_SWAP8 BIT_ULL(57)
+/** Endian mode */
+#define NDF_DMA_CFG_CMD_BE BIT_ULL(56)
+/** Number of 64 bit transfers */
+#define NDF_DMA_CFG_SIZE GENMASK_ULL(55, 36)
+
+/** Command execution status idle */
+#define NDF_ST_REG_EXE_IDLE BIT_ULL(15)
+/** Command execution SM states */
+#define NDF_ST_REG_EXE_SM GENMASK_ULL(14, 11)
+/** DMA and load SM states */
+#define NDF_ST_REG_BT_SM GENMASK_ULL(10, 7)
+/** Queue read-back SM bad state */
+#define NDF_ST_REG_RD_FF_BAD BIT_ULL(6)
+/** Queue read-back SM states */
+#define NDF_ST_REG_RD_FF GENMASK_ULL(5, 4)
+/** Main SM is in a bad state */
+#define NDF_ST_REG_MAIN_BAD BIT_ULL(3)
+/** Main SM states */
+#define NDF_ST_REG_MAIN_SM GENMASK_ULL(2, 0)
+
+#define MAX_NAND_NAME_LEN 64
+#if (defined(NAND_MAX_PAGESIZE) && (NAND_MAX_PAGESIZE > 4096)) || \
+ !defined(NAND_MAX_PAGESIZE)
+# undef NAND_MAX_PAGESIZE
+# define NAND_MAX_PAGESIZE 4096
+#endif
+#if (defined(NAND_MAX_OOBSIZE) && (NAND_MAX_OOBSIZE > 256)) || \
+ !defined(NAND_MAX_OOBSIZE)
+# undef NAND_MAX_OOBSIZE
+# define NAND_MAX_OOBSIZE 256
+#endif
+
+#define OCTEONTX_NAND_DRIVER_NAME "octeontx_nand"
+
+#define NDF_TIMEOUT 1000 /** Timeout in ms */
+#define USEC_PER_SEC 1000000 /** Linux compatibility */
+#ifndef NAND_MAX_CHIPS
+# define NAND_MAX_CHIPS 8 /** Linux compatibility */
+#endif
+
+struct octeontx_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ struct ndf_set_tm_par_cmd timings;
+ int cs;
+ int selected_page;
+ int iface_mode;
+ int row_bytes;
+ int col_bytes;
+ bool oob_only;
+ bool iface_set;
+};
+
+struct octeontx_nand_buf {
+ u8 *dmabuf;
+ dma_addr_t dmaaddr;
+ int dmabuflen;
+ int data_len;
+ int data_index;
+};
+
+/** NAND flash controller (NDF) related information */
+struct octeontx_nfc {
+ struct nand_hw_control controller;
+ struct udevice *dev;
+ void __iomem *base;
+ struct list_head chips;
+ int selected_chip; /* Currently selected NAND chip number */
+
+ /*
+ * Status is separate from octeontx_nand_buf because
+ * it can be used in parallel and during init.
+ */
+ u8 *stat;
+ dma_addr_t stat_addr;
+ bool use_status;
+
+ struct octeontx_nand_buf buf;
+ union bch_resp *bch_resp;
+ dma_addr_t bch_rhandle;
+
+ /* BCH of all-0xff, so erased pages read as error-free */
+ unsigned char *eccmask;
+};
+
+/* settable timings - t0..t7 select the timing used for alen1..4, clen1..3, etc. */
+enum tm_idx {
+ t0, /* fixed at 4<<mult cycles */
+ t1, t2, t3, t4, t5, t6, t7, /* settable per ONFI-timing mode */
+};
+
+struct octeontx_probe_device {
+ struct list_head list;
+ struct udevice *dev;
+};
+
+static struct bch_vf *bch_vf;
+/** Deferred devices due to BCH not being ready */
+LIST_HEAD(octeontx_pci_nand_deferred_devices);
+
+/** default parameters used for probing chips */
+#define MAX_ONFI_MODE 5
+
+static int default_onfi_timing;
+static int slew_ns = 2; /* default timing padding */
+static int def_ecc_size = 512; /* 1024 best for sw_bch, <= 4095 for hw_bch */
+static int default_width = 1; /* 8 bit */
+static int default_page_size = 2048;
+static struct ndf_set_tm_par_cmd default_timing_parms;
+
+/** Port from Linux */
+#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+({ \
+ ulong __start = get_timer(0); \
+ void *__addr = (addr); \
+ const ulong __timeout_ms = timeout_us / 1000; \
+ do { \
+ (val) = readq(__addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && get_timer(__start) > __timeout_ms) { \
+ (val) = readq(__addr); \
+ break; \
+ } \
+ if (delay_us) \
+ udelay(delay_us); \
+ } while (1); \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
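+
+/*
+ * Note: get_timer() has only millisecond resolution, so timeout_us is
+ * rounded down to whole milliseconds here; a sub-millisecond timeout
+ * effectively expires once the first millisecond has elapsed.
+ */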
+
+/** Ported from Linux 4.9.0 include/linux/of.h for compatibility */
+static inline int of_get_child_count(const ofnode node)
+{
+ return fdtdec_get_child_count(gd->fdt_blob, ofnode_to_offset(node));
+}
+
+/**
+ * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
+ */
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+/**
+ * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
+ */
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 2;
+ oobregion->offset = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+ .ecc = nand_ooblayout_ecc_lp,
+ .rfree = nand_ooblayout_free_lp,
+};
+
+static inline struct octeontx_nand_chip *to_otx_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct octeontx_nand_chip, nand);
+}
+
+static inline struct octeontx_nfc *to_otx_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct octeontx_nfc, controller);
+}
+
+static int octeontx_nand_calc_ecc_layout(struct nand_chip *nand)
+{
+ struct nand_ecclayout *layout = nand->ecc.layout;
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ struct mtd_info *mtd = &nand->mtd;
+ int oobsize = mtd->oobsize;
+ int i;
+ bool layout_alloc = false;
+
+ if (!layout) {
+ layout = devm_kzalloc(tn->dev, sizeof(*layout), GFP_KERNEL);
+ if (!layout)
+ return -ENOMEM;
+ nand->ecc.layout = layout;
+ layout_alloc = true;
+ }
+ layout->eccbytes = nand->ecc.steps * nand->ecc.bytes;
+ /* Reserve 2 bytes for bad block marker */
+ if (layout->eccbytes + 2 > oobsize) {
+ pr_err("No suitable oob scheme available for oobsize %d eccbytes %u\n",
+ oobsize, layout->eccbytes);
+ goto fail;
+ }
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = oobsize - layout->eccbytes + i;
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = oobsize - 2 - layout->eccbytes;
+ nand->ecc.layout = layout;
+ return 0;
+
+fail:
+ if (layout_alloc)
+ kfree(layout);
+ return -1;
+}
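+
+/*
+ * Layout illustration (example values, not from a specific chip): with a
+ * 64-byte OOB and 4 ECC steps of 10 bytes each, eccbytes = 40, the ECC
+ * occupies oob[24..63] and oobfree is offset 2, length 22.
+ */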
+
+/*
+ * Read a single byte from the temporary buffer. Used after READID
+ * to get the NAND information and for STATUS.
+ */
+static u8 octeontx_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+
+ if (tn->use_status) {
+ tn->use_status = false;
+ return *tn->stat;
+ }
+
+ if (tn->buf.data_index < tn->buf.data_len)
+ return tn->buf.dmabuf[tn->buf.data_index++];
+
+ dev_err(tn->dev, "No data to read, idx: 0x%x, len: 0x%x\n",
+ tn->buf.data_index, tn->buf.data_len);
+
+ return 0xff;
+}
+
+/*
+ * Read a number of pending bytes from the temporary buffer. Used
+ * to get page and OOB data.
+ */
+static void octeontx_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+
+ if (len > tn->buf.data_len - tn->buf.data_index) {
+ dev_err(tn->dev, "Not enough data for read of %d bytes\n", len);
+ return;
+ }
+
+ memcpy(buf, tn->buf.dmabuf + tn->buf.data_index, len);
+ tn->buf.data_index += len;
+}
+
+static void octeontx_nand_write_buf(struct mtd_info *mtd,
+ const u8 *buf, int len)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+
+ memcpy(tn->buf.dmabuf + tn->buf.data_len, buf, len);
+ tn->buf.data_len += len;
+}
+
+/* Overwrite default function to avoid sync abort on chip = -1. */
+static void octeontx_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+static inline int timing_to_cycle(u32 psec, unsigned long clock)
+{
+ unsigned int ns;
+ int ticks;
+
+ ns = DIV_ROUND_UP(psec, 1000);
+ ns += slew_ns;
+
+ /* no rounding needed since clock is multiple of 1MHz */
+ clock /= 1000000;
+ ns *= clock;
+
+ ticks = DIV_ROUND_UP(ns, 1000);
+
+ /* actual delay is (tm_parX+1)<<tim_mult */
+ if (ticks)
+ ticks--;
+
+ return ticks;
+}
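+
+/*
+ * Worked example (illustrative numbers): psec = 25000 (25ns) on a 100MHz
+ * coprocessor clock gives ns = 25 + 2 (slew_ns) = 27, then
+ * DIV_ROUND_UP(27 * 100, 1000) = 3 ticks, returned as 2 because the
+ * hardware adds one cycle per tm_parX.
+ */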
+
+static void set_timings(struct octeontx_nand_chip *chip,
+ struct ndf_set_tm_par_cmd *tp,
+ const struct nand_sdr_timings *timings,
+ unsigned long sclk)
+{
+ /* scaled coprocessor-cycle values */
+ u32 s_wh, s_cls, s_clh, s_rp, s_wb, s_wc;
+
+ tp->tim_mult = 0;
+ s_wh = timing_to_cycle(timings->tWH_min, sclk);
+ s_cls = timing_to_cycle(timings->tCLS_min, sclk);
+ s_clh = timing_to_cycle(timings->tCLH_min, sclk);
+ s_rp = timing_to_cycle(timings->tRP_min, sclk);
+ s_wb = timing_to_cycle(timings->tWB_max, sclk);
+ s_wc = timing_to_cycle(timings->tWC_min, sclk);
+
+ tp->tm_par1 = s_wh;
+ tp->tm_par2 = s_clh;
+ tp->tm_par3 = s_rp + 1;
+ tp->tm_par4 = s_cls - s_wh;
+ tp->tm_par5 = s_wc - s_wh + 1;
+ tp->tm_par6 = s_wb;
+ tp->tm_par7 = 0;
+ tp->tim_mult++; /* overcompensate for bad math */
+
+ /* TODO: comment parameter re-use */
+
+ pr_debug("%s: tim_par: mult: %d p1: %d p2: %d p3: %d\n",
+ __func__, tp->tim_mult, tp->tm_par1, tp->tm_par2, tp->tm_par3);
+ pr_debug(" p4: %d p5: %d p6: %d p7: %d\n",
+ tp->tm_par4, tp->tm_par5, tp->tm_par6, tp->tm_par7);
+}
+
+static int set_default_timings(struct octeontx_nfc *tn,
+ const struct nand_sdr_timings *timings)
+{
+ unsigned long sclk = octeontx_get_io_clock();
+
+ set_timings(NULL, &default_timing_parms, timings, sclk);
+ return 0;
+}
+
+static int octeontx_nfc_chip_set_timings(struct octeontx_nand_chip *chip,
+ const struct nand_sdr_timings *timings)
+{
+ /*struct octeontx_nfc *tn = to_otx_nfc(chip->nand.controller);*/
+ unsigned long sclk = octeontx_get_io_clock();
+
+ set_timings(chip, &chip->timings, timings, sclk);
+ return 0;
+}
+
+/* How many bytes are free in the NDF_CMD queue? */
+static int ndf_cmd_queue_free(struct octeontx_nfc *tn)
+{
+ u64 ndf_misc;
+
+ ndf_misc = readq(tn->base + NDF_MISC);
+ return FIELD_GET(NDF_MISC_FR_BYTE, ndf_misc);
+}
+
+/* Submit a command to the NAND command queue. */
+static int ndf_submit(struct octeontx_nfc *tn, union ndf_cmd *cmd)
+{
+ int opcode = cmd->val[0] & 0xf;
+
+ switch (opcode) {
+ /* All these commands fit in one 64-bit word */
+ case NDF_OP_NOP:
+ case NDF_OP_SET_TM_PAR:
+ case NDF_OP_WAIT:
+ case NDF_OP_CHIP_EN_DIS:
+ case NDF_OP_CLE_CMD:
+ case NDF_OP_WR_CMD:
+ case NDF_OP_RD_CMD:
+ case NDF_OP_RD_EDO_CMD:
+ case NDF_OP_BUS_ACQ_REL:
+ if (ndf_cmd_queue_free(tn) < 8)
+ goto full;
+ writeq(cmd->val[0], tn->base + NDF_CMD);
+ break;
+ case NDF_OP_ALE_CMD:
+ /* ALE commands take either one or two 64-bit words */
+ if (cmd->u.ale_cmd.adr_byte_num < 5) {
+ if (ndf_cmd_queue_free(tn) < 8)
+ goto full;
+ writeq(cmd->val[0], tn->base + NDF_CMD);
+ } else {
+ if (ndf_cmd_queue_free(tn) < 16)
+ goto full;
+ writeq(cmd->val[0], tn->base + NDF_CMD);
+ writeq(cmd->val[1], tn->base + NDF_CMD);
+ }
+ break;
+ /* Wait status commands take two 64-bit words */
+ case NDF_OP_WAIT_STATUS:
+ if (ndf_cmd_queue_free(tn) < 16)
+ goto full;
+ writeq(cmd->val[0], tn->base + NDF_CMD);
+ writeq(cmd->val[1], tn->base + NDF_CMD);
+ break;
+ default:
+ dev_err(tn->dev, "%s: unknown command: %u\n", __func__, opcode);
+ return -EINVAL;
+ }
+ return 0;
+
+full:
+ dev_err(tn->dev, "%s: no space left in command queue\n", __func__);
+ return -ENOMEM;
+}
+
+/**
+ * Wait for the ready/busy signal. First wait for busy to be valid,
+ * then wait for busy to de-assert.
+ */
+static int ndf_build_wait_busy(struct octeontx_nfc *tn)
+{
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.wait.opcode = NDF_OP_WAIT;
+ cmd.u.wait.r_b = 1;
+ cmd.u.wait.wlen = t6;
+
+ if (ndf_submit(tn, &cmd))
+ return -ENOMEM;
+ return 0;
+}
+
+static bool ndf_dma_done(struct octeontx_nfc *tn)
+{
+ u64 dma_cfg;
+
+ /* Enable bit should be clear after a transfer */
+ dma_cfg = readq(tn->base + NDF_DMA_CFG);
+ if (!(dma_cfg & NDF_DMA_CFG_EN))
+ return true;
+
+ return false;
+}
+
+static int ndf_wait(struct octeontx_nfc *tn)
+{
+ ulong start = get_timer(0);
+ bool done;
+
+ while (!(done = ndf_dma_done(tn)) && get_timer(start) < NDF_TIMEOUT)
+ ;
+
+ if (!done) {
+ dev_err(tn->dev, "%s: timeout error\n", __func__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int ndf_wait_idle(struct octeontx_nfc *tn)
+{
+ u64 val;
+ u64 dval = 0;
+ int rc;
+ int pause = 100;
+ u64 tot_us = USEC_PER_SEC / 10;
+
+ rc = readq_poll_timeout(tn->base + NDF_ST_REG,
+ val, val & NDF_ST_REG_EXE_IDLE, pause, tot_us);
+ if (!rc)
+ rc = readq_poll_timeout(tn->base + NDF_DMA_CFG,
+ dval, !(dval & NDF_DMA_CFG_EN),
+ pause, tot_us);
+
+ return rc;
+}
+
+/** Issue set timing parameters */
+static int ndf_queue_cmd_timing(struct octeontx_nfc *tn,
+ struct ndf_set_tm_par_cmd *timings)
+{
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.set_tm_par.opcode = NDF_OP_SET_TM_PAR;
+ cmd.u.set_tm_par.tim_mult = timings->tim_mult;
+ cmd.u.set_tm_par.tm_par1 = timings->tm_par1;
+ cmd.u.set_tm_par.tm_par2 = timings->tm_par2;
+ cmd.u.set_tm_par.tm_par3 = timings->tm_par3;
+ cmd.u.set_tm_par.tm_par4 = timings->tm_par4;
+ cmd.u.set_tm_par.tm_par5 = timings->tm_par5;
+ cmd.u.set_tm_par.tm_par6 = timings->tm_par6;
+ cmd.u.set_tm_par.tm_par7 = timings->tm_par7;
+ return ndf_submit(tn, &cmd);
+}
+
+/** Issue bus acquire or release */
+static int ndf_queue_cmd_bus(struct octeontx_nfc *tn, int direction)
+{
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.bus_acq_rel.opcode = NDF_OP_BUS_ACQ_REL;
+ cmd.u.bus_acq_rel.direction = direction;
+ return ndf_submit(tn, &cmd);
+}
+
+/* Issue chip select or deselect */
+static int ndf_queue_cmd_chip(struct octeontx_nfc *tn, int enable, int chip,
+ int width)
+{
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.chip_en_dis.opcode = NDF_OP_CHIP_EN_DIS;
+ cmd.u.chip_en_dis.chip = chip;
+ cmd.u.chip_en_dis.enable = enable;
+ cmd.u.chip_en_dis.bus_width = width;
+ return ndf_submit(tn, &cmd);
+}
+
+static int ndf_queue_cmd_wait(struct octeontx_nfc *tn, int t_delay)
+{
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.wait.opcode = NDF_OP_WAIT;
+ cmd.u.wait.wlen = t_delay;
+ return ndf_submit(tn, &cmd);
+}
+
+static int ndf_queue_cmd_cle(struct octeontx_nfc *tn, int command)
+{
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.cle_cmd.opcode = NDF_OP_CLE_CMD;
+ cmd.u.cle_cmd.cmd_data = command;
+ cmd.u.cle_cmd.clen1 = t4;
+ cmd.u.cle_cmd.clen2 = t1;
+ cmd.u.cle_cmd.clen3 = t2;
+ return ndf_submit(tn, &cmd);
+}
+
+static int ndf_queue_cmd_ale(struct octeontx_nfc *tn, int addr_bytes,
+ struct nand_chip *nand, u64 page,
+ u32 col, int page_size)
+{
+ struct octeontx_nand_chip *octeontx_nand = (nand) ?
+ to_otx_nand(nand) : NULL;
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.ale_cmd.opcode = NDF_OP_ALE_CMD;
+ cmd.u.ale_cmd.adr_byte_num = addr_bytes;
+
+ /* set column bit for OOB area, assume OOB follows page */
+ if (octeontx_nand && octeontx_nand->oob_only)
+ col += page_size;
+
+ /* page is u64 for this generality, even if cmdfunc() passes int */
+ switch (addr_bytes) {
+ /* 4-8 bytes: page, then 2-byte col */
+ case 8:
+ cmd.u.ale_cmd.adr_byt8 = (page >> 40) & 0xff;
+ fallthrough;
+ case 7:
+ cmd.u.ale_cmd.adr_byt7 = (page >> 32) & 0xff;
+ fallthrough;
+ case 6:
+ cmd.u.ale_cmd.adr_byt6 = (page >> 24) & 0xff;
+ fallthrough;
+ case 5:
+ cmd.u.ale_cmd.adr_byt5 = (page >> 16) & 0xff;
+ fallthrough;
+ case 4:
+ cmd.u.ale_cmd.adr_byt4 = (page >> 8) & 0xff;
+ cmd.u.ale_cmd.adr_byt3 = page & 0xff;
+ cmd.u.ale_cmd.adr_byt2 = (col >> 8) & 0xff;
+ cmd.u.ale_cmd.adr_byt1 = col & 0xff;
+ break;
+ /* 1-3 bytes: just the page address */
+ case 3:
+ cmd.u.ale_cmd.adr_byt3 = (page >> 16) & 0xff;
+ fallthrough;
+ case 2:
+ cmd.u.ale_cmd.adr_byt2 = (page >> 8) & 0xff;
+ fallthrough;
+ case 1:
+ cmd.u.ale_cmd.adr_byt1 = page & 0xff;
+ break;
+ default:
+ break;
+ }
+
+ cmd.u.ale_cmd.alen1 = t3;
+ cmd.u.ale_cmd.alen2 = t1;
+ cmd.u.ale_cmd.alen3 = t5;
+ cmd.u.ale_cmd.alen4 = t2;
+ return ndf_submit(tn, &cmd);
+}
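+
+/*
+ * Address layout example (typical 2KiB-page chip with col_bytes = 2 and
+ * row_bytes = 3, so addr_bytes = 5): adr_byt1/2 carry the column and
+ * adr_byt3..5 carry the page (row) address.
+ */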
+
+static int ndf_queue_cmd_write(struct octeontx_nfc *tn, int len)
+{
+ union ndf_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.u.wr_cmd.opcode = NDF_OP_WR_CMD;
+ cmd.u.wr_cmd.data = len;
+ cmd.u.wr_cmd.wlen1 = t3;
+ cmd.u.wr_cmd.wlen2 = t1;
+ return ndf_submit(tn, &cmd);
+}
+
+static int ndf_build_pre_cmd(struct octeontx_nfc *tn, int cmd1,
+ int addr_bytes, u64 page, u32 col, int cmd2)
+{
+ struct nand_chip *nand = tn->controller.active;
+ struct octeontx_nand_chip *octeontx_nand;
+ struct ndf_set_tm_par_cmd *timings;
+ int width, page_size, rc;
+
+ /* Also called before chip probing is finished */
+ if (!nand) {
+ timings = &default_timing_parms;
+ page_size = default_page_size;
+ width = default_width;
+ } else {
+ octeontx_nand = to_otx_nand(nand);
+ timings = &octeontx_nand->timings;
+ page_size = nand->mtd.writesize;
+ if (nand->options & NAND_BUSWIDTH_16)
+ width = 2;
+ else
+ width = 1;
+ }
+ rc = ndf_queue_cmd_timing(tn, timings);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_bus(tn, NDF_BUS_ACQUIRE);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_chip(tn, 1, tn->selected_chip, width);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_wait(tn, t1);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_cle(tn, cmd1);
+ if (rc)
+ return rc;
+
+ if (addr_bytes) {
+ rc = ndf_build_wait_busy(tn);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_ale(tn, addr_bytes, nand,
+ page, col, page_size);
+ if (rc)
+ return rc;
+ }
+
+ /* CLE 2 */
+ if (cmd2) {
+ rc = ndf_build_wait_busy(tn);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_cle(tn, cmd2);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
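+
+/*
+ * The queued prolog is thus: SET_TM_PAR, BUS_ACQ, CHIP_EN, WAIT(t1),
+ * CLE(cmd1), then optionally WAIT_BUSY + ALE(address) and WAIT_BUSY +
+ * CLE(cmd2). ndf_build_post_cmd() appends the matching epilog (chip
+ * disable, bus release) and rings the doorbell.
+ */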
+
+static int ndf_build_post_cmd(struct octeontx_nfc *tn, int hold_time)
+{
+ int rc;
+
+ /* Deselect chip */
+ rc = ndf_queue_cmd_chip(tn, 0, 0, 0);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_wait(tn, t2);
+ if (rc)
+ return rc;
+
+ /* Release bus */
+ rc = ndf_queue_cmd_bus(tn, 0);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_wait(tn, hold_time);
+ if (rc)
+ return rc;
+
+ /*
+ * Last action is ringing the doorbell with number of bus
+ * acquire-releases cycles (currently 1).
+ */
+ writeq(1, tn->base + NDF_DRBELL);
+ return 0;
+}
+
+/* Setup the NAND DMA engine for a transfer. */
+static void ndf_setup_dma(struct octeontx_nfc *tn, int is_write,
+ dma_addr_t bus_addr, int len)
+{
+ u64 dma_cfg;
+
+ dma_cfg = FIELD_PREP(NDF_DMA_CFG_RW, is_write) |
+ FIELD_PREP(NDF_DMA_CFG_SIZE, (len >> 3) - 1);
+ dma_cfg |= NDF_DMA_CFG_EN;
+ writeq(bus_addr, tn->base + NDF_DMA_ADR);
+ writeq(dma_cfg, tn->base + NDF_DMA_CFG);
+}
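+
+/*
+ * NDF_DMA_CFG_SIZE counts 64-bit transfers ((len >> 3) - 1), so len must
+ * be a non-zero multiple of 8 bytes; ndf_page_write() warns on unaligned
+ * lengths for this reason.
+ */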
+
+static int octeontx_nand_reset(struct octeontx_nfc *tn)
+{
+ int rc;
+
+ rc = ndf_build_pre_cmd(tn, NAND_CMD_RESET, 0, 0, 0, 0);
+ if (rc)
+ return rc;
+
+ rc = ndf_build_wait_busy(tn);
+ if (rc)
+ return rc;
+
+ rc = ndf_build_post_cmd(tn, t2);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int ndf_read(struct octeontx_nfc *tn, int cmd1, int addr_bytes,
+ u64 page, u32 col, int cmd2, int len)
+{
+ dma_addr_t bus_addr = tn->use_status ? tn->stat_addr : tn->buf.dmaaddr;
+ struct nand_chip *nand = tn->controller.active;
+ int timing_mode, bytes, rc;
+ union ndf_cmd cmd;
+ u64 start, end;
+
+ pr_debug("%s(%p, 0x%x, 0x%x, 0x%llx, 0x%x, 0x%x, 0x%x)\n", __func__,
+ tn, cmd1, addr_bytes, page, col, cmd2, len);
+ if (!nand)
+ timing_mode = default_onfi_timing;
+ else
+ timing_mode = nand->onfi_timing_mode_default;
+
+ /* Build the command and address cycles */
+ rc = ndf_build_pre_cmd(tn, cmd1, addr_bytes, page, col, cmd2);
+ if (rc) {
+ dev_err(tn->dev, "Build pre command failed\n");
+ return rc;
+ }
+
+ /* This waits for some time, then waits for busy to be de-asserted. */
+ rc = ndf_build_wait_busy(tn);
+ if (rc) {
+ dev_err(tn->dev, "Wait timeout\n");
+ return rc;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (timing_mode < 4)
+ cmd.u.rd_cmd.opcode = NDF_OP_RD_CMD;
+ else
+ cmd.u.rd_cmd.opcode = NDF_OP_RD_EDO_CMD;
+
+ cmd.u.rd_cmd.data = len;
+ cmd.u.rd_cmd.rlen1 = t7;
+ cmd.u.rd_cmd.rlen2 = t3;
+ cmd.u.rd_cmd.rlen3 = t1;
+ cmd.u.rd_cmd.rlen4 = t7;
+ rc = ndf_submit(tn, &cmd);
+ if (rc) {
+ dev_err(tn->dev, "Error submitting command\n");
+ return rc;
+ }
+
+ start = (u64)bus_addr;
+ ndf_setup_dma(tn, 0, bus_addr, len);
+
+ rc = ndf_build_post_cmd(tn, t2);
+ if (rc) {
+ dev_err(tn->dev, "Build post command failed\n");
+ return rc;
+ }
+
+ /* Wait for the DMA to complete */
+ rc = ndf_wait(tn);
+ if (rc) {
+ dev_err(tn->dev, "DMA timed out\n");
+ return rc;
+ }
+
+ end = readq(tn->base + NDF_DMA_ADR);
+ bytes = end - start;
+
+ /* Make sure NDF is really done */
+ rc = ndf_wait_idle(tn);
+ if (rc) {
+ dev_err(tn->dev, "poll idle failed\n");
+ return rc;
+ }
+
+ pr_debug("%s: Read %d bytes\n", __func__, bytes);
+ return bytes;
+}
+
+static int octeontx_nand_get_features(struct mtd_info *mtd,
+ struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para)
+{
+ struct nand_chip *nand = chip;
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ int len = 8;
+ int rc;
+
+ pr_debug("%s: feature addr: 0x%x\n", __func__, feature_addr);
+ memset(tn->buf.dmabuf, 0xff, len);
+ tn->buf.data_index = 0;
+ tn->buf.data_len = 0;
+ rc = ndf_read(tn, NAND_CMD_GET_FEATURES, 1, feature_addr, 0, 0, len);
+ /* ndf_read() returns the number of bytes read on success */
+ if (rc < 0)
+ return rc;
+
+ memcpy(subfeature_para, tn->buf.dmabuf, ONFI_SUBFEATURE_PARAM_LEN);
+
+ return 0;
+}
+
+static int octeontx_nand_set_features(struct mtd_info *mtd,
+ struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para)
+{
+ struct nand_chip *nand = chip;
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ const int len = ONFI_SUBFEATURE_PARAM_LEN;
+ int rc;
+
+ rc = ndf_build_pre_cmd(tn, NAND_CMD_SET_FEATURES,
+ 1, feature_addr, 0, 0);
+ if (rc)
+ return rc;
+
+ memcpy(tn->buf.dmabuf, subfeature_para, len);
+ memset(tn->buf.dmabuf + len, 0, 8 - len);
+
+ ndf_setup_dma(tn, 1, tn->buf.dmaaddr, 8);
+
+ rc = ndf_queue_cmd_write(tn, 8);
+ if (rc)
+ return rc;
+
+ rc = ndf_build_wait_busy(tn);
+ if (rc)
+ return rc;
+
+ rc = ndf_build_post_cmd(tn, t2);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Read a page from NAND. If the buffer has room, the out-of-band
+ * data will be included.
+ */
+static int ndf_page_read(struct octeontx_nfc *tn, u64 page, int col, int len)
+{
+ debug("%s(%p, 0x%llx, 0x%x, 0x%x) active: %p\n", __func__,
+ tn, page, col, len, tn->controller.active);
+ struct nand_chip *nand = tn->controller.active;
+ struct octeontx_nand_chip *chip = to_otx_nand(nand);
+ int addr_bytes = chip->row_bytes + chip->col_bytes;
+
+ memset(tn->buf.dmabuf, 0xff, len);
+ return ndf_read(tn, NAND_CMD_READ0, addr_bytes,
+ page, col, NAND_CMD_READSTART, len);
+}
+
+/* Erase a NAND block */
+static int ndf_block_erase(struct octeontx_nfc *tn, u64 page_addr)
+{
+ struct nand_chip *nand = tn->controller.active;
+ struct octeontx_nand_chip *chip = to_otx_nand(nand);
+ int addr_bytes = chip->row_bytes;
+ int rc;
+
+ rc = ndf_build_pre_cmd(tn, NAND_CMD_ERASE1, addr_bytes,
+ page_addr, 0, NAND_CMD_ERASE2);
+ if (rc)
+ return rc;
+
+ /* Wait for R_B to signal erase is complete */
+ rc = ndf_build_wait_busy(tn);
+ if (rc)
+ return rc;
+
+ rc = ndf_build_post_cmd(tn, t2);
+ if (rc)
+ return rc;
+
+ /* Wait until the command queue is idle */
+ return ndf_wait_idle(tn);
+}
+
+/*
+ * Write a page (or less) to NAND.
+ */
+static int ndf_page_write(struct octeontx_nfc *tn, int page)
+{
+ int len, rc;
+ struct nand_chip *nand = tn->controller.active;
+ struct octeontx_nand_chip *chip = to_otx_nand(nand);
+ int addr_bytes = chip->row_bytes + chip->col_bytes;
+
+ len = tn->buf.data_len - tn->buf.data_index;
+ chip->oob_only = (tn->buf.data_index >= nand->mtd.writesize);
+ WARN_ON_ONCE(len & 0x7);
+
+ ndf_setup_dma(tn, 1, tn->buf.dmaaddr + tn->buf.data_index, len);
+ rc = ndf_build_pre_cmd(tn, NAND_CMD_SEQIN, addr_bytes, page, 0, 0);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_write(tn, len);
+ if (rc)
+ return rc;
+
+ rc = ndf_queue_cmd_cle(tn, NAND_CMD_PAGEPROG);
+ if (rc)
+ return rc;
+
+ /* Wait for R_B to signal program is complete */
+ rc = ndf_build_wait_busy(tn);
+ if (rc)
+ return rc;
+
+ rc = ndf_build_post_cmd(tn, t2);
+ if (rc)
+ return rc;
+
+ /* Wait for the DMA to complete */
+ rc = ndf_wait(tn);
+ if (rc)
+ return rc;
+
+ /* Data transfer is done but NDF is not, it is waiting for R/B# */
+ return ndf_wait_idle(tn);
+}
+
+static void octeontx_nand_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nand_chip *octeontx_nand = to_otx_nand(nand);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ int rc;
+
+ tn->selected_chip = octeontx_nand->cs;
+ if (tn->selected_chip < 0 || tn->selected_chip >= NAND_MAX_CHIPS) {
+ dev_err(tn->dev, "invalid chip select\n");
+ return;
+ }
+
+ tn->use_status = false;
+
+ pr_debug("%s(%p, 0x%x, 0x%x, 0x%x) cs: %d\n", __func__, mtd, command,
+ column, page_addr, tn->selected_chip);
+ switch (command) {
+ case NAND_CMD_READID:
+ tn->buf.data_index = 0;
+ octeontx_nand->oob_only = false;
+ rc = ndf_read(tn, command, 1, column, 0, 0, 8);
+ if (rc < 0)
+ dev_err(tn->dev, "READID failed with %d\n", rc);
+ else
+ tn->buf.data_len = rc;
+ break;
+
+ case NAND_CMD_READOOB:
+ octeontx_nand->oob_only = true;
+ tn->buf.data_index = 0;
+ tn->buf.data_len = 0;
+ rc = ndf_page_read(tn, page_addr, column, mtd->oobsize);
+ if (rc < mtd->oobsize)
+ dev_err(tn->dev, "READOOB failed with %d\n", rc);
+ else
+ tn->buf.data_len = rc;
+ break;
+
+ case NAND_CMD_READ0:
+ octeontx_nand->oob_only = false;
+ tn->buf.data_index = 0;
+ tn->buf.data_len = 0;
+ rc = ndf_page_read(tn, page_addr, column,
+ mtd->writesize + mtd->oobsize);
+
+ if (rc < mtd->writesize + mtd->oobsize)
+ dev_err(tn->dev, "READ0 failed with %d\n", rc);
+ else
+ tn->buf.data_len = rc;
+ break;
+
+ case NAND_CMD_STATUS:
+ /* used in oob/not states */
+ tn->use_status = true;
+ rc = ndf_read(tn, command, 0, 0, 0, 0, 8);
+ if (rc < 0)
+ dev_err(tn->dev, "STATUS failed with %d\n", rc);
+ break;
+
+ case NAND_CMD_RESET:
+ /* used in oob/not states */
+ rc = octeontx_nand_reset(tn);
+ if (rc < 0)
+ dev_err(tn->dev, "RESET failed with %d\n", rc);
+ break;
+
+ case NAND_CMD_PARAM:
+ octeontx_nand->oob_only = false;
+ tn->buf.data_index = 0;
+ rc = ndf_read(tn, command, 1, 0, 0, 0,
+ min(tn->buf.dmabuflen, 3 * 512));
+ if (rc < 0)
+ dev_err(tn->dev, "PARAM failed with %d\n", rc);
+ else
+ tn->buf.data_len = rc;
+ break;
+
+ case NAND_CMD_RNDOUT:
+ tn->buf.data_index = column;
+ break;
+
+ case NAND_CMD_ERASE1:
+ if (ndf_block_erase(tn, page_addr))
+ dev_err(tn->dev, "ERASE1 failed\n");
+ break;
+
+ case NAND_CMD_ERASE2:
+ /* We do all erase processing in the first command, so ignore
+ * this one.
+ */
+ break;
+
+ case NAND_CMD_SEQIN:
+ octeontx_nand->oob_only = (column >= mtd->writesize);
+ tn->buf.data_index = column;
+ tn->buf.data_len = column;
+
+ octeontx_nand->selected_page = page_addr;
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ rc = ndf_page_write(tn, octeontx_nand->selected_page);
+ if (rc)
+ dev_err(tn->dev, "PAGEPROG failed with %d\n", rc);
+ break;
+
+ case NAND_CMD_SET_FEATURES:
+ octeontx_nand->oob_only = false;
+ /* assume 4 bytes of data (tn->buf.data_len == 4) are already staged there */
+ rc = octeontx_nand_set_features(mtd, nand,
+ page_addr, tn->buf.dmabuf);
+ if (rc)
+ dev_err(tn->dev, "SET_FEATURES failed with %d\n", rc);
+ break;
+
+ case NAND_CMD_GET_FEATURES:
+ octeontx_nand->oob_only = false;
+ rc = octeontx_nand_get_features(mtd, nand,
+ page_addr, tn->buf.dmabuf);
+ if (!rc) {
+ tn->buf.data_index = 0;
+ tn->buf.data_len = 4;
+ } else {
+ dev_err(tn->dev, "GET_FEATURES failed with %d\n", rc);
+ }
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ dev_err(tn->dev, "unhandled nand cmd: %x\n", command);
+ }
+}
+
+static int octeontx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
+ int ret;
+
+ ret = ndf_wait_idle(tn);
+ return (ret < 0) ? -EIO : 0;
+}
+
+/* check compatibility with ONFI timing mode#N, and optionally apply */
+/* TODO: Implement chipnr support? */
+static int octeontx_nand_setup_dat_intf(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ static const bool check_only;
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nand_chip *chip = to_otx_nand(nand);
+ static u64 t_wc_n[MAX_ONFI_MODE + 2]; /* cache a mode signature */
+ int mode; /* deduced mode number, for reporting and restricting */
+ int rc;
+
+ /*
+ * Cache timing modes for reporting, and to reduce needless change.
+ *
+ * Challenge: the caller does not pass an ONFI mode#, but reporting the
+ * mode and restricting to a maximum, or a list, are useful for
+ * diagnosing new hardware. So use tWC_min, distinct and monotonic
+ * across modes, to discover the requested/accepted mode number.
+ */
+ for (mode = MAX_ONFI_MODE; mode >= 0 && !t_wc_n[0]; mode--) {
+ const struct nand_sdr_timings *t;
+
+ t = onfi_async_timing_mode_to_sdr_timings(mode);
+ if (!t)
+ continue;
+ t_wc_n[mode] = t->tWC_min;
+ }
+
+ if (!conf) {
+ rc = -EINVAL;
+ } else if (check_only) {
+ rc = 0;
+ } else if (nand->data_interface &&
+ chip->iface_set && chip->iface_mode == mode) {
+ /*
+ * Cases:
+ * - called from nand_reset, which clears DDR timing
+ * mode back to SDR. BUT if we're already in SDR,
+ * timing mode persists over resets.
+ * While mtd/nand layer only supports SDR,
+ * this is always safe. And this driver only supports SDR.
+ *
+ * - called from post-power-event nand_reset (maybe
+ * NFC+flash power down, or system hibernate.
+ * Address this when CONFIG_PM support added
+ */
+ rc = 0;
+ } else {
+ rc = octeontx_nfc_chip_set_timings(chip, &conf->timings.sdr);
+ if (!rc) {
+ chip->iface_mode = mode;
+ chip->iface_set = true;
+ }
+ }
+ return rc;
+}
+
+static void octeontx_bch_reset(void)
+{
+}
+
+/*
+ * Given a page, calculate the ECC code
+ *
+ * mtd: MTD data structure
+ * ihandle: DMA handle of the buffer to calculate ECC on
+ * code: Buffer to hold ECC data
+ *
+ * Return the number of errors reported by the BCH engine on success,
+ * or -1 on failure
+ */
+static int octeontx_nand_bch_calculate_ecc_internal(struct mtd_info *mtd,
+ dma_addr_t ihandle,
+ u8 *code)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ int rc;
+ int i;
+ static u8 *ecc_buffer;
+ static int ecc_size;
+ static unsigned long ecc_handle;
+ union bch_resp *r = tn->bch_resp;
+
+ if (!ecc_buffer || ecc_size < nand->ecc.size) {
+ ecc_size = nand->ecc.size;
+ ecc_buffer = dma_alloc_coherent(ecc_size,
+ (unsigned long *)&ecc_handle);
+ }
+
+ memset(ecc_buffer, 0, nand->ecc.bytes);
+
+ r->u16 = 0;
+ __iowmb(); /* flush done=0 before making request */
+
+ rc = octeontx_bch_encode(bch_vf, ihandle, nand->ecc.size,
+ nand->ecc.strength,
+ (dma_addr_t)ecc_handle, tn->bch_rhandle);
+
+ if (!rc) {
+ octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);
+ } else {
+ dev_err(tn->dev, "octeontx_bch_encode failed\n");
+ return -1;
+ }
+
+ if (!r->s.done || r->s.uncorrectable) {
+ dev_err(tn->dev,
+ "%s timeout, done:%d uncorr:%d corr:%d erased:%d\n",
+ __func__, r->s.done, r->s.uncorrectable,
+ r->s.num_errors, r->s.erased);
+ octeontx_bch_reset();
+ return -1;
+ }
+
+ memcpy(code, ecc_buffer, nand->ecc.bytes);
+
+ for (i = 0; i < nand->ecc.bytes; i++)
+ code[i] ^= tn->eccmask[i];
+
+ return tn->bch_resp->s.num_errors;
+}
+
+/*
+ * Given a page, calculate the ECC code
+ *
+ * mtd: MTD block structure
+ * dat: raw data to calculate the ECC for
+ * ecc_code: buffer for ECC
+ */
+static int octeontx_nand_bch_calculate(struct mtd_info *mtd,
+ const u8 *dat, u8 *ecc_code)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ dma_addr_t handle = dma_map_single((u8 *)dat,
+ nand->ecc.size, DMA_TO_DEVICE);
+ int ret;
+
+ ret = octeontx_nand_bch_calculate_ecc_internal(mtd, handle,
+ (void *)ecc_code);
+
+ return ret;
+}
+
+/*
+ * Detect and correct multi-bit ECC for a page
+ *
+ * mtd: MTD block structure
+ * dat: raw data read from the chip
+ * read_ecc: ECC from the chip (unused)
+ * isnull: unused
+ *
+ * Returns number of bits corrected or -1 if unrecoverable
+ */
+static int octeontx_nand_bch_correct(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *isnull)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ int i = nand->ecc.size + nand->ecc.bytes;
+ static u8 *data_buffer;
+ static dma_addr_t ihandle;
+ static int buffer_size;
+ dma_addr_t ohandle;
+ union bch_resp *r = tn->bch_resp;
+ int rc;
+
+ if (i > buffer_size) {
+ if (buffer_size)
+ free(data_buffer);
+ data_buffer = dma_alloc_coherent(i,
+ (unsigned long *)&ihandle);
+ if (!data_buffer) {
+ dev_err(tn->dev,
+ "%s: Could not allocate %d bytes for buffer\n",
+ __func__, i);
+ goto error;
+ }
+ buffer_size = i;
+ }
+
+ memcpy(data_buffer, dat, nand->ecc.size);
+ memcpy(data_buffer + nand->ecc.size, read_ecc, nand->ecc.bytes);
+
+ for (i = 0; i < nand->ecc.bytes; i++)
+ data_buffer[nand->ecc.size + i] ^= tn->eccmask[i];
+
+ r->u16 = 0;
+ __iowmb(); /* flush done=0 before making request */
+
+ ohandle = dma_map_single(dat, nand->ecc.size, DMA_FROM_DEVICE);
+ rc = octeontx_bch_decode(bch_vf, ihandle, nand->ecc.size,
+ nand->ecc.strength, ohandle, tn->bch_rhandle);
+
+ if (!rc)
+ octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);
+
+ if (rc) {
+ dev_err(tn->dev, "octeontx_bch_decode failed\n");
+ goto error;
+ }
+
+ if (!r->s.done) {
+ dev_err(tn->dev, "Error: BCH engine timeout\n");
+ octeontx_bch_reset();
+ goto error;
+ }
+
+ if (r->s.erased) {
+ debug("Info: BCH block is erased\n");
+ return 0;
+ }
+
+ if (r->s.uncorrectable) {
+ debug("Cannot correct NAND block, response: 0x%x\n",
+ r->u16);
+ goto error;
+ }
+
+ return r->s.num_errors;
+
+error:
+ debug("Error performing bch correction\n");
+ return -1;
+}
+
+static void octeontx_nand_bch_hwctl(struct mtd_info *mtd, int mode)
+{
+ /* Do nothing. */
+}
+
+static int octeontx_nand_hw_bch_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *p;
+ u8 *ecc_code = chip->buffers->ecccode;
+ unsigned int max_bitflips = 0;
+
+ /* chip->read_buf() insists on sequential order, we do OOB first */
+ memcpy(chip->oob_poi, tn->buf.dmabuf + mtd->writesize, mtd->oobsize);
+
+ /* Use private buffer as input for ECC correction */
+ p = tn->buf.dmabuf;
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ debug("Correcting block offset %lx, ecc offset %x\n",
+ p - buf, i);
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ debug("Cannot correct NAND page %d\n", page);
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Copy corrected data to caller's buffer now */
+ memcpy(buf, tn->buf.dmabuf, mtd->writesize);
+
+ return max_bitflips;
+}
+
+static int octeontx_nand_hw_bch_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const u8 *p;
+ u8 *ecc_calc = chip->buffers->ecccalc;
+
+ debug("%s(buf?%p, oob%d p%x)\n",
+ __func__, buf, oob_required, page);
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_calc[i] = 0xFF;
+
+ /* Copy the page data from caller's buffers to private buffer */
+ chip->write_buf(mtd, buf, mtd->writesize);
+ /* Use private date as source for ECC calculation */
+ p = tn->buf.dmabuf;
+
+ /* Hardware ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int ret;
+
+ ret = chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ if (ret < 0)
+ debug("calculate(mtd, p?%p, &ecc_calc[%d]?%p) returned %d\n",
+ p, i, &ecc_calc[i], ret);
+
+ debug("block offset %lx, ecc offset %x\n", p - buf, i);
+ }
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* Store resulting OOB into private buffer, will be sent to HW */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int octeontx_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+/**
+ * octeontx_nand_write_oob_std - [REPLACEABLE] the most common OOB data write
+ * function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int octeontx_nand_write_oob_std(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ int status = 0;
+ const u8 *buf = chip->oob_poi;
+ int length = mtd->oobsize;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+ chip->write_buf(mtd, buf, length);
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/**
+ * octeontx_nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int octeontx_nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ chip->read_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int octeontx_nand_read_oob_std(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+static int octeontx_nand_calc_bch_ecc_strength(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ int nsteps = mtd->writesize / ecc->size;
+ int oobchunk = mtd->oobsize / nsteps;
+
+ /* ecc->strength determines ecc_level and OOB's ecc_bytes. */
+ const u8 strengths[] = {4, 8, 16, 24, 32, 40, 48, 56, 60, 64};
+ /* first set the desired ecc_level to match strengths[] */
+ int index = ARRAY_SIZE(strengths) - 1;
+ int need;
+
+ while (index > 0 && !(ecc->options & NAND_ECC_MAXIMIZE) &&
+ strengths[index - 1] >= ecc->strength)
+ index--;
+
+ /* then step the strength down until the ECC bytes fit the OOB
+ * chunk, keeping 2 bytes for the bad block marker
+ */
+ for (;;) {
+ need = DIV_ROUND_UP(15 * strengths[index], 8);
+ if (need <= oobchunk - 2 || !index)
+ break;
+ index--;
+ }
+
+ debug("%s: steps ds: %d, strength ds: %d\n", __func__,
+ nand->ecc_step_ds, nand->ecc_strength_ds);
+ ecc->strength = strengths[index];
+ ecc->bytes = need;
+ debug("%s: strength: %d, bytes: %d\n", __func__, ecc->strength,
+ ecc->bytes);
+
+ if (!tn->eccmask)
+ tn->eccmask = devm_kzalloc(tn->dev, ecc->bytes, GFP_KERNEL);
+ if (!tn->eccmask)
+ return -ENOMEM;
+
+ return 0;
+}
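+
+/*
+ * Worked example (illustrative geometry): writesize 2048, oobsize 64 and
+ * ecc->size 512 give nsteps = 4 and oobchunk = 16. Strength 8 would need
+ * DIV_ROUND_UP(15 * 8, 8) = 15 bytes > 16 - 2, so the loop settles on
+ * strength 4 with 8 ECC bytes per step.
+ */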
+
+/* sample the BCH signature of an erased (all 0xff) page,
+ * to XOR into all page traffic, so erased pages have no ECC errors
+ */
+static int octeontx_bch_save_empty_eccmask(struct nand_chip *nand)
+{
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
+ unsigned int eccsize = nand->ecc.size;
+ unsigned int eccbytes = nand->ecc.bytes;
+ u8 erased_ecc[eccbytes];
+ unsigned long erased_handle;
+ unsigned char *erased_page = dma_alloc_coherent(eccsize,
+ &erased_handle);
+ int i;
+ int rc = 0;
+
+ if (!erased_page)
+ return -ENOMEM;
+
+ memset(erased_page, 0xff, eccsize);
+ memset(erased_ecc, 0, eccbytes);
+
+ rc = octeontx_nand_bch_calculate_ecc_internal(mtd,
+ (dma_addr_t)erased_handle,
+ erased_ecc);
+
+ free(erased_page);
+
+ for (i = 0; i < eccbytes; i++)
+ tn->eccmask[i] = erased_ecc[i] ^ 0xff;
+
+ return rc;
+}
+
+static void octeontx_nfc_chip_sizing(struct nand_chip *nand)
+{
+ struct octeontx_nand_chip *chip = to_otx_nand(nand);
+ struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+ chip->row_bytes = nand->onfi_params.addr_cycles & 0xf;
+ chip->col_bytes = nand->onfi_params.addr_cycles >> 4;
+ debug("%s(%p) row bytes: %d, col bytes: %d, ecc mode: %d\n",
+ __func__, nand, chip->row_bytes, chip->col_bytes, ecc->mode);
+
+ /*
+ * HW_BCH using OcteonTX BCH engine, or SOFT_BCH laid out in
+ * HW_BCH-compatible fashion, depending on devtree advice
+ * and kernel config.
+ * BCH/NFC hardware capable of subpage ops, not implemented.
+ */
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+ debug("%s: start steps: %d, size: %d, bytes: %d\n",
+ __func__, ecc->steps, ecc->size, ecc->bytes);
+ debug("%s: step ds: %d, strength ds: %d\n", __func__,
+ nand->ecc_step_ds, nand->ecc_strength_ds);
+
+ if (ecc->mode != NAND_ECC_NONE) {
+ int nsteps = ecc->steps ? ecc->steps : 1;
+
+ if (ecc->size && ecc->size != mtd->writesize)
+ nsteps = mtd->writesize / ecc->size;
+ else if (mtd->writesize > def_ecc_size &&
+ !(mtd->writesize & (def_ecc_size - 1)))
+ nsteps = mtd->writesize / def_ecc_size;
+ ecc->steps = nsteps;
+ ecc->size = mtd->writesize / nsteps;
+ ecc->bytes = mtd->oobsize / nsteps;
+
+ if (nand->ecc_strength_ds)
+ ecc->strength = nand->ecc_strength_ds;
+ if (nand->ecc_step_ds)
+ ecc->size = nand->ecc_step_ds;
+ /*
+ * no subpage ops, but set subpage-shift to match ecc->steps
+ * so mtd_nandbiterrs tests appropriate boundaries
+ */
+ if (!mtd->subpage_sft && !(ecc->steps & (ecc->steps - 1)))
+ mtd->subpage_sft = fls(ecc->steps) - 1;
+
+ if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
+ debug("%s: ecc mode: %d\n", __func__, ecc->mode);
+ if (ecc->mode != NAND_ECC_SOFT &&
+ !octeontx_nand_calc_bch_ecc_strength(nand)) {
+ struct octeontx_nfc *tn =
+ to_otx_nfc(nand->controller);
+
+ debug("Using hardware BCH engine support\n");
+ ecc->mode = NAND_ECC_HW_SYNDROME;
+ ecc->read_page = octeontx_nand_hw_bch_read_page;
+ ecc->write_page =
+ octeontx_nand_hw_bch_write_page;
+ ecc->read_page_raw =
+ octeontx_nand_read_page_raw;
+ ecc->write_page_raw =
+ octeontx_nand_write_page_raw;
+ ecc->read_oob = octeontx_nand_read_oob_std;
+ ecc->write_oob = octeontx_nand_write_oob_std;
+
+ ecc->calculate = octeontx_nand_bch_calculate;
+ ecc->correct = octeontx_nand_bch_correct;
+ ecc->hwctl = octeontx_nand_bch_hwctl;
+
+ debug("NAND chip %d using hw_bch\n",
+ tn->selected_chip);
+ debug(" %d bytes ECC per %d byte block\n",
+ ecc->bytes, ecc->size);
+ debug(" for %d bits of correction per block.",
+ ecc->strength);
+ octeontx_nand_calc_ecc_layout(nand);
+ octeontx_bch_save_empty_eccmask(nand);
+ }
+ }
+ }
+}
+
+static int octeontx_nfc_chip_init(struct octeontx_nfc *tn, struct udevice *dev,
+ ofnode node)
+{
+ struct octeontx_nand_chip *chip;
+ struct nand_chip *nand;
+ struct mtd_info *mtd;
+ int ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ debug("%s: Getting chip select\n", __func__);
+ ret = ofnode_read_s32(node, "reg", &chip->cs);
+ if (ret) {
+ dev_err(dev, "could not retrieve reg property: %d\n", ret);
+ return ret;
+ }
+
+ if (chip->cs >= NAND_MAX_CHIPS) {
+ dev_err(dev, "invalid reg value: %u (max CS = 7)\n", chip->cs);
+ return -EINVAL;
+ }
+ debug("%s: chip select: %d\n", __func__, chip->cs);
+ nand = &chip->nand;
+ nand->controller = &tn->controller;
+ if (!tn->controller.active)
+ tn->controller.active = nand;
+
+ debug("%s: Setting flash node\n", __func__);
+ nand_set_flash_node(nand, node);
+
+ nand->options = 0;
+ nand->select_chip = octeontx_nand_select_chip;
+ nand->cmdfunc = octeontx_nand_cmdfunc;
+ nand->waitfunc = octeontx_nand_waitfunc;
+ nand->read_byte = octeontx_nand_read_byte;
+ nand->read_buf = octeontx_nand_read_buf;
+ nand->write_buf = octeontx_nand_write_buf;
+ nand->onfi_set_features = octeontx_nand_set_features;
+ nand->onfi_get_features = octeontx_nand_get_features;
+ nand->setup_data_interface = octeontx_nand_setup_dat_intf;
+
+ mtd = nand_to_mtd(nand);
+ debug("%s: mtd: %p\n", __func__, mtd);
+ mtd->dev->parent = dev;
+
+ debug("%s: NDF_MISC: 0x%llx\n", __func__,
+ readq(tn->base + NDF_MISC));
+
+ /* TODO: support more than 1 chip */
+ debug("%s: Scanning identification\n", __func__);
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ return ret;
+
+ debug("%s: Sizing chip\n", __func__);
+ octeontx_nfc_chip_sizing(nand);
+
+ debug("%s: Scanning tail\n", __func__);
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ debug("%s: Registering mtd\n", __func__);
+ ret = nand_register(0, mtd);
+
+ debug("%s: Adding tail\n", __func__);
+ list_add_tail(&chip->node, &tn->chips);
+ return 0;
+}
+
+static int octeontx_nfc_chips_init(struct octeontx_nfc *tn)
+{
+ struct udevice *dev = tn->dev;
+ ofnode node = dev_ofnode(dev);
+ ofnode nand_node;
+ int nr_chips = of_get_child_count(node);
+ int ret;
+
+ debug("%s: node: %s\n", __func__, ofnode_get_name(node));
+ debug("%s: %d chips\n", __func__, nr_chips);
+ if (nr_chips > NAND_MAX_CHIPS) {
+ dev_err(dev, "too many NAND chips: %d\n", nr_chips);
+ return -EINVAL;
+ }
+
+ if (!nr_chips) {
+ debug("no DT NAND chips found\n");
+ return -ENODEV;
+ }
+
+ pr_info("%s: scanning %d chips DTs\n", __func__, nr_chips);
+
+ ofnode_for_each_subnode(nand_node, node) {
+ debug("%s: Calling octeontx_nfc_chip_init(%p, %s, %ld)\n",
+ __func__, tn, dev->name, nand_node.of_offset);
+ ret = octeontx_nfc_chip_init(tn, dev, nand_node);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Reset NFC and initialize registers. */
+static int octeontx_nfc_init(struct octeontx_nfc *tn)
+{
+ const struct nand_sdr_timings *timings;
+ u64 ndf_misc;
+ int rc;
+
+ /* Initialize values and reset the fifo */
+ ndf_misc = readq(tn->base + NDF_MISC);
+
+ ndf_misc &= ~NDF_MISC_EX_DIS;
+ ndf_misc |= (NDF_MISC_BT_DIS | NDF_MISC_RST_FF);
+ writeq(ndf_misc, tn->base + NDF_MISC);
+ debug("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));
+
+ /* Bring the fifo out of reset */
+ ndf_misc &= ~(NDF_MISC_RST_FF);
+
+ /* Maximum of co-processor cycles for glitch filtering */
+ ndf_misc |= FIELD_PREP(NDF_MISC_WAIT_CNT, 0x3f);
+
+ writeq(ndf_misc, tn->base + NDF_MISC);
+
+ /* Set timing parameters to onfi mode 0 for probing */
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+ rc = set_default_timings(tn, timings);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int octeontx_pci_nand_probe(struct udevice *dev)
+{
+ struct octeontx_nfc *tn = dev_get_priv(dev);
+ int ret;
+ static bool probe_done;
+
+ debug("%s(%s) tn: %p\n", __func__, dev->name, tn);
+ if (probe_done)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
+ bch_vf = octeontx_bch_getv();
+ if (!bch_vf) {
+ struct octeontx_probe_device *probe_dev;
+
+ debug("%s: bch not yet initialized\n", __func__);
+ probe_dev = calloc(1, sizeof(*probe_dev));
+ if (!probe_dev) {
+ printf("%s: Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ probe_dev->dev = dev;
+ INIT_LIST_HEAD(&probe_dev->list);
+ list_add_tail(&probe_dev->list,
+ &octeontx_pci_nand_deferred_devices);
+ debug("%s: Defering probe until after BCH initialization\n",
+ __func__);
+ return 0;
+ }
+ }
+
+ tn->dev = dev;
+ INIT_LIST_HEAD(&tn->chips);
+
+ tn->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
+ if (!tn->base) {
+ ret = -EINVAL;
+ goto release;
+ }
+ debug("%s: bar at %p\n", __func__, tn->base);
+ tn->buf.dmabuflen = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
+ tn->buf.dmabuf = dma_alloc_coherent(tn->buf.dmabuflen,
+ (unsigned long *)&tn->buf.dmaaddr);
+ if (!tn->buf.dmabuf) {
+ ret = -ENOMEM;
+ debug("%s: Could not allocate DMA buffer\n", __func__);
+ goto unclk;
+ }
+
+ /* one hw-bch response, for one outstanding transaction */
+ tn->bch_resp = dma_alloc_coherent(sizeof(*tn->bch_resp),
+ (unsigned long *)&tn->bch_rhandle);
+
+ tn->stat = dma_alloc_coherent(8, (unsigned long *)&tn->stat_addr);
+ if (!tn->stat || !tn->bch_resp) {
+ debug("%s: Could not allocate bch status or response\n",
+ __func__);
+ ret = -ENOMEM;
+ goto unclk;
+ }
+
+ debug("%s: Calling octeontx_nfc_init()\n", __func__);
+ octeontx_nfc_init(tn);
+ debug("%s: Initializing chips\n", __func__);
+ ret = octeontx_nfc_chips_init(tn);
+ debug("%s: init chips ret: %d\n", __func__, ret);
+ if (ret) {
+ if (ret != -ENODEV)
+ dev_err(dev, "failed to init nand chips\n");
+ goto unclk;
+ }
+ dev_info(dev, "probed\n");
+ return 0;
+
+unclk:
+release:
+ return ret;
+}
+
+int octeontx_pci_nand_disable(struct udevice *dev)
+{
+ struct octeontx_nfc *tn = dev_get_priv(dev);
+ u64 dma_cfg;
+ u64 ndf_misc;
+
+ debug("%s: Disabling NAND device %s\n", __func__, dev->name);
+ dma_cfg = readq(tn->base + NDF_DMA_CFG);
+ dma_cfg &= ~NDF_DMA_CFG_EN;
+ dma_cfg |= NDF_DMA_CFG_CLR;
+ writeq(dma_cfg, tn->base + NDF_DMA_CFG);
+
+ /* Disable execution and put FIFO in reset mode */
+ ndf_misc = readq(tn->base + NDF_MISC);
+ ndf_misc |= NDF_MISC_EX_DIS | NDF_MISC_RST_FF;
+ writeq(ndf_misc, tn->base + NDF_MISC);
+ ndf_misc &= ~NDF_MISC_RST_FF;
+ writeq(ndf_misc, tn->base + NDF_MISC);
+#ifdef DEBUG
+ printf("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));
+#endif
+ /* Clear any interrupts and enable bits */
+ writeq(~0ull, tn->base + NDF_INT_ENA_W1C);
+ writeq(~0ull, tn->base + NDF_INT);
+ debug("%s: NDF_ST_REG: 0x%llx\n", __func__,
+ readq(tn->base + NDF_ST_REG));
+ return 0;
+}
+
+/**
+ * Since it's possible (and even likely) that the NAND device will be probed
+ * before the BCH device has been probed, we may need to defer the probing.
+ *
+ * In this case, the initial probe returns success but the actual probing
+ * is deferred until the BCH VF has been probed.
+ *
+ * @return 0 for success, otherwise error
+ */
+int octeontx_pci_nand_deferred_probe(void)
+{
+ int rc = 0;
+ struct octeontx_probe_device *pdev;
+
+ debug("%s: Performing deferred probing\n", __func__);
+ list_for_each_entry(pdev, &octeontx_pci_nand_deferred_devices, list) {
+ debug("%s: Probing %s\n", __func__, pdev->dev->name);
+ /* clear DM_FLAG_ACTIVATED so device_probe() runs the probe again */
+ dev_bic_flags(pdev->dev, DM_FLAG_ACTIVATED);
+ rc = device_probe(pdev->dev);
+ if (rc && rc != -ENODEV) {
+ printf("%s: Error %d with deferred probe of %s\n",
+ __func__, rc, pdev->dev->name);
+ break;
+ }
+ }
+ return rc;
+}
+
+static const struct pci_device_id octeontx_nfc_pci_id_table[] = {
+ { PCI_VDEVICE(CAVIUM, 0xA04F) },
+ {}
+};
+
+static int octeontx_nand_of_to_plat(struct udevice *dev)
+{
+ return 0;
+}
+
+static const struct udevice_id octeontx_nand_ids[] = {
+ { .compatible = "cavium,cn8130-nand" },
+ { },
+};
+
+U_BOOT_DRIVER(octeontx_pci_nand) = {
+ .name = OCTEONTX_NAND_DRIVER_NAME,
+ .id = UCLASS_MTD,
+ .of_match = of_match_ptr(octeontx_nand_ids),
+ .of_to_plat = octeontx_nand_of_to_plat,
+ .probe = octeontx_pci_nand_probe,
+ .priv_auto = sizeof(struct octeontx_nfc),
+ .remove = octeontx_pci_nand_disable,
+ .flags = DM_FLAG_OS_PREPARE,
+};
+
+U_BOOT_PCI_DEVICE(octeontx_pci_nand, octeontx_nfc_pci_id_table);
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
+ ret = uclass_get_device_by_driver(UCLASS_MISC,
+ DM_DRIVER_GET(octeontx_pci_bchpf),
+ &dev);
+ if (ret && ret != -ENODEV) {
+ pr_err("Failed to initialize OcteonTX BCH PF controller. (error %d)\n",
+ ret);
+ }
+ ret = uclass_get_device_by_driver(UCLASS_MISC,
+ DM_DRIVER_GET(octeontx_pci_bchvf),
+ &dev);
+ if (ret && ret != -ENODEV) {
+ pr_err("Failed to initialize OcteonTX BCH VF controller. (error %d)\n",
+ ret);
+ }
+ }
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(octeontx_pci_nand),
+ &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize OcteonTX NAND controller. (error %d)\n",
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/omap_elm.c b/roms/u-boot/drivers/mtd/nand/raw/omap_elm.c
new file mode 100644
index 000000000..35c6dd1f1
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/omap_elm.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2010-2011 Texas Instruments, <www.ti.com>
+ * Mansoor Ahamed <mansoor.ahamed@ti.com>
+ *
+ * BCH Error Location Module (ELM) support.
+ *
+ * NOTE:
+ * 1. Supports only continuous mode. Don't see a need for page mode in
+ *    U-Boot.
+ * 2. Supports only syndrome polynomial 0, i.e. the poly local variable is
+ *    always set to ELM_DEFAULT_POLY. Don't see a need for other polynomial
+ *    sets in U-Boot.
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/mtd/omap_elm.h>
+#include <asm/arch/hardware.h>
+
+#define DRIVER_NAME "omap-elm"
+#define ELM_DEFAULT_POLY (0)
+
+struct elm *elm_cfg;
+
+/**
+ * elm_load_syndromes - Load BCH syndromes based on bch_type selection
+ * @syndrome: BCH syndrome
+ * @bch_type: BCH4/BCH8/BCH16
+ * @poly: Syndrome Polynomial set to use
+ */
+static void elm_load_syndromes(u8 *syndrome, enum bch_level bch_type, u8 poly)
+{
+ u32 *ptr;
+ u32 val;
+
+ /* reg 0 */
+ ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[0];
+ val = syndrome[0] | (syndrome[1] << 8) | (syndrome[2] << 16) |
+ (syndrome[3] << 24);
+ writel(val, ptr);
+ /* reg 1 */
+ ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[1];
+ val = syndrome[4] | (syndrome[5] << 8) | (syndrome[6] << 16) |
+ (syndrome[7] << 24);
+ writel(val, ptr);
+
+ if (bch_type == BCH_8_BIT || bch_type == BCH_16_BIT) {
+ /* reg 2 */
+ ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[2];
+ val = syndrome[8] | (syndrome[9] << 8) | (syndrome[10] << 16) |
+ (syndrome[11] << 24);
+ writel(val, ptr);
+ /* reg 3 */
+ ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[3];
+ val = syndrome[12] | (syndrome[13] << 8) |
+ (syndrome[14] << 16) | (syndrome[15] << 24);
+ writel(val, ptr);
+ }
+
+ if (bch_type == BCH_16_BIT) {
+ /* reg 4 */
+ ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[4];
+ val = syndrome[16] | (syndrome[17] << 8) |
+ (syndrome[18] << 16) | (syndrome[19] << 24);
+ writel(val, ptr);
+
+ /* reg 5 */
+ ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[5];
+ val = syndrome[20] | (syndrome[21] << 8) |
+ (syndrome[22] << 16) | (syndrome[23] << 24);
+ writel(val, ptr);
+
+ /* reg 6 */
+ ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[6];
+ val = syndrome[24] | (syndrome[25] << 8) |
+ (syndrome[26] << 16) | (syndrome[27] << 24);
+ writel(val, ptr);
+ }
+}
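+
+/*
+ * Each 32-bit syndrome_fragment_x register packs four syndrome bytes,
+ * lowest byte in bits 7:0. BCH4 fills fragments 0-1, BCH8 adds 2-3 and
+ * BCH16 adds 4-6, i.e. 8, 16 or 28 syndrome bytes respectively.
+ */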
+
+/**
+ * elm_check_error - Check for BCH errors and return error locations
+ * @syndrome: BCH syndrome
+ * @bch_type: BCH4/BCH8/BCH16
+ * @error_count: Returns number of errors in the syndrome
+ * @error_locations: Returns error locations (in decimal) in this array
+ *
+ * Check the provided syndrome for BCH errors and return error count
+ * and locations in the array passed. Returns -EBADMSG if the errors are
+ * not correctable, else returns 0
+ */
+int elm_check_error(u8 *syndrome, enum bch_level bch_type, u32 *error_count,
+ u32 *error_locations)
+{
+ u8 poly = ELM_DEFAULT_POLY;
+ s8 i;
+ u32 location_status;
+
+ elm_load_syndromes(syndrome, bch_type, poly);
+
+ /* start processing */
+ writel((readl(&elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[6])
+ | ELM_SYNDROME_FRAGMENT_6_SYNDROME_VALID),
+ &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[6]);
+
+ /* wait for processing to complete */
+ while ((readl(&elm_cfg->irqstatus) & (0x1 << poly)) != 0x1)
+ ;
+ /* clear status */
+ writel((readl(&elm_cfg->irqstatus) | (0x1 << poly)),
+ &elm_cfg->irqstatus);
+
+ /* check if correctable */
+ location_status = readl(&elm_cfg->error_location[poly].location_status);
+ if (!(location_status & ELM_LOCATION_STATUS_ECC_CORRECTABLE_MASK)) {
+ printf("%s: uncorrectable ECC errors\n", DRIVER_NAME);
+ return -EBADMSG;
+ }
+
+ /* get error count */
+ *error_count = readl(&elm_cfg->error_location[poly].location_status) &
+ ELM_LOCATION_STATUS_ECC_NB_ERRORS_MASK;
+
+ for (i = 0; i < *error_count; i++) {
+ error_locations[i] =
+ readl(&elm_cfg->error_location[poly].error_location_x[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @level: 4 / 8 / 16 bit BCH
+ *
+ * Configure the ELM module based on the BCH level.
+ * The mode is set to continuous.
+ * Currently only syndrome polynomial set 0 is used; sets 1 to 6 are
+ * unused, and continuous mode is likewise configured only for set 0.
+ */
+int elm_config(enum bch_level level)
+{
+ u32 val;
+ u8 poly = ELM_DEFAULT_POLY;
+ u32 buffer_size = 0x7FF;
+
+ /* config size and level */
+ val = (u32)(level) & ELM_LOCATION_CONFIG_ECC_BCH_LEVEL_MASK;
+ val |= ((buffer_size << ELM_LOCATION_CONFIG_ECC_SIZE_POS) &
+ ELM_LOCATION_CONFIG_ECC_SIZE_MASK);
+ writel(val, &elm_cfg->location_config);
+
+	/* configure continuous mode */
+ /* enable interrupt generation for syndrome polynomial set */
+ writel((readl(&elm_cfg->irqenable) | (0x1 << poly)),
+ &elm_cfg->irqenable);
+ /* set continuous mode for the syndrome polynomial set */
+ writel((readl(&elm_cfg->page_ctrl) & ~(0x1 << poly)),
+ &elm_cfg->page_ctrl);
+
+ return 0;
+}
+
+/**
+ * elm_reset - Do a soft reset of ELM
+ *
+ * Perform a soft reset of ELM and return after reset is done.
+ */
+void elm_reset(void)
+{
+ /* initiate reset */
+ writel((readl(&elm_cfg->sysconfig) | ELM_SYSCONFIG_SOFTRESET),
+ &elm_cfg->sysconfig);
+
+ /* wait for reset complete and normal operation */
+ while ((readl(&elm_cfg->sysstatus) & ELM_SYSSTATUS_RESETDONE) !=
+ ELM_SYSSTATUS_RESETDONE)
+ ;
+}
+
+/**
+ * elm_init - Initialize ELM module
+ *
+ * Initialize ELM support. Currently it only sets up the base address
+ * and performs an ELM reset.
+ */
+void elm_init(void)
+{
+ elm_cfg = (struct elm *)ELM_BASE;
+ elm_reset();
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/omap_gpmc.c b/roms/u-boot/drivers/mtd/nand/raw/omap_gpmc.c
new file mode 100644
index 000000000..97fd5690f
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/omap_gpmc.c
@@ -0,0 +1,1038 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2004-2008 Texas Instruments, <www.ti.com>
+ * Rohit Choraria <rohitkc@ti.com>
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <asm/arch/mem.h>
+#include <linux/mtd/omap_gpmc.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/bch.h>
+#include <linux/compiler.h>
+#include <nand.h>
+#include <linux/mtd/omap_elm.h>
+
+#define BADBLOCK_MARKER_LENGTH 2
+#define SECTOR_BYTES 512
+#define ECCCLEAR (0x1 << 8)
+#define ECCRESULTREG1 (0x1 << 0)
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD 4
+
+#ifdef CONFIG_BCH
+static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
+ 0x97, 0x79, 0xe5, 0x24, 0xb5};
+#endif
+static uint8_t cs_next;
+static __maybe_unused struct nand_ecclayout omap_ecclayout;
+
+#if defined(CONFIG_NAND_OMAP_GPMC_WSCFG)
+static const int8_t wscfg[CONFIG_SYS_MAX_NAND_DEVICE] =
+ { CONFIG_NAND_OMAP_GPMC_WSCFG };
+#else
+/* wscfg is preset to zero since it's a static variable */
+static const int8_t wscfg[CONFIG_SYS_MAX_NAND_DEVICE];
+#endif
+
+/*
+ * Driver configurations
+ */
+struct omap_nand_info {
+ struct bch_control *control;
+ enum omap_ecc ecc_scheme;
+ uint8_t cs;
+ uint8_t ws; /* wait status pin (0,1) */
+};
+
+/* We are wasting a bit of memory but at least we are safe */
+static struct omap_nand_info omap_nand_info[GPMC_MAX_CS];
+
+/*
+ * omap_nand_hwcontrol - Set the address pointers correctly for the
+ * following address/data/command operation
+ */
+static void omap_nand_hwcontrol(struct mtd_info *mtd, int32_t cmd,
+ uint32_t ctrl)
+{
+ register struct nand_chip *this = mtd_to_nand(mtd);
+ struct omap_nand_info *info = nand_get_controller_data(this);
+ int cs = info->cs;
+
+ /*
+ * Point the IO_ADDR to DATA and ADDRESS registers instead
+ * of chip address
+ */
+ switch (ctrl) {
+ case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
+ this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_cmd;
+ break;
+ case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
+ this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_adr;
+ break;
+ case NAND_CTRL_CHANGE | NAND_NCE:
+ this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_dat;
+ break;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+/* Check wait pin as dev ready indicator */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ register struct nand_chip *this = mtd_to_nand(mtd);
+ struct omap_nand_info *info = nand_get_controller_data(this);
+ return gpmc_cfg->status & (1 << (8 + info->ws));
+}
+
+/*
+ * gen_true_ecc - This function will generate the true ECC value, which
+ * can be used when correcting data read from the NAND flash memory core
+ *
+ * @ecc_buf: buffer to store ecc code
+ *
+ * @return: re-formatted ECC value
+ */
+static uint32_t gen_true_ecc(uint8_t *ecc_buf)
+{
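+	/*
+	 * Re-pack the three ECC bytes into the 32-bit layout of the GPMC
+	 * ECCx_RESULT register: byte 0 -> bits 0-7, byte 1 -> bits 16-23,
+	 * byte 2 high nibble -> bits 24-27 and low nibble -> bits 8-11.
+	 */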
+ return ecc_buf[0] | (ecc_buf[1] << 16) | ((ecc_buf[2] & 0xF0) << 20) |
+ ((ecc_buf[2] & 0x0F) << 8);
+}
+
+/*
+ * omap_correct_data - Compares the ECC read from the NAND spare area with the
+ * ECC register values and corrects a single-bit error if one has occurred
+ * Further details can be had from OMAP TRM and the following selected links:
+ * http://en.wikipedia.org/wiki/Hamming_code
+ * http://www.cs.utexas.edu/users/plaxton/c/337/05f/slides/ErrorCorrection-4.pdf
+ *
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from ECC registers
+ *
+ * @return 0 if data is OK or corrected, 2 if the ECC itself is corrupted,
+ * and -EBADMSG on an uncorrectable error
+ */
+static int __maybe_unused omap_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ uint32_t orig_ecc, new_ecc, res, hm;
+ uint16_t parity_bits, byte;
+ uint8_t bit;
+
+	/* Regenerate the original ECC */
+ orig_ecc = gen_true_ecc(read_ecc);
+ new_ecc = gen_true_ecc(calc_ecc);
+ /* Get the XOR of real ecc */
+ res = orig_ecc ^ new_ecc;
+ if (res) {
+ /* Get the hamming width */
+ hm = hweight32(res);
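+		/*
+		 * The 24 ECC bits form 12 complementary parity pairs; a
+		 * single flipped data bit changes exactly one bit of each
+		 * pair (distance 12), while a single flipped ECC bit gives
+		 * distance 1.
+		 */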
+ /* Single bit errors can be corrected! */
+ if (hm == 12) {
+ /* Correctable data! */
+ parity_bits = res >> 16;
+ bit = (parity_bits & 0x7);
+ byte = (parity_bits >> 3) & 0x1FF;
+ /* Flip the bit to correct */
+ dat[byte] ^= (0x1 << bit);
+ } else if (hm == 1) {
+			printf("Error: ECC is wrong\n");
+ /* ECC itself is corrupted */
+ return 2;
+ } else {
+			/*
+			 * A Hamming distance that is neither the number of
+			 * parity pairs nor one could mean a 2-bit error, or
+			 * the page could simply be blank.
+			 * orig_ecc: contains spare area data from nand flash.
+			 * new_ecc: generated ecc while reading data area.
+			 * Note: if the ecc = 0, all data bits from which it
+			 * was generated are 0xFF.
+			 * The 3-byte (24-bit) ecc is generated per 512-byte
+			 * chunk of a page. If orig_ecc (from the spare area)
+			 * is 0xFF and new_ecc (computed now from the data
+			 * area) is 0x0, the data area is 0xFF and the spare
+			 * area is 0xFF: a sure sign of an erased page!
+			 */
+ if ((orig_ecc == 0x0FFF0FFF) && (new_ecc == 0x00000000))
+ return 0;
+			printf("Error: ECC compare failed\n");
+ /* detected 2 bit error */
+ return -EBADMSG;
+ }
+ }
+ return 0;
+}
+
+/*
+ * omap_enable_hwecc - configures GPMC as per ECC scheme before read/write
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+__maybe_unused
+static void omap_enable_hwecc(struct mtd_info *mtd, int32_t mode)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct omap_nand_info *info = nand_get_controller_data(nand);
+ unsigned int dev_width = (nand->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ unsigned int ecc_algo = 0;
+ unsigned int bch_type = 0;
+ unsigned int eccsize1 = 0x00, eccsize0 = 0x00, bch_wrapmode = 0x00;
+ u32 ecc_size_config_val = 0;
+ u32 ecc_config_val = 0;
+ int cs = info->cs;
+
+ /* configure GPMC for specific ecc-scheme */
+ switch (info->ecc_scheme) {
+ case OMAP_ECC_HAM1_CODE_SW:
+ return;
+ case OMAP_ECC_HAM1_CODE_HW:
+ ecc_algo = 0x0;
+ bch_type = 0x0;
+ bch_wrapmode = 0x00;
+ eccsize0 = 0xFF;
+ eccsize1 = 0xFF;
+ break;
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+ case OMAP_ECC_BCH8_CODE_HW:
+ ecc_algo = 0x1;
+ bch_type = 0x1;
+ if (mode == NAND_ECC_WRITE) {
+ bch_wrapmode = 0x01;
+ eccsize0 = 0; /* extra bits in nibbles per sector */
+ eccsize1 = 28; /* OOB bits in nibbles per sector */
+ } else {
+ bch_wrapmode = 0x01;
+ eccsize0 = 26; /* ECC bits in nibbles per sector */
+ eccsize1 = 2; /* non-ECC bits in nibbles per sector */
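+			/* 26 nibbles = the 13 BCH8 ECC bytes per sector */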
+ }
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ ecc_algo = 0x1;
+ bch_type = 0x2;
+ if (mode == NAND_ECC_WRITE) {
+ bch_wrapmode = 0x01;
+ eccsize0 = 0; /* extra bits in nibbles per sector */
+ eccsize1 = 52; /* OOB bits in nibbles per sector */
+ } else {
+ bch_wrapmode = 0x01;
+ eccsize0 = 52; /* ECC bits in nibbles per sector */
+ eccsize1 = 0; /* non-ECC bits in nibbles per sector */
+ }
+ break;
+ default:
+ return;
+ }
+ /* Clear ecc and enable bits */
+ writel(ECCCLEAR | ECCRESULTREG1, &gpmc_cfg->ecc_control);
+ /* Configure ecc size for BCH */
+ ecc_size_config_val = (eccsize1 << 22) | (eccsize0 << 12);
+ writel(ecc_size_config_val, &gpmc_cfg->ecc_size_config);
+
+ /* Configure device details for BCH engine */
+ ecc_config_val = ((ecc_algo << 16) | /* HAM1 | BCHx */
+ (bch_type << 12) | /* BCH4/BCH8/BCH16 */
+ (bch_wrapmode << 8) | /* wrap mode */
+ (dev_width << 7) | /* bus width */
+ (0x0 << 4) | /* number of sectors */
+ (cs << 1) | /* ECC CS */
+ (0x1)); /* enable ECC */
+ writel(ecc_config_val, &gpmc_cfg->ecc_config);
+}
+
+/*
+ * omap_calculate_ecc - Read ECC result
+ * @mtd: MTD structure
+ * @dat: unused
+ * @ecc_code: ecc_code buffer
+ * Using non-inverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as
+ * long as nobody is trying to write data on the seemingly unused page.
+ * Reading an erased page will produce an ECC mismatch between
+ * generated and read ECC bytes that has to be dealt with separately.
+ * E.g. if page is 0xFF (fresh erased), and if HW ECC engine within GPMC
+ * is used, the result of read will be 0x0 while the ECC offsets of the
+ * spare area will be 0xFF which will result in an ECC mismatch.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct omap_nand_info *info = nand_get_controller_data(chip);
+ const uint32_t *ptr;
+ uint32_t val = 0;
+ int8_t i = 0, j;
+
+ switch (info->ecc_scheme) {
+ case OMAP_ECC_HAM1_CODE_HW:
+ val = readl(&gpmc_cfg->ecc1_result);
+ ecc_code[0] = val & 0xFF;
+ ecc_code[1] = (val >> 16) & 0xFF;
+ ecc_code[2] = ((val >> 8) & 0x0F) | ((val >> 20) & 0xF0);
+ break;
+#ifdef CONFIG_BCH
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+#endif
+ case OMAP_ECC_BCH8_CODE_HW:
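+		/*
+		 * The 13-byte BCH8 syndrome is spread over BCH_RESULT
+		 * registers 3..0: one byte from result 3, then four bytes
+		 * each from results 2..0, most significant byte first.
+		 */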
+ ptr = &gpmc_cfg->bch_result_0_3[0].bch_result_x[3];
+ val = readl(ptr);
+ ecc_code[i++] = (val >> 0) & 0xFF;
+ ptr--;
+ for (j = 0; j < 3; j++) {
+ val = readl(ptr);
+ ecc_code[i++] = (val >> 24) & 0xFF;
+ ecc_code[i++] = (val >> 16) & 0xFF;
+ ecc_code[i++] = (val >> 8) & 0xFF;
+ ecc_code[i++] = (val >> 0) & 0xFF;
+ ptr--;
+ }
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ val = readl(&gpmc_cfg->bch_result_4_6[0].bch_result_x[2]);
+ ecc_code[i++] = (val >> 8) & 0xFF;
+ ecc_code[i++] = (val >> 0) & 0xFF;
+ val = readl(&gpmc_cfg->bch_result_4_6[0].bch_result_x[1]);
+ ecc_code[i++] = (val >> 24) & 0xFF;
+ ecc_code[i++] = (val >> 16) & 0xFF;
+ ecc_code[i++] = (val >> 8) & 0xFF;
+ ecc_code[i++] = (val >> 0) & 0xFF;
+ val = readl(&gpmc_cfg->bch_result_4_6[0].bch_result_x[0]);
+ ecc_code[i++] = (val >> 24) & 0xFF;
+ ecc_code[i++] = (val >> 16) & 0xFF;
+ ecc_code[i++] = (val >> 8) & 0xFF;
+ ecc_code[i++] = (val >> 0) & 0xFF;
+ for (j = 3; j >= 0; j--) {
+			val = readl(
+				&gpmc_cfg->bch_result_0_3[0].bch_result_x[j]);
+ ecc_code[i++] = (val >> 24) & 0xFF;
+ ecc_code[i++] = (val >> 16) & 0xFF;
+ ecc_code[i++] = (val >> 8) & 0xFF;
+ ecc_code[i++] = (val >> 0) & 0xFF;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* ECC scheme specific syndrome customizations */
+ switch (info->ecc_scheme) {
+ case OMAP_ECC_HAM1_CODE_HW:
+ break;
+#ifdef CONFIG_BCH
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+
+ for (i = 0; i < chip->ecc.bytes; i++)
+ *(ecc_code + i) = *(ecc_code + i) ^
+ bch8_polynomial[i];
+ break;
+#endif
+ case OMAP_ECC_BCH8_CODE_HW:
+ ecc_code[chip->ecc.bytes - 1] = 0x00;
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_NAND_OMAP_GPMC_PREFETCH
+
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
+#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
+#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
+#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
+#define ENABLE_PREFETCH (1 << 7)
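+
+/*
+ * PREFETCH_CONFIG1 layout as built below: access mode (read=0/write=1) in
+ * bit 0, engine enable in bit 7, FIFO threshold from bit 8, and the chip
+ * select from bit 24.
+ */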
+
+/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @fifo_th: fifo threshold to be used for read/write
+ * @count: number of bytes to be transferred
+ * @is_write: prefetch read (0) or write-post (1) mode
+ * @cs: chip select to use
+ */
+static int omap_prefetch_enable(int fifo_th, unsigned int count, int is_write, int cs)
+{
+ uint32_t val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -EINVAL;
+
+ if (readl(&gpmc_cfg->prefetch_control))
+ return -EBUSY;
+
+ /* Set the amount of bytes to be prefetched */
+ writel(count, &gpmc_cfg->prefetch_config2);
+
+ val = (cs << PREFETCH_CONFIG1_CS_SHIFT) | (is_write & 1) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH;
+ writel(val, &gpmc_cfg->prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(1, &gpmc_cfg->prefetch_control);
+
+ return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static void omap_prefetch_reset(void)
+{
+ writel(0, &gpmc_cfg->prefetch_control);
+ writel(0, &gpmc_cfg->prefetch_config1);
+}
+
+static int __read_prefetch_aligned(struct nand_chip *chip, uint32_t *buf, int len)
+{
+ int ret;
+ uint32_t cnt;
+ struct omap_nand_info *info = nand_get_controller_data(chip);
+
+ ret = omap_prefetch_enable(PREFETCH_FIFOTHRESHOLD_MAX, len, 0, info->cs);
+ if (ret < 0)
+ return ret;
+
+ do {
+ int i;
+
+ cnt = readl(&gpmc_cfg->prefetch_status);
+ cnt = PREFETCH_STATUS_FIFO_CNT(cnt);
+
+ for (i = 0; i < cnt / 4; i++) {
+ *buf++ = readl(CONFIG_SYS_NAND_BASE);
+ len -= 4;
+ }
+ } while (len);
+
+ omap_prefetch_reset();
+
+ return 0;
+}
+
+static inline void omap_nand_read(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ nand_read_buf16(mtd, buf, len);
+ else
+ nand_read_buf(mtd, buf, len);
+}
+
+static void omap_nand_read_prefetch(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int ret;
+ uint32_t head, tail;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ /*
+ * If the destination buffer is unaligned, start with reading
+ * the overlap byte-wise.
+ */
+ head = ((uint32_t) buf) % 4;
+ if (head) {
+ omap_nand_read(mtd, buf, head);
+ buf += head;
+ len -= head;
+ }
+
+ /*
+ * Only transfer multiples of 4 bytes in a pre-fetched fashion.
+ * If there's a residue, care for it byte-wise afterwards.
+ */
+ tail = len % 4;
+
+ ret = __read_prefetch_aligned(chip, (uint32_t *)buf, len - tail);
+ if (ret < 0) {
+ /* fallback in case the prefetch engine is busy */
+ omap_nand_read(mtd, buf, len);
+ } else if (tail) {
+ buf += len - tail;
+ omap_nand_read(mtd, buf, tail);
+ }
+}
+#endif /* CONFIG_NAND_OMAP_GPMC_PREFETCH */
+
+#ifdef CONFIG_NAND_OMAP_ELM
+/*
+ * omap_reverse_list - reverses the order of the list elements [internal]
+ * @list: pointer to start of list
+ * @length: length of list
+ */
+static void omap_reverse_list(u8 *list, unsigned int length)
+{
+ unsigned int i, j;
+ unsigned int half_length = length / 2;
+ u8 tmp;
+ for (i = 0, j = length - 1; i < half_length; i++, j--) {
+ tmp = list[i];
+ list[i] = list[j];
+ list[j] = tmp;
+ }
+}
+
+/*
+ * omap_correct_data_bch - Compares the ecc read from the nand spare area
+ * with the ECC register values and corrects the bit errors reported by the ELM
+ *
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash (ignored)
+ * @calc_ecc: ecc read from ECC registers
+ *
+ * @return number of corrected bitflips on success, else a negative error code
+ */
+static int omap_correct_data_bch(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct omap_nand_info *info = nand_get_controller_data(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ uint32_t error_count = 0, error_max;
+ uint32_t error_loc[ELM_MAX_ERROR_COUNT];
+ enum bch_level bch_type;
+ uint32_t i, ecc_flag = 0;
+ uint8_t count;
+ uint32_t byte_pos, bit_pos;
+ int err = 0;
+
+ /* check calculated ecc */
+ for (i = 0; i < ecc->bytes && !ecc_flag; i++) {
+ if (calc_ecc[i] != 0x00)
+ ecc_flag = 1;
+ }
+ if (!ecc_flag)
+ return 0;
+
+	/* check whether it's an erased page */
+ ecc_flag = 0;
+ for (i = 0; i < ecc->bytes && !ecc_flag; i++) {
+ if (read_ecc[i] != 0xff)
+ ecc_flag = 1;
+ }
+ if (!ecc_flag)
+ return 0;
+
+	/*
+	 * The ECC result is read in big-endian byte order, so reverse the
+	 * byte order before loading it into the ELM.
+	 */
+ switch (info->ecc_scheme) {
+ case OMAP_ECC_BCH8_CODE_HW:
+ bch_type = BCH_8_BIT;
+ omap_reverse_list(calc_ecc, ecc->bytes - 1);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ bch_type = BCH_16_BIT;
+ omap_reverse_list(calc_ecc, ecc->bytes);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* use elm module to check for errors */
+ elm_config(bch_type);
+ err = elm_check_error(calc_ecc, bch_type, &error_count, error_loc);
+ if (err)
+ return err;
+
+ /* correct bch error */
+ for (count = 0; count < error_count; count++) {
+ switch (info->ecc_scheme) {
+ case OMAP_ECC_BCH8_CODE_HW:
+ /* 14th byte in ECC is reserved to match ROM layout */
+ error_max = SECTOR_BYTES + (ecc->bytes - 1);
+ break;
+ case OMAP_ECC_BCH16_CODE_HW:
+ error_max = SECTOR_BYTES + ecc->bytes;
+ break;
+ default:
+ return -EINVAL;
+ }
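+		/*
+		 * The ELM reports each error as a bit offset counted from
+		 * the end of the (data + ECC) block, so convert it to a
+		 * byte/bit position from the start.
+		 */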
+ byte_pos = error_max - (error_loc[count] / 8) - 1;
+ bit_pos = error_loc[count] % 8;
+ if (byte_pos < SECTOR_BYTES) {
+ dat[byte_pos] ^= 1 << bit_pos;
+ debug("nand: bit-flip corrected @data=%d\n", byte_pos);
+ } else if (byte_pos < error_max) {
+ read_ecc[byte_pos - SECTOR_BYTES] ^= 1 << bit_pos;
+ debug("nand: bit-flip corrected @oob=%d\n", byte_pos -
+ SECTOR_BYTES);
+ } else {
+ err = -EBADMSG;
+ printf("nand: error: invalid bit-flip location\n");
+ }
+ }
+ return (err) ? err : error_count;
+}
+
+/**
+ * omap_read_page_bch - hardware ecc based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *oob = chip->oob_poi;
+ uint32_t data_pos;
+ uint32_t oob_pos;
+
+ data_pos = 0;
+ /* oob area start */
+ oob_pos = (eccsize * eccsteps) + chip->ecc.layout->eccpos[0];
+ oob += chip->ecc.layout->eccpos[0];
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize,
+ oob += eccbytes) {
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+ /* read data */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_pos, -1);
+ chip->read_buf(mtd, p, eccsize);
+
+ /* read respective ecc from oob area */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
+ chip->read_buf(mtd, oob, eccbytes);
+ /* read syndrome */
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ data_pos += eccsize;
+ oob_pos += eccbytes;
+ }
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+#endif /* CONFIG_NAND_OMAP_ELM */
+
+/*
+ * OMAP3 BCH8 support (with BCH library)
+ */
+#ifdef CONFIG_BCH
+/**
+ * omap_correct_data_bch_sw - Decode received data and correct errors
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ */
+static int omap_correct_data_bch_sw(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ int i, count;
+ /* cannot correct more than 8 errors */
+ unsigned int errloc[8];
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct omap_nand_info *info = nand_get_controller_data(chip);
+
+ count = decode_bch(info->control, NULL, SECTOR_BYTES,
+ read_ecc, calc_ecc, NULL, errloc);
+ if (count > 0) {
+ /* correct errors */
+ for (i = 0; i < count; i++) {
+ /* correct data only, not ecc bytes */
+ if (errloc[i] < SECTOR_BYTES << 3)
+ data[errloc[i] >> 3] ^= 1 << (errloc[i] & 7);
+ debug("corrected bitflip %u\n", errloc[i]);
+#ifdef DEBUG
+			{
+				/*
+				 * Use a separate index so the correction
+				 * loop's counter is not clobbered.
+				 */
+				int k;
+
+				puts("read_ecc: ");
+				/*
+				 * BCH8 has 13 bytes of ECC; BCH4 would need
+				 * adaptation here!
+				 */
+				for (k = 0; k < 13; k++)
+					printf("%02x ", read_ecc[k]);
+				puts("\n");
+				puts("calc_ecc: ");
+				for (k = 0; k < 13; k++)
+					printf("%02x ", calc_ecc[k]);
+				puts("\n");
+			}
+#endif
+ }
+ } else if (count < 0) {
+ puts("ecc unrecoverable error\n");
+ }
+ return count;
+}
+
+/**
+ * omap_free_bch - Release BCH ecc resources
+ * @mtd: MTD device structure
+ */
+static void __maybe_unused omap_free_bch(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct omap_nand_info *info = nand_get_controller_data(chip);
+
+ if (info->control) {
+ free_bch(info->control);
+ info->control = NULL;
+ }
+}
+#endif /* CONFIG_BCH */
+
+/**
+ * omap_select_ecc_scheme - configures driver for particular ecc-scheme
+ * @nand: NAND chip device structure
+ * @ecc_scheme: ecc scheme to configure
+ * @pagesize: number of main-area bytes per page of NAND device
+ * @oobsize: number of OOB/spare bytes per page of NAND device
+ */
+static int omap_select_ecc_scheme(struct nand_chip *nand,
+		enum omap_ecc ecc_scheme, unsigned int pagesize,
+		unsigned int oobsize)
+{
+ struct omap_nand_info *info = nand_get_controller_data(nand);
+ struct nand_ecclayout *ecclayout = &omap_ecclayout;
+ int eccsteps = pagesize / SECTOR_BYTES;
+ int i;
+
+ switch (ecc_scheme) {
+ case OMAP_ECC_HAM1_CODE_SW:
+ debug("nand: selected OMAP_ECC_HAM1_CODE_SW\n");
+		/*
+		 * For this ecc-scheme, ecc.bytes, ecc.layout, ... are
+		 * initialized in nand_scan_tail(), so just set ecc.mode.
+		 */
+ info->control = NULL;
+ nand->ecc.mode = NAND_ECC_SOFT;
+ nand->ecc.layout = NULL;
+ nand->ecc.size = 0;
+ break;
+
+ case OMAP_ECC_HAM1_CODE_HW:
+ debug("nand: selected OMAP_ECC_HAM1_CODE_HW\n");
+ /* check ecc-scheme requirements before updating ecc info */
+ if ((3 * eccsteps) + BADBLOCK_MARKER_LENGTH > oobsize) {
+ printf("nand: error: insufficient OOB: require=%d\n", (
+ (3 * eccsteps) + BADBLOCK_MARKER_LENGTH));
+ return -EINVAL;
+ }
+ info->control = NULL;
+ /* populate ecc specific fields */
+ memset(&nand->ecc, 0, sizeof(struct nand_ecc_ctrl));
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.strength = 1;
+ nand->ecc.size = SECTOR_BYTES;
+ nand->ecc.bytes = 3;
+ nand->ecc.hwctl = omap_enable_hwecc;
+ nand->ecc.correct = omap_correct_data;
+ nand->ecc.calculate = omap_calculate_ecc;
+ /* define ecc-layout */
+ ecclayout->eccbytes = nand->ecc.bytes * eccsteps;
+ for (i = 0; i < ecclayout->eccbytes; i++) {
+ if (nand->options & NAND_BUSWIDTH_16)
+ ecclayout->eccpos[i] = i + 2;
+ else
+ ecclayout->eccpos[i] = i + 1;
+ }
+ ecclayout->oobfree[0].offset = i + BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree[0].length = oobsize - ecclayout->eccbytes -
+ BADBLOCK_MARKER_LENGTH;
+ break;
+
+ case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+#ifdef CONFIG_BCH
+ debug("nand: selected OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+ /* check ecc-scheme requirements before updating ecc info */
+ if ((13 * eccsteps) + BADBLOCK_MARKER_LENGTH > oobsize) {
+ printf("nand: error: insufficient OOB: require=%d\n", (
+ (13 * eccsteps) + BADBLOCK_MARKER_LENGTH));
+ return -EINVAL;
+ }
+ /* check if BCH S/W library can be used for error detection */
+ info->control = init_bch(13, 8, 0x201b);
+ if (!info->control) {
+ printf("nand: error: could not init_bch()\n");
+ return -ENODEV;
+ }
+ /* populate ecc specific fields */
+ memset(&nand->ecc, 0, sizeof(struct nand_ecc_ctrl));
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.strength = 8;
+ nand->ecc.size = SECTOR_BYTES;
+ nand->ecc.bytes = 13;
+ nand->ecc.hwctl = omap_enable_hwecc;
+ nand->ecc.correct = omap_correct_data_bch_sw;
+ nand->ecc.calculate = omap_calculate_ecc;
+ /* define ecc-layout */
+ ecclayout->eccbytes = nand->ecc.bytes * eccsteps;
+ ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ for (i = 1; i < ecclayout->eccbytes; i++) {
+ if (i % nand->ecc.bytes)
+ ecclayout->eccpos[i] =
+ ecclayout->eccpos[i - 1] + 1;
+ else
+ ecclayout->eccpos[i] =
+ ecclayout->eccpos[i - 1] + 2;
+ }
+ ecclayout->oobfree[0].offset = i + BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree[0].length = oobsize - ecclayout->eccbytes -
+ BADBLOCK_MARKER_LENGTH;
+ break;
+#else
+ printf("nand: error: CONFIG_BCH required for ECC\n");
+ return -EINVAL;
+#endif
+
+ case OMAP_ECC_BCH8_CODE_HW:
+#ifdef CONFIG_NAND_OMAP_ELM
+ debug("nand: selected OMAP_ECC_BCH8_CODE_HW\n");
+ /* check ecc-scheme requirements before updating ecc info */
+ if ((14 * eccsteps) + BADBLOCK_MARKER_LENGTH > oobsize) {
+ printf("nand: error: insufficient OOB: require=%d\n", (
+ (14 * eccsteps) + BADBLOCK_MARKER_LENGTH));
+ return -EINVAL;
+ }
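+		/*
+		 * E.g. a 2048-byte page has 4 sectors of 512 bytes, so BCH8
+		 * needs 14 * 4 + 2 = 58 OOB bytes and a standard 64-byte OOB
+		 * area is sufficient.
+		 */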
+		/* initialize ELM for ECC error detection */
+ elm_init();
+ info->control = NULL;
+ /* populate ecc specific fields */
+ memset(&nand->ecc, 0, sizeof(struct nand_ecc_ctrl));
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.strength = 8;
+ nand->ecc.size = SECTOR_BYTES;
+ nand->ecc.bytes = 14;
+ nand->ecc.hwctl = omap_enable_hwecc;
+ nand->ecc.correct = omap_correct_data_bch;
+ nand->ecc.calculate = omap_calculate_ecc;
+ nand->ecc.read_page = omap_read_page_bch;
+ /* define ecc-layout */
+ ecclayout->eccbytes = nand->ecc.bytes * eccsteps;
+ for (i = 0; i < ecclayout->eccbytes; i++)
+ ecclayout->eccpos[i] = i + BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree[0].offset = i + BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree[0].length = oobsize - ecclayout->eccbytes -
+ BADBLOCK_MARKER_LENGTH;
+ break;
+#else
+ printf("nand: error: CONFIG_NAND_OMAP_ELM required for ECC\n");
+ return -EINVAL;
+#endif
+
+ case OMAP_ECC_BCH16_CODE_HW:
+#ifdef CONFIG_NAND_OMAP_ELM
+ debug("nand: using OMAP_ECC_BCH16_CODE_HW\n");
+ /* check ecc-scheme requirements before updating ecc info */
+ if ((26 * eccsteps) + BADBLOCK_MARKER_LENGTH > oobsize) {
+ printf("nand: error: insufficient OOB: require=%d\n", (
+ (26 * eccsteps) + BADBLOCK_MARKER_LENGTH));
+ return -EINVAL;
+ }
+		/* initialize ELM for ECC error detection */
+ elm_init();
+ /* populate ecc specific fields */
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.size = SECTOR_BYTES;
+ nand->ecc.bytes = 26;
+ nand->ecc.strength = 16;
+ nand->ecc.hwctl = omap_enable_hwecc;
+ nand->ecc.correct = omap_correct_data_bch;
+ nand->ecc.calculate = omap_calculate_ecc;
+ nand->ecc.read_page = omap_read_page_bch;
+ /* define ecc-layout */
+ ecclayout->eccbytes = nand->ecc.bytes * eccsteps;
+ for (i = 0; i < ecclayout->eccbytes; i++)
+ ecclayout->eccpos[i] = i + BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree[0].offset = i + BADBLOCK_MARKER_LENGTH;
+ ecclayout->oobfree[0].length = oobsize - nand->ecc.bytes -
+ BADBLOCK_MARKER_LENGTH;
+ break;
+#else
+ printf("nand: error: CONFIG_NAND_OMAP_ELM required for ECC\n");
+ return -EINVAL;
+#endif
+ default:
+ debug("nand: error: ecc scheme not enabled or supported\n");
+ return -EINVAL;
+ }
+
+ /* nand_scan_tail() sets ham1 sw ecc; hw ecc layout is set by driver */
+ if (ecc_scheme != OMAP_ECC_HAM1_CODE_SW)
+ nand->ecc.layout = ecclayout;
+
+ info->ecc_scheme = ecc_scheme;
+ return 0;
+}
+
+#ifndef CONFIG_SPL_BUILD
+/*
+ * omap_nand_switch_ecc - switch the ECC operation between different engines
+ * (h/w and s/w) and different algorithms (hamming and BCHx)
+ *
+ * @hardware - true if one of the HW engines should be used
+ * @eccstrength - the number of bits that could be corrected
+ * (1 - hamming, 4 - BCH4, 8 - BCH8, 16 - BCH16)
+ */
+int __maybe_unused omap_nand_switch_ecc(uint32_t hardware, uint32_t eccstrength)
+{
+ struct nand_chip *nand;
+ struct mtd_info *mtd = get_nand_dev_by_index(nand_curr_device);
+ int err = 0;
+
+ if (!mtd) {
+ printf("nand: error: no NAND devices found\n");
+ return -ENODEV;
+ }
+
+ nand = mtd_to_nand(mtd);
+ nand->options |= NAND_OWN_BUFFERS;
+ nand->options &= ~NAND_SUBPAGE_READ;
+ /* Setup the ecc configurations again */
+ if (hardware) {
+ if (eccstrength == 1) {
+ err = omap_select_ecc_scheme(nand,
+ OMAP_ECC_HAM1_CODE_HW,
+ mtd->writesize, mtd->oobsize);
+ } else if (eccstrength == 8) {
+ err = omap_select_ecc_scheme(nand,
+ OMAP_ECC_BCH8_CODE_HW,
+ mtd->writesize, mtd->oobsize);
+ } else if (eccstrength == 16) {
+ err = omap_select_ecc_scheme(nand,
+ OMAP_ECC_BCH16_CODE_HW,
+ mtd->writesize, mtd->oobsize);
+ } else {
+ printf("nand: error: unsupported ECC scheme\n");
+ return -EINVAL;
+ }
+ } else {
+ if (eccstrength == 1) {
+ err = omap_select_ecc_scheme(nand,
+ OMAP_ECC_HAM1_CODE_SW,
+ mtd->writesize, mtd->oobsize);
+ } else if (eccstrength == 8) {
+ err = omap_select_ecc_scheme(nand,
+ OMAP_ECC_BCH8_CODE_HW_DETECTION_SW,
+ mtd->writesize, mtd->oobsize);
+ } else {
+ printf("nand: error: unsupported ECC scheme\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Update NAND handling after ECC mode switch */
+ if (!err)
+ err = nand_scan_tail(mtd);
+ return err;
+}
+#endif /* CONFIG_SPL_BUILD */
+
+/*
+ * Board-specific NAND initialization. The following members of the
+ * argument are board-specific:
+ * - IO_ADDR_R: address to read the 8 I/O lines of the flash device
+ * - IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * - cmd_ctrl: hardware-specific function for accessing control lines
+ * - waitfunc: hardware-specific function for accessing the device
+ *   ready/busy line
+ * - ecc.hwctl: function to enable (reset) hardware ecc generator
+ * - ecc.mode: mode of ecc, see defines
+ * - chip_delay: chip-dependent delay for transferring data from the array to
+ * read regs (tR)
+ * - options: various chip options. They can partly be set to inform
+ * nand_scan about special functionality. See the defines for further
+ * explanation
+ */
+int board_nand_init(struct nand_chip *nand)
+{
+ int32_t gpmc_config = 0;
+ int cs = cs_next++;
+ int err = 0;
+	/*
+	 * The X-loader's/U-Boot's GPMC configuration would have configured
+	 * GPMC for NAND-type memory. The following logic scans and latches
+	 * on to the first CS with NAND-type memory.
+	 * TBD: need to make this logic generic to handle multiple CS NAND
+	 * devices.
+	 */
+ while (cs < GPMC_MAX_CS) {
+		/* Check if DEVICETYPE (CONFIG1 bits 11:10) is set to NAND */
+ if ((readl(&gpmc_cfg->cs[cs].config1) & 0xC00) == 0x800) {
+ /* Found it!! */
+ break;
+ }
+ cs++;
+ }
+ if (cs >= GPMC_MAX_CS) {
+ printf("nand: error: Unable to find NAND settings in "
+ "GPMC Configuration - quitting\n");
+ return -ENODEV;
+ }
+
+ gpmc_config = readl(&gpmc_cfg->config);
+ /* Disable Write protect */
+ gpmc_config |= 0x10;
+ writel(gpmc_config, &gpmc_cfg->config);
+
+ nand->IO_ADDR_R = (void __iomem *)&gpmc_cfg->cs[cs].nand_dat;
+ nand->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_cmd;
+ omap_nand_info[cs].control = NULL;
+ omap_nand_info[cs].cs = cs;
+ omap_nand_info[cs].ws = wscfg[cs];
+ nand_set_controller_data(nand, &omap_nand_info[cs]);
+ nand->cmd_ctrl = omap_nand_hwcontrol;
+ nand->options |= NAND_NO_PADDING | NAND_CACHEPRG;
+ nand->chip_delay = 100;
+ nand->ecc.layout = &omap_ecclayout;
+
+ /* configure driver and controller based on NAND device bus-width */
+ gpmc_config = readl(&gpmc_cfg->cs[cs].config1);
+#if defined(CONFIG_SYS_NAND_BUSWIDTH_16BIT)
+ nand->options |= NAND_BUSWIDTH_16;
+ writel(gpmc_config | (0x1 << 12), &gpmc_cfg->cs[cs].config1);
+#else
+ nand->options &= ~NAND_BUSWIDTH_16;
+ writel(gpmc_config & ~(0x1 << 12), &gpmc_cfg->cs[cs].config1);
+#endif
+ /* select ECC scheme */
+#if defined(CONFIG_NAND_OMAP_ECCSCHEME)
+ err = omap_select_ecc_scheme(nand, CONFIG_NAND_OMAP_ECCSCHEME,
+ CONFIG_SYS_NAND_PAGE_SIZE, CONFIG_SYS_NAND_OOBSIZE);
+#else
+ /* pagesize and oobsize are not required to configure sw ecc-scheme */
+ err = omap_select_ecc_scheme(nand, OMAP_ECC_HAM1_CODE_SW,
+ 0, 0);
+#endif
+ if (err)
+ return err;
+
+#ifdef CONFIG_NAND_OMAP_GPMC_PREFETCH
+ nand->read_buf = omap_nand_read_prefetch;
+#else
+ if (nand->options & NAND_BUSWIDTH_16)
+ nand->read_buf = nand_read_buf16;
+ else
+ nand->read_buf = nand_read_buf;
+#endif
+
+ nand->dev_ready = omap_dev_ready;
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.c b/roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.c
new file mode 100644
index 000000000..8ff58a703
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.c
@@ -0,0 +1,1956 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/mtd/nand/raw/pxa3xx_nand.c
+ *
+ * Copyright © 2005 Intel Corporation
+ * Copyright © 2006 Marvell International Ltd.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <fdtdec.h>
+#include <nand.h>
+#include <asm/global_data.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <asm/arch/cpu.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/types.h>
+#include <syscon.h>
+#include <regmap.h>
+#include <dm/uclass.h>
+#include <dm/read.h>
+
+#include "pxa3xx_nand.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define TIMEOUT_DRAIN_FIFO 5 /* in ms */
+#define CHIP_DELAY_TIMEOUT 200
+#define NAND_STOP_DELAY 40
+
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM.
+ * ONFI param page is 256 bytes, and there are three redundant copies
+ * to be read. JEDEC param page is 512 bytes, and there are also three
+ * redundant copies to be read.
+ * Hence this buffer should be at least 512 x 3. Let's pick 2048.
+ */
+#define INIT_BUFFER_SIZE 2048
+
+/* registers and bit definitions */
+#define NDCR (0x00) /* Control register */
+#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
+#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
+#define NDSR (0x14) /* Status Register */
+#define NDPCR (0x18) /* Page Count Register */
+#define NDBDR0 (0x1C) /* Bad Block Register 0 */
+#define NDBDR1 (0x20) /* Bad Block Register 1 */
+#define NDECCCTRL (0x28) /* ECC control */
+#define NDDB (0x40) /* Data Buffer */
+#define NDCB0 (0x48) /* Command Buffer0 */
+#define NDCB1 (0x4C) /* Command Buffer1 */
+#define NDCB2 (0x50) /* Command Buffer2 */
+
+#define NDCR_SPARE_EN (0x1 << 31)
+#define NDCR_ECC_EN (0x1 << 30)
+#define NDCR_DMA_EN (0x1 << 29)
+#define NDCR_ND_RUN (0x1 << 28)
+#define NDCR_DWIDTH_C (0x1 << 27)
+#define NDCR_DWIDTH_M (0x1 << 26)
+#define NDCR_PAGE_SZ (0x1 << 24)
+#define NDCR_NCSX (0x1 << 23)
+#define NDCR_ND_MODE (0x3 << 21)
+#define NDCR_NAND_MODE (0x0)
+#define NDCR_CLR_PG_CNT (0x1 << 20)
+#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
+#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
+#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
+
+#define NDCR_RA_START (0x1 << 15)
+#define NDCR_PG_PER_BLK (0x1 << 14)
+#define NDCR_ND_ARB_EN (0x1 << 12)
+#define NDCR_INT_MASK (0xFFF)
+
+#define NDSR_MASK (0xfff)
+#define NDSR_ERR_CNT_OFF (16)
+#define NDSR_ERR_CNT_MASK (0x1f)
+#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
+#define NDSR_RDY (0x1 << 12)
+#define NDSR_FLASH_RDY (0x1 << 11)
+#define NDSR_CS0_PAGED (0x1 << 10)
+#define NDSR_CS1_PAGED (0x1 << 9)
+#define NDSR_CS0_CMDD (0x1 << 8)
+#define NDSR_CS1_CMDD (0x1 << 7)
+#define NDSR_CS0_BBD (0x1 << 6)
+#define NDSR_CS1_BBD (0x1 << 5)
+#define NDSR_UNCORERR (0x1 << 4)
+#define NDSR_CORERR (0x1 << 3)
+#define NDSR_WRDREQ (0x1 << 2)
+#define NDSR_RDDREQ (0x1 << 1)
+#define NDSR_WRCMDREQ (0x1)
+
+#define NDCB0_LEN_OVRD (0x1 << 28)
+#define NDCB0_ST_ROW_EN (0x1 << 26)
+#define NDCB0_AUTO_RS (0x1 << 25)
+#define NDCB0_CSEL (0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
+#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
+#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
+#define NDCB0_NC (0x1 << 20)
+#define NDCB0_DBC (0x1 << 19)
+#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
+#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
+#define NDCB0_CMD2_MASK (0xff << 8)
+#define NDCB0_CMD1_MASK (0xff)
+#define NDCB0_ADDR_CYC_SHIFT (16)
+
+#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ 4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL 3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
+
+/* System control register and bit to enable NAND on some SoCs */
+#define GENCONF_SOC_DEVICE_MUX 0x208
+#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
+
+/*
+ * This should be large enough to read 'ONFI' and 'JEDEC'.
+ * Let's use 7 bytes, which is the maximum ID count supported
+ * by the controller (see NDCR_RD_ID_CNT_MASK).
+ */
+#define READ_ID_BYTES 7
+
+/* macros for registers read/write */
+#define nand_writel(info, off, val) \
+ writel((val), (info)->mmio_base + (off))
+
+#define nand_readl(info, off) \
+ readl((info)->mmio_base + (off))
+
+/* error code and state */
+enum {
+ ERR_NONE = 0,
+ ERR_DMABUSERR = -1,
+ ERR_SENDCMD = -2,
+ ERR_UNCORERR = -3,
+ ERR_BBERR = -4,
+ ERR_CORERR = -5,
+};
+
+enum {
+ STATE_IDLE = 0,
+ STATE_PREPARED,
+ STATE_CMD_HANDLE,
+ STATE_DMA_READING,
+ STATE_DMA_WRITING,
+ STATE_DMA_DONE,
+ STATE_PIO_READING,
+ STATE_PIO_WRITING,
+ STATE_CMD_DONE,
+ STATE_READY,
+};
+
+enum pxa3xx_nand_variant {
+ PXA3XX_NAND_VARIANT_PXA,
+ PXA3XX_NAND_VARIANT_ARMADA370,
+ PXA3XX_NAND_VARIANT_ARMADA_8K,
+};
+
+struct pxa3xx_nand_host {
+ struct nand_chip chip;
+ void *info_data;
+
+ /* page size of attached chip */
+ int use_ecc;
+ int cs;
+
+ /* calculated from pxa3xx_nand_flash data */
+ unsigned int col_addr_cycles;
+ unsigned int row_addr_cycles;
+};
+
+struct pxa3xx_nand_info {
+ struct nand_hw_control controller;
+ struct pxa3xx_nand_platform_data *pdata;
+
+ struct clk *clk;
+ void __iomem *mmio_base;
+ unsigned long mmio_phys;
+ int cmd_complete, dev_ready;
+
+ unsigned int buf_start;
+ unsigned int buf_count;
+ unsigned int buf_size;
+ unsigned int data_buff_pos;
+ unsigned int oob_buff_pos;
+
+ unsigned char *data_buff;
+ unsigned char *oob_buff;
+
+ struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
+ unsigned int state;
+
+ /*
+ * This driver supports NFCv1 (as found in PXA SoC)
+ * and NFCv2 (as found in Armada 370/XP SoC).
+ */
+ enum pxa3xx_nand_variant variant;
+
+ int cs;
+ int use_ecc; /* use HW ECC ? */
+	int force_raw;		/* prevent use_ecc from being set */
+ int ecc_bch; /* using BCH ECC? */
+ int use_spare; /* use spare ? */
+ int need_wait;
+
+ /* Amount of real data per full chunk */
+ unsigned int chunk_size;
+
+ /* Amount of spare data per full chunk */
+ unsigned int spare_size;
+
+	/* Number of full chunks (i.e. chunk_size + spare_size) */
+ unsigned int nfullchunks;
+
+ /*
+ * Total number of chunks. If equal to nfullchunks, then there
+ * are only full chunks. Otherwise, there is one last chunk of
+ * size (last_chunk_size + last_spare_size)
+ */
+ unsigned int ntotalchunks;
+
+ /* Amount of real data in the last chunk */
+ unsigned int last_chunk_size;
+
+ /* Amount of spare data in the last chunk */
+ unsigned int last_spare_size;
+
+ unsigned int ecc_size;
+ unsigned int ecc_err_cnt;
+ unsigned int max_bitflips;
+ int retcode;
+
+ /*
+ * Variables only valid during command
+ * execution. step_chunk_size and step_spare_size is the
+ * amount of real data and spare data in the current
+ * chunk. cur_chunk is the current chunk being
+ * read/programmed.
+ */
+ unsigned int step_chunk_size;
+ unsigned int step_spare_size;
+ unsigned int cur_chunk;
+
+ /* cached register value */
+ uint32_t reg_ndcr;
+ uint32_t ndtr0cs0;
+ uint32_t ndtr1cs0;
+
+ /* generated NDCBx register values */
+ uint32_t ndcb0;
+ uint32_t ndcb1;
+ uint32_t ndcb2;
+ uint32_t ndcb3;
+};
+
+static struct pxa3xx_nand_timing timing[] = {
+ /*
+ * tCH Enable signal hold time
+ * tCS Enable signal setup time
+ * tWH ND_nWE high duration
+ * tWP ND_nWE pulse time
+ * tRH ND_nRE high duration
+ * tRP ND_nRE pulse width
+ * tR ND_nWE high to ND_nRE low for read
+ * tWHR ND_nWE high to ND_nRE low for status read
+ * tAR ND_ALE low to ND_nRE low delay
+ */
+ /*ch cs wh wp rh rp r whr ar */
+ { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
+ { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
+ { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
+ { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
+ { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
+};
+
+static struct pxa3xx_nand_flash builtin_flash_types[] = {
+ /*
+ * chip_id
+ * flash_width Width of Flash memory (DWIDTH_M)
+ * dfc_width Width of flash controller(DWIDTH_C)
+ * *timing
+ * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
+ */
+ { 0x46ec, 16, 16, &timing[1] },
+ { 0xdaec, 8, 8, &timing[1] },
+ { 0xd7ec, 8, 8, &timing[1] },
+ { 0xa12c, 8, 8, &timing[2] },
+ { 0xb12c, 16, 16, &timing[2] },
+ { 0xdc2c, 8, 8, &timing[2] },
+ { 0xcc2c, 16, 16, &timing[2] },
+ { 0xba20, 16, 16, &timing[3] },
+ { 0xda98, 8, 8, &timing[4] },
+};
+
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ .offs = 8,
+ .len = 6,
+ .veroffs = 14,
+ .maxblocks = 8, /* Last 8 blocks in each chip */
+ .pattern = bbt_mirror_pattern
+};
+#endif
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+ .eccbytes = 32,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
+ .eccbytes = 64,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95},
+ .oobfree = { {1, 4}, {6, 26} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+ .eccbytes = 64,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127},
+ /* Bootrom looks in bytes 0 & 5 for bad blocks */
+ .oobfree = { {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
+ .eccbytes = 128,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+
+ 96, 97, 98, 99, 100, 101, 102, 103,
+ 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119,
+ 120, 121, 122, 123, 124, 125, 126, 127,
+
+ 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 170, 171, 172, 173, 174, 175,
+ 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191,
+
+ 224, 225, 226, 227, 228, 229, 230, 231,
+ 232, 233, 234, 235, 236, 237, 238, 239,
+ 240, 241, 242, 243, 244, 245, 246, 247,
+ 248, 249, 250, 251, 252, 253, 254, 255},
+
+ /* Bootrom looks in bytes 0 & 5 for bad blocks */
+ .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+ .eccbytes = 128,
+ .eccpos = {
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { }
+};
+
+static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
+ .eccbytes = 256,
+ .eccpos = {},
+ /* HW ECC handles all ECC data and all spare area is free for OOB */
+ .oobfree = {{0, 160} }
+};
+
+#define NDTR0_tCH(c) (min((c), 7) << 19)
+#define NDTR0_tCS(c) (min((c), 7) << 16)
+#define NDTR0_tWH(c) (min((c), 7) << 11)
+#define NDTR0_tWP(c) (min((c), 7) << 8)
+#define NDTR0_tRH(c) (min((c), 7) << 3)
+#define NDTR0_tRP(c) (min((c), 7) << 0)
+
+#define NDTR1_tR(c) (min((c), 65535) << 16)
+#define NDTR1_tWHR(c) (min((c), 15) << 4)
+#define NDTR1_tAR(c) (min((c), 15) << 0)
+
+/* convert nanoseconds to nand flash controller clock cycles */
+#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
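+/* e.g. with a 200 MHz controller clock, ns2cycle(25, 200000000) = 5 cycles */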
+
+static const struct udevice_id pxa3xx_nand_dt_ids[] = {
+ {
+ .compatible = "marvell,mvebu-pxa3xx-nand",
+ .data = PXA3XX_NAND_VARIANT_ARMADA370,
+ },
+ {
+ .compatible = "marvell,armada-8k-nand-controller",
+ .data = PXA3XX_NAND_VARIANT_ARMADA_8K,
+ },
+ {}
+};
+
+static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(struct udevice *dev)
+{
+ return dev_get_driver_data(dev);
+}
+
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
+ const struct pxa3xx_nand_timing *t)
+{
+ struct pxa3xx_nand_info *info = host->info_data;
+ unsigned long nand_clk = mvebu_get_nand_clock();
+ uint32_t ndtr0, ndtr1;
+
+ ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
+ NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
+ NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
+ NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
+ NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
+ NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
+
+ ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
+ NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
+ NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
+
+ info->ndtr0cs0 = ndtr0;
+ info->ndtr1cs0 = ndtr1;
+ nand_writel(info, NDTR0CS0, ndtr0);
+ nand_writel(info, NDTR1CS0, ndtr1);
+}
+
+static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
+ const struct nand_sdr_timings *t)
+{
+ struct pxa3xx_nand_info *info = host->info_data;
+ struct nand_chip *chip = &host->chip;
+ unsigned long nand_clk = mvebu_get_nand_clock();
+ uint32_t ndtr0, ndtr1;
+
+ u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
+ u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
+ u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
+ u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
+ u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
+ u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
+ u32 tR = chip->chip_delay * 1000;
+ u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
+ u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
+
+ /* fallback to a default value if tR = 0 */
+ if (!tR)
+ tR = 20000;
+
+ ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
+ NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
+ NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
+ NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
+ NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
+ NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
+
+ ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
+ NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
+ NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
+
+ info->ndtr0cs0 = ndtr0;
+ info->ndtr1cs0 = ndtr1;
+ nand_writel(info, NDTR0CS0, ndtr0);
+ nand_writel(info, NDTR1CS0, ndtr1);
+}
+
+static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
+{
+ const struct nand_sdr_timings *timings;
+ struct nand_chip *chip = &host->chip;
+ struct pxa3xx_nand_info *info = host->info_data;
+ const struct pxa3xx_nand_flash *f = NULL;
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ int mode, id, ntypes, i;
+
+ mode = onfi_get_async_timing_mode(chip);
+ if (mode == ONFI_TIMING_MODE_UNKNOWN) {
+ ntypes = ARRAY_SIZE(builtin_flash_types);
+
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ id = chip->read_byte(mtd);
+ id |= chip->read_byte(mtd) << 0x8;
+
+ for (i = 0; i < ntypes; i++) {
+ f = &builtin_flash_types[i];
+
+ if (f->chip_id == id)
+ break;
+ }
+
+ if (i == ntypes) {
+ dev_err(mtd->dev, "Error: timings not found\n");
+ return -EINVAL;
+ }
+
+ pxa3xx_nand_set_timing(host, f->timing);
+
+ if (f->flash_width == 16) {
+ info->reg_ndcr |= NDCR_DWIDTH_M;
+ chip->options |= NAND_BUSWIDTH_16;
+ }
+
+ info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
+ } else {
+ mode = fls(mode) - 1;
+ if (mode < 0)
+ mode = 0;
+
+ timings = onfi_async_timing_mode_to_sdr_timings(mode);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ pxa3xx_nand_set_sdr_timing(host, timings);
+ }
+
+ return 0;
+}
+
+/**
+ * NOTE: ND_RUN must be set first and the command buffer written
+ * afterwards, otherwise the controller does not work.
+ * We enable all the interrupts at the same time and let
+ * pxa3xx_nand_irq() handle all the logic.
+ */
+static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
+{
+ uint32_t ndcr;
+
+ ndcr = info->reg_ndcr;
+
+ if (info->use_ecc) {
+ ndcr |= NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x1);
+ } else {
+ ndcr &= ~NDCR_ECC_EN;
+ if (info->ecc_bch)
+ nand_writel(info, NDECCCTRL, 0x0);
+ }
+
+ ndcr &= ~NDCR_DMA_EN;
+
+ if (info->use_spare)
+ ndcr |= NDCR_SPARE_EN;
+ else
+ ndcr &= ~NDCR_SPARE_EN;
+
+ ndcr |= NDCR_ND_RUN;
+
+ /* clear status bits and run */
+ nand_writel(info, NDSR, NDSR_MASK);
+ nand_writel(info, NDCR, 0);
+ nand_writel(info, NDCR, ndcr);
+}
+
+static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+ uint32_t ndcr;
+
+ ndcr = nand_readl(info, NDCR);
+ nand_writel(info, NDCR, ndcr | int_mask);
+}
+
+static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
+{
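+	/* Note: len is a count of 32-bit words, as passed by handle_data_pio() */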
+ if (info->ecc_bch && !info->force_raw) {
+ u32 ts;
+
+		/*
+		 * According to the datasheet, when reading from NDDB
+		 * with BCH enabled, after each 32-byte read, we
+		 * have to make sure that the NDSR.RDDREQ bit is set.
+		 *
+		 * Drain the FIFO eight 32-bit reads at a time, and skip
+		 * the polling on the last read.
+		 */
+ while (len > 8) {
+ readsl(info->mmio_base + NDDB, data, 8);
+
+ ts = get_timer(0);
+ while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
+ if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
+ dev_err(info->controller.active->mtd.dev,
+ "Timeout on RDDREQ while draining the FIFO\n");
+ return;
+ }
+ }
+
+ data += 32;
+ len -= 8;
+ }
+ }
+
+ readsl(info->mmio_base + NDDB, data, len);
+}
+
+static void handle_data_pio(struct pxa3xx_nand_info *info)
+{
+ int data_len = info->step_chunk_size;
+
+ /*
+ * In raw mode, include the spare area and the ECC bytes that are not
+ * consumed by the controller in the data section. Do not reorganize
+ * here, do it in the ->read_page_raw() handler instead.
+ */
+ if (info->force_raw)
+ data_len += info->step_spare_size + info->ecc_size;
+
+ switch (info->state) {
+ case STATE_PIO_WRITING:
+ if (info->step_chunk_size)
+ writesl(info->mmio_base + NDDB,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(data_len, 4));
+
+ if (info->step_spare_size)
+ writesl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->step_spare_size, 4));
+ break;
+ case STATE_PIO_READING:
+ if (data_len)
+ drain_fifo(info,
+ info->data_buff + info->data_buff_pos,
+ DIV_ROUND_UP(data_len, 4));
+
+ if (info->force_raw)
+ break;
+
+ if (info->step_spare_size)
+ drain_fifo(info,
+ info->oob_buff + info->oob_buff_pos,
+ DIV_ROUND_UP(info->step_spare_size, 4));
+ break;
+ default:
+ dev_err(info->controller.active->mtd.dev,
+ "%s: invalid state %d\n", __func__, info->state);
+ BUG();
+ }
+
+ /* Update buffer pointers for multi-page read/write */
+ info->data_buff_pos += data_len;
+ info->oob_buff_pos += info->step_spare_size;
+}
+
+static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
+{
+ handle_data_pio(info);
+
+ info->state = STATE_CMD_DONE;
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+}
+
+static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
+{
+ unsigned int status, is_completed = 0, is_ready = 0;
+ unsigned int ready, cmd_done;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ if (info->cs == 0) {
+ ready = NDSR_FLASH_RDY;
+ cmd_done = NDSR_CS0_CMDD;
+ } else {
+ ready = NDSR_RDY;
+ cmd_done = NDSR_CS1_CMDD;
+ }
+
+ /* TODO - find out why we need the delay during write operation. */
+ ndelay(1);
+
+ status = nand_readl(info, NDSR);
+
+ if (status & NDSR_UNCORERR)
+ info->retcode = ERR_UNCORERR;
+ if (status & NDSR_CORERR) {
+ info->retcode = ERR_CORERR;
+ if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
+ info->ecc_bch)
+ info->ecc_err_cnt = NDSR_ERR_CNT(status);
+ else
+ info->ecc_err_cnt = 1;
+
+ /*
+ * Each chunk composing a page is corrected independently,
+ * and we need to store maximum number of corrected bitflips
+ * to return it to the MTD layer in ecc.read_page().
+ */
+ info->max_bitflips = max_t(unsigned int,
+ info->max_bitflips,
+ info->ecc_err_cnt);
+ }
+ if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
+ info->state = (status & NDSR_RDDREQ) ?
+ STATE_PIO_READING : STATE_PIO_WRITING;
+ /* Call the IRQ thread in U-Boot directly */
+ pxa3xx_nand_irq_thread(info);
+ return 0;
+ }
+ if (status & cmd_done) {
+ info->state = STATE_CMD_DONE;
+ is_completed = 1;
+ }
+ if (status & ready) {
+ info->state = STATE_READY;
+ is_ready = 1;
+ }
+
+	/*
+	 * Clear all status bits before issuing the next command, which
+	 * can and will alter the status bits and will deserve a new
+	 * interrupt on its own. This lets the controller exit the IRQ.
+	 */
+ nand_writel(info, NDSR, status);
+
+ if (status & NDSR_WRCMDREQ) {
+ status &= ~NDSR_WRCMDREQ;
+ info->state = STATE_CMD_HANDLE;
+
+		/*
+		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
+		 * must be loaded by writing either 12 or 16 bytes
+		 * directly to NDCB0, four bytes at a time.
+		 *
+		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
+		 * but each NDCBx register can still be read.
+		 */
+ nand_writel(info, NDCB0, info->ndcb0);
+ nand_writel(info, NDCB0, info->ndcb1);
+ nand_writel(info, NDCB0, info->ndcb2);
+
+ /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
+ nand_writel(info, NDCB0, info->ndcb3);
+ }
+
+ if (is_completed)
+ info->cmd_complete = 1;
+ if (is_ready)
+ info->dev_ready = 1;
+
+ return ret;
+}
+
+static inline int is_buf_blank(uint8_t *buf, size_t len)
+{
+ for (; len > 0; len--)
+ if (*buf++ != 0xff)
+ return 0;
+ return 1;
+}
+
+static void set_command_address(struct pxa3xx_nand_info *info,
+ unsigned int page_size, uint16_t column, int page_addr)
+{
+ /* small page addr setting */
+ if (page_size < info->chunk_size) {
+ info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+ | (column & 0xFF);
+
+ info->ndcb2 = 0;
+ } else {
+ info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+ | (column & 0xFFFF);
+
+ if (page_addr & 0xFF0000)
+ info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+ else
+ info->ndcb2 = 0;
+ }
+}
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+
+	/* reset data and oob column pointers to handle data */
+ info->buf_start = 0;
+ info->buf_count = 0;
+ info->data_buff_pos = 0;
+ info->oob_buff_pos = 0;
+ info->step_chunk_size = 0;
+ info->step_spare_size = 0;
+ info->cur_chunk = 0;
+ info->use_ecc = 0;
+ info->use_spare = 1;
+ info->retcode = ERR_NONE;
+ info->ecc_err_cnt = 0;
+ info->ndcb3 = 0;
+ info->need_wait = 0;
+
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_PAGEPROG:
+ if (!info->force_raw)
+ info->use_ecc = 1;
+ break;
+ case NAND_CMD_PARAM:
+ info->use_spare = 0;
+ break;
+ default:
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ break;
+ }
+
+ /*
+ * If we are about to issue a read command, or about to set
+ * the write address, then clean the data buffer.
+ */
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READOOB ||
+ command == NAND_CMD_SEQIN) {
+ info->buf_count = mtd->writesize + mtd->oobsize;
+ memset(info->data_buff, 0xFF, info->buf_count);
+ }
+}
+
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+ int ext_cmd_type, uint16_t column, int page_addr)
+{
+ int addr_cycle, exec_cmd;
+ struct pxa3xx_nand_host *host;
+ struct mtd_info *mtd;
+
+ host = info->host[info->cs];
+ mtd = nand_to_mtd(&host->chip);
+ addr_cycle = 0;
+ exec_cmd = 1;
+
+ if (info->cs != 0)
+ info->ndcb0 = NDCB0_CSEL;
+ else
+ info->ndcb0 = 0;
+
+ if (command == NAND_CMD_SEQIN)
+ exec_cmd = 0;
+
+ addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+ + host->col_addr_cycles);
+
+ switch (command) {
+ case NAND_CMD_READOOB:
+ case NAND_CMD_READ0:
+ info->buf_start = column;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | addr_cycle
+ | NAND_CMD_READ0;
+
+ if (command == NAND_CMD_READOOB)
+ info->buf_start += mtd->writesize;
+
+ if (info->cur_chunk < info->nfullchunks) {
+ info->step_chunk_size = info->chunk_size;
+ info->step_spare_size = info->spare_size;
+ } else {
+ info->step_chunk_size = info->last_chunk_size;
+ info->step_spare_size = info->last_spare_size;
+ }
+
+ /*
+ * Multiple page read needs an 'extended command type' field,
+ * which is either naked-read or last-read according to the
+ * state.
+ */
+ if (info->force_raw) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
+ NDCB0_LEN_OVRD |
+ NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->step_chunk_size +
+ info->step_spare_size + info->ecc_size;
+ } else if (mtd->writesize == info->chunk_size) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+ } else if (mtd->writesize > info->chunk_size) {
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->step_chunk_size +
+ info->step_spare_size;
+ }
+
+ set_command_address(info, mtd->writesize, column, page_addr);
+ break;
+
+ case NAND_CMD_SEQIN:
+
+ info->buf_start = column;
+ set_command_address(info, mtd->writesize, 0, page_addr);
+
+ /*
+ * Multiple page programming needs to execute the initial
+ * SEQIN command that sets the page address.
+ */
+ if (mtd->writesize > info->chunk_size) {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | addr_cycle
+ | command;
+ exec_cmd = 1;
+ }
+ break;
+
+ case NAND_CMD_PAGEPROG:
+ if (is_buf_blank(info->data_buff,
+ (mtd->writesize + mtd->oobsize))) {
+ exec_cmd = 0;
+ break;
+ }
+
+ if (info->cur_chunk < info->nfullchunks) {
+ info->step_chunk_size = info->chunk_size;
+ info->step_spare_size = info->spare_size;
+ } else {
+ info->step_chunk_size = info->last_chunk_size;
+ info->step_spare_size = info->last_spare_size;
+ }
+
+ /* Second command setting for large pages */
+ if (mtd->writesize > info->chunk_size) {
+ /*
+ * Multiple page write uses the 'extended command'
+ * field. This can be used to issue a command dispatch
+ * or a naked-write depending on the current stage.
+ */
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+ info->ndcb3 = info->step_chunk_size +
+ info->step_spare_size;
+
+ /*
+ * This is the command dispatch that completes a chunked
+ * page program operation.
+ */
+ if (info->cur_chunk == info->ntotalchunks) {
+ info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | command;
+ info->ndcb1 = 0;
+ info->ndcb2 = 0;
+ info->ndcb3 = 0;
+ }
+ } else {
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_AUTO_RS
+ | NDCB0_ST_ROW_EN
+ | NDCB0_DBC
+ | (NAND_CMD_PAGEPROG << 8)
+ | NAND_CMD_SEQIN
+ | addr_cycle;
+ }
+ break;
+
+ case NAND_CMD_PARAM:
+ info->buf_count = INIT_BUFFER_SIZE;
+ info->ndcb0 |= NDCB0_CMD_TYPE(0)
+ | NDCB0_ADDR_CYC(1)
+ | NDCB0_LEN_OVRD
+ | command;
+ info->ndcb1 = (column & 0xFF);
+ info->ndcb3 = INIT_BUFFER_SIZE;
+ info->step_chunk_size = INIT_BUFFER_SIZE;
+ break;
+
+ case NAND_CMD_READID:
+ info->buf_count = READ_ID_BYTES;
+ info->ndcb0 |= NDCB0_CMD_TYPE(3)
+ | NDCB0_ADDR_CYC(1)
+ | command;
+ info->ndcb1 = (column & 0xFF);
+
+ info->step_chunk_size = 8;
+ break;
+ case NAND_CMD_STATUS:
+ info->buf_count = 1;
+ info->ndcb0 |= NDCB0_CMD_TYPE(4)
+ | NDCB0_ADDR_CYC(1)
+ | command;
+
+ info->step_chunk_size = 8;
+ break;
+
+ case NAND_CMD_ERASE1:
+ info->ndcb0 |= NDCB0_CMD_TYPE(2)
+ | NDCB0_AUTO_RS
+ | NDCB0_ADDR_CYC(3)
+ | NDCB0_DBC
+ | (NAND_CMD_ERASE2 << 8)
+ | NAND_CMD_ERASE1;
+ info->ndcb1 = page_addr;
+ info->ndcb2 = 0;
+
+ break;
+ case NAND_CMD_RESET:
+ info->ndcb0 |= NDCB0_CMD_TYPE(5)
+ | command;
+
+ break;
+
+ case NAND_CMD_ERASE2:
+ exec_cmd = 0;
+ break;
+
+ default:
+ exec_cmd = 0;
+ dev_err(mtd->dev, "non-supported command %x\n",
+ command);
+ break;
+ }
+
+ return exec_cmd;
+}
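+
+/*
+ * For illustration (assuming chip select 0, a 2048B-page device with
+ * mtd->writesize == info->chunk_size and 2 column + 3 row address
+ * cycles), a READ0 assembles the single command word
+ *   ndcb0 = NDCB0_CMD_TYPE(0) | NDCB0_ADDR_CYC(5) | NDCB0_DBC
+ *         | (NAND_CMD_READSTART << 8) | NAND_CMD_READ0;
+ * i.e. cmd1 = 0x00 and cmd2 = 0x30 as a double-byte command with five
+ * address cycles, while NDCB1/NDCB2 carry the column and page address
+ * set by set_command_address().
+ */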
+
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ int exec_cmd;
+
+ /*
+ * If this is a x16 device, then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device.
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+ * Different NAND chips may be attached to different chip
+ * selects, so check whether the chip select has changed;
+ * if so, reload the timing registers.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ prepare_start_command(info, command);
+
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
+ if (exec_cmd) {
+ u32 ts;
+
+ info->cmd_complete = 0;
+ info->dev_ready = 0;
+ info->need_wait = 1;
+ pxa3xx_nand_start(info);
+
+ ts = get_timer(0);
+ while (1) {
+ u32 status;
+
+ status = nand_readl(info, NDSR);
+ if (status)
+ pxa3xx_nand_irq(info);
+
+ if (info->cmd_complete)
+ break;
+
+ if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
+ dev_err(mtd->dev, "Wait timeout!!!\n");
+ return;
+ }
+ }
+ }
+ info->state = STATE_IDLE;
+}
+
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+ const unsigned command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ int exec_cmd, ext_cmd_type;
+
+ /*
+ * If this is a x16 device, then convert the input
+ * "byte" address into a "word" address appropriate
+ * for indexing a word-oriented device.
+ */
+ if (info->reg_ndcr & NDCR_DWIDTH_M)
+ column /= 2;
+
+ /*
+ * Different NAND chips may be attached to different chip
+ * selects, so check whether the chip select has changed;
+ * if so, reload the timing registers.
+ */
+ if (info->cs != host->cs) {
+ info->cs = host->cs;
+ nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+ nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+ }
+
+ /* Select the extended command for the first command */
+ switch (command) {
+ case NAND_CMD_READ0:
+ case NAND_CMD_READOOB:
+ ext_cmd_type = EXT_CMD_TYPE_MONO;
+ break;
+ case NAND_CMD_SEQIN:
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ break;
+ case NAND_CMD_PAGEPROG:
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+ break;
+ default:
+ ext_cmd_type = 0;
+ break;
+ }
+
+ prepare_start_command(info, command);
+
+ /*
+ * Prepare the "is ready" completion before starting a command
+ * transaction sequence. If the command is not executed, the
+ * completion is marked done immediately; see below.
+ *
+ * We can do this outside the loop because the command variable
+ * is invariant, and thus so is exec_cmd.
+ */
+ info->need_wait = 1;
+ info->dev_ready = 0;
+
+ do {
+ u32 ts;
+
+ info->state = STATE_PREPARED;
+ exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+ column, page_addr);
+ if (!exec_cmd) {
+ info->need_wait = 0;
+ info->dev_ready = 1;
+ break;
+ }
+
+ info->cmd_complete = 0;
+ pxa3xx_nand_start(info);
+
+ ts = get_timer(0);
+ while (1) {
+ u32 status;
+
+ status = nand_readl(info, NDSR);
+ if (status)
+ pxa3xx_nand_irq(info);
+
+ if (info->cmd_complete)
+ break;
+
+ if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
+ dev_err(mtd->dev, "Wait timeout!!!\n");
+ return;
+ }
+ }
+
+ /* Only a few commands need several steps */
+ if (command != NAND_CMD_PAGEPROG &&
+ command != NAND_CMD_READ0 &&
+ command != NAND_CMD_READOOB)
+ break;
+
+ info->cur_chunk++;
+
+ /* Check if the sequence is complete */
+ if (info->cur_chunk == info->ntotalchunks &&
+ command != NAND_CMD_PAGEPROG)
+ break;
+
+ /*
+ * Once a split program command sequence has issued its final
+ * command dispatch, the whole sequence is complete.
+ */
+ if (info->cur_chunk == (info->ntotalchunks + 1) &&
+ command == NAND_CMD_PAGEPROG &&
+ ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+ break;
+
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+ /* Last read: issue a 'last naked read' */
+ if (info->cur_chunk == info->ntotalchunks - 1)
+ ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+ else
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+ /*
+ * If a split program command has no more data to transfer,
+ * the command dispatch must be issued to complete it.
+ */
+ } else if (command == NAND_CMD_PAGEPROG &&
+ info->cur_chunk == info->ntotalchunks) {
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ }
+ } while (1);
+
+ info->state = STATE_IDLE;
+}
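+
+/*
+ * Illustrative sequence, assuming a read of a 4096B page split into
+ * 2048B chunks (ntotalchunks == 2): the loop above issues
+ *   chunk 0: READ0 with EXT_CMD_TYPE_MONO (addresses + first chunk)
+ *   chunk 1: a naked read re-selected as EXT_CMD_TYPE_LAST_RW
+ * and exits once cur_chunk reaches ntotalchunks. A chunked PAGEPROG
+ * runs one extra step and terminates with EXT_CMD_TYPE_DISPATCH.
+ */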
+
+static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
+{
+ chip->write_buf(mtd, buf, mtd->writesize);
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ return 0;
+}
+
+static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
+{
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ int bf;
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (info->retcode == ERR_CORERR && info->use_ecc) {
+ mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+ } else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
+ /*
+ * Empty pages will trigger uncorrectable errors. Re-read the
+ * entire page in raw mode and check for bits not being "1".
+ * If more bits are flipped than the supported ECC strength,
+ * this is a genuine uncorrectable error.
+ */
+ chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
+ bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ NULL, 0, chip->ecc.strength);
+ if (bf < 0) {
+ mtd->ecc_stats.failed++;
+ } else if (bf) {
+ mtd->ecc_stats.corrected += bf;
+ info->max_bitflips = max_t(unsigned int,
+ info->max_bitflips, bf);
+ info->retcode = ERR_CORERR;
+ } else {
+ info->retcode = ERR_NONE;
+ }
+
+ } else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
+ /* Raw read is not supported with Hamming ECC engine */
+ if (is_buf_blank(buf, mtd->writesize))
+ info->retcode = ERR_NONE;
+ else
+ mtd->ecc_stats.failed++;
+ }
+
+ return info->max_bitflips;
+}
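+
+/*
+ * Note on the erased-page check above: nand_check_erased_ecc_chunk()
+ * counts the 0-bits across the raw data and OOB. If the count is at
+ * or below ecc.strength (e.g. 16 for the BCH layouts selected in
+ * pxa_ecc_init()), the page is treated as erased with bf bitflips;
+ * otherwise it returns a negative value and the read is accounted as
+ * a genuine uncorrectable error.
+ */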
+
+static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct pxa3xx_nand_host *host = chip->priv;
+ struct pxa3xx_nand_info *info = host->info_data;
+ int chunk, ecc_off_buf;
+
+ if (!info->ecc_bch)
+ return -ENOTSUPP;
+
+ /*
+ * Set the force_raw boolean, then re-call ->cmdfunc() that will run
+ * pxa3xx_nand_start(), which will actually disable the ECC engine.
+ */
+ info->force_raw = true;
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+ ecc_off_buf = (info->nfullchunks * info->spare_size) +
+ info->last_spare_size;
+ for (chunk = 0; chunk < info->nfullchunks; chunk++) {
+ chip->read_buf(mtd,
+ buf + (chunk * info->chunk_size),
+ info->chunk_size);
+ chip->read_buf(mtd,
+ chip->oob_poi +
+ (chunk * (info->spare_size)),
+ info->spare_size);
+ chip->read_buf(mtd,
+ chip->oob_poi + ecc_off_buf +
+ (chunk * (info->ecc_size)),
+ info->ecc_size - 2);
+ }
+
+ if (info->ntotalchunks > info->nfullchunks) {
+ chip->read_buf(mtd,
+ buf + (info->nfullchunks * info->chunk_size),
+ info->last_chunk_size);
+ chip->read_buf(mtd,
+ chip->oob_poi +
+ (info->nfullchunks * (info->spare_size)),
+ info->last_spare_size);
+ chip->read_buf(mtd,
+ chip->oob_poi + ecc_off_buf +
+ (info->nfullchunks * (info->ecc_size)),
+ info->ecc_size - 2);
+ }
+
+ info->force_raw = false;
+
+ return 0;
+}
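+
+/*
+ * Raw layout sketch (illustrative, for nfullchunks == 4,
+ * chunk_size == 1024, spare_size == 0, ecc_size == 32): the
+ * controller streams each chunk as data + spare + ECC, so the loop
+ * above splits the stream into
+ *   buf:                   4 x 1024 bytes of data
+ *   oob_poi:               the per-chunk spare bytes, packed first
+ *   oob_poi + ecc_off_buf: the per-chunk ECC bytes
+ * with ecc_off_buf sized to the total spare area preceding the ECC
+ * region.
+ */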
+
+static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, int page)
+{
+ /* Invalidate page cache */
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
+ page);
+}
+
+static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ char retval = 0xFF;
+
+ if (info->buf_start < info->buf_count)
+ /* Has a new command just been sent? */
+ retval = info->data_buff[info->buf_start++];
+
+ return retval;
+}
+
+static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ u16 retval = 0xFFFF;
+
+ if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
+ retval = *((u16 *)(info->data_buff+info->buf_start));
+ info->buf_start += 2;
+ }
+ return retval;
+}
+
+static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+ memcpy(buf, info->data_buff + info->buf_start, real_len);
+ info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+ memcpy(info->data_buff + info->buf_start, buf, real_len);
+ info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ return;
+}
+
+static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+
+ if (info->need_wait) {
+ u32 ts;
+
+ info->need_wait = 0;
+
+ ts = get_timer(0);
+ while (1) {
+ u32 status;
+
+ status = nand_readl(info, NDSR);
+ if (status)
+ pxa3xx_nand_irq(info);
+
+ if (info->dev_ready)
+ break;
+
+ if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
+ dev_err(mtd->dev, "Ready timeout!!!\n");
+ return NAND_STATUS_FAIL;
+ }
+ }
+ }
+
+ /* the cmdfunc path has already waited for the command to complete */
+ if (this->state == FL_WRITING || this->state == FL_ERASING) {
+ if (info->retcode == ERR_NONE)
+ return 0;
+ else
+ return NAND_STATUS_FAIL;
+ }
+
+ return NAND_STATUS_READY;
+}
+
+static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
+{
+ struct pxa3xx_nand_platform_data *pdata = info->pdata;
+
+ /* Configure default flash values */
+ info->reg_ndcr = 0x0; /* enable all interrupts */
+ info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+ info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
+ info->reg_ndcr |= NDCR_SPARE_EN;
+
+ return 0;
+}
+
+static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
+{
+ struct pxa3xx_nand_host *host = info->host[info->cs];
+ struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+ info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
+ info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
+}
+
+static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
+{
+ struct pxa3xx_nand_platform_data *pdata = info->pdata;
+ uint32_t ndcr = nand_readl(info, NDCR);
+
+ /* Set an initial chunk size */
+ info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
+ info->reg_ndcr = ndcr &
+ ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
+ info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+ info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+ info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
+}
+
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
+{
+ struct pxa3xx_nand_info *info = host->info_data;
+ struct pxa3xx_nand_platform_data *pdata = info->pdata;
+ struct mtd_info *mtd;
+ struct nand_chip *chip;
+ const struct nand_sdr_timings *timings;
+ int ret;
+
+ mtd = nand_to_mtd(&info->host[info->cs]->chip);
+ chip = mtd_to_nand(mtd);
+
+ /* configure default flash values */
+ info->reg_ndcr = 0x0; /* enable all interrupts */
+ info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+ info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
+ info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
+
+ /* try with the slowest, most common timing mode (mode 0) first */
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ pxa3xx_nand_set_sdr_timing(host, timings);
+
+ chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+ ret = chip->waitfunc(mtd, chip);
+ if (ret & NAND_STATUS_FAIL)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+ struct nand_ecc_ctrl *ecc,
+ int strength, int ecc_stepsize, int page_size)
+{
+ if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 40;
+ info->ecc_size = 24;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+
+ } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
+ info->chunk_size = 512;
+ info->spare_size = 8;
+ info->ecc_size = 8;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = 512;
+ ecc->strength = 1;
+
+ /*
+ * Required ECC: 4-bit correction per 512 bytes
+ * Select: 16-bit correction per 2048 bytes
+ */
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+ info->ecc_bch = 1;
+ info->nfullchunks = 1;
+ info->ntotalchunks = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_2KB_bch4bit;
+ ecc->strength = 16;
+
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->nfullchunks = 2;
+ info->ntotalchunks = 2;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch4bit;
+ ecc->strength = 16;
+
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
+ info->ecc_bch = 1;
+ info->nfullchunks = 4;
+ info->ntotalchunks = 4;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_8KB_bch4bit;
+ ecc->strength = 16;
+
+ /*
+ * Required ECC: 8-bit correction per 512 bytes
+ * Select: 16-bit correction per 1024 bytes
+ */
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
+ info->ecc_bch = 1;
+ info->nfullchunks = 1;
+ info->ntotalchunks = 2;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
+ info->last_chunk_size = 1024;
+ info->last_spare_size = 32;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_2KB_bch8bit;
+ ecc->strength = 16;
+
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
+ info->nfullchunks = 4;
+ info->ntotalchunks = 5;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
+ info->last_chunk_size = 0;
+ info->last_spare_size = 64;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_4KB_bch8bit;
+ ecc->strength = 16;
+
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
+ info->ecc_bch = 1;
+ info->nfullchunks = 8;
+ info->ntotalchunks = 9;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
+ info->last_chunk_size = 0;
+ info->last_spare_size = 160;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;
+ ecc->layout = &ecc_layout_8KB_bch8bit;
+ ecc->strength = 16;
+
+ } else {
+ dev_err(info->controller.active->mtd.dev,
+ "ECC strength %d at page size %d is not supported\n",
+ strength, page_size);
+ return -ENODEV;
+ }
+
+ return 0;
+}
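+
+/*
+ * Worked example for the 8-bit/4096B case above: the page is split
+ * into 4 full chunks of 1024B data with no in-chunk spare, plus a
+ * 5th step carrying only the 64 spare bytes (nfullchunks == 4,
+ * ntotalchunks == 5). Each of the 5 steps is protected by 32 ECC
+ * bytes, i.e. 4 * 1024 = 4096 data bytes and 64 + 5 * 32 = 224 OOB
+ * bytes per page.
+ */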
+
+static int pxa3xx_nand_scan(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ struct pxa3xx_nand_platform_data *pdata = info->pdata;
+ int ret;
+ uint16_t ecc_strength, ecc_step;
+
+ if (pdata->keep_config) {
+ pxa3xx_nand_detect_config(info);
+ } else {
+ ret = pxa3xx_nand_config_ident(info);
+ if (ret)
+ return ret;
+ ret = pxa3xx_nand_sensing(host);
+ if (ret) {
+ dev_info(mtd->dev, "There is no chip on cs %d!\n",
+ info->cs);
+ return ret;
+ }
+ }
+
+ /* Device detection must be done with ECC disabled */
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
+ nand_writel(info, NDECCCTRL, 0x0);
+
+ if (nand_scan_ident(mtd, 1, NULL))
+ return -ENODEV;
+
+ if (!pdata->keep_config) {
+ ret = pxa3xx_nand_init_timings(host);
+ if (ret) {
+ dev_err(mtd->dev,
+ "Failed to set timings: %d\n", ret);
+ return ret;
+ }
+ }
+
+#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
+ /*
+ * We'll use a bad block table stored in-flash and don't
+ * allow writing the bad block marker to the flash.
+ */
+ chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
+ chip->bbt_td = &bbt_main_descr;
+ chip->bbt_md = &bbt_mirror_descr;
+#endif
+
+ if (pdata->ecc_strength && pdata->ecc_step_size) {
+ ecc_strength = pdata->ecc_strength;
+ ecc_step = pdata->ecc_step_size;
+ } else {
+ ecc_strength = chip->ecc_strength_ds;
+ ecc_step = chip->ecc_step_ds;
+ }
+
+ /* Set default ECC strength requirements on non-ONFI devices */
+ if (ecc_strength < 1 && ecc_step < 1) {
+ ecc_strength = 1;
+ ecc_step = 512;
+ }
+
+ ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+ ecc_step, mtd->writesize);
+ if (ret)
+ return ret;
+
+ /*
+ * If the page size is bigger than the FIFO size, let's check
+ * we are given the right variant and then switch to the extended
+ * (aka split) command handling.
+ */
+ if (mtd->writesize > info->chunk_size) {
+ if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
+ info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+ chip->cmdfunc = nand_cmdfunc_extended;
+ } else {
+ dev_err(mtd->dev,
+ "unsupported page size on this variant\n");
+ return -ENODEV;
+ }
+ }
+
+ /* calculate addressing information */
+ if (mtd->writesize >= 2048)
+ host->col_addr_cycles = 2;
+ else
+ host->col_addr_cycles = 1;
+
+ /* release the initial buffer */
+ kfree(info->data_buff);
+
+ /* allocate the real data + oob buffer */
+ info->buf_size = mtd->writesize + mtd->oobsize;
+ ret = pxa3xx_nand_init_buff(info);
+ if (ret)
+ return ret;
+ info->oob_buff = info->data_buff + mtd->writesize;
+
+ if ((mtd->size >> chip->page_shift) > 65536)
+ host->row_addr_cycles = 3;
+ else
+ host->row_addr_cycles = 2;
+
+ if (!pdata->keep_config)
+ pxa3xx_nand_config_tail(info);
+
+ return nand_scan_tail(mtd);
+}
+
+static int alloc_nand_resource(struct udevice *dev, struct pxa3xx_nand_info *info)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct pxa3xx_nand_host *host;
+ struct nand_chip *chip = NULL;
+ struct mtd_info *mtd;
+ int cs;
+
+ pdata = info->pdata;
+ if (pdata->num_cs <= 0)
+ return -ENODEV;
+
+ info->variant = pxa3xx_nand_get_variant(dev);
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ chip = (struct nand_chip *)
+ ((u8 *)&info[1] + sizeof(*host) * cs);
+ mtd = nand_to_mtd(chip);
+ host = (struct pxa3xx_nand_host *)chip;
+ info->host[cs] = host;
+ host->cs = cs;
+ host->info_data = info;
+ mtd->owner = THIS_MODULE;
+
+ nand_set_controller_data(chip, host);
+ chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
+ chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
+ chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
+ chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
+ chip->controller = &info->controller;
+ chip->waitfunc = pxa3xx_nand_waitfunc;
+ chip->select_chip = pxa3xx_nand_select_chip;
+ chip->read_word = pxa3xx_nand_read_word;
+ chip->read_byte = pxa3xx_nand_read_byte;
+ chip->read_buf = pxa3xx_nand_read_buf;
+ chip->write_buf = pxa3xx_nand_write_buf;
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->cmdfunc = nand_cmdfunc;
+ }
+
+ /* Allocate a buffer to allow flash detection */
+ info->buf_size = INIT_BUFFER_SIZE;
+ info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+ if (info->data_buff == NULL)
+ return -ENOMEM;
+
+ /* initialize all interrupts to be disabled */
+ disable_int(info, NDSR_MASK);
+
+ /*
+ * Some SoCs, like A7k/A8k, need the NAND controller to be enabled
+ * manually, to avoid depending on the bootloader. This is done
+ * through the use of a single bit in the System Functions registers.
+ */
+ if (pxa3xx_nand_get_variant(dev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
+ struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
+ dev, "marvell,system-controller");
+ u32 reg;
+
+ if (IS_ERR(sysctrl_base))
+ return PTR_ERR(sysctrl_base);
+
+ regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
+ reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
+ regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
+ }
+
+ return 0;
+}
+
+static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ info->mmio_base = dev_read_addr_ptr(dev);
+
+ pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
+ if (pdata->num_cs != 1) {
+ pr_err("pxa3xx driver supports single CS only\n");
+ return -EINVAL;
+ }
+
+ if (dev_read_bool(dev, "nand-enable-arbiter"))
+ pdata->enable_arbiter = 1;
+
+ if (dev_read_bool(dev, "nand-keep-config"))
+ pdata->keep_config = 1;
+
+ /*
+ * ECC parameters.
+ * If these are not set, they will be selected according
+ * to the detected flash type.
+ */
+ /* ECC strength */
+ pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0);
+
+ /* ECC step size */
+ pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size",
+ 0);
+
+ info->pdata = pdata;
+
+ return 0;
+}
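+
+/*
+ * For reference, a minimal (hypothetical) device tree node matching
+ * the properties parsed above could look like:
+ *
+ *	nand-controller {
+ *		num-cs = <1>;
+ *		nand-enable-arbiter;
+ *		nand-keep-config;
+ *		nand-ecc-strength = <4>;
+ *		nand-ecc-step-size = <512>;
+ *	};
+ */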
+
+static int pxa3xx_nand_probe(struct udevice *dev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ int ret, cs, probe_success;
+ struct pxa3xx_nand_info *info = dev_get_priv(dev);
+
+ ret = pxa3xx_nand_probe_dt(dev, info);
+ if (ret)
+ return ret;
+
+ pdata = info->pdata;
+
+ ret = alloc_nand_resource(dev, info);
+ if (ret) {
+ dev_err(dev, "alloc nand resource failed\n");
+ return ret;
+ }
+
+ probe_success = 0;
+ for (cs = 0; cs < pdata->num_cs; cs++) {
+ struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
+
+ /*
+ * The mtd name matches the one used in the 'mtdparts' kernel
+ * parameter. This name must not be changed, otherwise the
+ * user's mtd partition configuration would break.
+ */
+ mtd->name = "pxa3xx_nand-0";
+ info->cs = cs;
+ ret = pxa3xx_nand_scan(mtd);
+ if (ret) {
+ dev_info(mtd->dev, "failed to scan nand at cs %d\n",
+ cs);
+ continue;
+ }
+
+ if (nand_register(cs, mtd))
+ continue;
+
+ probe_success = 1;
+ }
+
+ if (!probe_success)
+ return -ENODEV;
+
+ return 0;
+}
+
+U_BOOT_DRIVER(pxa3xx_nand) = {
+ .name = "pxa3xx-nand",
+ .id = UCLASS_MTD,
+ .of_match = pxa3xx_nand_dt_ids,
+ .probe = pxa3xx_nand_probe,
+ .priv_auto = sizeof(struct pxa3xx_nand_info) +
+ sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE,
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(pxa3xx_nand), &dev);
+ if (ret && ret != -ENODEV) {
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name,
+ ret);
+ }
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.h b/roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.h
new file mode 100644
index 000000000..8f24ae6d1
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/pxa3xx_nand.h
@@ -0,0 +1,64 @@
+#ifndef __ASM_ARCH_PXA3XX_NAND_H
+#define __ASM_ARCH_PXA3XX_NAND_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+struct pxa3xx_nand_timing {
+ unsigned int tCH; /* Enable signal hold time */
+ unsigned int tCS; /* Enable signal setup time */
+ unsigned int tWH; /* ND_nWE high duration */
+ unsigned int tWP; /* ND_nWE pulse time */
+ unsigned int tRH; /* ND_nRE high duration */
+ unsigned int tRP; /* ND_nRE pulse width */
+ unsigned int tR; /* ND_nWE high to ND_nRE low for read */
+ unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
+ unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
+};
+
+struct pxa3xx_nand_flash {
+ uint32_t chip_id;
+ unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
+ unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
+ struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
+};
+
+/*
+ * The current pxa3xx_nand controller has two usable chip selects.
+ *
+ * Note that when both are used, the keep-configuration feature
+ * must not be enabled, since each chip select may be attached to
+ * a different NAND chip: differing page sizes and timing
+ * requirements make a single kept configuration impossible.
+ */
+
+/* The maximum number of chip selects currently supported */
+#define NUM_CHIP_SELECT (2)
+struct pxa3xx_nand_platform_data {
+ /* the data flash bus is shared between the Static Memory
+ * Controller and the Data Flash Controller; the arbiter
+ * controls ownership of the bus
+ */
+ int enable_arbiter;
+
+ /* allow platform code to keep OBM/bootloader defined NFC config */
+ int keep_config;
+
+ /* indicate how many chip selects will be used */
+ int num_cs;
+
+ /* use a flash-based bad block table */
+ bool flash_bbt;
+
+ /* requested ECC strength and ECC step size */
+ int ecc_strength, ecc_step_size;
+
+ const struct mtd_partition *parts[NUM_CHIP_SELECT];
+ unsigned int nr_parts[NUM_CHIP_SELECT];
+
+ const struct pxa3xx_nand_flash *flash;
+ size_t num_flash;
+};
+#endif /* __ASM_ARCH_PXA3XX_NAND_H */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/roms/u-boot/drivers/mtd/nand/raw/stm32_fmc2_nand.c
new file mode 100644
index 000000000..fd81a9500
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (C) STMicroelectronics 2019
+ * Author: Christophe Kerello <christophe.kerello@st.com>
+ */
+
+#define LOG_CATEGORY UCLASS_MTD
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <nand.h>
+#include <reset.h>
+#include <dm/device_compat.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+
+/* Bad block marker length */
+#define FMC2_BBM_LEN 2
+
+/* ECC step size */
+#define FMC2_ECC_STEP_SIZE 512
+
+/* Command delay */
+#define FMC2_RB_DELAY_US 30
+
+/* Max chip enable */
+#define FMC2_MAX_CE 2
+
+/* Timings */
+#define FMC2_THIZ 1
+#define FMC2_TIO 8000
+#define FMC2_TSYNC 3000
+#define FMC2_PCR_TIMING_MASK 0xf
+#define FMC2_PMEM_PATT_TIMING_MASK 0xff
+
+/* FMC2 Controller Registers */
+#define FMC2_BCR1 0x0
+#define FMC2_PCR 0x80
+#define FMC2_SR 0x84
+#define FMC2_PMEM 0x88
+#define FMC2_PATT 0x8c
+#define FMC2_HECCR 0x94
+#define FMC2_BCHISR 0x254
+#define FMC2_BCHICR 0x258
+#define FMC2_BCHPBR1 0x260
+#define FMC2_BCHPBR2 0x264
+#define FMC2_BCHPBR3 0x268
+#define FMC2_BCHPBR4 0x26c
+#define FMC2_BCHDSR0 0x27c
+#define FMC2_BCHDSR1 0x280
+#define FMC2_BCHDSR2 0x284
+#define FMC2_BCHDSR3 0x288
+#define FMC2_BCHDSR4 0x28c
+
+/* Register: FMC2_BCR1 */
+#define FMC2_BCR1_FMC2EN BIT(31)
+
+/* Register: FMC2_PCR */
+#define FMC2_PCR_PWAITEN BIT(1)
+#define FMC2_PCR_PBKEN BIT(2)
+#define FMC2_PCR_PWID GENMASK(5, 4)
+#define FMC2_PCR_PWID_BUSWIDTH_8 0
+#define FMC2_PCR_PWID_BUSWIDTH_16 1
+#define FMC2_PCR_ECCEN BIT(6)
+#define FMC2_PCR_ECCALG BIT(8)
+#define FMC2_PCR_TCLR GENMASK(12, 9)
+#define FMC2_PCR_TCLR_DEFAULT 0xf
+#define FMC2_PCR_TAR GENMASK(16, 13)
+#define FMC2_PCR_TAR_DEFAULT 0xf
+#define FMC2_PCR_ECCSS GENMASK(19, 17)
+#define FMC2_PCR_ECCSS_512 1
+#define FMC2_PCR_ECCSS_2048 3
+#define FMC2_PCR_BCHECC BIT(24)
+#define FMC2_PCR_WEN BIT(25)
+
+/* Register: FMC2_SR */
+#define FMC2_SR_NWRF BIT(6)
+
+/* Register: FMC2_PMEM */
+#define FMC2_PMEM_MEMSET GENMASK(7, 0)
+#define FMC2_PMEM_MEMWAIT GENMASK(15, 8)
+#define FMC2_PMEM_MEMHOLD GENMASK(23, 16)
+#define FMC2_PMEM_MEMHIZ GENMASK(31, 24)
+#define FMC2_PMEM_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_PATT */
+#define FMC2_PATT_ATTSET GENMASK(7, 0)
+#define FMC2_PATT_ATTWAIT GENMASK(15, 8)
+#define FMC2_PATT_ATTHOLD GENMASK(23, 16)
+#define FMC2_PATT_ATTHIZ GENMASK(31, 24)
+#define FMC2_PATT_DEFAULT 0x0a0a0a0a
+
+/* Register: FMC2_BCHISR */
+#define FMC2_BCHISR_DERF BIT(1)
+#define FMC2_BCHISR_EPBRF BIT(4)
+
+/* Register: FMC2_BCHICR */
+#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0)
+
+/* Register: FMC2_BCHDSR0 */
+#define FMC2_BCHDSR0_DUE BIT(0)
+#define FMC2_BCHDSR0_DEF BIT(1)
+#define FMC2_BCHDSR0_DEN GENMASK(7, 4)
+
+/* Register: FMC2_BCHDSR1 */
+#define FMC2_BCHDSR1_EBP1 GENMASK(12, 0)
+#define FMC2_BCHDSR1_EBP2 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR2 */
+#define FMC2_BCHDSR2_EBP3 GENMASK(12, 0)
+#define FMC2_BCHDSR2_EBP4 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR3 */
+#define FMC2_BCHDSR3_EBP5 GENMASK(12, 0)
+#define FMC2_BCHDSR3_EBP6 GENMASK(28, 16)
+
+/* Register: FMC2_BCHDSR4 */
+#define FMC2_BCHDSR4_EBP7 GENMASK(12, 0)
+#define FMC2_BCHDSR4_EBP8 GENMASK(28, 16)
+
+#define FMC2_NSEC_PER_SEC 1000000000L
+
+#define FMC2_TIMEOUT_5S 5000000
+
+enum stm32_fmc2_ecc {
+ FMC2_ECC_HAM = 1,
+ FMC2_ECC_BCH4 = 4,
+ FMC2_ECC_BCH8 = 8
+};
+
+struct stm32_fmc2_timings {
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 twait;
+ u8 thold_mem;
+ u8 tset_mem;
+ u8 thold_att;
+ u8 tset_att;
+};
+
+struct stm32_fmc2_nand {
+ struct nand_chip chip;
+ struct stm32_fmc2_timings timings;
+ int ncs;
+ int cs_used[FMC2_MAX_CE];
+};
+
+static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct stm32_fmc2_nand, chip);
+}
+
+struct stm32_fmc2_nfc {
+ struct nand_hw_control base;
+ struct stm32_fmc2_nand nand;
+ struct nand_ecclayout ecclayout;
+ fdt_addr_t io_base;
+ fdt_addr_t data_base[FMC2_MAX_CE];
+ fdt_addr_t cmd_base[FMC2_MAX_CE];
+ fdt_addr_t addr_base[FMC2_MAX_CE];
+ struct clk clk;
+
+ u8 cs_assigned;
+ int cs_sel;
+};
+
+static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_hw_control *base)
+{
+ return container_of(base, struct stm32_fmc2_nfc, base);
+}
+
+static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *timings = &nand->timings;
+ u32 pmem, patt;
+
+ /* Set tclr/tar timings */
+ clrsetbits_le32(nfc->io_base + FMC2_PCR,
+ FMC2_PCR_TCLR | FMC2_PCR_TAR,
+ FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) |
+ FIELD_PREP(FMC2_PCR_TAR, timings->tar));
+
+ /* Set tset/twait/thold/thiz timings in common bank */
+ pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem);
+ pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz);
+ writel(pmem, nfc->io_base + FMC2_PMEM);
+
+ /* Set tset/twait/thold/thiz timings in attribute bank */
+ patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att);
+ patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz);
+ writel(patt, nfc->io_base + FMC2_PATT);
+}
+
+static void stm32_fmc2_nfc_setup(struct nand_chip *chip)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 pcr = 0, pcr_mask;
+
+ /* Configure ECC algorithm (default configuration is Hamming) */
+ pcr_mask = FMC2_PCR_ECCALG;
+ pcr_mask |= FMC2_PCR_BCHECC;
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ pcr |= FMC2_PCR_ECCALG;
+ pcr |= FMC2_PCR_BCHECC;
+ } else if (chip->ecc.strength == FMC2_ECC_BCH4) {
+ pcr |= FMC2_PCR_ECCALG;
+ }
+
+ /* Set buswidth */
+ pcr_mask |= FMC2_PCR_PWID;
+ if (chip->options & NAND_BUSWIDTH_16)
+ pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16);
+
+ /* Set ECC sector size */
+ pcr_mask |= FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512);
+
+ clrsetbits_le32(nfc->io_base + FMC2_PCR, pcr_mask, pcr);
+}
+
+static void stm32_fmc2_nfc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+
+ if (chipnr < 0 || chipnr >= nand->ncs)
+ return;
+
+ if (nand->cs_used[chipnr] == nfc->cs_sel)
+ return;
+
+ nfc->cs_sel = nand->cs_used[chipnr];
+ chip->IO_ADDR_R = (void __iomem *)nfc->data_base[nfc->cs_sel];
+ chip->IO_ADDR_W = (void __iomem *)nfc->data_base[nfc->cs_sel];
+
+ stm32_fmc2_nfc_setup(chip);
+ stm32_fmc2_nfc_timings_init(chip);
+}
+
+static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc,
+ bool set)
+{
+ u32 pcr;
+
+ pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) :
+ FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8);
+
+ clrsetbits_le32(nfc->io_base + FMC2_PCR, FMC2_PCR_PWID, pcr);
+}
+
+static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable)
+{
+ clrsetbits_le32(nfc->io_base + FMC2_PCR, FMC2_PCR_ECCEN,
+ enable ? FMC2_PCR_ECCEN : 0);
+}
+
+static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc)
+{
+ writel(FMC2_BCHICR_CLEAR_IRQ, nfc->io_base + FMC2_BCHICR);
+}
+
+static void stm32_fmc2_nfc_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE) {
+ writeb(cmd, nfc->cmd_base[nfc->cs_sel]);
+ return;
+ }
+
+ writeb(cmd, nfc->addr_base[nfc->cs_sel]);
+}
+
+/*
+ * Enable the ECC logic and reset any previously calculated
+ * syndrome/parity bits. Syndrome/parity bits are cleared by
+ * setting the ECCEN bit to 0.
+ */
+static void stm32_fmc2_nfc_hwctl(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ if (chip->ecc.strength != FMC2_ECC_HAM) {
+ clrsetbits_le32(nfc->io_base + FMC2_PCR, FMC2_PCR_WEN,
+ mode == NAND_ECC_WRITE ? FMC2_PCR_WEN : 0);
+
+ stm32_fmc2_nfc_clear_bch_irq(nfc);
+ }
+
+ stm32_fmc2_nfc_set_ecc(nfc, true);
+}
+
+/*
+ * ECC Hamming calculation
+ * ECC is 3 bytes for 512 bytes of data (supports correction of
+ * at most 1 bit per sector)
+ */
+static int stm32_fmc2_nfc_ham_calculate(struct mtd_info *mtd, const u8 *data,
+ u8 *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 heccr, sr;
+ int ret;
+
+ ret = readl_poll_timeout(nfc->io_base + FMC2_SR, sr,
+ sr & FMC2_SR_NWRF, FMC2_TIMEOUT_5S);
+ if (ret < 0) {
+ log_err("Ham timeout\n");
+ return ret;
+ }
+
+ heccr = readl(nfc->io_base + FMC2_HECCR);
+
+ ecc[0] = heccr;
+ ecc[1] = heccr >> 8;
+ ecc[2] = heccr >> 16;
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_ham_correct(struct mtd_info *mtd, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ u8 bit_position = 0, b0, b1, b2;
+ u32 byte_addr = 0, b;
+ u32 i, shifting = 1;
+
+ /* Indicate which bit and byte is faulty (if any) */
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ b2 = read_ecc[2] ^ calc_ecc[2];
+ b = b0 | (b1 << 8) | (b2 << 16);
+
+ /* No errors */
+ if (likely(!b))
+ return 0;
+
+ /* Calculate bit position */
+ for (i = 0; i < 3; i++) {
+ switch (b % 4) {
+ case 2:
+ bit_position += shifting;
+ /* fall through */
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Calculate byte position */
+ shifting = 1;
+ for (i = 0; i < 9; i++) {
+ switch (b % 4) {
+ case 2:
+ byte_addr += shifting;
+ /* fall through */
+ case 1:
+ break;
+ default:
+ return -EBADMSG;
+ }
+ shifting <<= 1;
+ b >>= 2;
+ }
+
+ /* Flip the bit */
+ dat[byte_addr] ^= (1 << bit_position);
+
+ return 1;
+}
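+
+/*
+ * Decoding example (illustrative): a single-bit error at bit 3 of
+ * byte 5 produces the syndrome b = 0x55599A. Reading two bits at a
+ * time from the LSB, the first three pairs are 2, 2, 1, so
+ * bit_position = 1 + 2 = 3; the next nine pairs are
+ * 2, 1, 2, 1, 1, 1, 1, 1, 1, so byte_addr = 1 + 4 = 5. Any pair equal
+ * to 0 or 3 denotes a multi-bit error, hence the -EBADMSG return.
+ */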
+
+/*
+ * ECC BCH calculation and correction
+ * ECC is 7/13 bytes for 512 bytes of data (supports correction of
+ * at most 4/8 bits per sector)
+ */
+
+static int stm32_fmc2_nfc_bch_calculate(struct mtd_info *mtd, const u8 *data,
+ u8 *ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 bchpbr, bchisr;
+ int ret;
+
+ /* Wait until the BCH code is ready */
+ ret = readl_poll_timeout(nfc->io_base + FMC2_BCHISR, bchisr,
+ bchisr & FMC2_BCHISR_EPBRF, FMC2_TIMEOUT_5S);
+ if (ret < 0) {
+ log_err("Bch timeout\n");
+ return ret;
+ }
+
+ /* Read parity bits */
+ bchpbr = readl(nfc->io_base + FMC2_BCHPBR1);
+ ecc[0] = bchpbr;
+ ecc[1] = bchpbr >> 8;
+ ecc[2] = bchpbr >> 16;
+ ecc[3] = bchpbr >> 24;
+
+ bchpbr = readl(nfc->io_base + FMC2_BCHPBR2);
+ ecc[4] = bchpbr;
+ ecc[5] = bchpbr >> 8;
+ ecc[6] = bchpbr >> 16;
+
+ if (chip->ecc.strength == FMC2_ECC_BCH8) {
+ ecc[7] = bchpbr >> 24;
+
+ bchpbr = readl(nfc->io_base + FMC2_BCHPBR3);
+ ecc[8] = bchpbr;
+ ecc[9] = bchpbr >> 8;
+ ecc[10] = bchpbr >> 16;
+ ecc[11] = bchpbr >> 24;
+
+ bchpbr = readl(nfc->io_base + FMC2_BCHPBR4);
+ ecc[12] = bchpbr;
+ }
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_bch_correct(struct mtd_info *mtd, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ u32 bchdsr0, bchdsr1, bchdsr2, bchdsr3, bchdsr4, bchisr;
+ u16 pos[8];
+ int i, ret, den, eccsize = chip->ecc.size;
+ unsigned int nb_errs = 0;
+
+ /* Wait until the decoding status is ready */
+ ret = readl_poll_timeout(nfc->io_base + FMC2_BCHISR, bchisr,
+ bchisr & FMC2_BCHISR_DERF, FMC2_TIMEOUT_5S);
+ if (ret < 0) {
+ log_err("Bch timeout\n");
+ return ret;
+ }
+
+ bchdsr0 = readl(nfc->io_base + FMC2_BCHDSR0);
+ bchdsr1 = readl(nfc->io_base + FMC2_BCHDSR1);
+ bchdsr2 = readl(nfc->io_base + FMC2_BCHDSR2);
+ bchdsr3 = readl(nfc->io_base + FMC2_BCHDSR3);
+ bchdsr4 = readl(nfc->io_base + FMC2_BCHDSR4);
+
+ stm32_fmc2_nfc_set_ecc(nfc, false);
+
+ /* No errors found */
+ if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF)))
+ return 0;
+
+ /* Too many errors detected */
+ if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE))
+ return -EBADMSG;
+
+ pos[0] = FIELD_GET(FMC2_BCHDSR1_EBP1, bchdsr1);
+ pos[1] = FIELD_GET(FMC2_BCHDSR1_EBP2, bchdsr1);
+ pos[2] = FIELD_GET(FMC2_BCHDSR2_EBP3, bchdsr2);
+ pos[3] = FIELD_GET(FMC2_BCHDSR2_EBP4, bchdsr2);
+ pos[4] = FIELD_GET(FMC2_BCHDSR3_EBP5, bchdsr3);
+ pos[5] = FIELD_GET(FMC2_BCHDSR3_EBP6, bchdsr3);
+ pos[6] = FIELD_GET(FMC2_BCHDSR4_EBP7, bchdsr4);
+ pos[7] = FIELD_GET(FMC2_BCHDSR4_EBP8, bchdsr4);
+
+ den = FIELD_GET(FMC2_BCHDSR0_DEN, bchdsr0);
+ for (i = 0; i < den; i++) {
+ if (pos[i] < eccsize * 8) {
+ __change_bit(pos[i], (unsigned long *)dat);
+ nb_errs++;
+ }
+ }
+
+ return nb_errs;
+}
+
+static int stm32_fmc2_nfc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ int i, s, stat, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccstrength = chip->ecc.strength;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_code = chip->buffers->ecccode;
+ unsigned int max_bitflips = 0;
+
+ for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps;
+ s++, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ /* Read the nand page sector (512 bytes) */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, s * eccsize, -1);
+ chip->read_buf(mtd, p, eccsize);
+
+ /* Read the corresponding ECC bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, i, -1);
+ chip->read_buf(mtd, ecc_code, eccbytes);
+
+ /* Correct the data */
+ stat = chip->ecc.correct(mtd, p, ecc_code, ecc_calc);
+ if (stat == -EBADMSG)
+ /* Check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ ecc_code, eccbytes,
+ NULL, 0,
+ eccstrength);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Read oob */
+ if (oob_required) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ }
+
+ return max_bitflips;
+}
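+
+/*
+ * Layout sketch (illustrative): with a 2048B page and 512B ECC steps,
+ * the loop above runs eccsteps == 4 times; sector s is read at column
+ * s * 512 and its ECC bytes at column
+ * 2048 + FMC2_BBM_LEN + s * eccbytes, i.e. the first two OOB bytes
+ * are reserved for the bad block marker and the ECC bytes are packed
+ * right behind it, matching the ecclayout built in
+ * stm32_fmc2_nfc_probe().
+ */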
+
+static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc, bool has_parent)
+{
+ u32 pcr = readl(nfc->io_base + FMC2_PCR);
+
+ /* Set CS used to undefined */
+ nfc->cs_sel = -1;
+
+ /* Enable wait feature and nand flash memory bank */
+ pcr |= FMC2_PCR_PWAITEN;
+ pcr |= FMC2_PCR_PBKEN;
+
+ /* Set buswidth to 8-bit mode for identification */
+ pcr &= ~FMC2_PCR_PWID;
+
+ /* ECC logic is disabled */
+ pcr &= ~FMC2_PCR_ECCEN;
+
+ /* Default mode */
+ pcr &= ~FMC2_PCR_ECCALG;
+ pcr &= ~FMC2_PCR_BCHECC;
+ pcr &= ~FMC2_PCR_WEN;
+
+ /* Set default ECC sector size */
+ pcr &= ~FMC2_PCR_ECCSS;
+ pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_2048);
+
+ /* Set default tclr/tar timings */
+ pcr &= ~FMC2_PCR_TCLR;
+ pcr |= FIELD_PREP(FMC2_PCR_TCLR, FMC2_PCR_TCLR_DEFAULT);
+ pcr &= ~FMC2_PCR_TAR;
+ pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT);
+
+ /* Enable FMC2 controller */
+ if (!has_parent)
+ setbits_le32(nfc->io_base + FMC2_BCR1, FMC2_BCR1_FMC2EN);
+
+ writel(pcr, nfc->io_base + FMC2_PCR);
+ writel(FMC2_PMEM_DEFAULT, nfc->io_base + FMC2_PMEM);
+ writel(FMC2_PATT_DEFAULT, nfc->io_base + FMC2_PATT);
+}
+
+static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip,
+ const struct nand_sdr_timings *sdrt)
+{
+ struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller);
+ struct stm32_fmc2_nand *nand = to_fmc2_nand(chip);
+ struct stm32_fmc2_timings *tims = &nand->timings;
+ unsigned long hclk = clk_get_rate(&nfc->clk);
+ unsigned long hclkp = FMC2_NSEC_PER_SEC / (hclk / 1000);
+ unsigned long timing, tar, tclr, thiz, twait;
+ unsigned long tset_mem, tset_att, thold_mem, thold_att;
+
+ tar = max_t(unsigned long, hclkp, sdrt->tAR_min);
+ timing = DIV_ROUND_UP(tar, hclkp) - 1;
+ tims->tar = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tclr = max_t(unsigned long, hclkp, sdrt->tCLR_min);
+ timing = DIV_ROUND_UP(tclr, hclkp) - 1;
+ tims->tclr = min_t(unsigned long, timing, FMC2_PCR_TIMING_MASK);
+
+ tims->thiz = FMC2_THIZ;
+ thiz = (tims->thiz + 1) * hclkp;
+
+ /*
+ * tWAIT > tRP
+ * tWAIT > tWP
+ * tWAIT > tREA + tIO
+ */
+ twait = max_t(unsigned long, hclkp, sdrt->tRP_min);
+ twait = max_t(unsigned long, twait, sdrt->tWP_min);
+ twait = max_t(unsigned long, twait, sdrt->tREA_max + FMC2_TIO);
+ timing = DIV_ROUND_UP(twait, hclkp);
+ tims->twait = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_MEM > tCS - tWAIT
+ * tSETUP_MEM > tALS - tWAIT
+ * tSETUP_MEM > tDS - (tWAIT - tHIZ)
+ */
+ tset_mem = hclkp;
+ if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait))
+ tset_mem = sdrt->tCS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait))
+ tset_mem = sdrt->tALS_min - twait;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_mem < sdrt->tDS_min - (twait - thiz)))
+ tset_mem = sdrt->tDS_min - (twait - thiz);
+ timing = DIV_ROUND_UP(tset_mem, hclkp);
+ tims->tset_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_MEM > tCH
+ * tHOLD_MEM > tREH - tSETUP_MEM
+ * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT)
+ */
+ thold_mem = max_t(unsigned long, hclkp, sdrt->tCH_min);
+ if (sdrt->tREH_min > tset_mem &&
+ (thold_mem < sdrt->tREH_min - tset_mem))
+ thold_mem = sdrt->tREH_min - tset_mem;
+ if ((sdrt->tRC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tRC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tRC_min - (tset_mem + twait);
+ if ((sdrt->tWC_min > tset_mem + twait) &&
+ (thold_mem < sdrt->tWC_min - (tset_mem + twait)))
+ thold_mem = sdrt->tWC_min - (tset_mem + twait);
+ timing = DIV_ROUND_UP(thold_mem, hclkp);
+ tims->thold_mem = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tSETUP_ATT > tCS - tWAIT
+ * tSETUP_ATT > tCLS - tWAIT
+ * tSETUP_ATT > tALS - tWAIT
+ * tSETUP_ATT > tRHW - tHOLD_MEM
+ * tSETUP_ATT > tDS - (tWAIT - tHIZ)
+ */
+ tset_att = hclkp;
+ if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait))
+ tset_att = sdrt->tCS_min - twait;
+ if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait))
+ tset_att = sdrt->tCLS_min - twait;
+ if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait))
+ tset_att = sdrt->tALS_min - twait;
+ if (sdrt->tRHW_min > thold_mem &&
+ (tset_att < sdrt->tRHW_min - thold_mem))
+ tset_att = sdrt->tRHW_min - thold_mem;
+ if (twait > thiz && (sdrt->tDS_min > twait - thiz) &&
+ (tset_att < sdrt->tDS_min - (twait - thiz)))
+ tset_att = sdrt->tDS_min - (twait - thiz);
+ timing = DIV_ROUND_UP(tset_att, hclkp);
+ tims->tset_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+
+ /*
+ * tHOLD_ATT > tALH
+ * tHOLD_ATT > tCH
+ * tHOLD_ATT > tCLH
+ * tHOLD_ATT > tCOH
+ * tHOLD_ATT > tDH
+ * tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM
+ * tHOLD_ATT > tADL - tSETUP_MEM
+ * tHOLD_ATT > tWH - tSETUP_MEM
+ * tHOLD_ATT > tWHR - tSETUP_MEM
+ * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT)
+ * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT)
+ */
+ thold_att = max_t(unsigned long, hclkp, sdrt->tALH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCLH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tCOH_min);
+ thold_att = max_t(unsigned long, thold_att, sdrt->tDH_min);
+ if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) &&
+ (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem))
+ thold_att = sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem;
+ if (sdrt->tADL_min > tset_mem &&
+ (thold_att < sdrt->tADL_min - tset_mem))
+ thold_att = sdrt->tADL_min - tset_mem;
+ if (sdrt->tWH_min > tset_mem &&
+ (thold_att < sdrt->tWH_min - tset_mem))
+ thold_att = sdrt->tWH_min - tset_mem;
+ if (sdrt->tWHR_min > tset_mem &&
+ (thold_att < sdrt->tWHR_min - tset_mem))
+ thold_att = sdrt->tWHR_min - tset_mem;
+ if ((sdrt->tRC_min > tset_att + twait) &&
+ (thold_att < sdrt->tRC_min - (tset_att + twait)))
+ thold_att = sdrt->tRC_min - (tset_att + twait);
+ if ((sdrt->tWC_min > tset_att + twait) &&
+ (thold_att < sdrt->tWC_min - (tset_att + twait)))
+ thold_att = sdrt->tWC_min - (tset_att + twait);
+ timing = DIV_ROUND_UP(thold_att, hclkp);
+ tims->thold_att = clamp_val(timing, 1, FMC2_PMEM_PATT_TIMING_MASK);
+}
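+
+/*
+ * Numeric example (assuming hclk = 200 MHz, i.e. hclkp = 5000 ps):
+ * for ONFI timing mode 0, tAR_min = 25000 ps gives
+ *   timing = DIV_ROUND_UP(25000, 5000) - 1 = 4
+ * which fits in the 4-bit FMC2_PCR_TIMING_MASK; larger results are
+ * capped at the mask value.
+ */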
+
+static int stm32_fmc2_nfc_setup_interface(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface *cf)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const struct nand_sdr_timings *sdrt;
+
+ sdrt = nand_get_sdr_timings(cf);
+ if (IS_ERR(sdrt))
+ return PTR_ERR(sdrt);
+
+ if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return 0;
+
+ stm32_fmc2_nfc_calc_timings(chip, sdrt);
+ stm32_fmc2_nfc_timings_init(chip);
+
+ return 0;
+}
+
+static void stm32_fmc2_nfc_nand_callbacks_setup(struct nand_chip *chip)
+{
+ chip->ecc.hwctl = stm32_fmc2_nfc_hwctl;
+
+ /*
+ * Specific callbacks to read/write a page depending on
+ * the algo used (Hamming, BCH).
+ */
+ if (chip->ecc.strength == FMC2_ECC_HAM) {
+ /* Hamming is used */
+ chip->ecc.calculate = stm32_fmc2_nfc_ham_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_ham_correct;
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3;
+ chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK;
+ return;
+ }
+
+ /* BCH is used */
+ chip->ecc.read_page = stm32_fmc2_nfc_read_page;
+ chip->ecc.calculate = stm32_fmc2_nfc_bch_calculate;
+ chip->ecc.correct = stm32_fmc2_nfc_bch_correct;
+
+ if (chip->ecc.strength == FMC2_ECC_BCH8)
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13;
+ else
+ chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7;
+}
+
+static int stm32_fmc2_nfc_calc_ecc_bytes(int step_size, int strength)
+{
+ /* Hamming */
+ if (strength == FMC2_ECC_HAM)
+ return 4;
+
+ /* BCH8 */
+ if (strength == FMC2_ECC_BCH8)
+ return 14;
+
+ /* BCH4 */
+ return 8;
+}
+
+NAND_ECC_CAPS_SINGLE(stm32_fmc2_nfc_ecc_caps, stm32_fmc2_nfc_calc_ecc_bytes,
+ FMC2_ECC_STEP_SIZE,
+ FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8);
+
+static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, ofnode node)
+{
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ u32 cs[FMC2_MAX_CE];
+ int ret, i;
+
+ if (!ofnode_get_property(node, "reg", &nand->ncs))
+ return -EINVAL;
+
+ nand->ncs /= sizeof(u32);
+ if (!nand->ncs) {
+ log_err("Invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ ret = ofnode_read_u32_array(node, "reg", cs, nand->ncs);
+ if (ret < 0) {
+ log_err("Could not retrieve reg property\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nand->ncs; i++) {
+ if (cs[i] >= FMC2_MAX_CE) {
+ log_err("Invalid reg value: %d\n", nand->cs_used[i]);
+ return -EINVAL;
+ }
+
+ if (nfc->cs_assigned & BIT(cs[i])) {
+ log_err("Cs already assigned: %d\n", nand->cs_used[i]);
+ return -EINVAL;
+ }
+
+ nfc->cs_assigned |= BIT(cs[i]);
+ nand->cs_used[i] = cs[i];
+ }
+
+ nand->chip.flash_node = ofnode_to_offset(node);
+
+ return 0;
+}
+
+static int stm32_fmc2_nfc_parse_dt(struct udevice *dev,
+ struct stm32_fmc2_nfc *nfc)
+{
+ ofnode child;
+ int ret, nchips = 0;
+
+ dev_for_each_subnode(child, dev)
+ nchips++;
+
+ if (!nchips) {
+ log_err("NAND chip not defined\n");
+ return -EINVAL;
+ }
+
+ if (nchips > 1) {
+ log_err("Too many NAND chips defined\n");
+ return -EINVAL;
+ }
+
+ dev_for_each_subnode(child, dev) {
+ ret = stm32_fmc2_nfc_parse_child(nfc, child);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
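+
+/*
+ * For reference, a minimal (hypothetical) child node accepted by the
+ * parser above, using both chip enables:
+ *
+ *	nand@0 {
+ *		reg = <0 1>;
+ *	};
+ *
+ * where each 'reg' cell selects one of the FMC2_MAX_CE chip enables.
+ */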
+
+static struct udevice *stm32_fmc2_nfc_get_cdev(struct udevice *dev)
+{
+ struct udevice *pdev = dev_get_parent(dev);
+ struct udevice *cdev = NULL;
+ bool ebi_found = false;
+
+ if (pdev && ofnode_device_is_compatible(dev_ofnode(pdev),
+ "st,stm32mp1-fmc2-ebi"))
+ ebi_found = true;
+
+ if (ofnode_device_is_compatible(dev_ofnode(dev),
+ "st,stm32mp1-fmc2-nfc")) {
+ if (ebi_found)
+ cdev = pdev;
+
+ return cdev;
+ }
+
+ if (!ebi_found)
+ cdev = dev;
+
+ return cdev;
+}
+
+static int stm32_fmc2_nfc_probe(struct udevice *dev)
+{
+ struct stm32_fmc2_nfc *nfc = dev_get_priv(dev);
+ struct stm32_fmc2_nand *nand = &nfc->nand;
+ struct nand_chip *chip = &nand->chip;
+ struct mtd_info *mtd = &chip->mtd;
+ struct nand_ecclayout *ecclayout;
+ struct udevice *cdev;
+ struct reset_ctl reset;
+ int oob_index, chip_cs, mem_region, ret;
+ unsigned int i;
+ int start_region = 0;
+ fdt_addr_t addr;
+
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+
+ cdev = stm32_fmc2_nfc_get_cdev(dev);
+ if (!cdev)
+ return -EINVAL;
+
+ ret = stm32_fmc2_nfc_parse_dt(dev, nfc);
+ if (ret)
+ return ret;
+
+ nfc->io_base = dev_read_addr(cdev);
+ if (nfc->io_base == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ if (dev == cdev)
+ start_region = 1;
+
+ for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE;
+ chip_cs++, mem_region += 3) {
+ if (!(nfc->cs_assigned & BIT(chip_cs)))
+ continue;
+
+ addr = dev_read_addr_index(dev, mem_region);
+ if (addr == FDT_ADDR_T_NONE) {
+ dev_err(dev, "Resource data_base not found for cs%d", chip_cs);
+ return ret;
+ }
+ nfc->data_base[chip_cs] = addr;
+
+ addr = dev_read_addr_index(dev, mem_region + 1);
+ if (addr == FDT_ADDR_T_NONE) {
+ dev_err(dev, "Resource cmd_base not found for cs%d", chip_cs);
+ return ret;
+ }
+ nfc->cmd_base[chip_cs] = addr;
+
+ addr = dev_read_addr_index(dev, mem_region + 2);
+ if (addr == FDT_ADDR_T_NONE) {
+ dev_err(dev, "Resource addr_base not found for cs%d", chip_cs);
+ return ret;
+ }
+ nfc->addr_base[chip_cs] = addr;
+ }
+
+ /* Enable the clock */
+ ret = clk_get_by_index(cdev, 0, &nfc->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(&nfc->clk);
+ if (ret)
+ return ret;
+
+ /* Reset */
+ ret = reset_get_by_index(dev, 0, &reset);
+ if (!ret) {
+ reset_assert(&reset);
+ udelay(2);
+ reset_deassert(&reset);
+ }
+
+ stm32_fmc2_nfc_init(nfc, dev != cdev);
+
+ chip->controller = &nfc->base;
+ chip->select_chip = stm32_fmc2_nfc_select_chip;
+ chip->setup_data_interface = stm32_fmc2_nfc_setup_interface;
+ chip->cmd_ctrl = stm32_fmc2_nfc_cmd_ctrl;
+ chip->chip_delay = FMC2_RB_DELAY_US;
+ chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE |
+ NAND_USE_BOUNCE_BUFFER;
+
+ /* Default ECC settings */
+ chip->ecc.mode = NAND_ECC_HW;
+ chip->ecc.size = FMC2_ECC_STEP_SIZE;
+ chip->ecc.strength = FMC2_ECC_BCH8;
+
+ ret = nand_scan_ident(mtd, nand->ncs, NULL);
+ if (ret)
+ return ret;
+
+ /*
+ * Only NAND_ECC_HW mode is actually supported
+ * Hamming => ecc.strength = 1
+ * BCH4 => ecc.strength = 4
+ * BCH8 => ecc.strength = 8
+ * ECC sector size = 512
+ */
+ if (chip->ecc.mode != NAND_ECC_HW) {
+ dev_err(dev, "Nand_ecc_mode is not well defined in the DT\n");
+ return -EINVAL;
+ }
+
+ ret = nand_check_ecc_caps(chip, &stm32_fmc2_nfc_ecc_caps,
+ mtd->oobsize - FMC2_BBM_LEN);
+ if (ret) {
+ dev_err(dev, "No valid ECC settings set\n");
+ return ret;
+ }
+
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ stm32_fmc2_nfc_nand_callbacks_setup(chip);
+
+ /* Define ECC layout */
+ ecclayout = &nfc->ecclayout;
+ ecclayout->eccbytes = chip->ecc.bytes *
+ (mtd->writesize / chip->ecc.size);
+ oob_index = FMC2_BBM_LEN;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ ecclayout->oobfree->offset = oob_index;
+ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
+ chip->ecc.layout = ecclayout;
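+	/*
+	 * Example layout (assuming BCH8 with 13 ECC bytes per 512-byte step
+	 * on a 2 KiB + 64 B page): eccbytes = 13 * 4 = 52, so ECC occupies
+	 * OOB bytes 2..53 and oobfree covers the remaining 10 bytes.
+	 */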
+
+ if (chip->options & NAND_BUSWIDTH_16)
+ stm32_fmc2_nfc_set_buswidth_16(nfc, true);
+
+ ret = nand_scan_tail(mtd);
+ if (ret)
+ return ret;
+
+ return nand_register(0, mtd);
+}
+
+static const struct udevice_id stm32_fmc2_nfc_match[] = {
+ { .compatible = "st,stm32mp15-fmc2" },
+ { .compatible = "st,stm32mp1-fmc2-nfc" },
+ { /* Sentinel */ }
+};
+
+U_BOOT_DRIVER(stm32_fmc2_nfc) = {
+ .name = "stm32_fmc2_nfc",
+ .id = UCLASS_MTD,
+ .of_match = stm32_fmc2_nfc_match,
+ .probe = stm32_fmc2_nfc_probe,
+ .priv_auto = sizeof(struct stm32_fmc2_nfc),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(stm32_fmc2_nfc),
+ &dev);
+ if (ret && ret != -ENODEV)
+ log_err("Failed to initialize STM32 FMC2 NFC controller. (error %d)\n",
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/sunxi_nand.c b/roms/u-boot/drivers/mtd/nand/raw/sunxi_nand.c
new file mode 100644
index 000000000..7bc6ec7be
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/sunxi_nand.c
@@ -0,0 +1,1858 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
+ * Copyright (C) 2015 Roy Spliet <r.spliet@ultimaker.com>
+ *
+ * Derived from:
+ * https://github.com/yuq/sunxi-nfc-mtd
+ * Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
+ *
+ * https://github.com/hno/Allwinner-Info
+ * Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
+ *
+ * Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
+ * Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <common.h>
+#include <fdtdec.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <nand.h>
+#include <asm/global_data.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+
+#include <asm/gpio.h>
+#include <asm/arch/clock.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define NFC_REG_CTL 0x0000
+#define NFC_REG_ST 0x0004
+#define NFC_REG_INT 0x0008
+#define NFC_REG_TIMING_CTL 0x000C
+#define NFC_REG_TIMING_CFG 0x0010
+#define NFC_REG_ADDR_LOW 0x0014
+#define NFC_REG_ADDR_HIGH 0x0018
+#define NFC_REG_SECTOR_NUM 0x001C
+#define NFC_REG_CNT 0x0020
+#define NFC_REG_CMD 0x0024
+#define NFC_REG_RCMD_SET 0x0028
+#define NFC_REG_WCMD_SET 0x002C
+#define NFC_REG_IO_DATA 0x0030
+#define NFC_REG_ECC_CTL 0x0034
+#define NFC_REG_ECC_ST 0x0038
+#define NFC_REG_DEBUG 0x003C
+#define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
+#define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4))
+#define NFC_REG_SPARE_AREA 0x00A0
+#define NFC_REG_PAT_ID 0x00A4
+#define NFC_RAM0_BASE 0x0400
+#define NFC_RAM1_BASE 0x0800
+
+/* define bit use in NFC_CTL */
+#define NFC_EN BIT(0)
+#define NFC_RESET BIT(1)
+#define NFC_BUS_WIDTH_MSK BIT(2)
+#define NFC_BUS_WIDTH_8 (0 << 2)
+#define NFC_BUS_WIDTH_16 (1 << 2)
+#define NFC_RB_SEL_MSK BIT(3)
+#define NFC_RB_SEL(x) ((x) << 3)
+#define NFC_CE_SEL_MSK (0x7 << 24)
+#define NFC_CE_SEL(x) ((x) << 24)
+#define NFC_CE_CTL BIT(6)
+#define NFC_PAGE_SHIFT_MSK (0xf << 8)
+#define NFC_PAGE_SHIFT(x) (((x) < 10 ? 0 : (x) - 10) << 8)
+#define NFC_SAM BIT(12)
+#define NFC_RAM_METHOD BIT(14)
+#define NFC_DEBUG_CTL BIT(31)
+
+/* define bit use in NFC_ST */
+#define NFC_RB_B2R BIT(0)
+#define NFC_CMD_INT_FLAG BIT(1)
+#define NFC_DMA_INT_FLAG BIT(2)
+#define NFC_CMD_FIFO_STATUS BIT(3)
+#define NFC_STA BIT(4)
+#define NFC_NATCH_INT_FLAG BIT(5)
+#define NFC_RB_STATE(x) BIT(x + 8)
+
+/* define bit use in NFC_INT */
+#define NFC_B2R_INT_ENABLE BIT(0)
+#define NFC_CMD_INT_ENABLE BIT(1)
+#define NFC_DMA_INT_ENABLE BIT(2)
+#define NFC_INT_MASK (NFC_B2R_INT_ENABLE | \
+ NFC_CMD_INT_ENABLE | \
+ NFC_DMA_INT_ENABLE)
+
+/* define bit use in NFC_TIMING_CTL */
+#define NFC_TIMING_CTL_EDO BIT(8)
+
+/* define NFC_TIMING_CFG register layout */
+#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD) \
+ (((tWB) & 0x3) | (((tADL) & 0x3) << 2) | \
+ (((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) | \
+ (((tCAD) & 0x7) << 8))
+
+/* define bit use in NFC_CMD */
+#define NFC_CMD_LOW_BYTE_MSK 0xff
+#define NFC_CMD_HIGH_BYTE_MSK (0xff << 8)
+#define NFC_CMD(x) (x)
+#define NFC_ADR_NUM_MSK (0x7 << 16)
+#define NFC_ADR_NUM(x) (((x) - 1) << 16)
+#define NFC_SEND_ADR BIT(19)
+#define NFC_ACCESS_DIR BIT(20)
+#define NFC_DATA_TRANS BIT(21)
+#define NFC_SEND_CMD1 BIT(22)
+#define NFC_WAIT_FLAG BIT(23)
+#define NFC_SEND_CMD2 BIT(24)
+#define NFC_SEQ BIT(25)
+#define NFC_DATA_SWAP_METHOD BIT(26)
+#define NFC_ROW_AUTO_INC BIT(27)
+#define NFC_SEND_CMD3 BIT(28)
+#define NFC_SEND_CMD4 BIT(29)
+#define NFC_CMD_TYPE_MSK (0x3 << 30)
+#define NFC_NORMAL_OP (0 << 30)
+#define NFC_ECC_OP (1 << 30)
+#define NFC_PAGE_OP (2 << 30)
+
+/* define bit use in NFC_RCMD_SET */
+#define NFC_READ_CMD_MSK 0xff
+#define NFC_RND_READ_CMD0_MSK (0xff << 8)
+#define NFC_RND_READ_CMD1_MSK (0xff << 16)
+
+/* define bit use in NFC_WCMD_SET */
+#define NFC_PROGRAM_CMD_MSK 0xff
+#define NFC_RND_WRITE_CMD_MSK (0xff << 8)
+#define NFC_READ_CMD0_MSK (0xff << 16)
+#define NFC_READ_CMD1_MSK (0xff << 24)
+
+/* define bit use in NFC_ECC_CTL */
+#define NFC_ECC_EN BIT(0)
+#define NFC_ECC_PIPELINE BIT(3)
+#define NFC_ECC_EXCEPTION BIT(4)
+#define NFC_ECC_BLOCK_SIZE_MSK BIT(5)
+#define NFC_ECC_BLOCK_512 (1 << 5)
+#define NFC_RANDOM_EN BIT(9)
+#define NFC_RANDOM_DIRECTION BIT(10)
+#define NFC_ECC_MODE_MSK (0xf << 12)
+#define NFC_ECC_MODE(x) ((x) << 12)
+#define NFC_RANDOM_SEED_MSK (0x7fff << 16)
+#define NFC_RANDOM_SEED(x) ((x) << 16)
+
+/* define bit use in NFC_ECC_ST */
+#define NFC_ECC_ERR(x) BIT(x)
+#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
+#define NFC_ECC_ERR_CNT(b, x) (((x) >> ((b) * 8)) & 0xff)
+
+#define NFC_DEFAULT_TIMEOUT_MS 1000
+
+#define NFC_SRAM_SIZE 1024
+
+#define NFC_MAX_CS 7
+
+/*
+ * Ready/Busy detection type: describes the Ready/Busy detection modes
+ *
+ * @RB_NONE: no external detection available, rely on STATUS command
+ * and software timeouts
+ * @RB_NATIVE: use sunxi NAND controller Ready/Busy support. The Ready/Busy
+ * pin of the NAND flash chip must be connected to one of the
+ * native NAND R/B pins (those which can be muxed to the NAND
+ * Controller)
+ * @RB_GPIO: use a simple GPIO to handle Ready/Busy status. The Ready/Busy
+ * pin of the NAND flash chip must be connected to a GPIO capable
+ * pin.
+ */
+enum sunxi_nand_rb_type {
+ RB_NONE,
+ RB_NATIVE,
+ RB_GPIO,
+};
+
+/*
+ * Ready/Busy structure: stores information related to Ready/Busy detection
+ *
+ * @type: the Ready/Busy detection mode
+ * @info: information related to the R/B detection mode. Either a gpio
+ * id or a native R/B id (those supported by the NAND controller).
+ */
+struct sunxi_nand_rb {
+ enum sunxi_nand_rb_type type;
+ union {
+ struct gpio_desc gpio;
+ int nativeid;
+ } info;
+};
+
+/*
+ * Chip Select structure: stores information related to NAND Chip Select
+ *
+ * @cs: the NAND CS id used to communicate with a NAND Chip
+ * @rb: the Ready/Busy description
+ */
+struct sunxi_nand_chip_sel {
+ u8 cs;
+ struct sunxi_nand_rb rb;
+};
+
+/*
+ * sunxi HW ECC infos: stores information related to HW ECC support
+ *
+ * @mode: the sunxi ECC mode field deduced from ECC requirements
+ * @layout: the OOB layout depending on the ECC requirements and the
+ * selected ECC mode
+ */
+struct sunxi_nand_hw_ecc {
+ int mode;
+ struct nand_ecclayout layout;
+};
+
+/*
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @node: used to store NAND chips into a list
+ * @nand: base NAND chip structure
+ * @clk_rate: clk_rate required for this NAND chip
+ * @timing_cfg: TIMING_CFG register value for this NAND chip
+ * @timing_ctl: TIMING_CTL register value (EDO setting) for this NAND chip
+ * @selected: current active CS
+ * @addr_cycles: number of pending address cycles
+ * @addr: pending address cycles payload
+ * @cmd_cycles: number of pending command cycles
+ * @cmd: pending command opcodes
+ * @nsels: number of CS lines required by the NAND chip
+ * @sels: array of CS lines descriptions
+ */
+struct sunxi_nand_chip {
+ struct list_head node;
+ struct nand_chip nand;
+ unsigned long clk_rate;
+ u32 timing_cfg;
+ u32 timing_ctl;
+ int selected;
+ int addr_cycles;
+ u32 addr[2];
+ int cmd_cycles;
+ u8 cmd[2];
+ int nsels;
+	struct sunxi_nand_chip_sel sels[];
+};
+
+static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
+{
+ return container_of(nand, struct sunxi_nand_chip, nand);
+}
+
+/*
+ * NAND Controller structure: stores sunxi NAND controller information
+ *
+ * @controller: base controller structure
+ * @dev: parent device (used to print error messages)
+ * @regs: NAND controller registers
+ * @ahb_clk: NAND Controller AHB clock
+ * @mod_clk: NAND Controller mod clock
+ * @assigned_cs: bitmask describing already assigned CS lines
+ * @clk_rate: NAND controller current clock rate
+ * @chips: a list containing all the NAND chips attached to
+ * this NAND controller
+ */
+struct sunxi_nfc {
+ struct nand_hw_control controller;
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ahb_clk;
+ struct clk *mod_clk;
+ unsigned long assigned_cs;
+ unsigned long clk_rate;
+ struct list_head chips;
+};
+
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
+{
+ return container_of(ctrl, struct sunxi_nfc, controller);
+}
+
+static void sunxi_nfc_set_clk_rate(unsigned long hz)
+{
+ struct sunxi_ccm_reg *const ccm =
+ (struct sunxi_ccm_reg *)SUNXI_CCM_BASE;
+ int div_m, div_n;
+
+ div_m = (clock_get_pll6() + hz - 1) / hz;
+ for (div_n = 0; div_n < 3 && div_m > 16; div_n++) {
+ if (div_m % 2)
+ div_m++;
+ div_m >>= 1;
+ }
+ if (div_m > 16)
+ div_m = 16;
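+
+	/*
+	 * Worked example (assumed values): with PLL6 at 600 MHz and
+	 * hz = 20 MHz, div_m starts at 30; one halving pass leaves
+	 * div_n = 1, div_m = 15, i.e. 600 MHz / (2^1 * 15) = 20 MHz.
+	 */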
+
+ /* config mod clock */
+ writel(CCM_NAND_CTRL_ENABLE | CCM_NAND_CTRL_PLL6 |
+ CCM_NAND_CTRL_N(div_n) | CCM_NAND_CTRL_M(div_m),
+ &ccm->nand0_clk_cfg);
+
+ /* gate on nand clock */
+ setbits_le32(&ccm->ahb_gate0, (1 << AHB_GATE_OFFSET_NAND0));
+#ifdef CONFIG_MACH_SUN9I
+ setbits_le32(&ccm->ahb_gate1, (1 << AHB_GATE_OFFSET_DMA));
+#else
+ setbits_le32(&ccm->ahb_gate0, (1 << AHB_GATE_OFFSET_DMA));
+#endif
+}
+
+static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags,
+ unsigned int timeout_ms)
+{
+ unsigned int timeout_ticks;
+ u32 time_start, status;
+ int ret = -ETIMEDOUT;
+
+ if (!timeout_ms)
+ timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
+
+ timeout_ticks = (timeout_ms * CONFIG_SYS_HZ) / 1000;
+
+ time_start = get_timer(0);
+
+ do {
+ status = readl(nfc->regs + NFC_REG_ST);
+ if ((status & flags) == flags) {
+ ret = 0;
+ break;
+ }
+
+ udelay(1);
+ } while (get_timer(time_start) < timeout_ticks);
+
+ writel(status & flags, nfc->regs + NFC_REG_ST);
+
+ return ret;
+}
+
+static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = (CONFIG_SYS_HZ *
+ NFC_DEFAULT_TIMEOUT_MS) / 1000;
+ u32 time_start;
+
+ time_start = get_timer(0);
+ do {
+ if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS))
+ return 0;
+ } while (get_timer(time_start) < timeout);
+
+ dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
+{
+ unsigned long timeout = (CONFIG_SYS_HZ *
+ NFC_DEFAULT_TIMEOUT_MS) / 1000;
+ u32 time_start;
+
+ writel(0, nfc->regs + NFC_REG_ECC_CTL);
+ writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
+
+ time_start = get_timer(0);
+ do {
+ if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET))
+ return 0;
+ } while (get_timer(time_start) < timeout);
+
+ dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
+ return -ETIMEDOUT;
+}
+
+static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_rb *rb;
+ unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20);
+ int ret;
+
+ if (sunxi_nand->selected < 0)
+ return 0;
+
+ rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
+
+ switch (rb->type) {
+ case RB_NATIVE:
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ NFC_RB_STATE(rb->info.nativeid));
+ if (ret)
+ break;
+
+ sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo);
+ ret = !!(readl(nfc->regs + NFC_REG_ST) &
+ NFC_RB_STATE(rb->info.nativeid));
+ break;
+ case RB_GPIO:
+ ret = dm_gpio_get_value(&rb->info.gpio);
+ break;
+ case RB_NONE:
+ default:
+ ret = 0;
+ dev_err(nfc->dev, "cannot check R/B NAND status!\n");
+ break;
+ }
+
+ return ret;
+}
+
+static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ struct sunxi_nand_chip_sel *sel;
+ u32 ctl;
+
+ if (chip > 0 && chip >= sunxi_nand->nsels)
+ return;
+
+ if (chip == sunxi_nand->selected)
+ return;
+
+ ctl = readl(nfc->regs + NFC_REG_CTL) &
+ ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
+
+ if (chip >= 0) {
+ sel = &sunxi_nand->sels[chip];
+
+		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
+		       NFC_PAGE_SHIFT(nand->page_shift);
+ if (sel->rb.type == RB_NONE) {
+ nand->dev_ready = NULL;
+ } else {
+ nand->dev_ready = sunxi_nfc_dev_ready;
+ if (sel->rb.type == RB_NATIVE)
+ ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
+ }
+
+ writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+
+ if (nfc->clk_rate != sunxi_nand->clk_rate) {
+ sunxi_nfc_set_clk_rate(sunxi_nand->clk_rate);
+ nfc->clk_rate = sunxi_nand->clk_rate;
+ }
+ }
+
+ writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
+ writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
+ writel(ctl, nfc->regs + NFC_REG_CTL);
+
+ sunxi_nand->selected = chip;
+}
+
+static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ if (buf)
+ memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
+ cnt);
+ offs += cnt;
+ }
+}
+
+static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ int cnt;
+ int offs = 0;
+ u32 tmp;
+
+ while (len > offs) {
+ cnt = min(len - offs, NFC_SRAM_SIZE);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ break;
+
+ writel(cnt, nfc->regs + NFC_REG_CNT);
+ memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
+ tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR;
+ writel(tmp, nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ret)
+ break;
+
+ offs += cnt;
+ }
+}
+
+static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
+{
+ uint8_t ret;
+
+ sunxi_nfc_read_buf(mtd, &ret, 1);
+
+ return ret;
+}
+
+static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
+ unsigned int ctrl)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+ int ret;
+ u32 tmp;
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ tmp = readl(nfc->regs + NFC_REG_CTL);
+ if (ctrl & NAND_NCE)
+ tmp |= NFC_CE_CTL;
+ else
+ tmp &= ~NFC_CE_CTL;
+ writel(tmp, nfc->regs + NFC_REG_CTL);
+ }
+
+ if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
+ !(ctrl & (NAND_CLE | NAND_ALE))) {
+ u32 cmd = 0;
+
+ if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
+ return;
+
+ if (sunxi_nand->cmd_cycles--)
+ cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
+
+ if (sunxi_nand->cmd_cycles--) {
+ cmd |= NFC_SEND_CMD2;
+ writel(sunxi_nand->cmd[1],
+ nfc->regs + NFC_REG_RCMD_SET);
+ }
+
+ sunxi_nand->cmd_cycles = 0;
+
+ if (sunxi_nand->addr_cycles) {
+ cmd |= NFC_SEND_ADR |
+ NFC_ADR_NUM(sunxi_nand->addr_cycles);
+ writel(sunxi_nand->addr[0],
+ nfc->regs + NFC_REG_ADDR_LOW);
+ }
+
+ if (sunxi_nand->addr_cycles > 4)
+ writel(sunxi_nand->addr[1],
+ nfc->regs + NFC_REG_ADDR_HIGH);
+
+ writel(cmd, nfc->regs + NFC_REG_CMD);
+ sunxi_nand->addr[0] = 0;
+ sunxi_nand->addr[1] = 0;
+ sunxi_nand->addr_cycles = 0;
+ sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ }
+
+ if (ctrl & NAND_CLE) {
+ sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
+ } else if (ctrl & NAND_ALE) {
+ sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
+ dat << ((sunxi_nand->addr_cycles % 4) * 8);
+ sunxi_nand->addr_cycles++;
+ }
+}
+
+/* These seed values have been extracted from Allwinner's BSP */
+static const u16 sunxi_nfc_randomizer_page_seeds[] = {
+ 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+ 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+ 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+ 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+ 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+ 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+ 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+ 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+ 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+ 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+ 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+ 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+ 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+ 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+ 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+ 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+/*
+ * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
+ * have been generated using
+ * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
+ * the randomizer engine does internally before de/scrambling OOB data.
+ *
+ * Those tables are statically defined to avoid calculating randomizer state
+ * at runtime.
+ */
+static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
+ 0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
+ 0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
+ 0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
+ 0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
+ 0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
+ 0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
+ 0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
+ 0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
+ 0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
+ 0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
+ 0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
+ 0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
+ 0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
+ 0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
+ 0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
+ 0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
+};
+
+static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
+ 0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
+ 0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
+ 0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
+ 0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
+ 0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
+ 0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
+ 0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
+ 0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
+ 0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
+ 0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
+ 0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
+ 0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
+ 0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
+ 0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
+ 0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
+ 0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
+};
+
+static u16 sunxi_nfc_randomizer_step(u16 state, int count)
+{
+ state &= 0x7fff;
+
+ /*
+ * This loop is just a simple implementation of a Fibonacci LFSR using
+ * the x16 + x15 + 1 polynomial.
+ */
+ while (count--)
+ state = ((state >> 1) |
+ (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+ return state;
+}
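+
+/*
+ * Illustration only (not used at runtime): the ECC seed tables above can
+ * be regenerated from the page seeds with this step function, e.g. for
+ * the 512-byte variant:
+ *
+ *	ecc512[i] = sunxi_nfc_randomizer_step(
+ *			sunxi_nfc_randomizer_page_seeds[i], (512 * 8) + 15);
+ */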
+
+static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+{
+ const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+ int mod = mtd->erasesize / mtd->writesize;
+
+ if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+ mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+ if (ecc) {
+ if (mtd->ecc_step_size == 512)
+ seeds = sunxi_nfc_randomizer_ecc512_seeds;
+ else
+ seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+ }
+
+ return seeds[page % mod];
+}
+
+static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
+					int page, bool ecc)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	u32 ecc_ctl;
+	u16 state;
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+	writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ if (!(nand->options & NAND_NEED_SCRAMBLING))
+ return;
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+{
+ u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+
+ bbm[0] ^= state;
+ bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+ const uint8_t *buf, int len,
+ bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(mtd, page, ecc);
+ sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_write_buf(mtd, buf, len);
+ sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+ int len, bool ecc, int page)
+{
+ sunxi_nfc_randomizer_config(mtd, page, ecc);
+ sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_read_buf(mtd, buf, len);
+ sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
+ u32 ecc_ctl;
+
+ ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+ ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
+ NFC_ECC_BLOCK_SIZE_MSK);
+ ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION;
+
+ if (nand->ecc.size == 512)
+ ecc_ctl |= NFC_ECC_BLOCK_512;
+
+ writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
+ nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
+{
+ buf[0] = user_data;
+ buf[1] = user_data >> 8;
+ buf[2] = user_data >> 16;
+ buf[3] = user_data >> 24;
+}
+
+static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
+ u8 *data, int data_off,
+ u8 *oob, int oob_off,
+ int *cur_off,
+ unsigned int *max_bitflips,
+ bool bbm, int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int raw_mode = 0;
+ u32 status;
+ int ret;
+
+ if (*cur_off != data_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+
+ sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
+
+ if (data_off + ecc->size != oob_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_randomizer_enable(mtd);
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ sunxi_nfc_randomizer_disable(mtd);
+ if (ret)
+ return ret;
+
+ *cur_off = oob_off + ecc->bytes + 4;
+
+ status = readl(nfc->regs + NFC_REG_ECC_ST);
+ if (status & NFC_ECC_PAT_FOUND(0)) {
+ u8 pattern = 0xff;
+
+ if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1)))
+ pattern = 0x0;
+
+ memset(data, pattern, ecc->size);
+ memset(oob, pattern, ecc->bytes + 4);
+
+ return 1;
+ }
+
+ ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
+
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
+
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page);
+
+ if (status & NFC_ECC_ERR(0)) {
+ /*
+ * Re-read the data with the randomizer disabled to identify
+ * bitflips in erased pages.
+ */
+ if (nand->options & NAND_NEED_SCRAMBLING) {
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+ nand->read_buf(mtd, data, ecc->size);
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand->read_buf(mtd, oob, ecc->bytes + 4);
+ }
+
+ ret = nand_check_erased_ecc_chunk(data, ecc->size,
+ oob, ecc->bytes + 4,
+ NULL, 0, ecc->strength);
+ if (ret >= 0)
+ raw_mode = 1;
+ } else {
+ /*
+ * The engine protects 4 bytes of OOB data per chunk.
+ * Retrieve the corrected OOB bytes.
+ */
+ sunxi_nfc_user_data_to_buf(readl(nfc->regs +
+ NFC_REG_USER_DATA(0)),
+ oob);
+
+ /* De-randomize the Bad Block Marker. */
+ if (bbm && nand->options & NAND_NEED_SCRAMBLING)
+ sunxi_nfc_randomize_bbm(mtd, page, oob);
+ }
+
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
+
+ return raw_mode;
+}
+
+static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
+ u8 *oob, int *cur_off,
+ bool randomize, int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
+
+ if (len <= 0)
+ return;
+
+ if (*cur_off != offset)
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ offset + mtd->writesize, -1);
+
+ if (!randomize)
+ sunxi_nfc_read_buf(mtd, oob + offset, len);
+ else
+ sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
+ false, page);
+
+ *cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
+{
+ return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+}
+
+static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
+ const u8 *data, int data_off,
+ const u8 *oob, int oob_off,
+ int *cur_off, bool bbm,
+ int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int ret;
+
+ if (data_off != *cur_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
+
+ sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
+
+ /* Fill OOB data in */
+ if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) {
+ u8 user_data[4];
+
+ memcpy(user_data, oob, 4);
+ sunxi_nfc_randomize_bbm(mtd, page, user_data);
+ writel(sunxi_nfc_buf_to_user_data(user_data),
+ nfc->regs + NFC_REG_USER_DATA(0));
+ } else {
+ writel(sunxi_nfc_buf_to_user_data(oob),
+ nfc->regs + NFC_REG_USER_DATA(0));
+ }
+
+ if (data_off + ecc->size != oob_off)
+ nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
+
+ ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+ if (ret)
+ return ret;
+
+ sunxi_nfc_randomizer_enable(mtd);
+ writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+ NFC_ACCESS_DIR | NFC_ECC_OP,
+ nfc->regs + NFC_REG_CMD);
+
+ ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ sunxi_nfc_randomizer_disable(mtd);
+ if (ret)
+ return ret;
+
+ *cur_off = oob_off + ecc->bytes + 4;
+
+ return 0;
+}
+
+static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
+ u8 *oob, int *cur_off,
+ int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ int offset = ((ecc->bytes + 4) * ecc->steps);
+ int len = mtd->oobsize - offset;
+
+ if (len <= 0)
+ return;
+
+ if (*cur_off != offset)
+ nand->cmdfunc(mtd, NAND_CMD_RNDIN,
+ offset + mtd->writesize, -1);
+
+ sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
+
+ *cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, cur_off = 0;
+ bool raw_mode = false;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = buf + data_off;
+ u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, &max_bitflips,
+ !i, page);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ raw_mode = true;
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+ !raw_mode, page);
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint32_t data_offs, uint32_t readlen,
+ uint8_t *bufpoi, int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+ unsigned int max_bitflips = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = bufpoi + data_off;
+ u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
+ oob, oob_off + mtd->writesize,
+ &cur_off, &max_bitflips, !i, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ &cur_off, page);
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return 0;
+}
+
+static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u32 data_offs, u32 data_len,
+ const u8 *buf, int oob_required,
+ int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ const u8 *data = buf + data_off;
+ const u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+ oob_off + mtd->writesize,
+ &cur_off, !i, page);
+ if (ret)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return 0;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf, int oob_required,
+ int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ unsigned int max_bitflips = 0;
+ int ret, i, cur_off = 0;
+ bool raw_mode = false;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * (ecc->size + ecc->bytes + 4);
+ int oob_off = data_off + ecc->size;
+ u8 *data = buf + (i * ecc->size);
+ u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+ oob_off, &cur_off,
+ &max_bitflips, !i, page);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ raw_mode = true;
+ }
+
+ if (oob_required)
+ sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+ !raw_mode, page);
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return max_bitflips;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ for (i = 0; i < ecc->steps; i++) {
+ int data_off = i * (ecc->size + ecc->bytes + 4);
+ int oob_off = data_off + ecc->size;
+ const u8 *data = buf + (i * ecc->size);
+ const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
+
+ ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
+ oob, oob_off, &cur_off,
+ false, page);
+ if (ret)
+ return ret;
+ }
+
+ if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+ &cur_off, page);
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return 0;
+}
+
+static const s32 tWB_lut[] = {6, 12, 16, 20};
+static const s32 tRHW_lut[] = {4, 8, 12, 20};
+
+static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
+ u32 clk_period)
+{
+ u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
+ int i;
+
+ for (i = 0; i < lut_size; i++) {
+ if (clk_cycles <= lut[i])
+ return i;
+ }
+
+ /* Doesn't fit */
+ return -EINVAL;
+}
+
+#define sunxi_nand_lookup_timing(l, p, c) \
+ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
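+
+/*
+ * Worked example (assumed values): with a 20000 ps clock period and
+ * tWB_max = 100000 ps, DIV_ROUND_UP() gives 5 clock cycles, which fits
+ * the first tWB_lut entry (6), so the encoded tWB field is 0.
+ */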
+
+static int sunxi_nand_chip_set_timings(struct sunxi_nfc *nfc,
+ struct sunxi_nand_chip *chip,
+ const struct nand_sdr_timings *timings)
+{
+ u32 min_clk_period = 0;
+ s32 tWB, tADL, tWHR, tRHW, tCAD;
+
+ /* T1 <=> tCLS */
+ if (timings->tCLS_min > min_clk_period)
+ min_clk_period = timings->tCLS_min;
+
+ /* T2 <=> tCLH */
+ if (timings->tCLH_min > min_clk_period)
+ min_clk_period = timings->tCLH_min;
+
+ /* T3 <=> tCS */
+ if (timings->tCS_min > min_clk_period)
+ min_clk_period = timings->tCS_min;
+
+ /* T4 <=> tCH */
+ if (timings->tCH_min > min_clk_period)
+ min_clk_period = timings->tCH_min;
+
+ /* T5 <=> tWP */
+ if (timings->tWP_min > min_clk_period)
+ min_clk_period = timings->tWP_min;
+
+ /* T6 <=> tWH */
+ if (timings->tWH_min > min_clk_period)
+ min_clk_period = timings->tWH_min;
+
+ /* T7 <=> tALS */
+ if (timings->tALS_min > min_clk_period)
+ min_clk_period = timings->tALS_min;
+
+ /* T8 <=> tDS */
+ if (timings->tDS_min > min_clk_period)
+ min_clk_period = timings->tDS_min;
+
+ /* T9 <=> tDH */
+ if (timings->tDH_min > min_clk_period)
+ min_clk_period = timings->tDH_min;
+
+ /* T10 <=> tRR */
+ if (timings->tRR_min > (min_clk_period * 3))
+ min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
+
+ /* T11 <=> tALH */
+ if (timings->tALH_min > min_clk_period)
+ min_clk_period = timings->tALH_min;
+
+ /* T12 <=> tRP */
+ if (timings->tRP_min > min_clk_period)
+ min_clk_period = timings->tRP_min;
+
+ /* T13 <=> tREH */
+ if (timings->tREH_min > min_clk_period)
+ min_clk_period = timings->tREH_min;
+
+ /* T14 <=> tRC */
+ if (timings->tRC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
+
+ /* T15 <=> tWC */
+ if (timings->tWC_min > (min_clk_period * 2))
+ min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+
+ /* T16 - T19 + tCAD */
+ tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
+ min_clk_period);
+ if (tWB < 0) {
+ dev_err(nfc->dev, "unsupported tWB\n");
+ return tWB;
+ }
+
+ tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
+ if (tADL > 3) {
+ dev_err(nfc->dev, "unsupported tADL\n");
+ return -EINVAL;
+ }
+
+ tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
+ if (tWHR > 3) {
+ dev_err(nfc->dev, "unsupported tWHR\n");
+ return -EINVAL;
+ }
+
+ tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
+ min_clk_period);
+ if (tRHW < 0) {
+ dev_err(nfc->dev, "unsupported tRHW\n");
+ return tRHW;
+ }
+
+ /*
+ * TODO: according to ONFI specs this value only applies for DDR NAND,
+ * but Allwinner seems to set this to 0x7. Mimic them for now.
+ */
+ tCAD = 0x7;
+
+ /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
+ chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+
+ /*
+ * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+ * output cycle timings shall be used if the host drives tRC less than
+ * 30 ns.
+ */
+ chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0;
+
+ /* Convert min_clk_period from picoseconds to nanoseconds */
+ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+ /*
+ * Convert min_clk_period into a clk frequency, then get the
+ * appropriate rate for the NAND controller IP given this formula
+ * (specified in the datasheet):
+ * nand clk_rate = min_clk_rate
+ */
+ chip->clk_rate = 1000000000L / min_clk_period;
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init_timings(struct sunxi_nfc *nfc,
+ struct sunxi_nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(&chip->nand);
+ const struct nand_sdr_timings *timings;
+ int ret;
+ int mode;
+
+ mode = onfi_get_async_timing_mode(&chip->nand);
+ if (mode == ONFI_TIMING_MODE_UNKNOWN) {
+ mode = chip->nand.onfi_timing_mode_default;
+ } else {
+ uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {};
+ int i;
+
+ mode = fls(mode) - 1;
+ if (mode < 0)
+ mode = 0;
+
+ feature[0] = mode;
+ for (i = 0; i < chip->nsels; i++) {
+ chip->nand.select_chip(mtd, i);
+ ret = chip->nand.onfi_set_features(mtd,
+ &chip->nand,
+ ONFI_FEATURE_ADDR_TIMING_MODE,
+ feature);
+ chip->nand.select_chip(mtd, -1);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+ }
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(mode);
+ if (IS_ERR(timings))
+ return PTR_ERR(timings);
+
+ return sunxi_nand_chip_set_timings(nfc, chip, timings);
+}
+
+static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+ struct sunxi_nand_hw_ecc *data;
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int ret;
+ int i;
+
+	if (ecc->size != 512 && ecc->size != 1024)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* Prefer 1k ECC chunks over 512-byte ones */
+ if (ecc->size == 512 && mtd->writesize > 512) {
+ ecc->size = 1024;
+ ecc->strength *= 2;
+ }
+
+	/* Pick the first supported strength that covers the requirement */
+ for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+ if (ecc->strength <= strengths[i]) {
+ /*
+ * Update ecc->strength value with the actual strength
+ * that will be used by the ECC engine.
+ */
+ ecc->strength = strengths[i];
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(strengths)) {
+ dev_err(mtd->dev, "unsupported strength\n");
+ ret = -ENOTSUPP;
+ goto err;
+ }
+
+ data->mode = i;
+
+	/* The HW ECC engine always requests ECC bytes as for 1024-byte blocks */
+	ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
+
+	/* The HW ECC engine always works with an even number of ECC bytes */
+	ecc->bytes = ALIGN(ecc->bytes, 2);
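+
+	/*
+	 * e.g. a requested strength of 28 gives DIV_ROUND_UP(28 * 14, 8) = 49
+	 * (fls(8 * 1024) = 14), aligned up to 50 ECC bytes per block.
+	 */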
+
+ layout = &data->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ layout->eccbytes = (ecc->bytes * nsectors);
+
+ ecc->layout = layout;
+ ecc->priv = data;
+
+ return 0;
+
+err:
+ kfree(data);
+
+ return ret;
+}
+
+#ifndef __UBOOT__
+static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ kfree(ecc->priv);
+}
+#endif /* __UBOOT__ */
+
+static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i, j;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+
+ ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+ ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < nsectors; i++) {
+ if (i) {
+ layout->oobfree[i].offset =
+ layout->oobfree[i - 1].offset +
+ layout->oobfree[i - 1].length +
+ ecc->bytes;
+ layout->oobfree[i].length = 4;
+ } else {
+ /*
+ * The first 2 bytes are used for BB markers, hence we
+ * only have 2 bytes available in the first user data
+ * section.
+ */
+ layout->oobfree[i].length = 2;
+ layout->oobfree[i].offset = 2;
+ }
+
+ for (j = 0; j < ecc->bytes; j++)
+ layout->eccpos[(ecc->bytes * i) + j] =
+ layout->oobfree[i].offset +
+ layout->oobfree[i].length + j;
+ }
+
+ if (mtd->oobsize > (ecc->bytes + 4) * nsectors) {
+ layout->oobfree[nsectors].offset =
+ layout->oobfree[nsectors - 1].offset +
+ layout->oobfree[nsectors - 1].length +
+ ecc->bytes;
+ layout->oobfree[nsectors].length = mtd->oobsize -
+ ((ecc->bytes + 4) * nsectors);
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
+ struct nand_ecc_ctrl *ecc)
+{
+ struct nand_ecclayout *layout;
+ int nsectors;
+ int i;
+ int ret;
+
+ ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+
+ ecc->prepad = 4;
+ ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
+ ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
+
+ layout = ecc->layout;
+ nsectors = mtd->writesize / ecc->size;
+
+ for (i = 0; i < (ecc->bytes * nsectors); i++)
+ layout->eccpos[i] = i;
+
+ layout->oobfree[0].length = mtd->oobsize - i;
+ layout->oobfree[0].offset = i;
+
+ return 0;
+}
+
+#ifndef __UBOOT__
+static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
+{
+ switch (ecc->mode) {
+ case NAND_ECC_HW:
+ case NAND_ECC_HW_SYNDROME:
+ sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
+ break;
+ case NAND_ECC_NONE:
+ kfree(ecc->layout);
+ default:
+ break;
+ }
+}
+#endif /* __UBOOT__ */
+
+static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ int ret;
+
+ if (!ecc->size) {
+ ecc->size = nand->ecc_step_ds;
+ ecc->strength = nand->ecc_strength_ds;
+ }
+
+ if (!ecc->size || !ecc->strength)
+ return -EINVAL;
+
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT_BCH:
+ break;
+ case NAND_ECC_HW:
+ ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_HW_SYNDROME:
+ ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc);
+ if (ret)
+ return ret;
+ break;
+ case NAND_ECC_NONE:
+ ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL);
+ if (!ecc->layout)
+ return -ENOMEM;
+ ecc->layout->oobfree[0].length = mtd->oobsize;
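+		/* fall through */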
+ case NAND_ECC_SOFT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sunxi_nand_chip_init(int node, struct sunxi_nfc *nfc, int devnum)
+{
+ const struct nand_sdr_timings *timings;
+ const void *blob = gd->fdt_blob;
+ struct sunxi_nand_chip *chip;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ int nsels;
+ int ret;
+ int i;
+ u32 cs[8], rb[8];
+
+ if (!fdt_getprop(blob, node, "reg", &nsels))
+ return -EINVAL;
+
+ nsels /= sizeof(u32);
+ if (!nsels || nsels > 8) {
+ dev_err(nfc->dev, "invalid reg property size\n");
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(*chip) +
+ (nsels * sizeof(struct sunxi_nand_chip_sel)),
+ GFP_KERNEL);
+ if (!chip) {
+ dev_err(nfc->dev, "could not allocate chip\n");
+ return -ENOMEM;
+ }
+
+ chip->nsels = nsels;
+ chip->selected = -1;
+
+ for (i = 0; i < nsels; i++) {
+ cs[i] = -1;
+ rb[i] = -1;
+ }
+
+ ret = fdtdec_get_int_array(gd->fdt_blob, node, "reg", cs, nsels);
+ if (ret) {
+ dev_err(nfc->dev, "could not retrieve reg property: %d\n", ret);
+ return ret;
+ }
+
+ ret = fdtdec_get_int_array(gd->fdt_blob, node, "allwinner,rb", rb,
+ nsels);
+ if (ret) {
+ dev_err(nfc->dev, "could not retrieve reg property: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < nsels; i++) {
+ int tmp = cs[i];
+
+ if (tmp > NFC_MAX_CS) {
+ dev_err(nfc->dev,
+ "invalid reg value: %u (max CS = 7)\n", tmp);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+ dev_err(nfc->dev, "CS %d already assigned\n", tmp);
+ return -EINVAL;
+ }
+
+ chip->sels[i].cs = tmp;
+
+ tmp = rb[i];
+ if (tmp >= 0 && tmp < 2) {
+ chip->sels[i].rb.type = RB_NATIVE;
+ chip->sels[i].rb.info.nativeid = tmp;
+ } else {
+ ret = gpio_request_by_name_nodev(offset_to_ofnode(node),
+ "rb-gpios", i,
+ &chip->sels[i].rb.info.gpio,
+ GPIOD_IS_IN);
+			if (ret)
+				chip->sels[i].rb.type = RB_NONE;
+			else
+				chip->sels[i].rb.type = RB_GPIO;
+ }
+ }
+
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings)) {
+ ret = PTR_ERR(timings);
+ dev_err(nfc->dev,
+ "could not retrieve timings for ONFI mode 0: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_chip_set_timings(nfc, chip, timings);
+ if (ret) {
+ dev_err(nfc->dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ nand = &chip->nand;
+ /* Default tR value specified in the ONFI spec (chapter 4.15.1) */
+ nand->chip_delay = 200;
+ nand->controller = &nfc->controller;
+ /*
+ * Set the ECC mode to the default value in case nothing is specified
+ * in the DT.
+ */
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->flash_node = node;
+ nand->select_chip = sunxi_nfc_select_chip;
+ nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
+ nand->read_buf = sunxi_nfc_read_buf;
+ nand->write_buf = sunxi_nfc_write_buf;
+ nand->read_byte = sunxi_nfc_read_byte;
+
+ mtd = nand_to_mtd(nand);
+ ret = nand_scan_ident(mtd, nsels, NULL);
+ if (ret)
+ return ret;
+
+ if (nand->bbt_options & NAND_BBT_USE_FLASH)
+ nand->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (nand->options & NAND_NEED_SCRAMBLING)
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ nand->options |= NAND_SUBPAGE_READ;
+
+ ret = sunxi_nand_chip_init_timings(nfc, chip);
+ if (ret) {
+ dev_err(nfc->dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_ecc_init(mtd, &nand->ecc);
+ if (ret) {
+ dev_err(nfc->dev, "ECC init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = nand_scan_tail(mtd);
+ if (ret) {
+ dev_err(nfc->dev, "nand_scan_tail failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = nand_register(devnum, mtd);
+ if (ret) {
+ dev_err(nfc->dev, "failed to register mtd device: %d\n", ret);
+ return ret;
+ }
+
+ list_add_tail(&chip->node, &nfc->chips);
+
+ return 0;
+}
+
+static int sunxi_nand_chips_init(int node, struct sunxi_nfc *nfc)
+{
+ const void *blob = gd->fdt_blob;
+ int nand_node;
+ int ret, i = 0;
+
+ for (nand_node = fdt_first_subnode(blob, node); nand_node >= 0;
+ nand_node = fdt_next_subnode(blob, nand_node))
+ i++;
+
+ if (i > 8) {
+ dev_err(nfc->dev, "too many NAND chips: %d (max = 8)\n", i);
+ return -EINVAL;
+ }
+
+ i = 0;
+ for (nand_node = fdt_first_subnode(blob, node); nand_node >= 0;
+ nand_node = fdt_next_subnode(blob, nand_node)) {
+ ret = sunxi_nand_chip_init(nand_node, nfc, i++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifndef __UBOOT__
+static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+{
+ struct sunxi_nand_chip *chip;
+
+ while (!list_empty(&nfc->chips)) {
+ chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
+ node);
+		nand_release(nand_to_mtd(&chip->nand));
+ sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+ list_del(&chip->node);
+ kfree(chip);
+ }
+}
+#endif /* __UBOOT__ */
+
+void sunxi_nand_init(void)
+{
+ const void *blob = gd->fdt_blob;
+ struct sunxi_nfc *nfc;
+ fdt_addr_t regs;
+ int node;
+ int ret;
+
+ nfc = kzalloc(sizeof(*nfc), GFP_KERNEL);
+ if (!nfc)
+ return;
+
+ spin_lock_init(&nfc->controller.lock);
+ init_waitqueue_head(&nfc->controller.wq);
+ INIT_LIST_HEAD(&nfc->chips);
+
+ node = fdtdec_next_compatible(blob, 0, COMPAT_SUNXI_NAND);
+ if (node < 0) {
+ pr_err("unable to find nfc node in device tree\n");
+ goto err;
+ }
+
+ if (!fdtdec_get_is_enabled(blob, node)) {
+ pr_err("nfc disabled in device tree\n");
+ goto err;
+ }
+
+ regs = fdtdec_get_addr(blob, node, "reg");
+ if (regs == FDT_ADDR_T_NONE) {
+ pr_err("unable to find nfc address in device tree\n");
+ goto err;
+ }
+
+ nfc->regs = (void *)regs;
+
+ ret = sunxi_nfc_rst(nfc);
+ if (ret)
+ goto err;
+
+ ret = sunxi_nand_chips_init(node, nfc);
+ if (ret) {
+ dev_err(nfc->dev, "failed to init nand chips\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ kfree(nfc);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Boris BREZILLON");
+MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
diff --git a/roms/u-boot/drivers/mtd/nand/raw/sunxi_nand_spl.c b/roms/u-boot/drivers/mtd/nand/raw/sunxi_nand_spl.c
new file mode 100644
index 000000000..85d8013b1
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/sunxi_nand_spl.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2014-2015, Antmicro Ltd <www.antmicro.com>
+ * Copyright (c) 2015, AW-SOM Technologies <www.aw-som.com>
+ */
+
+#include <asm/arch/clock.h>
+#include <asm/io.h>
+#include <common.h>
+#include <config.h>
+#include <nand.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+
+/* registers */
+#define NFC_CTL 0x00000000
+#define NFC_ST 0x00000004
+#define NFC_INT 0x00000008
+#define NFC_TIMING_CTL 0x0000000C
+#define NFC_TIMING_CFG 0x00000010
+#define NFC_ADDR_LOW 0x00000014
+#define NFC_ADDR_HIGH 0x00000018
+#define NFC_SECTOR_NUM 0x0000001C
+#define NFC_CNT 0x00000020
+#define NFC_CMD 0x00000024
+#define NFC_RCMD_SET 0x00000028
+#define NFC_WCMD_SET 0x0000002C
+#define NFC_IO_DATA 0x00000030
+#define NFC_ECC_CTL 0x00000034
+#define NFC_ECC_ST 0x00000038
+#define NFC_DEBUG 0x0000003C
+#define NFC_ECC_CNT0 0x00000040
+#define NFC_ECC_CNT1 0x00000044
+#define NFC_ECC_CNT2 0x00000048
+#define NFC_ECC_CNT3 0x0000004C
+#define NFC_USER_DATA_BASE 0x00000050
+#define NFC_EFNAND_STATUS 0x00000090
+#define NFC_SPARE_AREA 0x000000A0
+#define NFC_PATTERN_ID 0x000000A4
+#define NFC_RAM0_BASE 0x00000400
+#define NFC_RAM1_BASE 0x00000800
+
+#define NFC_CTL_EN (1 << 0)
+#define NFC_CTL_RESET (1 << 1)
+#define NFC_CTL_RAM_METHOD (1 << 14)
+#define NFC_CTL_PAGE_SIZE_MASK (0xf << 8)
+#define NFC_CTL_PAGE_SIZE(a) ((fls(a) - 11) << 8)
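+/* e.g. a 2 KiB page: fls(2048) - 11 = 1 in the page-size field */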
+
+
+#define NFC_ECC_EN (1 << 0)
+#define NFC_ECC_PIPELINE (1 << 3)
+#define NFC_ECC_EXCEPTION (1 << 4)
+#define NFC_ECC_BLOCK_SIZE (1 << 5)
+#define NFC_ECC_RANDOM_EN (1 << 9)
+#define NFC_ECC_RANDOM_DIRECTION (1 << 10)
+
+
+#define NFC_ADDR_NUM_OFFSET 16
+#define NFC_SEND_ADDR (1 << 19)
+#define NFC_ACCESS_DIR (1 << 20)
+#define NFC_DATA_TRANS (1 << 21)
+#define NFC_SEND_CMD1 (1 << 22)
+#define NFC_WAIT_FLAG (1 << 23)
+#define NFC_SEND_CMD2 (1 << 24)
+#define NFC_SEQ (1 << 25)
+#define NFC_DATA_SWAP_METHOD (1 << 26)
+#define NFC_ROW_AUTO_INC (1 << 27)
+#define NFC_SEND_CMD3 (1 << 28)
+#define NFC_SEND_CMD4 (1 << 29)
+#define NFC_RAW_CMD (0 << 30)
+#define NFC_ECC_CMD (1 << 30)
+#define NFC_PAGE_CMD (2 << 30)
+
+#define NFC_ST_CMD_INT_FLAG (1 << 1)
+#define NFC_ST_DMA_INT_FLAG (1 << 2)
+#define NFC_ST_CMD_FIFO_STAT (1 << 3)
+
+#define NFC_READ_CMD_OFFSET 0
+#define NFC_RANDOM_READ_CMD0_OFFSET 8
+#define NFC_RANDOM_READ_CMD1_OFFSET 16
+
+#define NFC_CMD_RNDOUTSTART 0xE0
+#define NFC_CMD_RNDOUT 0x05
+#define NFC_CMD_READSTART 0x30
+
+struct nfc_config {
+ int page_size;
+ int ecc_strength;
+ int ecc_size;
+ int addr_cycles;
+ int nseeds;
+ bool randomize;
+ bool valid;
+};
+
+/* minimal "boot0" style NAND support for Allwinner A20 */
+
+/* random seeds used by Linux */
+const uint16_t random_seed[128] = {
+ 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+ 0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+ 0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+ 0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+ 0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+ 0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+ 0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+ 0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+ 0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+ 0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+ 0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+ 0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+ 0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+ 0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+ 0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+ 0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+#define DEFAULT_TIMEOUT_US 100000
+
+static int check_value_inner(int offset, int expected_bits,
+ int timeout_us, int negation)
+{
+ do {
+ int val = readl(offset) & expected_bits;
+ if (negation ? !val : val)
+ return 1;
+ udelay(1);
+ } while (--timeout_us);
+
+ return 0;
+}
+
+static inline int check_value(int offset, int expected_bits,
+ int timeout_us)
+{
+ return check_value_inner(offset, expected_bits, timeout_us, 0);
+}
+
+static inline int check_value_negated(int offset, int unexpected_bits,
+ int timeout_us)
+{
+ return check_value_inner(offset, unexpected_bits, timeout_us, 1);
+}
+
+static int nand_wait_cmd_fifo_empty(void)
+{
+ if (!check_value_negated(SUNXI_NFC_BASE + NFC_ST, NFC_ST_CMD_FIFO_STAT,
+ DEFAULT_TIMEOUT_US)) {
+ printf("nand: timeout waiting for empty cmd FIFO\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int nand_wait_int(void)
+{
+ if (!check_value(SUNXI_NFC_BASE + NFC_ST, NFC_ST_CMD_INT_FLAG,
+ DEFAULT_TIMEOUT_US)) {
+ printf("nand: timeout waiting for interruption\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int nand_exec_cmd(u32 cmd)
+{
+ int ret;
+
+ ret = nand_wait_cmd_fifo_empty();
+ if (ret)
+ return ret;
+
+ writel(NFC_ST_CMD_INT_FLAG, SUNXI_NFC_BASE + NFC_ST);
+ writel(cmd, SUNXI_NFC_BASE + NFC_CMD);
+
+ return nand_wait_int();
+}
+
+void nand_init(void)
+{
+ uint32_t val;
+
+ board_nand_init();
+
+ val = readl(SUNXI_NFC_BASE + NFC_CTL);
+ /* enable and reset CTL */
+ writel(val | NFC_CTL_EN | NFC_CTL_RESET,
+ SUNXI_NFC_BASE + NFC_CTL);
+
+ if (!check_value_negated(SUNXI_NFC_BASE + NFC_CTL,
+ NFC_CTL_RESET, DEFAULT_TIMEOUT_US)) {
+ printf("Couldn't initialize nand\n");
+ }
+
+ /* reset NAND */
+ nand_exec_cmd(NFC_SEND_CMD1 | NFC_WAIT_FLAG | NAND_CMD_RESET);
+}
+
+static void nand_apply_config(const struct nfc_config *conf)
+{
+ u32 val;
+
+ nand_wait_cmd_fifo_empty();
+
+ val = readl(SUNXI_NFC_BASE + NFC_CTL);
+ val &= ~NFC_CTL_PAGE_SIZE_MASK;
+ writel(val | NFC_CTL_RAM_METHOD | NFC_CTL_PAGE_SIZE(conf->page_size),
+ SUNXI_NFC_BASE + NFC_CTL);
+ writel(conf->ecc_size, SUNXI_NFC_BASE + NFC_CNT);
+ writel(conf->page_size, SUNXI_NFC_BASE + NFC_SPARE_AREA);
+}
+
+static int nand_load_page(const struct nfc_config *conf, u32 offs)
+{
+ int page = offs / conf->page_size;
+
+ writel((NFC_CMD_RNDOUTSTART << NFC_RANDOM_READ_CMD1_OFFSET) |
+ (NFC_CMD_RNDOUT << NFC_RANDOM_READ_CMD0_OFFSET) |
+ (NFC_CMD_READSTART << NFC_READ_CMD_OFFSET),
+ SUNXI_NFC_BASE + NFC_RCMD_SET);
+ writel(((page & 0xFFFF) << 16), SUNXI_NFC_BASE + NFC_ADDR_LOW);
+ writel((page >> 16) & 0xFF, SUNXI_NFC_BASE + NFC_ADDR_HIGH);
+
+ return nand_exec_cmd(NFC_SEND_CMD1 | NFC_SEND_CMD2 | NFC_RAW_CMD |
+ NFC_SEND_ADDR | NFC_WAIT_FLAG |
+ ((conf->addr_cycles - 1) << NFC_ADDR_NUM_OFFSET));
+}
+
+static int nand_change_column(u16 column)
+{
+ int ret;
+
+ writel((NFC_CMD_RNDOUTSTART << NFC_RANDOM_READ_CMD1_OFFSET) |
+ (NFC_CMD_RNDOUT << NFC_RANDOM_READ_CMD0_OFFSET) |
+ (NFC_CMD_RNDOUTSTART << NFC_READ_CMD_OFFSET),
+ SUNXI_NFC_BASE + NFC_RCMD_SET);
+ writel(column, SUNXI_NFC_BASE + NFC_ADDR_LOW);
+
+ ret = nand_exec_cmd(NFC_SEND_CMD1 | NFC_SEND_CMD2 | NFC_RAW_CMD |
+ (1 << NFC_ADDR_NUM_OFFSET) | NFC_SEND_ADDR |
+ NFC_CMD_RNDOUT);
+ if (ret)
+ return ret;
+
+ /* Ensure tCCS has passed before reading data */
+ udelay(1);
+
+ return 0;
+}
+
+static const int ecc_bytes[] = {32, 46, 54, 60, 74, 88, 102, 110, 116};
+
+static int nand_read_page(const struct nfc_config *conf, u32 offs,
+ void *dest, int len)
+{
+ int nsectors = len / conf->ecc_size;
+ u16 rand_seed = 0;
+ int oob_chunk_sz = ecc_bytes[conf->ecc_strength];
+ int page = offs / conf->page_size;
+ u32 ecc_st;
+ int i;
+
+ if (offs % conf->page_size || len % conf->ecc_size ||
+ len > conf->page_size || len < 0)
+ return -EINVAL;
+
+ /* Choose correct seed if randomized */
+ if (conf->randomize)
+ rand_seed = random_seed[page % conf->nseeds];
+
+ /* Retrieve data from SRAM (PIO) */
+ for (i = 0; i < nsectors; i++) {
+ int data_off = i * conf->ecc_size;
+ int oob_off = conf->page_size + (i * oob_chunk_sz);
+ u8 *data = dest + data_off;
+
+ /* Clear ECC status and restart ECC engine */
+ writel(0, SUNXI_NFC_BASE + NFC_ECC_ST);
+ writel((rand_seed << 16) | (conf->ecc_strength << 12) |
+ (conf->randomize ? NFC_ECC_RANDOM_EN : 0) |
+ (conf->ecc_size == 512 ? NFC_ECC_BLOCK_SIZE : 0) |
+ NFC_ECC_EN | NFC_ECC_EXCEPTION,
+ SUNXI_NFC_BASE + NFC_ECC_CTL);
+
+ /* Move the data in SRAM */
+ nand_change_column(data_off);
+ writel(conf->ecc_size, SUNXI_NFC_BASE + NFC_CNT);
+ nand_exec_cmd(NFC_DATA_TRANS);
+
+ /*
+ * Let the ECC engine consume the ECC bytes and possibly correct
+ * the data.
+ */
+ nand_change_column(oob_off);
+ nand_exec_cmd(NFC_DATA_TRANS | NFC_ECC_CMD);
+
+ /* Get the ECC status */
+ ecc_st = readl(SUNXI_NFC_BASE + NFC_ECC_ST);
+
+ /* ECC error detected. */
+ if (ecc_st & 0xffff)
+ return -EIO;
+
+ /*
+ * Return 1 if the first chunk is empty (needed for
+ * configuration detection).
+ */
+ if (!i && (ecc_st & 0x10000))
+ return 1;
+
+ /* Retrieve the data from SRAM */
+ memcpy_fromio(data, SUNXI_NFC_BASE + NFC_RAM0_BASE,
+ conf->ecc_size);
+
+ /* Stop the ECC engine */
+ writel(readl(SUNXI_NFC_BASE + NFC_ECC_CTL) & ~NFC_ECC_EN,
+ SUNXI_NFC_BASE + NFC_ECC_CTL);
+
+ if (data_off + conf->ecc_size >= len)
+ break;
+ }
+
+ return 0;
+}
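+
+/*
+ * Worked example (illustrative): with page_size = 4096, ecc_size = 1024 and
+ * ecc_strength = 3 (60 ECC bytes per sector, see ecc_bytes[] above), sector
+ * i has its data at i * 1024 and its ECC chunk at 4096 + i * 60:
+ *
+ *	sector 0: data_off = 0,    oob_off = 4096
+ *	sector 1: data_off = 1024, oob_off = 4156
+ *	sector 3: data_off = 3072, oob_off = 4276
+ */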
+
+static int nand_max_ecc_strength(struct nfc_config *conf)
+{
+ int max_oobsize, max_ecc_bytes;
+ int nsectors = conf->page_size / conf->ecc_size;
+ int i;
+
+ /*
+ * ECC strength is limited by the size of the OOB area which is
+ * correlated with the page size.
+ */
+ switch (conf->page_size) {
+ case 2048:
+ max_oobsize = 64;
+ break;
+ case 4096:
+ max_oobsize = 256;
+ break;
+ case 8192:
+ max_oobsize = 640;
+ break;
+ case 16384:
+ max_oobsize = 1664;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ max_ecc_bytes = max_oobsize / nsectors;
+
+ for (i = 0; i < ARRAY_SIZE(ecc_bytes); i++) {
+ if (ecc_bytes[i] > max_ecc_bytes)
+ break;
+ }
+
+ if (!i)
+ return -EINVAL;
+
+ return i - 1;
+}
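+
+/*
+ * Worked example (illustrative): for a 4096-byte page with 1024-byte ECC
+ * sectors, nsectors = 4 and max_oobsize = 256, so max_ecc_bytes = 64. The
+ * largest ecc_bytes[] entry that fits is 60 at index 3, so the function
+ * returns an ECC strength index of 3.
+ */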
+
+static int nand_detect_ecc_config(struct nfc_config *conf, u32 offs,
+ void *dest)
+{
+ /* NAND with pages > 4k will likely require 1k sector size. */
+ int min_ecc_size = conf->page_size > 4096 ? 1024 : 512;
+ int page = offs / conf->page_size;
+ int ret;
+
+ /*
+ * In most cases, 1k sectors are preferred over 512B ones, so start
+ * by testing this configuration first.
+ */
+ for (conf->ecc_size = 1024; conf->ecc_size >= min_ecc_size;
+ conf->ecc_size >>= 1) {
+ int max_ecc_strength = nand_max_ecc_strength(conf);
+
+ nand_apply_config(conf);
+
+ /*
+ * We are starting from the maximum ECC strength because
+ * most of the time NAND vendors provide an OOB area that
+ * barely meets the ECC requirements.
+ */
+ for (conf->ecc_strength = max_ecc_strength;
+ conf->ecc_strength >= 0;
+ conf->ecc_strength--) {
+ conf->randomize = false;
+ if (nand_change_column(0))
+ return -EIO;
+
+ /*
+ * Only read the first sector to speed up detection.
+ */
+ ret = nand_read_page(conf, offs, dest, conf->ecc_size);
+ if (!ret) {
+ return 0;
+ } else if (ret > 0) {
+ /*
+ * If the page is empty we can't deduce anything
+ * about the ECC config, so stop the detection.
+ */
+ return -EINVAL;
+ }
+
+ conf->randomize = true;
+ conf->nseeds = ARRAY_SIZE(random_seed);
+ do {
+ if (nand_change_column(0))
+ return -EIO;
+
+ if (!nand_read_page(conf, offs, dest,
+ conf->ecc_size))
+ return 0;
+
+ /*
+ * Find the next ->nseeds value that would
+ * change the randomizer seed for the page
+ * we're trying to read.
+ */
+ while (conf->nseeds >= 16) {
+ int seed = page % conf->nseeds;
+
+ conf->nseeds >>= 1;
+ if (seed != page % conf->nseeds)
+ break;
+ }
+ } while (conf->nseeds >= 16);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int nand_detect_config(struct nfc_config *conf, u32 offs, void *dest)
+{
+ if (conf->valid)
+ return 0;
+
+ /*
+ * Modern NANDs (5 address cycles) are more common than legacy
+ * (4-cycle) ones, so start testing with 5 address cycles.
+ */
+ for (conf->addr_cycles = 5;
+ conf->addr_cycles >= 4;
+ conf->addr_cycles--) {
+ int max_page_size = conf->addr_cycles == 4 ? 2048 : 16384;
+
+ /*
+ * Ignore 1k pages, since it is unclear whether that case even
+ * exists in the real world.
+ */
+ for (conf->page_size = 2048; conf->page_size <= max_page_size;
+ conf->page_size <<= 1) {
+ if (nand_load_page(conf, offs))
+ return -1;
+
+ if (!nand_detect_ecc_config(conf, offs, dest)) {
+ conf->valid = true;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
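+
+/*
+ * Detection order (illustrative summary): addr_cycles in {5, 4}, page_size
+ * in {2048, 4096, 8192, 16384} (capped at 2048 for 4 cycles), ecc_size in
+ * {1024, 512} (512 skipped for pages larger than 4k), each ECC strength
+ * from the maximum down to 0, first without and then with randomization.
+ * The first combination that yields a clean ECC read is kept in *conf.
+ */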
+
+static int nand_read_buffer(struct nfc_config *conf, uint32_t offs,
+ unsigned int size, void *dest)
+{
+ int first_seed = 0, page, ret;
+
+ size = ALIGN(size, conf->page_size);
+ page = offs / conf->page_size;
+ if (conf->randomize)
+ first_seed = page % conf->nseeds;
+
+ for (; size; size -= conf->page_size) {
+ if (nand_load_page(conf, offs))
+ return -1;
+
+ ret = nand_read_page(conf, offs, dest, conf->page_size);
+ /*
+ * The ->nseeds value should be equal to the number of pages
+ * in an eraseblock. Since we don't know this information in
+ * advance we might have picked a wrong value.
+ */
+ if (ret < 0 && conf->randomize) {
+ int cur_seed = page % conf->nseeds;
+
+ /*
+ * We already tried all the seed values => we are
+ * facing a real corruption.
+ */
+ if (cur_seed < first_seed)
+ return -EIO;
+
+ /* Try to adjust ->nseeds and read the page again... */
+ conf->nseeds = cur_seed;
+
+ if (nand_change_column(0))
+ return -EIO;
+
+ /* ... it still fails => it's a real corruption. */
+ if (nand_read_page(conf, offs, dest, conf->page_size))
+ return -EIO;
+ } else if (ret && conf->randomize) {
+ memset(dest, 0xff, conf->page_size);
+ }
+
+ page++;
+ offs += conf->page_size;
+ dest += conf->page_size;
+ }
+
+ return 0;
+}
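+
+/*
+ * Illustrative example of the ->nseeds adjustment: if the eraseblock really
+ * holds 64 pages but nseeds was detected as 128, page 64 is descrambled
+ * with seed index 64 % 128 = 64 and fails ECC. cur_seed (64) is not below
+ * first_seed (0), so nseeds is lowered to 64 and the page is re-read with
+ * seed index 64 % 64 = 0, which succeeds.
+ */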
+
+int nand_spl_load_image(uint32_t offs, unsigned int size, void *dest)
+{
+ static struct nfc_config conf = { };
+ int ret;
+
+ ret = nand_detect_config(&conf, offs, dest);
+ if (ret)
+ return ret;
+
+ return nand_read_buffer(&conf, offs, size, dest);
+}
+
+void nand_deselect(void)
+{
+ struct sunxi_ccm_reg *const ccm =
+ (struct sunxi_ccm_reg *)SUNXI_CCM_BASE;
+
+ clrbits_le32(&ccm->ahb_gate0, (CLK_GATE_OPEN << AHB_GATE_OFFSET_NAND0));
+#ifdef CONFIG_MACH_SUN9I
+ clrbits_le32(&ccm->ahb_gate1, (1 << AHB_GATE_OFFSET_DMA));
+#else
+ clrbits_le32(&ccm->ahb_gate0, (1 << AHB_GATE_OFFSET_DMA));
+#endif
+ clrbits_le32(&ccm->nand0_clk_cfg, CCM_NAND_CTRL_ENABLE | AHB_DIV_1);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/tegra_nand.c b/roms/u-boot/drivers/mtd/nand/raw/tegra_nand.c
new file mode 100644
index 000000000..6310253ef
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/tegra_nand.c
@@ -0,0 +1,1007 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2011 The Chromium OS Authors.
+ * (C) Copyright 2011 NVIDIA Corporation <www.nvidia.com>
+ * (C) Copyright 2006 Detlev Zundel, dzu@denx.de
+ * (C) Copyright 2006 DENX Software Engineering
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <memalign.h>
+#include <nand.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/funcmux.h>
+#include <asm/arch-tegra/clk_rst.h>
+#include <dm/device_compat.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <asm/gpio.h>
+#include <fdtdec.h>
+#include <bouncebuf.h>
+#include <dm.h>
+#include "tegra_nand.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define NAND_CMD_TIMEOUT_MS 10
+
+#define SKIPPED_SPARE_BYTES 4
+
+/* ECC bytes to be generated for tag data */
+#define TAG_ECC_BYTES 4
+
+static const struct udevice_id tegra_nand_dt_ids[] = {
+ {
+ .compatible = "nvidia,tegra20-nand",
+ },
+ { /* sentinel */ }
+};
+
+/* 64 byte oob block info for large page (== 2KB) device
+ *
+ * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC:
+ * Skipped bytes(4)
+ * Main area Ecc(36)
+ * Tag data(20)
+ * Tag data Ecc(4)
+ *
+ * Yaffs2 will use 16 tag bytes.
+ */
+static struct nand_ecclayout eccoob = {
+ .eccbytes = 36,
+ .eccpos = {
+ 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ },
+ .oobavail = 20,
+ .oobfree = {
+ {
+ .offset = 40,
+ .length = 20,
+ },
+ }
+};
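+
+/*
+ * Resulting OOB byte map (illustrative): bytes 0-3 skipped spare, bytes
+ * 4-39 main-area ECC (eccpos), bytes 40-59 tag data (oobfree), bytes 60-63
+ * tag data ECC (TAG_ECC_BYTES).
+ */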
+
+enum {
+ ECC_OK,
+ ECC_TAG_ERROR = 1 << 0,
+ ECC_DATA_ERROR = 1 << 1
+};
+
+/* Timing parameters */
+enum {
+ FDT_NAND_MAX_TRP_TREA,
+ FDT_NAND_TWB,
+ FDT_NAND_MAX_TCR_TAR_TRR,
+ FDT_NAND_TWHR,
+ FDT_NAND_MAX_TCS_TCH_TALS_TALH,
+ FDT_NAND_TWH,
+ FDT_NAND_TWP,
+ FDT_NAND_TRH,
+ FDT_NAND_TADL,
+
+ FDT_NAND_TIMING_COUNT
+};
+
+/* Information about an attached NAND chip */
+struct fdt_nand {
+ struct nand_ctlr *reg;
+ int enabled; /* 1 to enable, 0 to disable */
+ struct gpio_desc wp_gpio; /* write-protect GPIO */
+ s32 width; /* bit width, normally 8 */
+ u32 timing[FDT_NAND_TIMING_COUNT];
+};
+
+struct nand_drv {
+ struct nand_ctlr *reg;
+ struct fdt_nand config;
+};
+
+struct tegra_nand_info {
+ struct udevice *dev;
+ struct nand_drv nand_ctrl;
+ struct nand_chip nand_chip;
+};
+
+/**
+ * Wait for command completion
+ *
+ * @param reg nand_ctlr structure
+ * @return
+ * 1 - Command completed
+ * 0 - Timeout
+ */
+static int nand_waitfor_cmd_completion(struct nand_ctlr *reg)
+{
+ u32 reg_val;
+ int running;
+ int i;
+
+ for (i = 0; i < NAND_CMD_TIMEOUT_MS * 1000; i++) {
+ if ((readl(&reg->command) & CMD_GO) ||
+ !(readl(&reg->status) & STATUS_RBSY0) ||
+ !(readl(&reg->isr) & ISR_IS_CMD_DONE)) {
+ udelay(1);
+ continue;
+ }
+ reg_val = readl(&reg->dma_mst_ctrl);
+ /*
+ * If DMA_MST_CTRL_EN_A_ENABLE or DMA_MST_CTRL_EN_B_ENABLE
+ * is set, that means DMA engine is running.
+ *
+ * Then we have to wait until DMA_MST_CTRL_IS_DMA_DONE
+ * is cleared, indicating DMA transfer completion.
+ */
+ running = reg_val & (DMA_MST_CTRL_EN_A_ENABLE |
+ DMA_MST_CTRL_EN_B_ENABLE);
+ if (!running || (reg_val & DMA_MST_CTRL_IS_DMA_DONE))
+ return 1;
+ udelay(1);
+ }
+ return 0;
+}
+
+/**
+ * Read one byte from the chip
+ *
+ * @param mtd MTD device structure
+ * @return data byte
+ *
+ * Read function for 8bit bus-width
+ */
+static uint8_t read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info;
+
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+
+ writel(CMD_GO | CMD_PIO | CMD_RX | CMD_CE0 | CMD_A_VALID,
+ &info->reg->command);
+ if (!nand_waitfor_cmd_completion(info->reg))
+ printf("Command timeout\n");
+
+ return (uint8_t)readl(&info->reg->resp);
+}
+
+/**
+ * Read len bytes from the chip into a buffer
+ *
+ * @param mtd MTD device structure
+ * @param buf buffer to store data to
+ * @param len number of bytes to read
+ *
+ * Read function for 8bit bus-width
+ */
+static void read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i, s;
+ unsigned int reg;
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info = (struct nand_drv *)nand_get_controller_data(chip);
+
+ for (i = 0; i < len; i += 4) {
+ s = (len - i) > 4 ? 4 : len - i;
+ writel(CMD_PIO | CMD_RX | CMD_A_VALID | CMD_CE0 |
+ ((s - 1) << CMD_TRANS_SIZE_SHIFT) | CMD_GO,
+ &info->reg->command);
+ if (!nand_waitfor_cmd_completion(info->reg))
+ puts("Command timeout during read_buf\n");
+ reg = readl(&info->reg->resp);
+ memcpy(buf + i, &reg, s);
+ }
+}
+
+/**
+ * Check NAND status to see if it is ready or not
+ *
+ * @param mtd MTD device structure
+ * @return
+ * 1 - ready
+ * 0 - not ready
+ */
+static int nand_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int reg_val;
+ struct nand_drv *info;
+
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+
+ reg_val = readl(&info->reg->status);
+ if (reg_val & STATUS_RBSY0)
+ return 1;
+ else
+ return 0;
+}
+
+/* Dummy implementation: we don't support multiple chips */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ switch (chipnr) {
+ case -1:
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * Clear all interrupt status bits
+ *
+ * @param reg nand_ctlr structure
+ */
+static void nand_clear_interrupt_status(struct nand_ctlr *reg)
+{
+ u32 reg_val;
+
+ /* Clear interrupt status */
+ reg_val = readl(&reg->isr);
+ writel(reg_val, &reg->isr);
+}
+
+/**
+ * Send command to NAND device
+ *
+ * @param mtd MTD device structure
+ * @param command the command to be sent
+ * @param column the column address for this command, -1 if none
+ * @param page_addr the page address for this command, -1 if none
+ */
+static void nand_command(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *info;
+
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+
+ /*
+ * Write out the command to the device.
+ *
+ * Only command NAND_CMD_RESET or NAND_CMD_READID will come
+ * here before mtd->writesize is initialized.
+ */
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ assert(mtd->writesize != 0);
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Adjust columns for 16 bit bus-width */
+ if (column != -1 && (chip->options & NAND_BUSWIDTH_16))
+ column >>= 1;
+
+ nand_clear_interrupt_status(info->reg);
+
+ /* Stop DMA engine, clear DMA completion status */
+ writel(DMA_MST_CTRL_EN_A_DISABLE
+ | DMA_MST_CTRL_EN_B_DISABLE
+ | DMA_MST_CTRL_IS_DMA_DONE,
+ &info->reg->dma_mst_ctrl);
+
+ /*
+ * Program and erase have their own busy handlers;
+ * status and sequential-in need no delay.
+ */
+ switch (command) {
+ case NAND_CMD_READID:
+ writel(NAND_CMD_READID, &info->reg->cmd_reg1);
+ writel(column & 0xFF, &info->reg->addr_reg1);
+ writel(CMD_GO | CMD_CLE | CMD_ALE | CMD_CE0,
+ &info->reg->command);
+ break;
+ case NAND_CMD_PARAM:
+ writel(NAND_CMD_PARAM, &info->reg->cmd_reg1);
+ writel(column & 0xFF, &info->reg->addr_reg1);
+ writel(CMD_GO | CMD_CLE | CMD_ALE | CMD_CE0,
+ &info->reg->command);
+ break;
+ case NAND_CMD_READ0:
+ writel(NAND_CMD_READ0, &info->reg->cmd_reg1);
+ writel(NAND_CMD_READSTART, &info->reg->cmd_reg2);
+ writel((page_addr << 16) | (column & 0xFFFF),
+ &info->reg->addr_reg1);
+ writel(page_addr >> 16, &info->reg->addr_reg2);
+ return;
+ case NAND_CMD_SEQIN:
+ writel(NAND_CMD_SEQIN, &info->reg->cmd_reg1);
+ writel(NAND_CMD_PAGEPROG, &info->reg->cmd_reg2);
+ writel((page_addr << 16) | (column & 0xFFFF),
+ &info->reg->addr_reg1);
+ writel(page_addr >> 16,
+ &info->reg->addr_reg2);
+ return;
+ case NAND_CMD_PAGEPROG:
+ return;
+ case NAND_CMD_ERASE1:
+ writel(NAND_CMD_ERASE1, &info->reg->cmd_reg1);
+ writel(NAND_CMD_ERASE2, &info->reg->cmd_reg2);
+ writel(page_addr, &info->reg->addr_reg1);
+ writel(CMD_GO | CMD_CLE | CMD_ALE |
+ CMD_SEC_CMD | CMD_CE0 | CMD_ALE_BYTES3,
+ &info->reg->command);
+ break;
+ case NAND_CMD_ERASE2:
+ return;
+ case NAND_CMD_STATUS:
+ writel(NAND_CMD_STATUS, &info->reg->cmd_reg1);
+ writel(CMD_GO | CMD_CLE | CMD_PIO | CMD_RX
+ | ((1 - 0) << CMD_TRANS_SIZE_SHIFT)
+ | CMD_CE0,
+ &info->reg->command);
+ break;
+ case NAND_CMD_RESET:
+ writel(NAND_CMD_RESET, &info->reg->cmd_reg1);
+ writel(CMD_GO | CMD_CLE | CMD_CE0,
+ &info->reg->command);
+ break;
+ case NAND_CMD_RNDOUT:
+ default:
+ printf("%s: Unsupported command %d\n", __func__, command);
+ return;
+ }
+ if (!nand_waitfor_cmd_completion(info->reg))
+ printf("Command 0x%02X timeout\n", command);
+}
+
+/**
+ * Check whether the buffer contents are all 0xFF (blank).
+ *
+ * @param buf data buffer for blank check
+ * @param len length of the buffer in bytes
+ * @return
+ * 1 - blank
+ * 0 - non-blank
+ */
+static int blank_check(u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != 0xFF)
+ return 0;
+ return 1;
+}
+
+/**
+ * After a DMA read transfer, call this function to check whether there
+ * is any uncorrectable error in the data buffer or the OOB buffer.
+ *
+ * @param reg nand_ctlr structure
+ * @param databuf data buffer
+ * @param a_len data buffer length
+ * @param oobbuf oob buffer
+ * @param b_len oob buffer length
+ * @return
+ * ECC_OK - no ECC error or correctable ECC error
+ * ECC_TAG_ERROR - uncorrectable tag ECC error
+ * ECC_DATA_ERROR - uncorrectable data ECC error
+ * ECC_DATA_ERROR + ECC_TAG_ERROR - uncorrectable data+tag ECC error
+ */
+static int check_ecc_error(struct nand_ctlr *reg, u8 *databuf,
+ int a_len, u8 *oobbuf, int b_len)
+{
+ int return_val = ECC_OK;
+ u32 reg_val;
+
+ if (!(readl(&reg->isr) & ISR_IS_ECC_ERR))
+ return ECC_OK;
+
+ /*
+ * Area A is used for the data block (databuf). Area B is used for
+ * the spare block (oobbuf)
+ */
+ reg_val = readl(&reg->dec_status);
+ if ((reg_val & DEC_STATUS_A_ECC_FAIL) && databuf) {
+ reg_val = readl(&reg->bch_dec_status_buf);
+ /*
+ * If uncorrectable error occurs on data area, then see whether
+ * they are all FF. If all are FF, it's a blank page.
+ * Not error.
+ */
+ if ((reg_val & BCH_DEC_STATUS_FAIL_SEC_FLAG_MASK) &&
+ !blank_check(databuf, a_len))
+ return_val |= ECC_DATA_ERROR;
+ }
+
+ if ((reg_val & DEC_STATUS_B_ECC_FAIL) && oobbuf) {
+ reg_val = readl(&reg->bch_dec_status_buf);
+ /*
+ * If uncorrectable error occurs on tag area, then see whether
+ * they are all FF. If all are FF, it's a blank page.
+ * Not error.
+ */
+ if ((reg_val & BCH_DEC_STATUS_FAIL_TAG_MASK) &&
+ !blank_check(oobbuf, b_len))
+ return_val |= ECC_TAG_ERROR;
+ }
+
+ return return_val;
+}
+
+/**
+ * Set GO bit to send command to device
+ *
+ * @param reg nand_ctlr structure
+ */
+static void start_command(struct nand_ctlr *reg)
+{
+ u32 reg_val;
+
+ reg_val = readl(&reg->command);
+ reg_val |= CMD_GO;
+ writel(reg_val, &reg->command);
+}
+
+/**
+ * Clear command GO bit, DMA GO bit, and DMA completion status
+ *
+ * @param reg nand_ctlr structure
+ */
+static void stop_command(struct nand_ctlr *reg)
+{
+ /* Stop command */
+ writel(0, &reg->command);
+
+ /* Stop DMA engine and clear DMA completion status */
+ writel(DMA_MST_CTRL_GO_DISABLE
+ | DMA_MST_CTRL_IS_DMA_DONE,
+ &reg->dma_mst_ctrl);
+}
+
+/**
+ * Set up NAND bus width and page size
+ *
+ * @param info nand_info structure
+ * @param *reg_val address of reg_val
+ * @return 0 if ok, -1 on error
+ */
+static int set_bus_width_page_size(struct mtd_info *our_mtd,
+ struct fdt_nand *config, u32 *reg_val)
+{
+ if (config->width == 8)
+ *reg_val = CFG_BUS_WIDTH_8BIT;
+ else if (config->width == 16)
+ *reg_val = CFG_BUS_WIDTH_16BIT;
+ else {
+ debug("%s: Unsupported bus width %d\n", __func__,
+ config->width);
+ return -1;
+ }
+
+ if (our_mtd->writesize == 512)
+ *reg_val |= CFG_PAGE_SIZE_512;
+ else if (our_mtd->writesize == 2048)
+ *reg_val |= CFG_PAGE_SIZE_2048;
+ else if (our_mtd->writesize == 4096)
+ *reg_val |= CFG_PAGE_SIZE_4096;
+ else {
+ debug("%s: Unsupported page size %d\n", __func__,
+ our_mtd->writesize);
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Page read/write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf data buffer
+ * @param page page number
+ * @param with_ecc 1 to enable ECC, 0 to disable ECC
+ * @param is_writing 0 for read, 1 for write
+ * @return 0 when successfully completed
+ * -EINVAL when buf is not 4-byte aligned or the config is unsupported
+ * -EIO when command timeout
+ */
+static int nand_rw_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int page, int with_ecc, int is_writing)
+{
+ u32 reg_val;
+ int tag_size;
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ /* 4 * 128 = 512 bytes is the maximum size our HW can support. */
+ ALLOC_CACHE_ALIGN_BUFFER(u32, tag_buf, 128);
+ char *tag_ptr;
+ struct nand_drv *info;
+ struct fdt_nand *config;
+ unsigned int bbflags;
+ struct bounce_buffer bbstate, bbstate_oob;
+
+ if ((uintptr_t)buf & 0x03) {
+ printf("buf %p has to be 4-byte aligned\n", buf);
+ return -EINVAL;
+ }
+
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+ config = &info->config;
+ if (set_bus_width_page_size(mtd, config, &reg_val))
+ return -EINVAL;
+
+ /* Needs to be 4-byte aligned */
+ tag_ptr = (char *)tag_buf;
+
+ stop_command(info->reg);
+
+ if (is_writing)
+ bbflags = GEN_BB_READ;
+ else
+ bbflags = GEN_BB_WRITE;
+
+ bounce_buffer_start(&bbstate, (void *)buf, 1 << chip->page_shift,
+ bbflags);
+ writel((1 << chip->page_shift) - 1, &info->reg->dma_cfg_a);
+ writel(virt_to_phys(bbstate.bounce_buffer), &info->reg->data_block_ptr);
+
+ /* Set ECC selection, configure ECC settings */
+ if (with_ecc) {
+ if (is_writing)
+ memcpy(tag_ptr, chip->oob_poi + free->offset,
+ chip->ecc.layout->oobavail + TAG_ECC_BYTES);
+ tag_size = chip->ecc.layout->oobavail + TAG_ECC_BYTES;
+ reg_val |= (CFG_SKIP_SPARE_SEL_4
+ | CFG_SKIP_SPARE_ENABLE
+ | CFG_HW_ECC_CORRECTION_ENABLE
+ | CFG_ECC_EN_TAG_DISABLE
+ | CFG_HW_ECC_SEL_RS
+ | CFG_HW_ECC_ENABLE
+ | CFG_TVAL4
+ | (tag_size - 1));
+
+ if (!is_writing)
+ tag_size += SKIPPED_SPARE_BYTES;
+ bounce_buffer_start(&bbstate_oob, (void *)tag_ptr, tag_size,
+ bbflags);
+ } else {
+ tag_size = mtd->oobsize;
+ reg_val |= (CFG_SKIP_SPARE_DISABLE
+ | CFG_HW_ECC_CORRECTION_DISABLE
+ | CFG_ECC_EN_TAG_DISABLE
+ | CFG_HW_ECC_DISABLE
+ | (tag_size - 1));
+ bounce_buffer_start(&bbstate_oob, (void *)chip->oob_poi,
+ tag_size, bbflags);
+ }
+ writel(reg_val, &info->reg->config);
+ writel(virt_to_phys(bbstate_oob.bounce_buffer), &info->reg->tag_ptr);
+ writel(BCH_CONFIG_BCH_ECC_DISABLE, &info->reg->bch_config);
+ writel(tag_size - 1, &info->reg->dma_cfg_b);
+
+ nand_clear_interrupt_status(info->reg);
+
+ reg_val = CMD_CLE | CMD_ALE
+ | CMD_SEC_CMD
+ | (CMD_ALE_BYTES5 << CMD_ALE_BYTE_SIZE_SHIFT)
+ | CMD_A_VALID
+ | CMD_B_VALID
+ | (CMD_TRANS_SIZE_PAGE << CMD_TRANS_SIZE_SHIFT)
+ | CMD_CE0;
+ if (!is_writing)
+ reg_val |= (CMD_AFT_DAT_DISABLE | CMD_RX);
+ else
+ reg_val |= (CMD_AFT_DAT_ENABLE | CMD_TX);
+ writel(reg_val, &info->reg->command);
+
+ /* Setup DMA engine */
+ reg_val = DMA_MST_CTRL_GO_ENABLE
+ | DMA_MST_CTRL_BURST_8WORDS
+ | DMA_MST_CTRL_EN_A_ENABLE
+ | DMA_MST_CTRL_EN_B_ENABLE;
+
+ if (!is_writing)
+ reg_val |= DMA_MST_CTRL_DIR_READ;
+ else
+ reg_val |= DMA_MST_CTRL_DIR_WRITE;
+
+ writel(reg_val, &info->reg->dma_mst_ctrl);
+
+ start_command(info->reg);
+
+ if (!nand_waitfor_cmd_completion(info->reg)) {
+ if (!is_writing)
+ printf("Read Page 0x%X timeout ", page);
+ else
+ printf("Write Page 0x%X timeout ", page);
+ if (with_ecc)
+ printf("with ECC");
+ else
+ printf("without ECC");
+ printf("\n");
+ return -EIO;
+ }
+
+ bounce_buffer_stop(&bbstate_oob);
+ bounce_buffer_stop(&bbstate);
+
+ if (with_ecc && !is_writing) {
+ memcpy(chip->oob_poi, tag_ptr,
+ SKIPPED_SPARE_BYTES);
+ memcpy(chip->oob_poi + free->offset,
+ tag_ptr + SKIPPED_SPARE_BYTES,
+ chip->ecc.layout->oobavail);
+ reg_val = (u32)check_ecc_error(info->reg, (u8 *)buf,
+ 1 << chip->page_shift,
+ (u8 *)(tag_ptr + SKIPPED_SPARE_BYTES),
+ chip->ecc.layout->oobavail);
+ if (reg_val & ECC_TAG_ERROR)
+ printf("Read Page 0x%X tag ECC error\n", page);
+ if (reg_val & ECC_DATA_ERROR)
+ printf("Read Page 0x%X data ECC error\n",
+ page);
+ if (reg_val & (ECC_DATA_ERROR | ECC_TAG_ERROR))
+ return -EIO;
+ }
+ return 0;
+}
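+
+/*
+ * Usage sketch (illustrative): a raw page read amounts to
+ *
+ *	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ *	nand_rw_page(mtd, chip, buf, page, 0, 0);
+ *
+ * where the command is normally issued by the upper MTD layer before the
+ * ecc.read_page_raw hook below is called.
+ */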
+
+/**
+ * Hardware ecc based page read function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf buffer to store read data
+ * @param page page number to read
+ * @return 0 when successfully completed
+ * -EIO when command timeout
+ */
+static int nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ return nand_rw_page(mtd, chip, buf, page, 1, 0);
+}
+
+/**
+ * Hardware ecc based page write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf data buffer
+ */
+static int nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf, int oob_required,
+ int page)
+{
+ nand_rw_page(mtd, chip, (uint8_t *)buf, page, 1, 1);
+ return 0;
+}
+
+/**
+ * Read raw page data without ecc
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf buffer to store read data
+ * @param page page number to read
+ * @return 0 when successfully completed
+ * -EINVAL when buf is not 4-byte aligned
+ * -EIO when command timeout
+ */
+static int nand_read_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+ return nand_rw_page(mtd, chip, buf, page, 0, 0);
+}
+
+/**
+ * Raw page write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param buf data buffer
+ */
+static int nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ nand_rw_page(mtd, chip, (uint8_t *)buf, page, 0, 1);
+ return 0;
+}
+
+/**
+ * OOB data read/write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param page page number to read
+ * @param with_ecc 1 to enable ECC, 0 to disable ECC
+ * @param is_writing 0 for read, 1 for write
+ * @return 0 when successfully completed
+ * -EINVAL when chip->oob_poi is not 4-byte aligned
+ * -EIO when command timeout
+ */
+static int nand_rw_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page, int with_ecc, int is_writing)
+{
+ u32 reg_val;
+ int tag_size;
+ struct nand_oobfree *free = chip->ecc.layout->oobfree;
+ struct nand_drv *info;
+ unsigned int bbflags;
+ struct bounce_buffer bbstate_oob;
+
+ if (((uintptr_t)chip->oob_poi) & 0x03)
+ return -EINVAL;
+ info = (struct nand_drv *)nand_get_controller_data(chip);
+ if (set_bus_width_page_size(mtd, &info->config, &reg_val))
+ return -EINVAL;
+
+ stop_command(info->reg);
+
+ /* Set ECC selection */
+ tag_size = mtd->oobsize;
+ if (with_ecc)
+ reg_val |= CFG_ECC_EN_TAG_ENABLE;
+ else
+ reg_val |= (CFG_ECC_EN_TAG_DISABLE);
+
+ reg_val |= ((tag_size - 1) |
+ CFG_SKIP_SPARE_DISABLE |
+ CFG_HW_ECC_CORRECTION_DISABLE |
+ CFG_HW_ECC_DISABLE);
+ writel(reg_val, &info->reg->config);
+
+ if (is_writing && with_ecc)
+ tag_size -= TAG_ECC_BYTES;
+
+ if (is_writing)
+ bbflags = GEN_BB_READ;
+ else
+ bbflags = GEN_BB_WRITE;
+
+ bounce_buffer_start(&bbstate_oob, (void *)chip->oob_poi, tag_size,
+ bbflags);
+ writel(virt_to_phys(bbstate_oob.bounce_buffer), &info->reg->tag_ptr);
+
+ writel(BCH_CONFIG_BCH_ECC_DISABLE, &info->reg->bch_config);
+
+ writel(tag_size - 1, &info->reg->dma_cfg_b);
+
+ nand_clear_interrupt_status(info->reg);
+
+ reg_val = CMD_CLE | CMD_ALE
+ | CMD_SEC_CMD
+ | (CMD_ALE_BYTES5 << CMD_ALE_BYTE_SIZE_SHIFT)
+ | CMD_B_VALID
+ | CMD_CE0;
+ if (!is_writing)
+ reg_val |= (CMD_AFT_DAT_DISABLE | CMD_RX);
+ else
+ reg_val |= (CMD_AFT_DAT_ENABLE | CMD_TX);
+ writel(reg_val, &info->reg->command);
+
+ /* Setup DMA engine */
+ reg_val = DMA_MST_CTRL_GO_ENABLE
+ | DMA_MST_CTRL_BURST_8WORDS
+ | DMA_MST_CTRL_EN_B_ENABLE;
+ if (!is_writing)
+ reg_val |= DMA_MST_CTRL_DIR_READ;
+ else
+ reg_val |= DMA_MST_CTRL_DIR_WRITE;
+
+ writel(reg_val, &info->reg->dma_mst_ctrl);
+
+ start_command(info->reg);
+
+ if (!nand_waitfor_cmd_completion(info->reg)) {
+ if (!is_writing)
+ printf("Read OOB of Page 0x%X timeout\n", page);
+ else
+ printf("Write OOB of Page 0x%X timeout\n", page);
+ return -EIO;
+ }
+
+ bounce_buffer_stop(&bbstate_oob);
+
+ if (with_ecc && !is_writing) {
+ reg_val = (u32)check_ecc_error(info->reg, 0, 0,
+ (u8 *)(chip->oob_poi + free->offset),
+ chip->ecc.layout->oobavail);
+ if (reg_val & ECC_TAG_ERROR)
+ printf("Read OOB of Page 0x%X tag ECC error\n", page);
+ }
+ return 0;
+}
+
+/**
+ * OOB data read function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param page page number to read
+ */
+static int nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+ nand_rw_oob(mtd, chip, page, 0, 0);
+ return 0;
+}
+
+/**
+ * OOB data write function
+ *
+ * @param mtd mtd info structure
+ * @param chip nand chip info structure
+ * @param page page number to write
+ * @return 0 when successfully completed
+ * -EINVAL when chip->oob_poi is not 4-byte aligned
+ * -EIO when command timeout
+ */
+static int nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+
+ return nand_rw_oob(mtd, chip, page, 0, 1);
+}
+
+/**
+ * Set up NAND memory timings according to the provided parameters
+ *
+ * @param timing Timing parameters
+ * @param reg NAND controller register address
+ */
+static void setup_timing(unsigned timing[FDT_NAND_TIMING_COUNT],
+ struct nand_ctlr *reg)
+{
+ u32 reg_val, clk_rate, clk_period, time_val;
+
+ clk_rate = (u32)clock_get_periph_rate(PERIPH_ID_NDFLASH,
+ CLOCK_ID_PERIPH) / 1000000;
+ clk_period = 1000 / clk_rate;
+ reg_val = ((timing[FDT_NAND_MAX_TRP_TREA] / clk_period) <<
+ TIMING_TRP_RESP_CNT_SHIFT) & TIMING_TRP_RESP_CNT_MASK;
+ reg_val |= ((timing[FDT_NAND_TWB] / clk_period) <<
+ TIMING_TWB_CNT_SHIFT) & TIMING_TWB_CNT_MASK;
+ time_val = timing[FDT_NAND_MAX_TCR_TAR_TRR] / clk_period;
+ if (time_val > 2)
+ reg_val |= ((time_val - 2) << TIMING_TCR_TAR_TRR_CNT_SHIFT) &
+ TIMING_TCR_TAR_TRR_CNT_MASK;
+ reg_val |= ((timing[FDT_NAND_TWHR] / clk_period) <<
+ TIMING_TWHR_CNT_SHIFT) & TIMING_TWHR_CNT_MASK;
+ time_val = timing[FDT_NAND_MAX_TCS_TCH_TALS_TALH] / clk_period;
+ if (time_val > 1)
+ reg_val |= ((time_val - 1) << TIMING_TCS_CNT_SHIFT) &
+ TIMING_TCS_CNT_MASK;
+ reg_val |= ((timing[FDT_NAND_TWH] / clk_period) <<
+ TIMING_TWH_CNT_SHIFT) & TIMING_TWH_CNT_MASK;
+ reg_val |= ((timing[FDT_NAND_TWP] / clk_period) <<
+ TIMING_TWP_CNT_SHIFT) & TIMING_TWP_CNT_MASK;
+ reg_val |= ((timing[FDT_NAND_TRH] / clk_period) <<
+ TIMING_TRH_CNT_SHIFT) & TIMING_TRH_CNT_MASK;
+ reg_val |= ((timing[FDT_NAND_MAX_TRP_TREA] / clk_period) <<
+ TIMING_TRP_CNT_SHIFT) & TIMING_TRP_CNT_MASK;
+ writel(reg_val, &reg->timing);
+
+ reg_val = 0;
+ time_val = timing[FDT_NAND_TADL] / clk_period;
+ if (time_val > 2)
+ reg_val = (time_val - 2) & TIMING2_TADL_CNT_MASK;
+ writel(reg_val, &reg->timing2);
+}
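+
+/*
+ * Worked example (illustrative): with the controller clocked at 52 MHz,
+ * clk_rate = 52 and clk_period = 1000 / 52 = 19 ns (integer division), so
+ * a device tree twp value of 100 ns programs 100 / 19 = 5 into the
+ * TIMING_TWP_CNT field. Fields with a -1/-2 adjustment (tcs, tadl,
+ * tcr/tar/trr) stay 0 when the requested time fits within the adjustment.
+ */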
+
+/**
+ * Decode NAND parameters from the device tree
+ *
+ * @param dev Driver model device
+ * @param config Device tree NAND configuration
+ * @return 0 if ok, -ve on error (FDT_ERR_...)
+ */
+static int fdt_decode_nand(struct udevice *dev, struct fdt_nand *config)
+{
+ int err;
+
+ config->reg = (struct nand_ctlr *)dev_read_addr(dev);
+ config->enabled = dev_read_enabled(dev);
+ config->width = dev_read_u32_default(dev, "nvidia,nand-width", 8);
+ err = gpio_request_by_name(dev, "nvidia,wp-gpios", 0, &config->wp_gpio,
+ GPIOD_IS_OUT);
+ if (err)
+ return err;
+ err = dev_read_u32_array(dev, "nvidia,timing", config->timing,
+ FDT_NAND_TIMING_COUNT);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tegra_probe(struct udevice *dev)
+{
+ struct tegra_nand_info *tegra = dev_get_priv(dev);
+ struct nand_chip *nand = &tegra->nand_chip;
+ struct nand_drv *info = &tegra->nand_ctrl;
+ struct fdt_nand *config = &info->config;
+ struct mtd_info *our_mtd;
+ int ret;
+
+ if (fdt_decode_nand(dev, config)) {
+ printf("Could not decode nand-flash in device tree\n");
+ return -1;
+ }
+ if (!config->enabled)
+ return -1;
+ info->reg = config->reg;
+ nand->ecc.mode = NAND_ECC_HW;
+ nand->ecc.layout = &eccoob;
+
+ nand->options = LP_OPTIONS;
+ nand->cmdfunc = nand_command;
+ nand->read_byte = read_byte;
+ nand->read_buf = read_buf;
+ nand->ecc.read_page = nand_read_page_hwecc;
+ nand->ecc.write_page = nand_write_page_hwecc;
+ nand->ecc.read_page_raw = nand_read_page_raw;
+ nand->ecc.write_page_raw = nand_write_page_raw;
+ nand->ecc.read_oob = nand_read_oob;
+ nand->ecc.write_oob = nand_write_oob;
+ nand->ecc.strength = 1;
+ nand->select_chip = nand_select_chip;
+ nand->dev_ready = nand_dev_ready;
+ nand_set_controller_data(nand, &tegra->nand_ctrl);
+
+ /* Disable subpage writes as we do not provide ecc->hwctl */
+ nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+ /* Adjust controller clock rate */
+ clock_start_periph_pll(PERIPH_ID_NDFLASH, CLOCK_ID_PERIPH, 52000000);
+
+ /* Adjust timing for NAND device */
+ setup_timing(config->timing, info->reg);
+
+ dm_gpio_set_value(&config->wp_gpio, 1);
+
+ our_mtd = nand_to_mtd(nand);
+ ret = nand_scan_ident(our_mtd, CONFIG_SYS_NAND_MAX_CHIPS, NULL);
+ if (ret)
+ return ret;
+
+ nand->ecc.size = our_mtd->writesize;
+ nand->ecc.bytes = our_mtd->oobsize;
+
+ ret = nand_scan_tail(our_mtd);
+ if (ret)
+ return ret;
+
+ ret = nand_register(0, our_mtd);
+ if (ret) {
+ dev_err(dev, "Failed to register MTD: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+U_BOOT_DRIVER(tegra_nand) = {
+ .name = "tegra-nand",
+ .id = UCLASS_MTD,
+ .of_match = tegra_nand_dt_ids,
+ .probe = tegra_probe,
+ .priv_auto = sizeof(struct tegra_nand_info),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(tegra_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name,
+ ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/raw/tegra_nand.h b/roms/u-boot/drivers/mtd/nand/raw/tegra_nand.h
new file mode 100644
index 000000000..774016066
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/tegra_nand.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2011 NVIDIA Corporation <www.nvidia.com>
+ */
+
+/* register offset */
+#define COMMAND_0 0x00
+#define CMD_GO (1 << 31)
+#define CMD_CLE (1 << 30)
+#define CMD_ALE (1 << 29)
+#define CMD_PIO (1 << 28)
+#define CMD_TX (1 << 27)
+#define CMD_RX (1 << 26)
+#define CMD_SEC_CMD (1 << 25)
+#define CMD_AFT_DAT_MASK (1 << 24)
+#define CMD_AFT_DAT_DISABLE 0
+#define CMD_AFT_DAT_ENABLE (1 << 24)
+#define CMD_TRANS_SIZE_SHIFT 20
+#define CMD_TRANS_SIZE_PAGE 8
+#define CMD_A_VALID (1 << 19)
+#define CMD_B_VALID (1 << 18)
+#define CMD_RD_STATUS_CHK (1 << 17)
+#define CMD_R_BSY_CHK (1 << 16)
+#define CMD_CE7 (1 << 15)
+#define CMD_CE6 (1 << 14)
+#define CMD_CE5 (1 << 13)
+#define CMD_CE4 (1 << 12)
+#define CMD_CE3 (1 << 11)
+#define CMD_CE2 (1 << 10)
+#define CMD_CE1 (1 << 9)
+#define CMD_CE0 (1 << 8)
+#define CMD_CLE_BYTE_SIZE_SHIFT 4
+enum {
+ CMD_CLE_BYTES1 = 0,
+ CMD_CLE_BYTES2,
+ CMD_CLE_BYTES3,
+ CMD_CLE_BYTES4,
+};
+#define CMD_ALE_BYTE_SIZE_SHIFT 0
+enum {
+ CMD_ALE_BYTES1 = 0,
+ CMD_ALE_BYTES2,
+ CMD_ALE_BYTES3,
+ CMD_ALE_BYTES4,
+ CMD_ALE_BYTES5,
+ CMD_ALE_BYTES6,
+ CMD_ALE_BYTES7,
+ CMD_ALE_BYTES8
+};
+
+#define STATUS_0 0x04
+#define STATUS_RBSY0 (1 << 8)
+
+#define ISR_0 0x08
+#define ISR_IS_CMD_DONE (1 << 5)
+#define ISR_IS_ECC_ERR (1 << 4)
+
+#define IER_0 0x0C
+
+#define CFG_0 0x10
+#define CFG_HW_ECC_MASK (1 << 31)
+#define CFG_HW_ECC_DISABLE 0
+#define CFG_HW_ECC_ENABLE (1 << 31)
+#define CFG_HW_ECC_SEL_MASK (1 << 30)
+#define CFG_HW_ECC_SEL_HAMMING 0
+#define CFG_HW_ECC_SEL_RS (1 << 30)
+#define CFG_HW_ECC_CORRECTION_MASK (1 << 29)
+#define CFG_HW_ECC_CORRECTION_DISABLE 0
+#define CFG_HW_ECC_CORRECTION_ENABLE (1 << 29)
+#define CFG_PIPELINE_EN_MASK (1 << 28)
+#define CFG_PIPELINE_EN_DISABLE 0
+#define CFG_PIPELINE_EN_ENABLE (1 << 28)
+#define CFG_ECC_EN_TAG_MASK (1 << 27)
+#define CFG_ECC_EN_TAG_DISABLE 0
+#define CFG_ECC_EN_TAG_ENABLE (1 << 27)
+#define CFG_TVALUE_MASK (3 << 24)
+enum {
+ CFG_TVAL4 = 0 << 24,
+ CFG_TVAL6 = 1 << 24,
+ CFG_TVAL8 = 2 << 24
+};
+#define CFG_SKIP_SPARE_MASK (1 << 23)
+#define CFG_SKIP_SPARE_DISABLE 0
+#define CFG_SKIP_SPARE_ENABLE (1 << 23)
+#define CFG_COM_BSY_MASK (1 << 22)
+#define CFG_COM_BSY_DISABLE 0
+#define CFG_COM_BSY_ENABLE (1 << 22)
+#define CFG_BUS_WIDTH_MASK (1 << 21)
+#define CFG_BUS_WIDTH_8BIT 0
+#define CFG_BUS_WIDTH_16BIT (1 << 21)
+#define CFG_LPDDR1_MODE_MASK (1 << 20)
+#define CFG_LPDDR1_MODE_DISABLE 0
+#define CFG_LPDDR1_MODE_ENABLE (1 << 20)
+#define CFG_EDO_MODE_MASK (1 << 19)
+#define CFG_EDO_MODE_DISABLE 0
+#define CFG_EDO_MODE_ENABLE (1 << 19)
+#define CFG_PAGE_SIZE_SEL_MASK (7 << 16)
+enum {
+ CFG_PAGE_SIZE_256 = 0 << 16,
+ CFG_PAGE_SIZE_512 = 1 << 16,
+ CFG_PAGE_SIZE_1024 = 2 << 16,
+ CFG_PAGE_SIZE_2048 = 3 << 16,
+ CFG_PAGE_SIZE_4096 = 4 << 16
+};
+#define CFG_SKIP_SPARE_SEL_MASK (3 << 14)
+enum {
+ CFG_SKIP_SPARE_SEL_4 = 0 << 14,
+ CFG_SKIP_SPARE_SEL_8 = 1 << 14,
+ CFG_SKIP_SPARE_SEL_12 = 2 << 14,
+ CFG_SKIP_SPARE_SEL_16 = 3 << 14
+};
+#define CFG_TAG_BYTE_SIZE_MASK 0x1FF
+
+#define TIMING_0 0x14
+#define TIMING_TRP_RESP_CNT_SHIFT 28
+#define TIMING_TRP_RESP_CNT_MASK (0xf << TIMING_TRP_RESP_CNT_SHIFT)
+#define TIMING_TWB_CNT_SHIFT 24
+#define TIMING_TWB_CNT_MASK (0xf << TIMING_TWB_CNT_SHIFT)
+#define TIMING_TCR_TAR_TRR_CNT_SHIFT 20
+#define TIMING_TCR_TAR_TRR_CNT_MASK (0xf << TIMING_TCR_TAR_TRR_CNT_SHIFT)
+#define TIMING_TWHR_CNT_SHIFT 16
+#define TIMING_TWHR_CNT_MASK (0xf << TIMING_TWHR_CNT_SHIFT)
+#define TIMING_TCS_CNT_SHIFT 14
+#define TIMING_TCS_CNT_MASK (3 << TIMING_TCS_CNT_SHIFT)
+#define TIMING_TWH_CNT_SHIFT 12
+#define TIMING_TWH_CNT_MASK (3 << TIMING_TWH_CNT_SHIFT)
+#define TIMING_TWP_CNT_SHIFT 8
+#define TIMING_TWP_CNT_MASK (0xf << TIMING_TWP_CNT_SHIFT)
+#define TIMING_TRH_CNT_SHIFT 4
+#define TIMING_TRH_CNT_MASK (3 << TIMING_TRH_CNT_SHIFT)
+#define TIMING_TRP_CNT_SHIFT 0
+#define TIMING_TRP_CNT_MASK (0xf << TIMING_TRP_CNT_SHIFT)
+
+#define RESP_0 0x18
+
+#define TIMING2_0 0x1C
+#define TIMING2_TADL_CNT_SHIFT 0
+#define TIMING2_TADL_CNT_MASK (0xf << TIMING2_TADL_CNT_SHIFT)
+
+#define CMD_REG1_0 0x20
+#define CMD_REG2_0 0x24
+#define ADDR_REG1_0 0x28
+#define ADDR_REG2_0 0x2C
+
+#define DMA_MST_CTRL_0 0x30
+#define DMA_MST_CTRL_GO_MASK (1 << 31)
+#define DMA_MST_CTRL_GO_DISABLE 0
+#define DMA_MST_CTRL_GO_ENABLE (1 << 31)
+#define DMA_MST_CTRL_DIR_MASK (1 << 30)
+#define DMA_MST_CTRL_DIR_READ 0
+#define DMA_MST_CTRL_DIR_WRITE (1 << 30)
+#define DMA_MST_CTRL_PERF_EN_MASK (1 << 29)
+#define DMA_MST_CTRL_PERF_EN_DISABLE 0
+#define DMA_MST_CTRL_PERF_EN_ENABLE (1 << 29)
+#define DMA_MST_CTRL_REUSE_BUFFER_MASK (1 << 27)
+#define DMA_MST_CTRL_REUSE_BUFFER_DISABLE 0
+#define DMA_MST_CTRL_REUSE_BUFFER_ENABLE (1 << 27)
+#define DMA_MST_CTRL_BURST_SIZE_SHIFT 24
+#define DMA_MST_CTRL_BURST_SIZE_MASK (7 << DMA_MST_CTRL_BURST_SIZE_SHIFT)
+enum {
+ DMA_MST_CTRL_BURST_1WORDS = 2 << DMA_MST_CTRL_BURST_SIZE_SHIFT,
+ DMA_MST_CTRL_BURST_4WORDS = 3 << DMA_MST_CTRL_BURST_SIZE_SHIFT,
+ DMA_MST_CTRL_BURST_8WORDS = 4 << DMA_MST_CTRL_BURST_SIZE_SHIFT,
+ DMA_MST_CTRL_BURST_16WORDS = 5 << DMA_MST_CTRL_BURST_SIZE_SHIFT
+};
+#define DMA_MST_CTRL_IS_DMA_DONE (1 << 20)
+#define DMA_MST_CTRL_EN_A_MASK (1 << 2)
+#define DMA_MST_CTRL_EN_A_DISABLE 0
+#define DMA_MST_CTRL_EN_A_ENABLE (1 << 2)
+#define DMA_MST_CTRL_EN_B_MASK (1 << 1)
+#define DMA_MST_CTRL_EN_B_DISABLE 0
+#define DMA_MST_CTRL_EN_B_ENABLE (1 << 1)
+
+#define DMA_CFG_A_0 0x34
+#define DMA_CFG_B_0 0x38
+#define FIFO_CTRL_0 0x3C
+#define DATA_BLOCK_PTR_0 0x40
+#define TAG_PTR_0 0x44
+#define ECC_PTR_0 0x48
+
+#define DEC_STATUS_0 0x4C
+#define DEC_STATUS_A_ECC_FAIL (1 << 1)
+#define DEC_STATUS_B_ECC_FAIL (1 << 0)
+
+#define BCH_CONFIG_0 0xCC
+#define BCH_CONFIG_BCH_TVALUE_SHIFT 4
+#define BCH_CONFIG_BCH_TVALUE_MASK (3 << BCH_CONFIG_BCH_TVALUE_SHIFT)
+enum {
+ BCH_CONFIG_BCH_TVAL4 = 0 << BCH_CONFIG_BCH_TVALUE_SHIFT,
+ BCH_CONFIG_BCH_TVAL8 = 1 << BCH_CONFIG_BCH_TVALUE_SHIFT,
+ BCH_CONFIG_BCH_TVAL14 = 2 << BCH_CONFIG_BCH_TVALUE_SHIFT,
+ BCH_CONFIG_BCH_TVAL16 = 3 << BCH_CONFIG_BCH_TVALUE_SHIFT
+};
+#define BCH_CONFIG_BCH_ECC_MASK (1 << 0)
+#define BCH_CONFIG_BCH_ECC_DISABLE 0
+#define BCH_CONFIG_BCH_ECC_ENABLE (1 << 0)
+
+#define BCH_DEC_RESULT_0 0xD0
+#define BCH_DEC_RESULT_CORRFAIL_ERR_MASK (1 << 8)
+#define BCH_DEC_RESULT_PAGE_COUNT_MASK 0xFF
+
+#define BCH_DEC_STATUS_BUF_0 0xD4
+#define BCH_DEC_STATUS_FAIL_SEC_FLAG_MASK 0xFF000000
+#define BCH_DEC_STATUS_CORR_SEC_FLAG_MASK 0x00FF0000
+#define BCH_DEC_STATUS_FAIL_TAG_MASK (1 << 14)
+#define BCH_DEC_STATUS_CORR_TAG_MASK (1 << 13)
+#define BCH_DEC_STATUS_MAX_CORR_CNT_MASK (0x1f << 8)
+#define BCH_DEC_STATUS_PAGE_NUMBER_MASK 0xFF
+
+#define LP_OPTIONS 0
+
+struct nand_ctlr {
+ u32 command; /* offset 00h */
+ u32 status; /* offset 04h */
+ u32 isr; /* offset 08h */
+ u32 ier; /* offset 0Ch */
+ u32 config; /* offset 10h */
+ u32 timing; /* offset 14h */
+ u32 resp; /* offset 18h */
+ u32 timing2; /* offset 1Ch */
+ u32 cmd_reg1; /* offset 20h */
+ u32 cmd_reg2; /* offset 24h */
+ u32 addr_reg1; /* offset 28h */
+ u32 addr_reg2; /* offset 2Ch */
+ u32 dma_mst_ctrl; /* offset 30h */
+ u32 dma_cfg_a; /* offset 34h */
+ u32 dma_cfg_b; /* offset 38h */
+ u32 fifo_ctrl; /* offset 3Ch */
+ u32 data_block_ptr; /* offset 40h */
+ u32 tag_ptr; /* offset 44h */
+ u32 resv1; /* offset 48h */
+ u32 dec_status; /* offset 4Ch */
+ u32 hwstatus_cmd; /* offset 50h */
+ u32 hwstatus_mask; /* offset 54h */
+ u32 resv2[29];
+ u32 bch_config; /* offset CCh */
+ u32 bch_dec_result; /* offset D0h */
+ u32 bch_dec_status_buf; /* offset D4h */
+};
diff --git a/roms/u-boot/drivers/mtd/nand/raw/vf610_nfc.c b/roms/u-boot/drivers/mtd/nand/raw/vf610_nfc.c
new file mode 100644
index 000000000..e33953ec7
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/vf610_nfc.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2009-2015 Freescale Semiconductor, Inc. and others
+ *
+ * Description: MPC5125, VF610, MCF54418 and Kinetis K70 NAND driver.
+ * Ported to U-Boot by Stefan Agner
+ * Based on RFC driver posted on Kernel Mailing list by Bill Pringlemeir
+ * Jason ported to M54418TWR and MVFA5.
+ * Authors: Stefan Agner <stefan.agner@toradex.com>
+ * Bill Pringlemeir <bpringlemeir@nbsps.com>
+ * Shaohui Xie <b21989@freescale.com>
+ * Jason Jin <Jason.jin@freescale.com>
+ *
+ * Based on original driver mpc5121_nfc.c.
+ *
+ * Limitations:
+ * - Untested on MPC5125 and M54418.
+ * - DMA and pipelining not used.
+ * - 2K pages or less.
+ * - HW ECC: Only 2K page with 64+ OOB.
+ * - HW ECC: Only 24 and 32-bit error correction implemented.
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <dm/device_compat.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <nand.h>
+#include <errno.h>
+#include <asm/io.h>
+#if CONFIG_NAND_VF610_NFC_DT
+#include <dm.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#endif
+
+/* Register Offsets */
+#define NFC_FLASH_CMD1 0x3F00
+#define NFC_FLASH_CMD2 0x3F04
+#define NFC_COL_ADDR 0x3F08
+#define NFC_ROW_ADDR 0x3F0c
+#define NFC_ROW_ADDR_INC 0x3F14
+#define NFC_FLASH_STATUS1 0x3F18
+#define NFC_FLASH_STATUS2 0x3F1c
+#define NFC_CACHE_SWAP 0x3F28
+#define NFC_SECTOR_SIZE 0x3F2c
+#define NFC_FLASH_CONFIG 0x3F30
+#define NFC_IRQ_STATUS 0x3F38
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n) ((n) * 0x1000)
+
+#define PAGE_2K 0x0800
+#define OOB_64 0x0040
+#define OOB_MAX 0x0100
+
+/*
+ * NFC_CMD2[CODE] values. See section:
+ * - 31.4.7 Flash Command Code Description, Vybrid manual
+ * - 23.8.6 Flash Command Sequencer, MPC5125 manual
+ *
+ * Briefly these are bitmasks of controller cycles.
+ */
+#define READ_PAGE_CMD_CODE 0x7EE0
+#define READ_ONFI_PARAM_CMD_CODE 0x4860
+#define PROGRAM_PAGE_CMD_CODE 0x7FC0
+#define ERASE_CMD_CODE 0x4EC0
+#define READ_ID_CMD_CODE 0x4804
+#define RESET_CMD_CODE 0x4040
+#define STATUS_READ_CMD_CODE 0x4068
+
+/* NFC ECC mode define */
+#define ECC_BYPASS 0
+#define ECC_45_BYTE 6
+#define ECC_60_BYTE 7
+
+/*** Register Mask and bit definitions */
+
+/* NFC_FLASH_CMD1 Field */
+#define CMD_BYTE2_MASK 0xFF000000
+#define CMD_BYTE2_SHIFT 24
+
+/* NFC_FLASH_CM2 Field */
+#define CMD_BYTE1_MASK 0xFF000000
+#define CMD_BYTE1_SHIFT 24
+#define CMD_CODE_MASK 0x00FFFF00
+#define CMD_CODE_SHIFT 8
+#define BUFNO_MASK 0x00000006
+#define BUFNO_SHIFT 1
+#define START_BIT (1<<0)
+
+/* NFC_COL_ADDR Field */
+#define COL_ADDR_MASK 0x0000FFFF
+#define COL_ADDR_SHIFT 0
+
+/* NFC_ROW_ADDR Field */
+#define ROW_ADDR_MASK 0x00FFFFFF
+#define ROW_ADDR_SHIFT 0
+#define ROW_ADDR_CHIP_SEL_RB_MASK 0xF0000000
+#define ROW_ADDR_CHIP_SEL_RB_SHIFT 28
+#define ROW_ADDR_CHIP_SEL_MASK 0x0F000000
+#define ROW_ADDR_CHIP_SEL_SHIFT 24
+
+/* NFC_FLASH_STATUS2 Field */
+#define STATUS_BYTE1_MASK 0x000000FF
+
+/* NFC_FLASH_CONFIG Field */
+#define CONFIG_ECC_SRAM_ADDR_MASK 0x7FC00000
+#define CONFIG_ECC_SRAM_ADDR_SHIFT 22
+#define CONFIG_ECC_SRAM_REQ_BIT (1<<21)
+#define CONFIG_DMA_REQ_BIT (1<<20)
+#define CONFIG_ECC_MODE_MASK 0x000E0000
+#define CONFIG_ECC_MODE_SHIFT 17
+#define CONFIG_FAST_FLASH_BIT (1<<16)
+#define CONFIG_16BIT (1<<7)
+#define CONFIG_BOOT_MODE_BIT (1<<6)
+#define CONFIG_ADDR_AUTO_INCR_BIT (1<<5)
+#define CONFIG_BUFNO_AUTO_INCR_BIT (1<<4)
+#define CONFIG_PAGE_CNT_MASK 0xF
+#define CONFIG_PAGE_CNT_SHIFT 0
+
+/* NFC_IRQ_STATUS Field */
+#define IDLE_IRQ_BIT (1<<29)
+#define IDLE_EN_BIT (1<<20)
+#define CMD_DONE_CLEAR_BIT (1<<18)
+#define IDLE_CLEAR_BIT (1<<17)
+
+#define NFC_TIMEOUT (1000)
+
+/*
+ * ECC status - seems to consume 8 bytes (double word). The documented
+ * status byte is located in the lowest byte of the second word (which is
+ * the 4th or 7th byte depending on endianness).
+ * Calculate an offset to store the ECC status at the end of the buffer.
+ */
+#define ECC_SRAM_ADDR (PAGE_2K + OOB_MAX - 8)
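+
+/*
+ * Illustrative arithmetic: ECC_SRAM_ADDR = 0x800 + 0x100 - 8 = 0x8F8, i.e.
+ * the 8-byte status area occupies the very end of the 2K-page-plus-OOB
+ * buffer; the documented status byte is then read at offset 0x8F8 + 4 in
+ * vf610_nfc_correct_data().
+ */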
+
+#define ECC_STATUS 0x4
+#define ECC_STATUS_MASK 0x80
+#define ECC_STATUS_ERR_COUNT 0x3F
+
+enum vf610_nfc_alt_buf {
+ ALT_BUF_DATA = 0,
+ ALT_BUF_ID = 1,
+ ALT_BUF_STAT = 2,
+ ALT_BUF_ONFI = 3,
+};
+
+struct vf610_nfc {
+ struct nand_chip chip;
+ /* NULL without CONFIG_NAND_VF610_NFC_DT */
+ struct udevice *dev;
+ void __iomem *regs;
+ uint buf_offset;
+ int write_sz;
+ /* Status and ID are in alternate locations. */
+ enum vf610_nfc_alt_buf alt_buf;
+};
+
+#define mtd_to_nfc(_mtd) nand_get_controller_data(mtd_to_nand(_mtd))
+
+#if defined(CONFIG_SYS_NAND_VF610_NFC_45_ECC_BYTES)
+#define ECC_HW_MODE ECC_45_BYTE
+
+static struct nand_ecclayout vf610_nfc_ecc = {
+ .eccbytes = 45,
+ .eccpos = {19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = {
+ {.offset = 2,
+ .length = 17} }
+};
+#elif defined(CONFIG_SYS_NAND_VF610_NFC_60_ECC_BYTES)
+#define ECC_HW_MODE ECC_60_BYTE
+
+static struct nand_ecclayout vf610_nfc_ecc = {
+ .eccbytes = 60,
+ .eccpos = { 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 32, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 53, 54, 55, 56, 57, 58, 59,
+ 60, 61, 62, 63 },
+ .oobfree = {
+ {.offset = 2,
+ .length = 2} }
+};
+#endif
+
+static inline u32 vf610_nfc_read(struct mtd_info *mtd, uint reg)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ return readl(nfc->regs + reg);
+}
+
+static inline void vf610_nfc_write(struct mtd_info *mtd, uint reg, u32 val)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ writel(val, nfc->regs + reg);
+}
+
+static inline void vf610_nfc_set(struct mtd_info *mtd, uint reg, u32 bits)
+{
+ vf610_nfc_write(mtd, reg, vf610_nfc_read(mtd, reg) | bits);
+}
+
+static inline void vf610_nfc_clear(struct mtd_info *mtd, uint reg, u32 bits)
+{
+ vf610_nfc_write(mtd, reg, vf610_nfc_read(mtd, reg) & ~bits);
+}
+
+static inline void vf610_nfc_set_field(struct mtd_info *mtd, u32 reg,
+ u32 mask, u32 shift, u32 val)
+{
+ vf610_nfc_write(mtd, reg,
+ (vf610_nfc_read(mtd, reg) & (~mask)) | val << shift);
+}
+
+static inline void vf610_nfc_memcpy(void *dst, const void *src, size_t n)
+{
+ /*
+ * Use this accessor for the internal SRAM buffers. On the ARM
+ * Freescale Vybrid SoC it's known that the driver can treat
+ * the SRAM buffer as if it's memory. Other platforms might need
+ * to treat the buffers differently.
+ *
+ * For the time being, use memcpy.
+ */
+ memcpy(dst, src, n);
+}
+
+/* Clear flags for upcoming command */
+static inline void vf610_nfc_clear_status(void __iomem *regbase)
+{
+ void __iomem *reg = regbase + NFC_IRQ_STATUS;
+ u32 tmp = __raw_readl(reg);
+ tmp |= CMD_DONE_CLEAR_BIT | IDLE_CLEAR_BIT;
+ __raw_writel(tmp, reg);
+}
+
+/* Wait for complete operation */
+static void vf610_nfc_done(struct mtd_info *mtd)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ uint start;
+
+ /*
+ * A barrier is needed after this write; the write must complete
+ * before the next register is read for the first time.
+ * vf610_nfc_set implies such a barrier by using writel
+ * to write to the register.
+ */
+ vf610_nfc_set(mtd, NFC_FLASH_CMD2, START_BIT);
+
+ start = get_timer(0);
+
+ while (!(vf610_nfc_read(mtd, NFC_IRQ_STATUS) & IDLE_IRQ_BIT)) {
+ if (get_timer(start) > NFC_TIMEOUT) {
+ printf("Timeout while waiting for IDLE.\n");
+ return;
+ }
+ }
+ vf610_nfc_clear_status(nfc->regs);
+}
+
+static u8 vf610_nfc_get_id(struct mtd_info *mtd, int col)
+{
+ u32 flash_id;
+
+ if (col < 4) {
+ flash_id = vf610_nfc_read(mtd, NFC_FLASH_STATUS1);
+ flash_id >>= (3 - col) * 8;
+ } else {
+ flash_id = vf610_nfc_read(mtd, NFC_FLASH_STATUS2);
+ flash_id >>= 24;
+ }
+
+ return flash_id & 0xff;
+}
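+
+/*
+ * Illustrative mapping: after READ_ID the controller packs the ID bytes
+ * big-endian into the status registers, so for a 5-byte ID:
+ *
+ *	col 0 -> STATUS1 bits 31:24    col 3 -> STATUS1 bits 7:0
+ *	col 1 -> STATUS1 bits 23:16    col 4 -> STATUS2 bits 31:24
+ *	col 2 -> STATUS1 bits 15:8
+ */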
+
+static u8 vf610_nfc_get_status(struct mtd_info *mtd)
+{
+ return vf610_nfc_read(mtd, NFC_FLASH_STATUS2) & STATUS_BYTE1_MASK;
+}
+
+/* Single command */
+static void vf610_nfc_send_command(void __iomem *regbase, u32 cmd_byte1,
+ u32 cmd_code)
+{
+ void __iomem *reg = regbase + NFC_FLASH_CMD2;
+ u32 tmp;
+ vf610_nfc_clear_status(regbase);
+
+ tmp = __raw_readl(reg);
+ tmp &= ~(CMD_BYTE1_MASK | CMD_CODE_MASK | BUFNO_MASK);
+ tmp |= cmd_byte1 << CMD_BYTE1_SHIFT;
+ tmp |= cmd_code << CMD_CODE_SHIFT;
+ __raw_writel(tmp, reg);
+}
+
+/* Two commands */
+static void vf610_nfc_send_commands(void __iomem *regbase, u32 cmd_byte1,
+ u32 cmd_byte2, u32 cmd_code)
+{
+ void __iomem *reg = regbase + NFC_FLASH_CMD1;
+ u32 tmp;
+ vf610_nfc_send_command(regbase, cmd_byte1, cmd_code);
+
+ tmp = __raw_readl(reg);
+ tmp &= ~CMD_BYTE2_MASK;
+ tmp |= cmd_byte2 << CMD_BYTE2_SHIFT;
+ __raw_writel(tmp, reg);
+}
+
+static void vf610_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+ if (column != -1) {
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ if (nfc->chip.options & NAND_BUSWIDTH_16)
+ column = column / 2;
+ vf610_nfc_set_field(mtd, NFC_COL_ADDR, COL_ADDR_MASK,
+ COL_ADDR_SHIFT, column);
+ }
+ if (page != -1)
+ vf610_nfc_set_field(mtd, NFC_ROW_ADDR, ROW_ADDR_MASK,
+ ROW_ADDR_SHIFT, page);
+}
+
+static inline void vf610_nfc_ecc_mode(struct mtd_info *mtd, int ecc_mode)
+{
+ vf610_nfc_set_field(mtd, NFC_FLASH_CONFIG,
+ CONFIG_ECC_MODE_MASK,
+ CONFIG_ECC_MODE_SHIFT, ecc_mode);
+}
+
+static inline void vf610_nfc_transfer_size(void __iomem *regbase, int size)
+{
+ __raw_writel(size, regbase + NFC_SECTOR_SIZE);
+}
+
+/* Send command to NAND chip */
+static void vf610_nfc_command(struct mtd_info *mtd, unsigned command,
+ int column, int page)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ int trfr_sz = nfc->chip.options & NAND_BUSWIDTH_16 ? 1 : 0;
+
+ nfc->buf_offset = max(column, 0);
+ nfc->alt_buf = ALT_BUF_DATA;
+
+ switch (command) {
+ case NAND_CMD_SEQIN:
+ /* Use valid column/page from preread... */
+ vf610_nfc_addr_cycle(mtd, column, page);
+ nfc->buf_offset = 0;
+
+ /*
+ * SEQIN => data => PAGEPROG sequence is done by the controller
+ * hence we do not need to issue the command here...
+ */
+ return;
+ case NAND_CMD_PAGEPROG:
+ trfr_sz += nfc->write_sz;
+ vf610_nfc_ecc_mode(mtd, ECC_HW_MODE);
+ vf610_nfc_transfer_size(nfc->regs, trfr_sz);
+ vf610_nfc_send_commands(nfc->regs, NAND_CMD_SEQIN,
+ command, PROGRAM_PAGE_CMD_CODE);
+ break;
+
+ case NAND_CMD_RESET:
+ vf610_nfc_transfer_size(nfc->regs, 0);
+ vf610_nfc_send_command(nfc->regs, command, RESET_CMD_CODE);
+ break;
+
+ case NAND_CMD_READOOB:
+ trfr_sz += mtd->oobsize;
+ column = mtd->writesize;
+ vf610_nfc_transfer_size(nfc->regs, trfr_sz);
+ vf610_nfc_send_commands(nfc->regs, NAND_CMD_READ0,
+ NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
+ vf610_nfc_addr_cycle(mtd, column, page);
+ vf610_nfc_ecc_mode(mtd, ECC_BYPASS);
+ break;
+
+ case NAND_CMD_READ0:
+ trfr_sz += mtd->writesize + mtd->oobsize;
+ vf610_nfc_transfer_size(nfc->regs, trfr_sz);
+ vf610_nfc_ecc_mode(mtd, ECC_HW_MODE);
+ vf610_nfc_send_commands(nfc->regs, NAND_CMD_READ0,
+ NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
+ vf610_nfc_addr_cycle(mtd, column, page);
+ break;
+
+ case NAND_CMD_PARAM:
+ nfc->alt_buf = ALT_BUF_ONFI;
+ trfr_sz = 3 * sizeof(struct nand_onfi_params);
+ vf610_nfc_transfer_size(nfc->regs, trfr_sz);
+ vf610_nfc_send_command(nfc->regs, NAND_CMD_PARAM,
+ READ_ONFI_PARAM_CMD_CODE);
+ vf610_nfc_set_field(mtd, NFC_ROW_ADDR, ROW_ADDR_MASK,
+ ROW_ADDR_SHIFT, column);
+ vf610_nfc_ecc_mode(mtd, ECC_BYPASS);
+ break;
+
+ case NAND_CMD_ERASE1:
+ vf610_nfc_transfer_size(nfc->regs, 0);
+ vf610_nfc_send_commands(nfc->regs, command,
+ NAND_CMD_ERASE2, ERASE_CMD_CODE);
+ vf610_nfc_addr_cycle(mtd, column, page);
+ break;
+
+ case NAND_CMD_READID:
+ nfc->alt_buf = ALT_BUF_ID;
+ nfc->buf_offset = 0;
+ vf610_nfc_transfer_size(nfc->regs, 0);
+ vf610_nfc_send_command(nfc->regs, command, READ_ID_CMD_CODE);
+ vf610_nfc_set_field(mtd, NFC_ROW_ADDR, ROW_ADDR_MASK,
+ ROW_ADDR_SHIFT, column);
+ break;
+
+ case NAND_CMD_STATUS:
+ nfc->alt_buf = ALT_BUF_STAT;
+ vf610_nfc_transfer_size(nfc->regs, 0);
+ vf610_nfc_send_command(nfc->regs, command, STATUS_READ_CMD_CODE);
+ break;
+ default:
+ return;
+ }
+
+ vf610_nfc_done(mtd);
+
+ nfc->write_sz = 0;
+}
+
+/* Read data from NFC buffers */
+static void vf610_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ uint c = nfc->buf_offset;
+
+ /* Alternate buffers are only supported through read_byte */
+ if (nfc->alt_buf)
+ return;
+
+ vf610_nfc_memcpy(buf, nfc->regs + NFC_MAIN_AREA(0) + c, len);
+
+ nfc->buf_offset += len;
+}
+
+/* Write data to NFC buffers */
+static void vf610_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+ int len)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ uint c = nfc->buf_offset;
+ uint l;
+
+ l = min_t(uint, len, mtd->writesize + mtd->oobsize - c);
+ vf610_nfc_memcpy(nfc->regs + NFC_MAIN_AREA(0) + c, buf, l);
+
+ nfc->write_sz += l;
+ nfc->buf_offset += l;
+}
+
+/* Read byte from NFC buffers */
+static uint8_t vf610_nfc_read_byte(struct mtd_info *mtd)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ u8 tmp;
+ uint c = nfc->buf_offset;
+
+ switch (nfc->alt_buf) {
+ case ALT_BUF_ID:
+ tmp = vf610_nfc_get_id(mtd, c);
+ break;
+ case ALT_BUF_STAT:
+ tmp = vf610_nfc_get_status(mtd);
+ break;
+#ifdef __LITTLE_ENDIAN
+ case ALT_BUF_ONFI:
+ /* Reverse the byte address since the controller uses big endianness */
+ c = nfc->buf_offset ^ 0x3;
+ /* fall-through */
+#endif
+ default:
+ tmp = *((u8 *)(nfc->regs + NFC_MAIN_AREA(0) + c));
+ break;
+ }
+ nfc->buf_offset++;
+ return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 vf610_nfc_read_word(struct mtd_info *mtd)
+{
+ u16 tmp;
+
+ vf610_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+ return tmp;
+}
+
+/* If not provided, upper layers apply a fixed delay. */
+static int vf610_nfc_dev_ready(struct mtd_info *mtd)
+{
+ /* NFC handles R/B internally; always ready. */
+ return 1;
+}
+
+/*
+ * This function supports Vybrid only (MPC5125 would have full RB and four CS)
+ */
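+/*
+ * Example (illustrative): selecting chip 1 sets the R/B select bit and
+ * shifts the chip-select mask, i.e. tmp |= (1 << ROW_ADDR_CHIP_SEL_RB_SHIFT)
+ * | (2 << ROW_ADDR_CHIP_SEL_SHIFT); a negative chip deselects everything.
+ */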
+static void vf610_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+#ifdef CONFIG_VF610
+ u32 tmp = vf610_nfc_read(mtd, NFC_ROW_ADDR);
+ tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
+
+ if (chip >= 0) {
+ tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
+ tmp |= (1 << chip) << ROW_ADDR_CHIP_SEL_SHIFT;
+ }
+
+ vf610_nfc_write(mtd, NFC_ROW_ADDR, tmp);
+#endif
+}
+
+/* Count the number of 0 bits in buff, up to max_bits */
+static inline int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+ uint32_t *buff32 = (uint32_t *)buff;
+ int k, written_bits = 0;
+
+ for (k = 0; k < (size / 4); k++) {
+ written_bits += hweight32(~buff32[k]);
+ if (written_bits > max_bits)
+ break;
+ }
+
+ return written_bits;
+}
+
+static inline int vf610_nfc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+ uint8_t *oob, int page)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
+ u8 ecc_status;
+ u8 ecc_count;
+ int flips;
+ int flips_threshold = nfc->chip.ecc.strength / 2;
+
+ ecc_status = vf610_nfc_read(mtd, ecc_status_off) & 0xff;
+ ecc_count = ecc_status & ECC_STATUS_ERR_COUNT;
+
+ if (!(ecc_status & ECC_STATUS_MASK))
+ return ecc_count;
+
+ /* Read OOB without ECC unit enabled */
+ vf610_nfc_command(mtd, NAND_CMD_READOOB, 0, page);
+ vf610_nfc_read_buf(mtd, oob, mtd->oobsize);
+
+ /*
+ * On an erased page, the count of zero (written) bits, including
+ * OOB, should be zero or at least less than half of the ECC
+ * strength.
+ */
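+ /*
+ * Worked example (illustrative): with ecc.strength = 24 the
+ * threshold is 12; an erased page with a handful of stray zero
+ * bits across data and OOB is rewritten as all-0xff, while 13 or
+ * more zero bits make the page count as a genuine ECC failure.
+ */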
+ flips = count_written_bits(dat, nfc->chip.ecc.size, flips_threshold);
+ flips += count_written_bits(oob, mtd->oobsize, flips_threshold);
+
+ if (unlikely(flips > flips_threshold))
+ return -EINVAL;
+
+ /* Erased page. */
+ memset(dat, 0xff, nfc->chip.ecc.size);
+ memset(oob, 0xff, mtd->oobsize);
+ return flips;
+}
+
+static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ int eccsize = chip->ecc.size;
+ int stat;
+
+ vf610_nfc_read_buf(mtd, buf, eccsize);
+ if (oob_required)
+ vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ stat = vf610_nfc_correct_data(mtd, buf, chip->oob_poi, page);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ return 0;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ return stat;
+ }
+}
+
+/*
+ * ECC will be calculated automatically
+ */
+static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required, int page)
+{
+ struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+ vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+ if (oob_required)
+ vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ /* Always write whole page including OOB due to HW ECC */
+ nfc->write_sz = mtd->writesize + mtd->oobsize;
+
+ return 0;
+}
+
+struct vf610_nfc_config {
+ int hardware_ecc;
+ int width;
+ int flash_bbt;
+};
+
+static int vf610_nfc_nand_init(struct vf610_nfc *nfc, int devnum)
+{
+ struct nand_chip *chip = &nfc->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int err = 0;
+ struct vf610_nfc_config cfg = {
+ .hardware_ecc = 1,
+#ifdef CONFIG_SYS_NAND_BUSWIDTH_16BIT
+ .width = 16,
+#else
+ .width = 8,
+#endif
+ .flash_bbt = 1,
+ };
+
+ nand_set_controller_data(chip, nfc);
+
+ if (cfg.width == 16)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ chip->dev_ready = vf610_nfc_dev_ready;
+ chip->cmdfunc = vf610_nfc_command;
+ chip->read_byte = vf610_nfc_read_byte;
+ chip->read_word = vf610_nfc_read_word;
+ chip->read_buf = vf610_nfc_read_buf;
+ chip->write_buf = vf610_nfc_write_buf;
+ chip->select_chip = vf610_nfc_select_chip;
+
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+ chip->ecc.size = PAGE_2K;
+
+ /* Set configuration register. */
+ vf610_nfc_clear(mtd, NFC_FLASH_CONFIG, CONFIG_16BIT);
+ vf610_nfc_clear(mtd, NFC_FLASH_CONFIG, CONFIG_ADDR_AUTO_INCR_BIT);
+ vf610_nfc_clear(mtd, NFC_FLASH_CONFIG, CONFIG_BUFNO_AUTO_INCR_BIT);
+ vf610_nfc_clear(mtd, NFC_FLASH_CONFIG, CONFIG_BOOT_MODE_BIT);
+ vf610_nfc_clear(mtd, NFC_FLASH_CONFIG, CONFIG_DMA_REQ_BIT);
+ vf610_nfc_set(mtd, NFC_FLASH_CONFIG, CONFIG_FAST_FLASH_BIT);
+
+ /* Disable virtual pages, only one elementary transfer unit */
+ vf610_nfc_set_field(mtd, NFC_FLASH_CONFIG, CONFIG_PAGE_CNT_MASK,
+ CONFIG_PAGE_CNT_SHIFT, 1);
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL)) {
+ err = -ENXIO;
+ goto error;
+ }
+
+ if (cfg.width == 16)
+ vf610_nfc_set(mtd, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+ /* Bad block options. */
+ if (cfg.flash_bbt)
+ chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB |
+ NAND_BBT_CREATE;
+
+ /* Single buffer only, max 256 OOB minus ECC status */
+ if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
+ dev_err(nfc->dev, "Unsupported flash page size\n");
+ err = -ENXIO;
+ goto error;
+ }
+
+ if (cfg.hardware_ecc) {
+ if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
+ dev_err(nfc->dev, "Unsupported flash with hwecc\n");
+ err = -ENXIO;
+ goto error;
+ }
+
+ if (chip->ecc.size != mtd->writesize) {
+ dev_err(nfc->dev, "ecc size: %d\n", chip->ecc.size);
+ dev_err(nfc->dev, "Step size needs to be page size\n");
+ err = -ENXIO;
+ goto error;
+ }
+
+ /* Current HW ECC layouts only use 64 bytes of OOB */
+ if (mtd->oobsize > 64)
+ mtd->oobsize = 64;
+
+ chip->ecc.read_page = vf610_nfc_read_page;
+ chip->ecc.write_page = vf610_nfc_write_page;
+ chip->ecc.mode = NAND_ECC_HW;
+
+ chip->ecc.size = PAGE_2K;
+ chip->ecc.layout = &vf610_nfc_ecc;
+ /* propagate ecc.layout to mtd_info (after the assignment above) */
+ mtd->ecclayout = chip->ecc.layout;
+#if defined(CONFIG_SYS_NAND_VF610_NFC_45_ECC_BYTES)
+ chip->ecc.strength = 24;
+ chip->ecc.bytes = 45;
+#elif defined(CONFIG_SYS_NAND_VF610_NFC_60_ECC_BYTES)
+ chip->ecc.strength = 32;
+ chip->ecc.bytes = 60;
+#endif
+
+ /* Set ECC_STATUS offset */
+ vf610_nfc_set_field(mtd, NFC_FLASH_CONFIG,
+ CONFIG_ECC_SRAM_ADDR_MASK,
+ CONFIG_ECC_SRAM_ADDR_SHIFT,
+ ECC_SRAM_ADDR >> 3);
+
+ /* Enable ECC status in SRAM */
+ vf610_nfc_set(mtd, NFC_FLASH_CONFIG, CONFIG_ECC_SRAM_REQ_BIT);
+ }
+
+ /* second phase scan */
+ err = nand_scan_tail(mtd);
+ if (err)
+ return err;
+
+ err = nand_register(devnum, mtd);
+ if (err)
+ return err;
+
+ return 0;
+
+error:
+ return err;
+}
+
+#if CONFIG_NAND_VF610_NFC_DT
+static const struct udevice_id vf610_nfc_dt_ids[] = {
+ {
+ .compatible = "fsl,vf610-nfc",
+ },
+ { /* sentinel */ }
+};
+
+static int vf610_nfc_dt_probe(struct udevice *dev)
+{
+ struct resource res;
+ struct vf610_nfc *nfc = dev_get_priv(dev);
+ int ret;
+
+ ret = dev_read_resource(dev, 0, &res);
+ if (ret)
+ return ret;
+
+ nfc->regs = devm_ioremap(dev, res.start, resource_size(&res));
+ nfc->dev = dev;
+ return vf610_nfc_nand_init(nfc, 0);
+}
+
+U_BOOT_DRIVER(vf610_nfc_dt) = {
+ .name = "vf610-nfc-dt",
+ .id = UCLASS_MTD,
+ .of_match = vf610_nfc_dt_ids,
+ .priv_auto = sizeof(struct vf610_nfc),
+ .probe = vf610_nfc_dt_probe,
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(vf610_nfc_dt),
+ &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize NAND controller. (error %d)\n",
+ ret);
+}
+#else
+void board_nand_init(void)
+{
+ int err;
+ struct vf610_nfc *nfc;
+
+ nfc = calloc(1, sizeof(*nfc));
+ if (!nfc) {
+ printf("%s: Out of memory\n", __func__);
+ return;
+ }
+
+ nfc->regs = (void __iomem *)CONFIG_SYS_NAND_BASE;
+ err = vf610_nfc_nand_init(nfc, 0);
+ if (err)
+ printf("VF610 NAND init failed (err %d)\n", err);
+}
+#endif /* CONFIG_NAND_VF610_NFC_DT */
diff --git a/roms/u-boot/drivers/mtd/nand/raw/zynq_nand.c b/roms/u-boot/drivers/mtd/nand/raw/zynq_nand.c
new file mode 100644
index 000000000..d79252837
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/raw/zynq_nand.c
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2016 Xilinx, Inc.
+ *
+ * Xilinx Zynq NAND Flash Controller Driver
+ * This driver is based on plat_nand.c and mxc_nand.c drivers
+ */
+
+#include <common.h>
+#include <log.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <nand.h>
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/sys_proto.h>
+#include <dm.h>
+
+/* The NAND flash driver defines */
+#define ZYNQ_NAND_CMD_PHASE 1
+#define ZYNQ_NAND_DATA_PHASE 2
+#define ZYNQ_NAND_ECC_SIZE 512
+#define ZYNQ_NAND_SET_OPMODE_8BIT (0 << 0)
+#define ZYNQ_NAND_SET_OPMODE_16BIT (1 << 0)
+#define ZYNQ_NAND_ECC_STATUS (1 << 6)
+#define ZYNQ_MEMC_CLRCR_INT_CLR1 (1 << 4)
+#define ZYNQ_MEMC_SR_RAW_INT_ST1 (1 << 6)
+#define ZYNQ_MEMC_SR_INT_ST1 (1 << 4)
+#define ZYNQ_MEMC_NAND_ECC_MODE_MASK 0xC
+
+/* Flash memory controller operating parameters */
+#define ZYNQ_NAND_CLR_CONFIG ((0x1 << 1) | /* Disable interrupt */ \
+ (0x1 << 4) | /* Clear interrupt */ \
+ (0x1 << 6)) /* Disable ECC interrupt */
+
+#ifndef CONFIG_NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
+
+/* Assuming 50MHz clock (20ns cycle time) and 3V operation */
+#define ZYNQ_NAND_SET_CYCLES ((0x2 << 20) | /* t_rr from nand_cycles */ \
+ (0x2 << 17) | /* t_ar from nand_cycles */ \
+ (0x1 << 14) | /* t_clr from nand_cycles */ \
+ (0x3 << 11) | /* t_wp from nand_cycles */ \
+ (0x2 << 8) | /* t_rea from nand_cycles */ \
+ (0x5 << 4) | /* t_wc from nand_cycles */ \
+ (0x5 << 0)) /* t_rc from nand_cycles */
+#endif
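+
+/*
+ * Example (illustrative): at the assumed 50MHz clock one cycle is 20ns,
+ * so t_wc = t_rc = 5 cycles gives a 100ns read/write cycle and t_wp = 3
+ * cycles gives a 60ns write pulse.
+ */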
+
+
+#define ZYNQ_NAND_DIRECT_CMD ((0x4 << 23) | /* Chip 0 from interface 1 */ \
+ (0x2 << 21)) /* UpdateRegs operation */
+
+#define ZYNQ_NAND_ECC_CONFIG ((0x1 << 2) | /* ECC available on APB */ \
+ (0x1 << 4) | /* ECC read at end of page */ \
+ (0x0 << 5)) /* No Jumping */
+
+#define ZYNQ_NAND_ECC_CMD1 ((0x80) | /* Write command */ \
+ (0x00 << 8) | /* Read command */ \
+ (0x30 << 16) | /* Read End command */ \
+ (0x1 << 24)) /* Read End command valid */
+
+#define ZYNQ_NAND_ECC_CMD2 ((0x85) | /* Write col change cmd */ \
+ (0x05 << 8) | /* Read col change cmd */ \
+ (0xE0 << 16) | /* Read col change end cmd */ \
+ (0x1 << 24)) /* Read col change end cmd valid */
+/* AXI Address definitions */
+#define START_CMD_SHIFT 3
+#define END_CMD_SHIFT 11
+#define END_CMD_VALID_SHIFT 20
+#define ADDR_CYCLES_SHIFT 21
+#define CLEAR_CS_SHIFT 21
+#define ECC_LAST_SHIFT 10
+#define COMMAND_PHASE (0 << 19)
+#define DATA_PHASE (1 << 19)
+#define ONDIE_ECC_FEATURE_ADDR 0x90
+#define ONDIE_ECC_FEATURE_ENABLE 0x08
+
+#define ZYNQ_NAND_ECC_LAST (1 << ECC_LAST_SHIFT) /* Set ECC_Last */
+#define ZYNQ_NAND_CLEAR_CS (1 << CLEAR_CS_SHIFT) /* Clear chip select */
+
+/* ECC block registers bit position and bit mask */
+#define ZYNQ_NAND_ECC_BUSY (1 << 6) /* ECC block is busy */
+#define ZYNQ_NAND_ECC_MASK 0x00FFFFFF /* ECC value mask */
+
+#define ZYNQ_NAND_ROW_ADDR_CYCL_MASK 0x0F
+#define ZYNQ_NAND_COL_ADDR_CYCL_MASK 0xF0
+
+#define ZYNQ_NAND_MIO_NUM_NAND_8BIT 13
+#define ZYNQ_NAND_MIO_NUM_NAND_16BIT 8
+
+enum zynq_nand_bus_width {
+ NAND_BW_UNKNOWN = -1,
+ NAND_BW_8BIT,
+ NAND_BW_16BIT,
+};
+
+#ifndef NAND_CMD_LOCK_TIGHT
+#define NAND_CMD_LOCK_TIGHT 0x2c
+#endif
+
+#ifndef NAND_CMD_LOCK_STATUS
+#define NAND_CMD_LOCK_STATUS 0x7a
+#endif
+
+/* SMC register set */
+struct zynq_nand_smc_regs {
+ u32 csr; /* 0x00 */
+ u32 reserved0[2];
+ u32 cfr; /* 0x0C */
+ u32 dcr; /* 0x10 */
+ u32 scr; /* 0x14 */
+ u32 sor; /* 0x18 */
+ u32 reserved1[249];
+ u32 esr; /* 0x400 */
+ u32 emcr; /* 0x404 */
+ u32 emcmd1r; /* 0x408 */
+ u32 emcmd2r; /* 0x40C */
+ u32 reserved2[2];
+ u32 eval0r; /* 0x418 */
+};
+
+/*
+ * struct nand_config - Defines the NAND flash driver instance
+ * @parts: Pointer to the mtd_partition structure
+ * @nand_base: Virtual address of the NAND flash device
+ * @end_cmd_pending: End command is pending
+ * @end_cmd: End command
+ */
+struct nand_config {
+ void __iomem *nand_base;
+ u8 end_cmd_pending;
+ u8 end_cmd;
+};
+
+struct nand_drv {
+ struct zynq_nand_smc_regs *reg;
+ struct nand_config config;
+};
+
+struct zynq_nand_info {
+ struct udevice *dev;
+ struct nand_drv nand_ctrl;
+ struct nand_chip nand_chip;
+};
+
+/*
+ * struct zynq_nand_command_format - Defines NAND flash command format
+ * @start_cmd: First cycle command (Start command)
+ * @end_cmd: Second cycle command (Last command)
+ * @addr_cycles: Number of address cycles required to send the address
+ * @end_cmd_valid: The second cycle command is valid for cmd or data phase
+ */
+struct zynq_nand_command_format {
+ u8 start_cmd;
+ u8 end_cmd;
+ u8 addr_cycles;
+ u8 end_cmd_valid;
+};
+
+/* The NAND flash operations command format */
+static const struct zynq_nand_command_format zynq_nand_commands[] = {
+ {NAND_CMD_READ0, NAND_CMD_READSTART, 5, ZYNQ_NAND_CMD_PHASE},
+ {NAND_CMD_RNDOUT, NAND_CMD_RNDOUTSTART, 2, ZYNQ_NAND_CMD_PHASE},
+ {NAND_CMD_READID, NAND_CMD_NONE, 1, 0},
+ {NAND_CMD_STATUS, NAND_CMD_NONE, 0, 0},
+ {NAND_CMD_SEQIN, NAND_CMD_PAGEPROG, 5, ZYNQ_NAND_DATA_PHASE},
+ {NAND_CMD_RNDIN, NAND_CMD_NONE, 2, 0},
+ {NAND_CMD_ERASE1, NAND_CMD_ERASE2, 3, ZYNQ_NAND_CMD_PHASE},
+ {NAND_CMD_RESET, NAND_CMD_NONE, 0, 0},
+ {NAND_CMD_PARAM, NAND_CMD_NONE, 1, 0},
+ {NAND_CMD_GET_FEATURES, NAND_CMD_NONE, 1, 0},
+ {NAND_CMD_SET_FEATURES, NAND_CMD_NONE, 1, 0},
+ {NAND_CMD_LOCK, NAND_CMD_NONE, 0, 0},
+ {NAND_CMD_LOCK_TIGHT, NAND_CMD_NONE, 0, 0},
+ {NAND_CMD_UNLOCK1, NAND_CMD_NONE, 3, 0},
+ {NAND_CMD_UNLOCK2, NAND_CMD_NONE, 3, 0},
+ {NAND_CMD_LOCK_STATUS, NAND_CMD_NONE, 3, 0},
+ {NAND_CMD_NONE, NAND_CMD_NONE, 0, 0},
+ /* Add all the flash commands supported by the flash device */
+};
+
+/* Define default oob placement schemes for large and small page devices */
+static struct nand_ecclayout nand_oob_16 = {
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = {
+ { .offset = 8, .length = 8 }
+ }
+};
+
+static struct nand_ecclayout nand_oob_64 = {
+ .eccbytes = 12,
+ .eccpos = {
+ 52, 53, 54, 55, 56, 57,
+ 58, 59, 60, 61, 62, 63},
+ .oobfree = {
+ { .offset = 2, .length = 50 }
+ }
+};
+
+static struct nand_ecclayout ondie_nand_oob_64 = {
+ .eccbytes = 32,
+
+ .eccpos = {
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 56, 57, 58, 59, 60, 61, 62, 63
+ },
+
+ .oobfree = {
+ { .offset = 4, .length = 4 },
+ { .offset = 20, .length = 4 },
+ { .offset = 36, .length = 4 },
+ { .offset = 52, .length = 4 }
+ }
+};
+
+/*
+ * BBT descriptors for chips with on-die ECC and
+ * chips with 64-byte OOB
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+ NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 4,
+ .len = 4,
+ .veroffs = 20,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/*
+ * zynq_nand_waitfor_ecc_completion - Wait for ECC completion
+ *
+ * returns: status register value on completion, -1 on timeout
+ */
+static int zynq_nand_waitfor_ecc_completion(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct nand_drv *smc = nand_get_controller_data(nand_chip);
+ unsigned long timeout;
+ u32 status;
+
+ /* Wait max 10us */
+ timeout = 10;
+ status = readl(&smc->reg->esr);
+ while (status & ZYNQ_NAND_ECC_BUSY) {
+ status = readl(&smc->reg->esr);
+ if (timeout == 0)
+ return -1;
+ timeout--;
+ udelay(1);
+ }
+
+ return status;
+}
+
+/*
+ * zynq_nand_init_nand_flash - Initialize NAND controller
+ * @option: Device property flags
+ *
+ * This function initializes the NAND flash interface on the NAND controller.
+ *
+ * returns: 0 on success or error value on failure
+ */
+static int zynq_nand_init_nand_flash(struct mtd_info *mtd, int option)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct nand_drv *smc = nand_get_controller_data(nand_chip);
+ int status; /* signed: the completion wait below returns -1 on timeout */
+
+ /* disable interrupts */
+ writel(ZYNQ_NAND_CLR_CONFIG, &smc->reg->cfr);
+#ifndef CONFIG_NAND_ZYNQ_USE_BOOTLOADER1_TIMINGS
+ /* Initialize the NAND interface by setting cycles and operation mode */
+ writel(ZYNQ_NAND_SET_CYCLES, &smc->reg->scr);
+#endif
+ if (option & NAND_BUSWIDTH_16)
+ writel(ZYNQ_NAND_SET_OPMODE_16BIT, &smc->reg->sor);
+ else
+ writel(ZYNQ_NAND_SET_OPMODE_8BIT, &smc->reg->sor);
+
+ writel(ZYNQ_NAND_DIRECT_CMD, &smc->reg->dcr);
+
+ /* Wait till the ECC operation is complete */
+ status = zynq_nand_waitfor_ecc_completion(mtd);
+ if (status < 0) {
+ printf("%s: Timeout\n", __func__);
+ return status;
+ }
+
+ /* Set the command1 and command2 register */
+ writel(ZYNQ_NAND_ECC_CMD1, &smc->reg->emcmd1r);
+ writel(ZYNQ_NAND_ECC_CMD2, &smc->reg->emcmd2r);
+
+ return 0;
+}
+
+/*
+ * zynq_nand_calculate_hwecc - Calculate Hardware ECC
+ * @mtd: Pointer to the mtd_info structure
+ * @data: Pointer to the page data
+ * @ecc_code: Pointer to the ECC buffer where ECC data needs to be stored
+ *
+ * This function retrieves the Hardware ECC data from the controller and returns
+ * ECC data back to the MTD subsystem.
+ *
+ * returns: 0 on success or error value on failure
+ */
+static int zynq_nand_calculate_hwecc(struct mtd_info *mtd, const u8 *data,
+ u8 *ecc_code)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct nand_drv *smc = nand_get_controller_data(nand_chip);
+ u32 ecc_value = 0;
+ u8 ecc_reg, ecc_byte;
+ int ecc_status; /* signed: the completion wait returns -1 on timeout */
+
+ /* Wait till the ECC operation is complete */
+ ecc_status = zynq_nand_waitfor_ecc_completion(mtd);
+ if (ecc_status < 0) {
+ printf("%s: Timeout\n", __func__);
+ return ecc_status;
+ }
+
+ for (ecc_reg = 0; ecc_reg < 4; ecc_reg++) {
+ /* Read ECC value for each block */
+ ecc_value = readl(&smc->reg->eval0r + ecc_reg);
+
+ /* Get the ecc status from ecc read value */
+ ecc_status = (ecc_value >> 24) & 0xFF;
+
+ /* ECC value valid */
+ if (ecc_status & ZYNQ_NAND_ECC_STATUS) {
+ for (ecc_byte = 0; ecc_byte < 3; ecc_byte++) {
+ /* Copy ECC bytes to MTD buffer */
+ *ecc_code = ecc_value & 0xFF;
+ ecc_value = ecc_value >> 8;
+ ecc_code++;
+ }
+ } else {
+ debug("%s: ecc status failed\n", __func__);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * onehot - check whether a value is one-hot
+ * @value: value to check
+ *
+ * A value is one-hot if and only if exactly one bit is set.
+ *
+ * FIXME: Try to move this into common.h
+ */
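+/*
+ * Example: 0b0100 is one-hot (0b0100 & 0b0011 == 0), 0b0110 is not
+ * (0b0110 & 0b0101 == 0b0100), and 0 is rejected by the leading
+ * "value &&" test.
+ */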
+static bool onehot(unsigned short value)
+{
+ bool onehot;
+
+ onehot = value && !(value & (value - 1));
+ return onehot;
+}
+
+/*
+ * zynq_nand_correct_data - ECC correction function
+ * @mtd: Pointer to the mtd_info structure
+ * @buf: Pointer to the page data
+ * @read_ecc: Pointer to the ECC value read from spare data area
+ * @calc_ecc: Pointer to the calculated ECC value
+ *
+ * This function corrects single-bit ECC errors and detects 2-bit errors.
+ *
+ * returns: 0 if no ECC errors found
+ * 1 if single bit error found and corrected.
+ * -1 if multiple ECC errors found.
+ */
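+/*
+ * Worked example (illustrative): the 24-bit ECC splits into two 12-bit
+ * halves. For a single-bit data error, ecc_odd == (~ecc_even & 0xfff)
+ * and ecc_odd encodes the position: e.g. ecc_odd = 0x0ab locates byte
+ * (0x0ab >> 3) & 0x1ff = 21, bit 0x0ab & 0x7 = 3.
+ */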
+static int zynq_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ unsigned char bit_addr;
+ unsigned int byte_addr;
+ unsigned short ecc_odd, ecc_even;
+ unsigned short read_ecc_lower, read_ecc_upper;
+ unsigned short calc_ecc_lower, calc_ecc_upper;
+
+ read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) & 0xfff;
+ read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) & 0xfff;
+
+ calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) & 0xfff;
+ calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) & 0xfff;
+
+ ecc_odd = read_ecc_lower ^ calc_ecc_lower;
+ ecc_even = read_ecc_upper ^ calc_ecc_upper;
+
+ if ((ecc_odd == 0) && (ecc_even == 0))
+ return 0; /* no error */
+
+ if (ecc_odd == (~ecc_even & 0xfff)) {
+ /* bits [11:3] of the error code give the byte offset */
+ byte_addr = (ecc_odd >> 3) & 0x1ff;
+ /* bits [2:0] of the error code give the bit offset */
+ bit_addr = ecc_odd & 0x7;
+ /* Toggle the erroneous bit */
+ buf[byte_addr] ^= (1 << bit_addr);
+ return 1;
+ }
+
+ if (onehot(ecc_odd | ecc_even))
+ return 1; /* one error in parity */
+
+ return -1; /* Uncorrectable error */
+}
+
+/*
+ * zynq_nand_read_oob - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int zynq_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ unsigned long data_phase_addr = 0;
+ int data_width = 4;
+ u8 *p;
+
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+
+ p = chip->oob_poi;
+ chip->read_buf(mtd, p, (mtd->oobsize - data_width));
+ p += mtd->oobsize - data_width;
+
+ data_phase_addr = (unsigned long)chip->IO_ADDR_R;
+ data_phase_addr |= ZYNQ_NAND_CLEAR_CS;
+ chip->IO_ADDR_R = (void __iomem *)data_phase_addr;
+ chip->read_buf(mtd, p, data_width);
+
+ return 0;
+}
+
+/*
+ * zynq_nand_write_oob - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int zynq_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
+{
+ int status = 0, data_width = 4;
+ const u8 *buf = chip->oob_poi;
+ unsigned long data_phase_addr = 0;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+
+ chip->write_buf(mtd, buf, (mtd->oobsize - data_width));
+ buf += mtd->oobsize - data_width;
+
+ data_phase_addr = (unsigned long)chip->IO_ADDR_W;
+ data_phase_addr |= ZYNQ_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ chip->IO_ADDR_W = (void __iomem *)data_phase_addr;
+ chip->write_buf(mtd, buf, data_width);
+
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * zynq_nand_read_page_raw - [Intern] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to read
+ */
+static int zynq_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page)
+{
+ unsigned long data_width = 4;
+ unsigned long data_phase_addr = 0;
+ u8 *p;
+
+ chip->read_buf(mtd, buf, mtd->writesize);
+
+ p = chip->oob_poi;
+ chip->read_buf(mtd, p, (mtd->oobsize - data_width));
+ p += (mtd->oobsize - data_width);
+
+ data_phase_addr = (unsigned long)chip->IO_ADDR_R;
+ data_phase_addr |= ZYNQ_NAND_CLEAR_CS;
+ chip->IO_ADDR_R = (void __iomem *)data_phase_addr;
+
+ chip->read_buf(mtd, p, data_width);
+ return 0;
+}
+
+static int zynq_nand_read_page_raw_nooob(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf, int oob_required, int page)
+{
+ chip->read_buf(mtd, buf, mtd->writesize);
+ return 0;
+}
+
+static int zynq_nand_read_subpage_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, u32 data_offs,
+ u32 readlen, u8 *buf, int page)
+{
+ if (data_offs != 0) {
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_offs, -1);
+ buf += data_offs;
+ }
+ chip->read_buf(mtd, buf, readlen);
+
+ return 0;
+}
+
+/*
+ * zynq_nand_write_page_raw - [Intern] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int zynq_nand_write_page_raw(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf, int oob_required, int page)
+{
+ unsigned long data_width = 4;
+ unsigned long data_phase_addr = 0;
+ u8 *p;
+
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ p = chip->oob_poi;
+ chip->write_buf(mtd, p, (mtd->oobsize - data_width));
+ p += (mtd->oobsize - data_width);
+
+ data_phase_addr = (unsigned long)chip->IO_ADDR_W;
+ data_phase_addr |= ZYNQ_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ chip->IO_ADDR_W = (void __iomem *)data_phase_addr;
+
+ chip->write_buf(mtd, p, data_width);
+
+ return 0;
+}
+
+/*
+ * zynq_nand_write_page_hwecc - Hardware ECC based page write function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * This function writes the data and the hardware-generated ECC values to
+ * the page.
+ */
+static int zynq_nand_write_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf, int oob_required, int page)
+{
+ int i, eccsteps, eccsize = chip->ecc.size;
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ const u8 *p = buf;
+ u32 *eccpos = chip->ecc.layout->eccpos;
+ unsigned long data_phase_addr = 0;
+ unsigned long data_width = 4;
+ u8 *oob_ptr;
+
+ for (eccsteps = chip->ecc.steps; (eccsteps - 1); eccsteps--) {
+ chip->write_buf(mtd, p, eccsize);
+ p += eccsize;
+ }
+ chip->write_buf(mtd, p, (eccsize - data_width));
+ p += eccsize - data_width;
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long) chip->IO_ADDR_W;
+ data_phase_addr |= ZYNQ_NAND_ECC_LAST;
+ chip->IO_ADDR_W = (void __iomem *)data_phase_addr;
+ chip->write_buf(mtd, p, data_width);
+
+ /* Wait for ECC to be calculated and read the error values */
+ p = buf;
+ chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+
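+ /*
+ * The ECC bytes are stored inverted, presumably so that an erased
+ * page (all 0xff including OOB) still carries a matching ECC; note
+ * the mirror inversion in zynq_nand_read_page_hwecc().
+ */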
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ~(ecc_calc[i]);
+
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long)chip->IO_ADDR_W;
+ data_phase_addr &= ~ZYNQ_NAND_ECC_LAST;
+ chip->IO_ADDR_W = (void __iomem *)data_phase_addr;
+
+ /* Write the spare area with ECC bytes */
+ oob_ptr = chip->oob_poi;
+ chip->write_buf(mtd, oob_ptr, (mtd->oobsize - data_width));
+
+ data_phase_addr = (unsigned long)chip->IO_ADDR_W;
+ data_phase_addr |= ZYNQ_NAND_CLEAR_CS;
+ data_phase_addr |= (1 << END_CMD_VALID_SHIFT);
+ chip->IO_ADDR_W = (void __iomem *)data_phase_addr;
+ oob_ptr += (mtd->oobsize - data_width);
+ chip->write_buf(mtd, oob_ptr, data_width);
+
+ return 0;
+}
+
+/*
+ * zynq_nand_write_page_swecc - [REPLACEABLE] software ECC based page
+ * write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ */
+static int zynq_nand_write_page_swecc(struct mtd_info *mtd,
+ struct nand_chip *chip, const u8 *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ const u8 *p = buf;
+ u32 *eccpos = chip->ecc.layout->eccpos;
+
+ /* Software ecc calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
+}
+
+/*
+ * zynq_nand_read_page_hwecc - Hardware ECC based page read function
+ * @mtd: Pointer to the mtd info structure
+ * @chip: Pointer to the NAND chip info structure
+ * @buf: Pointer to the buffer to store read data
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to read
+ *
+ * This function reads data and checks data integrity by comparing the
+ * hardware-generated ECC values with the ECC values read from the spare
+ * area.
+ *
+ * returns: 0 always; ECC operation status is updated in the MTD structure
+ */
+static int zynq_nand_read_page_hwecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf, int oob_required, int page)
+{
+ int i, stat, eccsteps, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_code = chip->buffers->ecccode;
+ u32 *eccpos = chip->ecc.layout->eccpos;
+ unsigned long data_phase_addr = 0;
+ unsigned long data_width = 4;
+ u8 *oob_ptr;
+
+ for (eccsteps = chip->ecc.steps; (eccsteps - 1); eccsteps--) {
+ chip->read_buf(mtd, p, eccsize);
+ p += eccsize;
+ }
+ chip->read_buf(mtd, p, (eccsize - data_width));
+ p += eccsize - data_width;
+
+ /* Set ECC Last bit to 1 */
+ data_phase_addr = (unsigned long)chip->IO_ADDR_R;
+ data_phase_addr |= ZYNQ_NAND_ECC_LAST;
+ chip->IO_ADDR_R = (void __iomem *)data_phase_addr;
+ chip->read_buf(mtd, p, data_width);
+
+ /* Read the calculated ECC value */
+ p = buf;
+ chip->ecc.calculate(mtd, p, &ecc_calc[0]);
+
+ /* Clear ECC last bit */
+ data_phase_addr = (unsigned long)chip->IO_ADDR_R;
+ data_phase_addr &= ~ZYNQ_NAND_ECC_LAST;
+ chip->IO_ADDR_R = (void __iomem *)data_phase_addr;
+
+ /* Read the stored ECC value */
+ oob_ptr = chip->oob_poi;
+ chip->read_buf(mtd, oob_ptr, (mtd->oobsize - data_width));
+
+ /* de-assert chip select */
+ data_phase_addr = (unsigned long)chip->IO_ADDR_R;
+ data_phase_addr |= ZYNQ_NAND_CLEAR_CS;
+ chip->IO_ADDR_R = (void __iomem *)data_phase_addr;
+
+ oob_ptr += (mtd->oobsize - data_width);
+ chip->read_buf(mtd, oob_ptr, data_width);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = ~(chip->oob_poi[eccpos[i]]);
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ /* Check ECC error for all blocks and correct if it is correctable */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/*
+ * zynq_nand_read_page_swecc - [REPLACEABLE] software ECC based page
+ * read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
+ */
+static int zynq_nand_read_page_swecc(struct mtd_info *mtd,
+ struct nand_chip *chip, u8 *buf, int oob_required, int page)
+{
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->buffers->ecccalc;
+ u8 *ecc_code = chip->buffers->ecccode;
+ u32 *eccpos = chip->ecc.layout->eccpos;
+
+ chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ ecc_code[i] = chip->oob_poi[eccpos[i]];
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0)
+ mtd->ecc_stats.failed++;
+ else
+ mtd->ecc_stats.corrected += stat;
+ }
+ return 0;
+}
+
+/*
+ * zynq_nand_select_chip - Select the flash device
+ * @mtd: Pointer to the mtd_info structure
+ * @chip: Chip number to be selected
+ *
+ * This function is empty, as the NAND controller handles the chip select
+ * line internally based on the chip address passed in the command and
+ * data phases.
+ */
+static void zynq_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* Multiple chips are not supported yet */
+}
+
+/*
+ * zynq_nand_cmd_function - Send command to NAND device
+ * @mtd: Pointer to the mtd_info structure
+ * @command: The command to be sent to the flash device
+ * @column: The column address for this command, -1 if none
+ * @page_addr: The page address for this command, -1 if none
+ */
+static void zynq_nand_cmd_function(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_drv *smc = nand_get_controller_data(chip);
+ const struct zynq_nand_command_format *curr_cmd = NULL;
+ u8 addr_cycles = 0;
+ struct nand_config *xnand = &smc->config;
+ void *cmd_addr;
+ unsigned long cmd_data = 0;
+ unsigned long cmd_phase_addr = 0;
+ unsigned long data_phase_addr = 0;
+ u8 end_cmd = 0;
+ u8 end_cmd_valid = 0;
+ u32 index;
+
+ if (xnand->end_cmd_pending) {
+ /*
+ * If this command request matches the pending end command, the
+ * second command cycle has already been issued; clear the
+ * pending state and return.
+ */
+ if (xnand->end_cmd == command) {
+ xnand->end_cmd = 0;
+ xnand->end_cmd_pending = 0;
+ return;
+ }
+ }
+
+ /* Emulate NAND_CMD_READOOB for large page device */
+ if ((mtd->writesize > ZYNQ_NAND_ECC_SIZE) &&
+ (command == NAND_CMD_READOOB)) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Get the command format */
+ for (index = 0; index < ARRAY_SIZE(zynq_nand_commands); index++)
+ if (command == zynq_nand_commands[index].start_cmd)
+ break;
+
+ if (index == ARRAY_SIZE(zynq_nand_commands)) {
+ printf("%s: Unsupported start cmd %02x\n", __func__, command);
+ return;
+ }
+ curr_cmd = &zynq_nand_commands[index];
+
+ /* Clear interrupt */
+ writel(ZYNQ_MEMC_CLRCR_INT_CLR1, &smc->reg->cfr);
+
+ /* Get the command phase address */
+ if (curr_cmd->end_cmd_valid == ZYNQ_NAND_CMD_PHASE)
+ end_cmd_valid = 1;
+
+ if (curr_cmd->end_cmd == (u8)NAND_CMD_NONE)
+ end_cmd = 0x0;
+ else
+ end_cmd = curr_cmd->end_cmd;
+
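+ /*
+ * ONFI packs the address-cycle counts into one byte: low nibble =
+ * row cycles, high nibble = column cycles. E.g. (illustrative)
+ * 0x23 yields 3 row + 2 column = 5 address cycles.
+ */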
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_SEQIN) {
+ addr_cycles = chip->onfi_params.addr_cycles &
+ ZYNQ_NAND_ROW_ADDR_CYCL_MASK;
+ addr_cycles += ((chip->onfi_params.addr_cycles &
+ ZYNQ_NAND_COL_ADDR_CYCL_MASK) >> 4);
+ } else {
+ addr_cycles = curr_cmd->addr_cycles;
+ }
+
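+ /*
+ * The SMC decodes the NAND command from the AXI address itself:
+ * start/end commands, address-cycle count, end-command-valid flag
+ * and the command/data phase are all encoded in address bits (see
+ * the *_SHIFT definitions above), so the later writel(cmd_data,
+ * cmd_addr) issues the whole command phase in a single bus access.
+ */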
+ cmd_phase_addr = (unsigned long)xnand->nand_base |
+ (addr_cycles << ADDR_CYCLES_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (COMMAND_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (curr_cmd->start_cmd << START_CMD_SHIFT);
+
+ cmd_addr = (void __iomem *)cmd_phase_addr;
+
+ /* Get the data phase address */
+ end_cmd_valid = 0;
+
+ data_phase_addr = (unsigned long)xnand->nand_base |
+ (0x0 << CLEAR_CS_SHIFT) |
+ (end_cmd_valid << END_CMD_VALID_SHIFT) |
+ (DATA_PHASE) |
+ (end_cmd << END_CMD_SHIFT) |
+ (0x0 << ECC_LAST_SHIFT);
+
+ chip->IO_ADDR_R = (void __iomem *)data_phase_addr;
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+
+ /* Command phase AXI Read & Write */
+ if (column != -1 && page_addr != -1) {
+ /* Adjust columns for 16 bit bus width */
+ if (chip->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ cmd_data = column;
+ if (mtd->writesize > ZYNQ_NAND_ECC_SIZE) {
+ cmd_data |= page_addr << 16;
+ /* Another address cycle for devices > 128MiB */
+ if (chip->chipsize > (128 << 20)) {
+ writel(cmd_data, cmd_addr);
+ cmd_data = (page_addr >> 16);
+ }
+ } else {
+ cmd_data |= page_addr << 8;
+ }
+ } else if (page_addr != -1) { /* Erase */
+ cmd_data = page_addr;
+ } else if (column != -1) { /* Change read/write column, read id etc */
+ /* Adjust columns for 16 bit bus width */
+ if ((chip->options & NAND_BUSWIDTH_16) &&
+ ((command == NAND_CMD_READ0) ||
+ (command == NAND_CMD_SEQIN) ||
+ (command == NAND_CMD_RNDOUT) ||
+ (command == NAND_CMD_RNDIN)))
+ column >>= 1;
+ cmd_data = column;
+ }
+
+ writel(cmd_data, cmd_addr);
+
+ if (curr_cmd->end_cmd_valid) {
+ xnand->end_cmd = curr_cmd->end_cmd;
+ xnand->end_cmd_pending = 1;
+ }
+
+ ndelay(100);
+
+ if ((command == NAND_CMD_READ0) ||
+ (command == NAND_CMD_RESET) ||
+ (command == NAND_CMD_PARAM) ||
+ (command == NAND_CMD_GET_FEATURES))
+ /* wait until command is processed */
+ nand_wait_ready(mtd);
+}
+
+/*
+ * zynq_nand_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
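+/*
+ * Example (illustrative): for a buffer at offset 1 within a 32-bit word
+ * and len = 11, the head copies 1 byte then 2 bytes to reach alignment,
+ * the main loop copies two 4-byte words, and nothing remains for the
+ * tail.
+ */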
+static void zynq_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ /* Make sure that buf is 32 bit aligned */
+ if (((unsigned long)buf & 0x3) != 0) {
+ if (((unsigned long)buf & 0x1) != 0) {
+ if (len) {
+ *buf = readb(chip->IO_ADDR_R);
+ buf += 1;
+ len--;
+ }
+ }
+
+ if (((unsigned long)buf & 0x3) != 0) {
+ if (len >= 2) {
+ *(u16 *)buf = readw(chip->IO_ADDR_R);
+ buf += 2;
+ len -= 2;
+ }
+ }
+ }
+
+ /* copy aligned data */
+ while (len >= 4) {
+ *(u32 *)buf = readl(chip->IO_ADDR_R);
+ buf += 4;
+ len -= 4;
+ }
+
+ /* mop up any remaining bytes */
+ if (len) {
+ if (len >= 2) {
+ *(u16 *)buf = readw(chip->IO_ADDR_R);
+ buf += 2;
+ len -= 2;
+ }
+ if (len)
+ *buf = readb(chip->IO_ADDR_R);
+ }
+}
+
+/*
+ * zynq_nand_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void zynq_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ const u32 *nand = chip->IO_ADDR_W;
+
+ /* Make sure that buf is 32 bit aligned */
+ if (((unsigned long)buf & 0x3) != 0) {
+ if (((unsigned long)buf & 0x1) != 0) {
+ if (len) {
+ writeb(*buf, nand);
+ buf += 1;
+ len--;
+ }
+ }
+
+ if (((unsigned long)buf & 0x3) != 0) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+ }
+ }
+
+ /* copy aligned data */
+ while (len >= 4) {
+ writel(*(u32 *)buf, nand);
+ buf += 4;
+ len -= 4;
+ }
+
+ /* mop up any remaining bytes */
+ if (len) {
+ if (len >= 2) {
+ writew(*(u16 *)buf, nand);
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len)
+ writeb(*buf, nand);
+ }
+}
+
+/*
+ * zynq_nand_device_ready - Check device ready/busy line
+ * @mtd: Pointer to the mtd_info structure
+ *
+ * returns: 0 on busy or 1 on ready state
+ */
+static int zynq_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ struct nand_drv *smc = nand_get_controller_data(nand_chip);
+ u32 csr_val;
+
+ csr_val = readl(&smc->reg->csr);
+ /* Check the raw_int_status1 bit */
+ if (csr_val & ZYNQ_MEMC_SR_RAW_INT_ST1) {
+ /* Clear the interrupt condition */
+ writel(ZYNQ_MEMC_SR_INT_ST1, &smc->reg->cfr);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int zynq_nand_check_is_16bit_bw_flash(void)
+{
+ int is_16bit_bw = NAND_BW_UNKNOWN;
+ int mio_num_8bit = 0, mio_num_16bit = 0;
+
+ mio_num_8bit = zynq_slcr_get_mio_pin_status("nand8");
+ if (mio_num_8bit == ZYNQ_NAND_MIO_NUM_NAND_8BIT)
+ is_16bit_bw = NAND_BW_8BIT;
+
+ mio_num_16bit = zynq_slcr_get_mio_pin_status("nand16");
+ if (mio_num_8bit == ZYNQ_NAND_MIO_NUM_NAND_8BIT &&
+ mio_num_16bit == ZYNQ_NAND_MIO_NUM_NAND_16BIT)
+ is_16bit_bw = NAND_BW_16BIT;
+
+ return is_16bit_bw;
+}
+
+static int zynq_nand_probe(struct udevice *dev)
+{
+ struct zynq_nand_info *zynq = dev_get_priv(dev);
+ struct nand_chip *nand_chip = &zynq->nand_chip;
+ struct nand_drv *smc = &zynq->nand_ctrl;
+ struct nand_config *xnand = &smc->config;
+ struct mtd_info *mtd;
+ struct resource res;
+ ofnode of_nand;
+ unsigned long ecc_page_size;
+ u8 maf_id, dev_id, i;
+ u8 get_feature[4];
+ u8 set_feature[4] = {ONDIE_ECC_FEATURE_ENABLE, 0x00, 0x00, 0x00};
+ unsigned long ecc_cfg;
+ int ondie_ecc_enabled = 0;
+ int is_16bit_bw;
+
+ smc->reg = (struct zynq_nand_smc_regs *)dev_read_addr(dev);
+ of_nand = dev_read_subnode(dev, "flash@e1000000");
+ if (!ofnode_valid(of_nand)) {
+ printf("Failed to find nand node in dt\n");
+ return -ENODEV;
+ }
+
+ if (!ofnode_is_available(of_nand)) {
+ debug("Nand node in dt disabled\n");
+ return dm_scan_fdt_dev(dev);
+ }
+
+ if (ofnode_read_resource(of_nand, 0, &res)) {
+ printf("Failed to get nand resource\n");
+ return -ENODEV;
+ }
+
+ xnand->nand_base = (void __iomem *)res.start;
+ mtd = nand_to_mtd(nand_chip);
+ nand_set_controller_data(nand_chip, &zynq->nand_ctrl);
+
+ /* Set address of NAND IO lines */
+ nand_chip->IO_ADDR_R = xnand->nand_base;
+ nand_chip->IO_ADDR_W = xnand->nand_base;
+
+ /* Set the driver entry points for MTD */
+ nand_chip->cmdfunc = zynq_nand_cmd_function;
+ nand_chip->dev_ready = zynq_nand_device_ready;
+ nand_chip->select_chip = zynq_nand_select_chip;
+
+ /* If we don't set this delay, the NAND core sets 20us by default */
+ nand_chip->chip_delay = 30;
+
+ /* Buffer read/write routines */
+ nand_chip->read_buf = zynq_nand_read_buf;
+ nand_chip->write_buf = zynq_nand_write_buf;
+
+ is_16bit_bw = zynq_nand_check_is_16bit_bw_flash();
+ if (is_16bit_bw == NAND_BW_UNKNOWN) {
+ printf("%s: Unable to detect NAND based on MIO settings\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (is_16bit_bw == NAND_BW_16BIT)
+ nand_chip->options = NAND_BUSWIDTH_16;
+
+ nand_chip->bbt_options = NAND_BBT_USE_FLASH;
+
+ /* Initialize the NAND flash interface on NAND controller */
+ if (zynq_nand_init_nand_flash(mtd, nand_chip->options) < 0) {
+ printf("%s: nand flash init failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1, NULL)) {
+ printf("%s: nand_scan_ident failed\n", __func__);
+ return -EINVAL;
+ }
+ /* Send the command for reading device ID */
+ nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ maf_id = nand_chip->read_byte(mtd);
+ dev_id = nand_chip->read_byte(mtd);
+
+ if ((maf_id == 0x2c) && ((dev_id == 0xf1) ||
+ (dev_id == 0xa1) || (dev_id == 0xb1) ||
+ (dev_id == 0xaa) || (dev_id == 0xba) ||
+ (dev_id == 0xda) || (dev_id == 0xca) ||
+ (dev_id == 0xac) || (dev_id == 0xbc) ||
+ (dev_id == 0xdc) || (dev_id == 0xcc) ||
+ (dev_id == 0xa3) || (dev_id == 0xb3) ||
+ (dev_id == 0xd3) || (dev_id == 0xc3))) {
+ nand_chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES,
+ ONDIE_ECC_FEATURE_ADDR, -1);
+ for (i = 0; i < 4; i++)
+ writeb(set_feature[i], nand_chip->IO_ADDR_W);
+
+ /* Wait for 1us after writing data with SET_FEATURES command */
+ ndelay(1000);
+
+ nand_chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES,
+ ONDIE_ECC_FEATURE_ADDR, -1);
+ nand_chip->read_buf(mtd, get_feature, 4);
+
+ if (get_feature[0] & ONDIE_ECC_FEATURE_ENABLE) {
+ debug("%s: OnDie ECC flash\n", __func__);
+ ondie_ecc_enabled = 1;
+ } else {
+ printf("%s: Unable to detect OnDie ECC\n", __func__);
+ }
+ }
+
+ if (ondie_ecc_enabled) {
+ /* Bypass the controller ECC block */
+ ecc_cfg = readl(&smc->reg->emcr);
+ ecc_cfg &= ~ZYNQ_MEMC_NAND_ECC_MODE_MASK;
+ writel(ecc_cfg, &smc->reg->emcr);
+
+ /* The software ECC routines won't work
+ * with the SMC controller
+ */
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.strength = 1;
+ nand_chip->ecc.read_page = zynq_nand_read_page_raw_nooob;
+ nand_chip->ecc.read_subpage = zynq_nand_read_subpage_raw;
+ nand_chip->ecc.write_page = zynq_nand_write_page_raw;
+ nand_chip->ecc.read_page_raw = zynq_nand_read_page_raw;
+ nand_chip->ecc.write_page_raw = zynq_nand_write_page_raw;
+ nand_chip->ecc.read_oob = zynq_nand_read_oob;
+ nand_chip->ecc.write_oob = zynq_nand_write_oob;
+ nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.bytes = 0;
+
+ /* NAND with on-die ECC supports subpage reads */
+ nand_chip->options |= NAND_SUBPAGE_READ;
+
+ /*
+ * The on-die ECC codes occupy spare bytes 8-15 of each 16-byte
+ * group (see ondie_nand_oob_64 above)
+ */
+ nand_chip->ecc.layout = &ondie_nand_oob_64;
+ /* Use the BBT pattern descriptors */
+ nand_chip->bbt_td = &bbt_main_descr;
+ nand_chip->bbt_md = &bbt_mirror_descr;
+ } else {
+ /* Hardware ECC generates 3 ECC bytes for each 512 bytes of data */
+ nand_chip->ecc.mode = NAND_ECC_HW;
+ nand_chip->ecc.strength = 1;
+ nand_chip->ecc.size = ZYNQ_NAND_ECC_SIZE;
+ nand_chip->ecc.bytes = 3;
+ nand_chip->ecc.calculate = zynq_nand_calculate_hwecc;
+ nand_chip->ecc.correct = zynq_nand_correct_data;
+ nand_chip->ecc.hwctl = NULL;
+ nand_chip->ecc.read_page = zynq_nand_read_page_hwecc;
+ nand_chip->ecc.write_page = zynq_nand_write_page_hwecc;
+ nand_chip->ecc.read_page_raw = zynq_nand_read_page_raw;
+ nand_chip->ecc.write_page_raw = zynq_nand_write_page_raw;
+ nand_chip->ecc.read_oob = zynq_nand_read_oob;
+ nand_chip->ecc.write_oob = zynq_nand_write_oob;
+
+ switch (mtd->writesize) {
+ case 512:
+ ecc_page_size = 0x1;
+ /* Set the ECC memory config register */
+ writel((ZYNQ_NAND_ECC_CONFIG | ecc_page_size),
+ &smc->reg->emcr);
+ break;
+ case 1024:
+ ecc_page_size = 0x2;
+ /* Set the ECC memory config register */
+ writel((ZYNQ_NAND_ECC_CONFIG | ecc_page_size),
+ &smc->reg->emcr);
+ break;
+ case 2048:
+ ecc_page_size = 0x3;
+ /* Set the ECC memory config register */
+ writel((ZYNQ_NAND_ECC_CONFIG | ecc_page_size),
+ &smc->reg->emcr);
+ break;
+ default:
+ nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->ecc.calculate = nand_calculate_ecc;
+ nand_chip->ecc.correct = nand_correct_data;
+ nand_chip->ecc.read_page = zynq_nand_read_page_swecc;
+ nand_chip->ecc.write_page = zynq_nand_write_page_swecc;
+ nand_chip->ecc.size = 256;
+ break;
+ }
+
+ if (mtd->oobsize == 16)
+ nand_chip->ecc.layout = &nand_oob_16;
+ else if (mtd->oobsize == 64)
+ nand_chip->ecc.layout = &nand_oob_64;
+ else
+ printf("%s: No oob layout found\n", __func__);
+ }
+
+ /* Second phase scan */
+ if (nand_scan_tail(mtd)) {
+ printf("%s: nand_scan_tail failed\n", __func__);
+ return -EINVAL;
+ }
+ if (nand_register(0, mtd))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct udevice_id zynq_nand_dt_ids[] = {
+ {.compatible = "arm,pl353-smc-r2p1",},
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(zynq_nand) = {
+ .name = "zynq_nand",
+ .id = UCLASS_MTD,
+ .of_match = zynq_nand_dt_ids,
+ .probe = zynq_nand_probe,
+ .priv_auto = sizeof(struct zynq_nand_info),
+};
+
+void board_nand_init(void)
+{
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MTD,
+ DM_DRIVER_GET(zynq_nand), &dev);
+ if (ret && ret != -ENODEV)
+ pr_err("Failed to initialize %s. (error %d)\n", dev->name, ret);
+}
diff --git a/roms/u-boot/drivers/mtd/nand/spi/Kconfig b/roms/u-boot/drivers/mtd/nand/spi/Kconfig
new file mode 100644
index 000000000..0777dfdf0
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/Kconfig
@@ -0,0 +1,7 @@
+menuconfig MTD_SPI_NAND
+ bool "SPI NAND device Support"
+ depends on DM_MTD && DM_SPI
+ select MTD_NAND_CORE
+ select SPI_MEM
+ help
+ This is the framework for the SPI NAND device drivers.
diff --git a/roms/u-boot/drivers/mtd/nand/spi/Makefile b/roms/u-boot/drivers/mtd/nand/spi/Makefile
new file mode 100644
index 000000000..6c65b187e
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+spinand-objs := core.o gigadevice.o macronix.o micron.o toshiba.o winbond.o
+obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/roms/u-boot/drivers/mtd/nand/spi/core.c b/roms/u-boot/drivers/mtd/nand/spi/core.c
new file mode 100644
index 000000000..e5330958c
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/core.c
@@ -0,0 +1,1255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#define pr_fmt(fmt) "spi-nand: " fmt
+
+#ifndef __UBOOT__
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/spinand.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#else
+#include <common.h>
+#include <errno.h>
+#include <watchdog.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/mtd/spinand.h>
+#endif
+
+/* SPI NAND index visible in MTD names */
+static int spi_nand_idx;
+
+static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
+ const struct nand_page_io_req *req,
+ u16 *column)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int shift;
+
+ if (nand->memorg.planes_per_lun < 2)
+ return;
+
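+ /*
+ * Example (illustrative): with a 2048-byte page, fls(2048) = 12,
+ * so plane 1 sets bit 12 of the column, just above the in-page
+ * offset bits.
+ */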
+ /* The plane number is passed in MSB just above the column address */
+ shift = fls(nand->memorg.pagesize);
+ *column |= req->pos.plane << shift;
+}
+
+static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+{
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ *val = *spinand->scratchbuf;
+ return 0;
+}
+
+static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = val;
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_read_status(struct spinand_device *spinand, u8 *status)
+{
+ return spinand_read_reg_op(spinand, REG_STATUS, status);
+}
+
+static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ *cfg = spinand->cfg_cache[spinand->cur_target];
+ return 0;
+}
+
+static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(spinand->cur_target < 0 ||
+ spinand->cur_target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cfg_cache[spinand->cur_target] == cfg)
+ return 0;
+
+ ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
+ if (ret)
+ return ret;
+
+ spinand->cfg_cache[spinand->cur_target] = cfg;
+ return 0;
+}
+
+/**
+ * spinand_upd_cfg() - Update the configuration register
+ * @spinand: the spinand device
+ * @mask: the mask encoding the bits to update in the config reg
+ * @val: the new value to apply
+ *
+ * Update the configuration register.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
+{
+ int ret;
+ u8 cfg;
+
+ ret = spinand_get_cfg(spinand, &cfg);
+ if (ret)
+ return ret;
+
+ cfg &= ~mask;
+ cfg |= val;
+
+ return spinand_set_cfg(spinand, cfg);
+}
+
+/**
+ * spinand_select_target() - Select a specific NAND target/die
+ * @spinand: the spinand device
+ * @target: the target/die to select
+ *
+ * Select a new target/die. If the chip only has one die, this function is
+ * a NOOP.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_select_target(struct spinand_device *spinand, unsigned int target)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ if (WARN_ON(target >= nand->memorg.ntargets))
+ return -EINVAL;
+
+ if (spinand->cur_target == target)
+ return 0;
+
+ if (nand->memorg.ntargets == 1) {
+ spinand->cur_target = target;
+ return 0;
+ }
+
+ ret = spinand->select_target(spinand, target);
+ if (ret)
+ return ret;
+
+ spinand->cur_target = target;
+ return 0;
+}
+
+static int spinand_init_cfg_cache(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct udevice *dev = spinand->slave->dev;
+ unsigned int target;
+ int ret;
+
+ spinand->cfg_cache = devm_kzalloc(dev,
+ sizeof(*spinand->cfg_cache) *
+ nand->memorg.ntargets,
+ GFP_KERNEL);
+ if (!spinand->cfg_cache)
+ return -ENOMEM;
+
+ for (target = 0; target < nand->memorg.ntargets; target++) {
+ ret = spinand_select_target(spinand, target);
+ if (ret)
+ return ret;
+
+ /*
+ * We use spinand_read_reg_op() instead of spinand_get_cfg()
+ * here to bypass the config cache.
+ */
+ ret = spinand_read_reg_op(spinand, REG_CFG,
+ &spinand->cfg_cache[target]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spinand_init_quad_enable(struct spinand_device *spinand)
+{
+ bool enable = false;
+
+ if (!(spinand->flags & SPINAND_HAS_QE_BIT))
+ return 0;
+
+ if (spinand->op_templates.read_cache->data.buswidth == 4 ||
+ spinand->op_templates.write_cache->data.buswidth == 4 ||
+ spinand->op_templates.update_cache->data.buswidth == 4)
+ enable = true;
+
+ return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
+ enable ? CFG_QUAD_ENABLE : 0);
+}
+
+static int spinand_ecc_enable(struct spinand_device *spinand,
+ bool enable)
+{
+ return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
+ enable ? CFG_ECC_ENABLE : 0);
+}
+
+static int spinand_write_enable_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
+
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_load_page_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
+
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_read_from_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct spi_mem_op op = *spinand->op_templates.read_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_page_io_req adjreq = *req;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+ int ret;
+
+ if (req->datalen) {
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.dataoffs = 0;
+ adjreq.databuf.in = spinand->databuf;
+ buf = spinand->databuf;
+ nbytes = adjreq.datalen;
+ }
+
+ if (req->ooblen) {
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ adjreq.oobbuf.in = spinand->oobbuf;
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+ column = nanddev_page_size(nand);
+ }
+ }
+
+ spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+ op.addr.val = column;
+
+ /*
+ * Some controllers are limited in terms of max RX data size. In this
+ * case, just repeat the READ_CACHE operation after updating the
+ * column.
+ */
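+ /*
+ * E.g. (illustrative figures): with a 512-byte controller RX limit
+ * and a 2048+64 byte page, this loop issues five READ_CACHE ops,
+ * advancing the column after each one.
+ */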
+ while (nbytes) {
+ op.data.buf.in = buf;
+ op.data.nbytes = nbytes;
+ ret = spi_mem_adjust_op_size(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ buf += op.data.nbytes;
+ nbytes -= op.data.nbytes;
+ op.addr.val += op.data.nbytes;
+ }
+
+ if (req->datalen)
+ memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
+ req->datalen);
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+ req->ooblen);
+ }
+
+ return 0;
+}
+
+static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct spi_mem_op op = *spinand->op_templates.write_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_page_io_req adjreq = *req;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+ int ret;
+
+ memset(spinand->databuf, 0xff,
+ nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand));
+
+ if (req->datalen) {
+ memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
+ req->datalen);
+ adjreq.dataoffs = 0;
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.databuf.out = spinand->databuf;
+ nbytes = adjreq.datalen;
+ buf = spinand->databuf;
+ }
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+ mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
+ spinand->oobbuf,
+ req->ooboffs,
+ req->ooblen);
+ else
+ memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
+ req->ooblen);
+
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+ column = nanddev_page_size(nand);
+ }
+ }
+
+ spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+
+ op = *spinand->op_templates.write_cache;
+ op.addr.val = column;
+
+ /*
+ * Some controllers are limited in terms of max TX data size. In this
+ * case, split the operation into one LOAD CACHE and one or more
+ * LOAD RANDOM CACHE.
+ */
+ while (nbytes) {
+ op.data.buf.out = buf;
+ op.data.nbytes = nbytes;
+
+ ret = spi_mem_adjust_op_size(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ buf += op.data.nbytes;
+ nbytes -= op.data.nbytes;
+ op.addr.val += op.data.nbytes;
+
+ /*
+ * We need to use the RANDOM LOAD CACHE operation if there's
+ * more than one iteration, because the LOAD operation resets
+ * the cache to 0xff.
+ */
+ if (nbytes) {
+ column = op.addr.val;
+ op = *spinand->op_templates.update_cache;
+ op.addr.val = column;
+ }
+ }
+
+ return 0;
+}
+
+static int spinand_program_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int row = nanddev_pos_to_row(nand, &req->pos);
+ struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
+
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_erase_op(struct spinand_device *spinand,
+ const struct nand_pos *pos)
+{
+ struct nand_device *nand = &spinand->base;
+ unsigned int row = nanddev_pos_to_row(nand, pos);
+ struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
+
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int spinand_wait(struct spinand_device *spinand, u8 *s)
+{
+ unsigned long start, stop;
+ u8 status;
+ int ret;
+
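+ /* Poll for up to 400ms; get_timer() counts in milliseconds */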
+ start = get_timer(0);
+ stop = 400;
+ do {
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ return ret;
+
+ if (!(status & STATUS_BUSY))
+ goto out;
+ } while (get_timer(start) < stop);
+
+ /*
+ * Extra read, just in case the STATUS_BUSY bit has cleared
+ * since our last check
+ */
+ ret = spinand_read_status(spinand, &status);
+ if (ret)
+ return ret;
+
+out:
+ if (s)
+ *s = status;
+
+ return status & STATUS_BUSY ? -ETIMEDOUT : 0;
+}
+
+static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
+{
+ struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
+ SPINAND_MAX_ID_LEN);
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (!ret)
+ memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
+
+ return ret;
+}
+
+static int spinand_reset_op(struct spinand_device *spinand)
+{
+ struct spi_mem_op op = SPINAND_RESET_OP;
+ int ret;
+
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ return spinand_wait(spinand, NULL);
+}
+
+static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
+{
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
+}
+
+static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ if (spinand->eccinfo.get_status)
+ return spinand->eccinfo.get_status(spinand, status);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * We have no way to know exactly how many bitflips have been
+ * fixed, so let's return the maximum possible value so that
+ * wear-leveling layers move the data immediately.
+ */
+ return nand->eccreq.strength;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req,
+ bool ecc_enabled)
+{
+ u8 status;
+ int ret;
+
+ ret = spinand_load_page_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (ret < 0)
+ return ret;
+
+ ret = spinand_read_from_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ if (!ecc_enabled)
+ return 0;
+
+ return spinand_check_ecc_status(spinand, status);
+}
+
+static int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+{
+ u8 status;
+ int ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_to_cache_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_program_op(spinand, req);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+ return ret;
+}
+
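+/*
+ * Per MTD convention, returns the maximum number of bitflips corrected
+ * in any single page on success, or a negative error code. Uncorrectable
+ * pages are accounted in mtd->ecc_stats and reported as -EBADMSG only
+ * once the whole request has been attempted.
+ */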
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int max_bitflips = 0;
+ struct nand_io_iter iter;
+ bool enable_ecc = false;
+ bool ecc_failed = false;
+ int ret = 0;
+
+ if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
+ enable_ecc = true;
+
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+
+ nanddev_io_for_each_page(nand, from, ops, &iter) {
+ WATCHDOG_RESET();
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_ecc_enable(spinand, enable_ecc);
+ if (ret)
+ break;
+
+ ret = spinand_read_page(spinand, &iter.req, enable_ecc);
+ if (ret < 0 && ret != -EBADMSG)
+ break;
+
+ if (ret == -EBADMSG) {
+ ecc_failed = true;
+ mtd->ecc_stats.failed++;
+ ret = 0;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ }
+
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+#ifndef __UBOOT__
+ mutex_unlock(&spinand->lock);
+#endif
+ if (ecc_failed && !ret)
+ ret = -EBADMSG;
+
+ return ret ? ret : max_bitflips;
+}
+
+static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_io_iter iter;
+ bool enable_ecc = false;
+ int ret = 0;
+
+ if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
+ enable_ecc = true;
+
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+
+ nanddev_io_for_each_page(nand, to, ops, &iter) {
+ WATCHDOG_RESET();
+ ret = spinand_select_target(spinand, iter.req.pos.target);
+ if (ret)
+ break;
+
+ ret = spinand_ecc_enable(spinand, enable_ecc);
+ if (ret)
+ break;
+
+ ret = spinand_write_page(spinand, &iter.req);
+ if (ret)
+ break;
+
+ ops->retlen += iter.req.datalen;
+ ops->oobretlen += iter.req.ooblen;
+ }
+
+#ifndef __UBOOT__
+ mutex_unlock(&spinand->lock);
+#endif
+
+ return ret;
+}
+
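+/*
+ * A block is treated as bad when the first two OOB bytes of the page at
+ * @pos are not both 0xff (the de-facto bad-block-marker convention).
+ * Note that a failed read also makes the block look bad here, which errs
+ * on the safe side.
+ */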
+static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooblen = sizeof(marker),
+ .ooboffs = 0,
+ .oobbuf.in = marker,
+ .mode = MTD_OPS_RAW,
+ };
+ int ret;
+
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_read_page(spinand, &req, false);
+ if (ret)
+ return ret;
+
+ if (marker[0] != 0xff || marker[1] != 0xff)
+ return true;
+
+ return false;
+}
+
+static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+#ifndef __UBOOT__
+ struct spinand_device *spinand = nand_to_spinand(nand);
+#endif
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+ ret = nanddev_isbad(nand, &pos);
+#ifndef __UBOOT__
+ mutex_unlock(&spinand->lock);
+#endif
+ return ret;
+}
+
+static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 marker[2] = { };
+ struct nand_page_io_req req = {
+ .pos = *pos,
+ .ooboffs = 0,
+ .ooblen = sizeof(marker),
+ .oobbuf.out = marker,
+ .mode = MTD_OPS_RAW,
+ };
+ int ret;
+
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ return spinand_write_page(spinand, &req);
+}
+
+static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+#ifndef __UBOOT__
+ struct spinand_device *spinand = nand_to_spinand(nand);
+#endif
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+ ret = nanddev_markbad(nand, &pos);
+#ifndef __UBOOT__
+ mutex_unlock(&spinand->lock);
+#endif
+ return ret;
+}
+
+static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct spinand_device *spinand = nand_to_spinand(nand);
+ u8 status;
+ int ret;
+
+ ret = spinand_select_target(spinand, pos->target);
+ if (ret)
+ return ret;
+
+ ret = spinand_write_enable_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_erase_op(spinand, pos);
+ if (ret)
+ return ret;
+
+ ret = spinand_wait(spinand, &status);
+ if (!ret && (status & STATUS_ERASE_FAILED))
+ ret = -EIO;
+
+ return ret;
+}
+
+static int spinand_mtd_erase(struct mtd_info *mtd,
+ struct erase_info *einfo)
+{
+#ifndef __UBOOT__
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+#endif
+ int ret;
+
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+ ret = nanddev_mtd_erase(mtd, einfo);
+#ifndef __UBOOT__
+ mutex_unlock(&spinand->lock);
+#endif
+
+ return ret;
+}
+
+static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
+{
+#ifndef __UBOOT__
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+#endif
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos;
+ int ret;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+#ifndef __UBOOT__
+ mutex_lock(&spinand->lock);
+#endif
+ ret = nanddev_isreserved(nand, &pos);
+#ifndef __UBOOT__
+ mutex_unlock(&spinand->lock);
+#endif
+
+ return ret;
+}
+
+const struct spi_mem_op *
+spinand_find_supported_op(struct spinand_device *spinand,
+ const struct spi_mem_op *ops,
+ unsigned int nops)
+{
+ unsigned int i;
+
+ for (i = 0; i < nops; i++) {
+ if (spi_mem_supports_op(spinand->slave, &ops[i]))
+ return &ops[i];
+ }
+
+ return NULL;
+}
+
+static const struct nand_ops spinand_ops = {
+ .erase = spinand_erase,
+ .markbad = spinand_markbad,
+ .isbad = spinand_isbad,
+};
+
+static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &gigadevice_spinand_manufacturer,
+ &macronix_spinand_manufacturer,
+ &micron_spinand_manufacturer,
+ &toshiba_spinand_manufacturer,
+ &winbond_spinand_manufacturer,
+};
+
+static int spinand_manufacturer_detect(struct spinand_device *spinand)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
+ ret = spinand_manufacturers[i]->ops->detect(spinand);
+ if (ret > 0) {
+ spinand->manufacturer = spinand_manufacturers[i];
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static int spinand_manufacturer_init(struct spinand_device *spinand)
+{
+ if (spinand->manufacturer->ops->init)
+ return spinand->manufacturer->ops->init(spinand);
+
+ return 0;
+}
+
+static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
+{
+ /* Release manufacturer private data */
+ if (spinand->manufacturer->ops->cleanup)
+ return spinand->manufacturer->ops->cleanup(spinand);
+}
+
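+/*
+ * Pick the first entry in @variants that the controller can execute for
+ * a full page + OOB transfer, possibly split into several chunks. The
+ * SPINAND_OP_VARIANTS() lists in the manufacturer drivers are ordered
+ * fastest first (e.g. quad before dual before single I/O), so this
+ * effectively selects the fastest supported access mode.
+ */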
+static const struct spi_mem_op *
+spinand_select_op_variant(struct spinand_device *spinand,
+ const struct spinand_op_variants *variants)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ for (i = 0; i < variants->nops; i++) {
+ struct spi_mem_op op = variants->ops[i];
+ unsigned int nbytes;
+ int ret;
+
+ nbytes = nanddev_per_page_oobsize(nand) +
+ nanddev_page_size(nand);
+
+ while (nbytes) {
+ op.data.nbytes = nbytes;
+ ret = spi_mem_adjust_op_size(spinand->slave, &op);
+ if (ret)
+ break;
+
+ if (!spi_mem_supports_op(spinand->slave, &op))
+ break;
+
+ nbytes -= op.data.nbytes;
+ }
+
+ if (!nbytes)
+ return &variants->ops[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * spinand_match_and_init() - Try to find a match between a device ID and an
+ * entry in a spinand_info table
+ * @spinand: SPI NAND object
+ * @table: SPI NAND device description table
+ * @table_size: size of the device description table
+ * @devid: device ID byte retrieved through the READ_ID command
+ *
+ * Should be used by SPI NAND manufacturer drivers when they want to find a
+ * match between a device ID retrieved through the READ_ID command and an
+ * entry in the SPI NAND description table. If a match is found, the spinand
+ * object will be initialized with information provided by the matching
+ * spinand_info entry.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_match_and_init(struct spinand_device *spinand,
+ const struct spinand_info *table,
+ unsigned int table_size, u8 devid)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+
+ for (i = 0; i < table_size; i++) {
+ const struct spinand_info *info = &table[i];
+ const struct spi_mem_op *op;
+
+ if (devid != info->devid)
+ continue;
+
+ nand->memorg = info->memorg;
+ nand->eccreq = info->eccreq;
+ spinand->eccinfo = info->eccinfo;
+ spinand->flags = info->flags;
+ spinand->select_target = info->select_target;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.read_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.read_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.write_cache);
+ if (!op)
+ return -ENOTSUPP;
+
+ spinand->op_templates.write_cache = op;
+
+ op = spinand_select_op_variant(spinand,
+ info->op_variants.update_cache);
+ spinand->op_templates.update_cache = op;
+
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int spinand_detect(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_read_id_op(spinand, spinand->id.data);
+ if (ret)
+ return ret;
+
+ spinand->id.len = SPINAND_MAX_ID_LEN;
+
+ ret = spinand_manufacturer_detect(spinand);
+ if (ret) {
+ dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
+ SPINAND_MAX_ID_LEN, spinand->id.data);
+ return ret;
+ }
+
+ if (nand->memorg.ntargets > 1 && !spinand->select_target) {
+ dev_err(spinand->slave->dev,
+ "SPI NANDs with more than one die must implement ->select_target()\n");
+ return -EINVAL;
+ }
+
+ dev_info(spinand->slave->dev,
+ "%s SPI NAND was found.\n", spinand->manufacturer->name);
+ dev_info(spinand->slave->dev,
+ "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
+ nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
+ nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
+
+ return 0;
+}
+
+static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = 62;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
+ .ecc = spinand_noecc_ooblayout_ecc,
+ .rfree = spinand_noecc_ooblayout_free,
+};
+
+static int spinand_init(struct spinand_device *spinand)
+{
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ int ret, i;
+
+ /*
+ * We need a scratch buffer because the spi_mem interface requires that
+ * the buffer passed in spi_mem_op->data.buf be DMA-able.
+ */
+ spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
+ if (!spinand->scratchbuf)
+ return -ENOMEM;
+
+ ret = spinand_detect(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ /*
+ * Use kzalloc() instead of devm_kzalloc() here, because some drivers
+ * may use this buffer for DMA access.
+ * Memory allocated by devm_ does not guarantee DMA-safe alignment.
+ */
+ spinand->databuf = kzalloc(nanddev_page_size(nand) +
+ nanddev_per_page_oobsize(nand),
+ GFP_KERNEL);
+ if (!spinand->databuf) {
+ ret = -ENOMEM;
+ goto err_free_bufs;
+ }
+
+ spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
+
+ ret = spinand_init_cfg_cache(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_init_quad_enable(spinand);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_manufacturer_init(spinand);
+ if (ret) {
+ dev_err(spinand->slave->dev,
+ "Failed to initialize the SPI NAND chip (err = %d)\n",
+ ret);
+ goto err_free_bufs;
+ }
+
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ goto err_free_bufs;
+
+ ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
+ if (ret)
+ goto err_free_bufs;
+ }
+
+ ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
+ if (ret)
+ goto err_manuf_cleanup;
+
+ /*
+ * Right now we don't support ECC, so make the whole OOB
+ * area available to the user.
+ */
+ mtd->_read_oob = spinand_mtd_read;
+ mtd->_write_oob = spinand_mtd_write;
+ mtd->_block_isbad = spinand_mtd_block_isbad;
+ mtd->_block_markbad = spinand_mtd_block_markbad;
+ mtd->_block_isreserved = spinand_mtd_block_isreserved;
+ mtd->_erase = spinand_mtd_erase;
+
+ if (spinand->eccinfo.ooblayout)
+ mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
+ else
+ mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
+
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ goto err_cleanup_nanddev;
+
+ mtd->oobavail = ret;
+
+ return 0;
+
+err_cleanup_nanddev:
+ nanddev_cleanup(nand);
+
+err_manuf_cleanup:
+ spinand_manufacturer_cleanup(spinand);
+
+err_free_bufs:
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+ return ret;
+}
+
+static void spinand_cleanup(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ nanddev_cleanup(nand);
+ spinand_manufacturer_cleanup(spinand);
+ kfree(spinand->databuf);
+ kfree(spinand->scratchbuf);
+}
+
+static int spinand_probe(struct udevice *dev)
+{
+ struct spinand_device *spinand = dev_get_priv(dev);
+ struct spi_slave *slave = dev_get_parent_priv(dev);
+ struct mtd_info *mtd = dev_get_uclass_priv(dev);
+ struct nand_device *nand = spinand_to_nand(spinand);
+ int ret;
+
+#ifndef __UBOOT__
+ spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
+ GFP_KERNEL);
+ if (!spinand)
+ return -ENOMEM;
+
+ spinand->spimem = mem;
+ spi_mem_set_drvdata(mem, spinand);
+ spinand_set_of_node(spinand, mem->spi->dev.of_node);
+ mutex_init(&spinand->lock);
+
+ mtd = spinand_to_mtd(spinand);
+ mtd->dev.parent = &mem->spi->dev;
+#else
+ nand->mtd = mtd;
+ mtd->priv = nand;
+ mtd->dev = dev;
+ mtd->name = malloc(20);
+ if (!mtd->name)
+ return -ENOMEM;
+ sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
+ spinand->slave = slave;
+ spinand_set_ofnode(spinand, dev_ofnode(dev));
+#endif
+
+ ret = spinand_init(spinand);
+ if (ret)
+ return ret;
+
+#ifndef __UBOOT__
+ ret = mtd_device_register(mtd, NULL, 0);
+#else
+ ret = add_mtd_device(mtd);
+#endif
+ if (ret)
+ goto err_spinand_cleanup;
+
+ return 0;
+
+err_spinand_cleanup:
+ spinand_cleanup(spinand);
+
+ return ret;
+}
+
+#ifndef __UBOOT__
+static int spinand_remove(struct udevice *slave)
+{
+ struct spinand_device *spinand;
+ struct mtd_info *mtd;
+ int ret;
+
+ spinand = spi_mem_get_drvdata(slave);
+ mtd = spinand_to_mtd(spinand);
+ free(mtd->name);
+
+ ret = mtd_device_unregister(mtd);
+ if (ret)
+ return ret;
+
+ spinand_cleanup(spinand);
+
+ return 0;
+}
+
+static const struct spi_device_id spinand_ids[] = {
+ { .name = "spi-nand" },
+ { /* sentinel */ },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id spinand_of_ids[] = {
+ { .compatible = "spi-nand" },
+ { /* sentinel */ },
+};
+#endif
+
+static struct spi_mem_driver spinand_drv = {
+ .spidrv = {
+ .id_table = spinand_ids,
+ .driver = {
+ .name = "spi-nand",
+ .of_match_table = of_match_ptr(spinand_of_ids),
+ },
+ },
+ .probe = spinand_probe,
+ .remove = spinand_remove,
+};
+module_spi_mem_driver(spinand_drv);
+
+MODULE_DESCRIPTION("SPI NAND framework");
+MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
+MODULE_LICENSE("GPL v2");
+#endif /* __UBOOT__ */
+
+static const struct udevice_id spinand_ids[] = {
+ { .compatible = "spi-nand" },
+ { /* sentinel */ },
+};
+
+U_BOOT_DRIVER(spinand) = {
+ .name = "spi_nand",
+ .id = UCLASS_MTD,
+ .of_match = spinand_ids,
+ .priv_auto = sizeof(struct spinand_device),
+ .probe = spinand_probe,
+};
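+
+/*
+ * The driver binds to any device tree node with compatible = "spi-nand".
+ * A minimal (hypothetical) node could look like:
+ *
+ * &spi0 {
+ *         flash@0 {
+ *                 compatible = "spi-nand";
+ *                 reg = <0>;
+ *         };
+ * };
+ */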
diff --git a/roms/u-boot/drivers/mtd/nand/spi/gigadevice.c b/roms/u-boot/drivers/mtd/nand/spi/gigadevice.c
new file mode 100644
index 000000000..a2c93486f
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/gigadevice.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Stefan Roese <sr@denx.de>
+ *
+ * Derived from drivers/mtd/nand/spi/micron.c
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ */
+
+#ifndef __UBOOT__
+#include <malloc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_GIGADEVICE 0xC8
+#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
+#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
+
+#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS (1 << 4)
+#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS (3 << 4)
+
+#define GD5FXGQXXEXXG_REG_STATUS2 0xf0
+
+/* Q4 devices, QUADIO: Dummy bytes valid for 1 and 2 Gbit variants */
+static SPINAND_OP_VARIANTS(gd5fxgq4_read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+/* Q5 devices, QUADIO: Dummy bytes only valid for 1 Gbit variants */
+static SPINAND_OP_VARIANTS(gd5f1gq5_read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int gd5fxgqxxexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 64;
+ region->length = 64;
+
+ return 0;
+}
+
+static int gd5fxgqxxexxg_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 1 byte for the BBM. */
+ region->offset = 1;
+ region->length = 63;
+
+ return 0;
+}
+
+static int gd5fxgq4xexxg_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ &status2);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ /*
+ * Read the status2 register to determine a more fine-grained
+ * bit error status
+ */
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * 4 ... 7 bits are flipped (1..4 can't be distinguished, so
+ * the maximum of 4 is reported in that case)
+ */
+ /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
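+ /*
+ * Worked example: ECCS = 0b01 combined with ECCSE = 0b10 yields
+ * (0b01 << 2) | 0b10 = 6 corrected bitflips.
+ */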
+ return ((status & STATUS_ECC_MASK) >> 2) |
+ ((status2 & STATUS_ECC_MASK) >> 4);
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ u8 status2;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
+ &status2);
+ int ret;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
+ /*
+ * Read the status2 register to determine a more fine-grained
+ * bit error status
+ */
+ ret = spi_mem_exec_op(spinand->slave, &op);
+ if (ret)
+ return ret;
+
+ /*
+ * 1 ... 4 bits are flipped (and corrected)
+ */
+ /* bits sorted this way (1...0): ECCSE1, ECCSE0 */
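+ /*
+ * Worked example: ECCSE = 0b10 means 2 + 1 = 3 bitflips were
+ * corrected.
+ */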
+ return ((status2 & STATUS_ECC_MASK) >> 4) + 1;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct mtd_ooblayout_ops gd5fxgqxxexxg_ooblayout = {
+ .ecc = gd5fxgqxxexxg_ooblayout_ecc,
+ .rfree = gd5fxgqxxexxg_ooblayout_free,
+};
+
+static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&gd5fxgq4_read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
+ gd5fxgq4xexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5UExxG", 0x51,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&gd5f1gq5_read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
+ gd5fxgq5xexxg_ecc_get_status)),
+};
+
+static int gigadevice_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * For GigaDevice NANDs, an address byte needs to be shifted in before
+ * the IDs are read out, so the first byte in raw_id is dummy.
+ */
+ if (id[1] != SPINAND_MFR_GIGADEVICE)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
+ ARRAY_SIZE(gigadevice_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
+ .detect = gigadevice_spinand_detect,
+};
+
+const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
+ .id = SPINAND_MFR_GIGADEVICE,
+ .name = "GigaDevice",
+ .ops = &gigadevice_spinand_manuf_ops,
+};
diff --git a/roms/u-boot/drivers/mtd/nand/spi/macronix.c b/roms/u-boot/drivers/mtd/nand/spi/macronix.c
new file mode 100644
index 000000000..215f09acc
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/macronix.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Macronix
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef __UBOOT__
+#include <malloc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/bug.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MACRONIX 0xC2
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ return -ERANGE;
+}
+
+static int mx35lfxge4ab_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mx35lfxge4ab_ooblayout = {
+ .ecc = mx35lfxge4ab_ooblayout_ecc,
+ .rfree = mx35lfxge4ab_ooblayout_free,
+};
+
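+/*
+ * Read Macronix's vendor-specific ECC status register (opcode 0x7c,
+ * one dummy byte): unlike the coarse status-register encoding, ECCSR
+ * reports the actual number of corrected bitflips.
+ */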
+static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x7c, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_DUMMY(1, 1),
+ SPI_MEM_OP_DATA_IN(1, eccsr, 1));
+
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 eccsr;
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr))
+ return nand->eccreq.strength;
+
+ if (WARN_ON(eccsr > nand->eccreq.strength || !eccsr))
+ return nand->eccreq.strength;
+
+ return eccsr;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info macronix_spinand_table[] = {
+ SPINAND_INFO("MX35LF1GE4AB", 0x12,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
+ mx35lf1ge4ab_ecc_get_status)),
+ SPINAND_INFO("MX35LF2GE4AB", 0x22,
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+};
+
+static int macronix_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Macronix SPI NAND read ID needs a dummy byte, so the first byte in
+ * raw_id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_MACRONIX)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, macronix_spinand_table,
+ ARRAY_SIZE(macronix_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = {
+ .detect = macronix_spinand_detect,
+};
+
+const struct spinand_manufacturer macronix_spinand_manufacturer = {
+ .id = SPINAND_MFR_MACRONIX,
+ .name = "Macronix",
+ .ops = &macronix_spinand_manuf_ops,
+};
diff --git a/roms/u-boot/drivers/mtd/nand/spi/micron.c b/roms/u-boot/drivers/mtd/nand/spi/micron.c
new file mode 100644
index 000000000..6bacf14aa
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/micron.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+
+#ifndef __UBOOT__
+#include <malloc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/bitops.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_MICRON 0x2c
+
+#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
+#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
+#define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+
+#define MICRON_CFG_CR BIT(0)
+
+/*
+ * As per datasheet, die selection is done by the 6th bit of Die
+ * Select Register (Address 0xD0).
+ */
+#define MICRON_DIE_SELECT_REG 0xD0
+
+#define MICRON_SELECT_DIE(x) ((x) << 6)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
+
+ return 0;
+}
+
+static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = (mtd->oobsize / 2) - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_8_ooblayout = {
+ .ecc = micron_8_ooblayout_ecc,
+ .rfree = micron_8_ooblayout_free,
+};
+
+static int micron_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG,
+ spinand->scratchbuf);
+
+ if (target > 1)
+ return -EINVAL;
+
+ *spinand->scratchbuf = MICRON_SELECT_DIE(target);
+
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static int micron_8_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & MICRON_STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case MICRON_STATUS_ECC_1TO3_BITFLIPS:
+ return 3;
+
+ case MICRON_STATUS_ECC_4TO6_BITFLIPS:
+ return 6;
+
+ case MICRON_STATUS_ECC_7TO8_BITFLIPS:
+ return 8;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info micron_spinand_table[] = {
+ /* M79A 2Gb 3.3V */
+ SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 2Gb 1.8V */
+ SPINAND_INFO("MT29F2G01ABBGD", 0x25,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 3.3V */
+ SPINAND_INFO("MT29F1G01ABAFD", 0x14,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M78A 1Gb 1.8V */
+ SPINAND_INFO("MT29F1G01ABAFD", 0x15,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M79A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ADAGD", 0x36,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 2, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 4Gb 3.3V */
+ SPINAND_INFO("MT29F4G01ABAFD", 0x34,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 4Gb 1.8V */
+ SPINAND_INFO("MT29F4G01ABBFD", 0x35,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status)),
+ /* M70A 8Gb 3.3V */
+ SPINAND_INFO("MT29F8G01ADAFD", 0x46,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+ /* M70A 8Gb 1.8V */
+ SPINAND_INFO("MT29F8G01ADBFD", 0x47,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 2),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_CR_FEAT_BIT,
+ SPINAND_ECCINFO(&micron_8_ooblayout,
+ micron_8_ecc_get_status),
+ SPINAND_SELECT_TARGET(micron_select_target)),
+};
+
+static int micron_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Micron SPI NAND read ID needs a dummy byte,
+ * so the first byte in raw_id is dummy.
+ */
+ if (id[1] != SPINAND_MFR_MICRON)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, micron_spinand_table,
+ ARRAY_SIZE(micron_spinand_table), id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static int micron_spinand_init(struct spinand_device *spinand)
+{
+ /*
+ * The M70A device series enables the Continuous Read feature at
+ * power-up, which is not supported. Disable this bit to avoid any
+ * possible failure.
+ */
+ if (spinand->flags & SPINAND_HAS_CR_FEAT_BIT)
+ return spinand_upd_cfg(spinand, MICRON_CFG_CR, 0);
+
+ return 0;
+}
+
+static const struct spinand_manufacturer_ops micron_spinand_manuf_ops = {
+ .detect = micron_spinand_detect,
+ .init = micron_spinand_init,
+};
+
+const struct spinand_manufacturer micron_spinand_manufacturer = {
+ .id = SPINAND_MFR_MICRON,
+ .name = "Micron",
+ .ops = &micron_spinand_manuf_ops,
+};
diff --git a/roms/u-boot/drivers/mtd/nand/spi/toshiba.c b/roms/u-boot/drivers/mtd/nand/spi/toshiba.c
new file mode 100644
index 000000000..c2cd3b426
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/toshiba.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 exceet electronics GmbH
+ * Copyright (c) 2018 Kontron Electronics GmbH
+ *
+ * Author: Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#ifndef __UBOOT__
+#include <malloc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/bug.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_TOSHIBA 0x98
+#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_x4_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+/*
+ * Backward compatibility for 1st generation Serial NAND devices
+ * which don't support the Quad Program Load operation.
+ */
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 0)
+ return -ERANGE;
+
+ region->offset = mtd->oobsize / 2;
+ region->length = mtd->oobsize / 2;
+
+ return 0;
+}
+
+static int tx58cxgxsxraix_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 0)
+ return -ERANGE;
+
+ /* 2 bytes reserved for BBM */
+ region->offset = 2;
+ region->length = (mtd->oobsize / 2) - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tx58cxgxsxraix_ooblayout = {
+ .ecc = tx58cxgxsxraix_ooblayout_ecc,
+ .rfree = tx58cxgxsxraix_ooblayout_free,
+};
+
+static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 mbf = 0;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ case TOSH_STATUS_ECC_HAS_BITFLIPS_T:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (spi_mem_exec_op(spinand->slave, &op))
+ return nand->eccreq.strength;
+
+ mbf >>= 4;
+
+ if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
+ return nand->eccreq.strength;
+
+ return mbf;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info toshiba_spinand_table[] = {
+ /* 3.3V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIG", 0xC2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIG", 0xCB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIG", 0xCD,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIG", 0xB2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (1st generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIG", 0xBB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIG", 0xBD,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+
+ /*
+ * 2nd generation serial NAND devices have HOLD_D, which is
+ * equivalent to the QE bit.
+ */
+ /* 3.3V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG0S3HRAIJ", 0xE2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG1S3HRAIJ", 0xEB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CVG2S0HRAIJ", 0xED,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 3.3V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CVG3S0HRAIJ", 0xE4,
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG0S3HRAIJ", 0xD2,
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 2Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG1S3HRAIJ", 0xDB,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (2nd generation) */
+ SPINAND_INFO("TC58CYG2S0HRAIJ", 0xDD,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (2nd generation) */
+ SPINAND_INFO("TH58CYG3S0HRAIJ", 0xD4,
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+};
+
+static int toshiba_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Toshiba SPI NAND read ID needs a dummy byte,
+ * so the first byte in id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_TOSHIBA)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, toshiba_spinand_table,
+ ARRAY_SIZE(toshiba_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
+ .detect = toshiba_spinand_detect,
+};
+
+const struct spinand_manufacturer toshiba_spinand_manufacturer = {
+ .id = SPINAND_MFR_TOSHIBA,
+ .name = "Toshiba",
+ .ops = &toshiba_spinand_manuf_ops,
+};
diff --git a/roms/u-boot/drivers/mtd/nand/spi/winbond.c b/roms/u-boot/drivers/mtd/nand/spi/winbond.c
new file mode 100644
index 000000000..c119486ef
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/nand/spi/winbond.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 exceet electronics GmbH
+ *
+ * Authors:
+ * Frieder Schrempf <frieder.schrempf@exceet.de>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef __UBOOT__
+#include <malloc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#endif
+#include <linux/bitops.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_WINBOND 0xEF
+
+#define WINBOND_CFG_BUF_READ BIT(3)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int w25m02gv_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 2;
+ region->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops w25m02gv_ooblayout = {
+ .ecc = w25m02gv_ooblayout_ecc,
+ .rfree = w25m02gv_ooblayout_free,
+};
+
+static int w25m02gv_select_target(struct spinand_device *spinand,
+ unsigned int target)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0xc2, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(1,
+ spinand->scratchbuf,
+ 1));
+
+ *spinand->scratchbuf = target;
+ return spi_mem_exec_op(spinand->slave, &op);
+}
+
+static const struct spinand_info winbond_spinand_table[] = {
+ SPINAND_INFO("W25M02GV", 0xAB,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 2),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
+ SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+ SPINAND_INFO("W25N01GV", 0xAA,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
+};
+
+/**
+ * winbond_spinand_detect - initialize the device-related part of the
+ * spinand_device struct if it is a Winbond device.
+ * @spinand: SPI NAND device structure
+ *
+ * Return: 1 if a supported Winbond device was detected, 0 if the ID does
+ * not match a Winbond device, a negative error code otherwise.
+ */
+static int winbond_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Winbond SPI NAND read ID needs a dummy byte,
+ * so the first byte in raw_id is dummy.
+ */
+ if (id[1] != SPINAND_MFR_WINBOND)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, winbond_spinand_table,
+ ARRAY_SIZE(winbond_spinand_table), id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static int winbond_spinand_init(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ unsigned int i;
+ int ret;
+
+ /*
+ * Make sure all dies are in buffer read mode and not continuous read
+ * mode.
+ */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, WINBOND_CFG_BUF_READ,
+ WINBOND_CFG_BUF_READ);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct spinand_manufacturer_ops winbond_spinand_manuf_ops = {
+ .detect = winbond_spinand_detect,
+ .init = winbond_spinand_init,
+};
+
+const struct spinand_manufacturer winbond_spinand_manufacturer = {
+ .id = SPINAND_MFR_WINBOND,
+ .name = "Winbond",
+ .ops = &winbond_spinand_manuf_ops,
+};
diff --git a/roms/u-boot/drivers/mtd/onenand/Makefile b/roms/u-boot/drivers/mtd/onenand/Makefile
new file mode 100644
index 000000000..4dc417a57
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/onenand/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (C) 2005-2007 Samsung Electronics.
+# Kyungmin Park <kyungmin.park@samsung.com>
+
+ifndef CONFIG_SPL_BUILD
+obj-$(CONFIG_CMD_ONENAND) := onenand_uboot.o onenand_base.o onenand_bbt.o
+obj-$(CONFIG_SAMSUNG_ONENAND) += samsung.o
+else
+obj-y := onenand_spl.o
+endif
diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_base.c b/roms/u-boot/drivers/mtd/onenand/onenand_base.c
new file mode 100644
index 000000000..09daa0dd3
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/onenand/onenand_base.c
@@ -0,0 +1,2802 @@
+/*
+ * linux/drivers/mtd/onenand/onenand_base.c
+ *
+ * Copyright (C) 2005-2007 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Credits:
+ * Adrian Hunter <ext-adrian.hunter@nokia.com>:
+ * auto-placement support, read-while load support, various fixes
+ * Copyright (C) Nokia Corporation, 2007
+ *
+ * Rohit Hagargundgi <h.rohit at samsung.com>,
+ * Amul Kumar Saha <amul.saha@samsung.com>:
+ * Flex-OneNAND support
+ * Copyright (C) Samsung Electronics, 2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <log.h>
+#include <watchdog.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/mtd/mtd.h>
+#include "linux/mtd/flashchip.h"
+#include <linux/mtd/onenand.h>
+
+#include <asm/io.h>
+#include <linux/errno.h>
+#include <malloc.h>
+
+/* Accesses must be 16-bit wide instead of 8-bit, hence this 16-bit memcpy */
+static void *memcpy_16(void *dst, const void *src, unsigned int len)
+{
+ void *ret = dst;
+ short *d = dst;
+ const short *s = src;
+
+ len >>= 1;
+ while (len-- > 0)
+ *d++ = *s++;
+ return ret;
+}
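+
+/*
+ * Note that only whole 16-bit words are copied (len is halved with
+ * truncation), so callers must pass an even length; an odd trailing
+ * byte would be silently dropped.
+ */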
+
+/**
+ * onenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
+/**
+ * onenand_oob_64 - oob info for large (2KB) page
+ */
+static struct nand_ecclayout onenand_oob_64 = {
+ .eccbytes = 20,
+ .eccpos = {
+ 8, 9, 10, 11, 12,
+ 24, 25, 26, 27, 28,
+ 40, 41, 42, 43, 44,
+ 56, 57, 58, 59, 60,
+ },
+ .oobfree = {
+ {2, 3}, {14, 2}, {18, 3}, {30, 2},
+ {34, 3}, {46, 2}, {50, 3}, {62, 2}
+ }
+};
+
+/**
+ * onenand_oob_32 - oob info for middle (1KB) page
+ */
+static struct nand_ecclayout onenand_oob_32 = {
+ .eccbytes = 10,
+ .eccpos = {
+ 8, 9, 10, 11, 12,
+ 24, 25, 26, 27, 28,
+ },
+ .oobfree = { {2, 3}, {14, 2}, {18, 3}, {30, 2} }
+};
+
+/*
+ * Warning! This array is used with the memcpy_16() function, thus
+ * it must be aligned to 2 bytes. GCC can make this array unaligned
+ * as the array is made of unsigned char, which memcpy_16() doesn't
+ * like and which would cause unaligned accesses.
+ */
+static const unsigned char __aligned(2) ffchars[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 16 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 32 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
+};
+
+/**
+ * onenand_readw - [OneNAND Interface] Read OneNAND register
+ * @param addr address to read
+ *
+ * Read OneNAND register
+ */
+static unsigned short onenand_readw(void __iomem * addr)
+{
+ return readw(addr);
+}
+
+/**
+ * onenand_writew - [OneNAND Interface] Write OneNAND register with value
+ * @param value value to write
+ * @param addr address to write
+ *
+ * Write OneNAND register with value
+ */
+static void onenand_writew(unsigned short value, void __iomem * addr)
+{
+ writew(value, addr);
+}
+
+/**
+ * onenand_block_address - [DEFAULT] Get block address
+ * @param this the OneNAND device structure
+ * @param block the block
+ * @return translated block address if DDP, otherwise same
+ *
+ * Setup Start Address 1 Register (F100h)
+ */
+static int onenand_block_address(struct onenand_chip *this, int block)
+{
+ /* Device Flash Core select, NAND Flash Block Address */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1 | (block ^ this->density_mask);
+
+ return block;
+}
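+
+/*
+ * Example: on a DDP part, a block whose number has the density_mask bit
+ * set lives in the second die; it is remapped to its die-local index
+ * (block ^ density_mask) with the ONENAND_DDP_CHIP1 select bit ORed in.
+ */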
+
+/**
+ * onenand_bufferram_address - [DEFAULT] Get bufferram address
+ * @param this the OneNAND device structure
+ * @param block the block
+ * @return set DBS value if DDP, otherwise 0
+ *
+ * Setup Start Address 2 Register (F101h) for DDP
+ */
+static int onenand_bufferram_address(struct onenand_chip *this, int block)
+{
+ /* Device BufferRAM Select */
+ if (block & this->density_mask)
+ return ONENAND_DDP_CHIP1;
+
+ return ONENAND_DDP_CHIP0;
+}
+
+/**
+ * onenand_page_address - [DEFAULT] Get page address
+ * @param page the page address
+ * @param sector the sector address
+ * @return combined page and sector address
+ *
+ * Setup Start Address 8 Register (F107h)
+ */
+static int onenand_page_address(int page, int sector)
+{
+ /* Flash Page Address, Flash Sector Address */
+ int fpa, fsa;
+
+ fpa = page & ONENAND_FPA_MASK;
+ fsa = sector & ONENAND_FSA_MASK;
+
+ return ((fpa << ONENAND_FPA_SHIFT) | fsa);
+}
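+
+/*
+ * Worked example (assuming the usual 2-bit FSA field, i.e. an FPA shift
+ * of 2): page 5, sector 2 encodes as (5 << 2) | 2 = 0x16.
+ */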
+
+/**
+ * onenand_buffer_address - [DEFAULT] Get buffer address
+ * @param dataram1 DataRAM index
+ * @param sectors the sector address
+ * @param count the number of sectors
+ * @return the start buffer value
+ *
+ * Setup Start Buffer Register (F200h)
+ */
+static int onenand_buffer_address(int dataram1, int sectors, int count)
+{
+ int bsa, bsc;
+
+ /* BufferRAM Sector Address */
+ bsa = sectors & ONENAND_BSA_MASK;
+
+ if (dataram1)
+ bsa |= ONENAND_BSA_DATARAM1; /* DataRAM1 */
+ else
+ bsa |= ONENAND_BSA_DATARAM0; /* DataRAM0 */
+
+ /* BufferRAM Sector Count */
+ bsc = count & ONENAND_BSC_MASK;
+
+ return ((bsa << ONENAND_BSA_SHIFT) | bsc);
+}
+
+/**
+ * flexonenand_block - Return block number for flash address
+ * @param this OneNAND device structure
+ * @param addr address for which the block number is needed
+ */
+static unsigned int flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned int boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
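+
+/*
+ * On Flex-OneNAND the region below the die boundary is SLC, whose blocks
+ * are half the size of the MLC blocks above it: hence the
+ * (erase_shift - 1) indexing and the folding of block numbers past the
+ * boundary.
+ */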
+
+unsigned int onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t) block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t) (block - boundary - 1)
+ << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t) block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
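+
+/*
+ * Worked example, the inverse of the one above (same assumed geometry:
+ * erase_shift == 18, boundary[0] == 3): block 6 yields
+ * ofs = 6 << 17 = 0xC0000, and since 6 > boundary + 1, ofs gains another
+ * (6 - 3 - 1) << 17 = 0x40000, giving back 0x100000 as expected.
+ */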
+
+/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+
+/**
+ * onenand_get_density - [DEFAULT] Get OneNAND density
+ * @param dev_id OneNAND device ID
+ *
+ * Get OneNAND density from device ID
+ */
+static inline int onenand_get_density(int dev_id)
+{
+ int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ return (density & ONENAND_DEVICE_DENSITY_MASK);
+}
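+
+/*
+ * Illustrative sketch, assuming ONENAND_DEVICE_DENSITY_SHIFT == 4 and
+ * ONENAND_DEVICE_DENSITY_MASK == 0xf as in onenand_regs.h: a device ID of
+ * 0x0050 decodes to density 5, which onenand_print_device_info() below
+ * reports as (16 << 5) == 512 MiB, i.e. a 4Gbit part.
+ */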
+
+/**
+ * onenand_command - [DEFAULT] Send command to OneNAND device
+ * @param mtd MTD device structure
+ * @param cmd the command to be sent
+ * @param addr offset to read from or write to
+ * @param len number of bytes to read or write
+ *
+ * Send command to OneNAND device. This function is used for middle/large page
+ * devices (1KiB/2KiB per page).
+ */
+static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr,
+ size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int value;
+ int block, page;
+
+ /* Now we use page size operation */
+ int sectors = 0, count = 0;
+
+ /* Address translation */
+ switch (cmd) {
+ case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_LOCK:
+ case ONENAND_CMD_LOCK_TIGHT:
+ case ONENAND_CMD_UNLOCK_ALL:
+ block = -1;
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
+ page = -1;
+ break;
+
+ case ONENAND_CMD_ERASE:
+ case ONENAND_CMD_BUFFERRAM:
+ block = onenand_block(this, addr);
+ page = -1;
+ break;
+
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
+ default:
+ block = onenand_block(this, addr);
+ page = (int) (addr
+ - onenand_addr(this, block)) >> this->page_shift;
+ page &= this->page_mask;
+ break;
+ }
+
+ /* NOTE: The setting order of the registers is very important! */
+ if (cmd == ONENAND_CMD_BUFFERRAM) {
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS2);
+
+ if (ONENAND_IS_4KB_PAGE(this))
+ ONENAND_SET_BUFFERRAM0(this);
+ else
+ /* Switch to the next data buffer */
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ return 0;
+ }
+
+ if (block != -1) {
+ /* Write 'DFS, FBA' of Flash */
+ value = onenand_block_address(this, block);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS1);
+
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ if (page != -1) {
+ int dataram;
+
+ switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ if (ONENAND_IS_4KB_PAGE(this))
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+
+ break;
+
+ default:
+ dataram = ONENAND_CURRENT_BUFFERRAM(this);
+ break;
+ }
+
+ /* Write 'FPA, FSA' of Flash */
+ value = onenand_page_address(page, sectors);
+ this->write_word(value,
+ this->base + ONENAND_REG_START_ADDRESS8);
+
+ /* Write 'BSA, BSC' of DataRAM */
+ value = onenand_buffer_address(dataram, sectors, count);
+ this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
+ }
+
+ /* Interrupt clear */
+ this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+ /* Write command */
+ this->write_word(cmd, this->base + ONENAND_REG_COMMAND);
+
+ return 0;
+}
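+
+/*
+ * Minimal sketch of the register sequence onenand_command() issues for a
+ * plain page read (register names as used above; no new registers assumed):
+ *
+ * this->write_word(dfs_fba, this->base + ONENAND_REG_START_ADDRESS1);
+ * this->write_word(dbs, this->base + ONENAND_REG_START_ADDRESS2);
+ * this->write_word(fpa_fsa, this->base + ONENAND_REG_START_ADDRESS8);
+ * this->write_word(bsa_bsc, this->base + ONENAND_REG_START_BUFFER);
+ * this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT);
+ * this->write_word(ONENAND_CMD_READ, this->base + ONENAND_REG_COMMAND);
+ *
+ * The ordering matters: addresses and the buffer selection must be set up
+ * before the interrupt is cleared and the command is written.
+ */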
+
+/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
+ */
+static int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i;
+
+ if (!FLEXONENAND(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base
+ + ((ONENAND_REG_ECC_STATUS + i) << 1));
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_wait - [DEFAULT] wait until the command is done
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Wait for command done. This applies to all OneNAND commands.
+ * According to the general OneNAND specs, a read can take up to 30us,
+ * an erase up to 2ms and a program up to 350us.
+ */
+static int onenand_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int interrupt = 0;
+ unsigned int ctrl;
+
+ /* Wait at most 20ms ... */
+ u32 timeo = (CONFIG_SYS_HZ * 20) / 1000;
+ u32 time_start = get_timer(0);
+ do {
+ WATCHDOG_RESET();
+ if (get_timer(time_start) > timeo)
+ return -EIO;
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ } while ((interrupt & ONENAND_INT_MASTER) == 0);
+
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+
+ if (interrupt & ONENAND_INT_READ) {
+ int ecc = onenand_read_ecc(this);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk("onenand_wait: ECC error = 0x%04x\n", ecc);
+ return -EBADMSG;
+ }
+ }
+
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk("onenand_wait: controller error = 0x%04x\n", ctrl);
+ if (ctrl & ONENAND_CTRL_LOCK)
+ printk("onenand_wait: it's locked error = 0x%04x\n",
+ ctrl);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_bufferram_offset - [DEFAULT] BufferRAM offset
+ * @param mtd MTD data structure
+ * @param area BufferRAM area
+ * @return offset of the given area
+ *
+ * Return BufferRAM offset given area
+ */
+static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+
+ if (ONENAND_CURRENT_BUFFERRAM(this)) {
+ if (area == ONENAND_DATARAM)
+ return mtd->writesize;
+ if (area == ONENAND_SPARERAM)
+ return mtd->oobsize;
+ }
+
+ return 0;
+}
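+
+/*
+ * Example, derived from the code above: with 2KiB pages and a 64-byte OOB,
+ * asking for ONENAND_DATARAM returns 2048 while BufferRAM1 is current and 0
+ * for BufferRAM0; for ONENAND_SPARERAM it returns 64 or 0 respectively.
+ */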
+
+/**
+ * onenand_read_bufferram - [OneNAND Interface] Read the bufferram area
+ * @param mtd MTD data structure
+ * @param addr address within the device (unused by the default method)
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Read the BufferRAM area
+ */
+static int onenand_read_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ memcpy_16(buffer, bufferram + offset, count);
+
+ return 0;
+}
+
+/**
+ * onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. Burst mode
+ * @param mtd MTD data structure
+ * @param addr address within the device (unused by the default method)
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Read the BufferRAM area with Sync. Burst Mode
+ */
+static int onenand_sync_read_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ);
+
+ memcpy_16(buffer, bufferram + offset, count);
+
+ this->mmcontrol(mtd, 0);
+
+ return 0;
+}
+
+/**
+ * onenand_write_bufferram - [OneNAND Interface] Write the bufferram area
+ * @param mtd MTD data structure
+ * @param addr address within the device (unused by the default method)
+ * @param area BufferRAM area
+ * @param buffer the databuffer to put/get data
+ * @param offset offset to read from or write to
+ * @param count number of bytes to read/write
+ *
+ * Write the BufferRAM area
+ */
+static int onenand_write_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *bufferram;
+
+ bufferram = this->base + area;
+ bufferram += onenand_bufferram_offset(mtd, area);
+
+ memcpy_16(bufferram + offset, buffer, count);
+
+ return 0;
+}
+
+/**
+ * onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode
+ * @param mtd MTD data structure
+ * @param addr address to check
+ * @return blockpage address
+ *
+ * Get blockpage address at 2x program mode
+ */
+static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, block, page;
+
+ /* Calculate the even block number */
+ block = (int) (addr >> this->erase_shift) & ~1;
+ /* Is it the odd plane? */
+ if (addr & this->writesize)
+ block++;
+ page = (int) (addr >> (this->page_shift + 1)) & this->page_mask;
+ blockpage = (block << 7) | page;
+
+ return blockpage;
+}
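+
+/*
+ * Worked example with assumed geometry (writesize == 2048, page_shift == 11,
+ * erase_shift == 17, page_mask == 0x3f): for addr == 0x20800 the even block
+ * number is (0x20800 >> 17) & ~1 == 0; the address falls in the odd plane
+ * (0x20800 & 0x800 is set), so block becomes 1; the plane-local page is
+ * (0x20800 >> 12) & 0x3f == 32, giving blockpage == (1 << 7) | 32 == 160.
+ */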
+
+/**
+ * onenand_check_bufferram - [GENERIC] Check BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr address to check
+ * @return 1 if the BufferRAM holds valid data, otherwise 0
+ *
+ * Check whether the BufferRAM already holds the data we require
+ */
+static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage, found = 0;
+ unsigned int i;
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int) (addr >> this->page_shift);
+
+ /* Is there valid data? */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ found = 1;
+ else {
+ /* Check another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage) {
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ found = 1;
+ }
+ }
+
+ if (found && ONENAND_IS_DDP(this)) {
+ /* Select DataRAM for DDP */
+ int block = onenand_block(this, addr);
+ int value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ }
+
+ return found;
+}
+
+/**
+ * onenand_update_bufferram - [GENERIC] Update BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr address to update
+ * @param valid valid flag
+ *
+ * Update BufferRAM information
+ */
+static int onenand_update_bufferram(struct mtd_info *mtd, loff_t addr,
+ int valid)
+{
+ struct onenand_chip *this = mtd->priv;
+ int blockpage;
+ unsigned int i;
+
+ if (ONENAND_IS_2PLANE(this))
+ blockpage = onenand_get_2x_blockpage(mtd, addr);
+ else
+ blockpage = (int)(addr >> this->page_shift);
+
+ /* Invalidate another BufferRAM */
+ i = ONENAND_NEXT_BUFFERRAM(this);
+ if (this->bufferram[i].blockpage == blockpage)
+ this->bufferram[i].blockpage = -1;
+
+ /* Update BufferRAM */
+ i = ONENAND_CURRENT_BUFFERRAM(this);
+ if (valid)
+ this->bufferram[i].blockpage = blockpage;
+ else
+ this->bufferram[i].blockpage = -1;
+
+ return 0;
+}
+
+/**
+ * onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information
+ * @param mtd MTD data structure
+ * @param addr start address to invalidate
+ * @param len length to invalidate
+ *
+ * Invalidate BufferRAM information
+ */
+static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr,
+ unsigned int len)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+ loff_t end_addr = addr + len;
+
+ /* Invalidate BufferRAM */
+ for (i = 0; i < MAX_BUFFERRAM; i++) {
+ loff_t buf_addr = this->bufferram[i].blockpage << this->page_shift;
+
+ if (buf_addr >= addr && buf_addr < end_addr)
+ this->bufferram[i].blockpage = -1;
+ }
+}
+
+/**
+ * onenand_get_device - [GENERIC] Get chip for selected access
+ * @param mtd MTD device structure
+ * @param new_state the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static void onenand_get_device(struct mtd_info *mtd, int new_state)
+{
+ /* Do nothing */
+}
+
+/**
+ * onenand_release_device - [GENERIC] release chip
+ * @param mtd MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void onenand_release_device(struct mtd_info *mtd)
+{
+ /* Do nothing */
+}
+
+/**
+ * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
+ * @param mtd MTD device structure
+ * @param buf destination address
+ * @param column oob offset to read from
+ * @param thislen oob length to read
+ */
+static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf,
+ int column, int thislen)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct nand_oobfree *free;
+ int readcol = column;
+ int readend = column + thislen;
+ int lastgap = 0;
+ unsigned int i;
+ uint8_t *oob_buf = this->oob_buf;
+
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length;
+ i++, free++) {
+ if (readcol >= lastgap)
+ readcol += free->offset - lastgap;
+ if (readend >= lastgap)
+ readend += free->offset - lastgap;
+ lastgap = free->offset + free->length;
+ }
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length;
+ i++, free++) {
+ int free_end = free->offset + free->length;
+ if (free->offset < readend && free_end > readcol) {
+ int st = max_t(int,free->offset,readcol);
+ int ed = min_t(int,free_end,readend);
+ int n = ed - st;
+ memcpy(buf, oob_buf + st, n);
+ buf += n;
+ } else if (column == 0)
+ break;
+ }
+ return 0;
+}
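+
+/*
+ * Illustrative sketch with a hypothetical layout: given a single oobfree
+ * entry { .offset = 2, .length = 3 }, a request for column 0 and thislen 3
+ * shifts the logical range [0, 3) onto raw OOB bytes [2, 5), so bytes 2..4
+ * of the spare area are copied into the caller's buffer.
+ */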
+
+/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait
+ *
+ * MLC NAND flash cells have paired pages: an LSB page and an MSB page. The
+ * LSB page has the lower page address and the MSB page the higher one.
+ * If power is lost while programming the MSB page, the paired LSB page data
+ * can become corrupt. The LSB page recovery read is a way to read the LSB
+ * page even though its data is corrupted. When an uncorrectable error occurs
+ * on an LSB page read after power up, issue the LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ printk("onenand_recover_lsb:"
+ "Attempting to recover from uncorrectable read\n");
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read main and/or out-of-band data
+ */
+static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0, boundary = 0;
+ int writesize = this->writesize;
+
+ pr_debug("onenand_read_ops_nolock: from = 0x%08x, len = %i\n",
+ (unsigned int) from, (int) len);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n");
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ /* Read-while-load method */
+ /* Note: We can't use this feature in MLC */
+
+ /* Do first load to bufferRAM */
+ if (read < len) {
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->main_buf = buf;
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+ }
+
+ thislen = min_t(int, writesize, len - read);
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ while (!ret) {
+ /* If there is more to load then start next load */
+ from += thislen;
+ if (!ONENAND_IS_4KB_PAGE(this) && read + thislen < len) {
+ this->main_buf = buf + thislen;
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ /*
+ * Chip boundary handling in DDP:
+ * we have issued the chip 1 read and pointed at the chip 1
+ * BufferRAM, so we must point back at the chip 0 BufferRAM.
+ */
+ if (ONENAND_IS_DDP(this) &&
+ unlikely(from == (this->chipsize >> 1))) {
+ this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2);
+ boundary = 1;
+ } else
+ boundary = 0;
+ ONENAND_SET_PREV_BUFFERRAM(this);
+ }
+
+ /* While load is going, read from last bufferRAM */
+ this->read_bufferram(mtd, from - thislen, ONENAND_DATARAM, buf, column, thislen);
+
+ /* Read oob area if needed */
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ if (ONENAND_IS_4KB_PAGE(this) && (read + thislen < len)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ }
+
+ /* See if we are done */
+ read += thislen;
+ if (read == len)
+ break;
+ /* Set up for next read from bufferRAM */
+ if (unlikely(boundary))
+ this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2);
+ if (!ONENAND_IS_4KB_PAGE(this))
+ ONENAND_SET_NEXT_BUFFERRAM(this);
+ buf += thislen;
+ thislen = min_t(int, writesize, len - read);
+ column = 0;
+
+ if (!ONENAND_IS_4KB_PAGE(this)) {
+ /* Now wait for load */
+ ret = this->wait(mtd, FL_READING);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (mtd_is_eccerr(ret))
+ ret = 0;
+ }
+ }
+
+ /*
+ * Return success if there were no ECC failures, else -EBADMSG.
+ * The fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG.
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
+}
+
+/**
+ * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area
+ */
+static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ int read = 0, thislen, column, oobsize;
+ size_t len = ops->ooblen;
+ unsigned int mode = ops->mode;
+ u_char *buf = ops->oobbuf;
+ int ret = 0, readcmd;
+
+ from += ops->ooboffs;
+
+ pr_debug("onenand_read_oob_nolock: from = 0x%08x, len = %i\n",
+ (unsigned int) from, (int) len);
+
+ /* Initialize return length value */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = from & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow reads past end of device */
+ if (unlikely(from >= mtd->size ||
+ column + len > ((mtd->size >> this->page_shift) -
+ (from >> this->page_shift)) * oobsize)) {
+ printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n");
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ?
+ ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ while (read < len) {
+ thislen = oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->spare_buf = buf;
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret && ret != -EBADMSG) {
+ printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+ break;
+ }
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ onenand_transfer_auto_oob(mtd, buf, column, thislen);
+ else
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, buf, column, thislen);
+
+ read += thislen;
+
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+ /* Page size */
+ from += mtd->writesize;
+ column = 0;
+ }
+ }
+
+ ops->oobretlen = read;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_read - [MTD Interface] MTD compatibility function for onenand_read_ecc
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param len number of bytes to read
+ * @param retlen pointer to variable to store the number of read bytes
+ * @param buf the databuffer to put data
+ *
+ * This function simply wraps onenand_read_ops_nolock with a main-area-only
+ * mtd_oob_ops (no oob buffer).
+ */
+int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ onenand_get_device(mtd, FL_READING);
+ ret = onenand_read_ops_nolock(mtd, from, &ops);
+ onenand_release_device(mtd);
+
+ *retlen = ops.retlen;
+ return ret;
+}
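+
+/*
+ * Typical usage sketch (illustrative; assumes onenand_mtd is the global
+ * U-Boot OneNAND mtd_info set up by the board's OneNAND init code):
+ *
+ * size_t retlen;
+ * u_char buf[2048];
+ * int err = onenand_read(&onenand_mtd, 0x100000, sizeof(buf),
+ * &retlen, buf);
+ * if (err || retlen != sizeof(buf))
+ * return err ? err : -EIO;
+ */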
+
+/**
+ * onenand_read_oob - [MTD Interface] OneNAND read out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operations description structure
+ *
+ * Read OneNAND main and/or out-of-band data
+ */
+int onenand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ break;
+ case MTD_OPS_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_READING);
+ if (ops->datbuf)
+ ret = onenand_read_ops_nolock(mtd, from, ops);
+ else
+ ret = onenand_read_oob_nolock(mtd, from, ops);
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_bbt_wait - [DEFAULT] wait until the command is done
+ * @param mtd MTD device structure
+ * @param state state to select the max. timeout value
+ *
+ * Wait for command done.
+ */
+static int onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int interrupt;
+ unsigned int ctrl;
+
+ /* Wait at most 20ms ... */
+ u32 timeo = (CONFIG_SYS_HZ * 20) / 1000;
+ u32 time_start = get_timer(0);
+ do {
+ WATCHDOG_RESET();
+ if (get_timer(time_start) > timeo)
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ } while ((interrupt & ONENAND_INT_MASTER) == 0);
+
+ /* To get correct interrupt status in timeout case */
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+
+ if (interrupt & ONENAND_INT_READ) {
+ int ecc = onenand_read_ecc(this);
+ if (ecc & ONENAND_ECC_2BIT_ALL) {
+ printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
+ ", controller = 0x%04x\n", ecc, ctrl);
+ return ONENAND_BBT_READ_ERROR;
+ }
+ } else {
+ printk(KERN_ERR "onenand_bbt_wait: read timeout!"
+ "ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Initial bad block case: 0x2400 or 0x0400 */
+ if (ctrl & ONENAND_CTRL_ERROR) {
+ printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops oob operation description structure
+ *
+ * OneNAND read out-of-band data from the spare area for bbt scan
+ */
+int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int read = 0, thislen, column;
+ int ret = 0, readcmd;
+ size_t len = ops->ooblen;
+ u_char *buf = ops->oobbuf;
+
+ pr_debug("onenand_bbt_read_oob: from = 0x%08x, len = %zi\n",
+ (unsigned int) from, len);
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ?
+ ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ /* Initialize return value */
+ ops->oobretlen = 0;
+
+ /* Do not allow reads past end of device */
+ if (unlikely((from + len) > mtd->size)) {
+ printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n");
+ return ONENAND_BBT_READ_FATAL_ERROR;
+ }
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_READING);
+
+ column = from & (mtd->oobsize - 1);
+
+ while (read < len) {
+
+ thislen = mtd->oobsize - column;
+ thislen = min_t(int, thislen, len);
+
+ this->spare_buf = buf;
+ this->command(mtd, readcmd, from, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, from, 0);
+
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
+ if (ret)
+ break;
+
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, buf, column, thislen);
+ read += thislen;
+ if (read == len)
+ break;
+
+ buf += thislen;
+
+ /* Read more? */
+ if (read < len) {
+ /* Update Page size */
+ from += this->writesize;
+ column = 0;
+ }
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ ops->oobretlen = read;
+ return ret;
+}
+
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+/**
+ * onenand_verify_oob - [GENERIC] verify the oob contents after a write
+ * @param mtd MTD device structure
+ * @param buf the databuffer to verify
+ * @param to offset to read from
+ */
+static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
+{
+ struct onenand_chip *this = mtd->priv;
+ u_char *oob_buf = this->oob_buf;
+ int status, i, readcmd;
+
+ readcmd = ONENAND_IS_4KB_PAGE(this) ?
+ ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
+ onenand_update_bufferram(mtd, to, 0);
+ status = this->wait(mtd, FL_READING);
+ if (status)
+ return status;
+
+ this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
+ for (i = 0; i < mtd->oobsize; i++)
+ if (buf[i] != 0xFF && buf[i] != oob_buf[i])
+ return -EBADMSG;
+
+ return 0;
+}
+
+/**
+ * onenand_verify - [GENERIC] verify the chip contents after a write
+ * @param mtd MTD device structure
+ * @param buf the databuffer to verify
+ * @param addr offset to read from
+ * @param len number of bytes to read and compare
+ */
+static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ void __iomem *dataram;
+ int ret = 0;
+ int thislen, column;
+
+ while (len != 0) {
+ thislen = min_t(int, this->writesize, len);
+ column = addr & (this->writesize - 1);
+ if (column + thislen > this->writesize)
+ thislen = this->writesize - column;
+
+ this->command(mtd, ONENAND_CMD_READ, addr, this->writesize);
+
+ onenand_update_bufferram(mtd, addr, 0);
+
+ ret = this->wait(mtd, FL_READING);
+ if (ret)
+ return ret;
+
+ onenand_update_bufferram(mtd, addr, 1);
+
+ dataram = this->base + ONENAND_DATARAM;
+ dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM);
+
+ if (memcmp(buf, dataram + column, thislen))
+ return -EBADMSG;
+
+ len -= thislen;
+ buf += thislen;
+ addr += thislen;
+ }
+
+ return 0;
+}
+#else
+#define onenand_verify(...) (0)
+#define onenand_verify_oob(...) (0)
+#endif
+
+#define NOTALIGNED(x) (((x) & (this->subpagesize - 1)) != 0)
+
+/**
+ * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
+ * @param mtd MTD device structure
+ * @param oob_buf oob buffer
+ * @param buf source address
+ * @param column oob offset to write to
+ * @param thislen oob length to write
+ */
+static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
+ const u_char *buf, int column, int thislen)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct nand_oobfree *free;
+ int writecol = column;
+ int writeend = column + thislen;
+ int lastgap = 0;
+ unsigned int i;
+
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length;
+ i++, free++) {
+ if (writecol >= lastgap)
+ writecol += free->offset - lastgap;
+ if (writeend >= lastgap)
+ writeend += free->offset - lastgap;
+ lastgap = free->offset + free->length;
+ }
+ free = this->ecclayout->oobfree;
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length;
+ i++, free++) {
+ int free_end = free->offset + free->length;
+ if (free->offset < writeend && free_end > writecol) {
+ int st = max_t(int,free->offset,writecol);
+ int ed = min_t(int,free_end,writeend);
+ int n = ed - st;
+ memcpy(oob_buf + st, buf, n);
+ buf += n;
+ } else if (column == 0)
+ break;
+ }
+ return 0;
+}
+
+/**
+ * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * Write main and/or oob with ECC
+ */
+static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int written = 0, column, thislen, subpage;
+ int oobwritten = 0, oobcolumn, thisooblen, oobsize;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ const u_char *buf = ops->datbuf;
+ const u_char *oob = ops->oobbuf;
+ u_char *oobbuf;
+ int ret = 0;
+
+ pr_debug("onenand_write_ops_nolock: to = 0x%08x, len = %i\n",
+ (unsigned int) to, (int) len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+
+ /* Reject writes that are not page aligned */
+ if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) {
+ printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write non-page-aligned data\n");
+ return -EINVAL;
+ }
+
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = to & (mtd->oobsize - 1);
+
+ column = to & (mtd->writesize - 1);
+
+ /* Loop until all data is written */
+ while (written < len) {
+ u_char *wbuf = (u_char *) buf;
+
+ thislen = min_t(int, mtd->writesize - column, len - written);
+ thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten);
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+ /* Partial page write */
+ subpage = thislen < mtd->writesize;
+ if (subpage) {
+ memset(this->page_buf, 0xff, mtd->writesize);
+ memcpy(this->page_buf + column, buf, thislen);
+ wbuf = this->page_buf;
+ }
+
+ this->write_bufferram(mtd, to, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+
+ if (oob) {
+ oobbuf = this->oob_buf;
+
+ /* We send data to spare RAM with oobsize
+ * to prevent byte access */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (ops->mode == MTD_OPS_AUTO_OOB)
+ onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
+ else
+ memcpy(oobbuf + oobcolumn, oob, thisooblen);
+
+ oobwritten += thisooblen;
+ oob += thisooblen;
+ oobcolumn = 0;
+ } else
+ oobbuf = (u_char *) ffchars;
+
+ this->write_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+
+ this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+
+ ret = this->wait(mtd, FL_WRITING);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, to, !ret && !subpage);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
+ }
+
+ if (ret) {
+ printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret);
+ break;
+ }
+
+ /* Verify only when CONFIG_MTD_ONENAND_VERIFY_WRITE is enabled */
+ ret = onenand_verify(mtd, buf, to, thislen);
+ if (ret) {
+ printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
+ break;
+ }
+
+ written += thislen;
+
+ if (written == len)
+ break;
+
+ column = 0;
+ to += thislen;
+ buf += thislen;
+ }
+
+ ops->retlen = written;
+
+ return ret;
+}
+
+/**
+ * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * OneNAND write out-of-band
+ */
+static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, ret = 0, oobsize;
+ int written = 0, oobcmd;
+ u_char *oobbuf;
+ size_t len = ops->ooblen;
+ const u_char *buf = ops->oobbuf;
+ unsigned int mode = ops->mode;
+
+ to += ops->ooboffs;
+
+ pr_debug("onenand_write_oob_nolock: to = 0x%08x, len = %i\n",
+ (unsigned int) to, (int) len);
+
+ /* Initialize retlen, in case of early exit */
+ ops->oobretlen = 0;
+
+ if (mode == MTD_OPS_AUTO_OOB)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ column = to & (mtd->oobsize - 1);
+
+ if (unlikely(column >= oobsize)) {
+ printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n");
+ return -EINVAL;
+ }
+
+ /* For compatibility with NAND: Do not allow write past end of page */
+ if (unlikely(column + len > oobsize)) {
+ printk(KERN_ERR "onenand_write_oob_nolock: "
+ "Attempt to write past end of page\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow writes past end of device */
+ if (unlikely(to >= mtd->size ||
+ column + len > ((mtd->size >> this->page_shift) -
+ (to >> this->page_shift)) * oobsize)) {
+ printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n");
+ return -EINVAL;
+ }
+
+ oobbuf = this->oob_buf;
+
+ oobcmd = ONENAND_IS_4KB_PAGE(this) ?
+ ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
+ /* Loop until all data is written */
+ while (written < len) {
+ int thislen = min_t(int, oobsize, len - written);
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
+
+ /* We send data to spare ram with oobsize
+ * to prevent byte access */
+ memset(oobbuf, 0xff, mtd->oobsize);
+ if (mode == MTD_OPS_AUTO_OOB)
+ onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
+ else
+ memcpy(oobbuf + column, buf, thislen);
+ this->write_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+
+ if (ONENAND_IS_4KB_PAGE(this)) {
+ /* Set main area of DataRAM to 0xff */
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, 0, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
+
+ onenand_update_bufferram(mtd, to, 0);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, 0);
+ }
+
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret);
+ break;
+ }
+
+ ret = onenand_verify_oob(mtd, oobbuf, to);
+ if (ret) {
+ printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret);
+ break;
+ }
+
+ written += thislen;
+ if (written == len)
+ break;
+
+ to += mtd->writesize;
+ buf += thislen;
+ column = 0;
+ }
+
+ ops->oobretlen = written;
+
+ return ret;
+}
+
+/**
+ * onenand_write - [MTD Interface] compatibility function for onenand_write_ecc
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the data to write
+ *
+ * Write with ECC
+ */
+int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .ooblen = 0,
+ .datbuf = (u_char *) buf,
+ .oobbuf = NULL,
+ };
+ int ret;
+
+ onenand_get_device(mtd, FL_WRITING);
+ ret = onenand_write_ops_nolock(mtd, to, &ops);
+ onenand_release_device(mtd);
+
+ *retlen = ops.retlen;
+ return ret;
+}
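+
+/*
+ * Usage sketch mirroring the onenand_read() example above (illustrative;
+ * writes must be page aligned, as enforced by NOTALIGNED() in
+ * onenand_write_ops_nolock):
+ *
+ * size_t retlen;
+ * int err = onenand_write(&onenand_mtd, 0x100000, 2048, &retlen, buf);
+ */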
+
+/**
+ * onenand_write_oob - [MTD Interface] OneNAND write out-of-band
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param ops oob operation description structure
+ *
+ * OneNAND write main and/or out-of-band
+ */
+int onenand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ break;
+ case MTD_OPS_RAW:
+ /* Not implemented yet */
+ default:
+ return -EINVAL;
+ }
+
+ onenand_get_device(mtd, FL_WRITING);
+ if (ops->datbuf)
+ ret = onenand_write_ops_nolock(mtd, to, ops);
+ else
+ ret = onenand_write_oob_nolock(mtd, to, ops);
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad
+ * @param mtd MTD device structure
+ * @param ofs offset from device start
+ * @param allowbbt 1 if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table or
+ * by calling the scan function.
+ */
+static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+
+ /* Return info from the table */
+ return bbm->isbad_bbt(mtd, ofs, allowbbt);
+}
+
+/**
+ * onenand_erase - [MTD Interface] erase block(s)
+ * @param mtd MTD device structure
+ * @param instr erase instruction
+ *
+ * Erase one or more blocks
+ */
+int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int block_size;
+ loff_t addr = instr->addr;
+ unsigned int len = instr->len;
+ int ret = 0, i;
+ struct mtd_erase_region_info *region = NULL;
+ unsigned int region_end = 0;
+
+ pr_debug("onenand_erase: start = 0x%08x, len = %i\n",
+ (unsigned int) addr, len);
+
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ i = flexonenand_region(mtd, addr);
+ region = &mtd->eraseregions[i];
+
+ block_size = region->erasesize;
+ region_end = region->offset
+ + region->erasesize * region->numblocks;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ if (unlikely((addr - region->offset) & (block_size - 1))) {
+ pr_debug("onenand_erase:" " Unaligned address\n");
+ return -EINVAL;
+ }
+ } else {
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely(addr & (block_size - 1))) {
+ pr_debug("onenand_erase:" "Unaligned address\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ pr_debug("onenand_erase: Length not block aligned\n");
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_ERASING);
+
+ /* Loop through the blocks */
+ instr->state = MTD_ERASING;
+
+ while (len) {
+
+ /* Check if we have a bad block, we do not erase bad blocks */
+ if (instr->priv == 0 && onenand_block_isbad_nolock(mtd, addr, 0)) {
+ printk(KERN_WARNING "onenand_erase: attempt to erase"
+ " a bad block at addr 0x%08x\n",
+ (unsigned int) addr);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, block_size);
+
+ onenand_invalidate_bufferram(mtd, addr, block_size);
+
+ ret = this->wait(mtd, FL_ERASING);
+ /* Check, if it is write protected */
+ if (ret) {
+ if (ret == -EPERM)
+ pr_debug("onenand_erase: "
+ "Device is write protected!!!\n");
+ else
+ pr_debug("onenand_erase: "
+ "Failed erase, block %d\n",
+ onenand_block(this, addr));
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = addr;
+
+ goto erase_exit;
+ }
+
+ len -= block_size;
+ addr += block_size;
+
+ if (addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset
+ + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* This has been checked at MTD
+ * partitioning level. */
+ printk("onenand_erase: Unaligned address\n");
+ goto erase_exit;
+ }
+ }
+ }
+
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /* Deselect and wake up anyone waiting on the device */
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_sync - [MTD Interface] sync
+ * @param mtd MTD device structure
+ *
+ * Sync is actually a wait-for-chip-ready function
+ */
+void onenand_sync(struct mtd_info *mtd)
+{
+ pr_debug("onenand_sync: called\n");
+
+ /* Grab the lock and see if the device is available */
+ onenand_get_device(mtd, FL_SYNCING);
+
+ /* Release it and go back */
+ onenand_release_device(mtd);
+}
+
+/**
+ * onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ *
+ * Check whether the block is bad
+ */
+int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ /* Check for invalid offset */
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ onenand_get_device(mtd, FL_READING);
+ ret = onenand_block_isbad_nolock(mtd,ofs, 0);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_default_block_markbad - [DEFAULT] mark a block bad
+ * @param mtd MTD device structure
+ * @param ofs offset from device start
+ *
+ * This is the default implementation, which can be overridden by
+ * a hardware specific driver.
+ */
+static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ u_char buf[2] = {0, 0};
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .ooblen = 2,
+ .oobbuf = buf,
+ .ooboffs = 0,
+ };
+ int block;
+
+ /* Get block number */
+ block = onenand_block(this, ofs);
+ if (bbm->bbt)
+ bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* We write two bytes, so we don't have to mess with 16-bit access */
+ ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
+}
+
+/**
+ * onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ *
+ * Mark the block as bad
+ */
+int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret;
+
+ ret = onenand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ onenand_get_device(mtd, FL_WRITING);
+ ret = this->block_markbad(mtd, ofs);
+ onenand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to lock or unlock
+ * @param cmd lock or unlock command
+ *
+ * Lock or unlock one or more blocks
+ */
+static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, block, value, status;
+
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len);
+
+ /* Continuous lock scheme */
+ if (this->options & ONENAND_HAS_CONT_LOCK) {
+ /* Set start block address */
+ this->write_word(start,
+ this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Set end block address */
+ this->write_word(end - 1,
+ this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ /* Write unlock command */
+ this->command(mtd, cmd, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_UNLOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US))
+ printk(KERN_ERR "wp status = 0x%x\n", status);
+
+ return 0;
+ }
+
+ /* Block lock scheme */
+ for (block = start; block < end; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+
+ /* Set start block address */
+ this->write_word(block,
+ this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_UNLOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US))
+ printk(KERN_ERR "block = %d, wp status = 0x%x\n",
+ block, status);
+ }
+
+ return 0;
+}
+
+#ifdef ONENAND_LINUX
+/**
+ * onenand_lock - [MTD Interface] Lock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to unlock
+ *
+ * Lock one or more blocks
+ */
+static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+
+/**
+ * onenand_unlock - [MTD Interface] Unlock block(s)
+ * @param mtd MTD device structure
+ * @param ofs offset relative to mtd start
+ * @param len number of bytes to unlock
+ *
+ * Unlock one or more blocks
+ */
+static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ int ret;
+
+ onenand_get_device(mtd, FL_LOCKING);
+ ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+ onenand_release_device(mtd);
+ return ret;
+}
+#endif
+
+/**
+ * onenand_check_lock_status - [OneNAND Interface] Check lock status
+ * @param this onenand chip data structure
+ *
+ * Check lock status
+ */
+static int onenand_check_lock_status(struct onenand_chip *this)
+{
+ unsigned int value, block, status;
+ unsigned int end;
+
+ end = this->chipsize >> this->erase_shift;
+ for (block = 0; block < end; block++) {
+ /* Set block address */
+ value = onenand_block_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
+ /* Set start block address */
+ this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+
+ /* Check lock status */
+ status = this->read_word(this->base + ONENAND_REG_WP_STATUS);
+ if (!(status & ONENAND_WP_US)) {
+ printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * onenand_unlock_all - [OneNAND Interface] unlock all blocks
+ * @param mtd MTD device structure
+ *
+ * Unlock all blocks
+ */
+static void onenand_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = mtd->size;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Set start block address */
+ this->write_word(0, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* There's no return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Sanity check */
+ while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS)
+ & ONENAND_CTRL_ONGO)
+ continue;
+
+ /* Check lock status */
+ if (onenand_check_lock_status(this))
+ return;
+
+ /* Workaround for all block unlock in DDP */
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
+ /* All blocks on another chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+ }
+
+ onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+}
+
+/**
+ * onenand_check_features - Check and set OneNAND features
+ * @param mtd MTD data structure
+ *
+ * Check and set OneNAND features
+ * - lock scheme
+ * - two plane
+ */
+static void onenand_check_features(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int density, process;
+
+ /* Lock scheme depends on density and process */
+ density = onenand_get_density(this->device_id);
+ process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
+
+ /* Lock scheme */
+ switch (density) {
+ case ONENAND_DEVICE_DENSITY_4Gb:
+ if (ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ else
+ this->options |= ONENAND_HAS_4KB_PAGE;
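+ /* fall through */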
+
+ case ONENAND_DEVICE_DENSITY_2Gb:
+ /* 2Gb DDP does not have 2-plane */
+ if (!ONENAND_IS_DDP(this))
+ this->options |= ONENAND_HAS_2PLANE;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
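+ /* fall through */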
+
+ case ONENAND_DEVICE_DENSITY_1Gb:
+ /* A-Die has all block unlock */
+ if (process)
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ break;
+
+ default:
+ /* Some OneNAND has continuous lock scheme */
+ if (!process)
+ this->options |= ONENAND_HAS_CONT_LOCK;
+ break;
+ }
+
+ if (ONENAND_IS_MLC(this))
+ this->options |= ONENAND_HAS_4KB_PAGE;
+
+ if (ONENAND_IS_4KB_PAGE(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
+ if (this->options & ONENAND_HAS_CONT_LOCK)
+ printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
+ if (this->options & ONENAND_HAS_UNLOCK_ALL)
+ printk(KERN_DEBUG "Chip supports all-block unlock\n");
+ if (this->options & ONENAND_HAS_2PLANE)
+ printk(KERN_DEBUG "Chip has 2 planes\n");
+ if (this->options & ONENAND_HAS_4KB_PAGE)
+ printk(KERN_DEBUG "Chip has a 4KiB page size\n");
+}
+
+/**
+ * onenand_print_device_info - Print device and version IDs
+ * @param device device ID
+ * @param version version ID
+ *
+ * Print the device and version IDs and return the info string
+ */
+char *onenand_print_device_info(int device, int version)
+{
+ int vcc, demuxed, ddp, density, flexonenand;
+ char *dev_info = malloc(80);
+ char *p = dev_info;
+
+ vcc = device & ONENAND_DEVICE_VCC_MASK;
+ demuxed = device & ONENAND_DEVICE_IS_DEMUX;
+ ddp = device & ONENAND_DEVICE_IS_DDP;
+ density = onenand_get_density(device);
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ p += sprintf(dev_info, "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
+ ddp ? "(DDP)" : "",
+ (16 << density), vcc ? "2.65/3.3" : "1.8", device);
+
+ sprintf(p, "\nOneNAND version = 0x%04x", version);
+ printk("%s\n", dev_info);
+
+ return dev_info;
+}
+
+static const struct onenand_manufacturers onenand_manuf_ids[] = {
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
+ {ONENAND_MFR_SAMSUNG, "Samsung"},
+};
+
+/**
+ * onenand_check_maf - Check manufacturer ID
+ * @param manuf manufacturer ID
+ *
+ * Check manufacturer ID
+ */
+static int onenand_check_maf(int manuf)
+{
+ int size = ARRAY_SIZE(onenand_manuf_ids);
+ int i;
+#ifdef ONENAND_DEBUG
+ char *name;
+#endif
+
+ for (i = 0; i < size; i++)
+ if (manuf == onenand_manuf_ids[i].id)
+ break;
+
+#ifdef ONENAND_DEBUG
+ if (i < size)
+ name = onenand_manuf_ids[i].name;
+ else
+ name = "Unknown";
+
+ printk(KERN_DEBUG "OneNAND Manufacturer: %s (0x%0x)\n", name, manuf);
+#endif
+
+ return i == size;
+}
+
+/**
+ * flexonenand_get_boundary - Read the SLC boundary
+ * @param mtd - MTD device structure
+ *
+ * Fill up the boundary[] field in onenand_chip
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int die, bdry;
+ int syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize,
+ * mtd->eraseregions
+ * @param mtd - MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = 0;
+ ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08llx, erasesize: 0x%05x,"
+ " numblocks: %04u]\n", mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t) (blksperdie << this->erase_shift);
+ this->diesize[die] -= (loff_t) (this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
+
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd - MTD device structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads on the block give uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd,
+ int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OPS_PLACE_OOB,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "Block %d not erased.\n", block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - mtd info structure
+ * @param die - die whose boundary is to be changed
+ * @param boundary - new SLC boundary block number
+ * @param lock - non-zero to lock the boundary after writing it
+ */
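+/*
+ * Sequence sketch of the implementation below: enter PI (partition
+ * information) access mode, read the current PI word to verify the
+ * boundary is unlocked, erase the PI block, program the new boundary
+ * word, issue a PI update, then reset the chip and recompute the
+ * device size.
+ */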
+int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ if (die >= this->dies)
+ return -EINVAL;
+
+ if (boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk("flexonenand_set_boundary:"
+ "Invalid boundary value. "
+ "Boundary not changed.\n");
+ return -EINVAL;
+ }
+
+ /* Check that the blocks being converted are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new)
+ + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if the boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ goto out;
+ }
+
+ printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ addr = die ? this->diesize[0] : 0;
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk("flexonenand_set_boundary:"
+ "Failed PI erase for Die %d\n", die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk("flexonenand_set_boundary:"
+ "Failed PI write for Die %d\n", die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change */
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
+
+/**
+ * onenand_chip_probe - [OneNAND Interface] Probe the OneNAND chip
+ * @param mtd MTD device structure
+ *
+ * OneNAND detection method:
+ * Compare the values read via command with those from the registers
+ */
+static int onenand_chip_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int bram_maf_id, bram_dev_id, maf_id, dev_id;
+ int syscfg;
+
+ /* Save system configuration 1 */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Clear Sync. Burst Read mode to read BootRAM */
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ),
+ this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Send the command for reading device ID from BootRAM */
+ this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
+
+ /* Read manufacturer and device IDs from BootRAM */
+ bram_maf_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x0);
+ bram_dev_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x2);
+
+ /* Reset OneNAND to read default register values */
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM);
+
+ /* Wait reset */
+ if (this->wait(mtd, FL_RESETING))
+ return -ENXIO;
+
+ /* Restore system configuration 1 */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+
+ /* Check manufacturer ID */
+ if (onenand_check_maf(bram_maf_id))
+ return -ENXIO;
+
+ /* Read manufacturer and device IDs from Register */
+ maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+
+ /* Check OneNAND device */
+ if (maf_id != bram_maf_id || dev_id != bram_dev_id)
+ return -ENXIO;
+
+ return 0;
+}
+
+/**
+ * onenand_probe - [OneNAND Interface] Probe the OneNAND device
+ * @param mtd MTD device structure
+ *
+ * OneNAND detection method:
+ * Compare the values read via command with those from the registers
+ */
+int onenand_probe(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int dev_id, ver_id;
+ int density;
+ int ret;
+
+ ret = this->chip_probe(mtd);
+ if (ret)
+ return ret;
+
+ /* Read device IDs from Register */
+ dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
+ ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
+
+ /* Flash device information */
+ mtd->name = onenand_print_device_info(dev_id, ver_id);
+ this->device_id = dev_id;
+ this->version_id = ver_id;
+
+ /* Check OneNAND features */
+ onenand_check_features(mtd);
+
+ density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1));
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
+ this->chipsize = (16 << density) << 20;
+
+ /* OneNAND page size & block size */
+ /* The data buffer size is equal to page size */
+ mtd->writesize =
+ this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_4KB_PAGE(this))
+ mtd->writesize <<= 1;
+
+ mtd->oobsize = mtd->writesize >> 5;
+ /* Pages per block is always 64 in OneNAND */
+ mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
+
+ this->erase_shift = ffs(mtd->erasesize) - 1;
+ this->page_shift = ffs(mtd->writesize) - 1;
+ this->ppb_shift = (this->erase_shift - this->page_shift);
+ this->page_mask = (mtd->erasesize / mtd->writesize) - 1;
+ /* Set density mask; it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
+ /* It's the real page size */
+ this->writesize = mtd->writesize;
+
+ /* REVISIT: Multichip handling */
+
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
+
+ mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->_erase = onenand_erase;
+ mtd->_read_oob = onenand_read_oob;
+ mtd->_write_oob = onenand_write_oob;
+ mtd->_sync = onenand_sync;
+ mtd->_block_isbad = onenand_block_isbad;
+ mtd->_block_markbad = onenand_block_markbad;
+ mtd->writebufsize = mtd->writesize;
+
+ return 0;
+}
+
+/**
+ * onenand_scan - [OneNAND Interface] Scan for the OneNAND device
+ * @param mtd MTD device structure
+ * @param maxchips Number of chips to scan for
+ *
+ * This fills out all the not initialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values.
+ */
+int onenand_scan(struct mtd_info *mtd, int maxchips)
+{
+ int i;
+ struct onenand_chip *this = mtd->priv;
+
+ if (!this->read_word)
+ this->read_word = onenand_readw;
+ if (!this->write_word)
+ this->write_word = onenand_writew;
+
+ if (!this->command)
+ this->command = onenand_command;
+ if (!this->wait)
+ this->wait = onenand_wait;
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+
+ if (!this->read_bufferram)
+ this->read_bufferram = onenand_read_bufferram;
+ if (!this->write_bufferram)
+ this->write_bufferram = onenand_write_bufferram;
+
+ if (!this->chip_probe)
+ this->chip_probe = onenand_chip_probe;
+
+ if (!this->block_markbad)
+ this->block_markbad = onenand_default_block_markbad;
+ if (!this->scan_bbt)
+ this->scan_bbt = onenand_default_bbt;
+
+ if (onenand_probe(mtd))
+ return -ENXIO;
+
+ /* Set Sync. Burst Read after probing */
+ if (this->mmcontrol) {
+ printk(KERN_INFO "OneNAND Sync. Burst Read support\n");
+ this->read_bufferram = onenand_sync_read_bufferram;
+ }
+
+ /* Allocate buffers, if necessary */
+ if (!this->page_buf) {
+ this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL);
+ if (!this->page_buf) {
+ printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n");
+ return -ENOMEM;
+ }
+ this->options |= ONENAND_PAGEBUF_ALLOC;
+ }
+ if (!this->oob_buf) {
+ this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL);
+ if (!this->oob_buf) {
+ printk(KERN_ERR "onenand_scan: Can't allocate oob_buf\n");
+ if (this->options & ONENAND_PAGEBUF_ALLOC) {
+ this->options &= ~ONENAND_PAGEBUF_ALLOC;
+ kfree(this->page_buf);
+ }
+ return -ENOMEM;
+ }
+ this->options |= ONENAND_OOBBUF_ALLOC;
+ }
+
+ this->state = FL_READY;
+
+ /*
+ * Allow subpage writes up to oobsize.
+ */
+ switch (mtd->oobsize) {
+ case 128:
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 0;
+ break;
+
+ case 64:
+ this->ecclayout = &onenand_oob_64;
+ mtd->subpage_sft = 2;
+ break;
+
+ case 32:
+ this->ecclayout = &onenand_oob_32;
+ mtd->subpage_sft = 1;
+ break;
+
+ default:
+ printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ mtd->subpage_sft = 0;
+ /* To prevent kernel oops */
+ this->ecclayout = &onenand_oob_32;
+ break;
+ }
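+ /*
+ * Example (illustrative): a 2KiB-page device reports oobsize 64,
+ * giving subpage_sft = 2, so subpagesize below becomes
+ * 2048 >> 2 = 512 bytes.
+ */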
+
+ this->subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area
+ */
+ this->ecclayout->oobavail = 0;
+
+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE &&
+ this->ecclayout->oobfree[i].length; i++)
+ this->ecclayout->oobavail +=
+ this->ecclayout->oobfree[i].length;
+ mtd->oobavail = this->ecclayout->oobavail;
+
+ mtd->ecclayout = this->ecclayout;
+
+ /* Unlock the whole chip */
+ onenand_unlock_all(mtd);
+
+ return this->scan_bbt(mtd);
+}
+
+/**
+ * onenand_release - [OneNAND Interface] Free resources held by the OneNAND device
+ * @param mtd MTD device structure
+ */
+void onenand_release(struct mtd_info *mtd)
+{
+}
diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_bbt.c b/roms/u-boot/drivers/mtd/onenand/onenand_bbt.c
new file mode 100644
index 000000000..eca9edff6
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/onenand/onenand_bbt.c
@@ -0,0 +1,265 @@
+/*
+ * linux/drivers/mtd/onenand/onenand_bbt.c
+ *
+ * Bad Block Table support for the OneNAND driver
+ *
+ * Copyright(c) 2005-2008 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * TODO:
+ * Split BBT core and chip specific BBT.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <log.h>
+#include <linux/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <malloc.h>
+
+#include <linux/errno.h>
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @param buf the buffer to search
+ * @param len the length of buffer to search
+ * @param paglen the page length
+ * @param td search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers. Same as check_pattern, but
+ * no optional empty check and the pattern is expected to start
+ * at offset 0.
+ */
+static int check_short_pattern(uint8_t *buf, int len, int paglen,
+ struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @param mtd MTD device structure
+ * @param buf temporary buffer
+ * @param bd descriptor for the good/bad block search pattern
+ * @param chip create the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device
+ * for the given good/bad block identify pattern
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int i, j, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen, ooblen;
+ struct mtd_oob_ops ops;
+ int rgn;
+
+ printk(KERN_INFO "Scanning device for bad blocks\n");
+
+ len = 1;
+
+ /* We need to read only a few bytes from the OOB area */
+ scanlen = ooblen = 0;
+ readlen = bd->len;
+
+ /* chip == -1 case only */
+ /* Note that numblocks is 2 * (real numblocks) here;
+ * see i += 2 below as it makes shifting and masking less painful
+ */
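+ /*
+ * Illustrative sketch of the bit math used below: with two BBT bits
+ * per block and the doubled index i = 2 * block, the entry lives in
+ * byte bbm->bbt[i >> 3] at bit offset (i & 0x6); e.g. block 5 ->
+ * i = 10 -> byte 1, bits 2-3.
+ */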
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
+ startblock = 0;
+ from = 0;
+
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.ooblen = readlen;
+ ops.oobbuf = buf;
+ ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
+
+ for (i = startblock; i < numblocks;) {
+ int ret;
+
+ for (j = 0; j < len; j++) {
+ /* No need to read pages fully,
+ * just read the required OOB bytes */
+ ret = onenand_bbt_read_oob(mtd,
+ from + j * mtd->writesize +
+ bd->offs, &ops);
+
+ /* A fatal read error aborts the scan; lesser read
+ * errors just mark the block bad below */
+ if (ret == ONENAND_BBT_READ_FATAL_ERROR)
+ return -EIO;
+
+ if (ret || check_short_pattern
+ (&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
+ bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ printk(KERN_WARNING
+ "Bad eraseblock %d at 0x%08x\n", i >> 1,
+ (unsigned int)from);
+ break;
+ }
+ }
+ i += 2;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
+ }
+
+ return 0;
+}
+
+/**
+ * onenand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @param mtd MTD device structure
+ * @param bd descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device
+ * for manufacturer / software marked good / bad blocks
+ */
+static inline int onenand_memory_bbt(struct mtd_info *mtd,
+ struct nand_bbt_descr *bd)
+{
+ unsigned char data_buf[MAX_ONENAND_PAGESIZE];
+
+ return create_bbt(mtd, data_buf, bd, -1);
+}
+
+/**
+ * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
+ * @param mtd MTD device structure
+ * @param offs offset in the device
+ * @param allowbbt allow access to bad block table region
+ */
+static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 2 */
+ block = (int) (onenand_block(this, offs) << 1);
+ res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int)offs, block >> 1, res);
+
+ switch ((int)res) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ return 1;
+ case 0x02:
+ return allowbbt ? 0 : 1;
+ }
+
+ return 1;
+}
+
+/**
+ * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
+ * @param mtd MTD device structure
+ * @param bd descriptor for the good/bad block search pattern
+ *
+ * The function checks, if a bad block table(s) is/are already
+ * available. If not it scans the device for manufacturer
+ * marked good / bad blocks and writes the bad block table(s) to
+ * the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed
+ * by calling the onenand_free_bbt function.
+ *
+ */
+int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm = this->bbm;
+ int len, ret = 0;
+
+ len = this->chipsize >> (this->erase_shift + 2);
+ /* Allocate memory (2 bits per block) */
+ bbm->bbt = malloc(len);
+ if (!bbm->bbt)
+ return -ENOMEM;
+ /* Clear the memory bad block table */
+ memset(bbm->bbt, 0x00, len);
+
+ /* Set the bad block position */
+ bbm->badblockpos = ONENAND_BADBLOCK_POS;
+
+ /* Set erase shift */
+ bbm->bbt_erase_shift = this->erase_shift;
+
+ if (!bbm->isbad_bbt)
+ bbm->isbad_bbt = onenand_isbad_bbt;
+
+ /* Scan the device to build a memory based bad block table */
+ if ((ret = onenand_memory_bbt(mtd, bd))) {
+ printk(KERN_ERR
+ "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
+ free(bbm->bbt);
+ bbm->bbt = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern,
+};
+
+/**
+ * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
+ * @param mtd MTD device structure
+ *
+ * This function selects the default bad block table
+ * support for the device and calls the onenand_scan_bbt function
+ */
+int onenand_default_bbt(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct bbm_info *bbm;
+
+ this->bbm = malloc(sizeof(struct bbm_info));
+ if (!this->bbm)
+ return -ENOMEM;
+
+ bbm = this->bbm;
+
+ memset(bbm, 0, sizeof(struct bbm_info));
+
+ /* The 1KB page has the same configuration as the 2KB page */
+ if (!bbm->badblock_pattern)
+ bbm->badblock_pattern = &largepage_memorybased;
+
+ return onenand_scan_bbt(mtd, bbm->badblock_pattern);
+}
diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_spl.c b/roms/u-boot/drivers/mtd/onenand/onenand_spl.c
new file mode 100644
index 000000000..ab6f1a8be
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/onenand/onenand_spl.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * Based on code:
+ * Copyright (C) 2005-2009 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/mtd/onenand_regs.h>
+#include <onenand_uboot.h>
+
+/*
+ * Device geometry:
+ * - 2048B page, 128KiB erase block.
+ * - 4096B page, 256KiB erase block.
+ */
+enum onenand_spl_pagesize {
+ PAGE_2K = 2048,
+ PAGE_4K = 4096,
+};
+
+static unsigned int density_mask;
+
+#define ONENAND_PAGES_PER_BLOCK 64
+#define onenand_sector_address(page) (page << 2)
+#define onenand_buffer_address() ((1 << 3) << 8)
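+/*
+ * Hedged sketch of the addressing assumed above (per the usual OneNAND
+ * register layout): START_ADDRESS8 holds the flash page address in bits
+ * 2-7 and the 512-byte sector address in bits 0-1, so (page << 2) starts
+ * at sector 0 of that page; START_BUFFER holds the buffer sector address
+ * in bits 8-11, where (1 << 3) selects DataRAM0, sector 0.
+ */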
+
+static inline int onenand_block_address(int block)
+{
+ /* Device Flash Core select, NAND Flash Block Address */
+ if (block & density_mask)
+ return ONENAND_DDP_CHIP1 | (block ^ density_mask);
+
+ return block;
+}
+
+static inline int onenand_bufferram_address(int block)
+{
+ /* Device BufferRAM Select */
+ if (block & density_mask)
+ return ONENAND_DDP_CHIP1;
+
+ return ONENAND_DDP_CHIP0;
+}
+
+static inline uint16_t onenand_readw(uint32_t addr)
+{
+ return readw(CONFIG_SYS_ONENAND_BASE + addr);
+}
+
+static inline void onenand_writew(uint16_t value, uint32_t addr)
+{
+ writew(value, CONFIG_SYS_ONENAND_BASE + addr);
+}
+
+static enum onenand_spl_pagesize onenand_spl_get_geometry(void)
+{
+ unsigned int dev_id, density, size;
+
+ if (!onenand_readw(ONENAND_REG_TECHNOLOGY)) {
+ dev_id = onenand_readw(ONENAND_REG_DEVICE_ID);
+ density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density &= ONENAND_DEVICE_DENSITY_MASK;
+
+ if (density < ONENAND_DEVICE_DENSITY_4Gb)
+ return PAGE_2K;
+
+ if (dev_id & ONENAND_DEVICE_IS_DDP) {
+ size = onenand_readw(ONENAND_REG_DATA_BUFFER_SIZE);
+ density_mask = 1 << (18 + density - ffs(size));
+ return PAGE_2K;
+ }
+ }
+
+ return PAGE_4K;
+}
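+/*
+ * Worked example (illustrative, assuming the usual density encoding in
+ * which a 4Gb part reads back 5): with a 2KiB data buffer, ffs(2048) = 12,
+ * so density_mask = 1 << (18 + 5 - 12) = 2048. A 4Gb device has 4096
+ * blocks of 128KiB, so the mask flips exactly at the first block of the
+ * second die.
+ */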
+
+static int onenand_spl_read_page(uint32_t block, uint32_t page, uint32_t *buf,
+ enum onenand_spl_pagesize pagesize)
+{
+ const uint32_t addr = CONFIG_SYS_ONENAND_BASE + ONENAND_DATARAM;
+ uint32_t offset;
+
+ onenand_writew(onenand_block_address(block),
+ ONENAND_REG_START_ADDRESS1);
+
+ onenand_writew(onenand_bufferram_address(block),
+ ONENAND_REG_START_ADDRESS2);
+
+ onenand_writew(onenand_sector_address(page),
+ ONENAND_REG_START_ADDRESS8);
+
+ onenand_writew(onenand_buffer_address(),
+ ONENAND_REG_START_BUFFER);
+
+ onenand_writew(ONENAND_INT_CLEAR, ONENAND_REG_INTERRUPT);
+
+ onenand_writew(ONENAND_CMD_READ, ONENAND_REG_COMMAND);
+
+ while (!(onenand_readw(ONENAND_REG_INTERRUPT) & ONENAND_INT_READ))
+ continue;
+
+ /* Check for invalid block mark */
+ if (page < 2 && (onenand_readw(ONENAND_SPARERAM) != 0xffff))
+ return 1;
+
+ for (offset = 0; offset < pagesize; offset += 4)
+ buf[offset / 4] = readl(addr + offset);
+
+ return 0;
+}
+
+#ifdef CONFIG_SPL_UBI
+/* Temporary storage for non-page-aligned and non-page-sized reads. */
+static u8 scratch_buf[PAGE_4K];
+
+/**
+ * onenand_spl_read_block - Read data from physical eraseblock into a buffer
+ * @block: Number of the physical eraseblock
+ * @offset:	Data offset from the start of @block
+ * @len: Data size to read
+ * @dst: Address of the destination buffer
+ *
+ * Notes:
+ *	@offset + @len must not extend beyond the physical
+ *	eraseblock. No sanity check is done, for simplicity.
+ */
+int onenand_spl_read_block(int block, int offset, int len, void *dst)
+{
+ int page, read;
+ static int psize;
+
+ if (!psize)
+ psize = onenand_spl_get_geometry();
+
+ /* Calculate the page number */
+ page = offset / psize;
+ /* Offset to the start of a flash page */
+ offset = offset % psize;
+
+ while (len) {
+ /*
+ * Non page aligned reads go to the scratch buffer.
+ * Page aligned reads go directly to the destination.
+ */
+ if (offset || len < psize) {
+ onenand_spl_read_page(block, page,
+ (uint32_t *)scratch_buf, psize);
+ read = min(len, psize - offset);
+ memcpy(dst, scratch_buf + offset, read);
+ offset = 0;
+ } else {
+ onenand_spl_read_page(block, page, dst, psize);
+ read = psize;
+ }
+ page++;
+ len -= read;
+ dst += read;
+ }
+ return 0;
+}
+#endif
+
+void onenand_spl_load_image(uint32_t offs, uint32_t size, void *dst)
+{
+ uint32_t *addr = (uint32_t *)dst;
+ uint32_t to_page;
+ uint32_t block;
+ uint32_t page, rpage;
+ enum onenand_spl_pagesize pagesize;
+ int ret;
+
+ pagesize = onenand_spl_get_geometry();
+
+ /*
+ * The page can be either 2k or 4k; divide by a constant in each
+ * branch so the compiler reduces the division to a shift instead
+ * of pulling software-division helpers into the SPL.
+ */
+ if (pagesize == 2048) {
+ page = offs / 2048;
+ to_page = page + DIV_ROUND_UP(size, 2048);
+ } else {
+ page = offs / 4096;
+ to_page = page + DIV_ROUND_UP(size, 4096);
+ }
+
+ for (; page <= to_page; page++) {
+ block = page / ONENAND_PAGES_PER_BLOCK;
+ rpage = page & (ONENAND_PAGES_PER_BLOCK - 1);
+ ret = onenand_spl_read_page(block, rpage, addr, pagesize);
+ if (ret)
+ page += ONENAND_PAGES_PER_BLOCK - 1;
+ else
+ addr += pagesize / 4;
+ }
+}
diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_uboot.c b/roms/u-boot/drivers/mtd/onenand/onenand_uboot.c
new file mode 100644
index 000000000..6893394e0
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/onenand/onenand_uboot.c
@@ -0,0 +1,56 @@
+/*
+ * drivers/mtd/onenand/onenand_uboot.c
+ *
+ * Copyright (C) 2005-2008 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * OneNAND initialization in U-Boot
+ */
+
+#include <common.h>
+#include <linux/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+
+struct mtd_info onenand_mtd;
+struct onenand_chip onenand_chip;
+static __attribute__((unused)) char dev_name[] = "onenand0";
+
+void onenand_init(void)
+{
+ int err = 0;
+ memset(&onenand_mtd, 0, sizeof(struct mtd_info));
+ memset(&onenand_chip, 0, sizeof(struct onenand_chip));
+
+ onenand_mtd.priv = &onenand_chip;
+
+#ifdef CONFIG_USE_ONENAND_BOARD_INIT
+ /* Allow the board to do any required init */
+ err = onenand_board_init(&onenand_mtd);
+#else
+ onenand_chip.base = (void *) CONFIG_SYS_ONENAND_BASE;
+#endif
+
+ if (!err && !(onenand_scan(&onenand_mtd, 1))) {
+ if (onenand_chip.device_id & DEVICE_IS_FLEXONENAND)
+ puts("Flex-");
+ puts("OneNAND: ");
+
+#ifdef CONFIG_MTD
+ /*
+ * Add MTD device so that we can reference it later
+ * via the mtdcore infrastructure (e.g. ubi).
+ */
+ onenand_mtd.name = dev_name;
+ add_mtd_device(&onenand_mtd);
+#endif
+ }
+ print_size(onenand_chip.chipsize, "\n");
+}
diff --git a/roms/u-boot/drivers/mtd/onenand/samsung.c b/roms/u-boot/drivers/mtd/onenand/samsung.c
new file mode 100644
index 000000000..657abaab8
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/onenand/samsung.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * S5PC100 OneNAND driver in U-Boot
+ *
+ * Copyright (C) 2008-2009 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Implementation:
+ * Emulate the pseudo BufferRAM
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <linux/compat.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/onenand.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/samsung_onenand.h>
+
+#include <asm/io.h>
+#include <linux/errno.h>
+
+#define ONENAND_ERASE_STATUS 0x00
+#define ONENAND_MULTI_ERASE_SET 0x01
+#define ONENAND_ERASE_START 0x03
+#define ONENAND_UNLOCK_START 0x08
+#define ONENAND_UNLOCK_END 0x09
+#define ONENAND_LOCK_START 0x0A
+#define ONENAND_LOCK_END 0x0B
+#define ONENAND_LOCK_TIGHT_START 0x0C
+#define ONENAND_LOCK_TIGHT_END 0x0D
+#define ONENAND_UNLOCK_ALL 0x0E
+#define ONENAND_OTP_ACCESS 0x12
+#define ONENAND_SPARE_ACCESS_ONLY 0x13
+#define ONENAND_MAIN_ACCESS_ONLY 0x14
+#define ONENAND_ERASE_VERIFY 0x15
+#define ONENAND_MAIN_SPARE_ACCESS 0x16
+#define ONENAND_PIPELINE_READ 0x4000
+
+#if defined(CONFIG_S5P)
+#define MAP_00 (0x0 << 26)
+#define MAP_01 (0x1 << 26)
+#define MAP_10 (0x2 << 26)
+#define MAP_11 (0x3 << 26)
+#endif
+
+/* read/write of XIP buffer */
+#define CMD_MAP_00(mem_addr) (MAP_00 | ((mem_addr) << 1))
+/* read/write to the memory device */
+#define CMD_MAP_01(mem_addr) (MAP_01 | (mem_addr))
+/* control special functions of the memory device */
+#define CMD_MAP_10(mem_addr) (MAP_10 | (mem_addr))
+/* direct interface(direct access) with the memory device */
+#define CMD_MAP_11(mem_addr) (MAP_11 | ((mem_addr) << 2))
+
+struct s3c_onenand {
+ struct mtd_info *mtd;
+ void __iomem *base;
+ void __iomem *ahb_addr;
+ int bootram_command;
+ void __iomem *page_buf;
+ void __iomem *oob_buf;
+ unsigned int (*mem_addr)(int fba, int fpa, int fsa);
+ struct samsung_onenand *reg;
+};
+
+static struct s3c_onenand *onenand;
+
+static int s3c_read_cmd(unsigned int cmd)
+{
+ return readl(onenand->ahb_addr + cmd);
+}
+
+static void s3c_write_cmd(int value, unsigned int cmd)
+{
+ writel(value, onenand->ahb_addr + cmd);
+}
+
+/*
+ * MEM_ADDR
+ *
+ * fba: flash block address
+ * fpa: flash page address
+ * fsa: flash sector address
+ *
+ * Returns the buffer address on the memory device;
+ * it will be combined with CMD_MAP_XX.
+ */
+#if defined(CONFIG_S5P)
+static unsigned int s3c_mem_addr(int fba, int fpa, int fsa)
+{
+ return (fba << 13) | (fpa << 7) | (fsa << 5);
+}
+#endif
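+/*
+ * Worked example (illustrative): with the layout above, block 3, page 5,
+ * sector 0 maps to (3 << 13) | (5 << 7) | (0 << 5) = 0x6280, which is
+ * then combined with one of the CMD_MAP_xx windows.
+ */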
+
+static void s3c_onenand_reset(void)
+{
+ unsigned long timeout = 0x10000;
+ int stat;
+
+ writel(ONENAND_MEM_RESET_COLD, &onenand->reg->mem_reset);
+ while (timeout--) {
+ stat = readl(&onenand->reg->int_err_stat);
+ if (stat & RST_CMP)
+ break;
+ }
+ stat = readl(&onenand->reg->int_err_stat);
+ writel(stat, &onenand->reg->int_err_ack);
+
+ /* Clear interrupt */
+ writel(0x0, &onenand->reg->int_err_ack);
+ /* Clear the ECC status */
+ writel(0x0, &onenand->reg->ecc_err_stat);
+}
+
+static unsigned short s3c_onenand_readw(void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+ int value;
+
+ /* Used at probe time */
+ switch (reg) {
+ case ONENAND_REG_MANUFACTURER_ID:
+ return readl(&onenand->reg->manufact_id);
+ case ONENAND_REG_DEVICE_ID:
+ return readl(&onenand->reg->device_id);
+ case ONENAND_REG_VERSION_ID:
+ return readl(&onenand->reg->flash_ver_id);
+ case ONENAND_REG_DATA_BUFFER_SIZE:
+ return readl(&onenand->reg->data_buf_size);
+ case ONENAND_REG_TECHNOLOGY:
+ return readl(&onenand->reg->tech);
+ case ONENAND_REG_SYS_CFG1:
+ return readl(&onenand->reg->mem_cfg);
+
+ /* Used when checking unlock-all status */
+ case ONENAND_REG_CTRL_STATUS:
+ return 0;
+
+ case ONENAND_REG_WP_STATUS:
+ return ONENAND_WP_US;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if (reg < ONENAND_DATARAM && onenand->bootram_command) {
+ if (word_addr == 0)
+ return readl(&onenand->reg->manufact_id);
+ if (word_addr == 1)
+ return readl(&onenand->reg->device_id);
+ if (word_addr == 2)
+ return readl(&onenand->reg->flash_ver_id);
+ }
+
+ value = s3c_read_cmd(CMD_MAP_11(word_addr)) & 0xffff;
+ printk(KERN_INFO "s3c_onenand_readw: Illegal access"
+ " at reg 0x%x, value 0x%x\n", word_addr, value);
+ return value;
+}
+
+static void s3c_onenand_writew(unsigned short value, void __iomem *addr)
+{
+ struct onenand_chip *this = onenand->mtd->priv;
+ int reg = addr - this->base;
+ int word_addr = reg >> 1;
+
+ /* Used at probe time */
+ switch (reg) {
+ case ONENAND_REG_SYS_CFG1:
+ writel(value, &onenand->reg->mem_cfg);
+ return;
+
+ case ONENAND_REG_START_ADDRESS1:
+ case ONENAND_REG_START_ADDRESS2:
+ return;
+
+ /* Lock/lock-tight/unlock/unlock_all */
+ case ONENAND_REG_START_BLOCK_ADDRESS:
+ return;
+
+ default:
+ break;
+ }
+
+ /* BootRAM access control */
+ if (reg < ONENAND_DATARAM) {
+ if (value == ONENAND_CMD_READID) {
+ onenand->bootram_command = 1;
+ return;
+ }
+ if (value == ONENAND_CMD_RESET) {
+ writel(ONENAND_MEM_RESET_COLD,
+ &onenand->reg->mem_reset);
+ onenand->bootram_command = 0;
+ return;
+ }
+ }
+
+ printk(KERN_INFO "s3c_onenand_writew: Illegal access"
+ " at reg 0x%x, value 0x%x\n", word_addr, value);
+
+ s3c_write_cmd(value, CMD_MAP_11(word_addr));
+}
+
+static int s3c_onenand_wait(struct mtd_info *mtd, int state)
+{
+ unsigned int flags = INT_ACT;
+ unsigned int stat, ecc;
+ unsigned long timeout = 0x100000;
+
+ switch (state) {
+ case FL_READING:
+ flags |= BLK_RW_CMP | LOAD_CMP;
+ break;
+ case FL_WRITING:
+ flags |= BLK_RW_CMP | PGM_CMP;
+ break;
+ case FL_ERASING:
+ flags |= BLK_RW_CMP | ERS_CMP;
+ break;
+ case FL_LOCKING:
+ flags |= BLK_RW_CMP;
+ break;
+ default:
+ break;
+ }
+
+ while (timeout--) {
+ stat = readl(&onenand->reg->int_err_stat);
+ if (stat & flags)
+ break;
+ }
+
+ /* To get the correct interrupt status in the timeout case */
+ stat = readl(&onenand->reg->int_err_stat);
+ writel(stat, &onenand->reg->int_err_ack);
+
+ /*
+ * The spec says to check the controller status first.
+ * However, to get correct information in the power-off
+ * recovery (POR) case, the ECC status must be read first.
+ */
+ if (stat & LOAD_CMP) {
+ ecc = readl(&onenand->reg->ecc_err_stat);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ printk(KERN_INFO "%s: ECC error = 0x%04x\n",
+ __func__, ecc);
+ mtd->ecc_stats.failed++;
+ return -EBADMSG;
+ }
+ }
+
+ if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) {
+ printk(KERN_INFO "%s: controller error = 0x%04x\n",
+ __func__, stat);
+ if (stat & LOCKED_BLK)
+ printk(KERN_INFO "%s: it's locked error = 0x%04x\n",
+ __func__, stat);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int s3c_onenand_command(struct mtd_info *mtd, int cmd,
+ loff_t addr, size_t len)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int *m, *s;
+ int fba, fpa, fsa = 0;
+ unsigned int mem_addr;
+ int i, mcount, scount;
+ int index;
+
+ fba = (int) (addr >> this->erase_shift);
+ fpa = (int) (addr >> this->page_shift);
+ fpa &= this->page_mask;
+
+ mem_addr = onenand->mem_addr(fba, fpa, fsa);
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ case ONENAND_CMD_READOOB:
+ case ONENAND_CMD_BUFFERRAM:
+ ONENAND_SET_NEXT_BUFFERRAM(this);
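+ /* fall through */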
+ default:
+ break;
+ }
+
+ index = ONENAND_CURRENT_BUFFERRAM(this);
+
+ /*
+ * Emulate two BufferRAMs and access them through 4-byte pointers
+ */
+ m = (unsigned int *) onenand->page_buf;
+ s = (unsigned int *) onenand->oob_buf;
+
+ if (index) {
+ m += (this->writesize >> 2);
+ s += (mtd->oobsize >> 2);
+ }
+
+ mcount = mtd->writesize >> 2;
+ scount = mtd->oobsize >> 2;
+
+ switch (cmd) {
+ case ONENAND_CMD_READ:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(CMD_MAP_01(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_READOOB:
+ writel(TSRF, &onenand->reg->trans_spare);
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ *m++ = s3c_read_cmd(CMD_MAP_01(mem_addr));
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ *s++ = s3c_read_cmd(CMD_MAP_01(mem_addr));
+
+ writel(0, &onenand->reg->trans_spare);
+ return 0;
+
+ case ONENAND_CMD_PROG:
+ /* Main */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(*m++, CMD_MAP_01(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_PROGOOB:
+ writel(TSRF, &onenand->reg->trans_spare);
+
+ /* Main - dummy write */
+ for (i = 0; i < mcount; i++)
+ s3c_write_cmd(0xffffffff, CMD_MAP_01(mem_addr));
+
+ /* Spare */
+ for (i = 0; i < scount; i++)
+ s3c_write_cmd(*s++, CMD_MAP_01(mem_addr));
+
+ writel(0, &onenand->reg->trans_spare);
+ return 0;
+
+ case ONENAND_CMD_UNLOCK_ALL:
+ s3c_write_cmd(ONENAND_UNLOCK_ALL, CMD_MAP_10(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_ERASE:
+ s3c_write_cmd(ONENAND_ERASE_START, CMD_MAP_10(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_MULTIBLOCK_ERASE:
+ s3c_write_cmd(ONENAND_MULTI_ERASE_SET, CMD_MAP_10(mem_addr));
+ return 0;
+
+ case ONENAND_CMD_ERASE_VERIFY:
+ s3c_write_cmd(ONENAND_ERASE_VERIFY, CMD_MAP_10(mem_addr));
+ return 0;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area)
+{
+ struct onenand_chip *this = mtd->priv;
+ int index = ONENAND_CURRENT_BUFFERRAM(this);
+ unsigned char *p;
+
+ if (area == ONENAND_DATARAM) {
+ p = (unsigned char *) onenand->page_buf;
+ if (index == 1)
+ p += this->writesize;
+ } else {
+ p = (unsigned char *) onenand->oob_buf;
+ if (index == 1)
+ p += mtd->oobsize;
+ }
+
+ return p;
+}
+
+static int onenand_read_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(buffer, p + offset, count);
+ return 0;
+}
+
+static int onenand_write_bufferram(struct mtd_info *mtd, loff_t addr, int area,
+ const unsigned char *buffer, int offset,
+ size_t count)
+{
+ unsigned char *p;
+
+ p = s3c_get_bufferram(mtd, area);
+ memcpy(p + offset, buffer, count);
+ return 0;
+}
+
+static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state)
+{
+ struct samsung_onenand *reg = (struct samsung_onenand *)onenand->base;
+ unsigned int flags = INT_ACT | LOAD_CMP;
+ unsigned int stat;
+ unsigned long timeout = 0x10000;
+
+ while (timeout--) {
+ stat = readl(&reg->int_err_stat);
+ if (stat & flags)
+ break;
+ }
+ /* To get the correct interrupt status in the timeout case */
+ stat = readl(&onenand->reg->int_err_stat);
+ writel(stat, &onenand->reg->int_err_ack);
+
+ if (stat & LD_FAIL_ECC_ERR) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+
+ if (stat & LOAD_CMP) {
+ int ecc = readl(&onenand->reg->ecc_err_stat);
+ if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) {
+ s3c_onenand_reset();
+ return ONENAND_BBT_READ_ERROR;
+ }
+ }
+
+ return 0;
+}
+
+static void s3c_onenand_check_lock_status(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int block, end;
+
+ end = this->chipsize >> this->erase_shift;
+
+ for (block = 0; block < end; block++) {
+ s3c_read_cmd(CMD_MAP_01(onenand->mem_addr(block, 0, 0)));
+
+ if (readl(&onenand->reg->int_err_stat) & LOCKED_BLK) {
+ printf("block %d is write-protected!\n", block);
+ writel(LOCKED_BLK, &onenand->reg->int_err_ack);
+ }
+ }
+}
+
+static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs,
+ size_t len, int cmd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int start, end, start_mem_addr, end_mem_addr;
+
+ start = ofs >> this->erase_shift;
+ start_mem_addr = onenand->mem_addr(start, 0, 0);
+ end = start + (len >> this->erase_shift) - 1;
+ end_mem_addr = onenand->mem_addr(end, 0, 0);
+
+ if (cmd == ONENAND_CMD_LOCK) {
+ s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(start_mem_addr));
+ s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(end_mem_addr));
+ } else {
+ s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(start_mem_addr));
+ s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(end_mem_addr));
+ }
+
+ this->wait(mtd, FL_LOCKING);
+}
+
+static void s3c_onenand_unlock_all(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ loff_t ofs = 0;
+ size_t len = this->chipsize;
+
+ /* FIXME workaround */
+ this->subpagesize = mtd->writesize;
+ mtd->subpage_sft = 0;
+
+ if (this->options & ONENAND_HAS_UNLOCK_ALL) {
+ /* Write unlock command */
+ this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0);
+
+ /* No need to check return value */
+ this->wait(mtd, FL_LOCKING);
+
+ /* Workaround for all block unlock in DDP */
+ if (!ONENAND_IS_DDP(this)) {
+ s3c_onenand_check_lock_status(mtd);
+ return;
+ }
+
+ /* All blocks on the other chip */
+ ofs = this->chipsize >> 1;
+ len = this->chipsize >> 1;
+ }
+
+ s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
+ s3c_onenand_check_lock_status(mtd);
+}
+
+int s5pc110_chip_probe(struct mtd_info *mtd)
+{
+ return 0;
+}
+
+int s5pc210_chip_probe(struct mtd_info *mtd)
+{
+ return 0;
+}
+
+void s3c_onenand_init(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ u32 size = (4 << 10); /* 4 KiB */
+
+ onenand = malloc(sizeof(struct s3c_onenand));
+ if (!onenand)
+ return;
+
+ onenand->page_buf = malloc(size * sizeof(char));
+ if (!onenand->page_buf)
+ return;
+ memset(onenand->page_buf, 0xff, size);
+
+ onenand->oob_buf = malloc(128 * sizeof(char));
+ if (!onenand->oob_buf)
+ return;
+ memset(onenand->oob_buf, 0xff, 128);
+
+ onenand->mtd = mtd;
+
+#if defined(CONFIG_S5P)
+ onenand->base = (void *)0xE7100000;
+ onenand->ahb_addr = (void *)0xB0000000;
+#endif
+ onenand->mem_addr = s3c_mem_addr;
+ onenand->reg = (struct samsung_onenand *)onenand->base;
+
+ this->read_word = s3c_onenand_readw;
+ this->write_word = s3c_onenand_writew;
+
+ this->wait = s3c_onenand_wait;
+ this->bbt_wait = s3c_onenand_bbt_wait;
+ this->unlock_all = s3c_onenand_unlock_all;
+ this->command = s3c_onenand_command;
+
+ this->read_bufferram = onenand_read_bufferram;
+ this->write_bufferram = onenand_write_bufferram;
+
+ this->options |= ONENAND_RUNTIME_BADBLOCK_CHECK;
+}
diff --git a/roms/u-boot/drivers/mtd/pic32_flash.c b/roms/u-boot/drivers/mtd/pic32_flash.c
new file mode 100644
index 000000000..ea0dbe9ee
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/pic32_flash.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2015
+ * Cristian Birsan <cristian.birsan@microchip.com>
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <fdt_support.h>
+#include <flash.h>
+#include <init.h>
+#include <irq_func.h>
+#include <asm/global_data.h>
+#include <linux/bitops.h>
+#include <mach/pic32.h>
+#include <wait_bit.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* NVM Controller registers */
+struct pic32_reg_nvm {
+ struct pic32_reg_atomic ctrl;
+ struct pic32_reg_atomic key;
+ struct pic32_reg_atomic addr;
+ struct pic32_reg_atomic data;
+};
+
+/* NVM operations */
+#define NVMOP_NOP 0
+#define NVMOP_WORD_WRITE 1
+#define NVMOP_PAGE_ERASE 4
+
+/* NVM control bits */
+#define NVM_WR BIT(15)
+#define NVM_WREN BIT(14)
+#define NVM_WRERR BIT(13)
+#define NVM_LVDERR BIT(12)
+
+/* NVM programming unlock register */
+#define LOCK_KEY 0x0
+#define UNLOCK_KEY1 0xaa996655
+#define UNLOCK_KEY2 0x556699aa
+
+/*
+ * PIC32 flash banks consist of a number of pages; each page is
+ * divided into rows, and each row into words.
+ * Here we maintain page information instead of sector information.
+ */
+flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS];
+static struct pic32_reg_nvm *nvm_regs_p;
+
+static inline void flash_initiate_operation(u32 nvmop)
+{
+ /* set operation */
+ writel(nvmop, &nvm_regs_p->ctrl.raw);
+
+ /* enable flash write */
+ writel(NVM_WREN, &nvm_regs_p->ctrl.set);
+
+ /* unlock sequence */
+ writel(LOCK_KEY, &nvm_regs_p->key.raw);
+ writel(UNLOCK_KEY1, &nvm_regs_p->key.raw);
+ writel(UNLOCK_KEY2, &nvm_regs_p->key.raw);
+
+ /* initiate operation */
+ writel(NVM_WR, &nvm_regs_p->ctrl.set);
+}
+
+static int flash_wait_till_busy(const char *func, ulong timeout)
+{
+ int ret = wait_for_bit_le32(&nvm_regs_p->ctrl.raw,
+ NVM_WR, false, timeout, false);
+
+ return ret ? ERR_TIMEOUT : ERR_OK;
+}
+
+static inline int flash_complete_operation(void)
+{
+ u32 tmp;
+
+ tmp = readl(&nvm_regs_p->ctrl.raw);
+ if (tmp & NVM_WRERR) {
+ printf("Error in Block Erase - Lock Bit may be set!\n");
+ flash_initiate_operation(NVMOP_NOP);
+ return ERR_PROTECTED;
+ }
+
+ if (tmp & NVM_LVDERR) {
+ printf("Error in Block Erase - low-vol detected!\n");
+ flash_initiate_operation(NVMOP_NOP);
+ return ERR_NOT_ERASED;
+ }
+
+ /* disable flash write or erase operation */
+ writel(NVM_WREN, &nvm_regs_p->ctrl.clr);
+
+ return ERR_OK;
+}
+
+/*
+ * Erase flash sectors, returns:
+ * ERR_OK - OK
+ * ERR_INVAL - invalid sector arguments
+ * ERR_TIMEOUT - write timeout
+ * ERR_NOT_ERASED - Flash not erased
+ * ERR_UNKNOWN_FLASH_VENDOR - incorrect flash
+ */
+int flash_erase(flash_info_t *info, int s_first, int s_last)
+{
+ ulong sect_start, sect_end, flags;
+ int prot, sect;
+ int rc;
+
+ if ((info->flash_id & FLASH_VENDMASK) != FLASH_MAN_MCHP) {
+ printf("Can't erase unknown flash type %08lx - aborted\n",
+ info->flash_id);
+ return ERR_UNKNOWN_FLASH_VENDOR;
+ }
+
+ if ((s_first < 0) || (s_first > s_last)) {
+ printf("- no sectors to erase\n");
+ return ERR_INVAL;
+ }
+
+ prot = 0;
+ for (sect = s_first; sect <= s_last; ++sect) {
+ if (info->protect[sect])
+ prot++;
+ }
+
+ if (prot)
+ printf("- Warning: %d protected sectors will not be erased!\n",
+ prot);
+ else
+ printf("\n");
+
+ /* erase on unprotected sectors */
+ for (sect = s_first; sect <= s_last; sect++) {
+ if (info->protect[sect])
+ continue;
+
+ /* disable interrupts */
+ flags = disable_interrupts();
+
+ /* write destination page address (physical) */
+ sect_start = CPHYSADDR(info->start[sect]);
+ writel(sect_start, &nvm_regs_p->addr.raw);
+
+ /* page erase */
+ flash_initiate_operation(NVMOP_PAGE_ERASE);
+
+ /* wait */
+ rc = flash_wait_till_busy(__func__,
+ CONFIG_SYS_FLASH_ERASE_TOUT);
+
+ /* re-enable interrupts if necessary */
+ if (flags)
+ enable_interrupts();
+
+ if (rc != ERR_OK)
+ return rc;
+
+ rc = flash_complete_operation();
+ if (rc != ERR_OK)
+ return rc;
+
+ /*
+ * flash content is updated but cache might contain stale
+ * data, so invalidate dcache.
+ */
+ sect_end = info->start[sect] + info->size / info->sector_count;
+ invalidate_dcache_range(info->start[sect], sect_end);
+ }
+
+ printf(" done\n");
+ return ERR_OK;
+}
+
+int page_erase(flash_info_t *info, int sect)
+{
+ return 0;
+}
+
+/* Write a word to flash */
+static int write_word(flash_info_t *info, ulong dest, ulong word)
+{
+ ulong flags;
+ int rc;
+
+ /* read flash to check if it is sufficiently erased */
+ if ((readl((void __iomem *)dest) & word) != word) {
+ printf("Error, Flash not erased!\n");
+ return ERR_NOT_ERASED;
+ }
+
+ /* disable interrupts */
+ flags = disable_interrupts();
+
+ /* update destination page address (physical) */
+ writel(CPHYSADDR(dest), &nvm_regs_p->addr.raw);
+ writel(word, &nvm_regs_p->data.raw);
+
+ /* word write */
+ flash_initiate_operation(NVMOP_WORD_WRITE);
+
+ /* wait for operation to complete */
+ rc = flash_wait_till_busy(__func__, CONFIG_SYS_FLASH_WRITE_TOUT);
+
+ /* re-enable interrupts if necessary */
+ if (flags)
+ enable_interrupts();
+
+ if (rc != ERR_OK)
+ return rc;
+
+ return flash_complete_operation();
+}
+
+/*
+ * Copy memory to flash, returns:
+ * ERR_OK - OK
+ * ERR_TIMEOUT - write timeout
+ * ERR_NOT_ERASED - Flash not erased
+ */
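+/*
+ * Example of the unaligned-head handling below (illustrative): writing
+ * 3 bytes starting 2 bytes past a word boundary gives l = 2; lanes 0-1
+ * of tmp_le are back-filled from flash, lanes 2-3 take the first two
+ * source bytes, and the merged little-endian word is programmed before
+ * the aligned loop takes over.
+ */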
+int write_buff(flash_info_t *info, uchar *src, ulong addr, ulong cnt)
+{
+ ulong dst, tmp_le, len = cnt;
+ int i, l, rc;
+ uchar *cp;
+
+ /* get lower word aligned address */
+ dst = (addr & ~3);
+
+ /* handle unaligned start bytes */
+ l = addr - dst;
+ if (l != 0) {
+ tmp_le = 0;
+ for (i = 0, cp = (uchar *)dst; i < l; ++i, ++cp)
+ tmp_le |= *cp << (i * 8);
+
+ for (; (i < 4) && (cnt > 0); ++i, ++src, --cnt, ++cp)
+ tmp_le |= *src << (i * 8);
+
+ for (; (cnt == 0) && (i < 4); ++i, ++cp)
+ tmp_le |= *cp << (i * 8);
+
+ rc = write_word(info, dst, tmp_le);
+ if (rc)
+ goto out;
+
+ dst += 4;
+ }
+
+ /* handle word aligned part */
+ while (cnt >= 4) {
+ tmp_le = src[0] | src[1] << 8 | src[2] << 16 | src[3] << 24;
+ rc = write_word(info, dst, tmp_le);
+ if (rc)
+ goto out;
+ src += 4;
+ dst += 4;
+ cnt -= 4;
+ }
+
+ if (cnt == 0) {
+ rc = ERR_OK;
+ goto out;
+ }
+
+ /* handle unaligned tail bytes */
+ tmp_le = 0;
+ for (i = 0, cp = (uchar *)dst; (i < 4) && (cnt > 0); ++i, ++cp) {
+ tmp_le |= *src++ << (i * 8);
+ --cnt;
+ }
+
+ for (; i < 4; ++i, ++cp)
+ tmp_le |= *cp << (i * 8);
+
+ rc = write_word(info, dst, tmp_le);
+out:
+ /*
+ * flash content updated by nvm controller but CPU cache might
+ * have stale data, so invalidate dcache.
+ */
+ invalidate_dcache_range(addr, addr + len);
+
+ printf(" done\n");
+ return rc;
+}
+
+void flash_print_info(flash_info_t *info)
+{
+ int i;
+
+ if (info->flash_id == FLASH_UNKNOWN) {
+ printf("missing or unknown FLASH type\n");
+ return;
+ }
+
+ switch (info->flash_id & FLASH_VENDMASK) {
+ case FLASH_MAN_MCHP:
+ printf("Microchip Technology ");
+ break;
+ default:
+ printf("Unknown Vendor ");
+ break;
+ }
+
+ switch (info->flash_id & FLASH_TYPEMASK) {
+ case FLASH_MCHP100T:
+ printf("Internal (8 Mbit, 64 x 16k)\n");
+ break;
+ default:
+ printf("Unknown Chip Type\n");
+ break;
+ }
+
+ printf(" Size: %ld MB in %d Sectors\n",
+ info->size >> 20, info->sector_count);
+
+ printf(" Sector Start Addresses:");
+ for (i = 0; i < info->sector_count; ++i) {
+ if ((i % 5) == 0)
+ printf("\n ");
+
+ printf(" %08lX%s", info->start[i],
+ info->protect[i] ? " (RO)" : " ");
+ }
+ printf("\n");
+}
+
+unsigned long flash_init(void)
+{
+ unsigned long size = 0;
+ struct udevice *dev;
+ int bank;
+
+ /* probe every MTD device */
+ for (uclass_first_device(UCLASS_MTD, &dev); dev;
+ uclass_next_device(&dev)) {
+ /* nop */
+ }
+
+ /* calc total flash size */
+ for (bank = 0; bank < CONFIG_SYS_MAX_FLASH_BANKS; ++bank)
+ size += flash_info[bank].size;
+
+ return size;
+}
+
+static void pic32_flash_bank_init(flash_info_t *info,
+ ulong base, ulong size)
+{
+ ulong sect_size;
+ int sect;
+
+ /* device & manufacturer code */
+ info->flash_id = FLASH_MAN_MCHP | FLASH_MCHP100T;
+ info->sector_count = CONFIG_SYS_MAX_FLASH_SECT;
+ info->size = size;
+
+ /* update sector (i.e. page) info */
+ sect_size = info->size / info->sector_count;
+ for (sect = 0; sect < info->sector_count; sect++) {
+ info->start[sect] = base;
+ /* protect each sector by default */
+ info->protect[sect] = 1;
+ base += sect_size;
+ }
+}
+
+static int pic32_flash_probe(struct udevice *dev)
+{
+ void *blob = (void *)gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ const char *list, *end;
+ const fdt32_t *cell;
+ unsigned long addr, size;
+ int parent, addrc, sizec;
+ flash_info_t *info;
+ int len, idx;
+
+ /*
+ * Decode "reg". There are multiple reg tuples, and they must
+ * match up with the "reg-names" entries.
+ */
+ parent = fdt_parent_offset(blob, node);
+ fdt_support_default_count_cells(blob, parent, &addrc, &sizec);
+ list = fdt_getprop(blob, node, "reg-names", &len);
+ if (!list)
+ return -ENOENT;
+
+ end = list + len;
+ cell = fdt_getprop(blob, node, "reg", &len);
+ if (!cell)
+ return -ENOENT;
+
+ for (idx = 0, info = &flash_info[0]; list < end;) {
+ addr = fdt_translate_address((void *)blob, node, cell + idx);
+ size = fdt_addr_to_cpu(cell[idx + addrc]);
+ len = strlen(list);
+ if (!strncmp(list, "nvm", len)) {
+ /* NVM controller */
+ nvm_regs_p = ioremap(addr, size);
+ } else if (!strncmp(list, "bank", 4)) {
+ /* Flash bank: use kseg0 cached address */
+ pic32_flash_bank_init(info, CKSEG0ADDR(addr), size);
+ info++;
+ }
+ idx += addrc + sizec;
+ list += len + 1;
+ }
+
+ /* disable flash write/erase operations */
+ writel(NVM_WREN, &nvm_regs_p->ctrl.clr);
+
+#if (CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE)
+ /* monitor protection ON by default */
+ flash_protect(FLAG_PROTECT_SET,
+ CONFIG_SYS_MONITOR_BASE,
+ CONFIG_SYS_MONITOR_BASE + monitor_flash_len - 1,
+ &flash_info[0]);
+#endif
+
+#ifdef CONFIG_ENV_IS_IN_FLASH
+ /* ENV protection ON by default */
+ flash_protect(FLAG_PROTECT_SET,
+ CONFIG_ENV_ADDR,
+ CONFIG_ENV_ADDR + CONFIG_ENV_SECT_SIZE - 1,
+ &flash_info[0]);
+#endif
+ return 0;
+}
+
+static const struct udevice_id pic32_flash_ids[] = {
+ { .compatible = "microchip,pic32mzda-flash" },
+ {}
+};
+
+U_BOOT_DRIVER(pic32_flash) = {
+ .name = "pic32_flash",
+ .id = UCLASS_MTD,
+ .of_match = pic32_flash_ids,
+ .probe = pic32_flash_probe,
+};
diff --git a/roms/u-boot/drivers/mtd/renesas_rpc_hf.c b/roms/u-boot/drivers/mtd/renesas_rpc_hf.c
new file mode 100644
index 000000000..2c61ce7b6
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/renesas_rpc_hf.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RCar Gen3 RPC HyperFlash driver
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ * Copyright (C) 2016 Cogent Embedded, Inc.
+ * Copyright (C) 2017 Marek Vasut <marek.vasut@gmail.com>
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <dm/of_access.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <flash.h>
+#include <mtd.h>
+#include <wait_bit.h>
+#include <linux/bitops.h>
+#include <mtd/cfi_flash.h>
+#include <asm/global_data.h>
+
+#define RPC_CMNCR 0x0000 /* R/W */
+#define RPC_CMNCR_MD BIT(31)
+#define RPC_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
+#define RPC_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
+#define RPC_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
+#define RPC_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
+#define RPC_CMNCR_MOIIO_HIZ (RPC_CMNCR_MOIIO0(3) | RPC_CMNCR_MOIIO1(3) | \
+ RPC_CMNCR_MOIIO2(3) | RPC_CMNCR_MOIIO3(3))
+#define RPC_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
+#define RPC_CMNCR_IO2FV(val) (((val) & 0x3) << 12)
+#define RPC_CMNCR_IO3FV(val) (((val) & 0x3) << 14)
+#define RPC_CMNCR_IOFV_HIZ (RPC_CMNCR_IO0FV(3) | RPC_CMNCR_IO2FV(3) | \
+ RPC_CMNCR_IO3FV(3))
+#define RPC_CMNCR_BSZ(val) (((val) & 0x3) << 0)
+
+#define RPC_SSLDR 0x0004 /* R/W */
+#define RPC_SSLDR_SPNDL(d) (((d) & 0x7) << 16)
+#define RPC_SSLDR_SLNDL(d) (((d) & 0x7) << 8)
+#define RPC_SSLDR_SCKDL(d) (((d) & 0x7) << 0)
+
+#define RPC_DRCR 0x000C /* R/W */
+#define RPC_DRCR_SSLN BIT(24)
+#define RPC_DRCR_RBURST(v) (((v) & 0x1F) << 16)
+#define RPC_DRCR_RCF BIT(9)
+#define RPC_DRCR_RBE BIT(8)
+#define RPC_DRCR_SSLE BIT(0)
+
+#define RPC_DRCMR 0x0010 /* R/W */
+#define RPC_DRCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPC_DRCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPC_DREAR 0x0014 /* R/W */
+#define RPC_DREAR_EAV(v) (((v) & 0xFF) << 16)
+#define RPC_DREAR_EAC(v) (((v) & 0x7) << 0)
+
+#define RPC_DROPR 0x0018 /* R/W */
+#define RPC_DROPR_OPD3(o) (((o) & 0xFF) << 24)
+#define RPC_DROPR_OPD2(o) (((o) & 0xFF) << 16)
+#define RPC_DROPR_OPD1(o) (((o) & 0xFF) << 8)
+#define RPC_DROPR_OPD0(o) (((o) & 0xFF) << 0)
+
+#define RPC_DRENR 0x001C /* R/W */
+#define RPC_DRENR_CDB(o) (u32)((((o) & 0x3) << 30))
+#define RPC_DRENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPC_DRENR_ADB(o) (((o) & 0x3) << 24)
+#define RPC_DRENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPC_DRENR_SPIDB(o) (((o) & 0x3) << 16)
+#define RPC_DRENR_DME BIT(15)
+#define RPC_DRENR_CDE BIT(14)
+#define RPC_DRENR_OCDE BIT(12)
+#define RPC_DRENR_ADE(v) (((v) & 0xF) << 8)
+#define RPC_DRENR_OPDE(v) (((v) & 0xF) << 4)
+
+#define RPC_SMCR 0x0020 /* R/W */
+#define RPC_SMCR_SSLKP BIT(8)
+#define RPC_SMCR_SPIRE BIT(2)
+#define RPC_SMCR_SPIWE BIT(1)
+#define RPC_SMCR_SPIE BIT(0)
+
+#define RPC_SMCMR 0x0024 /* R/W */
+#define RPC_SMCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPC_SMCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPC_SMADR 0x0028 /* R/W */
+#define RPC_SMOPR 0x002C /* R/W */
+#define RPC_SMOPR_OPD0(o) (((o) & 0xFF) << 0)
+#define RPC_SMOPR_OPD1(o) (((o) & 0xFF) << 8)
+#define RPC_SMOPR_OPD2(o) (((o) & 0xFF) << 16)
+#define RPC_SMOPR_OPD3(o) (((o) & 0xFF) << 24)
+
+#define RPC_SMENR 0x0030 /* R/W */
+#define RPC_SMENR_CDB(o) (((o) & 0x3) << 30)
+#define RPC_SMENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPC_SMENR_ADB(o) (((o) & 0x3) << 24)
+#define RPC_SMENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPC_SMENR_SPIDB(o) (((o) & 0x3) << 16)
+#define RPC_SMENR_DME BIT(15)
+#define RPC_SMENR_CDE BIT(14)
+#define RPC_SMENR_OCDE BIT(12)
+#define RPC_SMENR_ADE(v) (((v) & 0xF) << 8)
+#define RPC_SMENR_OPDE(v) (((v) & 0xF) << 4)
+#define RPC_SMENR_SPIDE(v) (((v) & 0xF) << 0)
+
+#define RPC_SMRDR0 0x0038 /* R */
+#define RPC_SMRDR1 0x003C /* R */
+#define RPC_SMWDR0 0x0040 /* R/W */
+#define RPC_SMWDR1 0x0044 /* R/W */
+#define RPC_CMNSR 0x0048 /* R */
+#define RPC_CMNSR_SSLF BIT(1)
+#define RPC_CMNSR_TEND BIT(0)
+
+#define RPC_DRDMCR 0x0058 /* R/W */
+#define RPC_DRDMCR_DMCYC(v) (((v) & 0xF) << 0)
+
+#define RPC_DRDRENR 0x005C /* R/W */
+#define RPC_DRDRENR_HYPE (0x5 << 12)
+#define RPC_DRDRENR_ADDRE BIT(8)
+#define RPC_DRDRENR_OPDRE BIT(4)
+#define RPC_DRDRENR_DRDRE BIT(0)
+
+#define RPC_SMDMCR 0x0060 /* R/W */
+#define RPC_SMDMCR_DMCYC(v) (((v) & 0xF) << 0)
+
+#define RPC_SMDRENR 0x0064 /* R/W */
+#define RPC_SMDRENR_HYPE (0x5 << 12)
+#define RPC_SMDRENR_ADDRE BIT(8)
+#define RPC_SMDRENR_OPDRE BIT(4)
+#define RPC_SMDRENR_SPIDRE BIT(0)
+
+#define RPC_PHYCNT 0x007C /* R/W */
+#define RPC_PHYCNT_CAL BIT(31)
+#define PRC_PHYCNT_OCTA_AA BIT(22)
+#define PRC_PHYCNT_OCTA_SA BIT(23)
+#define PRC_PHYCNT_EXDS BIT(21)
+#define RPC_PHYCNT_OCT BIT(20)
+#define RPC_PHYCNT_WBUF2 BIT(4)
+#define RPC_PHYCNT_WBUF BIT(2)
+#define RPC_PHYCNT_MEM(v) (((v) & 0x3) << 0)
+
+#define RPC_PHYINT 0x0088 /* R/W */
+#define RPC_PHYINT_RSTEN BIT(18)
+#define RPC_PHYINT_WPEN BIT(17)
+#define RPC_PHYINT_INTEN BIT(16)
+#define RPC_PHYINT_RST BIT(2)
+#define RPC_PHYINT_WP BIT(1)
+#define RPC_PHYINT_INT BIT(0)
+
+#define RPC_WBUF 0x8000 /* R/W size=4/8/16/32/64Bytes */
+#define RPC_WBUF_SIZE 0x100
+
+static phys_addr_t rpc_base;
+
+enum rpc_hf_size {
+ RPC_HF_SIZE_16BIT = RPC_SMENR_SPIDE(0x8),
+ RPC_HF_SIZE_32BIT = RPC_SMENR_SPIDE(0xC),
+ RPC_HF_SIZE_64BIT = RPC_SMENR_SPIDE(0xF),
+};
+
+static int rpc_hf_wait_tend(void)
+{
+ void __iomem *reg = (void __iomem *)rpc_base + RPC_CMNSR;
+ return wait_for_bit_le32(reg, RPC_CMNSR_TEND, true, 1000, 0);
+}
+
+static int rpc_hf_mode(bool man)
+{
+ int ret;
+
+ ret = rpc_hf_wait_tend();
+ if (ret)
+ return ret;
+
+ clrsetbits_le32(rpc_base + RPC_PHYCNT,
+ RPC_PHYCNT_WBUF | RPC_PHYCNT_WBUF2 |
+ RPC_PHYCNT_CAL | RPC_PHYCNT_MEM(3),
+ RPC_PHYCNT_CAL | RPC_PHYCNT_MEM(3));
+
+ clrsetbits_le32(rpc_base + RPC_CMNCR,
+ RPC_CMNCR_MD | RPC_CMNCR_BSZ(3),
+ RPC_CMNCR_MOIIO_HIZ | RPC_CMNCR_IOFV_HIZ |
+ (man ? RPC_CMNCR_MD : 0) | RPC_CMNCR_BSZ(1));
+
+ if (man)
+ return 0;
+
+ writel(RPC_DRCR_RBURST(0x1F) | RPC_DRCR_RCF | RPC_DRCR_RBE,
+ rpc_base + RPC_DRCR);
+
+ writel(RPC_DRCMR_CMD(0xA0), rpc_base + RPC_DRCMR);
+ writel(RPC_DRENR_CDB(2) | RPC_DRENR_OCDB(2) | RPC_DRENR_ADB(2) |
+ RPC_DRENR_SPIDB(2) | RPC_DRENR_CDE | RPC_DRENR_OCDE |
+ RPC_DRENR_ADE(4), rpc_base + RPC_DRENR);
+ writel(RPC_DRDMCR_DMCYC(0xE), rpc_base + RPC_DRDMCR);
+ writel(RPC_DRDRENR_HYPE | RPC_DRDRENR_ADDRE | RPC_DRDRENR_DRDRE,
+ rpc_base + RPC_DRDRENR);
+
+ /* Dummy read */
+ readl(rpc_base + RPC_DRCR);
+
+ return 0;
+}
+
+static int rpc_hf_xfer(void *addr, u64 wdata, u64 *rdata,
+ enum rpc_hf_size size, bool write)
+{
+ int ret;
+ u32 val;
+
+ ret = rpc_hf_mode(1);
+ if (ret)
+ return ret;
+
+ /* Submit HF address, SMCMR CMD[7] ~= CA Bit# 47 (R/nW) */
+ writel(write ? 0 : RPC_SMCMR_CMD(0x80), rpc_base + RPC_SMCMR);
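+ /* HyperFlash is addressed in 16-bit words, hence byte address >> 1 */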
+ writel((uintptr_t)addr >> 1, rpc_base + RPC_SMADR);
+ writel(0x0, rpc_base + RPC_SMOPR);
+
+ writel(RPC_SMDRENR_HYPE | RPC_SMDRENR_ADDRE | RPC_SMDRENR_SPIDRE,
+ rpc_base + RPC_SMDRENR);
+
+ val = RPC_SMENR_CDB(2) | RPC_SMENR_OCDB(2) |
+ RPC_SMENR_ADB(2) | RPC_SMENR_SPIDB(2) |
+ RPC_SMENR_CDE | RPC_SMENR_OCDE | RPC_SMENR_ADE(4) | size;
+
+ if (write) {
+ writel(val, rpc_base + RPC_SMENR);
+
+ if (size == RPC_HF_SIZE_64BIT)
+ writeq(cpu_to_be64(wdata), rpc_base + RPC_SMWDR0);
+ else
+ writel(cpu_to_be32(wdata), rpc_base + RPC_SMWDR0);
+
+ writel(RPC_SMCR_SPIWE | RPC_SMCR_SPIE, rpc_base + RPC_SMCR);
+ } else {
+ val |= RPC_SMENR_DME;
+
+ writel(RPC_SMDMCR_DMCYC(0xE), rpc_base + RPC_SMDMCR);
+
+ writel(val, rpc_base + RPC_SMENR);
+
+ writel(RPC_SMCR_SPIRE | RPC_SMCR_SPIE, rpc_base + RPC_SMCR);
+
+ ret = rpc_hf_wait_tend();
+ if (ret)
+ return ret;
+
+ if (size == RPC_HF_SIZE_64BIT)
+ *rdata = be64_to_cpu(readq(rpc_base + RPC_SMRDR0));
+ else
+ *rdata = be32_to_cpu(readl(rpc_base + RPC_SMRDR0));
+ }
+
+ return rpc_hf_mode(0);
+}
+
+static void rpc_hf_write_cmd(void *addr, u64 wdata, enum rpc_hf_size size)
+{
+ int ret;
+
+ ret = rpc_hf_xfer(addr, wdata, NULL, size, 1);
+ if (ret)
+ printf("RPC: Write failed, ret=%i\n", ret);
+}
+
+static u64 rpc_hf_read_reg(void *addr, enum rpc_hf_size size)
+{
+ u64 rdata = 0;
+ int ret;
+
+ ret = rpc_hf_xfer(addr, 0, &rdata, size, 0);
+ if (ret)
+ printf("RPC: Read failed, ret=%i\n", ret);
+
+ return rdata;
+}
+
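+/*
+ * These accessors override the memory-mapped defaults used by the CFI
+ * driver, so cfi_flash.c transacts through the RPC sequencer instead.
+ * 8-bit accesses are widened to 16-bit transfers, presumably because
+ * HyperFlash presents a 16-bit data bus.
+ */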
+void flash_write8(u8 value, void *addr)
+{
+ rpc_hf_write_cmd(addr, value, RPC_HF_SIZE_16BIT);
+}
+
+void flash_write16(u16 value, void *addr)
+{
+ rpc_hf_write_cmd(addr, value, RPC_HF_SIZE_16BIT);
+}
+
+void flash_write32(u32 value, void *addr)
+{
+ rpc_hf_write_cmd(addr, value, RPC_HF_SIZE_32BIT);
+}
+
+void flash_write64(u64 value, void *addr)
+{
+ rpc_hf_write_cmd(addr, value, RPC_HF_SIZE_64BIT);
+}
+
+u8 flash_read8(void *addr)
+{
+ return rpc_hf_read_reg(addr, RPC_HF_SIZE_16BIT);
+}
+
+u16 flash_read16(void *addr)
+{
+ return rpc_hf_read_reg(addr, RPC_HF_SIZE_16BIT);
+}
+
+u32 flash_read32(void *addr)
+{
+ return rpc_hf_read_reg(addr, RPC_HF_SIZE_32BIT);
+}
+
+u64 flash_read64(void *addr)
+{
+ return rpc_hf_read_reg(addr, RPC_HF_SIZE_64BIT);
+}
+
+static int rpc_hf_bind(struct udevice *parent)
+{
+ const void *fdt = gd->fdt_blob;
+ ofnode node;
+ int ret, off;
+
+ /*
+ * Check if there are any SPI NOR child nodes, if so, do NOT bind
+ * as this controller will be operated by the QSPI driver instead.
+ */
+ dev_for_each_subnode(node, parent) {
+ off = ofnode_to_offset(node);
+
+ ret = fdt_node_check_compatible(fdt, off, "spi-flash");
+ if (!ret)
+ return -ENODEV;
+
+ ret = fdt_node_check_compatible(fdt, off, "jedec,spi-nor");
+ if (!ret)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int rpc_hf_probe(struct udevice *dev)
+{
+ void *blob = (void *)gd->fdt_blob;
+ const fdt32_t *cell;
+ int node = dev_of_offset(dev);
+ int parent, addrc, sizec, len, ret;
+ struct clk clk;
+ phys_addr_t flash_base;
+
+ parent = fdt_parent_offset(blob, node);
+ fdt_support_default_count_cells(blob, parent, &addrc, &sizec);
+ cell = fdt_getprop(blob, node, "reg", &len);
+ if (!cell)
+ return -ENOENT;
+
+ if (addrc != 2 || sizec != 2)
+ return -EINVAL;
+
+ ret = clk_get_by_index(dev, 0, &clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get RPC clock\n");
+ return ret;
+ }
+
+ ret = clk_enable(&clk);
+ clk_free(&clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable RPC clock\n");
+ return ret;
+ }
+
+ rpc_base = fdt_translate_address(blob, node, cell);
+ flash_base = fdt_translate_address(blob, node, cell + addrc + sizec);
+
+ flash_info[0].dev = dev;
+ flash_info[0].base = flash_base;
+ cfi_flash_num_flash_banks = 1;
+ gd->bd->bi_flashstart = flash_base;
+
+ return 0;
+}
+
+static const struct udevice_id rpc_hf_ids[] = {
+ { .compatible = "renesas,rpc" },
+ {}
+};
+
+U_BOOT_DRIVER(rpc_hf) = {
+ .name = "rpc_hf",
+ .id = UCLASS_MTD,
+ .of_match = rpc_hf_ids,
+ .bind = rpc_hf_bind,
+ .probe = rpc_hf_probe,
+};
diff --git a/roms/u-boot/drivers/mtd/spi/Kconfig b/roms/u-boot/drivers/mtd/spi/Kconfig
new file mode 100644
index 000000000..f8db8e521
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/Kconfig
@@ -0,0 +1,217 @@
+menu "SPI Flash Support"
+
+config DM_SPI_FLASH
+ bool "Enable Driver Model for SPI flash"
+ depends on DM && DM_SPI
+ imply SPI_FLASH
+ help
+ Enable driver model for SPI flash. This SPI flash interface
+ (spi_flash_probe(), spi_flash_write(), etc.) is then
+ implemented by the SPI flash uclass. There is one standard
+ SPI flash driver which knows how to probe most chips
+ supported by U-Boot. The uclass interface is defined in
+ include/spi_flash.h, but is currently fully compatible
+ with the old interface to avoid confusion and duplication
+ during the transition period. SPI and SPI flash must be
+ enabled together (it is not possible to use driver model
+ for one and not the other).
+
+config SPI_FLASH_SANDBOX
+ bool "Support sandbox SPI flash device"
+ depends on SANDBOX && DM_SPI_FLASH
+ help
+ Since sandbox cannot access real devices, an emulation mechanism is
+ provided instead. Drivers can be connected up to the sandbox SPI
+ bus (see CONFIG_SANDBOX_SPI) and SPI traffic will be routed to this
+ device. Typically the contents of the emulated SPI flash device are
+ stored in a file on the host filesystem.
+
+config SPI_FLASH
+ bool "SPI Flash Core Interface support"
+ select SPI_MEM
+ help
+ Enable the SPI flash Core support. This will include basic
+ standard support for things like probing, read / write, and
+ erasing through the cmd_sf interface.
+
+ If unsure, say N
+
+config SF_DEFAULT_BUS
+ int "SPI Flash default bus identifier"
+ depends on SPI_FLASH || DM_SPI_FLASH
+ default 0
+ help
+ The default bus may be provided by the platform
+ to handle the common case when only a single serial
+ flash is present on the system.
+
+config SF_DEFAULT_CS
+ int "SPI Flash default Chip-select"
+ depends on SPI_FLASH || DM_SPI_FLASH
+ default 0
+ help
+ The default chip select may be provided by the platform
+ to handle the common case when only a single serial
+ flash is present on the system.
+
+config SF_DEFAULT_MODE
+ hex "SPI Flash default mode (see include/spi.h)"
+ depends on SPI_FLASH || DM_SPI_FLASH
+ default 3
+ help
+ The default mode may be provided by the platform
+ to handle the common case when only a single serial
+ flash is present on the system.
+ Not used when booting with a device tree; the SPI driver
+ reads speed and mode from platform data computed from the
+ device tree node.
+
+config SF_DEFAULT_SPEED
+ int "SPI Flash default speed in Hz"
+ depends on SPI_FLASH || DM_SPI_FLASH
+ default 1000000
+ help
+ The default speed may be provided by the platform
+ to handle the common case when only a single serial
+ flash is present on the system.
+ Not used when booting with a device tree; the SPI driver
+ reads speed and mode from platform data computed from the
+ device tree node.
+
+if SPI_FLASH
+
+config SPI_FLASH_SFDP_SUPPORT
+ bool "SFDP table parsing support for SPI NOR flashes"
+ depends on !SPI_FLASH_BAR
+ help
+ Enable support for parsing and auto discovery of parameters for
+ SPI NOR flashes using Serial Flash Discoverable Parameters (SFDP)
+ tables as per JESD216 standard.
+
+config SPI_FLASH_BAR
+ bool "SPI flash Bank/Extended address register support"
+ help
+ Enable the SPI flash Bank/Extended address register support.
+ Bank/Extended address registers are used to access the flash
+ which has size > 16MiB in 3-byte addressing.
+
+config SPI_FLASH_UNLOCK_ALL
+ bool "Unlock the entire SPI flash on u-boot startup"
+ default y
+ help
+ Some flashes tend to power up with the software write protection
+ bits set. If this option is set, the whole flash will be unlocked.
+
+ For legacy reasons, this option defaults to y. But if you intend to
+ actually use the software protection bits you should say n here.
+
+config SF_DUAL_FLASH
+ bool "SPI DUAL flash memory support"
+ help
+ Enable this option to support two flash memories connected to a single
+ controller. Currently Xilinx Zynq qspi supports this.
+
+config SPI_FLASH_ATMEL
+ bool "Atmel SPI flash support"
+ help
+ Add support for various Atmel SPI flash chips (AT45xxx and AT25xxx)
+
+config SPI_FLASH_EON
+ bool "EON SPI flash support"
+ help
+ Add support for various EON SPI flash chips (EN25xxx)
+
+config SPI_FLASH_GIGADEVICE
+ bool "GigaDevice SPI flash support"
+ help
+ Add support for various GigaDevice SPI flash chips (GD25xxx)
+
+config SPI_FLASH_ISSI
+ bool "ISSI SPI flash support"
+ help
+ Add support for various ISSI SPI flash chips (ISxxx)
+
+config SPI_FLASH_MACRONIX
+ bool "Macronix SPI flash support"
+ help
+ Add support for various Macronix SPI flash chips (MX25Lxxx)
+
+config SPI_FLASH_SPANSION
+ bool "Spansion SPI flash support"
+ help
+ Add support for various Spansion SPI flash chips (S25FLxxx)
+
+config SPI_FLASH_STMICRO
+ bool "STMicro SPI flash support"
+ help
+ Add support for various STMicro SPI flash chips (M25Pxxx and N25Qxxx)
+
+config SPI_FLASH_SST
+ bool "SST SPI flash support"
+ help
+ Add support for various SST SPI flash chips (SST25xxx)
+
+config SPI_FLASH_WINBOND
+ bool "Winbond SPI flash support"
+ help
+ Add support for various Winbond SPI flash chips (W25xxx)
+
+config SPI_FLASH_XMC
+ bool "XMC SPI flash support"
+ help
+ Add support for various XMC (Wuhan Xinxin Semiconductor
+ Manufacturing Corp.) SPI flash chips (XM25xxx)
+
+endif
+
+config SPI_FLASH_USE_4K_SECTORS
+ bool "Use small 4096 B erase sectors"
+ depends on SPI_FLASH
+ default y
+ help
+ Many flash memories support erasing small (4096 B) sectors. Depending
+ on the usage this feature may provide performance gain in comparison
+ to erasing whole blocks (32/64 KiB).
+ Changing a small part of the flash's contents is usually faster with
+ small sectors. On the other hand erasing should be faster when using
+ a 64 KiB block instead of 16 × 4 KiB sectors.
+
+ Please note that some tools/drivers/filesystems may not work with
+ 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum).
+
+config SPI_FLASH_DATAFLASH
+ bool "AT45xxx DataFlash support"
+ depends on SPI_FLASH && DM_SPI_FLASH
+ help
+ Enable the access for SPI-flash-based AT45xxx DataFlash chips.
+ DataFlash is a kind of SPI flash. Most AT45 chips have two buffers
+ in each chip, which may be used for double buffered I/O; but this
+ driver doesn't (yet) use these for any kind of I/O overlap or prefetching.
+
+ Sometimes DataFlash is packaged in MMC-format cards, although the
+ MMC stack can't (yet?) distinguish between MMC and DataFlash
+ protocols during enumeration.
+
+ If unsure, say N
+
+config SPI_FLASH_MTD
+ bool "SPI Flash MTD support"
+ depends on SPI_FLASH && MTD
+ help
+ Enable MTD support for the SPI flash layer. This adapter
+ translates mtd_read/mtd_write commands into spi_flash_read/write
+ calls. It is not intended for use within sf_cmd or the SPI flash
+ subsystem itself. Such an adapter is needed for subsystems like
+ UBI which can only operate on top of the MTD layer.
+
+ If unsure, say N
+
+config SPL_SPI_FLASH_MTD
+ bool "SPI flash MTD support for SPL"
+ depends on SPI_FLASH
+ help
+ Enable the MTD support for the SPI flash layer in SPL.
+
+ If unsure, say N
+
+endmenu # menu "SPI Flash Support"
diff --git a/roms/u-boot/drivers/mtd/spi/Makefile b/roms/u-boot/drivers/mtd/spi/Makefile
new file mode 100644
index 000000000..99cc41855
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+obj-$(CONFIG_$(SPL_TPL_)DM_SPI_FLASH) += sf-uclass.o
+spi-nor-y := sf_probe.o spi-nor-ids.o
+
+ifdef CONFIG_SPL_BUILD
+obj-$(CONFIG_SPL_SPI_BOOT) += fsl_espi_spl.o
+ifeq ($(CONFIG_$(SPL_TPL_)SPI_FLASH_TINY),y)
+spi-nor-y += spi-nor-tiny.o
+else
+spi-nor-y += spi-nor-core.o
+endif
+else
+spi-nor-y += spi-nor-core.o
+endif
+
+obj-$(CONFIG_SPI_FLASH) += spi-nor.o
+obj-$(CONFIG_SPI_FLASH_DATAFLASH) += sf_dataflash.o
+obj-$(CONFIG_$(SPL_TPL_)SPI_FLASH_MTD) += sf_mtd.o
+obj-$(CONFIG_SPI_FLASH_SANDBOX) += sandbox.o
diff --git a/roms/u-boot/drivers/mtd/spi/fsl_espi_spl.c b/roms/u-boot/drivers/mtd/spi/fsl_espi_spl.c
new file mode 100644
index 000000000..5c41d7558
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/fsl_espi_spl.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <hang.h>
+#include <spi_flash.h>
+#include <malloc.h>
+
+#define ESPI_BOOT_IMAGE_SIZE 0x48
+#define ESPI_BOOT_IMAGE_ADDR 0x50
+#define CONFIG_CFG_DATA_SECTOR 0
+
+void fsl_spi_spl_load_image(uint32_t offs, unsigned int size, void *vdst)
+{
+ struct spi_flash *flash;
+
+ flash = spi_flash_probe(CONFIG_ENV_SPI_BUS, CONFIG_ENV_SPI_CS,
+ CONFIG_ENV_SPI_MAX_HZ, CONFIG_ENV_SPI_MODE);
+ if (flash == NULL) {
+ puts("\nspi_flash_probe failed");
+ hang();
+ }
+
+ spi_flash_read(flash, offs, size, vdst);
+}
+
+/*
+ * The main entry for SPI booting. It's necessary that SDRAM is already
+ * configured and available since this code loads the main U-Boot image
+ * from SPI into SDRAM and starts it from there.
+ */
+void fsl_spi_boot(void)
+{
+ void (*uboot)(void) __noreturn;
+ u32 offset, code_len, copy_len = 0;
+#ifndef CONFIG_FSL_CORENET
+ unsigned char *buf = NULL;
+#endif
+ struct spi_flash *flash;
+
+ flash = spi_flash_probe(CONFIG_ENV_SPI_BUS, CONFIG_ENV_SPI_CS,
+ CONFIG_ENV_SPI_MAX_HZ, CONFIG_ENV_SPI_MODE);
+ if (flash == NULL) {
+ puts("\nspi_flash_probe failed");
+ hang();
+ }
+
+#ifdef CONFIG_FSL_CORENET
+ offset = CONFIG_SYS_SPI_FLASH_U_BOOT_OFFS;
+ code_len = CONFIG_SYS_SPI_FLASH_U_BOOT_SIZE;
+#else
+ /*
+ * Load U-Boot image from SPI flash into RAM
+ */
+ buf = malloc(flash->page_size);
+ if (buf == NULL) {
+ puts("\nmalloc failed");
+ hang();
+ }
+ memset(buf, 0, flash->page_size);
+
+ spi_flash_read(flash, CONFIG_CFG_DATA_SECTOR,
+ flash->page_size, (void *)buf);
+ offset = *(u32 *)(buf + ESPI_BOOT_IMAGE_ADDR);
+ /* Skip spl code */
+ offset += CONFIG_SYS_SPI_FLASH_U_BOOT_OFFS;
+ /* Get the code size from offset 0x48 */
+ code_len = *(u32 *)(buf + ESPI_BOOT_IMAGE_SIZE);
+ /* Skip spl code */
+ code_len = code_len - CONFIG_SPL_MAX_SIZE;
+#endif
+ /* copy code to DDR */
+ printf("Loading second stage boot loader ");
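+ /* the image is copied in 8 KiB (0x2000) chunks, one dot per chunk */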
+ while (copy_len <= code_len) {
+ spi_flash_read(flash, offset + copy_len, 0x2000,
+ (void *)(CONFIG_SYS_SPI_FLASH_U_BOOT_DST
+ + copy_len));
+ copy_len = copy_len + 0x2000;
+ putc('.');
+ }
+
+ /*
+ * Jump to U-Boot image
+ */
+ flush_cache(CONFIG_SYS_SPI_FLASH_U_BOOT_DST, code_len);
+ uboot = (void *)CONFIG_SYS_SPI_FLASH_U_BOOT_START;
+ (*uboot)();
+}
diff --git a/roms/u-boot/drivers/mtd/spi/sandbox.c b/roms/u-boot/drivers/mtd/spi/sandbox.c
new file mode 100644
index 000000000..3c01e3b41
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/sandbox.c
@@ -0,0 +1,606 @@
+/*
+ * Simulate a SPI flash
+ *
+ * Copyright (c) 2011-2013 The Chromium OS Authors.
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#define LOG_CATEGORY UCLASS_SPI_FLASH
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <os.h>
+
+#include <spi_flash.h>
+#include "sf_internal.h"
+
+#include <asm/getopt.h>
+#include <asm/spi.h>
+#include <asm/state.h>
+#include <dm/device-internal.h>
+#include <dm/lists.h>
+#include <dm/uclass-internal.h>
+
+/*
+ * The different states that our SPI flash transitions between.
+ * We need to keep track of this across multiple xfer calls since
+ * the SPI bus could possibly call down into us multiple times.
+ */
+enum sandbox_sf_state {
+ SF_CMD, /* default state -- we're awaiting a command */
+ SF_ID, /* read the flash's (jedec) ID code */
+ SF_ADDR, /* processing the offset in the flash to read/etc... */
+ SF_READ, /* reading data from the flash */
+ SF_WRITE, /* writing data to the flash, i.e. page programming */
+ SF_ERASE, /* erase the flash */
+ SF_READ_STATUS, /* read the flash's status register */
+ SF_READ_STATUS1, /* read the flash's status register upper 8 bits*/
+ SF_WRITE_STATUS, /* write the flash's status register */
+};
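+
+/*
+ * Example: a plain READ (0x03) starts in SF_CMD, consumes three bytes
+ * in SF_ADDR, then stays in SF_READ until chip select is deasserted.
+ */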
+
+static const char *sandbox_sf_state_name(enum sandbox_sf_state state)
+{
+ static const char * const states[] = {
+ "CMD", "ID", "ADDR", "READ", "WRITE", "ERASE", "READ_STATUS",
+ "READ_STATUS1", "WRITE_STATUS",
+ };
+ return states[state];
+}
+
+/* Bits for the status register */
+#define STAT_WIP (1 << 0)
+#define STAT_WEL (1 << 1)
+#define STAT_BP_SHIFT 2
+#define STAT_BP_MASK (7 << STAT_BP_SHIFT)
+
+/* Assume all SPI flashes have 3 byte addresses since they do atm */
+#define SF_ADDR_LEN 3
+
+#define IDCODE_LEN 3
+
+/* Used to quickly bulk erase backing store */
+static u8 sandbox_sf_0xff[0x1000];
+
+/* Internal state data for each SPI flash */
+struct sandbox_spi_flash {
+ unsigned int cs; /* Chip select we are attached to */
+ /*
+ * As we receive data over the SPI bus, our flash transitions
+ * between states. For example, we start off in the SF_CMD
+ * state where the first byte tells us what operation to perform
+ * (such as read or write the flash). But the operation itself
+ * can go through a few states such as first reading in the
+ * offset in the flash to perform the requested operation.
+ * Thus "state" stores the exact state that our machine is in
+ * while "cmd" stores the overall command we're processing.
+ */
+ enum sandbox_sf_state state;
+ uint cmd;
+ /* Erase size of current erase command */
+ uint erase_size;
+ /* Current position in the flash; used when reading/writing/etc... */
+ uint off;
+ /* How many address bytes we've consumed */
+ uint addr_bytes, pad_addr_bytes;
+ /* The current flash status (see STAT_XXX defines above) */
+ u16 status;
+ /* Data describing the flash we're emulating */
+ const struct flash_info *data;
+ /* The file on disk to serve up data from */
+ int fd;
+};
+
+struct sandbox_spi_flash_plat_data {
+ const char *filename;
+ const char *device_name;
+ int bus;
+ int cs;
+};
+
+void sandbox_sf_set_block_protect(struct udevice *dev, int bp_mask)
+{
+ struct sandbox_spi_flash *sbsf = dev_get_priv(dev);
+
+ sbsf->status &= ~STAT_BP_MASK;
+ sbsf->status |= bp_mask << STAT_BP_SHIFT;
+}
+
+/**
+ * This is a very strange probe function. If it has platform data (which may
+ * have come from the device tree) then this function gets the filename and
+ * device type from there.
+ */
+static int sandbox_sf_probe(struct udevice *dev)
+{
+ /* spec = idcode:file */
+ struct sandbox_spi_flash *sbsf = dev_get_priv(dev);
+ size_t len, idname_len;
+ const struct flash_info *data;
+ struct sandbox_spi_flash_plat_data *pdata = dev_get_plat(dev);
+ struct sandbox_state *state = state_get_current();
+ struct dm_spi_slave_plat *slave_plat;
+ struct udevice *bus = dev->parent;
+ const char *spec = NULL;
+ struct udevice *emul;
+ int ret = 0;
+ int cs = -1;
+
+ debug("%s: bus %d, looking for emul=%p: ", __func__, dev_seq(bus), dev);
+ ret = sandbox_spi_get_emul(state, bus, dev, &emul);
+ if (ret) {
+ printf("Error: Unknown chip select for device '%s'\n",
+ dev->name);
+ return ret;
+ }
+ slave_plat = dev_get_parent_plat(dev);
+ cs = slave_plat->cs;
+ debug("found at cs %d\n", cs);
+
+ if (!pdata->filename) {
+ printf("Error: No filename available\n");
+ return -EINVAL;
+ }
+ spec = strchr(pdata->device_name, ',');
+ if (spec)
+ spec++;
+ else
+ spec = pdata->device_name;
+ idname_len = strlen(spec);
+ debug("%s: device='%s'\n", __func__, spec);
+
+ for (data = spi_nor_ids; data->name; data++) {
+ len = strlen(data->name);
+ if (idname_len != len)
+ continue;
+ if (!strncasecmp(spec, data->name, len))
+ break;
+ }
+ if (!data->name) {
+ printf("%s: unknown flash '%*s'\n", __func__, (int)idname_len,
+ spec);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (sandbox_sf_0xff[0] == 0x00)
+ memset(sandbox_sf_0xff, 0xff, sizeof(sandbox_sf_0xff));
+
+ sbsf->fd = os_open(pdata->filename, 02);
+ if (sbsf->fd == -1) {
+ printf("%s: unable to open file '%s'\n", __func__,
+ pdata->filename);
+ ret = -EIO;
+ goto error;
+ }
+
+ sbsf->data = data;
+ sbsf->cs = cs;
+
+ return 0;
+
+ error:
+ debug("%s: Got error %d\n", __func__, ret);
+ return ret;
+}
+
+static int sandbox_sf_remove(struct udevice *dev)
+{
+ struct sandbox_spi_flash *sbsf = dev_get_priv(dev);
+
+ os_close(sbsf->fd);
+
+ return 0;
+}
+
+static void sandbox_sf_cs_activate(struct udevice *dev)
+{
+ struct sandbox_spi_flash *sbsf = dev_get_priv(dev);
+
+ log_content("sandbox_sf: CS activated; state is fresh!\n");
+
+ /* CS is asserted, so reset state */
+ sbsf->off = 0;
+ sbsf->addr_bytes = 0;
+ sbsf->pad_addr_bytes = 0;
+ sbsf->state = SF_CMD;
+ sbsf->cmd = SF_CMD;
+}
+
+static void sandbox_sf_cs_deactivate(struct udevice *dev)
+{
+ log_content("sandbox_sf: CS deactivated; cmd done processing!\n");
+}
+
+/*
+ * There are times when the data lines are allowed to tristate. What
+ * is actually sensed on the line depends on the hardware. It could
+ * always be 0xFF/0x00 (if there are pull ups/downs), or things could
+ * float and so we'd get garbage back. This func encapsulates that
+ * scenario so we can worry about the details here.
+ */
+static void sandbox_spi_tristate(u8 *buf, uint len)
+{
+ /* XXX: make this into a user config option ? */
+ memset(buf, 0xff, len);
+}
+
+/* Figure out what command this stream is telling us to do */
+static int sandbox_sf_process_cmd(struct sandbox_spi_flash *sbsf, const u8 *rx,
+ u8 *tx)
+{
+ enum sandbox_sf_state oldstate = sbsf->state;
+
+ /* We need to output a byte for the cmd byte we just ate */
+ if (tx)
+ sandbox_spi_tristate(tx, 1);
+
+ sbsf->cmd = rx[0];
+ switch (sbsf->cmd) {
+ case SPINOR_OP_RDID:
+ sbsf->state = SF_ID;
+ sbsf->cmd = SF_ID;
+ break;
+ case SPINOR_OP_READ_FAST:
+ sbsf->pad_addr_bytes = 1;
+ /* fall through */
+ case SPINOR_OP_READ:
+ case SPINOR_OP_PP:
+ sbsf->state = SF_ADDR;
+ break;
+ case SPINOR_OP_WRDI:
+ debug(" write disabled\n");
+ sbsf->status &= ~STAT_WEL;
+ break;
+ case SPINOR_OP_RDSR:
+ sbsf->state = SF_READ_STATUS;
+ break;
+ case SPINOR_OP_RDSR2:
+ sbsf->state = SF_READ_STATUS1;
+ break;
+ case SPINOR_OP_WREN:
+ debug(" write enabled\n");
+ sbsf->status |= STAT_WEL;
+ break;
+ case SPINOR_OP_WRSR:
+ sbsf->state = SF_WRITE_STATUS;
+ break;
+ default: {
+ int flags = sbsf->data->flags;
+
+ /* we only support erase here */
+ if (sbsf->cmd == SPINOR_OP_CHIP_ERASE) {
+ sbsf->erase_size = sbsf->data->sector_size *
+ sbsf->data->n_sectors;
+ } else if (sbsf->cmd == SPINOR_OP_BE_4K && (flags & SECT_4K)) {
+ sbsf->erase_size = 4 << 10;
+ } else if (sbsf->cmd == SPINOR_OP_SE && !(flags & SECT_4K)) {
+ sbsf->erase_size = 64 << 10;
+ } else {
+ debug(" cmd unknown: %#x\n", sbsf->cmd);
+ return -EIO;
+ }
+ sbsf->state = SF_ADDR;
+ break;
+ }
+ }
+
+ if (oldstate != sbsf->state)
+ log_content(" cmd: transition to %s state\n",
+ sandbox_sf_state_name(sbsf->state));
+
+ return 0;
+}
+
+int sandbox_erase_part(struct sandbox_spi_flash *sbsf, int size)
+{
+ int todo;
+ int ret;
+
+ while (size > 0) {
+ todo = min(size, (int)sizeof(sandbox_sf_0xff));
+ ret = os_write(sbsf->fd, sandbox_sf_0xff, todo);
+ if (ret != todo)
+ return ret;
+ size -= todo;
+ }
+
+ return 0;
+}
+
+static int sandbox_sf_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *rxp, void *txp, unsigned long flags)
+{
+ struct sandbox_spi_flash *sbsf = dev_get_priv(dev);
+ const uint8_t *rx = rxp;
+ uint8_t *tx = txp;
+ uint cnt, pos = 0;
+ int bytes = bitlen / 8;
+ int ret;
+
+ log_content("sandbox_sf: state:%x(%s) bytes:%u\n", sbsf->state,
+ sandbox_sf_state_name(sbsf->state), bytes);
+
+ if ((flags & SPI_XFER_BEGIN))
+ sandbox_sf_cs_activate(dev);
+
+ if (sbsf->state == SF_CMD) {
+ /* Figure out the initial state */
+ ret = sandbox_sf_process_cmd(sbsf, rx, tx);
+ if (ret)
+ return ret;
+ ++pos;
+ }
+
+ /* Process the remaining data */
+ while (pos < bytes) {
+ switch (sbsf->state) {
+ case SF_ID: {
+ u8 id;
+
+ log_content(" id: off:%u tx:", sbsf->off);
+ if (sbsf->off < IDCODE_LEN) {
+ /* Extract correct byte from ID 0x00aabbcc */
+ id = ((JEDEC_MFR(sbsf->data) << 16) |
+ JEDEC_ID(sbsf->data)) >>
+ (8 * (IDCODE_LEN - 1 - sbsf->off));
+ } else {
+ id = 0;
+ }
+ log_content("%d %02x\n", sbsf->off, id);
+ tx[pos++] = id;
+ ++sbsf->off;
+ break;
+ }
+ case SF_ADDR:
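+ /*
+ * Address bytes arrive MSB first: e.g. 0x01 0x02 0x03
+ * accumulates to sbsf->off == 0x010203.
+ */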
+ log_content(" addr: bytes:%u rx:%02x ",
+ sbsf->addr_bytes, rx[pos]);
+
+ if (sbsf->addr_bytes++ < SF_ADDR_LEN)
+ sbsf->off = (sbsf->off << 8) | rx[pos];
+ log_content("addr:%06x\n", sbsf->off);
+
+ if (tx)
+ sandbox_spi_tristate(&tx[pos], 1);
+ pos++;
+
+ /* See if we're done processing */
+ if (sbsf->addr_bytes <
+ SF_ADDR_LEN + sbsf->pad_addr_bytes)
+ break;
+
+ /* Next state! */
+ if (os_lseek(sbsf->fd, sbsf->off, OS_SEEK_SET) < 0) {
+ puts("sandbox_sf: os_lseek() failed");
+ return -EIO;
+ }
+ switch (sbsf->cmd) {
+ case SPINOR_OP_READ_FAST:
+ case SPINOR_OP_READ:
+ sbsf->state = SF_READ;
+ break;
+ case SPINOR_OP_PP:
+ sbsf->state = SF_WRITE;
+ break;
+ default:
+ /* assume erase state ... */
+ sbsf->state = SF_ERASE;
+ goto case_sf_erase;
+ }
+ log_content(" cmd: transition to %s state\n",
+ sandbox_sf_state_name(sbsf->state));
+ break;
+ case SF_READ:
+ /*
+ * XXX: need to handle exotic behavior:
+ * - reading past end of device
+ */
+
+ cnt = bytes - pos;
+ log_content(" tx: read(%u)\n", cnt);
+ assert(tx);
+ ret = os_read(sbsf->fd, tx + pos, cnt);
+ if (ret < 0) {
+ puts("sandbox_sf: os_read() failed\n");
+ return -EIO;
+ }
+ pos += ret;
+ break;
+ case SF_READ_STATUS:
+ log_content(" read status: %#x\n", sbsf->status);
+ cnt = bytes - pos;
+ memset(tx + pos, sbsf->status, cnt);
+ pos += cnt;
+ break;
+ case SF_READ_STATUS1:
+ log_content(" read status: %#x\n", sbsf->status);
+ cnt = bytes - pos;
+ memset(tx + pos, sbsf->status >> 8, cnt);
+ pos += cnt;
+ break;
+ case SF_WRITE_STATUS:
+ log_content(" write status: %#x (ignored)\n", rx[pos]);
+ pos = bytes;
+ break;
+ case SF_WRITE:
+ /*
+ * XXX: need to handle exotic behavior:
+ * - unaligned addresses
+ * - more than a page (256) worth of data
+ * - reading past end of device
+ */
+ if (!(sbsf->status & STAT_WEL)) {
+ puts("sandbox_sf: write enable not set before write\n");
+ goto done;
+ }
+
+ cnt = bytes - pos;
+ log_content(" rx: write(%u)\n", cnt);
+ if (tx)
+ sandbox_spi_tristate(&tx[pos], cnt);
+ ret = os_write(sbsf->fd, rx + pos, cnt);
+ if (ret < 0) {
+ puts("sandbox_spi: os_write() failed\n");
+ return -EIO;
+ }
+ pos += ret;
+ sbsf->status &= ~STAT_WEL;
+ break;
+ case SF_ERASE:
+ case_sf_erase: {
+ if (!(sbsf->status & STAT_WEL)) {
+ puts("sandbox_sf: write enable not set before erase\n");
+ goto done;
+ }
+
+ /* verify address is aligned */
+ if (sbsf->off & (sbsf->erase_size - 1)) {
+ log_content(" sector erase: cmd:%#x needs align:%#x, but we got %#x\n",
+ sbsf->cmd, sbsf->erase_size,
+ sbsf->off);
+ sbsf->status &= ~STAT_WEL;
+ goto done;
+ }
+
+ log_content(" sector erase addr: %u, size: %u\n",
+ sbsf->off, sbsf->erase_size);
+
+ cnt = bytes - pos;
+ if (tx)
+ sandbox_spi_tristate(&tx[pos], cnt);
+ pos += cnt;
+
+ /*
+ * TODO(vapier@gentoo.org): latch WIP in status, and
+ * delay before clearing it ?
+ */
+ ret = sandbox_erase_part(sbsf, sbsf->erase_size);
+ sbsf->status &= ~STAT_WEL;
+ if (ret) {
+ log_content("sandbox_sf: Erase failed\n");
+ goto done;
+ }
+ goto done;
+ }
+ default:
+ log_content(" ??? no idea what to do ???\n");
+ goto done;
+ }
+ }
+
+ done:
+ if (flags & SPI_XFER_END)
+ sandbox_sf_cs_deactivate(dev);
+ return pos == bytes ? 0 : -EIO;
+}
+
+int sandbox_sf_of_to_plat(struct udevice *dev)
+{
+ struct sandbox_spi_flash_plat_data *pdata = dev_get_plat(dev);
+
+ pdata->filename = dev_read_string(dev, "sandbox,filename");
+ pdata->device_name = dev_read_string(dev, "compatible");
+ if (!pdata->filename || !pdata->device_name) {
+ debug("%s: Missing properties, filename=%s, device_name=%s\n",
+ __func__, pdata->filename, pdata->device_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct dm_spi_emul_ops sandbox_sf_emul_ops = {
+ .xfer = sandbox_sf_xfer,
+};
+
+#ifdef CONFIG_SPI_FLASH
+int sandbox_sf_bind_emul(struct sandbox_state *state, int busnum, int cs,
+ struct udevice *bus, ofnode node, const char *spec)
+{
+ struct udevice *emul;
+ char name[20], *str;
+ struct driver *drv;
+ int ret;
+
+ /* now the emulator */
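+ /* keep 6 bytes spare so the "-emul" suffix and NUL always fit */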
+ strncpy(name, spec, sizeof(name) - 6);
+ name[sizeof(name) - 6] = '\0';
+ strcat(name, "-emul");
+ drv = lists_driver_lookup_name("sandbox_sf_emul");
+ if (!drv) {
+ puts("Cannot find sandbox_sf_emul driver\n");
+ return -ENOENT;
+ }
+ str = strdup(name);
+ if (!str)
+ return -ENOMEM;
+ ret = device_bind(bus, drv, str, NULL, node, &emul);
+ if (ret) {
+ free(str);
+ printf("Cannot create emul device for spec '%s' (err=%d)\n",
+ spec, ret);
+ return ret;
+ }
+ state->spi[busnum][cs].emul = emul;
+
+ return 0;
+}
+
+void sandbox_sf_unbind_emul(struct sandbox_state *state, int busnum, int cs)
+{
+ struct udevice *dev;
+
+ dev = state->spi[busnum][cs].emul;
+ device_remove(dev, DM_REMOVE_NORMAL);
+ device_unbind(dev);
+ state->spi[busnum][cs].emul = NULL;
+}
+
+int sandbox_spi_get_emul(struct sandbox_state *state,
+ struct udevice *bus, struct udevice *slave,
+ struct udevice **emulp)
+{
+ struct sandbox_spi_info *info;
+ int busnum = dev_seq(bus);
+ int cs = spi_chip_select(slave);
+ int ret;
+
+ info = &state->spi[busnum][cs];
+ if (!info->emul) {
+ /* Use the same device tree node as the SPI flash device */
+ debug("%s: busnum=%u, cs=%u: binding SPI flash emulation: ",
+ __func__, busnum, cs);
+ ret = sandbox_sf_bind_emul(state, busnum, cs, bus,
+ dev_ofnode(slave), slave->name);
+ if (ret) {
+ debug("failed (err=%d)\n", ret);
+ return ret;
+ }
+ debug("OK\n");
+ }
+ *emulp = info->emul;
+
+ return 0;
+}
+#endif
+
+static const struct udevice_id sandbox_sf_ids[] = {
+ { .compatible = "sandbox,spi-flash" },
+ { }
+};
+
+U_BOOT_DRIVER(sandbox_sf_emul) = {
+ .name = "sandbox_sf_emul",
+ .id = UCLASS_SPI_EMUL,
+ .of_match = sandbox_sf_ids,
+ .of_to_plat = sandbox_sf_of_to_plat,
+ .probe = sandbox_sf_probe,
+ .remove = sandbox_sf_remove,
+ .priv_auto = sizeof(struct sandbox_spi_flash),
+ .plat_auto = sizeof(struct sandbox_spi_flash_plat_data),
+ .ops = &sandbox_sf_emul_ops,
+};
diff --git a/roms/u-boot/drivers/mtd/spi/sf-uclass.c b/roms/u-boot/drivers/mtd/spi/sf-uclass.c
new file mode 100644
index 000000000..cfce00ef5
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/sf-uclass.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2014 Google, Inc
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi_flash.h>
+#include <asm/global_data.h>
+#include <dm/device-internal.h>
+#include "sf_internal.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int spi_flash_read_dm(struct udevice *dev, u32 offset, size_t len, void *buf)
+{
+ return log_ret(sf_get_ops(dev)->read(dev, offset, len, buf));
+}
+
+int spi_flash_write_dm(struct udevice *dev, u32 offset, size_t len,
+ const void *buf)
+{
+ return log_ret(sf_get_ops(dev)->write(dev, offset, len, buf));
+}
+
+int spi_flash_erase_dm(struct udevice *dev, u32 offset, size_t len)
+{
+ return log_ret(sf_get_ops(dev)->erase(dev, offset, len));
+}
+
+int spl_flash_get_sw_write_prot(struct udevice *dev)
+{
+ struct dm_spi_flash_ops *ops = sf_get_ops(dev);
+
+ if (!ops->get_sw_write_prot)
+ return -ENOSYS;
+ return log_ret(ops->get_sw_write_prot(dev));
+}
+
+/*
+ * TODO(sjg@chromium.org): This is an old-style function. We should remove
+ * it when all SPI flash drivers use dm
+ */
+struct spi_flash *spi_flash_probe(unsigned int bus, unsigned int cs,
+ unsigned int max_hz, unsigned int spi_mode)
+{
+ struct udevice *dev;
+
+ if (spi_flash_probe_bus_cs(bus, cs, max_hz, spi_mode, &dev))
+ return NULL;
+
+ return dev_get_uclass_priv(dev);
+}
+
+int spi_flash_probe_bus_cs(unsigned int busnum, unsigned int cs,
+ unsigned int max_hz, unsigned int spi_mode,
+ struct udevice **devp)
+{
+ struct spi_slave *slave;
+ struct udevice *bus;
+ char *str;
+ int ret;
+
+#if defined(CONFIG_SPL_BUILD) && CONFIG_IS_ENABLED(USE_TINY_PRINTF)
+ str = "spi_flash";
+#else
+ char name[30];
+
+ snprintf(name, sizeof(name), "spi_flash@%d:%d", busnum, cs);
+ str = strdup(name);
+#endif
+ ret = spi_get_bus_and_cs(busnum, cs, max_hz, spi_mode,
+ "jedec_spi_nor", str, &bus, &slave);
+ if (ret)
+ return ret;
+
+ *devp = slave->dev;
+ return 0;
+}
+
+static int spi_flash_post_bind(struct udevice *dev)
+{
+#if defined(CONFIG_NEEDS_MANUAL_RELOC)
+ struct dm_spi_flash_ops *ops = sf_get_ops(dev);
+ static int reloc_done;
+
+ if (!reloc_done) {
+ if (ops->read)
+ ops->read += gd->reloc_off;
+ if (ops->write)
+ ops->write += gd->reloc_off;
+ if (ops->erase)
+ ops->erase += gd->reloc_off;
+
+ reloc_done++;
+ }
+#endif
+ return 0;
+}
+
+UCLASS_DRIVER(spi_flash) = {
+ .id = UCLASS_SPI_FLASH,
+ .name = "spi_flash",
+ .post_bind = spi_flash_post_bind,
+ .per_device_auto = sizeof(struct spi_nor),
+};
diff --git a/roms/u-boot/drivers/mtd/spi/sf_dataflash.c b/roms/u-boot/drivers/mtd/spi/sf_dataflash.c
new file mode 100644
index 000000000..b59edd152
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/sf_dataflash.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Atmel DataFlash probing
+ *
+ * Copyright (C) 2004-2009, 2015 Freescale Semiconductor, Inc.
+ * Haikun Wang (haikun.wang@freescale.com)
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <flash.h>
+#include <log.h>
+#include <spi.h>
+#include <spi_flash.h>
+#include <div64.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+
+#include "sf_internal.h"
+
+#define CMD_READ_ID 0x9f
+/* reads can bypass the buffers */
+#define OP_READ_CONTINUOUS 0xE8
+#define OP_READ_PAGE 0xD2
+
+/* group B requests can run even while status reports "busy" */
+#define OP_READ_STATUS 0xD7 /* group B */
+
+/* move data between host and buffer */
+#define OP_READ_BUFFER1 0xD4 /* group B */
+#define OP_READ_BUFFER2 0xD6 /* group B */
+#define OP_WRITE_BUFFER1 0x84 /* group B */
+#define OP_WRITE_BUFFER2 0x87 /* group B */
+
+/* erasing flash */
+#define OP_ERASE_PAGE 0x81
+#define OP_ERASE_BLOCK 0x50
+
+/* move data between buffer and flash */
+#define OP_TRANSFER_BUF1 0x53
+#define OP_TRANSFER_BUF2 0x55
+#define OP_MREAD_BUFFER1 0xD4
+#define OP_MREAD_BUFFER2 0xD6
+#define OP_MWERASE_BUFFER1 0x83
+#define OP_MWERASE_BUFFER2 0x86
+#define OP_MWRITE_BUFFER1 0x88 /* sector must be pre-erased */
+#define OP_MWRITE_BUFFER2 0x89 /* sector must be pre-erased */
+
+/* write to buffer, then write-erase to flash */
+#define OP_PROGRAM_VIA_BUF1 0x82
+#define OP_PROGRAM_VIA_BUF2 0x85
+
+/* compare buffer to flash */
+#define OP_COMPARE_BUF1 0x60
+#define OP_COMPARE_BUF2 0x61
+
+/* read flash to buffer, then write-erase to flash */
+#define OP_REWRITE_VIA_BUF1 0x58
+#define OP_REWRITE_VIA_BUF2 0x59
+
+/*
+ * newer chips report JEDEC manufacturer and device IDs; chip
+ * serial number and OTP bits; and per-sector writeprotect.
+ */
+#define OP_READ_ID 0x9F
+#define OP_READ_SECURITY 0x77
+#define OP_WRITE_SECURITY_REVC 0x9A
+#define OP_WRITE_SECURITY 0x9B /* revision D */
+
+struct dataflash {
+ uint8_t command[16];
+ unsigned short page_offset; /* offset in flash address */
+};
+
+/* Return the status of the DataFlash device */
+static inline int dataflash_status(struct spi_slave *spi)
+{
+ int ret;
+ u8 opcode = OP_READ_STATUS;
+ u8 status;
+
+ /*
+ * NOTE: at45db321c over 25 MHz wants to write
+ * a dummy byte after the opcode...
+ */
+ ret = spi_write_then_read(spi, &opcode, 1, NULL, &status, 1);
+ return ret ? -EIO : status;
+}
+
+/*
+ * Poll the DataFlash device until it is READY.
+ * This usually takes 5-20 msec or so; more for sector erase.
+ * ready: return > 0
+ */
+static int dataflash_waitready(struct spi_slave *spi)
+{
+ int status;
+ int timeout = 2 * CONFIG_SYS_HZ;
+ int timebase;
+
+ timebase = get_timer(0);
+ do {
+ status = dataflash_status(spi);
+ if (status < 0)
+ status = 0;
+
+ if (status & (1 << 7)) /* RDY/nBSY */
+ return status;
+
+ mdelay(3);
+ } while (get_timer(timebase) < timeout);
+
+ return -ETIME;
+}
+
+/* Erase pages of flash */
+static int spi_dataflash_erase(struct udevice *dev, u32 offset, size_t len)
+{
+ struct dataflash *dataflash;
+ struct spi_flash *spi_flash;
+ struct spi_slave *spi;
+ unsigned blocksize;
+ uint8_t *command;
+ uint32_t rem;
+ int status;
+
+ dataflash = dev_get_priv(dev);
+ spi_flash = dev_get_uclass_priv(dev);
+ spi = spi_flash->spi;
+
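+ /* an AT45 "block" is eight pages, hence page_size << 3 */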
+ blocksize = spi_flash->page_size << 3;
+
+ memset(dataflash->command, 0 , sizeof(dataflash->command));
+ command = dataflash->command;
+
+ debug("%s: erase addr=0x%x len 0x%x\n", dev->name, offset, len);
+
+ div_u64_rem(len, spi_flash->page_size, &rem);
+ if (rem) {
+ printf("%s: len(0x%x) isn't a multiple of page size(0x%x)\n",
+ dev->name, len, spi_flash->page_size);
+ return -EINVAL;
+ }
+ div_u64_rem(offset, spi_flash->page_size, &rem);
+ if (rem) {
+ printf("%s: offset(0x%x) isn't a multiple of page size(0x%x)\n",
+ dev->name, offset, spi_flash->page_size);
+ return -EINVAL;
+ }
+
+ status = spi_claim_bus(spi);
+ if (status) {
+ debug("dataflash: unable to claim SPI bus\n");
+ return status;
+ }
+
+ while (len > 0) {
+ unsigned int pageaddr;
+ int do_block;
+ /*
+ * Calculate flash page address; use block erase (for speed) if
+ * we're at a block boundary and need to erase the whole block.
+ */
+ pageaddr = div_u64(offset, spi_flash->page_size);
+ do_block = (pageaddr & 0x7) == 0 && len >= blocksize;
+ pageaddr = pageaddr << dataflash->page_offset;
+
+ command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
+ command[1] = (uint8_t)(pageaddr >> 16);
+ command[2] = (uint8_t)(pageaddr >> 8);
+ command[3] = 0;
+
+ debug("%s ERASE %s: (%x) %x %x %x [%d]\n",
+ dev->name, do_block ? "block" : "page",
+ command[0], command[1], command[2], command[3],
+ pageaddr);
+
+ status = spi_write_then_read(spi, command, 4, NULL, NULL, 0);
+ if (status < 0) {
+ debug("%s: erase send command error!\n", dev->name);
+ return -EIO;
+ }
+
+ status = dataflash_waitready(spi);
+ if (status < 0) {
+ debug("%s: erase waitready error!\n", dev->name);
+ return status;
+ }
+
+ if (do_block) {
+ offset += blocksize;
+ len -= blocksize;
+ } else {
+ offset += spi_flash->page_size;
+ len -= spi_flash->page_size;
+ }
+ }
+
+ spi_release_bus(spi);
+
+ return 0;
+}
+
+/*
+ * Read from the DataFlash device.
+ * offset : Start offset in flash device
+ * len : Amount to read
+ * buf : Buffer containing the data
+ */
+static int spi_dataflash_read(struct udevice *dev, u32 offset, size_t len,
+ void *buf)
+{
+ struct dataflash *dataflash;
+ struct spi_flash *spi_flash;
+ struct spi_slave *spi;
+ unsigned int addr;
+ uint8_t *command;
+ int status;
+
+ dataflash = dev_get_priv(dev);
+ spi_flash = dev_get_uclass_priv(dev);
+ spi = spi_flash->spi;
+
+ memset(dataflash->command, 0 , sizeof(dataflash->command));
+ command = dataflash->command;
+
+ debug("%s: read addr=0x%x len 0x%x\n", dev->name, offset, len);
+
+ /* Calculate flash page/byte address */
+ addr = (((unsigned)offset / spi_flash->page_size)
+ << dataflash->page_offset)
+ + ((unsigned)offset % spi_flash->page_size);
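+ /*
+ * Worked example: with 264-byte pages (page_offset == 9), byte
+ * offset 1000 is page 3, byte 208, so addr = (3 << 9) + 208.
+ */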
+
+ status = spi_claim_bus(spi);
+ if (status) {
+ debug("dataflash: unable to claim SPI bus\n");
+ return status;
+ }
+
+ /*
+ * Continuous read, max clock = f(car) which may be less than
+ * the peak rate available. Some chips support commands with
+ * fewer "don't care" bytes. Both buffers stay unchanged.
+ */
+ command[0] = OP_READ_CONTINUOUS;
+ command[1] = (uint8_t)(addr >> 16);
+ command[2] = (uint8_t)(addr >> 8);
+ command[3] = (uint8_t)(addr >> 0);
+
+ debug("READ: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ /* plus 4 "don't care" bytes, command len: 4 + 4 "don't care" bytes */
+ status = spi_write_then_read(spi, command, 8, NULL, buf, len);
+
+ spi_release_bus(spi);
+
+ return status;
+}
+
+/*
+ * Write to the DataFlash device.
+ * offset : Start offset in flash device
+ * len : Amount to write
+ * buf : Buffer containing the data
+ */
+int spi_dataflash_write(struct udevice *dev, u32 offset, size_t len,
+ const void *buf)
+{
+ struct dataflash *dataflash;
+ struct spi_flash *spi_flash;
+ struct spi_slave *spi;
+ uint8_t *command;
+ unsigned int pageaddr, addr, to, writelen;
+ size_t remaining = len;
+ u_char *writebuf = (u_char *)buf;
+ int status = -EINVAL;
+
+ dataflash = dev_get_priv(dev);
+ spi_flash = dev_get_uclass_priv(dev);
+ spi = spi_flash->spi;
+
+ memset(dataflash->command, 0 , sizeof(dataflash->command));
+ command = dataflash->command;
+
+ debug("%s: write 0x%x..0x%x\n", dev->name, offset, (offset + len));
+
+ pageaddr = ((unsigned)offset / spi_flash->page_size);
+ to = ((unsigned)offset % spi_flash->page_size);
+ if (to + len > spi_flash->page_size)
+ writelen = spi_flash->page_size - to;
+ else
+ writelen = len;
+
+ status = spi_claim_bus(spi);
+ if (status) {
+ debug("dataflash: unable to claim SPI bus\n");
+ return status;
+ }
+
+ while (remaining > 0) {
+ debug("write @ %d:%d len=%d\n", pageaddr, to, writelen);
+
+ /*
+ * REVISIT:
+ * (a) each page in a sector must be rewritten at least
+ * once every 10K sibling erase/program operations.
+ * (b) for pages that are already erased, we could
+ * use WRITE+MWRITE not PROGRAM for ~30% speedup.
+ * (c) WRITE to buffer could be done while waiting for
+ * a previous MWRITE/MWERASE to complete ...
+ * (d) error handling here seems to be mostly missing.
+ *
+ * Two persistent bits per page, plus a per-sector counter,
+ * could support (a) and (b) ... we might consider using
+ * the second half of sector zero, which is just one block,
+ * to track that state. (On AT91, that sector should also
+ * support boot-from-DataFlash.)
+ */
+
+ addr = pageaddr << dataflash->page_offset;
+
+ /* (1) Maybe transfer partial page to Buffer1 */
+ if (writelen != spi_flash->page_size) {
+ command[0] = OP_TRANSFER_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = 0;
+
+ debug("TRANSFER: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_write_then_read(spi, command, 4,
+ NULL, NULL, 0);
+ if (status < 0) {
+ debug("%s: write(<pagesize) command error!\n",
+ dev->name);
+ return -EIO;
+ }
+
+ status = dataflash_waitready(spi);
+ if (status < 0) {
+ debug("%s: write(<pagesize) waitready error!\n",
+ dev->name);
+ return status;
+ }
+ }
+
+ /* (2) Program full page via Buffer1 */
+ addr += to;
+ command[0] = OP_PROGRAM_VIA_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = (addr & 0x000000FF);
+
+ debug("PROGRAM: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_write_then_read(spi, command, 4,
+ writebuf, NULL, writelen);
+ if (status < 0) {
+ debug("%s: write send command error!\n", dev->name);
+ return -EIO;
+ }
+
+ status = dataflash_waitready(spi);
+ if (status < 0) {
+ debug("%s: write waitready error!\n", dev->name);
+ return status;
+ }
+
+#ifdef CONFIG_SPI_DATAFLASH_WRITE_VERIFY
+ /* (3) Compare to Buffer1 */
+ addr = pageaddr << dataflash->page_offset;
+ command[0] = OP_COMPARE_BUF1;
+ command[1] = (addr & 0x00FF0000) >> 16;
+ command[2] = (addr & 0x0000FF00) >> 8;
+ command[3] = 0;
+
+ debug("COMPARE: (%x) %x %x %x\n",
+ command[0], command[1], command[2], command[3]);
+
+ status = spi_write_then_read(spi, command, 4,
+ writebuf, NULL, writelen);
+ if (status < 0) {
+ debug("%s: write(compare) send command error!\n",
+ dev->name);
+ return -EIO;
+ }
+
+ status = dataflash_waitready(spi);
+
+ /* Check result of the compare operation */
+ if (status & (1 << 6)) {
+ printf("dataflash: write compare page %u, err %d\n",
+ pageaddr, status);
+ remaining = 0;
+ status = -EIO;
+ break;
+ } else {
+ status = 0;
+ }
+
+#endif /* CONFIG_SPI_DATAFLASH_WRITE_VERIFY */
+ remaining = remaining - writelen;
+ pageaddr++;
+ to = 0;
+ writebuf += writelen;
+
+ if (remaining > spi_flash->page_size)
+ writelen = spi_flash->page_size;
+ else
+ writelen = remaining;
+ }
+
+ spi_release_bus(spi);
+
+ return 0;
+}
+
+static int add_dataflash(struct udevice *dev, char *name, int nr_pages,
+ int pagesize, int pageoffset, char revision)
+{
+ struct spi_flash *spi_flash;
+ struct dataflash *dataflash;
+
+ dataflash = dev_get_priv(dev);
+ spi_flash = dev_get_uclass_priv(dev);
+
+ dataflash->page_offset = pageoffset;
+
+ spi_flash->name = name;
+ spi_flash->page_size = pagesize;
+ spi_flash->size = nr_pages * pagesize;
+ spi_flash->erase_size = pagesize;
+
+#ifndef CONFIG_SPL_BUILD
+ printf("SPI DataFlash: Detected %s with page size ", spi_flash->name);
+ print_size(spi_flash->page_size, ", erase size ");
+ print_size(spi_flash->erase_size, ", total ");
+ print_size(spi_flash->size, "");
+ printf(", revision %c", revision);
+ puts("\n");
+#endif
+
+ return 0;
+}
+
+struct data_flash_info {
+ char *name;
+
+ /*
+ * JEDEC id has a high byte of zero plus three data bytes:
+ * the manufacturer id, then a two byte device id.
+ */
+ uint32_t jedec_id;
+
+ /* The size listed here is what works with OP_ERASE_PAGE. */
+ unsigned nr_pages;
+ uint16_t pagesize;
+ uint16_t pageoffset;
+
+ uint16_t flags;
+#define SUP_POW2PS 0x0002 /* supports 2^N byte pages */
+#define IS_POW2PS 0x0001 /* uses 2^N byte pages */
+};
+
+static struct data_flash_info dataflash_data[] = {
+ /*
+ * NOTE: chips with SUP_POW2PS (rev D and up) need two entries,
+ * one with IS_POW2PS and the other without. The entry with the
+ * non-2^N byte page size can't name exact chip revisions without
+ * losing backwards compatibility for cmdlinepart.
+ *
+ * The two entries are spelled differently to make the difference
+ * obvious: upper-case names refer to chips that are not in the
+ * normal 2^N byte page-size mode, lower-case names to chips that
+ * are.
+ *
+ * These newer chips also support 128-byte security registers (with
+ * 64 bytes one-time-programmable) and software write-protection.
+ */
+ { "AT45DB011B", 0x1f2200, 512, 264, 9, SUP_POW2PS},
+ { "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB021B", 0x1f2300, 1024, 264, 9, SUP_POW2PS},
+ { "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB041x", 0x1f2400, 2048, 264, 9, SUP_POW2PS},
+ { "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB081B", 0x1f2500, 4096, 264, 9, SUP_POW2PS},
+ { "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB161x", 0x1f2600, 4096, 528, 10, SUP_POW2PS},
+ { "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB321x", 0x1f2700, 8192, 528, 10, 0}, /* rev C */
+
+ { "AT45DB321x", 0x1f2701, 8192, 528, 10, SUP_POW2PS},
+ { "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS},
+
+ { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
+ { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
+};
+
+static struct data_flash_info *jedec_probe(struct spi_slave *spi)
+{
+ int tmp;
+ uint8_t id[5];
+ uint32_t jedec;
+ struct data_flash_info *info;
+ u8 opcode = CMD_READ_ID;
+ int status;
+
+ /*
+ * JEDEC also defines an optional "extended device information"
+ * string for after vendor-specific data, after the three bytes
+ * we use here. Supporting some chips might require using it.
+ *
+ * If the vendor ID isn't Atmel's (0x1f), assume this call failed.
+ * That's not an error; only rev C and newer chips handle it, and
+ * only Atmel sells these chips.
+ */
+ tmp = spi_write_then_read(spi, &opcode, 1, NULL, id, sizeof(id));
+ if (tmp < 0) {
+ printf("dataflash: error %d reading JEDEC ID\n", tmp);
+ return ERR_PTR(tmp);
+ }
+ if (id[0] != 0x1f)
+ return NULL;
+
+ jedec = id[0];
+ jedec = jedec << 8;
+ jedec |= id[1];
+ jedec = jedec << 8;
+ jedec |= id[2];
+
+ for (tmp = 0, info = dataflash_data;
+ tmp < ARRAY_SIZE(dataflash_data);
+ tmp++, info++) {
+ if (info->jedec_id == jedec) {
+ if (info->flags & SUP_POW2PS) {
+ status = dataflash_status(spi);
+ if (status < 0) {
+ debug("dataflash: status error %d\n",
+ status);
+ return NULL;
+ }
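+ /* status bit 0 reports whether the power-of-2 page size is active */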
+ if (status & 0x1) {
+ if (info->flags & IS_POW2PS)
+ return info;
+ } else {
+ if (!(info->flags & IS_POW2PS))
+ return info;
+ }
+ } else {
+ return info;
+ }
+ }
+ }
+
+ /*
+ * Treat other chips as errors ... we won't know the right page
+ * size (it might be binary) even when we can tell which density
+ * class is involved (legacy chip id scheme).
+ */
+ printf("dataflash: JEDEC id %06x not handled\n", jedec);
+ return ERR_PTR(-ENODEV);
+}
+
+/*
+ * Detect and initialize DataFlash device, using JEDEC IDs on newer chips
+ * or else the ID code embedded in the status bits:
+ *
+ * Device Density ID code #Pages PageSize Offset
+ * AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
+ * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
+ * AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
+ * AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
+ * AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
+ * AT45DB0321B 32Mbit (4M) xx1101xx (0x34) 8192 528 10
+ * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
+ * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
+ */
+static int spi_dataflash_probe(struct udevice *dev)
+{
+ struct spi_slave *spi = dev_get_parent_priv(dev);
+ struct spi_flash *spi_flash;
+ struct data_flash_info *info;
+ int status;
+
+ spi_flash = dev_get_uclass_priv(dev);
+ spi_flash->spi = spi;
+ spi_flash->dev = dev;
+
+ status = spi_claim_bus(spi);
+ if (status)
+ return status;
+
+ /*
+ * Try to detect dataflash by JEDEC ID.
+ * If it succeeds we know we have either a C or D part.
+ * D will support power of 2 pagesize option.
+ * Both support the security register, though with different
+ * write procedures.
+ */
+ info = jedec_probe(spi);
+ if (IS_ERR(info))
+ goto err_jedec_probe;
+ if (info != NULL) {
+ status = add_dataflash(dev, info->name, info->nr_pages,
+ info->pagesize, info->pageoffset,
+ (info->flags & SUP_POW2PS) ? 'd' : 'c');
+ if (status < 0)
+ goto err_status;
+ }
+
+ /*
+ * Older chips support only legacy commands, identifying
+ * capacity using bits in the status byte.
+ */
+ status = dataflash_status(spi);
+ if (status <= 0 || status == 0xff) {
+ printf("dataflash: read status error %d\n", status);
+ if (status == 0 || status == 0xff)
+ status = -ENODEV;
+ goto err_jedec_probe;
+ }
+
+ /*
+ * if there's a device there, assume it's dataflash.
+ * board setup should have set spi->max_speed_hz to
+ * match f(car) for continuous reads, mode 0 or 3.
+ */
+ switch (status & 0x3c) {
+ case 0x0c: /* 0 0 1 1 x x */
+ status = add_dataflash(dev, "AT45DB011B", 512, 264, 9, 0);
+ break;
+ case 0x14: /* 0 1 0 1 x x */
+ status = add_dataflash(dev, "AT45DB021B", 1024, 264, 9, 0);
+ break;
+ case 0x1c: /* 0 1 1 1 x x */
+ status = add_dataflash(dev, "AT45DB041x", 2048, 264, 9, 0);
+ break;
+ case 0x24: /* 1 0 0 1 x x */
+ status = add_dataflash(dev, "AT45DB081B", 4096, 264, 9, 0);
+ break;
+ case 0x2c: /* 1 0 1 1 x x */
+ status = add_dataflash(dev, "AT45DB161x", 4096, 528, 10, 0);
+ break;
+ case 0x34: /* 1 1 0 1 x x */
+ status = add_dataflash(dev, "AT45DB321x", 8192, 528, 10, 0);
+ break;
+ case 0x38: /* 1 1 1 x x x */
+ case 0x3c:
+ status = add_dataflash(dev, "AT45DB642x", 8192, 1056, 11, 0);
+ break;
+ /* obsolete AT45DB1282 not (yet?) supported */
+ default:
+ printf("dataflash: unsupported device (%x)\n", status & 0x3c);
+ status = -ENODEV;
+ goto err_status;
+ }
+
+ return status;
+
+err_status:
+ spi_free_slave(spi);
+err_jedec_probe:
+ spi_release_bus(spi);
+ return status;
+}
+
+static const struct dm_spi_flash_ops spi_dataflash_ops = {
+ .read = spi_dataflash_read,
+ .write = spi_dataflash_write,
+ .erase = spi_dataflash_erase,
+};
+
+static const struct udevice_id spi_dataflash_ids[] = {
+ { .compatible = "atmel,at45", },
+ { .compatible = "atmel,dataflash", },
+ { }
+};
+
+U_BOOT_DRIVER(spi_dataflash) = {
+ .name = "spi_dataflash",
+ .id = UCLASS_SPI_FLASH,
+ .of_match = spi_dataflash_ids,
+ .probe = spi_dataflash_probe,
+ .priv_auto = sizeof(struct dataflash),
+ .ops = &spi_dataflash_ops,
+};
diff --git a/roms/u-boot/drivers/mtd/spi/sf_internal.h b/roms/u-boot/drivers/mtd/spi/sf_internal.h
new file mode 100644
index 000000000..786301ba4
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/sf_internal.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SPI flash internal definitions
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ * Copyright (C) 2013 Jagannadha Sutradharudu Teki, Xilinx Inc.
+ */
+
+#ifndef _SF_INTERNAL_H_
+#define _SF_INTERNAL_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#define SPI_NOR_MAX_ID_LEN 6
+#define SPI_NOR_MAX_ADDR_WIDTH 4
+
+struct flash_info {
+#if !CONFIG_IS_ENABLED(SPI_FLASH_TINY)
+ char *name;
+#endif
+
+ /*
+ * This array stores the ID bytes.
+ * The first three bytes are the JEDEC ID.
+ * JEDEC ID zero means "no ID" (mostly older chips).
+ */
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ u8 id_len;
+
+ /* The size listed here is what works with SPINOR_OP_SE, which isn't
+ * necessarily called a "sector" by the vendor.
+ */
+ unsigned int sector_size;
+ u16 n_sectors;
+
+ u16 page_size;
+ u16 addr_width;
+
+ u32 flags;
+#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
+#define SST_WRITE BIT(2) /* use SST byte programming */
+#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
+#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
+#define USE_FSR BIT(7) /* use flag status register */
+#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB BIT(9) /*
+ * Flash SR has Top/Bottom (TB) protect
+ * bit. Must be used with
+ * SPI_NOR_HAS_LOCK.
+ */
+#define SPI_S3AN BIT(10) /*
+ * Xilinx Spartan 3AN In-System Flash
+ * (MFR cannot be used for probing
+ * because it has the same value as
+ * ATMEL flashes)
+ */
+#define SPI_NOR_4B_OPCODES BIT(11) /*
+ * Use dedicated 4byte address op codes
+ * to support memory size above 128Mib.
+ */
+#define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
+#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
+#define USE_CLSR BIT(14) /* use CLSR command */
+#define SPI_NOR_HAS_SST26LOCK BIT(15) /* Flash supports lock/unlock via BPR */
+#define SPI_NOR_OCTAL_READ BIT(16) /* Flash supports Octal Read */
+};
+
+extern const struct flash_info spi_nor_ids[];
+
+#define JEDEC_MFR(info) ((info)->id[0])
+#define JEDEC_ID(info) (((info)->id[1]) << 8 | ((info)->id[2]))
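+
+/*
+ * Example: a Winbond W25Q128 reports ID bytes ef 40 18, giving
+ * JEDEC_MFR() == 0xef and JEDEC_ID() == 0x4018.
+ */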
+
+/* Get software write-protect value (BP bits) */
+int spi_flash_cmd_get_sw_write_prot(struct spi_flash *flash);
+
+
+#if CONFIG_IS_ENABLED(SPI_FLASH_MTD)
+int spi_flash_mtd_register(struct spi_flash *flash);
+void spi_flash_mtd_unregister(void);
+#else
+static inline int spi_flash_mtd_register(struct spi_flash *flash)
+{
+ return 0;
+}
+
+static inline void spi_flash_mtd_unregister(void)
+{
+}
+#endif
+
+#endif /* _SF_INTERNAL_H_ */
diff --git a/roms/u-boot/drivers/mtd/spi/sf_mtd.c b/roms/u-boot/drivers/mtd/spi/sf_mtd.c
new file mode 100644
index 000000000..987fac250
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/sf_mtd.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012-2014 Daniel Schwierzeck, daniel.schwierzeck@gmail.com
+ */
+
+#include <common.h>
+#include <flash.h>
+#include <malloc.h>
+#include <linux/errno.h>
+#include <linux/mtd/mtd.h>
+#include <spi_flash.h>
+
+static struct mtd_info sf_mtd_info;
+static bool sf_mtd_registered;
+static char sf_mtd_name[8];
+
+static int spi_flash_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_flash *flash = mtd->priv;
+ int err;
+
+ if (!flash)
+ return -ENODEV;
+
+ instr->state = MTD_ERASING;
+
+ err = spi_flash_erase(flash, instr->addr, instr->len);
+ if (err) {
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
+ return -EIO;
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+static int spi_flash_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_flash *flash = mtd->priv;
+ int err;
+
+ if (!flash)
+ return -ENODEV;
+
+ err = spi_flash_read(flash, from, len, buf);
+ if (!err)
+ *retlen = len;
+
+ return err;
+}
+
+static int spi_flash_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_flash *flash = mtd->priv;
+ int err;
+
+ if (!flash)
+ return -ENODEV;
+
+ err = spi_flash_write(flash, to, len, buf);
+ if (!err)
+ *retlen = len;
+
+ return err;
+}
+
+static void spi_flash_mtd_sync(struct mtd_info *mtd)
+{
+}
+
+static int spi_flash_mtd_number(void)
+{
+#ifdef CONFIG_SYS_MAX_FLASH_BANKS
+ return CONFIG_SYS_MAX_FLASH_BANKS;
+#else
+ return 0;
+#endif
+}
+
+int spi_flash_mtd_register(struct spi_flash *flash)
+{
+ int ret;
+
+ if (sf_mtd_registered) {
+ ret = del_mtd_device(&sf_mtd_info);
+ if (ret)
+ return ret;
+
+ sf_mtd_registered = false;
+ }
+
+ memset(&sf_mtd_info, 0, sizeof(sf_mtd_info));
+ sprintf(sf_mtd_name, "nor%d", spi_flash_mtd_number());
+
+ sf_mtd_info.name = sf_mtd_name;
+ sf_mtd_info.type = MTD_NORFLASH;
+ sf_mtd_info.flags = MTD_CAP_NORFLASH;
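+ /* NOR is byte-writable, but a program op buffers at most one page */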
+ sf_mtd_info.writesize = 1;
+ sf_mtd_info.writebufsize = flash->page_size;
+
+ sf_mtd_info._erase = spi_flash_mtd_erase;
+ sf_mtd_info._read = spi_flash_mtd_read;
+ sf_mtd_info._write = spi_flash_mtd_write;
+ sf_mtd_info._sync = spi_flash_mtd_sync;
+
+ sf_mtd_info.size = flash->size;
+ sf_mtd_info.priv = flash;
+
+ /* Only uniform flash devices for now */
+ sf_mtd_info.numeraseregions = 0;
+ sf_mtd_info.erasesize = flash->sector_size;
+
+ ret = add_mtd_device(&sf_mtd_info);
+ if (!ret)
+ sf_mtd_registered = true;
+
+ return ret;
+}
+
+void spi_flash_mtd_unregister(void)
+{
+ int ret;
+
+ if (!sf_mtd_registered)
+ return;
+
+ ret = del_mtd_device(&sf_mtd_info);
+ if (!ret) {
+ sf_mtd_registered = false;
+ return;
+ }
+
+ /*
+ * Setting mtd->priv to NULL is the best we can do. Thanks to that,
+ * the MTD layer can still call mtd hooks without risking a
+ * use-after-free bug. Still, things should be fixed to prevent the
+ * spi_flash object from being destroyed when del_mtd_device() fails.
+ */
+ sf_mtd_info.priv = NULL;
+ printf("Failed to unregister MTD %s and the spi_flash object is going away: you're in deep trouble!",
+ sf_mtd_info.name);
+}
diff --git a/roms/u-boot/drivers/mtd/spi/sf_probe.c b/roms/u-boot/drivers/mtd/spi/sf_probe.c
new file mode 100644
index 000000000..3befbe91c
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/sf_probe.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * SPI flash probing
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ * Copyright (C) 2010 Reinhard Meyer, EMK Elektronik
+ * Copyright (C) 2013 Jagannadha Sutradharudu Teki, Xilinx Inc.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi_flash.h>
+
+#include "sf_internal.h"
+
+/**
+ * spi_flash_probe_slave() - Probe for a SPI flash device on a bus
+ *
+ * @flash: flash structure to fill in; flash->spi must already point to the
+ * SPI slave being probed
+ * Return: 0 if OK, -ve on error
+ */
+static int spi_flash_probe_slave(struct spi_flash *flash)
+{
+ struct spi_slave *spi = flash->spi;
+ int ret;
+
+ /* Setup spi_slave */
+ if (!spi) {
+ printf("SF: Failed to set up slave\n");
+ return -ENODEV;
+ }
+
+ /* Claim spi bus */
+ ret = spi_claim_bus(spi);
+ if (ret) {
+ debug("SF: Failed to claim SPI bus: %d\n", ret);
+ return ret;
+ }
+
+ ret = spi_nor_scan(flash);
+ if (ret)
+ goto err_read_id;
+
+ if (CONFIG_IS_ENABLED(SPI_FLASH_MTD))
+ ret = spi_flash_mtd_register(flash);
+
+err_read_id:
+ spi_release_bus(spi);
+ return ret;
+}
+
+#if !CONFIG_IS_ENABLED(DM_SPI_FLASH)
+struct spi_flash *spi_flash_probe(unsigned int busnum, unsigned int cs,
+ unsigned int max_hz, unsigned int spi_mode)
+{
+ struct spi_slave *bus;
+ struct spi_flash *flash;
+
+ bus = spi_setup_slave(busnum, cs, max_hz, spi_mode);
+ if (!bus)
+ return NULL;
+
+ /* Allocate space if needed (not used by sf-uclass) */
+ flash = calloc(1, sizeof(*flash));
+ if (!flash) {
+ debug("SF: Failed to allocate spi_flash\n");
+ spi_free_slave(bus);
+ return NULL;
+ }
+
+ flash->spi = bus;
+ if (spi_flash_probe_slave(flash)) {
+ spi_free_slave(bus);
+ free(flash);
+ return NULL;
+ }
+
+ return flash;
+}
+
+void spi_flash_free(struct spi_flash *flash)
+{
+ if (CONFIG_IS_ENABLED(SPI_FLASH_MTD))
+ spi_flash_mtd_unregister();
+
+ spi_free_slave(flash->spi);
+ free(flash);
+}
+
+#else /* defined CONFIG_DM_SPI_FLASH */
+
+static int spi_flash_std_read(struct udevice *dev, u32 offset, size_t len,
+ void *buf)
+{
+ struct spi_flash *flash = dev_get_uclass_priv(dev);
+ struct mtd_info *mtd = &flash->mtd;
+ size_t retlen;
+
+ return log_ret(mtd->_read(mtd, offset, len, &retlen, buf));
+}
+
+static int spi_flash_std_write(struct udevice *dev, u32 offset, size_t len,
+ const void *buf)
+{
+ struct spi_flash *flash = dev_get_uclass_priv(dev);
+ struct mtd_info *mtd = &flash->mtd;
+ size_t retlen;
+
+ return mtd->_write(mtd, offset, len, &retlen, buf);
+}
+
+static int spi_flash_std_erase(struct udevice *dev, u32 offset, size_t len)
+{
+ struct spi_flash *flash = dev_get_uclass_priv(dev);
+ struct mtd_info *mtd = &flash->mtd;
+ struct erase_info instr;
+
+ if (offset % mtd->erasesize || len % mtd->erasesize) {
+ debug("SF: Erase offset/length not multiple of erase size\n");
+ return -EINVAL;
+ }
+
+ memset(&instr, 0, sizeof(instr));
+ instr.addr = offset;
+ instr.len = len;
+
+ return mtd->_erase(mtd, &instr);
+}
+
+static int spi_flash_std_get_sw_write_prot(struct udevice *dev)
+{
+ struct spi_flash *flash = dev_get_uclass_priv(dev);
+
+ return spi_flash_cmd_get_sw_write_prot(flash);
+}
+
+int spi_flash_std_probe(struct udevice *dev)
+{
+ struct spi_slave *slave = dev_get_parent_priv(dev);
+ struct spi_flash *flash;
+
+ flash = dev_get_uclass_priv(dev);
+ flash->dev = dev;
+ flash->spi = slave;
+ return spi_flash_probe_slave(flash);
+}
+
+static int spi_flash_std_remove(struct udevice *dev)
+{
+ if (CONFIG_IS_ENABLED(SPI_FLASH_MTD))
+ spi_flash_mtd_unregister();
+
+ return 0;
+}
+
+static const struct dm_spi_flash_ops spi_flash_std_ops = {
+ .read = spi_flash_std_read,
+ .write = spi_flash_std_write,
+ .erase = spi_flash_std_erase,
+ .get_sw_write_prot = spi_flash_std_get_sw_write_prot,
+};
+
+static const struct udevice_id spi_flash_std_ids[] = {
+ { .compatible = "jedec,spi-nor" },
+ { }
+};
+
+U_BOOT_DRIVER(jedec_spi_nor) = {
+ .name = "jedec_spi_nor",
+ .id = UCLASS_SPI_FLASH,
+ .of_match = spi_flash_std_ids,
+ .probe = spi_flash_std_probe,
+ .remove = spi_flash_std_remove,
+ .priv_auto = sizeof(struct spi_nor),
+ .ops = &spi_flash_std_ops,
+};
+
+DM_DRIVER_ALIAS(jedec_spi_nor, spansion_m25p16)
+
+#endif /* CONFIG_DM_SPI_FLASH */
diff --git a/roms/u-boot/drivers/mtd/spi/spi-nor-core.c b/roms/u-boot/drivers/mtd/spi/spi-nor-core.c
new file mode 100644
index 000000000..a6625535a
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/spi-nor-core.c
@@ -0,0 +1,2660 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * Synced from Linux v4.19
+ */
+
+#include <common.h>
+#include <log.h>
+#include <watchdog.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <spi-mem.h>
+#include <spi.h>
+
+#include "sf_internal.h"
+
+/*
+ * Max time to poll the status register before we give up. This covers
+ * everything but a full-chip erase; it could probably be much smaller, but is
+ * kept generous for safety.
+ */
+
+#define HZ CONFIG_SYS_HZ
+
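+/*
+ * get_timer() counts milliseconds and CONFIG_SYS_HZ is 1000 by U-Boot
+ * convention, so this amounts to a 40 second ready-wait timeout.
+ */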
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+static int spi_nor_read_write_reg(struct spi_nor *nor, struct spi_mem_op *op,
+ void *buf)
+{
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ op->data.buf.in = buf;
+ else
+ op->data.buf.out = buf;
+ return spi_mem_exec_op(nor->spi, op);
+}
+
+static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(len, NULL, 1));
+ int ret;
+
+ ret = spi_nor_read_write_reg(nor, &op, val);
+ if (ret < 0)
+ dev_dbg(nor->dev, "error %d reading %x\n", ret, code);
+
+ return ret;
+}
+
+static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+
+ return spi_nor_read_write_reg(nor, &op, buf);
+}
+
+static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(len, buf, 1));
+ size_t remaining = len;
+ int ret;
+
+ /* get transfer protocols. */
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
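+ /* e.g. 8 dummy clock cycles on a 4-bit bus occupy (8 * 4) / 8 = 4 bytes */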
+
+ while (remaining) {
+ op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
+ ret = spi_mem_adjust_op_size(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ op.addr.val += op.data.nbytes;
+ remaining -= op.data.nbytes;
+ op.data.buf.in += op.data.nbytes;
+ }
+
+ return len;
+}
+
+static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+ const u_char *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ int ret;
+
+ /* get transfer protocols. */
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+ op.addr.nbytes = 0;
+
+ ret = spi_mem_adjust_op_size(nor->spi, &op);
+ if (ret)
+ return ret;
+ op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes;
+
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ return op.data.nbytes;
+}
+
+/*
+ * Read the status register.
+ * Returns the register value (>= 0), or a negative errno if an error occurred.
+ */
+static int read_sr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
+ if (ret < 0) {
+ pr_debug("error %d reading SR\n", (int)ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the flag status register.
+ * Returns the register value (>= 0), or a negative errno if an error occurred.
+ */
+static int read_fsr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
+ if (ret < 0) {
+ pr_debug("error %d reading FSR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the configuration register.
+ * Returns the register value (>= 0), or a negative errno if an error occurred.
+ */
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+static int read_cr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
+ if (ret < 0) {
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+#endif
+
+/*
+ * Write one byte to the status register.
+ * Returns negative if an error occurred.
+ */
+static int write_sr(struct spi_nor *nor, u8 val)
+{
+ nor->cmd_buf[0] = val;
+ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
+}
+
+/*
+ * Set the write enable latch with the Write Enable command.
+ * Returns negative if an error occurred.
+ */
+static int write_enable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+}
+
+/*
+ * Send write disable instruction to the chip.
+ */
+static int write_disable(struct spi_nor *nor)
+{
+ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
+}
+
+static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+#ifndef CONFIG_SPI_FLASH_BAR
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+static u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
+ { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
+
+ { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
+ { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
+ { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+ static const u8 spi_nor_3to4_program[][2] = {
+ { SPINOR_OP_PP, SPINOR_OP_PP_4B },
+ { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
+ { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
+ { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
+ { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+ ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+ static const u8 spi_nor_3to4_erase[][2] = {
+ { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
+ { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
+ { SPINOR_OP_SE, SPINOR_OP_SE_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+ ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ /* Do some manufacturer fixups first */
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_SPANSION:
+ /* No small sector erase for 4-byte command set */
+ nor->erase_opcode = SPINOR_OP_SE;
+ nor->mtd.erasesize = info->sector_size;
+ break;
+
+ default:
+ break;
+ }
+
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+ nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+ nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+}
+#endif /* !CONFIG_SPI_FLASH_BAR */
+
+/* Enable/disable 4-byte addressing mode. */
+static int set_4byte(struct spi_nor *nor, const struct flash_info *info,
+ int enable)
+{
+ int status;
+ bool need_wren = false;
+ u8 cmd;
+
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_ST:
+ case SNOR_MFR_MICRON:
+ /* Some Micron chips need the WREN command; all will accept it */
+ need_wren = true;
+ /* fall through */
+ case SNOR_MFR_ISSI:
+ case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_WINBOND:
+ if (need_wren)
+ write_enable(nor);
+
+ cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+ status = nor->write_reg(nor, cmd, NULL, 0);
+ if (need_wren)
+ write_disable(nor);
+
+ if (!status && !enable &&
+ JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+ /*
+ * On Winbond W25Q256FV, leaving 4byte mode causes
+ * the Extended Address Register to be set to 1, so all
+ * 3-byte-address reads come from the second 16M.
+ * We must clear the register to enable normal behavior.
+ */
+ write_enable(nor);
+ nor->cmd_buf[0] = 0;
+ nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
+ write_disable(nor);
+ }
+
+ return status;
+ default:
+ /* Spansion style */
+ nor->cmd_buf[0] = enable << 7;
+ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
+ }
+}
+
+static int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int sr = read_sr(nor);
+
+ if (sr < 0)
+ return sr;
+
+ if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
+ if (sr & SR_E_ERR)
+ dev_dbg(nor->dev, "Erase Error occurred\n");
+ else
+ dev_dbg(nor->dev, "Programming Error occurred\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
+ return -EIO;
+ }
+
+ return !(sr & SR_WIP);
+}
+
+static int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int fsr = read_fsr(nor);
+
+ if (fsr < 0)
+ return fsr;
+
+ if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+ if (fsr & FSR_E_ERR)
+ dev_err(nor->dev, "Erase operation failed.\n");
+ else
+ dev_err(nor->dev, "Program operation failed.\n");
+
+ if (fsr & FSR_PT_ERR)
+ dev_err(nor->dev,
+ "Attempted to modify a protected sector.\n");
+
+ nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+ return -EIO;
+ }
+
+ return fsr & FSR_READY;
+}
+
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
+}
+
+/*
+ * Service routine that polls the status register until the flash is ready or
+ * a timeout occurs. Returns 0 when ready, negative on error or timeout.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout)
+{
+ unsigned long timebase;
+ int ret;
+
+ timebase = get_timer(0);
+
+ while (get_timer(timebase) < timeout) {
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+ }
+
+ dev_err(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+
+#ifdef CONFIG_SPI_FLASH_BAR
+/*
+ * This "clean_bar" is necessary in a situation when one was accessing
+ * spi flash memory > 16 MiB by using Bank Address Register's BA24 bit.
+ *
+ * After it the BA24 bit shall be cleared to allow access to correct
+ * memory region after SW reset (by calling "reset" command).
+ *
+ * Otherwise, the BA24 bit may be left set and then after reset, the
+ * ROM would read/write/erase SPL from 16 MiB * bank_sel address.
+ */
+static int clean_bar(struct spi_nor *nor)
+{
+ u8 cmd, bank_sel = 0;
+
+ if (nor->bank_curr == 0)
+ return 0;
+ cmd = nor->bank_write_cmd;
+ nor->bank_curr = 0;
+ write_enable(nor);
+
+ return nor->write_reg(nor, cmd, &bank_sel, 1);
+}
+
+static int write_bar(struct spi_nor *nor, u32 offset)
+{
+ u8 cmd, bank_sel;
+ int ret;
+
+ bank_sel = offset / SZ_16M;
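+ /* e.g. offset 0x1800000 (24 MiB) falls in bank 1, the second 16 MiB window */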
+ if (bank_sel == nor->bank_curr)
+ goto bar_end;
+
+ cmd = nor->bank_write_cmd;
+ write_enable(nor);
+ ret = nor->write_reg(nor, cmd, &bank_sel, 1);
+ if (ret < 0) {
+ debug("SF: fail to write bank register\n");
+ return ret;
+ }
+
+bar_end:
+ nor->bank_curr = bank_sel;
+ return nor->bank_curr;
+}
+
+static int read_bar(struct spi_nor *nor, const struct flash_info *info)
+{
+ u8 curr_bank = 0;
+ int ret;
+
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_SPANSION:
+ nor->bank_read_cmd = SPINOR_OP_BRRD;
+ nor->bank_write_cmd = SPINOR_OP_BRWR;
+ break;
+ default:
+ nor->bank_read_cmd = SPINOR_OP_RDEAR;
+ nor->bank_write_cmd = SPINOR_OP_WREAR;
+ }
+
+ ret = nor->read_reg(nor, nor->bank_read_cmd,
+ &curr_bank, 1);
+ if (ret) {
+ debug("SF: fail to read bank addr register\n");
+ return ret;
+ }
+ nor->bank_curr = curr_bank;
+
+ return 0;
+}
+#endif
+
+/*
+ * Initiate the erasure of a single sector
+ */
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_NO_DATA);
+
+ if (nor->erase)
+ return nor->erase(nor, addr);
+
+ /*
+ * Default implementation, if driver doesn't have a specialized HW
+ * control
+ */
+ return spi_mem_exec_op(nor->spi, &op);
+}
+
+/*
+ * Erase an address range on the nor chip. The address range may span
+ * one or more erase sectors. Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ u32 addr, len, rem;
+ int ret;
+
+ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+ (long long)instr->len);
+
+ if (!instr->len)
+ return 0;
+
+ div_u64_rem(instr->len, mtd->erasesize, &rem);
+ if (rem)
+ return -EINVAL;
+
+ addr = instr->addr;
+ len = instr->len;
+
+ while (len) {
+ WATCHDOG_RESET();
+#ifdef CONFIG_SPI_FLASH_BAR
+ ret = write_bar(nor, addr);
+ if (ret < 0)
+ return ret;
+#endif
+ write_enable(nor);
+
+ ret = spi_nor_erase_sector(nor, addr);
+ if (ret)
+ goto erase_err;
+
+ addr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto erase_err;
+ }
+
+erase_err:
+#ifdef CONFIG_SPI_FLASH_BAR
+ ret = clean_bar(nor);
+#endif
+ write_disable(nor);
+
+ return ret;
+}
+
+#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+ int ret;
+
+ write_enable(nor);
+ ret = write_sr(nor, status_new);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (ret < 0)
+ return ret;
+
+ return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
+static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
+ uint64_t *len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ int shift = ffs(mask) - 1;
+ int pow;
+
+ if (!(sr & mask)) {
+ /* No protection */
+ *ofs = 0;
+ *len = 0;
+ } else {
+ pow = ((sr & mask) ^ mask) >> shift;
+ *len = mtd->size >> pow;
+ if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
+ *ofs = 0;
+ else
+ *ofs = mtd->size - *len;
+ }
+}
+
+/*
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
+ */
+static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, u64 len,
+ u8 sr, bool locked)
+{
+ loff_t lock_offs;
+ uint64_t lock_len;
+
+ if (!len)
+ return 1;
+
+ stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
+
+ if (locked)
+ /* Requested range is a sub-range of locked range */
+ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+ else
+ /* Requested range does not overlap with locked range */
+ return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return stm_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+ u8 sr)
+{
+ return stm_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2} in the status register
+ * (SR). Does not support these features found in newer SR bitfields:
+ * - SEC: sector/block protect - only handle SEC=0 (block protect)
+ * - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ * - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
+ * --------------------------------------------------------------------------
+ * X | X | 0 | 0 | 0 | NONE | NONE
+ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
+ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
+ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
+ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
+ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
+ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
+ * X | X | 1 | 1 | 1 | 8 MB | ALL
+ * ------|-------|-------|-------|-------|---------------|-------------------
+ * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
+ * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
+ * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
+ * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
+ * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
+ * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ int status_old, status_new;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ status_old = read_sr(nor);
+ if (status_old < 0)
+ return status_old;
+
+ /* If nothing in our range is unlocked, we don't need to do anything */
+ if (stm_is_locked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is unlocked, we can't use 'bottom' protection */
+ if (!stm_is_locked_sr(nor, 0, ofs, status_old))
+ can_be_bottom = false;
+
+ /* If anything above us is unlocked, we can't use 'top' protection */
+ if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_top = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should end up locked */
+ if (use_top)
+ lock_len = mtd->size - ofs;
+ else
+ lock_len = ofs + len;
+
+ /*
+ * Need smallest pow such that:
+ *
+ * 1 / (2^pow) <= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
+ */
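+ /*
+ * Worked example (illustrative): locking the upper 1 MiB of an 8 MiB
+ * flash gives lock_len = 1 MiB, pow = 23 - 20 = 3 and
+ * val = mask - (3 << shift) = SR_BP2, the "Upper 1/8" row of the table
+ * in the comment above stm_lock().
+ */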
+ pow = ilog2(mtd->size) - ilog2(lock_len);
+ val = mask - (pow << shift);
+ if (val & ~mask)
+ return -EINVAL;
+ /* Don't "lock" with no region! */
+ if (!(val & mask))
+ return -EINVAL;
+
+ status_new = (status_old & ~mask & ~SR_TB) | val;
+
+ /* Disallow further writes if WP pin is asserted */
+ status_new |= SR_SRWD;
+
+ if (!use_top)
+ status_new |= SR_TB;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new & mask) < (status_old & mask))
+ return -EINVAL;
+
+ return write_sr_and_check(nor, status_new, mask);
+}
+
+/*
+ * Unlock a region of the flash. See stm_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ int status_old, status_new;
+ u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+ u8 shift = ffs(mask) - 1, pow, val;
+ loff_t lock_len;
+ bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+ bool use_top;
+
+ status_old = read_sr(nor);
+ if (status_old < 0)
+ return status_old;
+
+ /* If nothing in our range is locked, we don't need to do anything */
+ if (stm_is_unlocked_sr(nor, ofs, len, status_old))
+ return 0;
+
+ /* If anything below us is locked, we can't use 'top' protection */
+ if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
+ can_be_top = false;
+
+ /* If anything above us is locked, we can't use 'bottom' protection */
+ if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+ status_old))
+ can_be_bottom = false;
+
+ if (!can_be_bottom && !can_be_top)
+ return -EINVAL;
+
+ /* Prefer top, if both are valid */
+ use_top = can_be_top;
+
+ /* lock_len: length of region that should remain locked */
+ if (use_top)
+ lock_len = mtd->size - (ofs + len);
+ else
+ lock_len = ofs;
+
+ /*
+ * Need largest pow such that:
+ *
+ * 1 / (2^pow) >= (len / size)
+ *
+ * so (assuming power-of-2 size) we do:
+ *
+ * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
+ */
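+ /*
+ * Worked example (illustrative): on a fully locked 8 MiB flash,
+ * unlocking the bottom 6 MiB leaves lock_len = 2 MiB at the top:
+ * pow = 23 - 21 = 2 and val = mask - (2 << shift) = SR_BP2 | SR_BP0,
+ * the "Upper 1/4" row of the table in stm_lock().
+ */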
+ pow = ilog2(mtd->size) - order_base_2(lock_len);
+ if (lock_len == 0) {
+ val = 0; /* fully unlocked */
+ } else {
+ val = mask - (pow << shift);
+ /* Some power-of-two sizes are not supported */
+ if (val & ~mask)
+ return -EINVAL;
+ }
+
+ status_new = (status_old & ~mask & ~SR_TB) | val;
+
+ /* Don't protect status register if we're fully unlocked */
+ if (lock_len == 0)
+ status_new &= ~SR_SRWD;
+
+ if (!use_top)
+ status_new |= SR_TB;
+
+ /* Don't bother if they're the same */
+ if (status_new == status_old)
+ return 0;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new & mask) > (status_old & mask))
+ return -EINVAL;
+
+ return write_sr_and_check(nor, status_new, mask);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See stm_lock() for
+ * more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ int status;
+
+ status = read_sr(nor);
+ if (status < 0)
+ return status;
+
+ return stm_is_locked_sr(nor, ofs, len, status);
+}
+#endif /* CONFIG_SPI_FLASH_STMICRO || CONFIG_SPI_FLASH_SST */
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+ int tmp;
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ const struct flash_info *info;
+
+ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+ if (tmp < 0) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
+ return ERR_PTR(tmp);
+ }
+
+ info = spi_nor_ids;
+ for (; info->name; info++) {
+ if (info->id_len) {
+ if (!memcmp(info->id, id, info->id_len))
+ return info;
+ }
+ }
+
+ dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
+ id[0], id[1], id[2]);
+ return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ while (len) {
+ loff_t addr = from;
+ size_t read_len = len;
+
+#ifdef CONFIG_SPI_FLASH_BAR
+ u32 remain_len;
+
+ ret = write_bar(nor, addr);
+ if (ret < 0)
+ return log_ret(ret);
+ remain_len = (SZ_16M * (nor->bank_curr + 1)) - addr;
+
+ if (len < remain_len)
+ read_len = len;
+ else
+ read_len = remain_len;
+#endif
+
+ ret = nor->read(nor, addr, read_len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+#ifdef CONFIG_SPI_FLASH_BAR
+ ret = clean_bar(nor);
+#endif
+ return ret;
+}
+
+#ifdef CONFIG_SPI_FLASH_SST
+/*
+ * sst26 flash series has its own block protection implementation:
+ * 4x - 8 KByte blocks - read & write protection bits - upper addresses
+ * 1x - 32 KByte blocks - write protection bits
+ * rest - 64 KByte blocks - write protection bits
+ * 1x - 32 KByte blocks - write protection bits
+ * 4x - 8 KByte blocks - read & write protection bits - lower addresses
+ *
+ * We only support lock/unlock at 64 KByte granularity, so the lower and upper
+ * 64 KByte regions are each treated as a single block.
+ */
+#define SST26_BPR_8K_NUM 4
+#define SST26_MAX_BPR_REG_LEN (18 + 1)
+#define SST26_BOUND_REG_SIZE ((32 + SST26_BPR_8K_NUM * 8) * SZ_1K)
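+/*
+ * SST26_BOUND_REG_SIZE = (32 + 4 * 8) KiB = 64 KiB: the boundary region at
+ * each end of the flash that carries the finer-grained protection bits.
+ */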
+
+enum lock_ctl {
+ SST26_CTL_LOCK,
+ SST26_CTL_UNLOCK,
+ SST26_CTL_CHECK
+};
+
+static bool sst26_process_bpr(u32 bpr_size, u8 *cmd, u32 bit, enum lock_ctl ctl)
+{
+ switch (ctl) {
+ case SST26_CTL_LOCK:
+ cmd[bpr_size - (bit / 8) - 1] |= BIT(bit % 8);
+ break;
+ case SST26_CTL_UNLOCK:
+ cmd[bpr_size - (bit / 8) - 1] &= ~BIT(bit % 8);
+ break;
+ case SST26_CTL_CHECK:
+ return !!(cmd[bpr_size - (bit / 8) - 1] & BIT(bit % 8));
+ }
+
+ return false;
+}
+
+/*
+ * Lock, unlock or check the lock status of a region of the flash, depending
+ * on the lock_ctl value.
+ */
+static int sst26_lock_ctl(struct spi_nor *nor, loff_t ofs, uint64_t len, enum lock_ctl ctl)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ u32 i, bpr_ptr, rptr_64k, lptr_64k, bpr_size;
+ bool lower_64k = false, upper_64k = false;
+ u8 bpr_buff[SST26_MAX_BPR_REG_LEN] = {};
+ int ret;
+
+ /* Check length and offset for 64k alignment */
+ if ((ofs & (SZ_64K - 1)) || (len & (SZ_64K - 1))) {
+ dev_err(nor->dev, "length or offset is not 64KiB aligned\n");
+ return -EINVAL;
+ }
+
+ if (ofs + len > mtd->size) {
+ dev_err(nor->dev, "range is more than device size: %#llx + %#llx > %#llx\n",
+ ofs, len, mtd->size);
+ return -EINVAL;
+ }
+
+ /* SST26 family has only 16 Mbit, 32 Mbit and 64 Mbit IC */
+ if (mtd->size != SZ_2M &&
+ mtd->size != SZ_4M &&
+ mtd->size != SZ_8M)
+ return -EINVAL;
+
+ bpr_size = 2 + (mtd->size / SZ_64K / 8);
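+ /* e.g. a 4 MiB (32 Mbit) part has 64 x 64 KiB blocks: 2 + 64 / 8 = 10 bytes */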
+
+ ret = nor->read_reg(nor, SPINOR_OP_READ_BPR, bpr_buff, bpr_size);
+ if (ret < 0) {
+ dev_err(nor->dev, "fail to read block-protection register\n");
+ return ret;
+ }
+
+ rptr_64k = min_t(u32, ofs + len, mtd->size - SST26_BOUND_REG_SIZE);
+ lptr_64k = max_t(u32, ofs, SST26_BOUND_REG_SIZE);
+
+ upper_64k = ((ofs + len) > (mtd->size - SST26_BOUND_REG_SIZE));
+ lower_64k = (ofs < SST26_BOUND_REG_SIZE);
+
+ /* The lower bits in the block-protection register cover the 64k regions */
+ bpr_ptr = lptr_64k / SZ_64K - 1;
+
+ /* Process 64K blocks region */
+ while (lptr_64k < rptr_64k) {
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ bpr_ptr++;
+ lptr_64k += SZ_64K;
+ }
+
+ /* 32K and 8K region bits in BPR are after 64k region bits */
+ bpr_ptr = (mtd->size - 2 * SST26_BOUND_REG_SIZE) / SZ_64K;
+
+ /* Process lower 32K block region */
+ if (lower_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ bpr_ptr++;
+
+ /* Process upper 32K block region */
+ if (upper_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ bpr_ptr++;
+
+ /* Process lower 8K block regions */
+ for (i = 0; i < SST26_BPR_8K_NUM; i++) {
+ if (lower_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ /* In 8K area BPR has both read and write protection bits */
+ bpr_ptr += 2;
+ }
+
+ /* Process upper 8K block regions */
+ for (i = 0; i < SST26_BPR_8K_NUM; i++) {
+ if (upper_64k)
+ if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
+ return EACCES;
+
+ /* In 8K area BPR has both read and write protection bits */
+ bpr_ptr += 2;
+ }
+
+ /* If we check region status we don't need to write BPR back */
+ if (ctl == SST26_CTL_CHECK)
+ return 0;
+
+ ret = nor->write_reg(nor, SPINOR_OP_WRITE_BPR, bpr_buff, bpr_size);
+ if (ret < 0) {
+ dev_err(nor->dev, "fail to write block-protection register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sst26_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return sst26_lock_ctl(nor, ofs, len, SST26_CTL_UNLOCK);
+}
+
+static int sst26_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ return sst26_lock_ctl(nor, ofs, len, SST26_CTL_LOCK);
+}
+
+/*
+ * Returns EACCES (positive value) if region is locked, 0 if region is unlocked,
+ * and negative on errors.
+ */
+static int sst26_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+ /*
+ * The is_locked function is used for checks before reading or erasing
+ * a flash region, so the offset and length might not be 64k aligned;
+ * adjust them to be 64k aligned, since sst26_lock_ctl() works only
+ * with 64k aligned regions.
+ */
+ ofs -= ofs & (SZ_64K - 1);
+ len = len & (SZ_64K - 1) ? (len & ~(SZ_64K - 1)) + SZ_64K : len;
+
+ return sst26_lock_ctl(nor, ofs, len, SST26_CTL_CHECK);
+}
+
+static int sst_write_byteprogram(struct spi_nor *nor, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ size_t actual;
+ int ret = 0;
+
+ for (actual = 0; actual < len; actual++) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ write_enable(nor);
+ /* write one byte. */
+ ret = nor->write(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+ to++;
+ }
+
+sst_write_err:
+ write_disable(nor);
+ return ret;
+}
+
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct spi_slave *spi = nor->spi;
+ size_t actual;
+ int ret;
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+ if (spi->mode & SPI_TX_BYTE)
+ return sst_write_byteprogram(nor, to, len, retlen, buf);
+
+ write_enable(nor);
+
+ nor->sst_write_second = false;
+
+ actual = to % 2;
+ /* Start write from odd address. */
+ if (actual) {
+ nor->program_opcode = SPINOR_OP_BP;
+
+ /* write one byte. */
+ ret = nor->write(nor, to, 1, buf);
+ if (ret < 0)
+ goto sst_write_err;
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+ }
+ to += actual;
+
+ /* Write out most of the data here. */
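+ /*
+ * SST AAI word programming: the first SPINOR_OP_AAI_WP op carries an
+ * address; once sst_write_second is set, spi_nor_write_data() omits
+ * the address bytes and the device auto-increments internally.
+ */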
+ for (; actual < len - 1; actual += 2) {
+ nor->program_opcode = SPINOR_OP_AAI_WP;
+
+ /* write two bytes. */
+ ret = nor->write(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+ to += 2;
+ nor->sst_write_second = true;
+ }
+ nor->sst_write_second = false;
+
+ write_disable(nor);
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+
+ /* Write out trailing byte if it exists. */
+ if (actual != len) {
+ write_enable(nor);
+
+ nor->program_opcode = SPINOR_OP_BP;
+ ret = nor->write(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto sst_write_err;
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto sst_write_err;
+ write_disable(nor);
+ actual += 1;
+ }
+sst_write_err:
+ *retlen += actual;
+ return ret;
+}
+#endif
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * page-size chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ size_t page_offset, page_remain, i;
+ ssize_t ret;
+
+#ifdef CONFIG_SPI_FLASH_SST
+ /* sst nor chips use AAI word program */
+ if (nor->info->flags & SST_WRITE)
+ return sst_write(mtd, to, len, retlen, buf);
+#endif
+
+ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+ if (!len)
+ return 0;
+
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+
+ WATCHDOG_RESET();
+
+ /*
+ * If page_size is a power of two, the offset can be quickly
+ * calculated with an AND operation. On the other cases we
+ * need to do a modulus operation (more expensive).
+ */
+ if (is_power_of_2(nor->page_size)) {
+ page_offset = addr & (nor->page_size - 1);
+ } else {
+ u64 aux = addr;
+
+ page_offset = do_div(aux, nor->page_size);
+ }
+ /* the size of data remaining on the first page */
+ page_remain = min_t(size_t,
+ nor->page_size - page_offset, len - i);
+
+#ifdef CONFIG_SPI_FLASH_BAR
+ ret = write_bar(nor, addr);
+ if (ret < 0)
+ return ret;
+#endif
+ write_enable(nor);
+ ret = nor->write(nor, addr, page_remain, buf + i);
+ if (ret < 0)
+ goto write_err;
+ written = ret;
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ goto write_err;
+ *retlen += written;
+ i += written;
+ }
+
+write_err:
+#ifdef CONFIG_SPI_FLASH_BAR
+ ret = clean_bar(nor);
+#endif
+ return ret;
+}
+
+#if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
+/**
+ * macronix_quad_enable() - set QE bit in Status Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register.
+ *
+ * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+ int ret, val;
+
+ val = read_sr(nor);
+ if (val < 0)
+ return val;
+ if (val & SR_QUAD_EN_MX)
+ return 0;
+
+ write_enable(nor);
+
+ write_sr(nor, val | SR_QUAD_EN_MX);
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(nor->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+/*
+ * Write status Register and configuration register with 2 bytes
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Return negative if error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
+{
+ int ret;
+
+ write_enable(nor);
+
+ ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
+ if (ret < 0) {
+ dev_dbg(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_dbg(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+{
+ u8 sr_cr[2];
+ int ret;
+
+ /* Check current Quad Enable bit value. */
+ ret = read_cr(nor);
+ if (ret < 0) {
+ dev_dbg(nor->dev,
+ "error while reading configuration register\n");
+ return -EINVAL;
+ }
+
+ if (ret & CR_QUAD_EN_SPAN)
+ return 0;
+
+ sr_cr[1] = ret | CR_QUAD_EN_SPAN;
+
+ /* Keep the current value of the Status Register. */
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_dbg(nor->dev, "error while reading status register\n");
+ return -EINVAL;
+ }
+ sr_cr[0] = ret;
+
+ ret = write_sr_cr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ /* Read back and check it. */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_dbg(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
+/**
+ * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories not supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
+{
+ u8 sr_cr[2];
+ int ret;
+
+ /* Keep the current value of the Status Register. */
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_dbg(nor->dev, "error while reading status register\n");
+ return -EINVAL;
+ }
+ sr_cr[0] = ret;
+ sr_cr[1] = CR_QUAD_EN_SPAN;
+
+ return write_sr_cr(nor, sr_cr);
+}
+
+#endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */
+#endif /* CONFIG_SPI_FLASH_SPANSION || CONFIG_SPI_FLASH_WINBOND */
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octal SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octal SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ int (*quad_enable)(struct spi_nor *nor);
+};
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+static void
+spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ pp->opcode = opcode;
+ pp->proto = proto;
+}
+
+#if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
+/*
+ * Serial Flash Discoverable Parameters (SFDP) parsing.
+ */
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ u8 addr_width, read_opcode, read_dummy;
+ int ret;
+
+ read_opcode = nor->read_opcode;
+ addr_width = nor->addr_width;
+ read_dummy = nor->read_dummy;
+
+ nor->read_opcode = SPINOR_OP_RDSFDP;
+ nor->addr_width = 3;
+ nor->read_dummy = 8;
+
+ while (len) {
+ ret = nor->read(nor, addr, len, (u8 *)buf);
+ if (!ret || ret > len) {
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ buf += ret;
+ addr += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ nor->read_opcode = read_opcode;
+ nor->addr_width = addr_width;
+ nor->read_dummy = read_dummy;
+
+ return ret;
+}
+
+struct sfdp_parameter_header {
+ u8 id_lsb;
+ u8 minor;
+ u8 major;
+ u8 length; /* in double words */
+ u8 parameter_table_pointer[3]; /* byte address */
+ u8 id_msb;
+};
+
+#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+ (((p)->parameter_table_pointer[2] << 16) | \
+ ((p)->parameter_table_pointer[1] << 8) | \
+ ((p)->parameter_table_pointer[0] << 0))
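+/* e.g. pointer bytes {0x30, 0x00, 0x00} yield parameter table address 0x000030 */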
+
+#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
+#define SFDP_SST_ID 0x01bf /* Manufacturer specific Table */
+
+#define SFDP_SIGNATURE 0x50444653U
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
+
+struct sfdp_header {
+ u32 signature; /* 0x50444653U <=> "SFDP" */
+ u8 minor;
+ u8 major;
+ u8 nph; /* 0-based number of parameter headers */
+ u8 unused;
+
+ /* Basic Flash Parameter Table. */
+ struct sfdp_parameter_header bfpt_header;
+};
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i) ((i) - 1)
+#define BFPT_DWORD_MAX 16
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216 9
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
+#define BFPT_DWORD1_DTR BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
+#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ *         reads based on instruction. The DQ3/HOLD# pin functions as hold
+ *         during the instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * Writing only one byte to the status register has the side-effect of
+ * clearing status register 2, including the QE bit. The 100b code is
+ * used if writing one byte to the status register does not modify
+ * status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ * one data byte where bit 6 is one.
+ * [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ * register 2 instruction 3Eh with one data byte where bit 7 is one.
+ * [...]
+ * The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * In contrast to the 001b code, writing one byte to the status
+ * register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ *         Read Status instruction 05h. Status register 2 is read using
+ *         instruction 35h. QE is set via Write Status instruction 01h with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ */
+#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
+
+struct sfdp_bfpt {
+ u32 dwords[BFPT_DWORD_MAX];
+};
+
+/* Fast Read settings. */
+
+static void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+ u16 half,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = (half >> 5) & 0x07;
+ read->num_wait_states = (half >> 0) & 0x1f;
+ read->opcode = (half >> 8) & 0xff;
+ read->proto = proto;
+}
+
+struct sfdp_bfpt_read {
+ /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+ * whether the Fast Read x-y-z command is supported.
+ */
+ u32 supported_dword;
+ u32 supported_bit;
+
+ /*
+ * The half-word at offset <settings_shift> in the <settings_dword> BFPT
+ * DWORD encodes the op code, the number of mode clocks and the number
+ * of wait states to be used by the Fast Read x-y-z command.
+ */
+ u32 settings_dword;
+ u32 settings_shift;
+
+ /* The SPI protocol for this Fast Read x-y-z command. */
+ enum spi_nor_protocol proto;
+};
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+ /* Fast Read 1-1-2 */
+ {
+ SNOR_HWCAPS_READ_1_1_2,
+ BFPT_DWORD(1), BIT(16), /* Supported bit */
+ BFPT_DWORD(4), 0, /* Settings */
+ SNOR_PROTO_1_1_2,
+ },
+
+ /* Fast Read 1-2-2 */
+ {
+ SNOR_HWCAPS_READ_1_2_2,
+ BFPT_DWORD(1), BIT(20), /* Supported bit */
+ BFPT_DWORD(4), 16, /* Settings */
+ SNOR_PROTO_1_2_2,
+ },
+
+ /* Fast Read 2-2-2 */
+ {
+ SNOR_HWCAPS_READ_2_2_2,
+ BFPT_DWORD(5), BIT(0), /* Supported bit */
+ BFPT_DWORD(6), 16, /* Settings */
+ SNOR_PROTO_2_2_2,
+ },
+
+ /* Fast Read 1-1-4 */
+ {
+ SNOR_HWCAPS_READ_1_1_4,
+ BFPT_DWORD(1), BIT(22), /* Supported bit */
+ BFPT_DWORD(3), 16, /* Settings */
+ SNOR_PROTO_1_1_4,
+ },
+
+ /* Fast Read 1-4-4 */
+ {
+ SNOR_HWCAPS_READ_1_4_4,
+ BFPT_DWORD(1), BIT(21), /* Supported bit */
+ BFPT_DWORD(3), 0, /* Settings */
+ SNOR_PROTO_1_4_4,
+ },
+
+ /* Fast Read 4-4-4 */
+ {
+ SNOR_HWCAPS_READ_4_4_4,
+ BFPT_DWORD(5), BIT(4), /* Supported bit */
+ BFPT_DWORD(7), 16, /* Settings */
+ SNOR_PROTO_4_4_4,
+ },
+};
+
+struct sfdp_bfpt_erase {
+ /*
+ * The half-word at offset <shift> in DWORD <dword> encodes the
+ * op code and erase sector size to be used by Sector Erase commands.
+ */
+ u32 dword;
+ u32 shift;
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+ /* Erase Type 1 in DWORD8 bits[15:0] */
+ {BFPT_DWORD(8), 0},
+
+ /* Erase Type 2 in DWORD8 bits[31:16] */
+ {BFPT_DWORD(8), 16},
+
+ /* Erase Type 3 in DWORD9 bits[15:0] */
+ {BFPT_DWORD(9), 0},
+
+ /* Erase Type 4 in DWORD9 bits[31:16] */
+ {BFPT_DWORD(9), 16},
+};
+
+static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor: pointer to a 'struct spi_nor'
+ * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the Basic Flash Parameter Table length and version
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast READ commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size is now available since JESD216 rev A, however
+ * the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Actually, setting the QE bit
+ * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
+ * and IO3 hence enabling 4 (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ struct spi_nor_flash_parameter *params)
+{
+ struct mtd_info *mtd = &nor->mtd;
+ struct sfdp_bfpt bfpt;
+ size_t len;
+ int i, cmd, err;
+ u32 addr;
+ u16 half;
+
+ /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+ if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+ return -EINVAL;
+
+ /* Read the Basic Flash Parameter Table. */
+ len = min_t(size_t, sizeof(bfpt),
+ bfpt_header->length * sizeof(u32));
+ addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+ memset(&bfpt, 0, sizeof(bfpt));
+ err = spi_nor_read_sfdp(nor, addr, len, &bfpt);
+ if (err < 0)
+ return err;
+
+ /* Fix endianness of the BFPT DWORDs. */
+ for (i = 0; i < BFPT_DWORD_MAX; i++)
+ bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
+
+ /* Number of address bytes. */
+ switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+ case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+ nor->addr_width = 3;
+ break;
+
+ case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+ nor->addr_width = 4;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Flash Memory Density (in bits). */
+ params->size = bfpt.dwords[BFPT_DWORD(2)];
+ if (params->size & BIT(31)) {
+ params->size &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. Anyway, a NOR of 2^64
+ * bits is unlikely to exist so this error probably means
+ * the BFPT we are reading is corrupted/wrong.
+ */
+ if (params->size > 63)
+ return -EINVAL;
+
+ params->size = 1ULL << params->size;
+ } else {
+ params->size++;
+ }
+ params->size >>= 3; /* Convert to bytes. */
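+ /* e.g. a density field of 0x01ffffff encodes 0x1ffffff + 1 = 2^25 bits = 4 MiB */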
+
+ /* Fast Read settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+ const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+ struct spi_nor_read_command *read;
+
+ if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+ params->hwcaps.mask &= ~rd->hwcaps;
+ continue;
+ }
+
+ params->hwcaps.mask |= rd->hwcaps;
+ cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+ read = &params->reads[cmd];
+ half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+ spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+ }
+
+ /* Sector Erase settings. */
+ for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+ const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+ u32 erasesize;
+ u8 opcode;
+
+ half = bfpt.dwords[er->dword] >> er->shift;
+ erasesize = half & 0xff;
+
+ /* erasesize == 0 means this Erase Type is not supported. */
+ if (!erasesize)
+ continue;
+
+ erasesize = 1U << erasesize;
+ opcode = (half >> 8) & 0xff;
+#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
+ if (erasesize == SZ_4K) {
+ nor->erase_opcode = opcode;
+ mtd->erasesize = erasesize;
+ break;
+ }
+#endif
+ if (!mtd->erasesize || mtd->erasesize < erasesize) {
+ nor->erase_opcode = opcode;
+ mtd->erasesize = erasesize;
+ }
+ }
+
+ /* Stop here if not JESD216 rev A or later. */
+ if (bfpt_header->length < BFPT_DWORD_MAX)
+ return 0;
+
+ /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+ params->page_size = bfpt.dwords[BFPT_DWORD(11)];
+ params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
+ params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+ params->page_size = 1U << params->page_size;
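+ /* e.g. N = 8 yields the common 256 byte page size */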
+
+ /* Quad Enable Requirements. */
+ switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+ case BFPT_DWORD15_QER_NONE:
+ params->quad_enable = NULL;
+ break;
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+ case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+ case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+ params->quad_enable = spansion_no_read_cr_quad_enable;
+ break;
+#endif
+#if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
+ case BFPT_DWORD15_QER_SR1_BIT6:
+ params->quad_enable = macronix_quad_enable;
+ break;
+#endif
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+ case BFPT_DWORD15_QER_SR2_BIT1:
+ params->quad_enable = spansion_read_cr_quad_enable;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * spi_nor_parse_microchip_sfdp() - parse the Microchip manufacturer-specific
+ * SFDP table.
+ * @nor: pointer to a 'struct spi_nor'.
+ * @param_header: pointer to the SFDP parameter header.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int
+spi_nor_parse_microchip_sfdp(struct spi_nor *nor,
+ const struct sfdp_parameter_header *param_header)
+{
+ size_t size;
+ u32 addr;
+ int ret;
+
+ size = param_header->length * sizeof(u32);
+ addr = SFDP_PARAM_HEADER_PTP(param_header);
+
+ nor->manufacturer_sfdp = devm_kmalloc(nor->dev, size, GFP_KERNEL);
+ if (!nor->manufacturer_sfdp)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, size, nor->manufacturer_sfdp);
+
+ return ret;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be
+ * filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost
+ * all (Q)SPI memory manufacturers. These tables, stored in the flash itself,
+ * allow us to learn at
+ * runtime the main parameters needed to perform basic SPI flash operations such
+ * as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+{
+ const struct sfdp_parameter_header *param_header, *bfpt_header;
+ struct sfdp_parameter_header *param_headers = NULL;
+ struct sfdp_header header;
+ size_t psize;
+ int i, err;
+
+ /* Get the SFDP header. */
+ err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
+ if (err < 0)
+ return err;
+
+ /* Check the SFDP header version. */
+ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+ header.major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Verify that the first and only mandatory parameter header is a
+ * Basic Flash Parameter Table header as specified in JESD216.
+ */
+ bfpt_header = &header.bfpt_header;
+ if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+ bfpt_header->major != SFDP_JESD216_MAJOR)
+ return -EINVAL;
+
+ /*
+ * Allocate memory then read all parameter headers with a single
+ * Read SFDP command. These parameter headers will actually be parsed
+ * twice: a first time to get the latest revision of the basic flash
+ * parameter table, then a second time to handle the supported optional
+ * tables.
+	 * Hence we read the parameter headers once and for all to reduce
+	 * the processing time. Also we use kmalloc() instead of devm_kmalloc()
+ * because we don't need to keep these parameter headers: the allocated
+ * memory is always released with kfree() before exiting this function.
+ */
+ if (header.nph) {
+ psize = header.nph * sizeof(*param_headers);
+
+ param_headers = kmalloc(psize, GFP_KERNEL);
+ if (!param_headers)
+ return -ENOMEM;
+
+ err = spi_nor_read_sfdp(nor, sizeof(header),
+ psize, param_headers);
+ if (err < 0) {
+ dev_err(nor->dev,
+ "failed to read SFDP parameter headers\n");
+ goto exit;
+ }
+ }
+
+ /*
+ * Check other parameter headers to get the latest revision of
+ * the basic flash parameter table.
+ */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+ param_header->major == SFDP_JESD216_MAJOR &&
+ (param_header->minor > bfpt_header->minor ||
+ (param_header->minor == bfpt_header->minor &&
+ param_header->length > bfpt_header->length)))
+ bfpt_header = param_header;
+ }
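+
+	/*
+	 * For instance (illustrative revisions): if the mandatory header
+	 * describes JESD216 v1.0 (9 DWORDs) and an optional header describes
+	 * v1.6 (16 DWORDs), the v1.6 header is selected here.
+	 */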
+
+ err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+ if (err)
+ goto exit;
+
+ /* Parse other parameter headers. */
+ for (i = 0; i < header.nph; i++) {
+ param_header = &param_headers[i];
+
+ switch (SFDP_PARAM_HEADER_ID(param_header)) {
+ case SFDP_SECTOR_MAP_ID:
+ dev_info(nor->dev,
+ "non-uniform erase sector maps are not supported yet.\n");
+ break;
+
+ case SFDP_SST_ID:
+ err = spi_nor_parse_microchip_sfdp(nor, param_header);
+ break;
+
+ default:
+ break;
+ }
+
+ if (err) {
+ dev_warn(nor->dev,
+ "Failed to parse optional parameter table: %04x\n",
+ SFDP_PARAM_HEADER_ID(param_header));
+ /*
+ * Let's not drop all information we extracted so far
+			 * if an optional table parser fails. On failure,
+			 * each optional parser is responsible for rolling back
+ * the previously known spi_nor data.
+ */
+ err = 0;
+ }
+ }
+
+exit:
+ kfree(param_headers);
+ return err;
+}
+#else
+static int spi_nor_parse_sfdp(struct spi_nor *nor,
+ struct spi_nor_flash_parameter *params)
+{
+ return -EINVAL;
+}
+#endif /* SPI_FLASH_SFDP_SUPPORT */
+
+static int spi_nor_init_params(struct spi_nor *nor,
+ const struct flash_info *info,
+ struct spi_nor_flash_parameter *params)
+{
+ /* Set legacy flash parameters as default. */
+ memset(params, 0, sizeof(*params));
+
+ /* Set SPI NOR sizes. */
+ params->size = info->sector_size * info->n_sectors;
+ params->page_size = info->page_size;
+
+ /* (Fast) Read settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+ }
+
+ if (info->flags & SPI_NOR_DUAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+ 0, 8, SPINOR_OP_READ_1_1_2,
+ SNOR_PROTO_1_1_2);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ if (info->flags & SPI_NOR_OCTAL_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
+ 0, 8, SPINOR_OP_READ_1_1_8,
+ SNOR_PROTO_1_1_8);
+ }
+
+ /* Page Program settings. */
+ params->hwcaps.mask |= SNOR_HWCAPS_PP;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+ SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
+ }
+
+ /* Select the procedure to set the Quad Enable bit. */
+ if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
+ SNOR_HWCAPS_PP_QUAD)) {
+ switch (JEDEC_MFR(info)) {
+#if defined(CONFIG_SPI_FLASH_MACRONIX) || defined(CONFIG_SPI_FLASH_ISSI)
+ case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_ISSI:
+ params->quad_enable = macronix_quad_enable;
+ break;
+#endif
+ case SNOR_MFR_ST:
+ case SNOR_MFR_MICRON:
+ break;
+
+ default:
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+			/* Kept only for backward compatibility purposes. */
+ params->quad_enable = spansion_read_cr_quad_enable;
+#endif
+ break;
+ }
+ }
+
+ /* Override the parameters with data read from SFDP tables. */
+ nor->addr_width = 0;
+ nor->mtd.erasesize = 0;
+ if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+ !(info->flags & SPI_NOR_SKIP_SFDP)) {
+ struct spi_nor_flash_parameter sfdp_params;
+
+ memcpy(&sfdp_params, params, sizeof(sfdp_params));
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
+ nor->mtd.erasesize = 0;
+ } else {
+ memcpy(params, &sfdp_params, sizeof(*params));
+ }
+ }
+
+ return 0;
+}
+
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ const struct spi_nor_flash_parameter *params,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
+ const struct spi_nor_read_command *read;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ read = &params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+	 * In the spi-nor framework, we don't need to distinguish between
+	 * mode clock cycles and wait state clock cycles.
+	 * Indeed, the value of the mode clock cycles is used by a QSPI
+	 * flash memory to know whether it should enter or leave its 0-4-4
+	 * (Continuous Read / XIP) mode.
+	 * eXecution In Place is out of the scope of the mtd sub-system.
+	 * Hence we choose to merge both mode and wait state clock cycles
+	 * into the so-called dummy clock cycles.
+ */
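+	/*
+	 * Example (illustrative): if the controller and the flash share
+	 * SNOR_HWCAPS_READ, SNOR_HWCAPS_READ_FAST and SNOR_HWCAPS_READ_1_1_4,
+	 * fls() above picks the highest-order capability bit, so Quad Output
+	 * Fast Read (1-1-4) wins; with the legacy defaults set in
+	 * spi_nor_init_params(), read_dummy then becomes 0 + 8 = 8 cycles.
+	 */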
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+ const struct spi_nor_flash_parameter *params,
+ u32 shared_hwcaps)
+{
+ int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
+ const struct spi_nor_pp_command *pp;
+
+ if (best_match < 0)
+ return -EINVAL;
+
+ cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
+ if (cmd < 0)
+ return -EINVAL;
+
+ pp = &params->page_programs[cmd];
+ nor->program_opcode = pp->opcode;
+ nor->write_proto = pp->proto;
+ return 0;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ struct mtd_info *mtd = &nor->mtd;
+
+ /* Do nothing if already configured from SFDP. */
+ if (mtd->erasesize)
+ return 0;
+
+#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
+ /* prefer "small sector" erase if possible */
+ if (info->flags & SECT_4K) {
+ nor->erase_opcode = SPINOR_OP_BE_4K;
+ mtd->erasesize = 4096;
+ } else if (info->flags & SECT_4K_PMC) {
+ nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+ mtd->erasesize = 4096;
+ } else
+#endif
+ {
+ nor->erase_opcode = SPINOR_OP_SE;
+ mtd->erasesize = info->sector_size;
+ }
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ u32 ignored_mask, shared_mask;
+ bool enable_quad_io;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ /* SPI n-n-n protocols are not supported yet. */
+ ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
+ SNOR_HWCAPS_READ_4_4_4 |
+ SNOR_HWCAPS_READ_8_8_8 |
+ SNOR_HWCAPS_PP_4_4_4 |
+ SNOR_HWCAPS_PP_8_8_8);
+ if (shared_mask & ignored_mask) {
+ dev_dbg(nor->dev,
+ "SPI n-n-n protocols are not supported yet.\n");
+ shared_mask &= ~ignored_mask;
+ }
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, params, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Page Program command. */
+ err = spi_nor_select_pp(nor, params, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select write settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Select the Sector Erase command. */
+ err = spi_nor_select_erase(nor, info);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select erase settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Enable Quad I/O if needed. */
+ enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+ spi_nor_get_protocol_width(nor->write_proto) == 4);
+ if (enable_quad_io && params->quad_enable)
+ nor->quad_enable = params->quad_enable;
+ else
+ nor->quad_enable = NULL;
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ int err;
+
+ /*
+	 * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to
+	 * power up with the software protection bits set.
+ */
+ if (IS_ENABLED(CONFIG_SPI_FLASH_UNLOCK_ALL) &&
+ (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+ JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+ nor->info->flags & SPI_NOR_HAS_LOCK)) {
+ write_enable(nor);
+ write_sr(nor, 0);
+ spi_nor_wait_till_ready(nor);
+ }
+
+ if (nor->quad_enable) {
+ err = nor->quad_enable(nor);
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+ }
+
+ if (nor->addr_width == 4 &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ if (nor->flags & SNOR_F_BROKEN_RESET)
+ debug("enabling reset hack; may not recover from unexpected reboots\n");
+ set_4byte(nor, nor->info, 1);
+ }
+
+ return 0;
+}
+
+int spi_nor_scan(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter params;
+ const struct flash_info *info = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST |
+ SNOR_HWCAPS_PP,
+ };
+ struct spi_slave *spi = nor->spi;
+ int ret;
+
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+ nor->read = spi_nor_read_data;
+ nor->write = spi_nor_write_data;
+ nor->read_reg = spi_nor_read_reg;
+ nor->write_reg = spi_nor_write_reg;
+
+ if (spi->mode & SPI_RX_OCTAL) {
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
+
+ if (spi->mode & SPI_TX_OCTAL)
+ hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 |
+ SNOR_HWCAPS_PP_1_1_8 |
+ SNOR_HWCAPS_PP_1_8_8);
+ } else if (spi->mode & SPI_RX_QUAD) {
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+
+ if (spi->mode & SPI_TX_QUAD)
+ hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 |
+ SNOR_HWCAPS_PP_1_1_4 |
+ SNOR_HWCAPS_PP_1_4_4);
+ } else if (spi->mode & SPI_RX_DUAL) {
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+
+ if (spi->mode & SPI_TX_DUAL)
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2;
+ }
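+
+	/*
+	 * For example, a controller whose mode advertises
+	 * SPI_RX_QUAD | SPI_TX_QUAD ends up offering the 1-1-4 and 1-4-4
+	 * reads plus the 1-1-4 and 1-4-4 page programs on top of the base
+	 * mask.
+	 */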
+
+ info = spi_nor_read_id(nor);
+ if (IS_ERR_OR_NULL(info))
+ return -ENOENT;
+ /* Parse the Serial Flash Discoverable Parameters table. */
+ ret = spi_nor_init_params(nor, info, &params);
+ if (ret)
+ return ret;
+
+ if (!mtd->name)
+ mtd->name = info->name;
+ mtd->priv = nor;
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = params.size;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+ mtd->_write = spi_nor_write;
+
+#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
+ /* NOR protection support for STmicro/Micron chips and similar */
+ if (JEDEC_MFR(info) == SNOR_MFR_ST ||
+ JEDEC_MFR(info) == SNOR_MFR_MICRON ||
+ JEDEC_MFR(info) == SNOR_MFR_SST ||
+ info->flags & SPI_NOR_HAS_LOCK) {
+ nor->flash_lock = stm_lock;
+ nor->flash_unlock = stm_unlock;
+ nor->flash_is_locked = stm_is_locked;
+ }
+#endif
+
+#ifdef CONFIG_SPI_FLASH_SST
+ /*
+	 * The sst26 series' block protection implementation differs from
+	 * that of the other series.
+ */
+ if (info->flags & SPI_NOR_HAS_SST26LOCK) {
+ nor->flash_lock = sst26_lock;
+ nor->flash_unlock = sst26_unlock;
+ nor->flash_is_locked = sst26_is_locked;
+ }
+#endif
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & SPI_NOR_HAS_TB)
+ nor->flags |= SNOR_F_HAS_SR_TB;
+ if (info->flags & NO_CHIP_ERASE)
+ nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+ if (info->flags & USE_CLSR)
+ nor->flags |= SNOR_F_USE_CLSR;
+
+ if (info->flags & SPI_NOR_NO_ERASE)
+ mtd->flags |= MTD_NO_ERASE;
+
+ nor->page_size = params.page_size;
+ mtd->writebufsize = nor->page_size;
+
+ /* Some devices cannot do fast-read, no matter what DT tells us */
+ if ((info->flags & SPI_NOR_NO_FR) || (spi->mode & SPI_RX_SLOW))
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
+ */
+ ret = spi_nor_setup(nor, info, &params, &hwcaps);
+ if (ret)
+ return ret;
+
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (info->addr_width) {
+ nor->addr_width = info->addr_width;
+ } else if (mtd->size > SZ_16M) {
+#ifndef CONFIG_SPI_FLASH_BAR
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+ info->flags & SPI_NOR_4B_OPCODES)
+ spi_nor_set_4byte_opcodes(nor, info);
+#else
+ /* Configure the BAR - discover bank cmds and read current bank */
+ nor->addr_width = 3;
+ ret = read_bar(nor, info);
+ if (ret < 0)
+ return ret;
+#endif
+ } else {
+ nor->addr_width = 3;
+ }
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_dbg(nor->dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ /* Send all the required SPI flash commands to initialize device */
+ nor->info = info;
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
+ nor->name = mtd->name;
+ nor->size = mtd->size;
+ nor->erase_size = mtd->erasesize;
+ nor->sector_size = mtd->erasesize;
+
+#ifndef CONFIG_SPL_BUILD
+ printf("SF: Detected %s with page size ", nor->name);
+ print_size(nor->page_size, ", erase size ");
+ print_size(nor->erase_size, ", total ");
+ print_size(nor->size, "");
+ puts("\n");
+#endif
+
+ return 0;
+}
+
+/* U-Boot specific functions, need to extend MTD to support these */
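+/*
+ * On most chips the Status Register keeps WIP in bit 0, WEL in bit 1 and
+ * the block-protect bits BP0-BP2 in bits 2-4, so (sr >> 2) & 7 below
+ * extracts the 3-bit protection field; e.g. SR = 0x1c reports BP = 7.
+ */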
+int spi_flash_cmd_get_sw_write_prot(struct spi_nor *nor)
+{
+ int sr = read_sr(nor);
+
+ if (sr < 0)
+ return sr;
+
+ return (sr >> 2) & 7;
+}
diff --git a/roms/u-boot/drivers/mtd/spi/spi-nor-ids.c b/roms/u-boot/drivers/mtd/spi/spi-nor-ids.c
new file mode 100644
index 000000000..2b5779795
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/spi-nor-ids.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *
+ * Copyright (C) 2013 Jagannadha Sutradharudu Teki, Xilinx Inc.
+ * Copyright (C) 2016 Jagan Teki <jagan@openedev.com>
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <common.h>
+#include <spi.h>
+#include <spi_flash.h>
+
+#include "sf_internal.h"
+
+/* Exclude chip names for SPL to save space */
+#if !CONFIG_IS_ENABLED(SPI_FLASH_TINY)
+#define INFO_NAME(_name) .name = _name,
+#else
+#define INFO_NAME(_name)
+#endif
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ INFO_NAME(_name) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
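+
+/*
+ * Illustrative expansion: the w25q128 entry below, INFO("w25q128",
+ * 0xef4018, 0, ...), yields .id = { 0xef, 0x40, 0x18, 0x00, 0x00 } and
+ * .id_len = 3, since its _ext_id is zero.
+ */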
+
+#define INFO6(_name, _jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
+ INFO_NAME(_name) \
+ .id = { \
+ ((_jedec_id) >> 16) & 0xff, \
+ ((_jedec_id) >> 8) & 0xff, \
+ (_jedec_id) & 0xff, \
+ ((_ext_id) >> 16) & 0xff, \
+ ((_ext_id) >> 8) & 0xff, \
+ (_ext_id) & 0xff, \
+ }, \
+ .id_len = 6, \
+ .sector_size = (_sector_size), \
+ .n_sectors = (_n_sectors), \
+ .page_size = 256, \
+ .flags = (_flags),
+
+/* NOTE: double check command sets and memory organization when you add
+ * more NOR chips. This current list focuses on newer chips, which
+ * have been converging on command sets that include the JEDEC ID.
+ *
+ * All newly added entries should describe *hardware* and should use SECT_4K
+ * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
+ * scenarios excluding small sectors there is a config option that can be
+ * disabled: CONFIG_SPI_FLASH_USE_4K_SECTORS.
+ * For historical (and compatibility) reasons (before we got the above
+ * config) some old entries may be missing the 4K flag.
+ */
+const struct flash_info spi_nor_ids[] = {
+#ifdef CONFIG_SPI_FLASH_ATMEL /* ATMEL */
+ /* Atmel -- some are (confusingly) marketed as "DataFlash" */
+ { INFO("at26df321", 0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+ { INFO("at25df321a", 0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+
+ { INFO("at45db011d", 0x1f2200, 0, 64 * 1024, 4, SECT_4K) },
+ { INFO("at45db021d", 0x1f2300, 0, 64 * 1024, 8, SECT_4K) },
+ { INFO("at45db041d", 0x1f2400, 0, 64 * 1024, 8, SECT_4K) },
+ { INFO("at45db081d", 0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
+ { INFO("at45db161d", 0x1f2600, 0, 64 * 1024, 32, SECT_4K) },
+ { INFO("at45db321d", 0x1f2700, 0, 64 * 1024, 64, SECT_4K) },
+ { INFO("at45db641d", 0x1f2800, 0, 64 * 1024, 128, SECT_4K) },
+ { INFO("at25sl321", 0x1f4216, 0, 64 * 1024, 64, SECT_4K) },
+ { INFO("at26df081a", 0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
+#endif
+#ifdef CONFIG_SPI_FLASH_EON /* EON */
+ /* EON -- en25xxx */
+ { INFO("en25q32b", 0x1c3016, 0, 64 * 1024, 64, 0) },
+ { INFO("en25q64", 0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { INFO("en25qh128", 0x1c7018, 0, 64 * 1024, 256, 0) },
+ { INFO("en25s64", 0x1c3817, 0, 64 * 1024, 128, SECT_4K) },
+#endif
+#ifdef CONFIG_SPI_FLASH_GIGADEVICE /* GIGADEVICE */
+ /* GigaDevice */
+ {
+ INFO("gd25q16", 0xc84015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("gd25q32", 0xc84016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("gd25lq32", 0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("gd25q64", 0xc84017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("gd25lq64c", 0xc86017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("gd25q128", 0xc84018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("gd25lq128", 0xc86018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+#endif
+#ifdef CONFIG_SPI_FLASH_ISSI /* ISSI */
+ /* ISSI */
+ { INFO("is25lq040b", 0x9d4013, 0, 64 * 1024, 8,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("is25lp032", 0x9d6016, 0, 64 * 1024, 64, 0) },
+ { INFO("is25lp064", 0x9d6017, 0, 64 * 1024, 128, 0) },
+ { INFO("is25lp128", 0x9d6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { INFO("is25lp256", 0x9d6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { INFO("is25wp032", 0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("is25wp064", 0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("is25wp128", 0x9d7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("is25wp256", 0x9d7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
+#endif
+#ifdef CONFIG_SPI_FLASH_MACRONIX /* MACRONIX */
+ /* Macronix */
+ { INFO("mx25l2005a", 0xc22012, 0, 64 * 1024, 4, SECT_4K) },
+ { INFO("mx25l4005a", 0xc22013, 0, 64 * 1024, 8, SECT_4K) },
+ { INFO("mx25l8005", 0xc22014, 0, 64 * 1024, 16, 0) },
+ { INFO("mx25l1606e", 0xc22015, 0, 64 * 1024, 32, SECT_4K) },
+ { INFO("mx25l3205d", 0xc22016, 0, 64 * 1024, 64, SECT_4K) },
+ { INFO("mx25l6405d", 0xc22017, 0, 64 * 1024, 128, SECT_4K) },
+ { INFO("mx25u2033e", 0xc22532, 0, 64 * 1024, 4, SECT_4K) },
+ { INFO("mx25u1635e", 0xc22535, 0, 64 * 1024, 32, SECT_4K) },
+ { INFO("mx25u3235f", 0xc22536, 0, 4 * 1024, 1024, SECT_4K) },
+ { INFO("mx25u6435f", 0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+ { INFO("mx25l12805d", 0xc22018, 0, 64 * 1024, 256, SECT_4K) },
+ { INFO("mx25u12835f", 0xc22538, 0, 64 * 1024, 256, SECT_4K) },
+ { INFO("mx25l12855e", 0xc22618, 0, 64 * 1024, 256, 0) },
+ { INFO("mx25l25635e", 0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("mx25u25635f", 0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+ { INFO("mx25l25655e", 0xc22619, 0, 64 * 1024, 512, 0) },
+ { INFO("mx66l51235l", 0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx66u51235f", 0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx66u2g45g", 0xc2253c, 0, 64 * 1024, 4096, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mx66l1g45g", 0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("mx25l1633e", 0xc22415, 0, 64 * 1024, 32, SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | SECT_4K) },
+ { INFO("mx25r6435f", 0xc22817, 0, 64 * 1024, 128, SECT_4K) },
+#endif
+
+#ifdef CONFIG_SPI_FLASH_STMICRO /* STMICRO */
+ /* Micron */
+ { INFO("n25q016a", 0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
+ { INFO("n25q032", 0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { INFO("n25q032a", 0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
+ { INFO("n25q064", 0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { INFO("n25q064a", 0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) },
+ { INFO("n25q128a11", 0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { INFO("n25q128a13", 0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
+ { INFO6("mt25ql256a", 0x20ba19, 0x104400, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | USE_FSR) },
+ { INFO("n25q256a", 0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_FSR) },
+ { INFO6("mt25qu256a", 0x20bb19, 0x104400, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | USE_FSR) },
+ { INFO("n25q256ax1", 0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ | USE_FSR) },
+ { INFO6("mt25qu512a", 0x20bb20, 0x104400, 64 * 1024, 1024,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES |
+ USE_FSR) },
+ { INFO("n25q512a", 0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { INFO6("mt25ql512a", 0x20ba20, 0x104400, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("n25q512ax3", 0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+ { INFO("n25q00", 0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { INFO("n25q00a", 0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { INFO("mt25ql01g", 0x21ba20, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { INFO("mt25qu02g", 0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ { INFO("mt35xu512aba", 0x2c5b1a, 0, 128 * 1024, 512, USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("mt35xu02g", 0x2c5b1c, 0, 128 * 1024, 2048, USE_FSR | SPI_NOR_OCTAL_READ | SPI_NOR_4B_OPCODES) },
+#endif
+#ifdef CONFIG_SPI_FLASH_SPANSION /* SPANSION */
+ /* Spansion/Cypress -- single (large) sector size only, at least
+ * for the chips listed here (without boot sectors).
+ */
+ { INFO("s25sl032p", 0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("s25sl064p", 0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("s25fl256s0", 0x010219, 0x4d00, 256 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25fl256s1", 0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO6("s25fl512s", 0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO6("s25fs512s", 0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25fl512s_256k", 0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25fl512s_64k", 0x010220, 0x4d01, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25fl512s_512k", 0x010220, 0x4f00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25sl12800", 0x012018, 0x0300, 256 * 1024, 64, 0) },
+ { INFO("s25sl12801", 0x012018, 0x0301, 64 * 1024, 256, 0) },
+ { INFO6("s25fl128s", 0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25fl129p0", 0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25fl129p1", 0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+ { INFO("s25sl008a", 0x010213, 0, 64 * 1024, 16, 0) },
+ { INFO("s25sl016a", 0x010214, 0, 64 * 1024, 32, 0) },
+ { INFO("s25sl032a", 0x010215, 0, 64 * 1024, 64, 0) },
+ { INFO("s25sl064a", 0x010216, 0, 64 * 1024, 128, 0) },
+ { INFO("s25fl116k", 0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("s25fl164k", 0x014017, 0, 64 * 1024, 128, SECT_4K) },
+ { INFO("s25fl208k", 0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) },
+ { INFO("s25fl064l", 0x016017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { INFO("s25fl128l", 0x016018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+#endif
+#ifdef CONFIG_SPI_FLASH_SST /* SST */
+ /* SST -- large erase sizes are "overlays", "sectors" are 4K */
+ { INFO("sst25vf040b", 0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { INFO("sst25vf080b", 0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { INFO("sst25vf016b", 0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+ { INFO("sst25vf032b", 0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+ { INFO("sst25vf064c", 0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+ { INFO("sst25wf512", 0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) },
+ { INFO("sst25wf010", 0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) },
+ { INFO("sst25wf020", 0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) },
+ { INFO("sst25wf020a", 0x621612, 0, 64 * 1024, 4, SECT_4K) },
+ { INFO("sst25wf040b", 0x621613, 0, 64 * 1024, 8, SECT_4K) },
+ { INFO("sst25wf040", 0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) },
+ { INFO("sst25wf080", 0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+ { INFO("sst26vf064b", 0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_SST26LOCK | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("sst26wf016", 0xbf2651, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_HAS_SST26LOCK) },
+ { INFO("sst26wf032", 0xbf2622, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_SST26LOCK) },
+ { INFO("sst26wf064", 0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_SST26LOCK) },
+#endif
+#ifdef CONFIG_SPI_FLASH_STMICRO /* STMICRO */
+ /* ST Microelectronics -- newer production may have feature updates */
+ { INFO("m25p10", 0x202011, 0, 32 * 1024, 4, 0) },
+ { INFO("m25p20", 0x202012, 0, 64 * 1024, 4, 0) },
+ { INFO("m25p40", 0x202013, 0, 64 * 1024, 8, 0) },
+ { INFO("m25p80", 0x202014, 0, 64 * 1024, 16, 0) },
+ { INFO("m25p16", 0x202015, 0, 64 * 1024, 32, 0) },
+ { INFO("m25p32", 0x202016, 0, 64 * 1024, 64, 0) },
+ { INFO("m25p64", 0x202017, 0, 64 * 1024, 128, 0) },
+ { INFO("m25p128", 0x202018, 0, 256 * 1024, 64, 0) },
+ { INFO("m25pe16", 0x208015, 0, 64 * 1024, 32, SECT_4K) },
+ { INFO("m25px16", 0x207115, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("m25px64", 0x207117, 0, 64 * 1024, 128, 0) },
+#endif
+#ifdef CONFIG_SPI_FLASH_WINBOND /* WINBOND */
+ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { INFO("w25p80", 0xef2014, 0x0, 64 * 1024, 16, 0) },
+ { INFO("w25p16", 0xef2015, 0x0, 64 * 1024, 32, 0) },
+ { INFO("w25p32", 0xef2016, 0x0, 64 * 1024, 64, 0) },
+ { INFO("w25x05", 0xef3010, 0, 64 * 1024, 1, SECT_4K) },
+ { INFO("w25x40", 0xef3013, 0, 64 * 1024, 8, SECT_4K) },
+ { INFO("w25x16", 0xef3015, 0, 64 * 1024, 32, SECT_4K) },
+ {
+ INFO("w25q16dw", 0xef6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { INFO("w25x32", 0xef3016, 0, 64 * 1024, 64, SECT_4K) },
+ { INFO("w25q20cl", 0xef4012, 0, 64 * 1024, 4, SECT_4K) },
+ { INFO("w25q20bw", 0xef5012, 0, 64 * 1024, 4, SECT_4K) },
+ { INFO("w25q20ew", 0xef6012, 0, 64 * 1024, 4, SECT_4K) },
+ { INFO("w25q32", 0xef4016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ {
+ INFO("w25q32dw", 0xef6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("w25q32jv", 0xef7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("w25q32jwm", 0xef8016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { INFO("w25x64", 0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+ {
+ INFO("w25q64dw", 0xef6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("w25q64jv", 0xef7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("w25q128fw", 0xef6018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("w25q128jv", 0xef7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("w25q256fw", 0xef6019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
+ INFO("w25q256jw", 0xef7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { INFO("w25q80", 0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { INFO("w25q80bl", 0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("w25q16cl", 0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("w25q64cv", 0xef4017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("w25q128", 0xef4018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ { INFO("w25q256", 0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("w25m512jw", 0xef6119, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("w25m512jv", 0xef7119, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+#endif
+#ifdef CONFIG_SPI_FLASH_XMC
+ /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+ { INFO("XM25QH64A", 0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { INFO("XM25QH128A", 0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+#endif
+ { },
+};
diff --git a/roms/u-boot/drivers/mtd/spi/spi-nor-tiny.c b/roms/u-boot/drivers/mtd/spi/spi-nor-tiny.c
new file mode 100644
index 000000000..1d5861d55
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/spi/spi-nor-tiny.c
@@ -0,0 +1,817 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * Synced from Linux v4.19
+ */
+
+#include <common.h>
+#include <log.h>
+#include <dm/device_compat.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <spi-mem.h>
+#include <spi.h>
+
+#include "sf_internal.h"
+
+/*
+ * Max time to wait on the status register before we give up: used for
+ * everything but full-chip erase. Probably could be much smaller, but
+ * kept around for safety for now.
+ */
+
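+/*
+ * With the conventional CONFIG_SYS_HZ of 1000 (get_timer() counts in
+ * milliseconds in U-Boot), DEFAULT_READY_WAIT_JIFFIES below amounts to a
+ * 40 second timeout.
+ */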
+#define HZ CONFIG_SYS_HZ
+
+#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
+
+static int spi_nor_read_write_reg(struct spi_nor *nor, struct spi_mem_op
+ *op, void *buf)
+{
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ op->data.buf.in = buf;
+ else
+ op->data.buf.out = buf;
+ return spi_mem_exec_op(nor->spi, op);
+}
+
+static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(len, NULL, 1));
+ int ret;
+
+ ret = spi_nor_read_write_reg(nor, &op, val);
+ if (ret < 0) {
+ /*
+ * spi_slave does not have a struct udevice member without DM,
+ * so use the bus and cs instead.
+ */
+#if CONFIG_IS_ENABLED(DM_SPI)
+ dev_dbg(nor->spi->dev, "error %d reading %x\n", ret,
+ code);
+#else
+ log_debug("spi%u.%u: error %d reading %x\n",
+ nor->spi->bus, nor->spi->cs, ret, code);
+#endif
+ }
+
+ return ret;
+}
+
+static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+
+ return spi_nor_read_write_reg(nor, &op, buf);
+}
+
+static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+ u_char *buf)
+{
+ struct spi_mem_op op =
+ SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+ SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
+ SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+ SPI_MEM_OP_DATA_IN(len, buf, 1));
+ size_t remaining = len;
+ int ret;
+
+ /* get transfer protocols. */
+ op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+ op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+ op.dummy.buswidth = op.addr.buswidth;
+ op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+ /* convert the dummy cycles to the number of bytes */
+ op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
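+
+	/*
+	 * E.g. 8 dummy clock cycles on a quad-wide address/dummy phase give
+	 * 8 * 4 / 8 = 4 dummy bytes.
+	 */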
+
+ while (remaining) {
+ op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
+ ret = spi_mem_adjust_op_size(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(nor->spi, &op);
+ if (ret)
+ return ret;
+
+ op.addr.val += op.data.nbytes;
+ remaining -= op.data.nbytes;
+ op.data.buf.in += op.data.nbytes;
+ }
+
+ return len;
+}
+
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+/*
+ * Read the configuration register.
+ * Returns the configuration register value, or negative if an error
+ * occurred.
+ */
+static int read_cr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = spi_nor_read_reg(nor, SPINOR_OP_RDCR, &val, 1);
+ if (ret < 0) {
+ dev_dbg(nor->dev, "error %d reading CR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+#endif
+
+/*
+ * Write the status register with 1 byte.
+ * Returns negative if an error occurred.
+ */
+static inline int write_sr(struct spi_nor *nor, u8 val)
+{
+ nor->cmd_buf[0] = val;
+ return spi_nor_write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
+}
+
+/*
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static inline int write_enable(struct spi_nor *nor)
+{
+ return spi_nor_write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+}
+
+/*
+ * Send write disable instruction to the chip.
+ */
+static inline int write_disable(struct spi_nor *nor)
+{
+ return spi_nor_write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
+}
+
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+ return mtd->priv;
+}
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == opcode)
+ return table[i][1];
+
+ /* No conversion found, keep input op code. */
+ return opcode;
+}
+
+static inline u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+ static const u8 spi_nor_3to4_read[][2] = {
+ { SPINOR_OP_READ, SPINOR_OP_READ_4B },
+ { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
+ { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
+ { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
+ { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
+ { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
+ };
+
+ return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+ ARRAY_SIZE(spi_nor_3to4_read));
+}
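+
+/*
+ * E.g. SPINOR_OP_READ_FAST (0x0b) is converted to SPINOR_OP_READ_FAST_4B
+ * (0x0c); opcodes without a 4-byte variant in the table pass through
+ * unchanged.
+ */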
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
+ const struct flash_info *info)
+{
+ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+}
+
+/* Enable/disable 4-byte addressing mode. */
+static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
+ int enable)
+{
+ int status;
+ bool need_wren = false;
+ u8 cmd;
+
+ switch (JEDEC_MFR(info)) {
+ case SNOR_MFR_ST:
+ case SNOR_MFR_MICRON:
+		/* Some Micron chips need the WREN command; all will accept it */
+ need_wren = true;
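+		/* Fall through to issue the EN4B/EX4B command. */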
+ case SNOR_MFR_MACRONIX:
+ case SNOR_MFR_WINBOND:
+ if (need_wren)
+ write_enable(nor);
+
+ cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+ status = spi_nor_write_reg(nor, cmd, NULL, 0);
+ if (need_wren)
+ write_disable(nor);
+
+ if (!status && !enable &&
+ JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+ /*
+ * On Winbond W25Q256FV, leaving 4byte mode causes
+ * the Extended Address Register to be set to 1, so all
+ * 3-byte-address reads come from the second 16M.
+ * We must clear the register to enable normal behavior.
+ */
+ write_enable(nor);
+ nor->cmd_buf[0] = 0;
+ spi_nor_write_reg(nor, SPINOR_OP_WREAR,
+ nor->cmd_buf, 1);
+ write_disable(nor);
+ }
+
+ return status;
+ default:
+ /* Spansion style */
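+		/*
+		 * Presumably bit 7 of the Bank Address Register (EXTADD on
+		 * Spansion parts): setting it via BRWR switches the default
+		 * address width to 4 bytes.
+		 */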
+ nor->cmd_buf[0] = enable << 7;
+ return spi_nor_write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
+ }
+}
+
+#if defined(CONFIG_SPI_FLASH_SPANSION) || \
+ defined(CONFIG_SPI_FLASH_WINBOND) || \
+ defined(CONFIG_SPI_FLASH_MACRONIX)
+/*
+ * Read the status register.
+ * Returns the status register value, or negative if an error occurred.
+ */
+static int read_sr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = spi_nor_read_reg(nor, SPINOR_OP_RDSR, &val, 1);
+ if (ret < 0) {
+ pr_debug("error %d reading SR\n", (int)ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
+ * Read the flag status register.
+ * Returns the flag status register value, or negative if an error
+ * occurred.
+ */
+static int read_fsr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = spi_nor_read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
+ if (ret < 0) {
+ pr_debug("error %d reading FSR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+static int spi_nor_sr_ready(struct spi_nor *nor)
+{
+ int sr = read_sr(nor);
+
+ if (sr < 0)
+ return sr;
+
+ return !(sr & SR_WIP);
+}
+
+static int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+ int fsr = read_fsr(nor);
+
+ if (fsr < 0)
+ return fsr;
+ return fsr & FSR_READY;
+}
+
+static int spi_nor_ready(struct spi_nor *nor)
+{
+ int sr, fsr;
+
+ sr = spi_nor_sr_ready(nor);
+ if (sr < 0)
+ return sr;
+ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+ if (fsr < 0)
+ return fsr;
+ return sr && fsr;
+}
+
+/*
+ * Service routine to read status register until ready, or timeout occurs.
+ * Returns non-zero if error.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+ unsigned long timeout)
+{
+ unsigned long timebase;
+ int ret;
+
+ timebase = get_timer(0);
+
+ while (get_timer(timebase) < timeout) {
+ ret = spi_nor_ready(nor);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+ }
+
+ dev_err(nor->dev, "flash operation timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+ return spi_nor_wait_till_ready_with_timeout(nor,
+ DEFAULT_READY_WAIT_JIFFIES);
+}
+#endif /* CONFIG_SPI_FLASH_SPANSION || CONFIG_SPI_FLASH_WINBOND || CONFIG_SPI_FLASH_MACRONIX */
+
+/*
+ * Erase an address range on the NOR chip. The address range may span
+ * one or more erase sectors. Returns an error if there is a problem
+ * erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return -ENOTSUPP;
+}
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+ int tmp;
+ u8 id[SPI_NOR_MAX_ID_LEN];
+ const struct flash_info *info;
+
+ tmp = spi_nor_read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+ if (tmp < 0) {
+ dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
+ return ERR_PTR(tmp);
+ }
+
+ info = spi_nor_ids;
+ for (; info->sector_size != 0; info++) {
+ if (info->id_len) {
+ if (!memcmp(info->id, id, info->id_len))
+ return info;
+ }
+ }
+ dev_dbg(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
+ id[0], id[1], id[2]);
+ return ERR_PTR(-EMEDIUMTYPE);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+ while (len) {
+ loff_t addr = from;
+
+ ret = spi_nor_read_data(nor, addr, len, buf);
+ if (ret == 0) {
+ /* We shouldn't see 0-length reads */
+ ret = -EIO;
+ goto read_err;
+ }
+ if (ret < 0)
+ goto read_err;
+
+ *retlen += ret;
+ buf += ret;
+ from += ret;
+ len -= ret;
+ }
+ ret = 0;
+
+read_err:
+ return ret;
+}
+
+/*
+ * Write an address range to the nor chip. Data must be written in
+ * FLASH_PAGESIZE chunks. The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ return -ENOTSUPP;
+}
+
+#ifdef CONFIG_SPI_FLASH_MACRONIX
+/**
+ * macronix_quad_enable() - set QE bit in Status Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register.
+ *
+ * Bit 6 of the Status Register is the QE bit for Macronix-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+ int ret, val;
+
+ val = read_sr(nor);
+ if (val < 0)
+ return val;
+ if (val & SR_QUAD_EN_MX)
+ return 0;
+
+ write_enable(nor);
+
+ write_sr(nor, val | SR_QUAD_EN_MX);
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(nor->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+/*
+ * Write the Status Register and the Configuration Register with 2 bytes:
+ * the first byte is written to the Status Register, while the second
+ * byte is written to the Configuration Register.
+ * Returns negative if an error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
+{
+ int ret;
+
+ write_enable(nor);
+
+ ret = spi_nor_write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
+ if (ret < 0) {
+ dev_dbg(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_dbg(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+{
+ u8 sr_cr[2];
+ int ret;
+
+ /* Check current Quad Enable bit value. */
+ ret = read_cr(nor);
+ if (ret < 0) {
+ dev_dbg(nor->dev,
+ "error while reading configuration register\n");
+ return -EINVAL;
+ }
+
+ if (ret & CR_QUAD_EN_SPAN)
+ return 0;
+
+ sr_cr[1] = ret | CR_QUAD_EN_SPAN;
+
+ /* Keep the current value of the Status Register. */
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_dbg(nor->dev, "error while reading status register\n");
+ return -EINVAL;
+ }
+ sr_cr[0] = ret;
+
+ ret = write_sr_cr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ /* Read back and check it. */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_dbg(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_SPI_FLASH_SPANSION || CONFIG_SPI_FLASH_WINBOND */
+
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+
+ SNOR_CMD_READ_MAX
+};
+
+struct spi_nor_flash_parameter {
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+};
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+ u8 num_mode_clocks,
+ u8 num_wait_states,
+ u8 opcode,
+ enum spi_nor_protocol proto)
+{
+ read->num_mode_clocks = num_mode_clocks;
+ read->num_wait_states = num_wait_states;
+ read->opcode = opcode;
+ read->proto = proto;
+}
+
+static int spi_nor_init_params(struct spi_nor *nor,
+ const struct flash_info *info,
+ struct spi_nor_flash_parameter *params)
+{
+ /* (Fast) Read settings. */
+ params->hwcaps.mask = SNOR_HWCAPS_READ;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+ 0, 0, SPINOR_OP_READ,
+ SNOR_PROTO_1_1_1);
+
+ if (!(info->flags & SPI_NOR_NO_FR)) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+ 0, 8, SPINOR_OP_READ_FAST,
+ SNOR_PROTO_1_1_1);
+ }
+
+ if (info->flags & SPI_NOR_QUAD_READ) {
+ params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+ spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+ 0, 8, SPINOR_OP_READ_1_1_4,
+ SNOR_PROTO_1_1_4);
+ }
+
+ return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+ const struct spi_nor_flash_parameter *params,
+ u32 shared_hwcaps)
+{
+ int best_match = shared_hwcaps & SNOR_HWCAPS_READ_MASK;
+ int cmd;
+ const struct spi_nor_read_command *read;
+
+	/*
+	 * best_match is a masked, non-negative value, so the original
+	 * "< 0" test could never fire; bail out when no read capability
+	 * is shared with the controller.
+	 */
+	if (!best_match)
+		return -EINVAL;
+
+ if (best_match & SNOR_HWCAPS_READ_1_1_4)
+ cmd = SNOR_CMD_READ_1_1_4;
+ else if (best_match & SNOR_HWCAPS_READ_FAST)
+ cmd = SNOR_CMD_READ_FAST;
+ else
+ cmd = SNOR_CMD_READ;
+
+ read = &params->reads[cmd];
+ nor->read_opcode = read->opcode;
+ nor->read_proto = read->proto;
+
+ /*
+	 * In the spi-nor framework, we don't need to distinguish between
+	 * mode clock cycles and wait state clock cycles.
+	 * Indeed, the value of the mode clock cycles is used by a QSPI
+	 * flash memory to know whether it should enter or leave its 0-4-4
+	 * (Continuous Read / XIP) mode.
+	 * eXecution In Place is out of the scope of the mtd sub-system.
+	 * Hence we choose to merge both mode and wait state clock cycles
+	 * into the so-called dummy clock cycles.
+ */
+ nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+ return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
+ const struct spi_nor_flash_parameter *params,
+ const struct spi_nor_hwcaps *hwcaps)
+{
+ u32 shared_mask;
+ int err;
+
+ /*
+ * Keep only the hardware capabilities supported by both the SPI
+ * controller and the SPI flash memory.
+ */
+ shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+ /* Select the (Fast) Read command. */
+ err = spi_nor_select_read(nor, params, shared_mask);
+ if (err) {
+ dev_dbg(nor->dev,
+ "can't select read settings supported by both the SPI controller and memory.\n");
+ return err;
+ }
+
+ /* Enable Quad I/O if needed. */
+ if (spi_nor_get_protocol_width(nor->read_proto) == 4) {
+ switch (JEDEC_MFR(info)) {
+#ifdef CONFIG_SPI_FLASH_MACRONIX
+ case SNOR_MFR_MACRONIX:
+ err = macronix_quad_enable(nor);
+ break;
+#endif
+ case SNOR_MFR_ST:
+ case SNOR_MFR_MICRON:
+ break;
+
+ default:
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+			/* Kept only for backward compatibility purposes. */
+ err = spansion_read_cr_quad_enable(nor);
+#endif
+ break;
+ }
+ }
+ if (err) {
+ dev_dbg(nor->dev, "quad mode not supported\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+ if (nor->addr_width == 4 &&
+ (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+ !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
+ /*
+ * If the RESET# pin isn't hooked up properly, or the system
+ * otherwise doesn't perform a reset command in the boot
+ * sequence, it's impossible to 100% protect against unexpected
+ * reboots (e.g., crashes). Warn the user (or hopefully, system
+ * designer) that this is bad.
+ */
+ if (nor->flags & SNOR_F_BROKEN_RESET)
+ printf("enabling reset hack; may not recover from unexpected reboots\n");
+ set_4byte(nor, nor->info, 1);
+ }
+
+ return 0;
+}
+
+int spi_nor_scan(struct spi_nor *nor)
+{
+ struct spi_nor_flash_parameter params;
+ const struct flash_info *info = NULL;
+ struct mtd_info *mtd = &nor->mtd;
+ struct spi_nor_hwcaps hwcaps = {
+ .mask = SNOR_HWCAPS_READ |
+ SNOR_HWCAPS_READ_FAST
+ };
+ struct spi_slave *spi = nor->spi;
+ int ret;
+
+ /* Reset SPI protocol for all commands. */
+ nor->reg_proto = SNOR_PROTO_1_1_1;
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
+ if (spi->mode & SPI_RX_QUAD)
+ hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+
+ info = spi_nor_read_id(nor);
+	if (IS_ERR_OR_NULL(info))
+		/* PTR_ERR(NULL) would be 0, so map a NULL info to -ENOENT. */
+		return info ? PTR_ERR(info) : -ENOENT;
+ /* Parse the Serial Flash Discoverable Parameters table. */
+ ret = spi_nor_init_params(nor, info, &params);
+ if (ret)
+ return ret;
+
+ mtd->name = "spi-flash";
+ mtd->priv = nor;
+ mtd->type = MTD_NORFLASH;
+ mtd->writesize = 1;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->size = info->sector_size * info->n_sectors;
+ mtd->_erase = spi_nor_erase;
+ mtd->_read = spi_nor_read;
+ mtd->_write = spi_nor_write;
+
+ nor->size = mtd->size;
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+ if (info->flags & USE_CLSR)
+ nor->flags |= SNOR_F_USE_CLSR;
+
+ if (info->flags & SPI_NOR_NO_FR)
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+
+ /*
+ * Configure the SPI memory:
+ * - select op codes for (Fast) Read, Page Program and Sector Erase.
+ * - set the number of dummy cycles (mode cycles + wait states).
+ * - set the SPI protocols for register and memory accesses.
+ * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
+ */
+ ret = spi_nor_setup(nor, info, &params, &hwcaps);
+ if (ret)
+ return ret;
+
+ if (nor->addr_width) {
+ /* already configured from SFDP */
+ } else if (info->addr_width) {
+ nor->addr_width = info->addr_width;
+ } else if (mtd->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+ if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+ info->flags & SPI_NOR_4B_OPCODES)
+ spi_nor_set_4byte_opcodes(nor, info);
+ } else {
+ nor->addr_width = 3;
+ }
+
+ if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+ dev_dbg(nor->dev, "address width is too large: %u\n",
+ nor->addr_width);
+ return -EINVAL;
+ }
+
+ /* Send all the required SPI flash commands to initialize device */
+ nor->info = info;
+ ret = spi_nor_init(nor);
+ if (ret)
+ return ret;
+
+ return 0;
+}
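+
+/*
+ * Illustrative usage sketch (not built): how a probe function might drive
+ * spi_nor_scan(). The container and the way the spi_slave is obtained are
+ * assumptions, not part of this file.
+ */
+#if 0
+ struct spi_nor *nor = &plat->spi_nor; /* hypothetical container */
+
+ nor->spi = spi; /* struct spi_slave from the SPI bus setup */
+ ret = spi_nor_scan(nor);
+ if (ret)
+ return ret;
+ printf("SF: detected %s, %llu bytes\n", nor->mtd.name,
+ (unsigned long long)nor->mtd.size);
+#endif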
+
+/* U-Boot specific functions, need to extend MTD to support these */
+int spi_flash_cmd_get_sw_write_prot(struct spi_nor *nor)
+{
+ return -ENOTSUPP;
+}
diff --git a/roms/u-boot/drivers/mtd/st_smi.c b/roms/u-boot/drivers/mtd/st_smi.c
new file mode 100644
index 000000000..7c652e6c5
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/st_smi.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2009
+ * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com.
+ */
+
+#include <common.h>
+#include <flash.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mtd/st_smi.h>
+
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+
+#if defined(CONFIG_MTD_NOR_FLASH)
+
+static struct smi_regs *const smicntl =
+ (struct smi_regs * const)CONFIG_SYS_SMI_BASE;
+static ulong bank_base[CONFIG_SYS_MAX_FLASH_BANKS] =
+ CONFIG_SYS_FLASH_ADDR_BASE;
+flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS];
+
+/* data structure to maintain flash ids from different vendors */
+struct flash_device {
+ char *name;
+ u8 erase_cmd;
+ u32 device_id;
+ u32 pagesize;
+ unsigned long sectorsize;
+ unsigned long size_in_bytes;
+};
+
+#define FLASH_ID(n, es, id, psize, ssize, size) \
+{ \
+ .name = n, \
+ .erase_cmd = es, \
+ .device_id = id, \
+ .pagesize = psize, \
+ .sectorsize = ssize, \
+ .size_in_bytes = size \
+}
+
+/*
+ * List of supported flash devices.
+ * Currently the erase_cmd field is not used in this driver.
+ */
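+/*
+ * Note on the device_id encoding below: the three JEDEC READ_ID bytes
+ * are matched with the manufacturer in bits 7:0, the memory type in
+ * bits 15:8 and the capacity in bits 23:16, matching the byte order
+ * smi_read_id() returns on this controller. For example, an ST M25P16
+ * answers 0x20 0x20 0x15 and is matched as 0x00152020.
+ */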
+static struct flash_device flash_devices[] = {
+ FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000),
+ FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000),
+ FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000),
+ FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000),
+ FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000),
+ FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000),
+ FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000),
+ FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000),
+ FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000),
+ FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000),
+ FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000),
+ FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000),
+ FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000),
+ FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000),
+ FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000),
+ FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000),
+ FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000),
+ FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000),
+ FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000),
+ FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000),
+ FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000),
+ FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000),
+ FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000),
+ FLASH_ID("wbd w25q128" , 0xd8, 0x001840EF, 0x100, 0x10000, 0x1000000),
+};
+
+/*
+ * smi_wait_xfer_finish - Wait until TFF is set in status register
+ * @timeout: timeout in milliseconds
+ *
+ * Wait until TFF is set in status register
+ */
+static int smi_wait_xfer_finish(int timeout)
+{
+ ulong start = get_timer(0);
+
+ while (get_timer(start) < timeout) {
+ if (readl(&smicntl->smi_sr) & TFF)
+ return 0;
+
+ /* Try again after 10 us */
+ udelay(10);
+ }
+
+ return -1;
+}
+
+/*
+ * smi_read_id - Read flash id
+ * @info: flash_info structure pointer
+ * @banknum: bank number
+ *
+ * Read the flash id present at bank #banknum
+ */
+static int smi_read_id(flash_info_t *info, int banknum)
+{
+ unsigned int value;
+
+ writel(readl(&smicntl->smi_cr1) | SW_MODE, &smicntl->smi_cr1);
+ writel(READ_ID, &smicntl->smi_tr);
+ writel((banknum << BANKSEL_SHIFT) | SEND | TX_LEN_1 | RX_LEN_3,
+ &smicntl->smi_cr2);
+
+ if (smi_wait_xfer_finish(XFER_FINISH_TOUT))
+ return -EIO;
+
+ value = (readl(&smicntl->smi_rr) & 0x00FFFFFF);
+
+ writel(readl(&smicntl->smi_sr) & ~TFF, &smicntl->smi_sr);
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1);
+
+ return value;
+}
+
+/*
+ * flash_get_size - Detect the SMI flash by reading the ID.
+ * @base: Base address of the flash area bank #banknum
+ * @banknum: Bank number
+ *
+ * Detect the SMI flash by reading the ID. Initializes the flash_info structure
+ * with size, sector count etc.
+ */
+static ulong flash_get_size(ulong base, int banknum)
+{
+ flash_info_t *info = &flash_info[banknum];
+ int value;
+ int i;
+
+ value = smi_read_id(info, banknum);
+
+ if (value < 0) {
+ printf("Flash id could not be read\n");
+ return 0;
+ }
+
+ /* Match the chip id against the list of supported serial-nor flash ids */
+ for (i = 0; i < ARRAY_SIZE(flash_devices); i++) {
+ if (flash_devices[i].device_id == value) {
+ info->size = flash_devices[i].size_in_bytes;
+ info->flash_id = value;
+ info->start[0] = base;
+ info->sector_count =
+ info->size/flash_devices[i].sectorsize;
+
+ return info->size;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * smi_read_sr - Read status register of SMI
+ * @bank: bank number
+ *
+ * This routine will get the status register of the flash chip present at the
+ * given bank
+ */
+static int smi_read_sr(int bank)
+{
+ u32 ctrlreg1, val;
+
+ /* store the CTRL REG1 state */
+ ctrlreg1 = readl(&smicntl->smi_cr1);
+
+ /* Program SMI in HW Mode */
+ writel(readl(&smicntl->smi_cr1) & ~(SW_MODE | WB_MODE),
+ &smicntl->smi_cr1);
+
+ /* Performing a RSR instruction in HW mode */
+ writel((bank << BANKSEL_SHIFT) | RD_STATUS_REG, &smicntl->smi_cr2);
+
+ if (smi_wait_xfer_finish(XFER_FINISH_TOUT))
+ return -1;
+
+ val = readl(&smicntl->smi_sr);
+
+ /* Restore the CTRL REG1 state */
+ writel(ctrlreg1, &smicntl->smi_cr1);
+
+ return val;
+}
+
+/*
+ * smi_wait_till_ready - Wait till last operation is over.
+ * @bank: bank number.
+ * @timeout: timeout in milliseconds.
+ *
+ * This routine polls the WIP (write in progress) bit, bit 0 of the flash
+ * status register, until it clears or until #timeout milliseconds have
+ * elapsed. Returns 0 on success.
+ */
+static int smi_wait_till_ready(int bank, int timeout)
+{
+ int sr;
+ ulong start = get_timer(0);
+
+ /* One chip guarantees max 5 msec wait here after page writes,
+ but potentially three seconds (!) after page erase. */
+ while (get_timer(start) < timeout) {
+ sr = smi_read_sr(bank);
+ if ((sr >= 0) && (!(sr & WIP_BIT)))
+ return 0;
+
+ /* Try again after 10 usec */
+ udelay(10);
+ }
+
+ printf("SMI controller is still in wait, timeout=%d\n", timeout);
+ return -EIO;
+}
+
+/*
+ * smi_write_enable - Enable the flash to do write operation
+ * @bank: bank number
+ *
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static int smi_write_enable(int bank)
+{
+ u32 ctrlreg1;
+ u32 start;
+ int timeout = WMODE_TOUT;
+ int sr;
+
+ /* Store the CTRL REG1 state */
+ ctrlreg1 = readl(&smicntl->smi_cr1);
+
+ /* Program SMI in H/W Mode */
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1);
+
+ /* Give the Flash, Write Enable command */
+ writel((bank << BANKSEL_SHIFT) | WE, &smicntl->smi_cr2);
+
+ if (smi_wait_xfer_finish(XFER_FINISH_TOUT))
+ return -1;
+
+ /* Restore the CTRL REG1 state */
+ writel(ctrlreg1, &smicntl->smi_cr1);
+
+ start = get_timer(0);
+ while (get_timer(start) < timeout) {
+ sr = smi_read_sr(bank);
+ if ((sr >= 0) && (sr & (1 << (bank + WM_SHIFT))))
+ return 0;
+
+ /* Try again after 10 usec */
+ udelay(10);
+ }
+
+ return -1;
+}
+
+/*
+ * smi_init - SMI initialization routine
+ *
+ * SMI initialization routine. Sets SMI control register1.
+ */
+void smi_init(void)
+{
+ /* Setting the fast mode values. SMI working at 166/4 = 41.5 MHz */
+ writel(HOLD1 | FAST_MODE | BANK_EN | DSEL_TIME | PRESCAL4,
+ &smicntl->smi_cr1);
+}
+
+/*
+ * smi_sector_erase - Erase flash sector
+ * @info: flash_info structure pointer
+ * @sector: sector number
+ *
+ * Erase one flash sector; a Write Enable is issued before the erase command.
+ * Returns negative if error occurred.
+ */
+static int smi_sector_erase(flash_info_t *info, unsigned int sector)
+{
+ int bank;
+ unsigned int sect_add;
+ unsigned int instruction;
+
+ switch (info->start[0]) {
+ case SMIBANK0_BASE:
+ bank = BANK0;
+ break;
+ case SMIBANK1_BASE:
+ bank = BANK1;
+ break;
+ case SMIBANK2_BASE:
+ bank = BANK2;
+ break;
+ case SMIBANK3_BASE:
+ bank = BANK3;
+ break;
+ default:
+ return -1;
+ }
+
+ sect_add = sector * (info->size / info->sector_count);
+ instruction = ((sect_add >> 8) & 0x0000FF00) | SECTOR_ERASE;
+
+ writel(readl(&smicntl->smi_sr) & ~(ERF1 | ERF2), &smicntl->smi_sr);
+
+ /* Wait until finished previous write command. */
+ if (smi_wait_till_ready(bank, CONFIG_SYS_FLASH_ERASE_TOUT))
+ return -EBUSY;
+
+ /* Send write enable, before erase commands. */
+ if (smi_write_enable(bank))
+ return -EIO;
+
+ /* Put SMI in SW mode */
+ writel(readl(&smicntl->smi_cr1) | SW_MODE, &smicntl->smi_cr1);
+
+ /* Send Sector Erase command in SW Mode */
+ writel(instruction, &smicntl->smi_tr);
+ writel((bank << BANKSEL_SHIFT) | SEND | TX_LEN_4,
+ &smicntl->smi_cr2);
+ if (smi_wait_xfer_finish(XFER_FINISH_TOUT))
+ return -EIO;
+
+ if (smi_wait_till_ready(bank, CONFIG_SYS_FLASH_ERASE_TOUT))
+ return -EBUSY;
+
+ /* Put SMI in HW mode */
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE,
+ &smicntl->smi_cr1);
+
+ return 0;
+}
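+
+/*
+ * Worked example of the command packing in smi_sector_erase() above
+ * (illustrative values): with 0x10000-byte sectors, sector 5 gives
+ * sect_add = 0x50000, so instruction = ((0x50000 >> 8) & 0xFF00) |
+ * SECTOR_ERASE = 0x0500 | SECTOR_ERASE. The opcode sits in the low byte
+ * of SMI_TR and address bits [23:16] in the byte above it; with the
+ * 64 KiB sectors used by most parts in the table, the discarded low
+ * address bits are zero anyway because erase addresses are
+ * sector-aligned.
+ */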
+
+/*
+ * smi_write - Write to SMI flash
+ * @src_addr: source buffer
+ * @dst_addr: destination address in flash
+ * @length: length to write in bytes
+ * @bank_addr: bank base address
+ *
+ * Write to SMI flash
+ */
+static int smi_write(unsigned int *src_addr, unsigned int *dst_addr,
+ unsigned int length, ulong bank_addr)
+{
+ u8 *src_addr8 = (u8 *)src_addr;
+ u8 *dst_addr8 = (u8 *)dst_addr;
+ int banknum;
+ int i;
+
+ switch (bank_addr) {
+ case SMIBANK0_BASE:
+ banknum = BANK0;
+ break;
+ case SMIBANK1_BASE:
+ banknum = BANK1;
+ break;
+ case SMIBANK2_BASE:
+ banknum = BANK2;
+ break;
+ case SMIBANK3_BASE:
+ banknum = BANK3;
+ break;
+ default:
+ return -1;
+ }
+
+ if (smi_wait_till_ready(banknum, CONFIG_SYS_FLASH_WRITE_TOUT))
+ return -EBUSY;
+
+ /* Set SMI in Hardware Mode */
+ writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1);
+
+ if (smi_write_enable(banknum))
+ return -EIO;
+
+ /* Perform the write command */
+ for (i = 0; i < length; i += 4) {
+ if (((ulong) (dst_addr) % SFLASH_PAGE_SIZE) == 0) {
+ if (smi_wait_till_ready(banknum,
+ CONFIG_SYS_FLASH_WRITE_TOUT))
+ return -EBUSY;
+
+ if (smi_write_enable(banknum))
+ return -EIO;
+ }
+
+ if (length < 4) {
+ int k;
+
+ /*
+ * Handle special case, where length < 4 (redundant env)
+ */
+ for (k = 0; k < length; k++)
+ *dst_addr8++ = *src_addr8++;
+ } else {
+ /* Normal 32bit write */
+ *dst_addr++ = *src_addr++;
+ }
+
+ if ((readl(&smicntl->smi_sr) & (ERF1 | ERF2)))
+ return -EIO;
+ }
+
+ if (smi_wait_till_ready(banknum, CONFIG_SYS_FLASH_WRITE_TOUT))
+ return -EBUSY;
+
+ writel(readl(&smicntl->smi_sr) & ~(WCF), &smicntl->smi_sr);
+
+ return 0;
+}
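+
+/*
+ * Illustrative: assuming a 256-byte flash page (SFLASH_PAGE_SIZE), a
+ * write starting at offset 0xF8 of a page crosses into the next page
+ * after two 32-bit stores. When dst_addr reaches the 0x100 boundary,
+ * the loop in smi_write() first waits for the previous page program to
+ * complete and then re-issues Write Enable before storing more data.
+ */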
+
+/*
+ * write_buff - Write to SMI flash
+ * @info: flash info structure
+ * @src: source buffer
+ * @dest_addr: destination address in flash
+ * @length: length to write in bytes
+ *
+ * Write to SMI flash
+ */
+int write_buff(flash_info_t *info, uchar *src, ulong dest_addr, ulong length)
+{
+ return smi_write((unsigned int *)src, (unsigned int *)dest_addr,
+ length, info->start[0]);
+}
+
+/*
+ * flash_init - SMI flash initialization
+ *
+ * SMI flash initialization
+ */
+unsigned long flash_init(void)
+{
+ unsigned long size = 0;
+ int i, j;
+
+ smi_init();
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ flash_info[i].flash_id = FLASH_UNKNOWN;
+ size += flash_info[i].size = flash_get_size(bank_base[i], i);
+ }
+
+ for (j = 0; j < CONFIG_SYS_MAX_FLASH_BANKS; j++) {
+ for (i = 1; i < flash_info[j].sector_count; i++)
+ flash_info[j].start[i] =
+ flash_info[j].start[i - 1] +
+ flash_info[j].size / flash_info[j].sector_count;
+
+ }
+
+ return size;
+}
+
+/*
+ * flash_print_info - Print SMI flash information
+ *
+ * Print SMI flash information
+ */
+void flash_print_info(flash_info_t *info)
+{
+ int i;
+ if (info->flash_id == FLASH_UNKNOWN) {
+ puts("missing or unknown FLASH type\n");
+ return;
+ }
+
+ if (info->size >= 0x100000)
+ printf(" Size: %ld MB in %d Sectors\n",
+ info->size >> 20, info->sector_count);
+ else
+ printf(" Size: %ld KB in %d Sectors\n",
+ info->size >> 10, info->sector_count);
+
+ puts(" Sector Start Addresses:");
+ for (i = 0; i < info->sector_count; ++i) {
+#ifdef CONFIG_SYS_FLASH_EMPTY_INFO
+ int size;
+ int erased;
+ u32 *flash;
+
+ /*
+ * Check if whole sector is erased
+ */
+ size = (info->size) / (info->sector_count);
+ flash = (u32 *) info->start[i];
+ size = size / sizeof(int);
+
+ while ((size--) && (*flash++ == ~0))
+ ;
+
+ size++;
+ if (size)
+ erased = 0;
+ else
+ erased = 1;
+
+ if ((i % 5) == 0)
+ printf("\n");
+
+ printf(" %08lX%s%s",
+ info->start[i],
+ erased ? " E" : " ", info->protect[i] ? "RO " : " ");
+#else
+ if ((i % 5) == 0)
+ printf("\n ");
+ printf(" %08lX%s",
+ info->start[i], info->protect[i] ? " (RO) " : " ");
+#endif
+ }
+ putc('\n');
+ return;
+}
+
+/*
+ * flash_erase - Erase SMI flash
+ *
+ * Erase SMI flash
+ */
+int flash_erase(flash_info_t *info, int s_first, int s_last)
+{
+ int rcode = 0;
+ int prot = 0;
+ flash_sect_t sect;
+
+ if ((s_first < 0) || (s_first > s_last)) {
+ puts("- no sectors to erase\n");
+ return 1;
+ }
+
+ for (sect = s_first; sect <= s_last; ++sect) {
+ if (info->protect[sect])
+ prot++;
+ }
+ if (prot) {
+ printf("- Warning: %d protected sectors will not be erased!\n",
+ prot);
+ } else {
+ putc('\n');
+ }
+
+ for (sect = s_first; sect <= s_last; sect++) {
+ if (info->protect[sect] == 0) {
+ if (smi_sector_erase(info, sect))
+ rcode = 1;
+ else
+ putc('.');
+ }
+ }
+ puts(" done\n");
+ return rcode;
+}
+#endif
diff --git a/roms/u-boot/drivers/mtd/stm32_flash.c b/roms/u-boot/drivers/mtd/stm32_flash.c
new file mode 100644
index 000000000..95afa2d6b
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/stm32_flash.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2015
+ * Kamil Lulko, <kamil.lulko@gmail.com>
+ */
+
+#include <common.h>
+#include <flash.h>
+#include <asm/io.h>
+#include <asm/arch/stm32.h>
+#include "stm32_flash.h"
+
+flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS];
+
+#define STM32_FLASH ((struct stm32_flash_regs *)STM32_FLASH_CNTL_BASE)
+
+void stm32_flash_latency_cfg(int latency)
+{
+ /* Set the requested wait states; Prefetch, D-Cache and I-Cache enabled */
+ writel(FLASH_ACR_WS(latency) | FLASH_ACR_PRFTEN | FLASH_ACR_ICEN
+ | FLASH_ACR_DCEN, &STM32_FLASH->acr);
+}
+
+static void stm32_flash_lock(u8 lock)
+{
+ if (lock) {
+ setbits_le32(&STM32_FLASH->cr, STM32_FLASH_CR_LOCK);
+ } else {
+ writel(STM32_FLASH_KEY1, &STM32_FLASH->key);
+ writel(STM32_FLASH_KEY2, &STM32_FLASH->key);
+ }
+}
+
+unsigned long flash_init(void)
+{
+ unsigned long total_size = 0;
+ u8 i, j;
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ flash_info[i].flash_id = FLASH_STM32;
+ flash_info[i].sector_count = CONFIG_SYS_MAX_FLASH_SECT;
+ flash_info[i].start[0] = CONFIG_SYS_FLASH_BASE + (i << 20);
+ flash_info[i].size = sect_sz_kb[0];
+ for (j = 1; j < CONFIG_SYS_MAX_FLASH_SECT; j++) {
+ flash_info[i].start[j] = flash_info[i].start[j - 1]
+ + (sect_sz_kb[j - 1]);
+ flash_info[i].size += sect_sz_kb[j];
+ }
+ total_size += flash_info[i].size;
+ }
+
+ return total_size;
+}
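+
+/*
+ * For reference (sector sizes are device-specific; these are the usual
+ * STM32F4 values): with a bank laid out as 4 x 16 KiB + 1 x 64 KiB +
+ * 7 x 128 KiB, the loop above produces start[] offsets of 0, 16K, 32K,
+ * 48K, 64K, 128K, 256K, ... from the bank base, and size sums to 1 MiB.
+ */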
+
+void flash_print_info(flash_info_t *info)
+{
+ int i;
+
+ if (info->flash_id == FLASH_UNKNOWN) {
+ printf("missing or unknown FLASH type\n");
+ return;
+ } else if (info->flash_id == FLASH_STM32) {
+ printf("stm32 Embedded Flash\n");
+ }
+
+ printf(" Size: %ld MB in %d Sectors\n",
+ info->size >> 20, info->sector_count);
+
+ printf(" Sector Start Addresses:");
+ for (i = 0; i < info->sector_count; ++i) {
+ if ((i % 5) == 0)
+ printf("\n ");
+ printf(" %08lX%s",
+ info->start[i],
+ info->protect[i] ? " (RO)" : " ");
+ }
+ printf("\n");
+ return;
+}
+
+int flash_erase(flash_info_t *info, int first, int last)
+{
+ u8 bank = 0xFF;
+ int i;
+
+ for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) {
+ if (info == &flash_info[i]) {
+ bank = i;
+ break;
+ }
+ }
+ if (bank == 0xFF)
+ return -1;
+
+ stm32_flash_lock(0);
+
+ for (i = first; i <= last; i++) {
+ while (readl(&STM32_FLASH->sr) & STM32_FLASH_SR_BSY)
+ ;
+
+ /* clear old sector number before writing a new one */
+ clrbits_le32(&STM32_FLASH->cr, STM32_FLASH_CR_SNB_MASK);
+
+ if (bank == 0) {
+ setbits_le32(&STM32_FLASH->cr,
+ (i << STM32_FLASH_CR_SNB_OFFSET));
+ } else if (bank == 1) {
+ setbits_le32(&STM32_FLASH->cr,
+ ((0x10 | i) << STM32_FLASH_CR_SNB_OFFSET));
+ } else {
+ stm32_flash_lock(1);
+ return -1;
+ }
+ setbits_le32(&STM32_FLASH->cr, STM32_FLASH_CR_SER);
+ setbits_le32(&STM32_FLASH->cr, STM32_FLASH_CR_STRT);
+
+ while (readl(&STM32_FLASH->sr) & STM32_FLASH_SR_BSY)
+ ;
+
+ clrbits_le32(&STM32_FLASH->cr, STM32_FLASH_CR_SER);
+ }
+
+ stm32_flash_lock(1);
+ return 0;
+}
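+
+/*
+ * Illustrative note on the sector-number encoding above: on dual-bank
+ * parts the second bank's sectors are selected with bit 4 of SNB set,
+ * so e.g. bank 1, sector 3 is programmed as (0x10 | 3) <<
+ * STM32_FLASH_CR_SNB_OFFSET = 0x13 << 3 = 0x98 in FLASH_CR.
+ */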
+
+int write_buff(flash_info_t *info, uchar *src, ulong addr, ulong cnt)
+{
+ ulong i;
+
+ while (readl(&STM32_FLASH->sr) & STM32_FLASH_SR_BSY)
+ ;
+
+ stm32_flash_lock(0);
+
+ setbits_le32(&STM32_FLASH->cr, STM32_FLASH_CR_PG);
+ /* To make things simple use byte writes only */
+ for (i = 0; i < cnt; i++) {
+ *(uchar *)(addr + i) = src[i];
+ /* avoid re-ordering flash data write and busy status
+ * check as flash memory space attributes are generally Normal
+ */
+ mb();
+ while (readl(&STM32_FLASH->sr) & STM32_FLASH_SR_BSY)
+ ;
+ }
+ clrbits_le32(&STM32_FLASH->cr, STM32_FLASH_CR_PG);
+ stm32_flash_lock(1);
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/stm32_flash.h b/roms/u-boot/drivers/mtd/stm32_flash.h
new file mode 100644
index 000000000..8cb81ef68
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/stm32_flash.h
@@ -0,0 +1,27 @@
+struct stm32_flash_regs {
+ u32 acr;
+ u32 key;
+ u32 optkeyr;
+ u32 sr;
+ u32 cr;
+ u32 optcr;
+ u32 optcr1;
+};
+
+#define STM32_FLASH_KEY1 0x45670123
+#define STM32_FLASH_KEY2 0xCDEF89AB
+
+#define STM32_FLASH_SR_BSY (1 << 16)
+
+#define STM32_FLASH_CR_PG (1 << 0)
+#define STM32_FLASH_CR_SER (1 << 1)
+#define STM32_FLASH_CR_STRT (1 << 16)
+#define STM32_FLASH_CR_LOCK (1 << 31)
+#define STM32_FLASH_CR_SNB_OFFSET 3
+#define STM32_FLASH_CR_SNB_MASK (15 << STM32_FLASH_CR_SNB_OFFSET)
+
+/* Flash ACR: Access control register */
+#define FLASH_ACR_WS(n) (n)
+#define FLASH_ACR_PRFTEN (1 << 8)
+#define FLASH_ACR_ICEN (1 << 9)
+#define FLASH_ACR_DCEN (1 << 10)
diff --git a/roms/u-boot/drivers/mtd/ubi/Kconfig b/roms/u-boot/drivers/mtd/ubi/Kconfig
new file mode 100644
index 000000000..a78fd51ba
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/Kconfig
@@ -0,0 +1,106 @@
+menu "UBI support"
+
+config UBI_SILENCE_MSG
+ bool "UBI silence verbose messages"
+ default ENV_IS_IN_UBI
+ help
+ Make the verbose messages from UBI stop printing. This leaves
+ warnings and errors enabled.
+
+config MTD_UBI
+ bool "Enable UBI - Unsorted block images"
+ select RBTREE
+ select MTD_PARTITIONS
+ help
+ UBI is a software layer above the MTD layer which provides LVM-like
+ logical volumes on top of MTD devices, hides some complexities of
+ flash chips like wear and bad blocks, and provides some other useful
+ capabilities. Please consult the MTD web site for more details
+ (www.linux-mtd.infradead.org).
+
+if MTD_UBI
+
+config MTD_UBI_WL_THRESHOLD
+ int "UBI wear-leveling threshold"
+ default 4096
+ range 2 65536
+ help
+ This parameter defines the maximum difference between the highest
+ erase counter value and the lowest erase counter value of eraseblocks
+ of UBI devices. When this threshold is exceeded, UBI starts performing
+ wear leveling by moving data from eraseblocks with low erase
+ counters to eraseblocks with high erase counters.
+
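+ As an illustration, with the default threshold of 4096: if the
+ least-worn eraseblock has an erase counter of 1000, UBI starts moving
+ data once some eraseblock's erase counter exceeds 5096.
+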
+ The default value should be OK for SLC NAND flashes, NOR flashes and
+ other flashes which have an eraseblock life-cycle of 100000 erase
+ cycles or more. However, for MLC NAND flashes, which typically have an
+ eraseblock life-cycle of less than 10000, the threshold should be
+ lowered (e.g., to 128 or 256, although it does not have to be a power
+ of 2).
+
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 20
+ range 0 768
+ help
+ This option specifies the maximum bad physical eraseblocks UBI
+ expects on the MTD device (per 1024 eraseblocks). If the underlying
+ flash does not develop bad eraseblocks (e.g. NOR flash), this value
+ is ignored.
+
+ NAND datasheets often specify the minimum and maximum NVB (Number of
+ Valid Blocks) for the flashes' endurance lifetime. The maximum
+ expected bad eraseblocks per 1024 eraseblocks then can be calculated
+ as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+ (MaxNVB is basically the total count of eraseblocks on the chip).
+
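+ As a worked example (illustrative numbers): a chip with MaxNVB = 2048
+ and MinNVB = 2008 gives 1024 * (1 - 2008/2048) = 1024 * 40 / 2048 = 20,
+ which is where the default above comes from.
+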
+ To put it differently, if this value is 20, UBI will try to reserve
+ about 1.9% of physical eraseblocks for bad blocks handling. And that
+ will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+ partition UBI attaches. This means that if you have, say, a NAND
+ flash chip that admits a maximum of 40 bad eraseblocks, and it is
+ split into two MTD partitions of the same size, UBI will reserve 40
+ eraseblocks when attaching each partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
+
+config MTD_UBI_FASTMAP
+ bool "UBI Fastmap (Experimental feature)"
+ default n
+ help
+ Important: this feature is experimental so far and the on-flash
+ format for fastmap may change in future kernel versions.
+
+ Fastmap is a mechanism which allows attaching a UBI device
+ in nearly constant time. Instead of scanning the whole MTD device it
+ only has to locate a checkpoint (called fastmap) on the device.
+ The on-flash fastmap contains all information needed to attach
+ the device. Using fastmap only makes sense on large devices where
+ attaching by scanning takes a long time. UBI will not automatically
+ install a fastmap on old images, but you can set the UBI module
+ parameter fm_autoconvert to 1 if you want it to. Please note that
+ fastmap-enabled images are still usable with UBI implementations
+ without fastmap support. On typical flash devices the whole fastmap
+ fits into one PEB. UBI will reserve PEBs to hold two fastmaps.
+
+ If in doubt, say "N".
+
+config MTD_UBI_FASTMAP_AUTOCONVERT
+ int "enable UBI Fastmap autoconvert"
+ depends on MTD_UBI_FASTMAP
+ default 0
+ help
+ Set this parameter to 1 to install a fastmap automatically on
+ images that do not have one.
+
+config MTD_UBI_FM_DEBUG
+ int "Enable UBI fastmap debug"
+ depends on MTD_UBI_FASTMAP
+ default 0
+ help
+ Enable UBI fastmap debug
+
+endif # MTD_UBI
+endmenu # "UBI support"
diff --git a/roms/u-boot/drivers/mtd/ubi/Makefile b/roms/u-boot/drivers/mtd/ubi/Makefile
new file mode 100644
index 000000000..30d00fbdf
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2006
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+obj-y += attach.o build.o vtbl.o vmt.o upd.o kapi.o eba.o io.o wl.o crc32.o
+obj-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
+obj-y += misc.o
+obj-y += debug.o
diff --git a/roms/u-boot/drivers/mtd/ubi/attach.c b/roms/u-boot/drivers/mtd/ubi/attach.c
new file mode 100644
index 000000000..e488caa55
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/attach.c
@@ -0,0 +1,1766 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI attaching sub-system.
+ *
+ * This sub-system is responsible for attaching MTD devices and it also
+ * implements flash media scanning.
+ *
+ * The attaching information is represented by a &struct ubi_attach_info
+ * object. Information about volumes is represented by &struct ubi_ainf_volume
+ * objects which are kept in volume RB-tree with root at the @volumes field.
+ * The RB-tree is indexed by the volume ID.
+ *
+ * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
+ * objects are kept in per-volume RB-trees with the root at the corresponding
+ * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
+ * per-volume objects and each of these objects is the root of RB-tree of
+ * per-LEB objects.
+ *
+ * Corrupted physical eraseblocks are put to the @corr list, free physical
+ * eraseblocks are put to the @free list and the physical eraseblocks to be
+ * erased are put to the @erase list.
+ *
+ * About corruptions
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
+ * whether the headers are corrupted or not. Sometimes UBI also protects the
+ * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
+ * when it moves the contents of a PEB for wear-leveling purposes.
+ *
+ * UBI tries to distinguish between 2 types of corruptions.
+ *
+ * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
+ * tries to handle them gracefully, without printing too many warnings and
+ * error messages. The idea is that we do not lose important data in these
+ * cases - we may lose only the data which were being written to the media just
+ * before the power cut happened, and the upper layers (e.g., UBIFS) are
+ * supposed to handle such data losses (e.g., by using the FS journal).
+ *
+ * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
+ * the reason is a power cut, UBI puts this PEB to the @erase list, and all
+ * PEBs in the @erase list are scheduled for erasure later.
+ *
+ * 2. Unexpected corruptions which are not caused by power cuts. During
+ * attaching, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point
+ * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
+ * about such PEBs every time the MTD device is attached.
+ *
+ * However, it is difficult to reliably distinguish between these types of
+ * corruptions and UBI's strategy is as follows (in case of attaching by
+ * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
+ * the data area does not contain all 0xFFs, and there were no bit-flips or
+ * integrity errors (e.g., ECC errors in case of NAND) while reading the data
+ * area. Otherwise UBI assumes corruption type 1. So the decision criteria
+ * are as follows.
+ * o If the data area contains only 0xFFs, there are no data, and it is safe
+ * to just erase this PEB - this is corruption type 1.
+ * o If the data area has bit-flips or data integrity errors (ECC errors on
+ * NAND), it is probably a PEB which was being erased when power cut
+ * happened, so this is corruption type 1. However, this is just a guess,
+ * which might be wrong.
+ * o Otherwise this is corruption type 2.
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/random.h>
+#include <u-boot/crc.h>
+#else
+#include <div64.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+#endif
+
+#include <linux/math64.h>
+
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* Temporary variables used during scanning */
+static struct ubi_ec_hdr *ech;
+static struct ubi_vid_hdr *vidh;
+
+/**
+ * add_to_list - add physical eraseblock to a list.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @vol_id: the last used volume id for the PEB
+ * @lnum: the last used LEB number for the PEB
+ * @ec: erase counter of the physical eraseblock
+ * @to_head: if not zero, add to the head of the list
+ * @list: the list to add to
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for physical
+ * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
+ * It stores the @lnum and @vol_id alongside, which can both be
+ * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
+ * If @to_head is not zero, PEB will be added to the head of the list, which
+ * basically means it will be processed first later. E.g., we add corrupted
+ * PEBs (corrupted due to power cuts) to the head of the erase list to make
+ * sure we erase them first and get rid of corruptions ASAP. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id,
+ int lnum, int ec, int to_head, struct list_head *list)
+{
+ struct ubi_ainf_peb *aeb;
+
+ if (list == &ai->free) {
+ dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->erase) {
+ dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
+ } else if (list == &ai->alien) {
+ dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
+ ai->alien_peb_count += 1;
+ } else
+ BUG();
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->ec = ec;
+ if (to_head)
+ list_add(&aeb->u.list, list);
+ else
+ list_add_tail(&aeb->u.list, list);
+ return 0;
+}
+
+/**
+ * add_corrupted - add a corrupted physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to add
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
+ * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
+ * was presumably not caused by a power cut. Returns zero in case of success
+ * and a negative error code in case of failure.
+ */
+static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ ai->corr_peb_count += 1;
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->corr);
+ return 0;
+}
+
+/**
+ * validate_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+ * @av: information about the volume this logical eraseblock belongs to
+ * @pnum: physical eraseblock number the VID header came from
+ *
+ * This function checks that data stored in @vid_hdr is consistent. Returns
+ * non-zero if an inconsistency was found and zero if not.
+ *
+ * Note, UBI sanity-checks everything it reads from the flash media.
+ * Most of the checks are done in the I/O sub-system. Here we check that the
+ * information in the VID header is consistent with the information in other
+ * VID headers of the same volume.
+ */
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr,
+ const struct ubi_ainf_volume *av, int pnum)
+{
+ int vol_type = vid_hdr->vol_type;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+
+ if (av->leb_count != 0) {
+ int av_vol_type;
+
+ /*
+ * This is not the first logical eraseblock belonging to this
+ * volume. Ensure that the data in its VID header is consistent
+ * with the data in previous logical eraseblock headers.
+ */
+
+ if (vol_id != av->vol_id) {
+ ubi_err(ubi, "inconsistent vol_id");
+ goto bad;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av_vol_type = UBI_VID_STATIC;
+ else
+ av_vol_type = UBI_VID_DYNAMIC;
+
+ if (vol_type != av_vol_type) {
+ ubi_err(ubi, "inconsistent vol_type");
+ goto bad;
+ }
+
+ if (used_ebs != av->used_ebs) {
+ ubi_err(ubi, "inconsistent used_ebs");
+ goto bad;
+ }
+
+ if (data_pad != av->data_pad) {
+ ubi_err(ubi, "inconsistent data_pad");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ ubi_dump_av(av);
+ return -EINVAL;
+}
+
+/**
+ * add_volume - add volume to the attaching information.
+ * @ai: attaching information
+ * @vol_id: ID of the volume to add
+ * @pnum: physical eraseblock number
+ * @vid_hdr: volume identifier header
+ *
+ * If the volume corresponding to the @vid_hdr logical eraseblock is already
+ * present in the attaching information, this function does nothing. Otherwise
+ * it adds corresponding volume to the attaching information. Returns a pointer
+ * to the allocated "av" object in case of success and a negative error code in
+ * case of failure.
+ */
+static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
+ int vol_id, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
+
+ /* Walk the volume RB-tree to check whether this volume is already present */
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id)
+ return av;
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ /* The volume is absent - add it */
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ return ERR_PTR(-ENOMEM);
+
+ av->highest_lnum = av->leb_count = 0;
+ av->vol_id = vol_id;
+ av->root = RB_ROOT;
+ av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ av->data_pad = be32_to_cpu(vid_hdr->data_pad);
+ av->compat = vid_hdr->compat;
+ av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
+ : UBI_STATIC_VOLUME;
+ if (vol_id > ai->highest_vol_id)
+ ai->highest_vol_id = vol_id;
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+ ai->vols_found += 1;
+ dbg_bld("added volume %d", vol_id);
+ return av;
+}
+
+/**
+ * ubi_compare_lebs - find out which logical eraseblock is newer.
+ * @ubi: UBI device description object
+ * @aeb: first logical eraseblock to compare
+ * @pnum: physical eraseblock number of the second logical eraseblock to
+ * compare
+ * @vid_hdr: volume identifier header of the second logical eraseblock
+ *
+ * This function compares 2 copies of a LEB and reports which one is newer. In
+ * case of success this function returns a non-negative value whose bits encode
+ * the result; in case of failure, a negative error code is returned. The
+ * success return codes use the following bits:
+ * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
+ * second PEB (described by @pnum and @vid_hdr);
+ * o bit 0 is set: the second PEB is newer;
+ * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
+ * o bit 1 is set: bit-flips were detected in the newer LEB;
+ * o bit 2 is cleared: the older LEB is not corrupted;
+ * o bit 2 is set: the older LEB is corrupted.
+ */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr)
+{
+ int len, err, second_is_newer, bitflips = 0, corrupted = 0;
+ uint32_t data_crc, crc;
+ struct ubi_vid_hdr *vh = NULL;
+ unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
+
+ if (sqnum2 == aeb->sqnum) {
+ /*
+ * This must be a really ancient UBI image which has been
+ * created before sequence number support was added. At
+ * that time we used 32-bit LEB versions stored in logical
+ * eraseblocks. That was before UBI got into mainline. We do not
+ * support these images anymore. Well, those images still work,
+ * but only if no unclean reboots happened.
+ */
+ ubi_err(ubi, "unsupported on-flash UBI format");
+ return -EINVAL;
+ }
+
+ /* Obviously the LEB with lower sequence counter is older */
+ second_is_newer = (sqnum2 > aeb->sqnum);
+
+ /*
+ * Now we know which copy is newer. If the copy flag of the PEB with
+ * newer version is not set, then we just return, otherwise we have to
+ * check data CRC. For the second PEB we already have the VID header,
+ * for the first one - we'll need to re-read it from flash.
+ *
+ * Note: this may be optimized so that we wouldn't read twice.
+ */
+
+ if (second_is_newer) {
+ if (!vid_hdr->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("second PEB %d is newer, copy_flag is unset",
+ pnum);
+ return 1;
+ }
+ } else {
+ if (!aeb->copy_flag) {
+ /* It is not a copy, so it is newer */
+ dbg_bld("first PEB %d is newer, copy_flag is unset",
+ pnum);
+ return bitflips << 1;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh)
+ return -ENOMEM;
+
+ pnum = aeb->pnum;
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ bitflips = 1;
+ else {
+ ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
+ if (err > 0)
+ err = -EIO;
+
+ goto out_free_vidh;
+ }
+ }
+
+ vid_hdr = vh;
+ }
+
+ /* Read the data of the copy and check the CRC */
+
+ len = be32_to_cpu(vid_hdr->data_size);
+
+ mutex_lock(&ubi->buf_mutex);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto out_unlock;
+
+ data_crc = be32_to_cpu(vid_hdr->data_crc);
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
+ if (crc != data_crc) {
+ dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
+ pnum, crc, data_crc);
+ corrupted = 1;
+ bitflips = 0;
+ second_is_newer = !second_is_newer;
+ } else {
+ dbg_bld("PEB %d CRC is OK", pnum);
+ bitflips |= !!err;
+ }
+ mutex_unlock(&ubi->buf_mutex);
+
+ ubi_free_vid_hdr(ubi, vh);
+
+ if (second_is_newer)
+ dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
+ else
+ dbg_bld("first PEB %d is newer, copy_flag is set", pnum);
+
+ return second_is_newer | (bitflips << 1) | (corrupted << 2);
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+out_free_vidh:
+ ubi_free_vid_hdr(ubi, vh);
+ return err;
+}
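+
+/*
+ * Illustrative decode of a successful ubi_compare_lebs() return value
+ * (sketch, not built; variable names are placeholders):
+ */
+#if 0
+ int res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
+
+ if (res >= 0) {
+ int second_is_newer = res & 1; /* bit 0 */
+ int newer_has_bitflips = res & 2; /* bit 1 */
+ int older_is_corrupted = res & 4; /* bit 2 */
+ }
+#endif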
+
+/**
+ * ubi_add_to_av - add used physical eraseblock to the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @ec: erase counter
+ * @vid_hdr: the volume identifier header
+ * @bitflips: if bit-flips were detected when this physical eraseblock was read
+ *
+ * This function adds information about a used physical eraseblock to the
+ * 'used' tree of the corresponding volume. The function is rather complex
+ * because it has to handle cases when this is not the first physical
+ * eraseblock belonging to the same logical eraseblock, and the newer one has
+ * to be picked, while the older one has to be dropped. This function returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
+{
+ int err, vol_id, lnum;
+ unsigned long long sqnum;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node **p, *parent = NULL;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+ sqnum = be64_to_cpu(vid_hdr->sqnum);
+
+ dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
+ pnum, vol_id, lnum, ec, sqnum, bitflips);
+
+ av = add_volume(ai, vol_id, pnum, vid_hdr);
+ if (IS_ERR(av))
+ return PTR_ERR(av);
+
+ if (ai->max_sqnum < sqnum)
+ ai->max_sqnum = sqnum;
+
+ /*
+ * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
+ * if this is the first instance of this logical eraseblock or not.
+ */
+ p = &av->root.rb_node;
+ while (*p) {
+ int cmp_res;
+
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (lnum != aeb->lnum) {
+ if (lnum < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ continue;
+ }
+
+ /*
+ * There is already a physical eraseblock describing the same
+ * logical eraseblock present.
+ */
+
+ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
+ aeb->pnum, aeb->sqnum, aeb->ec);
+
+ /*
+ * Make sure that the logical eraseblocks have different
+ * sequence numbers. Otherwise the image is bad.
+ *
+ * However, if the sequence number is zero, we assume it must
+ * be an ancient UBI image from the era when UBI did not have
+ * sequence numbers. We still can attach these images, unless
+ * there is a need to distinguish between old and new
+ * eraseblocks, in which case we'll refuse the image in
+ * 'ubi_compare_lebs()'. In other words, we attach old clean
+ * images, but refuse attaching old images with duplicated
+ * logical eraseblocks because there was an unclean reboot.
+ */
+ if (aeb->sqnum == sqnum && sqnum != 0) {
+ ubi_err(ubi, "two LEBs with same sequence number %llu",
+ sqnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_vid_hdr(vid_hdr);
+ return -EINVAL;
+ }
+
+ /*
+ * Now we have to drop the older one and preserve the newer
+ * one.
+ */
+ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ if (cmp_res & 1) {
+ /*
+ * This logical eraseblock is newer than the one
+ * found earlier.
+ */
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ err = add_to_list(ai, aeb->pnum, aeb->vol_id,
+ aeb->lnum, aeb->ec, cmp_res & 4,
+ &ai->erase);
+ if (err)
+ return err;
+
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = ((cmp_res & 2) || bitflips);
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum == lnum)
+ av->last_data_size =
+ be32_to_cpu(vid_hdr->data_size);
+
+ return 0;
+ } else {
+ /*
+ * This logical eraseblock is older than the one found
+ * previously.
+ */
+ return add_to_list(ai, pnum, vol_id, lnum, ec,
+ cmp_res & 4, &ai->erase);
+ }
+ }
+
+ /*
+ * We've met this logical eraseblock for the first time, add it to the
+ * attaching information.
+ */
+
+ err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
+ if (err)
+ return err;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->ec = ec;
+ aeb->pnum = pnum;
+ aeb->vol_id = vol_id;
+ aeb->lnum = lnum;
+ aeb->scrub = bitflips;
+ aeb->copy_flag = vid_hdr->copy_flag;
+ aeb->sqnum = sqnum;
+
+ if (av->highest_lnum <= lnum) {
+ av->highest_lnum = lnum;
+ av->last_data_size = be32_to_cpu(vid_hdr->data_size);
+ }
+
+ av->leb_count += 1;
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+ return 0;
+}
+
+/**
+ * ubi_find_av - find volume in the attaching information.
+ * @ai: attaching information
+ * @vol_id: the requested volume ID
+ *
+ * This function returns a pointer to the volume description or %NULL if there
+ * are no data about this volume in the attaching information.
+ */
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *p = ai->volumes.rb_node;
+
+ while (p) {
+ av = rb_entry(p, struct ubi_ainf_volume, rb);
+
+ if (vol_id == av->vol_id)
+ return av;
+
+ if (vol_id > av->vol_id)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+
+ return NULL;
+}
+
+/**
+ * ubi_remove_av - delete attaching information about a volume.
+ * @ai: attaching information
+ * @av: the volume attaching information to delete
+ */
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct rb_node *rb;
+ struct ubi_ainf_peb *aeb;
+
+ dbg_bld("remove attaching information about volume %d", av->vol_id);
+
+ while ((rb = rb_first(&av->root))) {
+ aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, &ai->erase);
+ }
+
+ rb_erase(&av->rb, &ai->volumes);
+ kfree(av);
+ ai->vols_found -= 1;
+}
+
+/**
+ * early_erase_peb - erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: physical eraseblock number to erase;
+ * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
+ *
+ * This function erases physical eraseblock 'pnum', and writes the erase
+ * counter header to it. This function should only be used on UBI device
+ * initialization stages, when the EBA sub-system had not been yet initialized.
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int early_erase_peb(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai, int pnum, int ec)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
+ pnum, ec);
+ return -EINVAL;
+ }
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_sync_erase(ubi, pnum, 0);
+ if (err < 0)
+ goto out_free;
+
+ err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * ubi_early_get_peb - get a free physical eraseblock.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns a free physical eraseblock. It is supposed to be
+ * called on the UBI initialization stages when the wear-leveling sub-system is
+ * not initialized yet. This function picks a physical eraseblock from one of
+ * the lists, writes the EC header if it is needed, and removes it from the
+ * list.
+ *
+ * This function returns a pointer to the "aeb" of the found free PEB in case
+ * of success and an error pointer in case of failure.
+ */
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int err = 0;
+ struct ubi_ainf_peb *aeb, *tmp_aeb;
+
+ if (!list_empty(&ai->free)) {
+ aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
+ list_del(&aeb->u.list);
+ dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ /*
+ * We try to erase the first physical eraseblock from the erase list
+ * and pick it if we succeed, or try to erase the next one if not. And
+ * so forth. We don't want to worry about bad eraseblocks here -
+ * they'll be handled later.
+ */
+ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
+ if (err)
+ continue;
+
+ aeb->ec += 1;
+ list_del(&aeb->u.list);
+ dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
+ return aeb;
+ }
+
+ ubi_err(ubi, "no free eraseblocks");
+ return ERR_PTR(-ENOSPC);
+}
+
+/**
+ * check_corruption - check the data area of PEB.
+ * @ubi: UBI device description object
+ * @vid_hdr: the (corrupted) VID header of this PEB
+ * @pnum: the physical eraseblock number to check
+ *
+ * This is a helper function which is used to distinguish between VID header
+ * corruptions caused by power cuts and other reasons. If the PEB contains only
+ * 0xFF bytes in the data area, the VID header is most probably corrupted
+ * because of a power cut (%0 is returned in this case). Otherwise, it was
+ * probably corrupted for some other reasons (%1 is returned in this case). A
+ * negative error code is returned if a read error occurred.
+ *
+ * If the corruption reason was a power cut, UBI can safely erase this PEB.
+ * Otherwise, it should preserve it to avoid possibly destroying important
+ * information.
+ */
+static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
+ int pnum)
+{
+ int err;
+
+ mutex_lock(&ubi->buf_mutex);
+ memset(ubi->peb_buf, 0x00, ubi->leb_size);
+
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start,
+ ubi->leb_size);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+ /*
+ * Bit-flips or integrity errors while reading the data area.
+ * It is difficult to say for sure what type of corruption this
+ * is, but presumably a power cut happened while this PEB was
+ * erased, so it became unstable and corrupted, and should be
+ * erased.
+ */
+ err = 0;
+ goto out_unlock;
+ }
+
+ if (err)
+ goto out_unlock;
+
+ if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
+ goto out_unlock;
+
+ ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ pnum);
+ ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
+ ubi_dump_vid_hdr(vid_hdr);
+ pr_err("hexdump of PEB %d offset %d, length %d",
+ pnum, ubi->leb_start, ubi->leb_size);
+ ubi_dbg_print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
+ ubi->peb_buf, ubi->leb_size, 1);
+ err = 1;
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+ return err;
+}
+
+/**
+ * scan_peb - scan and process UBI headers of a PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @pnum: the physical eraseblock number
+ * @vid: The volume ID of the found volume will be stored in this pointer
+ * @sqnum: The sqnum of the found volume will be stored in this pointer
+ *
+ * This function reads UBI headers of PEB @pnum, checks them, and adds
+ * information about this PEB to the corresponding list or RB-tree in the
+ * "attaching info" structure. Returns zero if the physical eraseblock was
+ * successfully handled and a negative error code in case of failure.
+ */
+static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int pnum, int *vid, unsigned long long *sqnum)
+{
+ long long uninitialized_var(ec);
+ int err, bitflips = 0, vol_id = -1, ec_err = 0;
+
+ dbg_bld("scan PEB %d", pnum);
+
+ /* Skip bad physical eraseblocks */
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0)
+ return err;
+ else if (err) {
+ ai->bad_peb_count += 1;
+ return 0;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_FF:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 0, &ai->erase);
+ case UBI_IO_FF_BITFLIPS:
+ ai->empty_peb_count += 1;
+ return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ UBI_UNKNOWN, 1, &ai->erase);
+ case UBI_IO_BAD_HDR_EBADMSG:
+ case UBI_IO_BAD_HDR:
+ /*
+ * We have to also look at the VID header, possibly it is not
+ * corrupted. Set %bitflips flag in order to make this PEB be
+ * moved and EC be re-created.
+ */
+ ec_err = err;
+ ec = UBI_UNKNOWN;
+ bitflips = 1;
+ break;
+ default:
+ ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
+ err);
+ return -EINVAL;
+ }
+
+ if (!ec_err) {
+ int image_seq;
+
+ /* Make sure UBI version is OK */
+ if (ech->version != UBI_VERSION) {
+ ubi_err(ubi, "this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ech->version);
+ return -EINVAL;
+ }
+
+ ec = be64_to_cpu(ech->ec);
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. The EC headers have 64 bits
+ * reserved, but we anyway make use of only 31 bit
+ * values, as this seems to be enough for any existing
+ * flash. Upgrade UBI and use 64-bit erase counters
+ * internally.
+ */
+ ubi_err(ubi, "erase counter overflow, max is %d",
+ UBI_MAX_ERASECOUNTER);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure that all PEBs have the same image sequence number.
+ * This allows us to detect situations when users flash UBI
+ * images incorrectly, so that the flash has the new UBI image
+ * and leftovers from the old one. This feature was added
+ * relatively recently, and the sequence number was always
+ * zero, because old UBI implementations always set it to zero.
+ * For this reason, we do not panic if some PEBs have zero
+ * sequence number, while other PEBs have non-zero sequence
+ * number.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+ if (image_seq && ubi->image_seq != image_seq) {
+ ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
+ image_seq, pnum, ubi->image_seq);
+ ubi_dump_ec_hdr(ech);
+ return -EINVAL;
+ }
+ }
+
+ /* OK, we're done with the EC header, let's look at the VID header */
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
+ if (err < 0)
+ return err;
+ switch (err) {
+ case 0:
+ break;
+ case UBI_IO_BITFLIPS:
+ bitflips = 1;
+ break;
+ case UBI_IO_BAD_HDR_EBADMSG:
+ if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
+ /*
+ * Both EC and VID headers are corrupted and were read
+ * with data integrity error, probably this is a bad
+ * PEB, but it is not marked as bad yet. This may also
+ * be a result of power cut during erasure.
+ */
+ ai->maybe_bad_peb_count += 1;
+ case UBI_IO_BAD_HDR:
+ if (ec_err)
+ /*
+ * Both headers are corrupted. There is a possibility
+ * that this is a valid UBI PEB which has a corresponding
+ * LEB, but the headers are corrupted. However, it is
+ * impossible to distinguish it from a PEB which just
+ * contains garbage because of a power cut during erase
+ * operation. So we just schedule this PEB for erasure.
+ *
+ * Besides, in case of NOR flash, we deliberately
+ * corrupt both headers because NOR flash erasure is
+ * slow and can start from the end.
+ */
+ err = 0;
+ else
+ /*
+ * The EC was OK, but the VID header is corrupted. We
+ * have to check what is in the data area.
+ */
+ err = check_corruption(ubi, vidh, pnum);
+
+ if (err < 0)
+ return err;
+ else if (!err)
+ /* This corruption is caused by a power cut */
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ /* This is an unexpected corruption */
+ err = add_corrupted(ai, pnum, ec);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF_BITFLIPS:
+ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ case UBI_IO_FF:
+ if (ec_err || bitflips)
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 1, &ai->erase);
+ else
+ err = add_to_list(ai, pnum, UBI_UNKNOWN,
+ UBI_UNKNOWN, ec, 0, &ai->free);
+ if (err)
+ return err;
+ goto adjust_mean_ec;
+ default:
+ ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
+ err);
+ return -EINVAL;
+ }
+
+ vol_id = be32_to_cpu(vidh->vol_id);
+ if (vid)
+ *vid = vol_id;
+ if (sqnum)
+ *sqnum = be64_to_cpu(vidh->sqnum);
+ if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+ int lnum = be32_to_cpu(vidh->lnum);
+
+ /* Unsupported internal volume */
+ switch (vidh->compat) {
+ case UBI_COMPAT_DELETE:
+ if (vol_id != UBI_FM_SB_VOLUME_ID
+ && vol_id != UBI_FM_DATA_VOLUME_ID) {
+ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+ }
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 1, &ai->erase);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_RO:
+ ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
+ vol_id, lnum);
+ ubi->ro_mode = 1;
+ break;
+
+ case UBI_COMPAT_PRESERVE:
+ ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
+ vol_id, lnum);
+ err = add_to_list(ai, pnum, vol_id, lnum,
+ ec, 0, &ai->alien);
+ if (err)
+ return err;
+ return 0;
+
+ case UBI_COMPAT_REJECT:
+ ubi_err(ubi, "incompatible internal volume %d:%d found",
+ vol_id, lnum);
+ return -EINVAL;
+ }
+ }
+
+ if (ec_err)
+ ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
+ pnum);
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+ if (err)
+ return err;
+
+adjust_mean_ec:
+ if (!ec_err) {
+ ai->ec_sum += ec;
+ ai->ec_count += 1;
+ if (ec > ai->max_ec)
+ ai->max_ec = ec;
+ if (ec < ai->min_ec)
+ ai->min_ec = ec;
+ }
+
+ return 0;
+}
+
+/**
+ * late_analysis - analyze the overall situation with PEB.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This is a helper function which takes a look at what PEBs we have after we
+ * gather information about all of them ("ai" is complete). It decides whether
+ * the flash is empty and should be formatted or whether there are too many
+ * corrupted PEBs and we should not attach this MTD device. Returns zero if we
+ * should proceed with attaching the MTD device, and %-EINVAL if we should not.
+ */
+static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ int max_corr, peb_count;
+
+ peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
+ max_corr = peb_count / 20 ?: 8;
+
+ /*
+ * A few corrupted PEBs are not a problem and may just be a result of
+ * unclean reboots. However, many of them may indicate some problems
+ * with the flash HW or driver.
+ */
+ if (ai->corr_peb_count) {
+ ubi_err(ubi, "%d PEBs are corrupted and preserved",
+ ai->corr_peb_count);
+ pr_err("Corrupted PEBs are:");
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ pr_cont(" %d", aeb->pnum);
+ pr_cont("\n");
+
+ /*
+ * If too many PEBs are corrupted, we refuse attaching,
+ * otherwise, only print a warning.
+ */
+ if (ai->corr_peb_count >= max_corr) {
+ ubi_err(ubi, "too many corrupted PEBs, refusing");
+ return -EINVAL;
+ }
+ }
+
+ if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
+ /*
+ * All PEBs are empty, or almost all - a couple PEBs look like
+ * they may be bad PEBs which were not marked as bad yet.
+ *
+ * This piece of code basically tries to distinguish between
+ * the following situations:
+ *
+ * 1. Flash is empty, but there are a few bad PEBs, which are not
+ * marked as bad so far, and which were read with error. We
+ * want to go ahead and format this flash. While formatting,
+ * the faulty PEBs will probably be marked as bad.
+ *
+ * 2. Flash contains non-UBI data and we do not want to format
+ * it and destroy possibly important information.
+ */
+ if (ai->maybe_bad_peb_count <= 2) {
+ ai->is_empty = 1;
+ ubi_msg(ubi, "empty MTD device detected");
+ get_random_bytes(&ubi->image_seq,
+ sizeof(ubi->image_seq));
+ } else {
+ ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
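+
+/*
+ * The max_corr threshold above, shown in isolation: it is 5% of the
+ * usable PEB count, with a floor of 8 supplied by the GNU "?:"
+ * extension when the division truncates to zero. A minimal sketch with
+ * hypothetical PEB counts, for illustration only:
+ */
+static inline int example_max_corr(int peb_count)
+{
+ /* e.g. peb_count = 4096 -> 204; peb_count = 10 -> 0, so the floor 8 */
+ return peb_count / 20 ?: 8;
+}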
+
+/**
+ * destroy_av - free volume attaching information.
+ * @av: volume attaching information
+ * @ai: attaching information
+ *
+ * This function destroys the volume attaching information.
+ */
+static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *this = av->root.rb_node;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
+ this = rb_parent(this);
+ if (this) {
+ if (this->rb_left == &aeb->u.rb)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ }
+ kfree(av);
+}
+
+/**
+ * destroy_ai - destroy attaching information.
+ * @ai: attaching information
+ */
+static void destroy_ai(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb, *aeb_tmp;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb;
+
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
+
+ /* Destroy the volume RB-tree */
+ rb = ai->volumes.rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ av = rb_entry(rb, struct ubi_ainf_volume, rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &av->rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ destroy_av(ai, av);
+ }
+ }
+
+ kmem_cache_destroy(ai->aeb_slab_cache);
+
+ kfree(ai);
+}
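+
+/*
+ * Both destroy_av() and destroy_ai() above free an RB-tree with the
+ * same recursion-free post-order walk: descend to a leaf, detach it
+ * from its parent, free it, and resume from the parent. A generic
+ * sketch of the pattern (free_node() is a hypothetical callback):
+ */
+static void example_destroy_rb_tree(struct rb_node *root,
+ void (*free_node)(struct rb_node *))
+{
+ struct rb_node *this = root;
+
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ struct rb_node *leaf = this;
+
+ this = rb_parent(leaf);
+ if (this) {
+ if (this->rb_left == leaf)
+ this->rb_left = NULL;
+ else
+ this->rb_right = NULL;
+ }
+ free_node(leaf);
+ }
+ }
+}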
+
+/**
+ * scan_all - scan entire MTD device.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ * @start: start scanning at this PEB
+ *
+ * This function does full scanning of an MTD device and returns complete
+ * information about it in the form of a "struct ubi_attach_info" object. In
+ * case of failure, an error code is returned.
+ */
+static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int start)
+{
+ int err, pnum;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb;
+
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return err;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = start; pnum < ubi->peb_count; pnum++) {
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, ai, pnum, NULL, NULL);
+ if (err < 0)
+ goto out_vidh;
+ }
+
+ ubi_msg(ubi, "scanning is finished");
+
+ /* Calculate mean erase counter */
+ if (ai->ec_count)
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+
+ err = late_analysis(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ /*
+ * In case of an unknown erase counter, we use the mean erase counter
+ * value.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+ }
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ if (aeb->ec == UBI_UNKNOWN)
+ aeb->ec = ai->mean_ec;
+
+ err = self_check_ai(ubi, ai);
+ if (err)
+ goto out_vidh;
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ return 0;
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+ return err;
+}
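+
+/*
+ * Worked example of the mean erase counter substitution performed in
+ * scan_all() above, with hypothetical numbers: three PEBs with readable
+ * EC headers 90, 100 and 110 plus one PEB whose EC header was lost give
+ *
+ *	ec_sum = 90 + 100 + 110 = 300, ec_count = 3,
+ *	mean_ec = div_u64(300, 3) = 100,
+ *
+ * and the PEB with the unknown counter is assigned ec = 100, so the
+ * wear-leveling code never has to deal with UBI_UNKNOWN.
+ */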
+
+static struct ubi_attach_info *alloc_ai(void)
+{
+ struct ubi_attach_info *ai;
+
+ ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
+ if (!ai)
+ return ai;
+
+ INIT_LIST_HEAD(&ai->corr);
+ INIT_LIST_HEAD(&ai->free);
+ INIT_LIST_HEAD(&ai->erase);
+ INIT_LIST_HEAD(&ai->alien);
+ ai->volumes = RB_ROOT;
+ ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
+ sizeof(struct ubi_ainf_peb),
+ 0, 0, NULL);
+ if (!ai->aeb_slab_cache) {
+ kfree(ai);
+ ai = NULL;
+ }
+
+ return ai;
+}
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+
+/**
+ * scan_fast - try to find a fastmap and attach from it.
+ * @ubi: UBI device description object
+ * @ai: attach info object
+ *
+ * Returns 0 on success, a negative error code in case of an internal
+ * error, %UBI_NO_FASTMAP if no fastmap was found, or %UBI_BAD_FASTMAP
+ * if the fastmap that was found turned out to be invalid.
+ */
+static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
+{
+ int err, pnum, fm_anchor = -1;
+ unsigned long long max_sqnum = 0;
+
+ err = -ENOMEM;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ goto out;
+
+ vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vidh)
+ goto out_ech;
+
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ int vol_id = -1;
+ unsigned long long sqnum = -1;
+ cond_resched();
+
+ dbg_gen("process PEB %d", pnum);
+ err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
+ if (err < 0)
+ goto out_vidh;
+
+ if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
+ max_sqnum = sqnum;
+ fm_anchor = pnum;
+ }
+ }
+
+ ubi_free_vid_hdr(ubi, vidh);
+ kfree(ech);
+
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ destroy_ai(*ai);
+ *ai = alloc_ai();
+ if (!*ai)
+ return -ENOMEM;
+
+ return ubi_scan_fastmap(ubi, *ai, fm_anchor);
+
+out_vidh:
+ ubi_free_vid_hdr(ubi, vidh);
+out_ech:
+ kfree(ech);
+out:
+ return err;
+}
+
+#endif
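+
+/*
+ * The anchor selection inside scan_fast(), shown in isolation: among
+ * all PEBs carrying the fastmap super-block volume, the one with the
+ * highest sequence number wins, because an interrupted fastmap update
+ * may leave a stale older copy behind. A minimal sketch over
+ * hypothetical per-PEB (vol_id, sqnum) arrays:
+ */
+static int example_pick_fm_anchor(const int *vol_id,
+ const unsigned long long *sqnum, int n)
+{
+ unsigned long long max_sqnum = 0;
+ int pnum, fm_anchor = -1;
+
+ for (pnum = 0; pnum < n; pnum++)
+ if (vol_id[pnum] == UBI_FM_SB_VOLUME_ID &&
+ sqnum[pnum] > max_sqnum) {
+ max_sqnum = sqnum[pnum];
+ fm_anchor = pnum;
+ }
+
+ return fm_anchor; /* -1 means "no fastmap found" */
+}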
+
+/**
+ * ubi_attach - attach an MTD device.
+ * @ubi: UBI device descriptor
+ * @force_scan: if set to non-zero attach by scanning
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_attach(struct ubi_device *ubi, int force_scan)
+{
+ int err;
+ struct ubi_attach_info *ai;
+
+ ai = alloc_ai();
+ if (!ai)
+ return -ENOMEM;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* On small flash devices we disable fastmap in any case. */
+ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
+ ubi->fm_disabled = 1;
+ force_scan = 1;
+ }
+
+ if (force_scan)
+ err = scan_all(ubi, ai, 0);
+ else {
+ err = scan_fast(ubi, &ai);
+ if (err > 0 || mtd_is_eccerr(err)) {
+ if (err != UBI_NO_FASTMAP) {
+ destroy_ai(ai);
+ ai = alloc_ai();
+ if (!ai)
+ return -ENOMEM;
+
+ err = scan_all(ubi, ai, 0);
+ } else {
+ err = scan_all(ubi, ai, UBI_FM_MAX_START);
+ }
+ }
+ }
+#else
+ err = scan_all(ubi, ai, 0);
+#endif
+ if (err)
+ goto out_ai;
+
+ ubi->bad_peb_count = ai->bad_peb_count;
+ ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+ ubi->corr_peb_count = ai->corr_peb_count;
+ ubi->max_ec = ai->max_ec;
+ ubi->mean_ec = ai->mean_ec;
+ dbg_gen("max. sequence number: %llu", ai->max_sqnum);
+
+ err = ubi_read_volume_table(ubi, ai);
+ if (err)
+ goto out_ai;
+
+ err = ubi_wl_init(ubi, ai);
+ if (err)
+ goto out_vtbl;
+
+ err = ubi_eba_init(ubi, ai);
+ if (err)
+ goto out_wl;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) {
+ struct ubi_attach_info *scan_ai;
+
+ scan_ai = alloc_ai();
+ if (!scan_ai) {
+ err = -ENOMEM;
+ goto out_wl;
+ }
+
+ err = scan_all(ubi, scan_ai, 0);
+ if (err) {
+ destroy_ai(scan_ai);
+ goto out_wl;
+ }
+
+ err = self_check_eba(ubi, ai, scan_ai);
+ destroy_ai(scan_ai);
+
+ if (err)
+ goto out_wl;
+ }
+#endif
+
+ destroy_ai(ai);
+ return 0;
+
+out_wl:
+ ubi_wl_close(ubi);
+out_vtbl:
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_ai:
+ destroy_ai(ai);
+ return err;
+}
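+
+/*
+ * Decision table for the fastmap-enabled attach path above:
+ *
+ *	scan_fast() result           follow-up
+ *	---------------------------  ------------------------------------
+ *	0                            attach from the fastmap
+ *	UBI_NO_FASTMAP               scan_all() from UBI_FM_MAX_START,
+ *	                             reusing the PEBs already scanned
+ *	UBI_BAD_FASTMAP or ECC err   fresh attach info, scan_all() from 0
+ *	other negative error         fail the attach
+ */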
+
+/**
+ * self_check_ai - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero if the attaching information is all right, and a
+ * negative error code if not or if an error occurred.
+ */
+static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int pnum, err, vols_found = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *last_aeb;
+ uint8_t *buf;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ /*
+ * At first, check that attaching information is OK.
+ */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ int leb_count = 0;
+
+ cond_resched();
+
+ vols_found += 1;
+
+ if (ai->is_empty) {
+ ubi_err(ubi, "bad is_empty flag");
+ goto bad_av;
+ }
+
+ if (av->vol_id < 0 || av->highest_lnum < 0 ||
+ av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
+ av->data_pad < 0 || av->last_data_size < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad_av;
+ }
+
+ if (av->vol_id >= UBI_MAX_VOLUMES &&
+ av->vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "bad vol_id");
+ goto bad_av;
+ }
+
+ if (av->vol_id > ai->highest_vol_id) {
+ ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
+ ai->highest_vol_id, av->vol_id);
+ goto out;
+ }
+
+ if (av->vol_type != UBI_DYNAMIC_VOLUME &&
+ av->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad_av;
+ }
+
+ if (av->data_pad > ubi->leb_size / 2) {
+ ubi_err(ubi, "bad data_pad");
+ goto bad_av;
+ }
+
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ cond_resched();
+
+ last_aeb = aeb;
+ leb_count += 1;
+
+ if (aeb->pnum < 0 || aeb->ec < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad_aeb;
+ }
+
+ if (aeb->ec < ai->min_ec) {
+ ubi_err(ubi, "bad ai->min_ec (%d), %d found",
+ ai->min_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->ec > ai->max_ec) {
+ ubi_err(ubi, "bad ai->max_ec (%d), %d found",
+ ai->max_ec, aeb->ec);
+ goto bad_aeb;
+ }
+
+ if (aeb->pnum >= ubi->peb_count) {
+ ubi_err(ubi, "too high PEB number %d, total PEBs %d",
+ aeb->pnum, ubi->peb_count);
+ goto bad_aeb;
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME) {
+ if (aeb->lnum >= av->used_ebs) {
+ ubi_err(ubi, "bad lnum or used_ebs");
+ goto bad_aeb;
+ }
+ } else {
+ if (av->used_ebs != 0) {
+ ubi_err(ubi, "non-zero used_ebs");
+ goto bad_aeb;
+ }
+ }
+
+ if (aeb->lnum > av->highest_lnum) {
+ ubi_err(ubi, "incorrect highest_lnum or lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (av->leb_count != leb_count) {
+ ubi_err(ubi, "bad leb_count, %d objects in the tree",
+ leb_count);
+ goto bad_av;
+ }
+
+ if (!last_aeb)
+ continue;
+
+ aeb = last_aeb;
+
+ if (aeb->lnum != av->highest_lnum) {
+ ubi_err(ubi, "bad highest_lnum");
+ goto bad_aeb;
+ }
+ }
+
+ if (vols_found != ai->vols_found) {
+ ubi_err(ubi, "bad ai->vols_found %d, should be %d",
+ ai->vols_found, vols_found);
+ goto out;
+ }
+
+ /* Check that attaching information is correct */
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ last_aeb = NULL;
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ int vol_type;
+
+ cond_resched();
+
+ last_aeb = aeb;
+
+ err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "VID header is not OK (%d)",
+ err);
+ if (err > 0)
+ err = -EIO;
+ return err;
+ }
+
+ vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ if (av->vol_type != vol_type) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
+ ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
+ ubi_err(ubi, "bad vol_id %d", av->vol_id);
+ goto bad_vid_hdr;
+ }
+
+ if (av->compat != vidh->compat) {
+ ubi_err(ubi, "bad compat %d", vidh->compat);
+ goto bad_vid_hdr;
+ }
+
+ if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err(ubi, "bad lnum %d", aeb->lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
+ ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
+ goto bad_vid_hdr;
+ }
+
+ if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
+ ubi_err(ubi, "bad data_pad %d", av->data_pad);
+ goto bad_vid_hdr;
+ }
+ }
+
+ if (!last_aeb)
+ continue;
+
+ if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
+ ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
+ goto bad_vid_hdr;
+ }
+
+ if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
+ ubi_err(ubi, "bad last_data_size %d",
+ av->last_data_size);
+ goto bad_vid_hdr;
+ }
+ }
+
+ /*
+ * Make sure that all the physical eraseblocks are in one of the lists
+ * or trees.
+ */
+ buf = kzalloc(ubi->peb_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ err = ubi_io_is_bad(ubi, pnum);
+ if (err < 0) {
+ kfree(buf);
+ return err;
+ } else if (err)
+ buf[pnum] = 1;
+ }
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->corr, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ buf[aeb->pnum] = 1;
+
+ list_for_each_entry(aeb, &ai->alien, u.list)
+ buf[aeb->pnum] = 1;
+
+ err = 0;
+ for (pnum = 0; pnum < ubi->peb_count; pnum++)
+ if (!buf[pnum]) {
+ ubi_err(ubi, "PEB %d is not referred", pnum);
+ err = 1;
+ }
+
+ kfree(buf);
+ if (err)
+ goto out;
+ return 0;
+
+bad_aeb:
+ ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
+ ubi_dump_aeb(aeb, 0);
+ ubi_dump_av(av);
+ goto out;
+
+bad_av:
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ goto out;
+
+bad_vid_hdr:
+ ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
+ ubi_dump_av(av);
+ ubi_dump_vid_hdr(vidh);
+
+out:
+ dump_stack();
+ return -EINVAL;
+}
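+
+/*
+ * The final consistency pass above is a one-byte-per-PEB presence map:
+ * every PEB must be reachable from a volume tree or from the free,
+ * corr, erase or alien list. A minimal sketch of the same idea:
+ */
+static int example_count_unreferred(const uint8_t *buf, int peb_count)
+{
+ int pnum, missing = 0;
+
+ for (pnum = 0; pnum < peb_count; pnum++)
+ if (!buf[pnum]) /* never marked by any list or tree */
+ missing += 1;
+
+ return missing; /* 0 means every PEB is accounted for */
+}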
diff --git a/roms/u-boot/drivers/mtd/ubi/build.c b/roms/u-boot/drivers/mtd/ubi/build.c
new file mode 100644
index 000000000..61e38ba1a
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/build.c
@@ -0,0 +1,1554 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём),
+ * Frank Haverkamp
+ */
+
+/*
+ * This file includes UBI initialization and building of UBI devices.
+ *
+ * When UBI is initialized, it attaches all the MTD devices specified as the
+ * module load parameters or the kernel boot parameters. If MTD devices were
+ * not specified, UBI does not attach any MTD device, but it is possible to do
+ * that later using the "UBI control device".
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/namei.h>
+#include <linux/stat.h>
+#include <linux/miscdevice.h>
+#include <linux/log2.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/major.h>
+#else
+#include <linux/bug.h>
+#include <linux/log2.h>
+#endif
+#include <linux/err.h>
+#include <ubi_uboot.h>
+#include <linux/mtd/partitions.h>
+
+#include "ubi.h"
+
+/* Maximum length of the 'mtd=' parameter */
+#define MTD_PARAM_LEN_MAX 64
+
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 4
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
+#ifdef CONFIG_MTD_UBI_MODULE
+#define ubi_is_module() 1
+#else
+#define ubi_is_module() 0
+#endif
+
+#if (CONFIG_SYS_MALLOC_LEN < (512 << 10))
+#error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k
+#endif
+
+/**
+ * struct mtd_dev_param - MTD device parameter description data structure.
+ * @name: MTD character device node path, MTD device name, or MTD device number
+ * string
+ * @ubi_num: UBI device number to assign to the newly created UBI device
+ * @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ */
+struct mtd_dev_param {
+ char name[MTD_PARAM_LEN_MAX];
+ int ubi_num;
+ int vid_hdr_offs;
+ int max_beb_per1024;
+};
+
+/* Numbers of elements set in the @mtd_dev_param array */
+static int __initdata mtd_devs;
+
+/* MTD devices specification parameters */
+static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
+#ifndef __UBOOT__
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/* UBI module parameter to enable fastmap automatically on non-fastmap images */
+static bool fm_autoconvert;
+static bool fm_debug;
+#endif
+#else
+#ifdef CONFIG_MTD_UBI_FASTMAP
+#if !defined(CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT)
+#define CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT 0
+#endif
+static bool fm_autoconvert = CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT;
+#if !defined(CONFIG_MTD_UBI_FM_DEBUG)
+#define CONFIG_MTD_UBI_FM_DEBUG 0
+#endif
+static bool fm_debug = CONFIG_MTD_UBI_FM_DEBUG;
+#endif
+#endif
+
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+#ifndef __UBOOT__
+/* UBI control character device */
+static struct miscdevice ubi_ctrl_cdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ubi_ctrl",
+ .fops = &ubi_ctrl_cdev_operations,
+};
+#endif
+
+/* All UBI devices in system */
+#ifndef __UBOOT__
+static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+#else
+struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+#endif
+
+#ifndef __UBOOT__
+/* Serializes UBI devices creations and removals */
+DEFINE_MUTEX(ubi_devices_mutex);
+
+/* Protects @ubi_devices and @ubi->ref_count */
+static DEFINE_SPINLOCK(ubi_devices_lock);
+
+/* "Show" method for files in '/<sysfs>/class/ubi/' */
+static ssize_t ubi_version_show(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", UBI_VERSION);
+}
+
+/* UBI version attribute ('/<sysfs>/class/ubi/version') */
+static struct class_attribute ubi_class_attrs[] = {
+ __ATTR(version, S_IRUGO, ubi_version_show, NULL),
+ __ATTR_NULL
+};
+
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class ubi_class = {
+ .name = UBI_NAME_STR,
+ .owner = THIS_MODULE,
+ .class_attrs = ubi_class_attrs,
+};
+
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
+static struct device_attribute dev_eraseblock_size =
+ __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_avail_eraseblocks =
+ __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_total_eraseblocks =
+ __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_volumes_count =
+ __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_ec =
+ __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_reserved_for_bad =
+ __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bad_peb_count =
+ __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_max_vol_count =
+ __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_min_io_size =
+ __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_bgt_enabled =
+ __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_num =
+ __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+#endif
+
+/**
+ * ubi_volume_notify - send a volume change notification.
+ * @ubi: UBI device description object
+ * @vol: volume description object of the changed volume
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ *
+ * This is a helper function which notifies all subscribers about a volume
+ * change event (creation, removal, re-sizing, re-naming, updating). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
+{
+ int ret;
+ struct ubi_notification nt;
+
+ ubi_do_get_device_info(ubi, &nt.di);
+ ubi_do_get_volume_info(ubi, vol, &nt.vi);
+
+ switch (ntype) {
+ case UBI_VOLUME_ADDED:
+ case UBI_VOLUME_REMOVED:
+ case UBI_VOLUME_RESIZED:
+ case UBI_VOLUME_RENAMED:
+ ret = ubi_update_fastmap(ubi);
+ if (ret)
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+ }
+
+ return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
+}
+
+/**
+ * ubi_notify_all - send a notification to all volumes.
+ * @ubi: UBI device description object
+ * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
+ * @nb: the notifier to call
+ *
+ * This function walks all volumes of UBI device @ubi and sends the @ntype
+ * notification for each volume. If @nb is %NULL, then all registered notifiers
+ * are called, otherwise only the @nb notifier is called. Returns the number of
+ * sent notifications.
+ */
+int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
+{
+ struct ubi_notification nt;
+ int i, count = 0;
+#ifndef __UBOOT__
+ int ret;
+#endif
+
+ ubi_do_get_device_info(ubi, &nt.di);
+
+ mutex_lock(&ubi->device_mutex);
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ /*
+ * Since the @ubi->device is locked, and we are not going to
+ * change @ubi->volumes, we do not have to lock
+ * @ubi->volumes_lock.
+ */
+ if (!ubi->volumes[i])
+ continue;
+
+ ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
+#ifndef __UBOOT__
+ if (nb)
+ nb->notifier_call(nb, ntype, &nt);
+ else
+ ret = blocking_notifier_call_chain(&ubi_notifiers, ntype,
+ &nt);
+#endif
+ count += 1;
+ }
+ mutex_unlock(&ubi->device_mutex);
+
+ return count;
+}
+
+/**
+ * ubi_enumerate_volumes - send "add" notification for all existing volumes.
+ * @nb: the notifier to call
+ *
+ * This function walks all UBI devices and volumes and sends the
+ * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
+ * registered notifiers are called, otherwise only the @nb notifier is called.
+ * Returns the number of sent notifications.
+ */
+int ubi_enumerate_volumes(struct notifier_block *nb)
+{
+ int i, count = 0;
+
+ /*
+ * Since the @ubi_devices_mutex is locked, and we are not going to
+ * change @ubi_devices, we do not have to lock @ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (!ubi)
+ continue;
+ count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
+ }
+
+ return count;
+}
+
+/**
+ * ubi_get_device - get UBI device.
+ * @ubi_num: UBI device number
+ *
+ * This function returns UBI device description object for UBI device number
+ * @ubi_num, or %NULL if the device does not exist. This function increases the
+ * device reference count to prevent removal of the device. In other words, the
+ * device cannot be removed if its reference count is not zero.
+ */
+struct ubi_device *ubi_get_device(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (ubi) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi;
+}
+
+/**
+ * ubi_put_device - drop an UBI device reference.
+ * @ubi: UBI device description object
+ */
+void ubi_put_device(struct ubi_device *ubi)
+{
+ spin_lock(&ubi_devices_lock);
+ ubi->ref_count -= 1;
+ put_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+}
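+
+/*
+ * Typical shape of a user of the two functions above (a sketch, not a
+ * call site from this file): ubi_get_device() and ubi_put_device() are
+ * always paired, which keeps ubi_detach_mtd_dev() from destroying a
+ * device that is still in use.
+ */
+static int example_with_ubi_device(int ubi_num)
+{
+ struct ubi_device *ubi = ubi_get_device(ubi_num);
+
+ if (!ubi)
+ return -ENODEV;
+
+ /* ... use @ubi; its reference count is elevated here ... */
+
+ ubi_put_device(ubi);
+ return 0;
+}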
+
+/**
+ * ubi_get_by_major - get UBI device by character device major number.
+ * @major: major number
+ *
+ * This function is similar to 'ubi_get_device()', but it searches the device
+ * by its major number.
+ */
+struct ubi_device *ubi_get_by_major(int major)
+{
+ int i;
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+ return ubi;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return NULL;
+}
+
+/**
+ * ubi_major2num - get UBI device number by character device major number.
+ * @major: major number
+ *
+ * This function searches for the UBI device number object by its major number.
+ * If the UBI device was not found, this function returns %-ENODEV, otherwise
+ * the UBI device number is returned.
+ */
+int ubi_major2num(int major)
+{
+ int i, ubi_num = -ENODEV;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_num = ubi->ubi_num;
+ break;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi_num;
+}
+
+#ifndef __UBOOT__
+/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
+static ssize_t dev_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct ubi_device *ubi;
+
+ /*
+ * The below code looks weird, but it actually makes sense. We get the
+ * UBI device reference from the contained 'struct ubi_device'. But it
+ * is unclear if the device was removed or not yet. Indeed, if the
+ * device was removed before we increased its reference count,
+	 * 'ubi_get_device()' will return %NULL and we fail.
+ *
+ * Remember, 'struct ubi_device' is freed in the release function, so
+ * we still can use 'ubi->ubi_num'.
+ */
+ ubi = container_of(dev, struct ubi_device, dev);
+ ubi = ubi_get_device(ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ if (attr == &dev_eraseblock_size)
+ ret = sprintf(buf, "%d\n", ubi->leb_size);
+ else if (attr == &dev_avail_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->avail_pebs);
+ else if (attr == &dev_total_eraseblocks)
+ ret = sprintf(buf, "%d\n", ubi->good_peb_count);
+ else if (attr == &dev_volumes_count)
+ ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
+ else if (attr == &dev_max_ec)
+ ret = sprintf(buf, "%d\n", ubi->max_ec);
+ else if (attr == &dev_reserved_for_bad)
+ ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
+ else if (attr == &dev_bad_peb_count)
+ ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
+ else if (attr == &dev_max_vol_count)
+ ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
+ else if (attr == &dev_min_io_size)
+ ret = sprintf(buf, "%d\n", ubi->min_io_size);
+ else if (attr == &dev_bgt_enabled)
+ ret = sprintf(buf, "%d\n", ubi->thread_enabled);
+ else if (attr == &dev_mtd_num)
+ ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else
+ ret = -EINVAL;
+
+ ubi_put_device(ubi);
+ return ret;
+}
+
+static struct attribute *ubi_dev_attrs[] = {
+ &dev_eraseblock_size.attr,
+ &dev_avail_eraseblocks.attr,
+ &dev_total_eraseblocks.attr,
+ &dev_volumes_count.attr,
+ &dev_max_ec.attr,
+ &dev_reserved_for_bad.attr,
+ &dev_bad_peb_count.attr,
+ &dev_max_vol_count.attr,
+ &dev_min_io_size.attr,
+ &dev_bgt_enabled.attr,
+ &dev_mtd_num.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(ubi_dev);
+
+static void dev_release(struct device *dev)
+{
+ struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
+
+ kfree(ubi);
+}
+
+/**
+ * ubi_sysfs_init - initialize sysfs for an UBI device.
+ * @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ * taken
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
+{
+ int err;
+
+ ubi->dev.release = dev_release;
+ ubi->dev.devt = ubi->cdev.dev;
+ ubi->dev.class = &ubi_class;
+ ubi->dev.groups = ubi_dev_groups;
+ dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
+ err = device_register(&ubi->dev);
+ if (err)
+ return err;
+
+ *ref = 1;
+ return 0;
+}
+
+/**
+ * ubi_sysfs_close - close sysfs for an UBI device.
+ * @ubi: UBI device description object
+ */
+static void ubi_sysfs_close(struct ubi_device *ubi)
+{
+ device_unregister(&ubi->dev);
+}
+#endif
+
+/**
+ * kill_volumes - destroy all user volumes.
+ * @ubi: UBI device description object
+ */
+static void kill_volumes(struct ubi_device *ubi)
+{
+ int i;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i])
+ ubi_free_volume(ubi, ubi->volumes[i]);
+}
+
+/**
+ * uif_init - initialize user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ * taken, otherwise set to %0
+ *
+ * This function initializes various user interfaces for an UBI device. If the
+ * initialization fails at an early stage, this function frees all the
+ * resources it allocated, returns an error, and @ref is set to %0. However,
+ * if the initialization fails after the UBI device was registered in the
+ * driver core subsystem, this function takes a reference to @ubi->dev, because
+ * otherwise the release function ('dev_release()') would free the whole @ubi
+ * object. The @ref argument is set to %1 in this case. The caller has to put
+ * this reference.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int uif_init(struct ubi_device *ubi, int *ref)
+{
+ int i, err;
+#ifndef __UBOOT__
+ dev_t dev;
+#endif
+
+ *ref = 0;
+ sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
+
+ /*
+ * Major numbers for the UBI character devices are allocated
+ * dynamically. Major numbers of volume character devices are
+ * equivalent to ones of the corresponding UBI character device. Minor
+ * numbers of UBI character devices are 0, while minor numbers of
+ * volume character devices start from 1. Thus, we allocate one major
+ * number and ubi->vtbl_slots + 1 minor numbers.
+ */
+ err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
+ if (err) {
+ ubi_err(ubi, "cannot register UBI character devices");
+ return err;
+ }
+
+ ubi_assert(MINOR(dev) == 0);
+ cdev_init(&ubi->cdev, &ubi_cdev_operations);
+ dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
+ ubi->cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&ubi->cdev, dev, 1);
+ if (err) {
+ ubi_err(ubi, "cannot add character device");
+ goto out_unreg;
+ }
+
+ err = ubi_sysfs_init(ubi, ref);
+ if (err)
+ goto out_sysfs;
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i]) {
+ err = ubi_add_volume(ubi, ubi->volumes[i]);
+ if (err) {
+ ubi_err(ubi, "cannot add volume %d", i);
+ goto out_volumes;
+ }
+ }
+
+ return 0;
+
+out_volumes:
+ kill_volumes(ubi);
+out_sysfs:
+ if (*ref)
+ get_device(&ubi->dev);
+ ubi_sysfs_close(ubi);
+ cdev_del(&ubi->cdev);
+out_unreg:
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+ ubi_err(ubi, "cannot initialize UBI %s, error %d",
+ ubi->ubi_name, err);
+ return err;
+}
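+
+/*
+ * The minor number scheme described in uif_init() above, as a sketch:
+ * the UBI character device itself takes minor 0 and volume X takes
+ * minor X + 1 under the same dynamically allocated major. How the
+ * volume code would derive its device number (illustrative only):
+ */
+static inline dev_t example_volume_devt(const struct ubi_device *ubi,
+ int vol_id)
+{
+ return MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+}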
+
+/**
+ * uif_close - close user interfaces for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * Note, since this function un-registers UBI volume device objects (@vol->dev),
+ * the memory allocated for the volumes is freed as well (in the release
+ * function).
+ */
+static void uif_close(struct ubi_device *ubi)
+{
+ kill_volumes(ubi);
+ ubi_sysfs_close(ubi);
+ cdev_del(&ubi->cdev);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+}
+
+/**
+ * ubi_free_internal_volumes - free internal volumes.
+ * @ubi: UBI device description object
+ */
+void ubi_free_internal_volumes(struct ubi_device *ubi)
+{
+ int i;
+
+ for (i = ubi->vtbl_slots;
+ i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ kfree(ubi->volumes[i]->eba_tbl);
+ kfree(ubi->volumes[i]);
+ }
+}
+
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
+{
+ int limit, device_pebs;
+ uint64_t device_size;
+
+ if (!max_beb_per1024)
+ return 0;
+
+ /*
+ * Here we are using size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+ * whole device and bad eraseblocks are not fairly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
+ device_size = mtd_get_device_size(ubi->mtd);
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
+
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
+
+ return limit;
+}
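+
+/*
+ * get_bad_peb_limit() with concrete, hypothetical numbers: a 512 MiB
+ * chip with 128 KiB eraseblocks has device_pebs = 4096, so the default
+ * max_beb_per1024 = 20 yields
+ *
+ *	limit = 4096 * 20 / 1024 = 80 PEBs
+ *
+ * reserved for bad PEB handling. The "round it up" step only fires
+ * when the division truncates, e.g. 1000 * 20 / 1024 = 19, which is
+ * rounded up to 20.
+ */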
+
+/**
+ * io_init - initialize I/O sub-system for a given UBI device.
+ * @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ *
+ * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
+ * assumed:
+ * o EC header is always at offset zero - this cannot be changed;
+ * o VID header starts just after the EC header at the closest address
+ * aligned to @io->hdrs_min_io_size;
+ * o data starts just after the VID header at the closest address aligned to
+ * @io->min_io_size
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
+{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
+ if (ubi->mtd->numeraseregions != 0) {
+ /*
+ * Some flashes have several erase regions. Different regions
+ * may have different eraseblock size and other
+ * characteristics. It looks like mostly multi-region flashes
+ * have one "main" region and one or more small regions to
+ * store boot loader code or boot parameters or whatever. I
+ * guess we should just pick the largest region. But this is
+ * not implemented.
+ */
+ ubi_err(ubi, "multiple regions, not implemented");
+ return -EINVAL;
+ }
+
+ if (ubi->vid_hdr_offset < 0)
+ return -EINVAL;
+
+ /*
+ * Note, in this implementation we support MTD devices with 0x7FFFFFFF
+ * physical eraseblocks maximum.
+ */
+
+ ubi->peb_size = ubi->mtd->erasesize;
+ ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
+ ubi->flash_size = ubi->mtd->size;
+
+ if (mtd_can_have_bb(ubi->mtd)) {
+ ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
+
+ if (ubi->mtd->type == MTD_NORFLASH) {
+ ubi_assert(ubi->mtd->writesize == 1);
+ ubi->nor_flash = 1;
+ }
+
+ ubi->min_io_size = ubi->mtd->writesize;
+ ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
+
+ /*
+ * Make sure minimal I/O unit is power of 2. Note, there is no
+ * fundamental reason for this assumption. It is just an optimization
+ * which allows us to avoid costly division operations.
+ */
+ if (!is_power_of_2(ubi->min_io_size)) {
+ ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
+ ubi->min_io_size);
+ return -EINVAL;
+ }
+
+ ubi_assert(ubi->hdrs_min_io_size > 0);
+ ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
+ ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
+
+ ubi->max_write_size = ubi->mtd->writebufsize;
+ /*
+	 * Maximum write size has to be greater than or equal to the min.
+	 * I/O size, and be a multiple of the min. I/O size.
+ */
+ if (ubi->max_write_size < ubi->min_io_size ||
+ ubi->max_write_size % ubi->min_io_size ||
+ !is_power_of_2(ubi->max_write_size)) {
+ ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
+ ubi->max_write_size, ubi->min_io_size);
+ return -EINVAL;
+ }
+
+ /* Calculate default aligned sizes of EC and VID headers */
+ ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
+ ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
+
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+
+ if (ubi->vid_hdr_offset == 0)
+ /* Default offset */
+ ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
+ ubi->ec_hdr_alsize;
+ else {
+ ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
+ ~(ubi->hdrs_min_io_size - 1);
+ ubi->vid_hdr_shift = ubi->vid_hdr_offset -
+ ubi->vid_hdr_aloffset;
+ }
+
+ /* Similar for the data offset */
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
+ ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
+
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
+
+ /* The shift must be aligned to 32-bit boundary */
+ if (ubi->vid_hdr_shift % 4) {
+ ubi_err(ubi, "unaligned VID header shift %d",
+ ubi->vid_hdr_shift);
+ return -EINVAL;
+ }
+
+ /* Check sanity */
+ if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
+ ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
+ ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
+ ubi->leb_start & (ubi->min_io_size - 1)) {
+ ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
+ ubi->vid_hdr_offset, ubi->leb_start);
+ return -EINVAL;
+ }
+
+ /*
+	 * Set the maximum amount of erroneous physical eraseblocks to 10%.
+	 * Erroneous PEBs are those which have read errors.
+ */
+ ubi->max_erroneous = ubi->peb_count / 10;
+ if (ubi->max_erroneous < 16)
+ ubi->max_erroneous = 16;
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
+
+ /*
+ * It may happen that EC and VID headers are situated in one minimal
+ * I/O unit. In this case we can only accept this UBI image in
+ * read-only mode.
+ */
+ if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
+ ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
+ ubi->ro_mode = 1;
+ }
+
+ ubi->leb_size = ubi->peb_size - ubi->leb_start;
+
+ if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
+ ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
+ ubi->ro_mode = 1;
+ }
+
+ /*
+ * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
+ * unfortunately, MTD does not provide this information. We should loop
+ * over all physical eraseblocks and invoke mtd->block_is_bad() for
+ * each physical eraseblock. So, we leave @ubi->bad_peb_count
+ * uninitialized so far.
+ */
+
+ return 0;
+}
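+
+/*
+ * The default layout computed by io_init(), worked through for a
+ * hypothetical NAND chip with 128 KiB PEBs, 2048-byte pages and
+ * 512-byte sub-pages (hdrs_min_io_size = 512):
+ *
+ *	ec_hdr_alsize  = ALIGN(64, 512)        = 512
+ *	vid_hdr_offset = ec_hdr_alsize         = 512
+ *	leb_start      = ALIGN(512 + 64, 2048) = 2048
+ *	leb_size       = 131072 - 2048         = 129024
+ *
+ * i.e. the EC header owns the first sub-page, the VID header the
+ * second, and LEB data starts at the first full page boundary.
+ */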
+
+/**
+ * autoresize - re-size the volume which has the "auto-resize" flag set.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to re-size
+ *
+ * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
+ * the volume table to the largest possible size. See comments in ubi-header.h
+ * for more description of the flag. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+static int autoresize(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_volume_desc desc;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
+
+ if (ubi->ro_mode) {
+ ubi_warn(ubi, "skip auto-resize because of R/O mode");
+ return 0;
+ }
+
+ /*
+ * Clear the auto-resize flag in the volume in-memory copy of the
+ * volume table, and 'ubi_resize_volume()' will propagate this change
+ * to the flash.
+ */
+ ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
+
+ if (ubi->avail_pebs == 0) {
+ struct ubi_vtbl_record vtbl_rec;
+
+ /*
+ * No available PEBs to re-size the volume, clear the flag on
+ * flash and exit.
+ */
+ vtbl_rec = ubi->vtbl[vol_id];
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
+ vol_id);
+ } else {
+ desc.vol = vol;
+ err = ubi_resize_volume(&desc,
+ old_reserved_pebs + ubi->avail_pebs);
+ if (err)
+ ubi_err(ubi, "cannot auto-resize volume %d",
+ vol_id);
+ }
+
+ if (err)
+ return err;
+
+ ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
+ vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
+ return 0;
+}
+
+/**
+ * ubi_attach_mtd_dev - attach an MTD device.
+ * @mtd: MTD device description object
+ * @ubi_num: number to assign to the new UBI device
+ * @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
+ *
+ * This function attaches MTD device @mtd to UBI and assigns @ubi_num number
+ * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
+ * which case this function finds a vacant device number and assigns it
+ * automatically. Returns the new UBI device number in case of success and a
+ * negative error code in case of failure.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024)
+{
+ struct ubi_device *ubi;
+ int i, err, ref = 0;
+
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
+ /*
+ * Check if we already have the same MTD device attached.
+ *
+ * Note, this function assumes that UBI devices creations and deletions
+ * are serialized, so it does not take the &ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && mtd->index == ubi->mtd->index) {
+ ubi_err(ubi, "mtd%d is already attached to ubi%d",
+ mtd->index, i);
+ return -EEXIST;
+ }
+ }
+
+ /*
+ * Make sure this MTD device is not emulated on top of an UBI volume
+ * already. Well, generally this recursion works fine, but there are
+ * different problems like the UBI module takes a reference to itself
+ * by attaching (and thus, opening) the emulated MTD device. This
+ * results in inability to unload the module. And in general it makes
+ * no sense to attach emulated MTD devices, so we prohibit this.
+ */
+ if (mtd->type == MTD_UBIVOLUME) {
+ ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
+ mtd->index);
+ return -EINVAL;
+ }
+
+ if (ubi_num == UBI_DEV_NUM_AUTO) {
+ /* Search for an empty slot in the @ubi_devices array */
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+ if (!ubi_devices[ubi_num])
+ break;
+ if (ubi_num == UBI_MAX_DEVICES) {
+ ubi_err(ubi, "only %d UBI devices may be created",
+ UBI_MAX_DEVICES);
+ return -ENFILE;
+ }
+ } else {
+ if (ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Make sure ubi_num is not busy */
+ if (ubi_devices[ubi_num]) {
+ ubi_err(ubi, "already exists");
+ return -EEXIST;
+ }
+ }
+
+ ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
+ if (!ubi)
+ return -ENOMEM;
+
+ ubi->mtd = mtd;
+ ubi->ubi_num = ubi_num;
+ ubi->vid_hdr_offset = vid_hdr_offset;
+ ubi->autoresize_vol_id = -1;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_pool.used = ubi->fm_pool.size = 0;
+ ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
+
+ /*
+ * fm_pool.max_size is 5% of the total number of PEBs but it's also
+ * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
+ */
+ ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
+ ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
+ ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
+ UBI_FM_MIN_POOL_SIZE);
+
+ ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
+ ubi->fm_disabled = !fm_autoconvert;
+ if (fm_debug)
+ ubi_enable_dbg_chk_fastmap(ubi);
+
+ if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
+ <= UBI_FM_MAX_START) {
+ ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
+ UBI_FM_MAX_START);
+ ubi->fm_disabled = 1;
+ }
+
+ ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "default fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
+#else
+ ubi->fm_disabled = 1;
+#endif
+ mutex_init(&ubi->buf_mutex);
+ mutex_init(&ubi->ckvol_mutex);
+ mutex_init(&ubi->device_mutex);
+ spin_lock_init(&ubi->volumes_lock);
+ init_rwsem(&ubi->fm_protect);
+ init_rwsem(&ubi->fm_eba_sem);
+
+ ubi_msg(ubi, "attaching mtd%d", mtd->index);
+
+ err = io_init(ubi, max_beb_per1024);
+ if (err)
+ goto out_free;
+
+ err = -ENOMEM;
+ ubi->peb_buf = vmalloc(ubi->peb_size);
+ if (!ubi->peb_buf)
+ goto out_free;
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_buf = vzalloc(ubi->fm_size);
+ if (!ubi->fm_buf)
+ goto out_free;
+#endif
+ err = ubi_attach(ubi, 0);
+ if (err) {
+ ubi_err(ubi, "failed to attach mtd%d, error %d",
+ mtd->index, err);
+ goto out_free;
+ }
+
+ if (ubi->autoresize_vol_id != -1) {
+ err = autoresize(ubi, ubi->autoresize_vol_id);
+ if (err)
+ goto out_detach;
+ }
+
+ err = uif_init(ubi, &ref);
+ if (err)
+ goto out_detach;
+
+ err = ubi_debugfs_init_dev(ubi);
+ if (err)
+ goto out_uif;
+
+ ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
+ if (IS_ERR(ubi->bgt_thread)) {
+ err = PTR_ERR(ubi->bgt_thread);
+ ubi_err(ubi, "cannot spawn \"%s\", error %d",
+ ubi->bgt_name, err);
+ goto out_debugfs;
+ }
+
+ ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
+ mtd->index, mtd->name, ubi->flash_size >> 20);
+ ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
+
+ /*
+ * The below lock makes sure we do not race with 'ubi_thread()' which
+ * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
+ */
+ spin_lock(&ubi->wl_lock);
+ ubi->thread_enabled = 1;
+#ifndef __UBOOT__
+ wake_up_process(ubi->bgt_thread);
+#else
+ ubi_do_worker(ubi);
+#endif
+
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_devices[ubi_num] = ubi;
+ ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
+ return ubi_num;
+
+out_debugfs:
+ ubi_debugfs_exit_dev(ubi);
+out_uif:
+ get_device(&ubi->dev);
+ ubi_assert(ref);
+ uif_close(ubi);
+out_detach:
+ ubi_wl_close(ubi);
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+out_free:
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
+ if (ref)
+ put_device(&ubi->dev);
+ else
+ kfree(ubi);
+ return err;
+}
+
+/**
+ * ubi_detach_mtd_dev - detach an MTD device.
+ * @ubi_num: UBI device number to detach from
+ * @anyway: detach MTD even if device reference count is not zero
+ *
+ * This function destroys an UBI device number @ubi_num and detaches the
+ * underlying MTD device. Returns zero in case of success and %-EBUSY if the
+ * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
+ * exist.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -EINVAL;
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
+ ubi->ref_count -= 1;
+ if (ubi->ref_count) {
+ if (!anyway) {
+ spin_unlock(&ubi_devices_lock);
+ return -EBUSY;
+ }
+ /* This may only happen if there is a bug */
+ ubi_err(ubi, "%s reference count %d, destroy anyway",
+ ubi->ubi_name, ubi->ref_count);
+ }
+ ubi_devices[ubi_num] = NULL;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_assert(ubi_num == ubi->ubi_num);
+ ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
+ ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* If we don't write a new fastmap at detach time we lose all
+ * EC updates that have been made since the last written fastmap.
+ * In case of fastmap debugging we omit the update to simulate an
+ * unclean shutdown. */
+ if (!ubi_dbg_chk_fastmap(ubi))
+ ubi_update_fastmap(ubi);
+#endif
+ /*
+ * Before freeing anything, we have to stop the background thread to
+ * prevent it from doing anything on this device while we are freeing.
+ */
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+
+ /*
+ * Get a reference to the device in order to prevent 'dev_release()'
+ * from freeing the @ubi object.
+ */
+ get_device(&ubi->dev);
+
+ ubi_debugfs_exit_dev(ubi);
+ uif_close(ubi);
+
+ ubi_wl_close(ubi);
+ ubi_free_internal_volumes(ubi);
+ vfree(ubi->vtbl);
+ put_mtd_device(ubi->mtd);
+ vfree(ubi->peb_buf);
+ vfree(ubi->fm_buf);
+ ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
+ put_device(&ubi->dev);
+ return 0;
+}
+
+#ifndef __UBOOT__
+/**
+ * open_mtd_by_chdev - open an MTD device by its character device node path.
+ * @mtd_dev: MTD character device node path
+ *
+ * This helper function opens an MTD device by its character node device path.
+ * Returns MTD device description object in case of success and a negative
+ * error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
+{
+ int err, major, minor, mode;
+ struct path path;
+
+ /* Probably this is an MTD character device node path */
+ err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+ if (err)
+ return ERR_PTR(err);
+
+ /* MTD device number is defined by the major / minor numbers */
+ major = imajor(d_backing_inode(path.dentry));
+ minor = iminor(d_backing_inode(path.dentry));
+ mode = d_backing_inode(path.dentry)->i_mode;
+ path_put(&path);
+ if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
+ return ERR_PTR(-EINVAL);
+
+ if (minor & 1)
+ /*
+ * Just do not think the "/dev/mtdrX" devices support is need,
+ * so do not support them to avoid doing extra work.
+ */
+ return ERR_PTR(-EINVAL);
+
+ return get_mtd_device(NULL, minor / 2);
+}
+#endif
+
+/**
+ * open_mtd_device - open MTD device by name, character device path, or number.
+ * @mtd_dev: name, character device node path, or MTD device device number
+ *
+ * This function tries to open an MTD device described by the @mtd_dev string,
+ * which is first treated as an ASCII MTD device number; if that fails, it is
+ * treated as an MTD device name, and if that also fails, it is treated as an
+ * MTD character device node path. Returns the MTD device description object in
+ * case of success and a negative error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
+{
+ struct mtd_info *mtd;
+ int mtd_num;
+ char *endp;
+
+ mtd_num = simple_strtoul(mtd_dev, &endp, 0);
+ if (*endp != '\0' || mtd_dev == endp) {
+ /*
+ * This does not look like an ASCII integer, probably this is
+ * MTD device name.
+ */
+ mtd = get_mtd_device_nm(mtd_dev);
+#ifndef __UBOOT__
+ if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+ /* Probably this is an MTD character device node path */
+ mtd = open_mtd_by_chdev(mtd_dev);
+#endif
+ } else
+ mtd = get_mtd_device(NULL, mtd_num);
+
+ return mtd;
+}
+
+#ifndef __UBOOT__
+static int __init ubi_init(void)
+#else
+int ubi_init(void)
+#endif
+{
+ int err, i, k;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
+
+ /* Attach MTD devices */
+ for (i = 0; i < mtd_devs; i++) {
+ struct mtd_dev_param *p = &mtd_dev_param[i];
+ struct mtd_info *mtd;
+
+ cond_resched();
+
+ mtd = open_mtd_device(p->name);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ pr_err("UBI error: cannot open mtd %s, error %d\n",
+ p->name, err);
+ /* See comment below re-ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ continue;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, p->ubi_num,
+ p->vid_hdr_offs, p->max_beb_per1024);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ pr_err("UBI error: cannot attach mtd%d\n",
+ mtd->index);
+ put_mtd_device(mtd);
+
+ /*
+ * Originally UBI stopped initializing on any error.
+ * However, later on it was found out that this
+ * behavior is not very good when UBI is compiled into
+ * the kernel and the MTD devices to attach are passed
+ * through the command line. Indeed, UBI failure
+ * stopped whole boot sequence.
+ *
+ * To fix this, we changed the behavior for the
+ * non-module case, but preserved the old behavior for
+ * the module case, just for compatibility. This is a
+ * little inconsistent, though.
+ */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+ }
+
+ err = ubiblock_init();
+ if (err) {
+ pr_err("UBI error: block: cannot initialize, error %d\n", err);
+
+ /* See comment above re-ubi_is_module(). */
+ if (ubi_is_module())
+ goto out_detach;
+ }
+
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ ubi_debugfs_exit();
+out_slab:
+ kmem_cache_destroy(ubi_wl_entry_slab);
+out_dev_unreg:
+ misc_deregister(&ubi_ctrl_cdev);
+out:
+#ifdef __UBOOT__
+ /* Reset any globals that the driver depends on being zeroed */
+ mtd_devs = 0;
+#endif
+ class_unregister(&ubi_class);
+ pr_err("UBI error: cannot initialize UBI, error %d\n", err);
+ return err;
+}
+late_initcall(ubi_init);
+
+#ifndef __UBOOT__
+static void __exit ubi_exit(void)
+#else
+void ubi_exit(void)
+#endif
+{
+ int i;
+
+ ubiblock_exit();
+
+ for (i = 0; i < UBI_MAX_DEVICES; i++)
+ if (ubi_devices[i]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ ubi_debugfs_exit();
+ kmem_cache_destroy(ubi_wl_entry_slab);
+ misc_deregister(&ubi_ctrl_cdev);
+ class_unregister(&ubi_class);
+#ifdef __UBOOT__
+ /* Reset any globals that the driver depends on being zeroed */
+ mtd_devs = 0;
+#endif
+}
+module_exit(ubi_exit);
+
+/**
+ * bytes_str_to_int - convert a number of bytes string into an integer.
+ * @str: the string to convert
+ *
+ * This function returns the positive resulting integer in case of success and a
+ * negative error code in case of failure.
+ */
+static int __init bytes_str_to_int(const char *str)
+{
+ char *endp;
+ unsigned long result;
+
+ result = simple_strtoul(str, &endp, 0);
+ if (str == endp || result >= INT_MAX) {
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
+ return -EINVAL;
+ }
+
+ switch (*endp) {
+ case 'G':
+ result *= 1024;
+ /* fall through */
+ case 'M':
+ result *= 1024;
+ /* fall through */
+ case 'K':
+ result *= 1024;
+ if (endp[1] == 'i' && endp[2] == 'B')
+ endp += 2;
+ /* fall through */
+ case '\0':
+ break;
+ default:
+ pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
+ return -EINVAL;
+ }
+
+ return result;
+}
+
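+/*
+ * Illustrative examples, not part of the original sources - the
+ * fall-through cases above accumulate one factor of 1024 per level:
+ *
+ *	bytes_str_to_int("2048") == 2048
+ *	bytes_str_to_int("512K") == 512 * 1024 == 524288
+ *	bytes_str_to_int("4MiB") == 4 * 1024 * 1024 == 4194304
+ */
+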
+/*
+ * Minimal kstrtoint() for the "mtd=" parser below: returns zero on
+ * success and stores the parsed value in @res, or %-ERANGE if the value
+ * does not fit into an int.
+ */
+int kstrtoint(const char *s, unsigned int base, int *res)
+{
+ unsigned long long tmp;
+
+ tmp = simple_strtoull(s, NULL, base);
+ if (tmp != (unsigned long long)(int)tmp)
+ return -ERANGE;
+
+ /* Callers test for a non-zero return, so report success with 0 */
+ *res = (int)tmp;
+ return 0;
+}
+
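+/*
+ * Illustrative usage, not part of the original sources:
+ *
+ *	int val;
+ *
+ *	if (kstrtoint("25", 10, &val))
+ *		return -EINVAL;	(parse error)
+ *	(on success: val == 25)
+ */
+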
+/**
+ * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
+ * @val: the parameter value to parse
+ * @kp: not used
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of error.
+ */
+#ifndef __UBOOT__
+static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+#else
+int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
+#endif
+{
+ int i, len;
+ struct mtd_dev_param *p;
+ char buf[MTD_PARAM_LEN_MAX];
+ char *pbuf = &buf[0];
+ char *tokens[MTD_PARAM_MAX_COUNT], *token;
+
+ if (!val)
+ return -EINVAL;
+
+ if (mtd_devs == UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ len = strnlen(val, MTD_PARAM_LEN_MAX);
+ if (len == MTD_PARAM_LEN_MAX) {
+ pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
+ return -EINVAL;
+ }
+
+ if (len == 0) {
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
+ return 0;
+ }
+
+ strcpy(buf, val);
+
+ /* Get rid of the final newline */
+ if (buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
+ tokens[i] = strsep(&pbuf, ",");
+
+ if (pbuf) {
+ pr_err("UBI error: too many arguments at \"%s\"\n", val);
+ return -EINVAL;
+ }
+
+ p = &mtd_dev_param[mtd_devs];
+ strcpy(&p->name[0], tokens[0]);
+
+ token = tokens[1];
+ if (token) {
+ p->vid_hdr_offs = bytes_str_to_int(token);
+
+ if (p->vid_hdr_offs < 0)
+ return p->vid_hdr_offs;
+ }
+
+ token = tokens[2];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->max_beb_per1024);
+
+ if (err) {
+ pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+ token);
+ return -EINVAL;
+ }
+ }
+
+ token = tokens[3];
+ if (token) {
+ int err = kstrtoint(token, 10, &p->ubi_num);
+
+ if (err) {
+ pr_err("UBI error: bad value for ubi_num parameter: %s",
+ token);
+ return -EINVAL;
+ }
+ } else
+ p->ubi_num = UBI_DEV_NUM_AUTO;
+
+ mtd_devs += 1;
+ return 0;
+}
+
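+/*
+ * Illustrative parse, not part of the original sources: the value
+ * "nand0,2048,20,3" fills one struct mtd_dev_param as
+ *
+ *	name            = "nand0"
+ *	vid_hdr_offs    = 2048
+ *	max_beb_per1024 = 20
+ *	ubi_num         = 3
+ *
+ * while a bare "nand0" keeps the zeroed defaults for the offsets and
+ * leaves ubi_num == UBI_DEV_NUM_AUTO.
+ */
+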
+module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
+ "Multiple \"mtd\" parameters may be specified.\n"
+ "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
+ "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
+ __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
+ "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
+ "\n"
+ "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+ "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+ "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
+ "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
+ "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
+#ifdef CONFIG_MTD_UBI_FASTMAP
+module_param(fm_autoconvert, bool, 0644);
+MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
+module_param(fm_debug, bool, 0);
+MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
+#endif
+MODULE_VERSION(__stringify(UBI_VERSION));
+MODULE_DESCRIPTION("UBI - Unsorted Block Images");
+MODULE_AUTHOR("Artem Bityutskiy");
+MODULE_LICENSE("GPL");
diff --git a/roms/u-boot/drivers/mtd/ubi/crc32.c b/roms/u-boot/drivers/mtd/ubi/crc32.c
new file mode 100644
index 000000000..9ce061c86
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/crc32.c
@@ -0,0 +1,511 @@
+/*
+ * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
+ * Code was from the public domain, copyright abandoned. Code was
+ * subsequently included in the kernel, thus was re-licensed under the
+ * GNU GPL v2.
+ *
+ * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * Same crc32 function was used in 5 other places in the kernel.
+ * I made one version, and deleted the others.
+ * There are various incantations of crc32(). Some use a seed of 0 or ~0.
+ * Some xor at the end with ~0. The generic crc32() function takes
+ * seed as an argument, and doesn't xor at the end. Then individual
+ * users can do whatever they need.
+ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
+ * fs/jffs2 uses seed 0, doesn't xor with ~0.
+ * fs/partitions/efi.c uses seed ~0, xor's with ~0.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#ifndef __UBOOT__
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <u-boot/crc.h>
+#endif
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#ifndef __UBOOT__
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+#endif
+#include "crc32defs.h"
+#define CRC_LE_BITS 8
+
+#if CRC_LE_BITS == 8
+#define tole(x) cpu_to_le32(x)
+#define tobe(x) cpu_to_be32(x)
+#else
+#define tole(x) (x)
+#define tobe(x) (x)
+#endif
+#include "crc32table.h"
+#ifndef __UBOOT__
+MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
+MODULE_DESCRIPTION("Ethernet CRC32 calculations");
+MODULE_LICENSE("GPL");
+#endif
+/**
+ * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 crc32_le(u32 crc, unsigned char const *p, size_t len);
+
+#if CRC_LE_BITS == 1
+/*
+ * In fact, the table-based code will work in this case, but it can be
+ * simplified by inlining the table in ?: form.
+ */
+
+u32 crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
+ }
+ return crc;
+}
+#else /* Table-based approach */
+
+u32 crc32_le(u32 crc, unsigned char const *p, size_t len)
+{
+# if CRC_LE_BITS == 8
+ const u32 *b = (u32 *)p;
+ const u32 *tab = crc32table_le;
+
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
+# else
+# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
+# endif
+ /* printf("Crc32_le crc=%x\n",crc); */
+ crc = __cpu_to_le32(crc);
+ /* Align it */
+ if((((long)b)&3 && len)){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (void *)p;
+ } while ((--len) && ((long)b)&3 );
+ }
+ if((len >= 4)){
+ /* load data 32 bits wide, xor data 32 bits wide. */
+ size_t save_len = len & 3;
+ len = len >> 2;
+ --b; /* use pre increment below(*++b) for speed */
+ do {
+ crc ^= *++b;
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ } while (--len);
+ b++; /* point to next byte(s) */
+ len = save_len;
+ }
+ /* And the last few bytes */
+ if(len){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (void *)p;
+ } while (--len);
+ }
+
+ return __le32_to_cpu(crc);
+#undef ENDIAN_SHIFT
+#undef DO_CRC
+
+# elif CRC_LE_BITS == 4
+ while (len--) {
+ crc ^= *p++;
+ crc = (crc >> 4) ^ crc32table_le[crc & 15];
+ crc = (crc >> 4) ^ crc32table_le[crc & 15];
+ }
+ return crc;
+# elif CRC_LE_BITS == 2
+ while (len--) {
+ crc ^= *p++;
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ crc = (crc >> 2) ^ crc32table_le[crc & 3];
+ }
+ return crc;
+# endif
+}
+#endif
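+
+/*
+ * Usage sketch, not part of the original sources: following the seed
+ * conventions described at the top of this file, an Ethernet/zlib-style
+ * CRC-32 is obtained with seed ~0 and a final inversion,
+ *
+ *	u32 crc = crc32_le(~0U, buf, len) ^ ~0U;
+ *
+ * while JFFS2-style callers pass a seed of 0 and skip the final xor.
+ */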
+#ifndef __UBOOT__
+/**
+ * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
+ * @crc: seed value for computation. ~0 for Ethernet, sometimes 0 for
+ * other uses, or the previous crc32 value if computing incrementally.
+ * @p: pointer to buffer over which CRC is run
+ * @len: length of buffer @p
+ */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len);
+
+#if CRC_BE_BITS == 1
+/*
+ * In fact, the table-based code will work in this case, but it can be
+ * simplified by inlining the table in ?: form.
+ */
+
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 24;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
+ }
+ return crc;
+}
+
+#else /* Table-based approach */
+u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
+{
+# if CRC_BE_BITS == 8
+ const u32 *b = (u32 *)p;
+ const u32 *tab = crc32table_be;
+
+# ifdef __LITTLE_ENDIAN
+# define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8)
+# else
+# define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8)
+# endif
+
+ crc = __cpu_to_be32(crc);
+ /* Align it */
+ if(unlikely(((long)b)&3 && len)){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (u32 *)p;
+ } while ((--len) && ((long)b)&3 );
+ }
+ if(likely(len >= 4)){
+ /* load data 32 bits wide, xor data 32 bits wide. */
+ size_t save_len = len & 3;
+ len = len >> 2;
+ --b; /* use pre increment below(*++b) for speed */
+ do {
+ crc ^= *++b;
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ DO_CRC(0);
+ } while (--len);
+ b++; /* point to next byte(s) */
+ len = save_len;
+ }
+ /* And the last few bytes */
+ if(len){
+ do {
+ u8 *p = (u8 *)b;
+ DO_CRC(*p++);
+ b = (void *)p;
+ } while (--len);
+ }
+ return __be32_to_cpu(crc);
+#undef ENDIAN_SHIFT
+#undef DO_CRC
+
+# elif CRC_BE_BITS == 4
+ while (len--) {
+ crc ^= *p++ << 24;
+ crc = (crc << 4) ^ crc32table_be[crc >> 28];
+ crc = (crc << 4) ^ crc32table_be[crc >> 28];
+ }
+ return crc;
+# elif CRC_BE_BITS == 2
+ while (len--) {
+ crc ^= *p++ << 24;
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ crc = (crc << 2) ^ crc32table_be[crc >> 30];
+ }
+ return crc;
+# endif
+}
+#endif
+
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(crc32_be);
+#endif
+/*
+ * A brief CRC tutorial.
+ *
+ * A CRC is a long-division remainder. You add the CRC to the message,
+ * and the whole thing (message+CRC) is a multiple of the given
+ * CRC polynomial. To check the CRC, you can either check that the
+ * CRC matches the recomputed value, *or* you can check that the
+ * remainder computed on the message+CRC is 0. This latter approach
+ * is used by a lot of hardware implementations, and is why so many
+ * protocols put the end-of-frame flag after the CRC.
+ *
+ * It's actually the same long division you learned in school, except that
+ * - We're working in binary, so the digits are only 0 and 1, and
+ * - When dividing polynomials, there are no carries. Rather than add and
+ * subtract, we just xor. Thus, we tend to get a bit sloppy about
+ * the difference between adding and subtracting.
+ *
+ * A 32-bit CRC polynomial is actually 33 bits long. But since it's
+ * 33 bits long, bit 32 is always going to be set, so usually the CRC
+ * is written in hex with the most significant bit omitted. (If you're
+ * familiar with the IEEE 754 floating-point format, it's the same idea.)
+ *
+ * Note that a CRC is computed over a string of *bits*, so you have
+ * to decide on the endianness of the bits within each byte. To get
+ * the best error-detecting properties, this should correspond to the
+ * order they're actually sent. For example, standard RS-232 serial is
+ * little-endian; the most significant bit (sometimes used for parity)
+ * is sent last. And when appending a CRC word to a message, you should
+ * do it in the right order, matching the endianness.
+ *
+ * Just like with ordinary division, the remainder is always smaller than
+ * the divisor (the CRC polynomial) you're dividing by. Each step of the
+ * division, you take one more digit (bit) of the dividend and append it
+ * to the current remainder. Then you figure out the appropriate multiple
+ * of the divisor to subtract to bring the remainder back into range.
+ * In binary, it's easy - it has to be either 0 or 1, and to make the
+ * XOR cancel, it's just a copy of bit 32 of the remainder.
+ *
+ * When computing a CRC, we don't care about the quotient, so we can
+ * throw the quotient bit away, but subtract the appropriate multiple of
+ * the polynomial from the remainder and we're back to where we started,
+ * ready to process the next bit.
+ *
+ * A big-endian CRC written this way would be coded like:
+ * for (i = 0; i < input_bits; i++) {
+ * multiple = remainder & 0x80000000 ? CRCPOLY : 0;
+ * remainder = (remainder << 1 | next_input_bit()) ^ multiple;
+ * }
+ * Notice how, to get at bit 32 of the shifted remainder, we look
+ * at bit 31 of the remainder *before* shifting it.
+ *
+ * But also notice how the next_input_bit() bits we're shifting into
+ * the remainder don't actually affect any decision-making until
+ * 32 bits later. Thus, the first 32 cycles of this are pretty boring.
+ * Also, to add the CRC to a message, we need a 32-bit-long hole for it at
+ * the end, so we have to add 32 extra cycles shifting in zeros at the
+ * end of every message.
+ *
+ * So the standard trick is to rearrange merging in the next_input_bit()
+ * until the moment it's needed. Then the first 32 cycles can be precomputed,
+ * and merging in the final 32 zero bits to make room for the CRC can be
+ * skipped entirely.
+ * This changes the code to:
+ * for (i = 0; i < input_bits; i++) {
+ * remainder ^= next_input_bit() << 31;
+ * multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * remainder = (remainder << 1) ^ multiple;
+ * }
+ * With this optimization, the little-endian code is simpler:
+ * for (i = 0; i < input_bits; i++) {
+ * remainder ^= next_input_bit();
+ * multiple = (remainder & 1) ? CRCPOLY : 0;
+ * remainder = (remainder >> 1) ^ multiple;
+ * }
+ *
+ * Note that the other details of endianness have been hidden in CRCPOLY
+ * (which must be bit-reversed) and next_input_bit().
+ *
+ * However, as long as next_input_bit is returning the bits in a sensible
+ * order, we can actually do the merging 8 or more bits at a time rather
+ * than one bit at a time:
+ * for (i = 0; i < input_bytes; i++) {
+ * remainder ^= next_input_byte() << 24;
+ * for (j = 0; j < 8; j++) {
+ * multiple = (remainder & 0x80000000) ? CRCPOLY : 0;
+ * remainder = (remainder << 1) ^ multiple;
+ * }
+ * }
+ * Or in little-endian:
+ * for (i = 0; i < input_bytes; i++) {
+ * remainder ^= next_input_byte();
+ * for (j = 0; j < 8; j++) {
+ * multiple = (remainder & 1) ? CRCPOLY : 0;
+ * remainder = (remainder << 1) ^ multiple;
+ * }
+ * }
+ * If the input is a multiple of 32 bits, you can even XOR in a 32-bit
+ * word at a time and increase the inner loop count to 32.
+ *
+ * You can also mix and match the two loop styles, for example doing the
+ * bulk of a message byte-at-a-time and adding bit-at-a-time processing
+ * for any fractional bytes at the end.
+ *
+ * The only remaining optimization is to the byte-at-a-time table method.
+ * Here, rather than just shifting one bit of the remainder to decide
+ * on the correct multiple to subtract, we can shift a byte at a time.
+ * This produces a 40-bit (rather than a 33-bit) intermediate remainder,
+ * but again the multiple of the polynomial to subtract depends only on
+ * the high bits, the high 8 bits in this case.
+ *
+ * The multiple we need in that case is the low 32 bits of a 40-bit
+ * value whose high 8 bits are given, and which is a multiple of the
+ * generator polynomial. This is simply the CRC-32 of the given
+ * one-byte message.
+ *
+ * Two more details: normally, appending zero bits to a message which
+ * is already a multiple of a polynomial produces a larger multiple of that
+ * polynomial. To enable a CRC to detect this condition, it's common to
+ * invert the CRC before appending it. This makes the remainder of the
+ * message+crc come out not as zero, but some fixed non-zero value.
+ *
+ * The same problem applies to zero bits prepended to the message, and
+ * a similar solution is used. Instead of starting with a remainder of
+ * 0, an initial remainder of all ones is used. As long as you start
+ * the same way on decoding, it doesn't make a difference.
+ */
+
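+/*
+ * Worked check value, not part of the original sources: with the
+ * reflected conventions above (seed ~0, final inversion), the CRC-32 of
+ * the ASCII string "123456789" is the well-known test vector
+ *
+ *	crc32_le(~0U, (unsigned char *)"123456789", 9) ^ ~0U == 0xcbf43926
+ */
+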
+#ifdef UNITTEST
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifndef __UBOOT__
+static void
+buf_dump(char const *prefix, unsigned char const *buf, size_t len)
+{
+ fputs(prefix, stdout);
+ while (len--)
+ printf(" %02x", *buf++);
+ putchar('\n');
+}
+#endif
+
+static void bytereverse(unsigned char *buf, size_t len)
+{
+ while (len--) {
+ unsigned char x = bitrev8(*buf);
+ *buf++ = x;
+ }
+}
+
+static void random_garbage(unsigned char *buf, size_t len)
+{
+ while (len--)
+ *buf++ = (unsigned char) random();
+}
+
+#ifndef __UBOOT__
+static void store_le(u32 x, unsigned char *buf)
+{
+ buf[0] = (unsigned char) x;
+ buf[1] = (unsigned char) (x >> 8);
+ buf[2] = (unsigned char) (x >> 16);
+ buf[3] = (unsigned char) (x >> 24);
+}
+#endif
+
+static void store_be(u32 x, unsigned char *buf)
+{
+ buf[0] = (unsigned char) (x >> 24);
+ buf[1] = (unsigned char) (x >> 16);
+ buf[2] = (unsigned char) (x >> 8);
+ buf[3] = (unsigned char) x;
+}
+
+/*
+ * This checks that CRC(buf + CRC(buf)) = 0, and that
+ * CRC commutes with bit-reversal. This has the side effect
+ * of bytewise bit-reversing the input buffer, and returns
+ * the CRC of the reversed buffer.
+ */
+static u32 test_step(u32 init, unsigned char *buf, size_t len)
+{
+ u32 crc1, crc2;
+ size_t i;
+
+ crc1 = crc32_be(init, buf, len);
+ store_be(crc1, buf + len);
+ crc2 = crc32_be(init, buf, len + 4);
+ if (crc2)
+ printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+ crc2);
+
+ for (i = 0; i <= len + 4; i++) {
+ crc2 = crc32_be(init, buf, i);
+ crc2 = crc32_be(crc2, buf + i, len + 4 - i);
+ if (crc2)
+ printf("\nCRC split fail: 0x%08x\n", crc2);
+ }
+
+ /* Now swap it around for the other test */
+
+ bytereverse(buf, len + 4);
+ init = bitrev32(init);
+ crc2 = bitrev32(crc1);
+ if (crc1 != bitrev32(crc2))
+ printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
+ crc1, crc2, bitrev32(crc2));
+ crc1 = crc32_le(init, buf, len);
+ if (crc1 != crc2)
+ printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1,
+ crc2);
+ crc2 = crc32_le(init, buf, len + 4);
+ if (crc2)
+ printf("\nCRC cancellation fail: 0x%08x should be 0\n",
+ crc2);
+
+ for (i = 0; i <= len + 4; i++) {
+ crc2 = crc32_le(init, buf, i);
+ crc2 = crc32_le(crc2, buf + i, len + 4 - i);
+ if (crc2)
+ printf("\nCRC split fail: 0x%08x\n", crc2);
+ }
+
+ return crc1;
+}
+
+#define SIZE 64
+#define INIT1 0
+#define INIT2 0
+
+int main(void)
+{
+ unsigned char buf1[SIZE + 4];
+ unsigned char buf2[SIZE + 4];
+ unsigned char buf3[SIZE + 4];
+ int i, j;
+ u32 crc1, crc2, crc3;
+
+ for (i = 0; i <= SIZE; i++) {
+ printf("\rTesting length %d...", i);
+ fflush(stdout);
+ random_garbage(buf1, i);
+ random_garbage(buf2, i);
+ for (j = 0; j < i; j++)
+ buf3[j] = buf1[j] ^ buf2[j];
+
+ crc1 = test_step(INIT1, buf1, i);
+ crc2 = test_step(INIT2, buf2, i);
+ /* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */
+ crc3 = test_step(INIT1 ^ INIT2, buf3, i);
+ if (crc3 != (crc1 ^ crc2))
+ printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n",
+ crc3, crc1, crc2);
+ }
+ printf("\nAll test complete. No failures expected.\n");
+ return 0;
+}
+
+#endif /* UNITTEST */
diff --git a/roms/u-boot/drivers/mtd/ubi/crc32defs.h b/roms/u-boot/drivers/mtd/ubi/crc32defs.h
new file mode 100644
index 000000000..f5a540176
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/crc32defs.h
@@ -0,0 +1,32 @@
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+/* How many bits at a time to use. Requires a table of 4<<CRC_xx_BITS bytes. */
+/* For less performance-sensitive, use 4 */
+#ifndef CRC_LE_BITS
+# define CRC_LE_BITS 8
+#endif
+#ifndef CRC_BE_BITS
+# define CRC_BE_BITS 8
+#endif
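+
+/*
+ * For instance (illustrative, not from the original sources): with
+ * CRC_xx_BITS == 8 the lookup table holds 256 u32 entries, i.e.
+ * 4 << 8 == 1024 bytes; with CRC_xx_BITS == 4 it shrinks to 16 entries,
+ * i.e. 4 << 4 == 64 bytes.
+ */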
+
+/*
+ * Little-endian CRC computation. Used with serial bit streams sent
+ * lsbit-first. Be sure to use cpu_to_le32() to append the computed CRC.
+ */
+#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1
+# error CRC_LE_BITS must be a power of 2 between 1 and 8
+#endif
+
+/*
+ * Big-endian CRC computation. Used with serial bit streams sent
+ * msbit-first. Be sure to use cpu_to_be32() to append the computed CRC.
+ */
+#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1
+# error CRC_BE_BITS must be a power of 2 between 1 and 8
+#endif
diff --git a/roms/u-boot/drivers/mtd/ubi/crc32table.h b/roms/u-boot/drivers/mtd/ubi/crc32table.h
new file mode 100644
index 000000000..02ce6fd90
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/crc32table.h
@@ -0,0 +1,136 @@
+/* this file is generated - do not edit */
+
+static const u32 crc32table_le[] = {
+tole(0x00000000L), tole(0x77073096L), tole(0xee0e612cL), tole(0x990951baL),
+tole(0x076dc419L), tole(0x706af48fL), tole(0xe963a535L), tole(0x9e6495a3L),
+tole(0x0edb8832L), tole(0x79dcb8a4L), tole(0xe0d5e91eL), tole(0x97d2d988L),
+tole(0x09b64c2bL), tole(0x7eb17cbdL), tole(0xe7b82d07L), tole(0x90bf1d91L),
+tole(0x1db71064L), tole(0x6ab020f2L), tole(0xf3b97148L), tole(0x84be41deL),
+tole(0x1adad47dL), tole(0x6ddde4ebL), tole(0xf4d4b551L), tole(0x83d385c7L),
+tole(0x136c9856L), tole(0x646ba8c0L), tole(0xfd62f97aL), tole(0x8a65c9ecL),
+tole(0x14015c4fL), tole(0x63066cd9L), tole(0xfa0f3d63L), tole(0x8d080df5L),
+tole(0x3b6e20c8L), tole(0x4c69105eL), tole(0xd56041e4L), tole(0xa2677172L),
+tole(0x3c03e4d1L), tole(0x4b04d447L), tole(0xd20d85fdL), tole(0xa50ab56bL),
+tole(0x35b5a8faL), tole(0x42b2986cL), tole(0xdbbbc9d6L), tole(0xacbcf940L),
+tole(0x32d86ce3L), tole(0x45df5c75L), tole(0xdcd60dcfL), tole(0xabd13d59L),
+tole(0x26d930acL), tole(0x51de003aL), tole(0xc8d75180L), tole(0xbfd06116L),
+tole(0x21b4f4b5L), tole(0x56b3c423L), tole(0xcfba9599L), tole(0xb8bda50fL),
+tole(0x2802b89eL), tole(0x5f058808L), tole(0xc60cd9b2L), tole(0xb10be924L),
+tole(0x2f6f7c87L), tole(0x58684c11L), tole(0xc1611dabL), tole(0xb6662d3dL),
+tole(0x76dc4190L), tole(0x01db7106L), tole(0x98d220bcL), tole(0xefd5102aL),
+tole(0x71b18589L), tole(0x06b6b51fL), tole(0x9fbfe4a5L), tole(0xe8b8d433L),
+tole(0x7807c9a2L), tole(0x0f00f934L), tole(0x9609a88eL), tole(0xe10e9818L),
+tole(0x7f6a0dbbL), tole(0x086d3d2dL), tole(0x91646c97L), tole(0xe6635c01L),
+tole(0x6b6b51f4L), tole(0x1c6c6162L), tole(0x856530d8L), tole(0xf262004eL),
+tole(0x6c0695edL), tole(0x1b01a57bL), tole(0x8208f4c1L), tole(0xf50fc457L),
+tole(0x65b0d9c6L), tole(0x12b7e950L), tole(0x8bbeb8eaL), tole(0xfcb9887cL),
+tole(0x62dd1ddfL), tole(0x15da2d49L), tole(0x8cd37cf3L), tole(0xfbd44c65L),
+tole(0x4db26158L), tole(0x3ab551ceL), tole(0xa3bc0074L), tole(0xd4bb30e2L),
+tole(0x4adfa541L), tole(0x3dd895d7L), tole(0xa4d1c46dL), tole(0xd3d6f4fbL),
+tole(0x4369e96aL), tole(0x346ed9fcL), tole(0xad678846L), tole(0xda60b8d0L),
+tole(0x44042d73L), tole(0x33031de5L), tole(0xaa0a4c5fL), tole(0xdd0d7cc9L),
+tole(0x5005713cL), tole(0x270241aaL), tole(0xbe0b1010L), tole(0xc90c2086L),
+tole(0x5768b525L), tole(0x206f85b3L), tole(0xb966d409L), tole(0xce61e49fL),
+tole(0x5edef90eL), tole(0x29d9c998L), tole(0xb0d09822L), tole(0xc7d7a8b4L),
+tole(0x59b33d17L), tole(0x2eb40d81L), tole(0xb7bd5c3bL), tole(0xc0ba6cadL),
+tole(0xedb88320L), tole(0x9abfb3b6L), tole(0x03b6e20cL), tole(0x74b1d29aL),
+tole(0xead54739L), tole(0x9dd277afL), tole(0x04db2615L), tole(0x73dc1683L),
+tole(0xe3630b12L), tole(0x94643b84L), tole(0x0d6d6a3eL), tole(0x7a6a5aa8L),
+tole(0xe40ecf0bL), tole(0x9309ff9dL), tole(0x0a00ae27L), tole(0x7d079eb1L),
+tole(0xf00f9344L), tole(0x8708a3d2L), tole(0x1e01f268L), tole(0x6906c2feL),
+tole(0xf762575dL), tole(0x806567cbL), tole(0x196c3671L), tole(0x6e6b06e7L),
+tole(0xfed41b76L), tole(0x89d32be0L), tole(0x10da7a5aL), tole(0x67dd4accL),
+tole(0xf9b9df6fL), tole(0x8ebeeff9L), tole(0x17b7be43L), tole(0x60b08ed5L),
+tole(0xd6d6a3e8L), tole(0xa1d1937eL), tole(0x38d8c2c4L), tole(0x4fdff252L),
+tole(0xd1bb67f1L), tole(0xa6bc5767L), tole(0x3fb506ddL), tole(0x48b2364bL),
+tole(0xd80d2bdaL), tole(0xaf0a1b4cL), tole(0x36034af6L), tole(0x41047a60L),
+tole(0xdf60efc3L), tole(0xa867df55L), tole(0x316e8eefL), tole(0x4669be79L),
+tole(0xcb61b38cL), tole(0xbc66831aL), tole(0x256fd2a0L), tole(0x5268e236L),
+tole(0xcc0c7795L), tole(0xbb0b4703L), tole(0x220216b9L), tole(0x5505262fL),
+tole(0xc5ba3bbeL), tole(0xb2bd0b28L), tole(0x2bb45a92L), tole(0x5cb36a04L),
+tole(0xc2d7ffa7L), tole(0xb5d0cf31L), tole(0x2cd99e8bL), tole(0x5bdeae1dL),
+tole(0x9b64c2b0L), tole(0xec63f226L), tole(0x756aa39cL), tole(0x026d930aL),
+tole(0x9c0906a9L), tole(0xeb0e363fL), tole(0x72076785L), tole(0x05005713L),
+tole(0x95bf4a82L), tole(0xe2b87a14L), tole(0x7bb12baeL), tole(0x0cb61b38L),
+tole(0x92d28e9bL), tole(0xe5d5be0dL), tole(0x7cdcefb7L), tole(0x0bdbdf21L),
+tole(0x86d3d2d4L), tole(0xf1d4e242L), tole(0x68ddb3f8L), tole(0x1fda836eL),
+tole(0x81be16cdL), tole(0xf6b9265bL), tole(0x6fb077e1L), tole(0x18b74777L),
+tole(0x88085ae6L), tole(0xff0f6a70L), tole(0x66063bcaL), tole(0x11010b5cL),
+tole(0x8f659effL), tole(0xf862ae69L), tole(0x616bffd3L), tole(0x166ccf45L),
+tole(0xa00ae278L), tole(0xd70dd2eeL), tole(0x4e048354L), tole(0x3903b3c2L),
+tole(0xa7672661L), tole(0xd06016f7L), tole(0x4969474dL), tole(0x3e6e77dbL),
+tole(0xaed16a4aL), tole(0xd9d65adcL), tole(0x40df0b66L), tole(0x37d83bf0L),
+tole(0xa9bcae53L), tole(0xdebb9ec5L), tole(0x47b2cf7fL), tole(0x30b5ffe9L),
+tole(0xbdbdf21cL), tole(0xcabac28aL), tole(0x53b39330L), tole(0x24b4a3a6L),
+tole(0xbad03605L), tole(0xcdd70693L), tole(0x54de5729L), tole(0x23d967bfL),
+tole(0xb3667a2eL), tole(0xc4614ab8L), tole(0x5d681b02L), tole(0x2a6f2b94L),
+tole(0xb40bbe37L), tole(0xc30c8ea1L), tole(0x5a05df1bL), tole(0x2d02ef8dL)
+};
+#ifndef __UBOOT__
+static const u32 crc32table_be[] = {
+tobe(0x00000000L), tobe(0x04c11db7L), tobe(0x09823b6eL), tobe(0x0d4326d9L),
+tobe(0x130476dcL), tobe(0x17c56b6bL), tobe(0x1a864db2L), tobe(0x1e475005L),
+tobe(0x2608edb8L), tobe(0x22c9f00fL), tobe(0x2f8ad6d6L), tobe(0x2b4bcb61L),
+tobe(0x350c9b64L), tobe(0x31cd86d3L), tobe(0x3c8ea00aL), tobe(0x384fbdbdL),
+tobe(0x4c11db70L), tobe(0x48d0c6c7L), tobe(0x4593e01eL), tobe(0x4152fda9L),
+tobe(0x5f15adacL), tobe(0x5bd4b01bL), tobe(0x569796c2L), tobe(0x52568b75L),
+tobe(0x6a1936c8L), tobe(0x6ed82b7fL), tobe(0x639b0da6L), tobe(0x675a1011L),
+tobe(0x791d4014L), tobe(0x7ddc5da3L), tobe(0x709f7b7aL), tobe(0x745e66cdL),
+tobe(0x9823b6e0L), tobe(0x9ce2ab57L), tobe(0x91a18d8eL), tobe(0x95609039L),
+tobe(0x8b27c03cL), tobe(0x8fe6dd8bL), tobe(0x82a5fb52L), tobe(0x8664e6e5L),
+tobe(0xbe2b5b58L), tobe(0xbaea46efL), tobe(0xb7a96036L), tobe(0xb3687d81L),
+tobe(0xad2f2d84L), tobe(0xa9ee3033L), tobe(0xa4ad16eaL), tobe(0xa06c0b5dL),
+tobe(0xd4326d90L), tobe(0xd0f37027L), tobe(0xddb056feL), tobe(0xd9714b49L),
+tobe(0xc7361b4cL), tobe(0xc3f706fbL), tobe(0xceb42022L), tobe(0xca753d95L),
+tobe(0xf23a8028L), tobe(0xf6fb9d9fL), tobe(0xfbb8bb46L), tobe(0xff79a6f1L),
+tobe(0xe13ef6f4L), tobe(0xe5ffeb43L), tobe(0xe8bccd9aL), tobe(0xec7dd02dL),
+tobe(0x34867077L), tobe(0x30476dc0L), tobe(0x3d044b19L), tobe(0x39c556aeL),
+tobe(0x278206abL), tobe(0x23431b1cL), tobe(0x2e003dc5L), tobe(0x2ac12072L),
+tobe(0x128e9dcfL), tobe(0x164f8078L), tobe(0x1b0ca6a1L), tobe(0x1fcdbb16L),
+tobe(0x018aeb13L), tobe(0x054bf6a4L), tobe(0x0808d07dL), tobe(0x0cc9cdcaL),
+tobe(0x7897ab07L), tobe(0x7c56b6b0L), tobe(0x71159069L), tobe(0x75d48ddeL),
+tobe(0x6b93dddbL), tobe(0x6f52c06cL), tobe(0x6211e6b5L), tobe(0x66d0fb02L),
+tobe(0x5e9f46bfL), tobe(0x5a5e5b08L), tobe(0x571d7dd1L), tobe(0x53dc6066L),
+tobe(0x4d9b3063L), tobe(0x495a2dd4L), tobe(0x44190b0dL), tobe(0x40d816baL),
+tobe(0xaca5c697L), tobe(0xa864db20L), tobe(0xa527fdf9L), tobe(0xa1e6e04eL),
+tobe(0xbfa1b04bL), tobe(0xbb60adfcL), tobe(0xb6238b25L), tobe(0xb2e29692L),
+tobe(0x8aad2b2fL), tobe(0x8e6c3698L), tobe(0x832f1041L), tobe(0x87ee0df6L),
+tobe(0x99a95df3L), tobe(0x9d684044L), tobe(0x902b669dL), tobe(0x94ea7b2aL),
+tobe(0xe0b41de7L), tobe(0xe4750050L), tobe(0xe9362689L), tobe(0xedf73b3eL),
+tobe(0xf3b06b3bL), tobe(0xf771768cL), tobe(0xfa325055L), tobe(0xfef34de2L),
+tobe(0xc6bcf05fL), tobe(0xc27dede8L), tobe(0xcf3ecb31L), tobe(0xcbffd686L),
+tobe(0xd5b88683L), tobe(0xd1799b34L), tobe(0xdc3abdedL), tobe(0xd8fba05aL),
+tobe(0x690ce0eeL), tobe(0x6dcdfd59L), tobe(0x608edb80L), tobe(0x644fc637L),
+tobe(0x7a089632L), tobe(0x7ec98b85L), tobe(0x738aad5cL), tobe(0x774bb0ebL),
+tobe(0x4f040d56L), tobe(0x4bc510e1L), tobe(0x46863638L), tobe(0x42472b8fL),
+tobe(0x5c007b8aL), tobe(0x58c1663dL), tobe(0x558240e4L), tobe(0x51435d53L),
+tobe(0x251d3b9eL), tobe(0x21dc2629L), tobe(0x2c9f00f0L), tobe(0x285e1d47L),
+tobe(0x36194d42L), tobe(0x32d850f5L), tobe(0x3f9b762cL), tobe(0x3b5a6b9bL),
+tobe(0x0315d626L), tobe(0x07d4cb91L), tobe(0x0a97ed48L), tobe(0x0e56f0ffL),
+tobe(0x1011a0faL), tobe(0x14d0bd4dL), tobe(0x19939b94L), tobe(0x1d528623L),
+tobe(0xf12f560eL), tobe(0xf5ee4bb9L), tobe(0xf8ad6d60L), tobe(0xfc6c70d7L),
+tobe(0xe22b20d2L), tobe(0xe6ea3d65L), tobe(0xeba91bbcL), tobe(0xef68060bL),
+tobe(0xd727bbb6L), tobe(0xd3e6a601L), tobe(0xdea580d8L), tobe(0xda649d6fL),
+tobe(0xc423cd6aL), tobe(0xc0e2d0ddL), tobe(0xcda1f604L), tobe(0xc960ebb3L),
+tobe(0xbd3e8d7eL), tobe(0xb9ff90c9L), tobe(0xb4bcb610L), tobe(0xb07daba7L),
+tobe(0xae3afba2L), tobe(0xaafbe615L), tobe(0xa7b8c0ccL), tobe(0xa379dd7bL),
+tobe(0x9b3660c6L), tobe(0x9ff77d71L), tobe(0x92b45ba8L), tobe(0x9675461fL),
+tobe(0x8832161aL), tobe(0x8cf30badL), tobe(0x81b02d74L), tobe(0x857130c3L),
+tobe(0x5d8a9099L), tobe(0x594b8d2eL), tobe(0x5408abf7L), tobe(0x50c9b640L),
+tobe(0x4e8ee645L), tobe(0x4a4ffbf2L), tobe(0x470cdd2bL), tobe(0x43cdc09cL),
+tobe(0x7b827d21L), tobe(0x7f436096L), tobe(0x7200464fL), tobe(0x76c15bf8L),
+tobe(0x68860bfdL), tobe(0x6c47164aL), tobe(0x61043093L), tobe(0x65c52d24L),
+tobe(0x119b4be9L), tobe(0x155a565eL), tobe(0x18197087L), tobe(0x1cd86d30L),
+tobe(0x029f3d35L), tobe(0x065e2082L), tobe(0x0b1d065bL), tobe(0x0fdc1becL),
+tobe(0x3793a651L), tobe(0x3352bbe6L), tobe(0x3e119d3fL), tobe(0x3ad08088L),
+tobe(0x2497d08dL), tobe(0x2056cd3aL), tobe(0x2d15ebe3L), tobe(0x29d4f654L),
+tobe(0xc5a92679L), tobe(0xc1683bceL), tobe(0xcc2b1d17L), tobe(0xc8ea00a0L),
+tobe(0xd6ad50a5L), tobe(0xd26c4d12L), tobe(0xdf2f6bcbL), tobe(0xdbee767cL),
+tobe(0xe3a1cbc1L), tobe(0xe760d676L), tobe(0xea23f0afL), tobe(0xeee2ed18L),
+tobe(0xf0a5bd1dL), tobe(0xf464a0aaL), tobe(0xf9278673L), tobe(0xfde69bc4L),
+tobe(0x89b8fd09L), tobe(0x8d79e0beL), tobe(0x803ac667L), tobe(0x84fbdbd0L),
+tobe(0x9abc8bd5L), tobe(0x9e7d9662L), tobe(0x933eb0bbL), tobe(0x97ffad0cL),
+tobe(0xafb010b1L), tobe(0xab710d06L), tobe(0xa6322bdfL), tobe(0xa2f33668L),
+tobe(0xbcb4666dL), tobe(0xb8757bdaL), tobe(0xb5365d03L), tobe(0xb1f740b4L)
+};
+#endif
diff --git a/roms/u-boot/drivers/mtd/ubi/debug.c b/roms/u-boot/drivers/mtd/ubi/debug.c
new file mode 100644
index 000000000..d2b7ca5e3
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/debug.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#include <hexdump.h>
+#include <malloc.h>
+#include <ubi_uboot.h>
+#include "ubi.h"
+#ifndef __UBOOT__
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#endif
+
+/**
+ * ubi_dump_flash - dump a region of flash.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to dump
+ * @offset: the starting offset within the physical eraseblock to dump
+ * @len: the length of the region to dump
+ */
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ int err;
+ size_t read;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ buf = vmalloc(len);
+ if (!buf)
+ return;
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && err != -EUCLEAN) {
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto out;
+ }
+
+ ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
+ len, pnum, offset);
+ print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+out:
+ vfree(buf);
+ return;
+}
+
+/**
+ * ubi_dump_ec_hdr - dump an erase counter header.
+ * @ec_hdr: the erase counter header to dump
+ */
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
+{
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
+ print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
+ ec_hdr, UBI_EC_HDR_SIZE, 1);
+}
+
+/**
+ * ubi_dump_vid_hdr - dump a volume identifier header.
+ * @vid_hdr: the volume identifier header to dump
+ */
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
+{
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
+ (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
+ print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
+ vid_hdr, UBI_VID_HDR_SIZE, 1);
+}
+
+/**
+ * ubi_dump_vol_info - dump volume information.
+ * @vol: UBI volume description object
+ */
+void ubi_dump_vol_info(const struct ubi_volume *vol)
+{
+ printf("Volume information dump:\n");
+ printf("\tvol_id %d\n", vol->vol_id);
+ printf("\treserved_pebs %d\n", vol->reserved_pebs);
+ printf("\talignment %d\n", vol->alignment);
+ printf("\tdata_pad %d\n", vol->data_pad);
+ printf("\tvol_type %d\n", vol->vol_type);
+ printf("\tname_len %d\n", vol->name_len);
+ printf("\tusable_leb_size %d\n", vol->usable_leb_size);
+ printf("\tused_ebs %d\n", vol->used_ebs);
+ printf("\tused_bytes %lld\n", vol->used_bytes);
+ printf("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ printf("\tcorrupted %d\n", vol->corrupted);
+ printf("\tupd_marker %d\n", vol->upd_marker);
+ printf("\tskip_check %d\n", vol->skip_check);
+
+ if (vol->name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
+ printf("\tname %s\n", vol->name);
+ } else {
+ printf("\t1st 5 characters of name: %c%c%c%c%c\n",
+ vol->name[0], vol->name[1], vol->name[2],
+ vol->name[3], vol->name[4]);
+ }
+}
+
+/**
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * @r: the object to dump
+ * @idx: volume table index
+ */
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+{
+ int name_len = be16_to_cpu(r->name_len);
+
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
+
+ if (r->name[0] == '\0') {
+ pr_err("\tname NULL\n");
+ return;
+ }
+
+ if (name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(&r->name[0], name_len + 1) == name_len) {
+ pr_err("\tname %s\n", &r->name[0]);
+ } else {
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
+ r->name[0], r->name[1], r->name[2], r->name[3],
+ r->name[4]);
+ }
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
+}
+
+/**
+ * ubi_dump_av - dump a &struct ubi_ainf_volume object.
+ * @av: the object to dump
+ */
+void ubi_dump_av(const struct ubi_ainf_volume *av)
+{
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
+}
+
+/**
+ * ubi_dump_aeb - dump a &struct ubi_ainf_peb object.
+ * @aeb: the object to dump
+ * @type: object type: 0 - not corrupted, 1 - corrupted
+ */
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
+{
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
+ if (type == 0) {
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
+ }
+}
+
+/**
+ * ubi_dump_mkvol_req - dump a &struct ubi_mkvol_req object.
+ * @req: the object to dump
+ */
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
+{
+ char nm[17];
+
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
+
+ memcpy(nm, req->name, 16);
+ nm[16] = 0;
+ pr_err("\t1st 16 characters of name: %s\n", nm);
+}
+
+#ifndef __UBOOT__
+/*
+ * Root directory for UBI stuff in debugfs. Contains sub-directories which
+ * contain the stuff specific to particular UBI devices.
+ */
+static struct dentry *dfs_rootdir;
+
+/**
+ * ubi_debugfs_init - create UBI debugfs directory.
+ *
+ * Create UBI debugfs directory. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_debugfs_init(void)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ dfs_rootdir = debugfs_create_dir("ubi", NULL);
+ if (IS_ERR_OR_NULL(dfs_rootdir)) {
+ int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+
+ pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_debugfs_exit - remove UBI debugfs directory.
+ */
+void ubi_debugfs_exit(void)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
+}
+
+/* Read an UBI debugfs file */
+static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ char buf[8];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
+ if (dent == d->dfs_chk_gen)
+ val = d->chk_gen;
+ else if (dent == d->dfs_chk_io)
+ val = d->chk_io;
+ else if (dent == d->dfs_chk_fastmap)
+ val = d->chk_fastmap;
+ else if (dent == d->dfs_disable_bgt)
+ val = d->disable_bgt;
+ else if (dent == d->dfs_emulate_bitflips)
+ val = d->emulate_bitflips;
+ else if (dent == d->dfs_emulate_io_failures)
+ val = d->emulate_io_failures;
+ else if (dent == d->dfs_emulate_power_cut) {
+ snprintf(buf, sizeof(buf), "%u\n", d->emulate_power_cut);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_min) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_min);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ snprintf(buf, sizeof(buf), "%u\n", d->power_cut_max);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ buf, strlen(buf));
+ goto out;
+ } else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (val)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* Write an UBI debugfs file */
+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ size_t buf_size;
+ char buf[8] = {0};
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = &ubi->dbg;
+
+ buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ count = -EFAULT;
+ goto out;
+ }
+
+ if (dent == d->dfs_power_cut_min) {
+ if (kstrtouint(buf, 0, &d->power_cut_min) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_power_cut_max) {
+ if (kstrtouint(buf, 0, &d->power_cut_max) != 0)
+ count = -EINVAL;
+ goto out;
+ } else if (dent == d->dfs_emulate_power_cut) {
+ if (kstrtoint(buf, 0, &val) != 0)
+ count = -EINVAL;
+ d->emulate_power_cut = val;
+ goto out;
+ }
+
+ if (buf[0] == '1')
+ val = 1;
+ else if (buf[0] == '0')
+ val = 0;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (dent == d->dfs_chk_gen)
+ d->chk_gen = val;
+ else if (dent == d->dfs_chk_io)
+ d->chk_io = val;
+ else if (dent == d->dfs_chk_fastmap)
+ d->chk_fastmap = val;
+ else if (dent == d->dfs_disable_bgt)
+ d->disable_bgt = val;
+ else if (dent == d->dfs_emulate_bitflips)
+ d->emulate_bitflips = val;
+ else if (dent == d->dfs_emulate_io_failures)
+ d->emulate_io_failures = val;
+ else
+ count = -EINVAL;
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* File operations for all UBI debugfs files */
+static const struct file_operations dfs_fops = {
+ .read = dfs_file_read,
+ .write = dfs_file_write,
+ .open = simple_open,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
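+/*
+ * Usage sketch, not part of the original sources: assuming the usual
+ * "ubi%d" per-device directory name and debugfs mounted at
+ * /sys/kernel/debug, the files created below can be driven from a
+ * shell, e.g.
+ *
+ *	echo 1 > /sys/kernel/debug/ubi/ubi0/chk_gen
+ *	echo 100 > /sys/kernel/debug/ubi/ubi0/tst_emulate_power_cut_min
+ */
+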
+/**
+ * ubi_debugfs_init_dev - initialize debugfs for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function creates all debugfs files for UBI device @ubi. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+ int err, n;
+ unsigned long ubi_num = ubi->ubi_num;
+ const char *fname;
+ struct dentry *dent;
+ struct ubi_debug_info *d = &ubi->dbg;
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ ubi->ubi_num);
+ if (n == UBI_DFS_DIR_LEN) {
+ /* The array size is too small */
+ fname = UBI_DFS_DIR_NAME;
+ dent = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ fname = d->dfs_dir_name;
+ dent = debugfs_create_dir(fname, dfs_rootdir);
+ if (IS_ERR_OR_NULL(dent))
+ goto out;
+ d->dfs_dir = dent;
+
+ fname = "chk_gen";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_gen = dent;
+
+ fname = "chk_io";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_io = dent;
+
+ fname = "chk_fastmap";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_fastmap = dent;
+
+ fname = "tst_disable_bgt";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_disable_bgt = dent;
+
+ fname = "tst_emulate_bitflips";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_bitflips = dent;
+
+ fname = "tst_emulate_io_failures";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_io_failures = dent;
+
+ fname = "tst_emulate_power_cut";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_power_cut = dent;
+
+ fname = "tst_emulate_power_cut_min";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_power_cut_min = dent;
+
+ fname = "tst_emulate_power_cut_max";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_power_cut_max = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(d->dfs_dir);
+out:
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
+ fname, err);
+ return err;
+}
+
+/**
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
+ * @ubi: UBI device description object
+ */
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg.dfs_dir);
+}
+
+/**
+ * ubi_dbg_power_cut - emulate a power cut if it is time to do so
+ * @ubi: UBI device description object
+ * @caller: Flags set to indicate from where the function is being called
+ *
+ * Returns non-zero if a power cut was emulated, zero if not.
+ */
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+ unsigned int range;
+
+ if ((ubi->dbg.emulate_power_cut & caller) == 0)
+ return 0;
+
+ if (ubi->dbg.power_cut_counter == 0) {
+ ubi->dbg.power_cut_counter = ubi->dbg.power_cut_min;
+
+ if (ubi->dbg.power_cut_max > ubi->dbg.power_cut_min) {
+ range = ubi->dbg.power_cut_max - ubi->dbg.power_cut_min;
+ ubi->dbg.power_cut_counter += prandom_u32() % range;
+ }
+ return 0;
+ }
+
+ ubi->dbg.power_cut_counter--;
+ if (ubi->dbg.power_cut_counter)
+ return 0;
+
+ ubi_msg(ubi, "XXXXXXXXXXXXXXX emulating a power cut XXXXXXXXXXXXXXXX");
+ ubi_ro_mode(ubi);
+ return 1;
+}
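+
+/*
+ * Illustrative caller, not part of the original sources - a write path
+ * would typically guard the real flash write like this, where CALLER_FLAG
+ * stands for a hypothetical bit in the emulate_power_cut mask:
+ *
+ *	if (ubi_dbg_power_cut(ubi, CALLER_FLAG))
+ *		return 0;	(pretend power was lost before the write)
+ *	err = ubi_io_write(ubi, buf, pnum, offset, len);
+ */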
+#else
+int ubi_debugfs_init(void)
+{
+ return 0;
+}
+
+void ubi_debugfs_exit(void)
+{
+}
+
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+ return 0;
+}
+
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+}
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller)
+{
+ return 0;
+}
+#endif
diff --git a/roms/u-boot/drivers/mtd/ubi/debug.h b/roms/u-boot/drivers/mtd/ubi/debug.h
new file mode 100644
index 000000000..2c2faaf1b
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/debug.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_DEBUG_H__
+#define __UBI_DEBUG_H__
+
+void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
+void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr);
+void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
+
+#ifndef __UBOOT__
+#include <linux/random.h>
+#endif
+
+#include <hexdump.h>
+
+#ifndef __UBOOT__
+#define ubi_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
+ __func__, __LINE__, current->pid); \
+ dump_stack(); \
+ } \
+} while (0)
+#else
+#define ubi_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ pr_debug("UBI assert failed in %s at %u\n", \
+ __func__, __LINE__); \
+ dump_stack(); \
+ } \
+} while (0)
+#endif
+
+#define ubi_dbg_print_hex_dump(ps, pt, r, g, b, len, a) \
+ print_hex_dump(ps, pt, r, g, b, len, a)
+
+#define ubi_dbg_msg(type, fmt, ...) \
+ pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
+ ##__VA_ARGS__)
+
+/* General debugging messages */
+#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
+/* Messages from the eraseblock association sub-system */
+#define dbg_eba(fmt, ...) ubi_dbg_msg("eba", fmt, ##__VA_ARGS__)
+/* Messages from the wear-leveling sub-system */
+#define dbg_wl(fmt, ...) ubi_dbg_msg("wl", fmt, ##__VA_ARGS__)
+/* Messages from the input/output sub-system */
+#define dbg_io(fmt, ...) ubi_dbg_msg("io", fmt, ##__VA_ARGS__)
+/* Initialization and build messages */
+#define dbg_bld(fmt, ...) ubi_dbg_msg("bld", fmt, ##__VA_ARGS__)
+
+void ubi_dump_vol_info(const struct ubi_volume *vol);
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
+void ubi_dump_av(const struct ubi_ainf_volume *av);
+void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type);
+void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req);
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
+ int len);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+void ubi_debugfs_exit_dev(struct ubi_device *ubi);
+
+/**
+ * ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if the UBI background thread is disabled for testing
+ * purposes.
+ */
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
+{
+ return ubi->dbg.disable_bgt;
+}
+
+/**
+ * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
+ */
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_bitflips)
+ return !(prandom_u32() % 200);
+ return 0;
+}
+
+/**
+ * ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if a write failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !(prandom_u32() % 500);
+ return 0;
+}
+
+/**
+ * ubi_dbg_is_erase_failure - if it is time to emulate an erase failure.
+ * @ubi: UBI device description object
+ *
+ * Returns non-zero if an erase failure should be emulated, otherwise returns
+ * zero.
+ */
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
+{
+ if (ubi->dbg.emulate_io_failures)
+ return !(prandom_u32() % 400);
+ return 0;
+}
+
+static inline int ubi_dbg_chk_io(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_io;
+}
+
+static inline int ubi_dbg_chk_gen(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_gen;
+}
+
+static inline int ubi_dbg_chk_fastmap(const struct ubi_device *ubi)
+{
+ return ubi->dbg.chk_fastmap;
+}
+
+static inline void ubi_enable_dbg_chk_fastmap(struct ubi_device *ubi)
+{
+ ubi->dbg.chk_fastmap = 1;
+}
+
+int ubi_dbg_power_cut(struct ubi_device *ubi, int caller);
+#endif /* !__UBI_DEBUG_H__ */
diff --git a/roms/u-boot/drivers/mtd/ubi/eba.c b/roms/u-boot/drivers/mtd/ubi/eba.c
new file mode 100644
index 000000000..9d4337bcf
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/eba.c
@@ -0,0 +1,1474 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * The UBI Eraseblock Association (EBA) sub-system.
+ *
+ * This sub-system is responsible for I/O to/from logical eraseblock.
+ *
+ * Although in this implementation the EBA table is fully kept and managed in
+ * RAM, which implies poor scalability, it might be (partially) maintained on
+ * flash in future implementations.
+ *
+ * The EBA sub-system implements per-logical eraseblock locking. Before
+ * accessing a logical eraseblock it is locked for reading or writing. The
+ * per-logical eraseblock locking is implemented by means of the lock tree. The
+ * lock tree is an RB-tree which refers all the currently locked logical
+ * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
+ * They are indexed by (@vol_id, @lnum) pairs.
+ *
+ * EBA also maintains the global sequence counter which is incremented each
+ * time a logical eraseblock is mapped to a physical eraseblock and it is
+ * stored in the volume identifier header. This means that each VID header has
+ * a unique sequence number. The sequence number is only ever increased, and
+ * we assume 64 bits is enough for it never to overflow.
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <u-boot/crc.h>
+#else
+#include <ubi_uboot.h>
+#endif
+
+#include <linux/err.h>
+#include "ubi.h"
+
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
+/**
+ * next_sqnum - get next sequence number.
+ * @ubi: UBI device description object
+ *
+ * This function returns the next sequence number to use, which is just the
+ * current global sequence counter value. It also increases the global
+ * sequence counter.
+ */
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
+{
+ unsigned long long sqnum;
+
+ spin_lock(&ubi->ltree_lock);
+ sqnum = ubi->global_sqnum++;
+ spin_unlock(&ubi->ltree_lock);
+
+ return sqnum;
+}
+
+/**
+ * ubi_get_compat - get compatibility flags of a volume.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ *
+ * This function returns compatibility flags for an internal volume. User
+ * volumes have no compatibility flags, so %0 is returned.
+ */
+static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id == UBI_LAYOUT_VOLUME_ID)
+ return UBI_LAYOUT_VOLUME_COMPAT;
+ return 0;
+}
+
+/**
+ * ltree_lookup - look up the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
+ * object if the logical eraseblock is locked and %NULL if it is not.
+ * @ubi->ltree_lock has to be locked.
+ */
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+ int lnum)
+{
+ struct rb_node *p;
+
+ p = ubi->ltree.rb_node;
+ while (p) {
+ struct ubi_ltree_entry *le;
+
+ le = rb_entry(p, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le->vol_id)
+ p = p->rb_left;
+ else if (vol_id > le->vol_id)
+ p = p->rb_right;
+ else {
+ if (lnum < le->lnum)
+ p = p->rb_left;
+ else if (lnum > le->lnum)
+ p = p->rb_right;
+ else
+ return le;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * ltree_add_entry - add new entry to the lock tree.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
+ * lock tree. If such entry is already there, its usage counter is increased.
+ * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
+ * failed.
+ */
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+ int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le, *le1, *le_free;
+
+ le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
+ if (!le)
+ return ERR_PTR(-ENOMEM);
+
+ le->users = 0;
+ init_rwsem(&le->mutex);
+ le->vol_id = vol_id;
+ le->lnum = lnum;
+
+ spin_lock(&ubi->ltree_lock);
+ le1 = ltree_lookup(ubi, vol_id, lnum);
+
+ if (le1) {
+ /*
+ * This logical eraseblock is already locked. The newly
+ * allocated lock entry is not needed.
+ */
+ le_free = le;
+ le = le1;
+ } else {
+ struct rb_node **p, *parent = NULL;
+
+ /*
+ * No lock entry, add the newly allocated one to the
+ * @ubi->ltree RB-tree.
+ */
+ le_free = NULL;
+
+ p = &ubi->ltree.rb_node;
+ while (*p) {
+ parent = *p;
+ le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
+
+ if (vol_id < le1->vol_id)
+ p = &(*p)->rb_left;
+ else if (vol_id > le1->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(lnum != le1->lnum);
+ if (lnum < le1->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&le->rb, parent, p);
+ rb_insert_color(&le->rb, &ubi->ltree);
+ }
+ le->users += 1;
+ spin_unlock(&ubi->ltree_lock);
+
+ kfree(le_free);
+ return le;
+}
+
+/**
+ * leb_read_lock - lock logical eraseblock for reading.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for reading. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_read(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_read_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ up_read(&le->mutex);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+}
+
+/**
+ * leb_write_lock - lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ down_write(&le->mutex);
+ return 0;
+}
+
+/**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ if (down_write_trylock(&le->mutex))
+ return 0;
+
+ /* Contention, cancel */
+ spin_lock(&ubi->ltree_lock);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+
+ return 1;
+}
+
+/**
+ * leb_write_unlock - unlock logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ */
+static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ struct ubi_ltree_entry *le;
+
+ spin_lock(&ubi->ltree_lock);
+ le = ltree_lookup(ubi, vol_id, lnum);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ up_write(&le->mutex);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ kfree(le);
+ }
+ spin_unlock(&ubi->ltree_lock);
+}
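+
+/*
+ * Usage sketch for the lock-tree helpers above (illustrative only):
+ *
+ *	err = leb_read_lock(ubi, vol_id, lnum);
+ *	if (err)
+ *		return err;
+ *	... read from the PEB mapped to (vol_id, lnum) ...
+ *	leb_read_unlock(ubi, vol_id, lnum);
+ *
+ * Entries are reference-counted via @users, so concurrent lockers share a
+ * single &struct ubi_ltree_entry and the last unlock removes it from the
+ * RB-tree and frees it.
+ */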
+
+/**
+ * ubi_eba_unmap_leb - un-map logical eraseblock.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules corresponding
+ * physical eraseblock for erasure. Returns zero in case of success and a
+ * negative error code in case of failure.
+ */
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum)
+{
+ int err, pnum, vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum < 0)
+ /* This logical eraseblock is already unmapped */
+ goto out_unlock;
+
+ dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
+
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+ up_read(&ubi->fm_eba_sem);
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
+
+out_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * ubi_eba_read_leb - read data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: buffer to store the read data
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
+ * bytes. The @check flag only makes sense for static volumes and forces
+ * eraseblock data CRC checking.
+ *
+ * In case of success this function returns zero. In case of a static volume,
+ * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
+ * returned for any volume type if an ECC error was detected by the MTD device
+ * driver. Other negative error codes may be returned in case of other errors.
+ */
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check)
+{
+ int err, pnum, scrub = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t uninitialized_var(crc);
+
+ err = leb_read_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum < 0) {
+ /*
+ * The logical eraseblock is not mapped, fill the whole buffer
+ * with 0xFF bytes. The exception is static volumes for which
+ * it is an error to read unmapped logical eraseblocks.
+ */
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
+ len, offset, vol_id, lnum);
+ leb_read_unlock(ubi, vol_id, lnum);
+ ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
+ memset(buf, 0xFF, len);
+ return 0;
+ }
+
+ dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ check = 0;
+
+retry:
+ if (check) {
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0) {
+ /*
+ * The header is either absent or corrupted.
+ * The former case means there is a bug -
+ * switch to read-only mode just in case.
+ * The latter case means a real corruption - we
+ * may try to recover data. FIXME: but this is
+ * not implemented.
+ */
+ if (err == UBI_IO_BAD_HDR_EBADMSG ||
+ err == UBI_IO_BAD_HDR) {
+ ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
+ err = -EBADMSG;
+ } else {
+ err = -EINVAL;
+ ubi_ro_mode(ubi);
+ }
+ }
+ goto out_free;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
+ ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
+
+ crc = be32_to_cpu(vid_hdr->data_crc);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ }
+
+ err = ubi_io_read_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+ else if (mtd_is_eccerr(err)) {
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ goto out_unlock;
+ scrub = 1;
+ if (!check) {
+ ubi_msg(ubi, "force data checking");
+ check = 1;
+ goto retry;
+ }
+ } else
+ goto out_unlock;
+ }
+
+ if (check) {
+ uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
+ if (crc1 != crc) {
+ ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
+ crc1, crc);
+ err = -EBADMSG;
+ goto out_unlock;
+ }
+ }
+
+ if (scrub)
+ err = ubi_wl_scrub_peb(ubi, pnum);
+
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+
+out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+out_unlock:
+ leb_read_unlock(ubi, vol_id, lnum);
+ return err;
+}
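+
+/*
+ * Illustrative example (not part of the driver): a caller reading a whole
+ * LEB must cope with unmapped LEBs being reported as 0xFF data:
+ *
+ *	void *buf = kmalloc(vol->usable_leb_size, GFP_KERNEL);
+ *	if (buf) {
+ *		int e = ubi_eba_read_leb(ubi, vol, lnum, buf, 0,
+ *					 vol->usable_leb_size, 0);
+ *		if (!e)
+ *			... use buf; it is all 0xFF if @lnum was unmapped ...
+ *		kfree(buf);
+ *	}
+ */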
+
+#ifndef __UBOOT__
+/**
+ * ubi_eba_read_leb_sg - read data into a scatter gather list.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @sgl: UBI scatter gather list to store the read data
+ * @lnum: logical eraseblock number
+ * @offset: offset from where to read
+ * @len: how many bytes to read
+ * @check: data CRC check flag
+ *
+ * This function works exactly like ubi_eba_read_leb(). But instead of
+ * storing the read data into a buffer it writes to an UBI scatter gather
+ * list.
+ */
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check)
+{
+ int to_read;
+ int ret;
+ struct scatterlist *sg;
+
+ for (;;) {
+ ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
+ sg = &sgl->sg[sgl->list_pos];
+ if (len < sg->length - sgl->page_pos)
+ to_read = len;
+ else
+ to_read = sg->length - sgl->page_pos;
+
+ ret = ubi_eba_read_leb(ubi, vol, lnum,
+ sg_virt(sg) + sgl->page_pos, offset,
+ to_read, check);
+ if (ret < 0)
+ return ret;
+
+ offset += to_read;
+ len -= to_read;
+ if (!len) {
+ sgl->page_pos += to_read;
+ if (sgl->page_pos == sg->length) {
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ break;
+ }
+
+ sgl->list_pos++;
+ sgl->page_pos = 0;
+ }
+
+ return ret;
+}
+#endif
+
+/**
+ * recover_peb - recover from write failure.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to recover
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ * @buf: data which was not written because of the write failure
+ * @offset: offset of the failed write
+ * @len: how many bytes should have been written
+ *
+ * This function is called in case of a write failure and moves all good data
+ * from the potentially bad physical eraseblock to a good physical eraseblock.
+ * This function also writes the data which was not written due to the failure.
+ * Returns new physical eraseblock number in case of success, and a negative
+ * error code in case of failure.
+ */
+static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
+ struct ubi_volume *vol = ubi->volumes[idx];
+ struct ubi_vid_hdr *vid_hdr;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+retry:
+ new_pnum = ubi_wl_get_peb(ubi);
+ if (new_pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ up_read(&ubi->fm_eba_sem);
+ return new_pnum;
+ }
+
+ ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
+ pnum, new_pnum);
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err > 0)
+ err = -EIO;
+ up_read(&ubi->fm_eba_sem);
+ goto out_put;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+ if (err) {
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ data_size = offset + len;
+ mutex_lock(&ubi->buf_mutex);
+ memset(ubi->peb_buf + offset, 0xFF, len);
+
+ /* Read everything before the area where the write failure happened */
+ if (offset > 0) {
+ err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
+ if (err && err != UBI_IO_BITFLIPS) {
+ up_read(&ubi->fm_eba_sem);
+ goto out_unlock;
+ }
+ }
+
+ memcpy(ubi->peb_buf + offset, buf, len);
+
+ err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
+ if (err) {
+ mutex_unlock(&ubi->buf_mutex);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ mutex_unlock(&ubi->buf_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+
+ vol->eba_tbl[lnum] = new_pnum;
+ up_read(&ubi->fm_eba_sem);
+ ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+
+ ubi_msg(ubi, "data was successfully recovered");
+ return 0;
+
+out_unlock:
+ mutex_unlock(&ubi->buf_mutex);
+out_put:
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ /*
+ * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
+ * get another one.
+ */
+ ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
+ ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
+ if (++tries > UBI_IO_RETRIES) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+ ubi_msg(ubi, "try again");
+ goto retry;
+}
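+
+/*
+ * Illustrative note on the buffer layout used above: after the memset(),
+ * read-back and memcpy(), @ubi->peb_buf contains
+ *
+ *	[0 .. offset)             data read back from the failed PEB
+ *	[offset .. offset + len)  the data whose write just failed
+ *
+ * and exactly @offset + @len bytes of it are written to the new PEB.
+ */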
+
+/**
+ * ubi_eba_write_leb - write data to dynamic volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: the data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes data to logical eraseblock @lnum of a dynamic volume
+ * @vol. Returns zero in case of success and a negative error code in case
+ * of failure. In case of error, it is possible that something was still
+ * written to the flash media, but may be some garbage.
+ */
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len)
+{
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ return err;
+
+ pnum = vol->eba_tbl[lnum];
+ if (pnum >= 0) {
+ dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write data to PEB %d", pnum);
+ if (err == -EIO && ubi->bad_allowed)
+ err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+ offset, len);
+ if (err)
+ ubi_ro_mode(ubi);
+ }
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+ }
+
+ /*
+ * The logical eraseblock is not mapped. We have to get a free physical
+ * eraseblock and write the volume identifier header there first.
+ */
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr) {
+ leb_write_unlock(ubi, vol_id, lnum);
+ return -ENOMEM;
+ }
+
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ up_read(&ubi->fm_eba_sem);
+ return pnum;
+ }
+
+ dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ if (len) {
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+ }
+
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_eba_sem);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ /*
+ * Fortunately, this is the first write operation to this physical
+ * eraseblock, so just put it and request a new one. We assume that if
+ * this physical eraseblock went bad, the erase code will handle that.
+ */
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ goto retry;
+}
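+
+/*
+ * Illustrative example (not part of the driver): writing one minimal I/O
+ * unit of data to the start of a dynamic LEB; callers are expected to pass
+ * @offset and @len aligned to @ubi->min_io_size:
+ *
+ *	err = ubi_eba_write_leb(ubi, vol, lnum, data, 0, ubi->min_io_size);
+ */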
+
+/**
+ * ubi_eba_write_leb_st - write data to static volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ * @used_ebs: how many logical eraseblocks will this volume contain
+ *
+ * This function writes data to logical eraseblock @lnum of static volume
+ * @vol. The @used_ebs argument should contain the total number of logical
+ * eraseblocks in this static volume.
+ *
+ * When writing to the last logical eraseblock, the @len argument doesn't have
+ * to be aligned to the minimal I/O unit size. Instead, it has to be equal
+ * to the real data size, although the @buf buffer has to be padded up to the
+ * aligned length. In all other cases, @len has to be aligned.
+ *
+ * It is prohibited to write more than once to logical eraseblocks of static
+ * volumes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int used_ebs)
+{
+ int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (lnum == used_ebs - 1)
+ /* If this is the last LEB @len may be unaligned */
+ len = ALIGN(data_size, ubi->min_io_size);
+ else
+ ubi_assert(!(len & (ubi->min_io_size - 1)));
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, data_size);
+ vid_hdr->vol_type = UBI_VID_STATIC;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->used_ebs = cpu_to_be32(used_ebs);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ leb_write_unlock(ubi, vol_id, lnum);
+ up_read(&ubi->fm_eba_sem);
+ return pnum;
+ }
+
+ dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
+ len, vol_id, lnum, pnum, used_ebs);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
+ len, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ ubi_assert(vol->eba_tbl[lnum] < 0);
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_eba_sem);
+
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ leb_write_unlock(ubi, vol_id, lnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ goto retry;
+}
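+
+/*
+ * Worked example of the last-LEB rule above (hypothetical numbers): with
+ * @ubi->min_io_size = 2048 and 5000 bytes of data in the last LEB, the caller
+ * passes @len = 5000; the function then writes ALIGN(5000, 2048) = 6144
+ * bytes, so @buf must be padded up to 6144 bytes, while @vid_hdr->data_size
+ * and the data CRC cover only the real 5000 bytes.
+ */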
+
+/**
+ * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: how many bytes to write
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. This function guarantees that in case of an
+ * unclean reboot the old contents are preserved. Returns zero in case of
+ * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
+ */
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len)
+{
+ int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
+ struct ubi_vid_hdr *vid_hdr;
+ uint32_t crc;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (len == 0) {
+ /*
+ * Special case when data length is zero. In this case the LEB
+ * has to be unmapped and mapped somewhere else.
+ */
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
+ }
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ mutex_lock(&ubi->alc_mutex);
+ err = leb_write_lock(ubi, vol_id, lnum);
+ if (err)
+ goto out_mutex;
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ vid_hdr->vol_id = cpu_to_be32(vol_id);
+ vid_hdr->lnum = cpu_to_be32(lnum);
+ vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+ vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+ crc = crc32(UBI_CRC32_INIT, buf, len);
+ vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->data_size = cpu_to_be32(len);
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_crc = cpu_to_be32(crc);
+
+retry:
+ pnum = ubi_wl_get_peb(ubi);
+ if (pnum < 0) {
+ err = pnum;
+ up_read(&ubi->fm_eba_sem);
+ goto out_leb_unlock;
+ }
+
+ dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
+ vol_id, lnum, vol->eba_tbl[lnum], pnum);
+
+ err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+ if (err) {
+ ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
+ vol_id, lnum, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+ if (err) {
+ ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
+ len, pnum);
+ up_read(&ubi->fm_eba_sem);
+ goto write_error;
+ }
+
+ old_pnum = vol->eba_tbl[lnum];
+ vol->eba_tbl[lnum] = pnum;
+ up_read(&ubi->fm_eba_sem);
+
+ if (old_pnum >= 0) {
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
+ if (err)
+ goto out_leb_unlock;
+ }
+
+out_leb_unlock:
+ leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+ mutex_unlock(&ubi->alc_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ if (err != -EIO || !ubi->bad_allowed) {
+ /*
+ * This flash device does not admit of bad eraseblocks or
+ * something nasty and unexpected happened. Switch to read-only
+ * mode just in case.
+ */
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
+ if (err || ++tries > UBI_IO_RETRIES) {
+ ubi_ro_mode(ubi);
+ goto out_leb_unlock;
+ }
+
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ubi_msg(ubi, "try another PEB");
+ goto retry;
+}
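+
+/*
+ * Illustrative example (not part of the driver): atomically replacing the
+ * contents of a LEB, e.g. with an updated configuration blob; @len must be
+ * aligned to @ubi->min_io_size:
+ *
+ *	err = ubi_eba_atomic_leb_change(ubi, vol, lnum, new_data, new_len);
+ *
+ * After an unclean reboot either the old or the new contents are seen, never
+ * a mix, because the new data goes to a fresh PEB before the EBA table entry
+ * is switched over.
+ */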
+
+/**
+ * is_error_sane - check whether a read error is sane.
+ * @err: code of the error happened during reading
+ *
+ * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
+ * cannot read data from the target PEB (an error @err happened). If the error
+ * code is sane, then we treat this error as non-fatal. Otherwise the error is
+ * fatal and UBI will be switched to R/O mode later.
+ *
+ * The idea is that we try not to switch to R/O mode if the read error is
+ * something which suggests there was a real read problem. E.g., %-EIO. Or a
+ * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
+ * mode, simply because we do not know what happened at the MTD level, and we
+ * cannot handle this. E.g., the underlying driver may have become crazy, and
+ * it is safer to switch to R/O mode to preserve the data.
+ *
+ * And bear in mind, this is about reading from the target PEB, i.e. the PEB
+ * which we have just written.
+ */
+static int is_error_sane(int err)
+{
+ if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
+ err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
+ return 1;
+ return 0;
+}
+
+/**
+ * ubi_eba_copy_leb - copy logical eraseblock.
+ * @ubi: UBI device description object
+ * @from: physical eraseblock number from where to copy
+ * @to: physical eraseblock number where to copy
+ * @vid_hdr: VID header of the @from physical eraseblock
+ *
+ * This function copies logical eraseblock from physical eraseblock @from to
+ * physical eraseblock @to. The @vid_hdr buffer may be changed by this
+ * function. Returns:
+ * o %0 in case of success;
+ * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
+ * o a negative error code in case of failure.
+ */
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ int err, vol_id, lnum, data_size, aldata_size, idx;
+ struct ubi_volume *vol;
+ uint32_t crc;
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+ dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
+
+ if (vid_hdr->vol_type == UBI_VID_STATIC) {
+ data_size = be32_to_cpu(vid_hdr->data_size);
+ aldata_size = ALIGN(data_size, ubi->min_io_size);
+ } else
+ data_size = aldata_size =
+ ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
+
+ idx = vol_id2idx(ubi, vol_id);
+ spin_lock(&ubi->volumes_lock);
+ /*
+ * Note, we may race with volume deletion, which means that the volume
+ * this logical eraseblock belongs to might be being deleted. Since the
+ * volume deletion un-maps all the volume's logical eraseblocks, it will
+ * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
+ */
+ vol = ubi->volumes[idx];
+ spin_unlock(&ubi->volumes_lock);
+ if (!vol) {
+ /* No need to do further work, cancel */
+ dbg_wl("volume %d is being removed, cancel", vol_id);
+ return MOVE_CANCEL_RACE;
+ }
+
+ /*
+ * We do not want anybody to write to this logical eraseblock while we
+ * are moving it, so lock it.
+ *
+ * Note, we are using non-waiting locking here, because we cannot sleep
+ * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+ * unmapping the LEB which is mapped to the PEB we are going to move
+ * (@from). This task locks the LEB and goes sleep in the
+ * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ * LEB is already locked, we just do not move it and return
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reasons of the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
+ */
+ err = leb_write_trylock(ubi, vol_id, lnum);
+ if (err) {
+ dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return MOVE_RETRY;
+ }
+
+ /*
+ * The LEB might have been put meanwhile, and the task which put it is
+ * probably waiting on @ubi->move_mutex. No need to continue the work,
+ * cancel it.
+ */
+ if (vol->eba_tbl[lnum] != from) {
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl[lnum]);
+ err = MOVE_CANCEL_RACE;
+ goto out_unlock_leb;
+ }
+
+ /*
+ * OK, now the LEB is locked and we can safely start moving it. Since
+ * this function utilizes the @ubi->peb_buf buffer which is shared
+ * with some other functions - we lock the buffer by taking the
+ * @ubi->buf_mutex.
+ */
+ mutex_lock(&ubi->buf_mutex);
+ dbg_wl("read %d bytes of data", aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading data from PEB %d",
+ err, from);
+ err = MOVE_SOURCE_RD_ERR;
+ goto out_unlock_buf;
+ }
+
+ /*
+ * Now we have got to calculate how much data we have to copy. In
+ * case of a static volume it is fairly easy - the VID header contains
+ * the data size. In case of a dynamic volume it is more difficult - we
+ * have to read the contents, cut 0xFF bytes from the end and copy only
+ * the first part. We must do this to avoid writing 0xFF bytes as it
+ * may have some side-effects. And not only this. It is important not
+ * to include those 0xFFs in the CRC because they may later be filled
+ * with data.
+ */
+ if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
+ aldata_size = data_size =
+ ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
+
+ cond_resched();
+ crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+ cond_resched();
+
+ /*
+ * It may turn out to be that the whole @from physical eraseblock
+ * contains only 0xFF bytes. Then we have to only write the VID header
+ * and do not write any data. This also means we should not set
+ * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
+ */
+ if (data_size > 0) {
+ vid_hdr->copy_flag = 1;
+ vid_hdr->data_size = cpu_to_be32(data_size);
+ vid_hdr->data_crc = cpu_to_be32(crc);
+ }
+ vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+
+ err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ /* Read the VID header back and check if it was written correctly */
+ err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
+ err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_TARGET_BITFLIPS;
+ goto out_unlock_buf;
+ }
+
+ if (data_size > 0) {
+ err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
+ if (err) {
+ if (err == -EIO)
+ err = MOVE_TARGET_WR_ERR;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ /*
+ * We've written the data and are going to read it back to make
+ * sure it was written correctly.
+ */
+ memset(ubi->peb_buf, 0xFF, aldata_size);
+ err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
+ if (err) {
+ if (err != UBI_IO_BITFLIPS) {
+ ubi_warn(ubi, "error %d while reading data back from PEB %d",
+ err, to);
+ if (is_error_sane(err))
+ err = MOVE_TARGET_RD_ERR;
+ } else
+ err = MOVE_TARGET_BITFLIPS;
+ goto out_unlock_buf;
+ }
+
+ cond_resched();
+
+ if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
+ ubi_warn(ubi, "read data back from PEB %d and it is different",
+ to);
+ err = -EINVAL;
+ goto out_unlock_buf;
+ }
+ }
+
+ ubi_assert(vol->eba_tbl[lnum] == from);
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl[lnum] = to;
+ up_read(&ubi->fm_eba_sem);
+
+out_unlock_buf:
+ mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+}
+
+/**
+ * print_rsvd_warning - warn about not having enough reserved PEBs.
+ * @ubi: UBI device description object
+ *
+ * This is a helper function for 'ubi_eba_init()' which is called when UBI
+ * cannot reserve enough PEBs for bad block handling. This function makes a
+ * decision whether we have to print a warning or not. The algorithm is as
+ * follows:
+ * o if this is a new UBI image, then just print the warning
+ * o if this is an UBI image which has already been used for some time, print
+ * a warning only if we can reserve less than 10% of the expected number of
+ * reserved PEBs.
+ *
+ * The idea is that when UBI is used, PEBs become bad, and the reserved pool
+ * of PEBs becomes smaller, which is normal and we do not want to scare users
+ * with a warning every time they attach the MTD device. This was an issue
+ * reported by real users.
+ */
+static void print_rsvd_warning(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ /*
+ * The 1 << 18 (256KiB) number is picked randomly, just a reasonably
+ * large number to distinguish between newly flashed and used images.
+ */
+ if (ai->max_sqnum > (1 << 18)) {
+ int min = ubi->beb_rsvd_level / 10;
+
+ if (!min)
+ min = 1;
+ if (ubi->beb_rsvd_pebs > min)
+ return;
+ }
+
+ ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ if (ubi->corr_peb_count)
+ ubi_warn(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+}
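+
+/*
+ * Worked example of the rule above (hypothetical numbers): with
+ * @ubi->beb_rsvd_level = 40 the threshold is min = 40 / 10 = 4, so on a used
+ * image (max_sqnum > 1 << 18) the warning is printed only if at most 4 PEBs
+ * (@ubi->beb_rsvd_pebs <= 4) could be reserved.
+ */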
+
+/**
+ * self_check_eba - run a self check on the EBA table constructed by fastmap.
+ * @ubi: UBI device description object
+ * @ai_fastmap: UBI attach info object created by fastmap
+ * @ai_scan: UBI attach info object created by scanning
+ *
+ * Returns < 0 in case of an internal error, 0 otherwise.
+ * If a bad EBA table entry was found it will be printed out and
+ * ubi_assert() triggers.
+ */
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan)
+{
+ int i, j, num_volumes, ret = 0;
+ int **scan_eba, **fm_eba;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
+ if (!scan_eba)
+ return -ENOMEM;
+
+ fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
+ if (!fm_eba) {
+ kfree(scan_eba);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
+ GFP_KERNEL);
+ if (!scan_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
+ GFP_KERNEL);
+ if (!fm_eba[i]) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ scan_eba[i][aeb->lnum] = aeb->pnum;
+
+ av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
+ fm_eba[i][aeb->lnum] = aeb->pnum;
+
+ for (j = 0; j < vol->reserved_pebs; j++) {
+ if (scan_eba[i][j] != fm_eba[i][j]) {
+ if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
+ fm_eba[i][j] == UBI_LEB_UNMAPPED)
+ continue;
+
+ ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
+ vol->vol_id, j, fm_eba[i][j],
+ scan_eba[i][j]);
+ ubi_assert(0);
+ }
+ }
+ }
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+
+ kfree(scan_eba[i]);
+ kfree(fm_eba[i]);
+ }
+
+ kfree(scan_eba);
+ kfree(fm_eba);
+ return ret;
+}
+
+/**
+ * ubi_eba_init - initialize the EBA sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int i, j, err, num_volumes;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+ struct ubi_ainf_peb *aeb;
+ struct rb_node *rb;
+
+ dbg_eba("initialize EBA sub-system");
+
+ spin_lock_init(&ubi->ltree_lock);
+ mutex_init(&ubi->alc_mutex);
+ ubi->ltree = RB_ROOT;
+
+ ubi->global_sqnum = ai->max_sqnum + 1;
+ num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
+
+ for (i = 0; i < num_volumes; i++) {
+ vol = ubi->volumes[i];
+ if (!vol)
+ continue;
+
+ cond_resched();
+
+ vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
+ GFP_KERNEL);
+ if (!vol->eba_tbl) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
+
+ av = ubi_find_av(ai, idx2vol_id(ubi, i));
+ if (!av)
+ continue;
+
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ if (aeb->lnum >= vol->reserved_pebs)
+ /*
+ * This may happen in case of an unclean reboot
+ * during re-size.
+ */
+ ubi_move_aeb_to_list(av, aeb, &ai->erase);
+ else
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ }
+ }
+
+ if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+ ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, EBA_RESERVED_PEBS);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= EBA_RESERVED_PEBS;
+ ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
+ if (ubi->bad_allowed) {
+ ubi_calculate_reserved(ubi);
+
+ if (ubi->avail_pebs < ubi->beb_rsvd_level) {
+ /* Not enough free physical eraseblocks */
+ ubi->beb_rsvd_pebs = ubi->avail_pebs;
+ print_rsvd_warning(ubi, ai);
+ } else
+ ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
+
+ ubi->avail_pebs -= ubi->beb_rsvd_pebs;
+ ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
+ }
+
+ dbg_eba("EBA sub-system is initialized");
+ return 0;
+
+out_free:
+ for (i = 0; i < num_volumes; i++) {
+ if (!ubi->volumes[i])
+ continue;
+ kfree(ubi->volumes[i]->eba_tbl);
+ ubi->volumes[i]->eba_tbl = NULL;
+ }
+ return err;
+}
diff --git a/roms/u-boot/drivers/mtd/ubi/fastmap-wl.c b/roms/u-boot/drivers/mtd/ubi/fastmap-wl.c
new file mode 100644
index 000000000..4cb1377c4
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/fastmap-wl.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ */
+
+/**
+ * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
+ * @wrk: the work description object
+ */
+#ifndef __UBOOT__
+static void update_fastmap_work_fn(struct work_struct *wrk)
+#else
+void update_fastmap_work_fn(struct ubi_device *ubi)
+#endif
+{
+#ifndef __UBOOT__
+ struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
+#endif
+
+ ubi_update_fastmap(ubi);
+ spin_lock(&ubi->wl_lock);
+ ubi->fm_work_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * find_anchor_wl_entry - find a wear-leveling entry to be used as the anchor PEB.
+ * @root: the RB-tree where to look for
+ */
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *victim = NULL;
+ int max_ec = UBI_MAX_ERASECOUNTER;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb) {
+ if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
+ victim = e;
+ max_ec = e->ec;
+ }
+ }
+
+ return victim;
+}
+
+/**
+ * return_unused_pool_pebs - returns unused PEBs to the free tree.
+ * @ubi: UBI device description object
+ * @pool: fastmap pool description object
+ */
+static void return_unused_pool_pebs(struct ubi_device *ubi,
+ struct ubi_fm_pool *pool)
+{
+ int i;
+ struct ubi_wl_entry *e;
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ }
+}
+
+static int anchor_pebs_avalible(struct rb_root *root)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e;
+
+ ubi_rb_for_each_entry(p, e, root, u.rb)
+ if (e->pnum < UBI_FM_MAX_START)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+ * @anchor: This PEB will be used as anchor PEB by fastmap
+ *
+ * The function returns a physical erase block with a given maximal number
+ * and removes it from the wl subsystem.
+ * Must be called with wl_lock held!
+ */
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
+{
+ struct ubi_wl_entry *e = NULL;
+
+ if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+ goto out;
+
+ if (anchor)
+ e = find_anchor_wl_entry(&ubi->free);
+ else
+ e = find_mean_wl_entry(ubi, &ubi->free);
+
+ if (!e)
+ goto out;
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /* Remove it from the free list; the wl subsystem no longer knows
+ * this erase block. */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+out:
+ return e;
+}
+
+/**
+ * ubi_refill_pools - refills all fastmap PEB pools.
+ * @ubi: UBI device description object
+ */
+void ubi_refill_pools(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_wl_entry *e;
+ int enough;
+
+ spin_lock(&ubi->wl_lock);
+
+ return_unused_pool_pebs(ubi, wl_pool);
+ return_unused_pool_pebs(ubi, pool);
+
+ wl_pool->size = 0;
+ pool->size = 0;
+
+ for (;;) {
+ enough = 0;
+ if (pool->size < pool->max_size) {
+ if (!ubi->free.rb_node)
+ break;
+
+ e = wl_get_wle(ubi);
+ if (!e)
+ break;
+
+ pool->pebs[pool->size] = e->pnum;
+ pool->size++;
+ } else
+ enough++;
+
+ if (wl_pool->size < wl_pool->max_size) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ wl_pool->pebs[wl_pool->size] = e->pnum;
+ wl_pool->size++;
+ } else
+ enough++;
+
+ if (enough == 2)
+ break;
+ }
+
+ wl_pool->used = 0;
+ pool->used = 0;
+
+ spin_unlock(&ubi->wl_lock);
+}
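+
+/*
+ * Illustrative note: two pools are refilled above because they serve
+ * different consumers - @ubi->fm_pool feeds ubi_wl_get_peb() (user I/O),
+ * while @ubi->fm_wl_pool feeds get_peb_for_wl() (internal wear-leveling).
+ * The WL pool refill additionally stops while @ubi->beb_rsvd_pebs plus a
+ * small margin of free PEBs remains, so bad-block handling is never starved.
+ */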
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example, the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ while (!ubi->free.rb_node && ubi->works_count) {
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int ret, retried = 0;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+
+again:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+
+ /* We also check the WL pool here because at this point we can
+ * refill the WL pool synchronously. */
+ if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ ret = ubi_update_fastmap(ubi);
+ if (ret) {
+ ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
+ down_read(&ubi->fm_eba_sem);
+ return -ENOSPC;
+ }
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ }
+
+ if (pool->used == pool->size) {
+ spin_unlock(&ubi->wl_lock);
+ if (retried) {
+ ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+ ret = -ENOSPC;
+ goto out;
+ }
+ retried = 1;
+ up_read(&ubi->fm_eba_sem);
+ ret = produce_free_peb(ubi);
+ if (ret < 0) {
+ down_read(&ubi->fm_eba_sem);
+ goto out;
+ }
+ goto again;
+ }
+
+ ubi_assert(pool->used < pool->size);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+out:
+ return ret;
+}
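+
+/*
+ * Usage sketch (illustrative only): since ubi_wl_get_peb() returns with
+ * @ubi->fm_eba_sem held in read mode, callers must drop it themselves once
+ * the new mapping is recorded, as the EBA code does:
+ *
+ *	pnum = ubi_wl_get_peb(ubi);
+ *	if (pnum < 0) {
+ *		up_read(&ubi->fm_eba_sem);
+ *		return pnum;
+ *	}
+ *	... write VID header and data to @pnum ...
+ *	vol->eba_tbl[lnum] = pnum;
+ *	up_read(&ubi->fm_eba_sem);
+ */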
+
+/**
+ * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int pnum;
+
+ if (pool->used == pool->size) {
+#ifndef __UBOOT__
+ /* We cannot update the fastmap here because this
+ * function is called in atomic context.
+ * Let's fail here and refill/update it as soon as possible. */
+ if (!ubi->fm_work_scheduled) {
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
+ return NULL;
+#else
+ /*
+ * No work queues in U-Boot, we must do this immediately
+ */
+ update_fastmap_work_fn(ubi);
+#endif
+ }
+
+ pnum = pool->pebs[pool->used++];
+ return ubi->lookuptbl[pnum];
+}
+
+/**
+ * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
+ * @ubi: UBI device description object
+ */
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+{
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ return -ENOMEM;
+ }
+
+ wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ schedule_ubi_work(ubi, wrk);
+ return 0;
+}
+
+/**
+ * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
+ * sub-system.
+ * see: ubi_wl_put_peb()
+ *
+ * @ubi: UBI device description object
+ * @fm_e: physical eraseblock to return
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if this physical eraseblock has to be tortured
+ */
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
+ int lnum, int torture)
+{
+ struct ubi_wl_entry *e;
+ int vol_id, pnum = fm_e->pnum;
+
+ dbg_wl("PEB %d", pnum);
+
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+
+ /* This can happen if we recovered from a fastmap the very
+ * first time and are now writing a new one. In this case the wl system
+ * has never seen any PEB used by the original fastmap.
+ */
+ if (!e) {
+ e = fm_e;
+ ubi_assert(e->ec >= 0);
+ ubi->lookuptbl[pnum] = e;
+ }
+
+ spin_unlock(&ubi->wl_lock);
+
+ vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
+ return schedule_erase(ubi, e, vol_id, lnum, torture);
+}
+
+/**
+ * ubi_is_erase_work - checks whether a work is erase work.
+ * @wrk: The work object to be checked
+ */
+int ubi_is_erase_work(struct ubi_work *wrk)
+{
+ return wrk->func == erase_worker;
+}
+
+static void ubi_fastmap_close(struct ubi_device *ubi)
+{
+ int i;
+
+ return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kfree(ubi->fm->e[i]);
+ }
+ kfree(ubi->fm);
+}
+
+/**
+ * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
+ * See find_mean_wl_entry()
+ *
+ * @ubi: UBI device description object
+ * @e: physical eraseblock to return
+ * @root: RB tree to test against.
+ */
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root)
+{
+ if (e && !ubi->fm_disabled && !ubi->fm &&
+ e->pnum < UBI_FM_MAX_START)
+ e = rb_entry(rb_next(root->rb_node),
+ struct ubi_wl_entry, u.rb);
+
+ return e;
+}
diff --git a/roms/u-boot/drivers/mtd/ubi/fastmap.c b/roms/u-boot/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 000000000..b54b56375
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1658 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2012 Linutronix GmbH
+ * Copyright (c) 2014 sigma star gmbh
+ * Author: Richard Weinberger <richard@nod.at>
+ *
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <u-boot/crc.h>
+#else
+#include <div64.h>
+#include <malloc.h>
+#include <ubi_uboot.h>
+#include <linux/bug.h>
+#endif
+
+#include <linux/compat.h>
+#include <linux/math64.h>
+#include "ubi.h"
+
+/**
+ * init_seen - allocate memory used for debugging.
+ * @ubi: UBI device description object
+ */
+static inline int *init_seen(struct ubi_device *ubi)
+{
+ int *ret;
+
+ if (!ubi_dbg_chk_fastmap(ubi))
+ return NULL;
+
+ ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
+ if (!ret)
+ return ERR_PTR(-ENOMEM);
+
+ return ret;
+}
+
+/**
+ * free_seen - free the seen logic integer array.
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void free_seen(int *seen)
+{
+ kfree(seen);
+}
+
+/**
+ * set_seen - mark a PEB as seen.
+ * @ubi: UBI device description object
+ * @pnum: The PEB to be marked as seen
+ * @seen: integer array of @ubi->peb_count size
+ */
+static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
+{
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return;
+
+ seen[pnum] = 1;
+}
+
+/**
+ * self_check_seen - check whether all PEB have been seen by fastmap.
+ * @ubi: UBI device description object
+ * @seen: integer array of @ubi->peb_count size
+ */
+static int self_check_seen(struct ubi_device *ubi, int *seen)
+{
+ int pnum, ret = 0;
+
+ if (!ubi_dbg_chk_fastmap(ubi) || !seen)
+ return 0;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ if (!seen[pnum] && ubi->lookuptbl[pnum]) {
+ ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+size_t ubi_calc_fm_size(struct ubi_device *ubi)
+{
+ size_t size;
+
+ size = sizeof(struct ubi_fm_sb) +
+ sizeof(struct ubi_fm_hdr) +
+ sizeof(struct ubi_fm_scan_pool) +
+ sizeof(struct ubi_fm_scan_pool) +
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+ (sizeof(struct ubi_fm_eba) +
+ (ubi->peb_count * sizeof(__be32))) +
+ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ return roundup(size, ubi->leb_size);
+}
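+
+/*
+ * Worked example (hypothetical geometry): with peb_count = 1024 the variable
+ * part is 1024 * sizeof(struct ubi_fm_ec) for the erase counters plus
+ * 1024 * sizeof(__be32) for the EBA entries, on top of the fixed super
+ * block, header, two pool descriptors and one ubi_fm_volhdr per possible
+ * volume; roundup() then pads the total to a multiple of @ubi->leb_size, so
+ * the fastmap always occupies whole LEBs.
+ */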
+
+
+/**
+ * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * @ubi: UBI device description object
+ * @vol_id: the VID of the new header
+ *
+ * Returns a new struct ubi_vid_hdr on success.
+ * NULL indicates out of memory.
+ */
+static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
+{
+ struct ubi_vid_hdr *new;
+
+ new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ new->vol_type = UBI_VID_DYNAMIC;
+ new->vol_id = cpu_to_be32(vol_id);
+
+ /* UBI implementations without fastmap support have to delete the
+ * fastmap.
+ */
+ new->compat = UBI_COMPAT_DELETE;
+
+out:
+ return new;
+}
+
+/**
+ * add_aeb - create and add an attach erase block to a given list.
+ * @ai: UBI attach info object
+ * @list: the target list
+ * @pnum: PEB number of the new attach erase block
+ * @ec: erase counter of the new LEB
+ * @scrub: scrub this PEB after attaching
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
+ int pnum, int ec, int scrub)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->ec = ec;
+ aeb->lnum = -1;
+ aeb->scrub = scrub;
+ aeb->copy_flag = aeb->sqnum = 0;
+
+ ai->ec_sum += aeb->ec;
+ ai->ec_count++;
+
+ if (ai->max_ec < aeb->ec)
+ ai->max_ec = aeb->ec;
+
+ if (ai->min_ec > aeb->ec)
+ ai->min_ec = aeb->ec;
+
+ list_add_tail(&aeb->u.list, list);
+
+ return 0;
+}
+
+/**
+ * add_vol - create and add a new volume to ubi_attach_info.
+ * @ai: ubi_attach_info object
+ * @vol_id: VID of the new volume
+ * @used_ebs: number of used EBs
+ * @data_pad: data padding value of the new volume
+ * @vol_type: volume type
+ * @last_eb_bytes: number of bytes in the last LEB
+ *
+ * Returns the new struct ubi_ainf_volume on success.
+ * NULL indicates an error.
+ */
+static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
+ int used_ebs, int data_pad, u8 vol_type,
+ int last_eb_bytes)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ while (*p) {
+ parent = *p;
+ av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (vol_id > av->vol_id)
+ p = &(*p)->rb_left;
+ else if (vol_id < av->vol_id)
+ p = &(*p)->rb_right;
+ else
+ return ERR_PTR(-EINVAL);
+ }
+
+ av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
+ if (!av)
+ goto out;
+
+ av->highest_lnum = av->leb_count = av->used_ebs = 0;
+ av->vol_id = vol_id;
+ av->data_pad = data_pad;
+ av->last_data_size = last_eb_bytes;
+ av->compat = 0;
+ av->vol_type = vol_type;
+ av->root = RB_ROOT;
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = used_ebs;
+
+ dbg_bld("found volume (ID %i)", vol_id);
+
+ rb_link_node(&av->rb, parent, p);
+ rb_insert_color(&av->rb, &ai->volumes);
+
+out:
+ return av;
+}
+
+/**
+ * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
+ * from its original list.
+ * @ai: ubi_attach_info object
+ * @aeb: the to be assigned SEB
+ * @av: target scan volume
+ */
+static void assign_aeb_to_av(struct ubi_attach_info *ai,
+ struct ubi_ainf_peb *aeb,
+ struct ubi_ainf_volume *av)
+{
+ struct ubi_ainf_peb *tmp_aeb;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+
+ p = &av->root.rb_node;
+ while (*p) {
+ parent = *p;
+
+ tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+ if (aeb->lnum != tmp_aeb->lnum) {
+ if (aeb->lnum < tmp_aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ } else
+ break;
+ }
+
+ list_del(&aeb->u.list);
+ av->leb_count++;
+
+ rb_link_node(&aeb->u.rb, parent, p);
+ rb_insert_color(&aeb->u.rb, &av->root);
+}
+
+/**
+ * update_vol - inserts or updates a LEB which was found in a pool.
+ * @ubi: the UBI device object
+ * @ai: attach info object
+ * @av: the volume this LEB belongs to
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct rb_node **p = &av->root.rb_node, *parent = NULL;
+ struct ubi_ainf_peb *aeb, *victim;
+ int cmp_res;
+
+ while (*p) {
+ parent = *p;
+ aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
+
+ if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
+ if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+
+ continue;
+ }
+
+ /* This case can happen if the fastmap gets written
+ * because of a volume change (creation, deletion, ..).
+ * Then a PEB can be within the persistent EBA and the pool.
+ */
+ if (aeb->pnum == new_aeb->pnum) {
+ ubi_assert(aeb->lnum == new_aeb->lnum);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
+ if (cmp_res < 0)
+ return cmp_res;
+
+ /* new_aeb is newer */
+ if (cmp_res & 1) {
+ victim = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!victim)
+ return -ENOMEM;
+
+ victim->ec = aeb->ec;
+ victim->pnum = aeb->pnum;
+ list_add_tail(&victim->u.list, &ai->erase);
+
+ if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
+ av->last_data_size =
+ be32_to_cpu(new_vh->data_size);
+
+ dbg_bld("vol %i: AEB %i's PEB %i is the newer",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+
+ aeb->ec = new_aeb->ec;
+ aeb->pnum = new_aeb->pnum;
+ aeb->copy_flag = new_vh->copy_flag;
+ aeb->scrub = new_aeb->scrub;
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ /* new_aeb is older */
+ } else {
+ dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
+ av->vol_id, aeb->lnum, new_aeb->pnum);
+ list_add_tail(&new_aeb->u.list, &ai->erase);
+ }
+
+ return 0;
+ }
+ /* This LEB is new, let's add it to the volume */
+
+ if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
+ av->highest_lnum = be32_to_cpu(new_vh->lnum);
+ av->last_data_size = be32_to_cpu(new_vh->data_size);
+ }
+
+ if (av->vol_type == UBI_STATIC_VOLUME)
+ av->used_ebs = be32_to_cpu(new_vh->used_ebs);
+
+ av->leb_count++;
+
+ rb_link_node(&new_aeb->u.rb, parent, p);
+ rb_insert_color(&new_aeb->u.rb, &av->root);
+
+ return 0;
+}
+
+/**
+ * process_pool_aeb - handle a non-empty PEB found in a pool.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @new_vh: the volume header derived from new_aeb
+ * @new_aeb: the AEB to be examined
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ struct ubi_vid_hdr *new_vh,
+ struct ubi_ainf_peb *new_aeb)
+{
+ struct ubi_ainf_volume *av, *tmp_av = NULL;
+ struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
+ int found = 0;
+
+ if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
+ be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+
+ return 0;
+ }
+
+ /* Find the volume this SEB belongs to */
+ while (*p) {
+ parent = *p;
+ tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
+
+ if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
+ p = &(*p)->rb_left;
+ else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
+ p = &(*p)->rb_right;
+ else {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found)
+ av = tmp_av;
+ else {
+ ubi_err(ubi, "orphaned volume in fastmap pool!");
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ return UBI_BAD_FASTMAP;
+ }
+
+ ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
+
+ return update_vol(ubi, ai, av, new_vh, new_aeb);
+}
+
+/**
+ * unmap_peb - unmap a PEB.
+ * If fastmap detects a free PEB in the pool it has to check whether
+ * this PEB has been unmapped after writing the fastmap.
+ *
+ * @ai: UBI attach info object
+ * @pnum: The PEB to be unmapped
+ */
+static void unmap_peb(struct ubi_attach_info *ai, int pnum)
+{
+ struct ubi_ainf_volume *av;
+ struct rb_node *node, *node2;
+ struct ubi_ainf_peb *aeb;
+
+ for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
+ av = rb_entry(node, struct ubi_ainf_volume, rb);
+
+ for (node2 = rb_first(&av->root); node2;
+ node2 = rb_next(node2)) {
+ aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
+ if (aeb->pnum == pnum) {
+ rb_erase(&aeb->u.rb, &av->root);
+ av->leb_count--;
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * scan_pool - scans a pool for changed (i.e. no longer empty) PEBs.
+ * @ubi: UBI device object
+ * @ai: attach info object
+ * @pebs: an array of all PEB numbers in the pool to be scanned
+ * @pool_size: size of the pool (number of entries in @pebs)
+ * @max_sqnum: pointer to the maximal sequence number
+ * @free: list of PEBs which are most likely free (and go into @ai->free)
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the pool is unusable.
+ * < 0 indicates an internal error.
+ */
+static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
+ struct list_head *free)
+{
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_ainf_peb *new_aeb;
+ int i, pnum, err, ret = 0;
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech)
+ return -ENOMEM;
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ kfree(ech);
+ return -ENOMEM;
+ }
+
+ dbg_bld("scanning fastmap pool: size = %i", pool_size);
+
+ /*
+ * Now scan all PEBs in the pool to find changes which have been made
+ * after the creation of the fastmap
+ */
+ for (i = 0; i < pool_size; i++) {
+ int scrub = 0;
+ int image_seq;
+
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err(ubi, "bad PEB in fastmap pool!");
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
+ pnum, err);
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ } else if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ image_seq = be32_to_cpu(ech->image_seq);
+
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto out;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
+ unsigned long long ec = be64_to_cpu(ech->ec);
+ unmap_peb(ai, pnum);
+ dbg_bld("Adding PEB to free: %i", pnum);
+ if (err == UBI_IO_FF_BITFLIPS)
+ add_aeb(ai, free, pnum, ec, 1);
+ else
+ add_aeb(ai, free, pnum, ec, 0);
+ continue;
+ } else if (err == 0 || err == UBI_IO_BITFLIPS) {
+ dbg_bld("Found non empty PEB:%i in pool", pnum);
+
+ if (err == UBI_IO_BITFLIPS)
+ scrub = 1;
+
+ new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
+ GFP_KERNEL);
+ if (!new_aeb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ new_aeb->ec = be64_to_cpu(ech->ec);
+ new_aeb->pnum = pnum;
+ new_aeb->lnum = be32_to_cpu(vh->lnum);
+ new_aeb->sqnum = be64_to_cpu(vh->sqnum);
+ new_aeb->copy_flag = vh->copy_flag;
+ new_aeb->scrub = scrub;
+
+ if (*max_sqnum < new_aeb->sqnum)
+ *max_sqnum = new_aeb->sqnum;
+
+ err = process_pool_aeb(ubi, ai, vh, new_aeb);
+ if (err) {
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ } else {
+ /* We are paranoid and fall back to scanning mode */
+ ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
+ ret = err > 0 ? UBI_BAD_FASTMAP : err;
+ goto out;
+ }
+ }
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+ return ret;
+}
+
+/**
+ * count_fastmap_pebs - Counts the PEBs found by fastmap.
+ * @ai: The UBI attach info object
+ */
+static int count_fastmap_pebs(struct ubi_attach_info *ai)
+{
+ struct ubi_ainf_peb *aeb;
+ struct ubi_ainf_volume *av;
+ struct rb_node *rb1, *rb2;
+ int n = 0;
+
+ list_for_each_entry(aeb, &ai->erase, u.list)
+ n++;
+
+ list_for_each_entry(aeb, &ai->free, u.list)
+ n++;
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
+ n++;
+
+ return n;
+}
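+
+/*
+ * Editor's sketch (illustrative, not part of the original driver): the
+ * invariant the count above is checked against in ubi_attach_fastmap()
+ * below -- every PEB is either tracked by the attach info, bad, or holds
+ * fastmap data itself; anything else means fastmap leaked PEBs.
+ */
+static inline int fm_peb_count_is_consistent(struct ubi_device *ubi,
+ struct ubi_attach_info *ai, struct ubi_fastmap_layout *fm)
+{
+ return count_fastmap_pebs(ai) ==
+ ubi->peb_count - ai->bad_peb_count - fm->used_blocks;
+}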
+
+/**
+ * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info object
+ * @fm: the fastmap to be attached
+ *
+ * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
+ * < 0 indicates an internal error.
+ */
+static int ubi_attach_fastmap(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct list_head used, free;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ unsigned long long max_sqnum = 0;
+ void *fm_raw = ubi->fm_buf;
+
+ INIT_LIST_HEAD(&used);
+ INIT_LIST_HEAD(&free);
+ ai->min_ec = UBI_MAX_ERASECOUNTER;
+
+ fmsb = (struct ubi_fm_sb *)(fm_raw);
+ ai->max_sqnum = fmsb->sqnum;
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl_wl);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl->size);
+ wl_pool_size = be16_to_cpu(fmpl_wl->size);
+ fm->max_pool_size = be16_to_cpu(fmpl->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err(ubi, "bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err(ubi, "bad maximal WL pool size: %i",
+ fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 0);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+ be32_to_cpu(fmec->ec), 1);
+ }
+
+ ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
+ ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
+ be32_to_cpu(fmvhdr->used_ebs),
+ be32_to_cpu(fmvhdr->data_pad),
+ fmvhdr->vol_type,
+ be32_to_cpu(fmvhdr->last_eb_bytes));
+
+ if (!av)
+ goto fail_bad;
+ if (PTR_ERR(av) == -EINVAL) {
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+ goto fail_bad;
+ }
+
+ ai->vols_found++;
+ if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
+ ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ aeb = NULL;
+ list_for_each_entry(tmp_aeb, &used, u.list) {
+ if (tmp_aeb->pnum == pnum) {
+ aeb = tmp_aeb;
+ break;
+ }
+ }
+
+ if (!aeb) {
+ ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
+ goto fail_bad;
+ }
+
+ aeb->lnum = j;
+
+ if (av->highest_lnum <= aeb->lnum)
+ av->highest_lnum = aeb->lnum;
+
+ assign_aeb_to_av(ai, aeb, av);
+
+ dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
+ aeb->pnum, aeb->lnum, av->vol_id);
+ }
+ }
+
+ ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
+ if (ret)
+ goto fail;
+
+ if (max_sqnum > ai->max_sqnum)
+ ai->max_sqnum = max_sqnum;
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->free);
+
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
+ list_move_tail(&tmp_aeb->u.list, &ai->erase);
+
+ ubi_assert(list_empty(&free));
+
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+#ifndef __UBOOT__
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+#else
+ if (count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks) {
+ WARN_ON(1);
+ goto fail_bad;
+ }
+#endif
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+ list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+ list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+ }
+
+ return ret;
+}
+
+/**
+ * ubi_scan_fastmap - scan the fastmap.
+ * @ubi: UBI device object
+ * @ai: UBI attach info to be filled
+ * @fm_anchor: The fastmap starts at this PEB
+ *
+ * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
+ * UBI_BAD_FASTMAP if one was found but is not usable.
+ * < 0 indicates an internal error.
+ */
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_ec_hdr *ech;
+ struct ubi_fastmap_layout *fm;
+ int i, used_blocks, pnum, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ down_write(&ubi->fm_protect);
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
+ if (!fmsb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm) {
+ ret = -ENOMEM;
+ kfree(fmsb);
+ goto out;
+ }
+
+ ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err(ubi, "bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err(ubi, "number of fastmap blocks is invalid: %i",
+ used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
+ fm_size, ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ech) {
+ ret = -ENOMEM;
+ goto free_fm_sb;
+ }
+
+ vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vh) {
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ int image_seq;
+
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err(ubi, "wrong image seq:%d instead of %d",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+ ubi->leb_start, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ kfree(fmsb);
+ fmsb = NULL;
+
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err(ubi, "fastmap data CRC is invalid");
+ ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
+ tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ for (i = 0; i < used_blocks; i++) {
+ struct ubi_wl_entry *e;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e) {
+ while (i--)
+ kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
+
+ ret = -ENOMEM;
+ goto free_hdr;
+ }
+
+ e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
+ e->ec = be32_to_cpu(fmsb2->block_ec[i]);
+ fm->e[i] = e;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg(ubi, "attached by fastmap");
+ ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_msg(ubi, "fastmap WL pool size: %d",
+ ubi->fm_wl_pool.max_size);
+ ubi->fm_disabled = 0;
+
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+out:
+ up_write(&ubi->fm_protect);
+ if (ret == UBI_BAD_FASTMAP)
+ ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+ ubi_free_vid_hdr(ubi, vh);
+ kfree(ech);
+free_fm_sb:
+ kfree(fmsb);
+ kfree(fm);
+ goto out;
+}
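+
+/*
+ * Editor's sketch of the CRC handshake shared by the read path above and
+ * the write path below (illustrative helper, assuming the crc32() and
+ * struct ubi_fm_sb layout already used in this file): the CRC always
+ * covers the whole fastmap with the data_crc field itself zeroed.
+ */
+static inline uint32_t fm_calc_data_crc(struct ubi_fm_sb *fmsb,
+ void *fm_raw, size_t fm_size)
+{
+ __be32 saved = fmsb->data_crc;
+ uint32_t crc;
+
+ fmsb->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, fm_raw, fm_size);
+ fmsb->data_crc = saved;
+
+ return crc;
+}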
+
+/**
+ * ubi_write_fastmap - writes a fastmap.
+ * @ubi: UBI device object
+ * @new_fm: the fastmap to be written
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int ubi_write_fastmap(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *new_fm)
+{
+ size_t fm_pos = 0;
+ void *fm_raw;
+ struct ubi_fm_sb *fmsb;
+ struct ubi_fm_hdr *fmh;
+ struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
+ struct ubi_fm_ec *fec;
+ struct ubi_fm_volhdr *fvh;
+ struct ubi_fm_eba *feba;
+ struct ubi_wl_entry *wl_e;
+ struct ubi_volume *vol;
+ struct ubi_vid_hdr *avhdr, *dvhdr;
+ struct ubi_work *ubi_wrk;
+ struct rb_node *tmp_rb;
+ int ret, i, j, free_peb_count, used_peb_count, vol_count;
+ int scrub_peb_count, erase_peb_count;
+ int *seen_pebs = NULL;
+
+ fm_raw = ubi->fm_buf;
+ memset(ubi->fm_buf, 0, ubi->fm_size);
+
+ avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!avhdr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
+ if (!dvhdr) {
+ ret = -ENOMEM;
+ goto out_kfree;
+ }
+
+ seen_pebs = init_seen(ubi);
+ if (IS_ERR(seen_pebs)) {
+ ret = PTR_ERR(seen_pebs);
+ goto out_kfree;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ spin_lock(&ubi->wl_lock);
+
+ fmsb = (struct ubi_fm_sb *)fm_raw;
+ fm_pos += sizeof(*fmsb);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
+ fmsb->version = UBI_FM_FMT_VERSION;
+ fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
+ /* the max sqnum will be filled in while *reading* the fastmap */
+ fmsb->sqnum = 0;
+
+ fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
+ free_peb_count = 0;
+ used_peb_count = 0;
+ scrub_peb_count = 0;
+ erase_peb_count = 0;
+ vol_count = 0;
+
+ fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl);
+ fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl->size = cpu_to_be16(ubi->fm_pool.size);
+ fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
+
+ for (i = 0; i < ubi->fm_pool.size; i++) {
+ fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
+ }
+
+ fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl_wl);
+ fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
+ fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
+ fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
+
+ for (i = 0; i < ubi->fm_wl_pool.size; i++) {
+ fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
+ set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
+ }
+
+ ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ free_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->free_peb_count = cpu_to_be32(free_peb_count);
+
+ ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+
+ ubi_for_each_protected_peb(ubi, i, wl_e) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->used_peb_count = cpu_to_be32(used_peb_count);
+
+ ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ scrub_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
+
+ list_for_each_entry(ubi_wrk, &ubi->works, list) {
+ if (ubi_is_erase_work(ubi_wrk)) {
+ wl_e = ubi_wrk->e;
+ ubi_assert(wl_e);
+
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ set_seen(ubi, wl_e->pnum, seen_pebs);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ erase_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
+ fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
+
+ for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
+ vol = ubi->volumes[i];
+
+ if (!vol)
+ continue;
+
+ vol_count++;
+
+ fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fvh);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
+ fvh->vol_id = cpu_to_be32(vol->vol_id);
+ fvh->vol_type = vol->vol_type;
+ fvh->used_ebs = cpu_to_be32(vol->used_ebs);
+ fvh->data_pad = cpu_to_be32(vol->data_pad);
+ fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
+
+ ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
+ vol->vol_type == UBI_STATIC_VOLUME);
+
+ feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
+ ubi_assert(fm_pos <= ubi->fm_size);
+
+ for (j = 0; j < vol->reserved_pebs; j++)
+ feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
+
+ feba->reserved_pebs = cpu_to_be32(j);
+ feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
+ }
+ fmh->vol_count = cpu_to_be32(vol_count);
+ fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
+
+ avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ avhdr->lnum = 0;
+
+ spin_unlock(&ubi->wl_lock);
+ spin_unlock(&ubi->volumes_lock);
+
+ dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
+ if (ret) {
+ ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
+ goto out_kfree;
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
+ set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
+ fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
+ }
+
+ fmsb->data_crc = 0;
+ fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
+ ubi->fm_size));
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ dvhdr->lnum = cpu_to_be32(i);
+ dbg_bld("writing fastmap data to PEB %i sqnum %llu",
+ new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
+ ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
+ if (ret) {
+ ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ for (i = 0; i < new_fm->used_blocks; i++) {
+ ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
+ new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
+ if (ret) {
+ ubi_err(ubi, "unable to write fastmap to PEB %i!",
+ new_fm->e[i]->pnum);
+ goto out_kfree;
+ }
+ }
+
+ ubi_assert(new_fm);
+ ubi->fm = new_fm;
+
+ ret = self_check_seen(ubi, seen_pebs);
+ dbg_bld("fastmap written!");
+
+out_kfree:
+ ubi_free_vid_hdr(ubi, avhdr);
+ ubi_free_vid_hdr(ubi, dvhdr);
+ free_seen(seen_pebs);
+out:
+ return ret;
+}
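+
+/*
+ * Editor's note: the on-flash layout serialized by ubi_write_fastmap()
+ * above, reconstructed from the fm_pos arithmetic (all multi-byte fields
+ * are big-endian):
+ *
+ * struct ubi_fm_sb
+ * struct ubi_fm_hdr
+ * struct ubi_fm_scan_pool (pool)
+ * struct ubi_fm_scan_pool (WL pool)
+ * struct ubi_fm_ec * free_peb_count
+ * struct ubi_fm_ec * used_peb_count (incl. protected PEBs)
+ * struct ubi_fm_ec * scrub_peb_count
+ * struct ubi_fm_ec * erase_peb_count
+ * per volume: struct ubi_fm_volhdr,
+ * struct ubi_fm_eba + __be32 pnum[reserved_pebs]
+ */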
+
+/**
+ * erase_block - Manually erase a PEB.
+ * @ubi: UBI device object
+ * @pnum: PEB to be erased
+ *
+ * Returns the new EC value on success, < 0 indicates an internal error.
+ */
+static int erase_block(struct ubi_device *ubi, int pnum)
+{
+ int ret;
+ struct ubi_ec_hdr *ec_hdr;
+ long long ec;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (ret < 0)
+ goto out;
+ else if (ret && ret != UBI_IO_BITFLIPS) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ubi_io_sync_erase(ubi, pnum, 0);
+ if (ret < 0)
+ goto out;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ ec += ret;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ec_hdr->ec = cpu_to_be64(ec);
+ ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
+ if (ret < 0)
+ goto out;
+
+ ret = ec;
+out:
+ kfree(ec_hdr);
+ return ret;
+}
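+
+/*
+ * Editor's note -- typical erase_block() usage (mirrors the anchor PEB
+ * reuse in ubi_update_fastmap() below); the return value is the new
+ * erase counter, not just a status code:
+ *
+ * ret = erase_block(ubi, old_fm->e[0]->pnum);
+ * if (ret < 0)
+ * goto err;
+ * new_fm->e[0] = old_fm->e[0];
+ * new_fm->e[0]->ec = ret;
+ */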
+
+/**
+ * invalidate_fastmap - destroys a fastmap.
+ * @ubi: UBI device object
+ *
+ * This function ensures that upon next UBI attach a full scan
+ * is issued. We need this if UBI is about to write a new fastmap
+ * but is unable to do so. In this case we have two options:
+ * a) make sure that the current fastmap will not be used upon
+ * attach time and continue, or b) fall back to RO mode to keep the
+ * current fastmap in a valid state.
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+static int invalidate_fastmap(struct ubi_device *ubi)
+{
+ int ret;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_wl_entry *e;
+ struct ubi_vid_hdr *vh = NULL;
+
+ if (!ubi->fm)
+ return 0;
+
+ ubi->fm = NULL;
+
+ ret = -ENOMEM;
+ fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+ if (!fm)
+ goto out;
+
+ vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
+ if (!vh)
+ goto out_free_fm;
+
+ ret = -ENOSPC;
+ e = ubi_wl_get_fm_peb(ubi, 1);
+ if (!e)
+ goto out_free_fm;
+
+ /*
+ * Create a fake fastmap such that UBI will fall back
+ * to scanning mode.
+ */
+ vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+ ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
+ if (ret < 0) {
+ ubi_wl_put_fm_peb(ubi, e, 0, 0);
+ goto out_free_fm;
+ }
+
+ fm->used_blocks = 1;
+ fm->e[0] = e;
+
+ ubi->fm = fm;
+
+out:
+ ubi_free_vid_hdr(ubi, vh);
+ return ret;
+
+out_free_fm:
+ kfree(fm);
+ goto out;
+}
+
+/**
+ * return_fm_pebs - returns all PEBs used by a fastmap back to the
+ * WL sub-system.
+ * @ubi: UBI device object
+ * @fm: fastmap layout object
+ */
+static void return_fm_pebs(struct ubi_device *ubi,
+ struct ubi_fastmap_layout *fm)
+{
+ int i;
+
+ if (!fm)
+ return;
+
+ for (i = 0; i < fm->used_blocks; i++) {
+ if (fm->e[i]) {
+ ubi_wl_put_fm_peb(ubi, fm->e[i], i,
+ fm->to_be_tortured[i]);
+ fm->e[i] = NULL;
+ }
+ }
+}
+
+/**
+ * ubi_update_fastmap - writes an updated fastmap; called by UBI if a volume
+ * changes or a fastmap pool becomes full.
+ * @ubi: UBI device object
+ *
+ * Returns 0 on success, < 0 indicates an internal error.
+ */
+int ubi_update_fastmap(struct ubi_device *ubi)
+{
+ int ret, i, j;
+ struct ubi_fastmap_layout *new_fm, *old_fm;
+ struct ubi_wl_entry *tmp_e;
+
+ down_write(&ubi->fm_protect);
+
+ ubi_refill_pools(ubi);
+
+ if (ubi->ro_mode || ubi->fm_disabled) {
+ up_write(&ubi->fm_protect);
+ return 0;
+ }
+
+ ret = ubi_ensure_anchor_pebs(ubi);
+ if (ret) {
+ up_write(&ubi->fm_protect);
+ return ret;
+ }
+
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
+ up_write(&ubi->fm_protect);
+ return -ENOMEM;
+ }
+
+ new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
+ old_fm = ubi->fm;
+ ubi->fm = NULL;
+
+ if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
+ ubi_err(ubi, "fastmap too large");
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 0);
+ spin_unlock(&ubi->wl_lock);
+
+ if (!tmp_e) {
+ if (old_fm && old_fm->e[i]) {
+ ret = erase_block(ubi, old_fm->e[i]->pnum);
+ if (ret < 0) {
+ ubi_err(ubi, "could not erase old fastmap PEB");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j],
+ j, 0);
+ new_fm->e[j] = NULL;
+ }
+ goto err;
+ }
+ new_fm->e[i] = old_fm->e[i];
+ old_fm->e[i] = NULL;
+ } else {
+ ubi_err(ubi, "could not get any free erase block");
+
+ for (j = 1; j < i; j++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
+ new_fm->e[j] = NULL;
+ }
+
+ ret = -ENOSPC;
+ goto err;
+ }
+ } else {
+ new_fm->e[i] = tmp_e;
+
+ if (old_fm && old_fm->e[i]) {
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
+ }
+ }
+ }
+
+ /* Old fastmap is larger than the new one */
+ if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
+ for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
+ old_fm->to_be_tortured[i]);
+ old_fm->e[i] = NULL;
+ }
+ }
+
+ spin_lock(&ubi->wl_lock);
+ tmp_e = ubi_wl_get_fm_peb(ubi, 1);
+ spin_unlock(&ubi->wl_lock);
+
+ if (old_fm) {
+ /* no fresh anchor PEB was found, reuse the old one */
+ if (!tmp_e) {
+ ret = erase_block(ubi, old_fm->e[0]->pnum);
+ if (ret < 0) {
+ ubi_err(ubi, "could not erase old anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i],
+ i, 0);
+ new_fm->e[i] = NULL;
+ }
+ goto err;
+ }
+ new_fm->e[0] = old_fm->e[0];
+ new_fm->e[0]->ec = ret;
+ old_fm->e[0] = NULL;
+ } else {
+ /* we've got a new anchor PEB, return the old one */
+ ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
+ old_fm->to_be_tortured[0]);
+ new_fm->e[0] = tmp_e;
+ old_fm->e[0] = NULL;
+ }
+ } else {
+ if (!tmp_e) {
+ ubi_err(ubi, "could not find any anchor PEB");
+
+ for (i = 1; i < new_fm->used_blocks; i++) {
+ ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
+ new_fm->e[i] = NULL;
+ }
+
+ ret = -ENOSPC;
+ goto err;
+ }
+ new_fm->e[0] = tmp_e;
+ }
+
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
+ ret = ubi_write_fastmap(ubi, new_fm);
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
+
+ if (ret)
+ goto err;
+
+out_unlock:
+ up_write(&ubi->fm_protect);
+ kfree(old_fm);
+ return ret;
+
+err:
+ ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
+
+ ret = invalidate_fastmap(ubi);
+ if (ret < 0) {
+ ubi_err(ubi, "Unable to invalidiate current fastmap!");
+ ubi_ro_mode(ubi);
+ } else {
+ return_fm_pebs(ubi, old_fm);
+ return_fm_pebs(ubi, new_fm);
+ ret = 0;
+ }
+
+ kfree(new_fm);
+ goto out_unlock;
+}
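+
+/*
+ * Editor's note on the error path above: a failed fastmap write is not
+ * fatal. The current fastmap is invalidated so that the next attach
+ * falls back to a full scan and 0 is returned; only a failed
+ * invalidation forces the device into read-only mode.
+ */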
diff --git a/roms/u-boot/drivers/mtd/ubi/io.c b/roms/u-boot/drivers/mtd/ubi/io.c
new file mode 100644
index 000000000..b8b878b91
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/io.c
@@ -0,0 +1,1434 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * UBI input/output sub-system.
+ *
+ * This sub-system provides a uniform way to work with all kinds of
+ * underlying MTD devices. It also implements handy functions for reading and
+ * writing UBI headers.
+ *
+ * We are trying to have a paranoid mindset and not to trust what we read
+ * from the flash media in order to be more secure and robust. So this
+ * sub-system validates every single header it reads from the flash media.
+ *
+ * Some words about how the eraseblock headers are stored.
+ *
+ * The erase counter header is always stored at offset zero. By default, the
+ * VID header is stored after the EC header at the closest aligned offset
+ * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
+ * header at the closest aligned offset. But this default layout may be
+ * changed. For example, for different reasons (e.g., optimization) UBI may be
+ * asked to put the VID header at a further offset, and even at an unaligned
+ * offset. Of course, if the offset of the VID header is unaligned, UBI adds
+ * proper padding in front of it. Data offset may also be changed but it has to
+ * be aligned.
+ *
+ * About minimal I/O units. In general, UBI assumes a flash device model where
+ * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
+ * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
+ * @ubi->mtd->writesize field. But as an exception, UBI admits of using another
+ * (smaller) minimal I/O unit size for EC and VID headers to make it possible
+ * to do different optimizations.
+ *
+ * This is extremely useful in case of NAND flashes which admit of several
+ * write operations to one NAND page. In this case UBI can fit EC and VID
+ * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal
+ * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
+ * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI
+ * users.
+ *
+ * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
+ * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
+ * headers.
+ *
+ * Q: why not just treat the sub-page as the minimal I/O unit of this flash
+ * device, e.g., make @ubi->min_io_size = 512 in the example above?
+ *
+ * A: because when writing a sub-page, MTD still writes a full 2K page but the
+ * bytes which are not relevant to the sub-page are 0xFF. So, basically,
+ * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
+ * Thus, we prefer to use sub-pages only for EC and VID headers.
+ *
+ * As it was noted above, the VID header may start at a non-aligned offset.
+ * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
+ * the VID header may reside at offset 1984 which is the last 64 bytes of the
+ * last sub-page (EC header is always at offset zero). This causes some
+ * difficulties when reading and writing VID headers.
+ *
+ * Suppose we have a 64-byte buffer and we read a VID header into it. We change
+ * the data and want to write this VID header out. As we can only write in
+ * 512-byte chunks, we have to allocate one more buffer and copy our VID header
+ * to offset 448 of this buffer.
+ *
+ * The I/O sub-system does the following trick in order to avoid this extra
+ * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
+ * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
+ * When the VID header is being written out, it shifts the VID header pointer
+ * back and writes the whole sub-page.
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <u-boot/crc.h>
+#else
+#include <hexdump.h>
+#include <ubi_uboot.h>
+#endif
+
+#include "ubi.h"
+
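+/*
+ * Editor's illustrative sketch of the VID header buffer trick described
+ * in the header comment above (hypothetical helper; in UBI proper this
+ * is effectively what ubi_zalloc_vid_hdr() does):
+ */
+static inline struct ubi_vid_hdr *vid_hdr_buf_sketch(const struct ubi_device *ubi)
+{
+ /* allocate the full @ubi->vid_hdr_alsize bytes ... */
+ void *buf = kzalloc(ubi->vid_hdr_alsize, GFP_KERNEL);
+
+ if (!buf)
+ return NULL;
+ /* ... but hand out a pointer to the header inside the buffer */
+ return buf + ubi->vid_hdr_shift;
+}
+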
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr);
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr);
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
+
+/**
+ * ubi_io_read - read data from a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer where to store the read data
+ * @pnum: physical eraseblock number to read from
+ * @offset: offset within the physical eraseblock from where to read
+ * @len: how many bytes to read
+ *
+ * This function reads data from offset @offset of physical eraseblock @pnum
+ * and stores the read data in the @buf buffer. The following return codes are
+ * possible:
+ *
+ * o %0 if all the requested data were successfully read;
+ * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
+ * correctable bit-flips were detected; this is harmless but may indicate
+ * that this eraseblock may become bad soon (but does not have to);
+ * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
+ * example it can be an ECC error in case of NAND; this most probably means
+ * that the data is corrupted;
+ * o %-EIO if some I/O error occurred;
+ * o other negative error codes in case of other errors.
+ */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len)
+{
+ int err, retries = 0;
+ size_t read;
+ loff_t addr;
+
+ dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(len > 0);
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err)
+ return err;
+
+ /*
+ * Deliberately corrupt the buffer to improve robustness. Indeed, if we
+ * do not do this, the following may happen:
+ * 1. The buffer contains data from a previous operation, e.g., read from
+ * another PEB previously. The data looks as expected, so if we
+ * just do not read anything and return, the caller would not
+ * notice it. E.g., if we are reading a VID header, the buffer may
+ * contain a valid VID header from another PEB.
+ * 2. The driver is buggy and returns success or -EBADMSG or
+ * -EUCLEAN, but it does not actually put any data into the buffer.
+ *
+ * This may confuse UBI or upper layers - they may think the buffer
+ * contains valid data while in fact it is just old data. This is
+ * especially possible because UBI (and UBIFS) relies on CRC, and
+ * treats data as correct even in case of ECC errors if the CRC is
+ * correct.
+ *
+ * Try to prevent this situation by changing the first byte of the
+ * buffer.
+ */
+ *((uint8_t *)buf) ^= 0xFF;
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+retry:
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err) {
+ const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
+
+ if (mtd_is_bitflip(err)) {
+ /*
+ * -EUCLEAN is reported if there was a bit-flip which
+ * was corrected, so this is harmless.
+ *
+ * We do not report it here unless debugging is
+ * enabled. A corresponding message will be printed
+ * later, when the PEB has been scrubbed.
+ */
+ ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
+ pnum);
+ ubi_assert(len == read);
+ return UBI_IO_BITFLIPS;
+ }
+
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
+ err, errstr, len, pnum, offset, read);
+ yield();
+ goto retry;
+ }
+
+ ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
+ dump_stack();
+
+ /*
+ * The driver should never return -EBADMSG if it failed to read
+ * all the requested data. But some buggy drivers might do
+ * this, so we change it to -EIO.
+ */
+ if (read != len && mtd_is_eccerr(err)) {
+ ubi_assert(0);
+ err = -EIO;
+ }
+ } else {
+ ubi_assert(len == read);
+
+ if (ubi_dbg_is_bitflip(ubi)) {
+ dbg_gen("bit-flip (emulated)");
+ err = UBI_IO_BITFLIPS;
+ }
+ }
+
+ return err;
+}
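+
+/*
+ * Editor's sketch of a typical ubi_io_read() caller (compare scan_pool()
+ * in fastmap.c): %UBI_IO_BITFLIPS is a success case which merely flags
+ * the PEB for scrubbing. Illustrative helper only.
+ */
+static inline int read_peb_data_sketch(const struct ubi_device *ubi,
+ void *buf, int pnum, int len, int *scrub)
+{
+ int err = ubi_io_read(ubi, buf, pnum, ubi->leb_start, len);
+
+ if (err == UBI_IO_BITFLIPS) {
+ *scrub = 1; /* data is intact, but schedule scrubbing */
+ return 0;
+ }
+
+ return err; /* 0 on success, negative on failure */
+}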
+
+/**
+ * ubi_io_write - write data to a physical eraseblock.
+ * @ubi: UBI device description object
+ * @buf: buffer with the data to write
+ * @pnum: physical eraseblock number to write to
+ * @offset: offset within the physical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes @len bytes of data from buffer @buf to offset @offset
+ * of physical eraseblock @pnum. If all the data were successfully written,
+ * zero is returned. If an error occurred, this function returns a negative
+ * error code. If %-EIO is returned, the physical eraseblock most probably went
+ * bad.
+ *
+ * Note, in case of an error, it is possible that something was still written
+ * to the flash media, but it may be garbage.
+ */
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+
+ dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+ ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
+ ubi_assert(offset % ubi->hdrs_min_io_size == 0);
+ ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err)
+ return err;
+
+ /* The area we are writing to has to contain all 0xFF bytes */
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ if (err)
+ return err;
+
+ if (offset >= ubi->leb_start) {
+ /*
+ * We write to the data area of the physical eraseblock. Make
+ * sure it has valid EC and VID headers.
+ */
+ err = self_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err;
+ err = self_check_peb_vid_hdr(ubi, pnum);
+ if (err)
+ return err;
+ }
+
+ if (ubi_dbg_is_write_failure(ubi)) {
+ ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
+ dump_stack();
+ return -EIO;
+ }
+
+ addr = (loff_t)pnum * ubi->peb_size + offset;
+ err = mtd_write(ubi->mtd, addr, len, &written, buf);
+ if (err) {
+ ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
+ dump_stack();
+ ubi_dump_flash(ubi, pnum, offset, len);
+ } else {
+ ubi_assert(written == len);
+ }
+
+ if (!err) {
+ err = self_check_write(ubi, buf, pnum, offset, len);
+ if (err)
+ return err;
+
+ /*
+ * Since we always write sequentially, the rest of the PEB has
+ * to contain only 0xFF bytes.
+ */
+ offset += len;
+ len = ubi->peb_size - offset;
+ if (len)
+ err = ubi_self_check_all_ff(ubi, pnum, offset, len);
+ }
+
+ return err;
+}
+
+/**
+ * erase_callback - MTD erasure call-back.
+ * @ei: MTD erase information object.
+ *
+ * Note, even though the MTD erase interface is asynchronous, all the current
+ * implementations are synchronous anyway.
+ */
+static void erase_callback(struct erase_info *ei)
+{
+ wake_up_interruptible((wait_queue_head_t *)ei->priv);
+}
+
+/**
+ * do_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to erase
+ *
+ * This function synchronously erases physical eraseblock @pnum and returns
+ * zero in case of success and a negative error code in case of failure. If
+ * %-EIO is returned, the physical eraseblock most probably went bad.
+ */
+static int do_sync_erase(struct ubi_device *ubi, int pnum)
+{
+ int err, retries = 0;
+ struct erase_info ei;
+ wait_queue_head_t wq;
+
+ dbg_io("erase PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+retry:
+ init_waitqueue_head(&wq);
+ memset(&ei, 0, sizeof(struct erase_info));
+
+ ei.mtd = ubi->mtd;
+ ei.addr = (loff_t)pnum * ubi->peb_size;
+ ei.len = ubi->peb_size;
+ ei.callback = erase_callback;
+ ei.priv = (unsigned long)&wq;
+
+ err = mtd_erase(ubi->mtd, &ei);
+ if (err) {
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error %d while erasing PEB %d, retry",
+ err, pnum);
+ yield();
+ goto retry;
+ }
+ ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
+ dump_stack();
+ return err;
+ }
+
+ err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
+ ei.state == MTD_ERASE_FAILED);
+ if (err) {
+ ubi_err(ubi, "interrupted PEB %d erasure", pnum);
+ return -EINTR;
+ }
+
+ if (ei.state == MTD_ERASE_FAILED) {
+ if (retries++ < UBI_IO_RETRIES) {
+ ubi_warn(ubi, "error while erasing PEB %d, retry",
+ pnum);
+ yield();
+ goto retry;
+ }
+ ubi_err(ubi, "cannot erase PEB %d", pnum);
+ dump_stack();
+ return -EIO;
+ }
+
+ err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
+ if (err)
+ return err;
+
+ if (ubi_dbg_is_erase_failure(ubi)) {
+ ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Patterns to write to a physical eraseblock when torturing it */
+static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
+
+/**
+ * torture_peb - test a supposedly bad physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to test
+ *
+ * This function returns %-EIO if the physical eraseblock did not pass the
+ * test, a positive number of erase operations done if the test was
+ * successfully passed, and other negative error codes in case of other errors.
+ */
+static int torture_peb(struct ubi_device *ubi, int pnum)
+{
+ int err, i, patt_count;
+
+ ubi_msg(ubi, "run torture test for PEB %d", pnum);
+ patt_count = ARRAY_SIZE(patterns);
+ ubi_assert(patt_count > 0);
+
+ mutex_lock(&ubi->buf_mutex);
+ for (i = 0; i < patt_count; i++) {
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ goto out;
+
+ /* Make sure the PEB contains only 0xFF bytes */
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
+ if (err == 0) {
+ ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
+ pnum);
+ err = -EIO;
+ goto out;
+ }
+
+ /* Write a pattern and check it */
+ memset(ubi->peb_buf, patterns[i], ubi->peb_size);
+ err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
+ err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
+ if (err)
+ goto out;
+
+ err = ubi_check_pattern(ubi->peb_buf, patterns[i],
+ ubi->peb_size);
+ if (err == 0) {
+ ubi_err(ubi, "pattern %x checking failed for PEB %d",
+ patterns[i], pnum);
+ err = -EIO;
+ goto out;
+ }
+ }
+
+ err = patt_count;
+ ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
+
+out:
+ mutex_unlock(&ubi->buf_mutex);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
+ /*
+ * If a bit-flip or data integrity error was detected, the test
+ * has not passed because it happened on a freshly erased
+ * physical eraseblock which means something is wrong with it.
+ */
+ ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
+ pnum);
+ err = -EIO;
+ }
+ return err;
+}
+
+/**
+ * nor_erase_prepare - prepare a NOR flash PEB for erasure.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to prepare
+ *
+ * NOR flashes, or at least some of them, have a peculiar embedded PEB erasure
+ * algorithm: the PEB is first filled with zeroes, then it is erased. And
+ * filling with zeroes starts from the end of the PEB. This was observed with
+ * Spansion S29GL512N NOR flash.
+ *
+ * This means that in case of a power cut we may end up with intact data at the
+ * beginning of the PEB, and all zeroes at the end of PEB. In other words, the
+ * EC and VID headers are OK, but a large chunk of data at the end of PEB is
+ * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
+ * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
+ *
+ * This function is called before erasing NOR PEBs and it zeroes out EC and VID
+ * magic numbers in order to invalidate them and prevent the failures. Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
+{
+ int err;
+ size_t written;
+ loff_t addr;
+ uint32_t data = 0;
+ struct ubi_ec_hdr ec_hdr;
+
+ /*
+ * Note, we cannot generally define VID header buffers on stack,
+ * because of the way we deal with these buffers (see the header
+ * comment in this file). But we know this is a NOR-specific piece of
+ * code, so we can do this. But yes, this is error-prone and we should
+ * (pre-)allocate a VID header buffer instead.
+ */
+ struct ubi_vid_hdr vid_hdr;
+
+ /*
+ * If VID or EC is valid, we have to corrupt them before erasing.
+ * It is important to first invalidate the EC header, and then the VID
+ * header. Otherwise a power cut may lead to valid EC header and
+ * invalid VID header, in which case UBI will treat this PEB as
+ * corrupted and will try to preserve it, and print scary warnings.
+ */
+ addr = (loff_t)pnum * ubi->peb_size;
+ err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
+ }
+
+ err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
+ if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
+ err != UBI_IO_FF) {
+ addr += ubi->vid_hdr_aloffset;
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
+ if (err)
+ goto error;
+ }
+ return 0;
+
+error:
+ /*
+ * The PEB contains a valid VID or EC header, but we cannot invalidate
+ * it. Supposedly the flash media or the driver is screwed up, so
+ * return an error.
+ */
+ ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
+ ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
+ return -EIO;
+}
+
+/**
+ * ubi_io_sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to erase
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function synchronously erases physical eraseblock @pnum. If @torture
+ * flag is not zero, the physical eraseblock is checked by means of writing
+ * different patterns to it and reading them back. If the torturing is enabled,
+ * the physical eraseblock is erased more than once.
+ *
+ * This function returns the number of erasures made in case of success, %-EIO
+ * if the erasure failed or the torturing test failed, and other negative error
+ * codes in case of other errors. Note, %-EIO means that the physical
+ * eraseblock is bad.
+ */
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
+{
+ int err, ret = 0;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = self_check_not_bad(ubi, pnum);
+ if (err != 0)
+ return err;
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ if (ubi->nor_flash) {
+ err = nor_erase_prepare(ubi, pnum);
+ if (err)
+ return err;
+ }
+
+ if (torture) {
+ ret = torture_peb(ubi, pnum);
+ if (ret < 0)
+ return ret;
+ }
+
+ err = do_sync_erase(ubi, pnum);
+ if (err)
+ return err;
+
+ return ret + 1;
+}
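+
+/*
+ * Editor's note -- typical usage (mirrors erase_block() in fastmap.c):
+ * the positive return value is the number of erasures performed, which
+ * the caller adds to the PEB's erase counter:
+ *
+ * err = ubi_io_sync_erase(ubi, pnum, 0);
+ * if (err < 0)
+ * return err;
+ * ec += err;
+ * ec_hdr->ec = cpu_to_be64(ec);
+ */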
+
+/**
+ * ubi_io_is_bad - check if a physical eraseblock is bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns a positive number if the physical eraseblock is bad,
+ * zero if not, and a negative error code if an error occurred.
+ */
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
+{
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->bad_allowed) {
+ int ret;
+
+ ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (ret < 0)
+ ubi_err(ubi, "error %d while checking if PEB %d is bad",
+ ret, pnum);
+ else if (ret)
+ dbg_io("PEB %d is bad", pnum);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_io_mark_bad - mark a physical eraseblock as bad.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to mark
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ struct mtd_info *mtd = ubi->mtd;
+
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ if (ubi->ro_mode) {
+ ubi_err(ubi, "read-only mode");
+ return -EROFS;
+ }
+
+ if (!ubi->bad_allowed)
+ return 0;
+
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ if (err)
+ ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
+ return err;
+}
+
+/**
+ * validate_ec_hdr - validate an erase counter header.
+ * @ubi: UBI device description object
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header is OK, and %1 if
+ * not.
+ */
+static int validate_ec_hdr(const struct ubi_device *ubi,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ long long ec;
+ int vid_hdr_offset, leb_start;
+
+ ec = be64_to_cpu(ec_hdr->ec);
+ vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
+ leb_start = be32_to_cpu(ec_hdr->data_offset);
+
+ if (ec_hdr->version != UBI_VERSION) {
+ ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
+ UBI_VERSION, (int)ec_hdr->version);
+ goto bad;
+ }
+
+ if (vid_hdr_offset != ubi->vid_hdr_offset) {
+ ubi_err(ubi, "bad VID header offset %d, expected %d",
+ vid_hdr_offset, ubi->vid_hdr_offset);
+ goto bad;
+ }
+
+ if (leb_start != ubi->leb_start) {
+ ubi_err(ubi, "bad data offset %d, expected %d",
+ leb_start, ubi->leb_start);
+ goto bad;
+ }
+
+ if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
+ ubi_err(ubi, "bad erase counter %lld", ec);
+ goto bad;
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad EC header");
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_ec_hdr - read and check an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to read from
+ * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
+ * header
+ * @verbose: be verbose if the header is corrupted or was not found
+ *
+ * This function reads erase counter header from physical eraseblock @pnum and
+ * stores it in @ec_hdr. This function also checks CRC checksum of the read
+ * erase counter header. The following codes may be returned:
+ *
+ * o %0 if the CRC checksum is correct and the header was successfully read;
+ * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
+ * and corrected by the flash driver; this is harmless but may indicate that
+ * this eraseblock may become bad soon (but not necessarily);
+ * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
+ * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
+ * a data integrity error (uncorrectable ECC error in case of NAND);
+ * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty);
+ * o a negative error code in case of failure.
+ */
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose)
+{
+ int err, read_err;
+ uint32_t crc, magic, hdr_crc;
+
+ dbg_io("read EC header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (read_err) {
+ if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
+
+ /*
+ * We read all the data, but either a correctable bit-flip
+ * occurred, or MTD reported a data integrity error
+ * (uncorrectable ECC error in case of NAND). The former is
+ * harmless, the latter may mean that the read data is
+ * corrupted. But we have a CRC check-sum and we will detect
+ * this. If the EC header is still OK, we just report it as
+ * a bit-flip, to force scrubbing.
+ */
+ }
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+
+ /*
+ * The magic field is wrong. Let's check if we have read all
+ * 0xFF. If yes, this physical eraseblock is assumed to be
+ * empty.
+ */
+ if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
+ /* The physical eraseblock is supposedly empty */
+ if (verbose)
+ ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ /*
+ * This is not a valid erase counter header, and these are not
+ * 0xFF bytes. Report that the header is corrupted.
+ */
+ if (verbose) {
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_dump_ec_hdr(ec_hdr);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_ec_hdr(ec_hdr);
+ }
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ /* And of course validate what has just been read from the media */
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ /*
+ * If there was %-EBADMSG, but the header CRC is still OK, report about
+ * a bit-flip to force scrubbing on this PEB.
+ */
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
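+
+/*
+ * Editor's sketch of the usual dispatch on the return codes above
+ * (compare scan_pool() in fastmap.c); illustrative helper only.
+ */
+static inline int ec_hdr_read_usable_sketch(int err, int *scrub, int *empty)
+{
+ *scrub = *empty = 0;
+
+ switch (err) {
+ case 0:
+ return 1; /* header OK */
+ case UBI_IO_BITFLIPS:
+ *scrub = 1; /* header OK, but the PEB needs scrubbing */
+ return 1;
+ case UBI_IO_FF:
+ case UBI_IO_FF_BITFLIPS:
+ *empty = 1; /* the PEB is supposedly empty */
+ return 1;
+ default:
+ return 0; /* UBI_IO_BAD_HDR(_EBADMSG) or a negative errno */
+ }
+}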
+
+/**
+ * ubi_io_write_ec_hdr - write an erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to write to
+ * @ec_hdr: the erase counter header to write
+ *
+ * This function writes the erase counter header described by @ec_hdr to
+ * physical eraseblock @pnum. It also fills most fields of @ec_hdr before
+ * writing, so the caller does not have to fill them; only the @ec_hdr->ec
+ * field must be filled by the caller.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock most probably
+ * went bad.
+ */
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t crc;
+
+ dbg_io("write EC header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
+ ec_hdr->version = UBI_VERSION;
+ ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
+ ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
+ ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ ec_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
+ if (err)
+ return err;
+
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_EC_WRITE))
+ return -EROFS;
+
+ err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
+ return err;
+}
+
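A minimal caller sketch (hypothetical helper): per the comment above, only @ec is set by the caller; magic, version, offsets, image sequence number and CRC are all filled in by ubi_io_write_ec_hdr() itself.

static int write_fresh_ec_hdr(struct ubi_device *ubi, int pnum,
			      struct ubi_ec_hdr *ec_hdr, long long ec)
{
	/* zero the header so the padding fields stay zeroes on flash */
	memset(ec_hdr, 0, UBI_EC_HDR_SIZE);
	ec_hdr->ec = cpu_to_be64(ec);
	return ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
}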
+/**
+ * validate_vid_hdr - validate a volume identifier header.
+ * @ubi: UBI device description object
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function checks the data stored in the volume identifier header
+ * @vid_hdr. Returns zero if the VID header is OK and %1 if not.
+ */
+static int validate_vid_hdr(const struct ubi_device *ubi,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int vol_type = vid_hdr->vol_type;
+ int copy_flag = vid_hdr->copy_flag;
+ int vol_id = be32_to_cpu(vid_hdr->vol_id);
+ int lnum = be32_to_cpu(vid_hdr->lnum);
+ int compat = vid_hdr->compat;
+ int data_size = be32_to_cpu(vid_hdr->data_size);
+ int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
+ int data_pad = be32_to_cpu(vid_hdr->data_pad);
+ int data_crc = be32_to_cpu(vid_hdr->data_crc);
+ int usable_leb_size = ubi->leb_size - data_pad;
+
+ if (copy_flag != 0 && copy_flag != 1) {
+ ubi_err(ubi, "bad copy_flag");
+ goto bad;
+ }
+
+ if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
+ data_pad < 0) {
+ ubi_err(ubi, "negative values");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "bad vol_id");
+ goto bad;
+ }
+
+ if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
+ ubi_err(ubi, "bad compat");
+ goto bad;
+ }
+
+ if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
+ compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
+ compat != UBI_COMPAT_REJECT) {
+ ubi_err(ubi, "bad compat");
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ ubi_err(ubi, "bad vol_type");
+ goto bad;
+ }
+
+ if (data_pad >= ubi->leb_size / 2) {
+ ubi_err(ubi, "bad data_pad");
+ goto bad;
+ }
+
+ if (vol_type == UBI_VID_STATIC) {
+ /*
+		 * Although from a high-level point of view static volumes may
+		 * contain zero bytes of data, no VID header can contain
+		 * zero in these fields, because empty volumes do not have
+		 * mapped logical eraseblocks.
+ */
+ if (used_ebs == 0) {
+ ubi_err(ubi, "zero used_ebs");
+ goto bad;
+ }
+ if (data_size == 0) {
+ ubi_err(ubi, "zero data_size");
+ goto bad;
+ }
+ if (lnum < used_ebs - 1) {
+ if (data_size != usable_leb_size) {
+ ubi_err(ubi, "bad data_size");
+ goto bad;
+ }
+ } else if (lnum == used_ebs - 1) {
+ if (data_size == 0) {
+ ubi_err(ubi, "bad data_size at last LEB");
+ goto bad;
+ }
+ } else {
+ ubi_err(ubi, "too high lnum");
+ goto bad;
+ }
+ } else {
+ if (copy_flag == 0) {
+ if (data_crc != 0) {
+ ubi_err(ubi, "non-zero data CRC");
+ goto bad;
+ }
+ if (data_size != 0) {
+ ubi_err(ubi, "non-zero data_size");
+ goto bad;
+ }
+ } else {
+ if (data_size == 0) {
+ ubi_err(ubi, "zero data_size of copy");
+ goto bad;
+ }
+ }
+ if (used_ebs != 0) {
+ ubi_err(ubi, "bad used_ebs");
+ goto bad;
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "bad VID header");
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ return 1;
+}
+
+/**
+ * ubi_io_read_vid_hdr - read and check a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to read from
+ * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
+ * identifier header
+ * @verbose: be verbose if the header is corrupted or wasn't found
+ *
+ * This function reads the volume identifier header from physical eraseblock
+ * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
+ * volume identifier header. The error codes are the same as in
+ * 'ubi_io_read_ec_hdr()'.
+ *
+ * Note, the implementation of this function is also very similar to
+ * 'ubi_io_read_ec_hdr()', so refer to the commentaries there.
+ */
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int verbose)
+{
+ int err, read_err;
+ uint32_t crc, magic, hdr_crc;
+ void *p;
+
+ dbg_io("read VID header from PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
+ return read_err;
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ if (mtd_is_eccerr(read_err))
+ return UBI_IO_BAD_HDR_EBADMSG;
+
+ if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
+ if (verbose)
+ ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ if (!read_err)
+ return UBI_IO_FF;
+ else
+ return UBI_IO_FF_BITFLIPS;
+ }
+
+ if (verbose) {
+ ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_dump_vid_hdr(vid_hdr);
+ }
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
+ return UBI_IO_BAD_HDR;
+ }
+
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+
+ if (hdr_crc != crc) {
+ if (verbose) {
+ ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_dump_vid_hdr(vid_hdr);
+ }
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ if (!read_err)
+ return UBI_IO_BAD_HDR;
+ else
+ return UBI_IO_BAD_HDR_EBADMSG;
+ }
+
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err(ubi, "validation failed for PEB %d", pnum);
+ return -EINVAL;
+ }
+
+ return read_err ? UBI_IO_BITFLIPS : 0;
+}
+
+/**
+ * ubi_io_write_vid_hdr - write a volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to write to
+ * @vid_hdr: the volume identifier header to write
+ *
+ * This function writes the volume identifier header described by @vid_hdr to
+ * physical eraseblock @pnum. This function automatically fills the
+ * @vid_hdr->magic and the @vid_hdr->version fields, calculates the header
+ * CRC checksum and stores it in @vid_hdr->hdr_crc.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If %-EIO is returned, the physical eraseblock probably went
+ * bad.
+ */
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ int err;
+ uint32_t crc;
+ void *p;
+
+ dbg_io("write VID header to PEB %d", pnum);
+ ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
+
+ err = self_check_peb_ec_hdr(ubi, pnum);
+ if (err)
+ return err;
+
+ vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
+ vid_hdr->version = UBI_VERSION;
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ vid_hdr->hdr_crc = cpu_to_be32(crc);
+
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
+ if (err)
+ return err;
+
+ if (ubi_dbg_power_cut(ubi, POWER_CUT_VID_WRITE))
+ return -EROFS;
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ return err;
+}
+
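A caller-side sketch (hypothetical helper, example values): a caller such as the EBA sub-system prepares the addressing fields, while magic, version and hdr_crc are filled by ubi_io_write_vid_hdr() above; @compat is 0 because this is a user volume (see validate_vid_hdr()).

static int write_user_vid_hdr(struct ubi_device *ubi, int pnum,
			      struct ubi_vid_hdr *vid_hdr, int vol_id,
			      int lnum, unsigned long long sqnum)
{
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->compat = 0;			/* user volume */
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->sqnum = cpu_to_be64(sqnum);
	return ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
}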
+/**
+ * self_check_not_bad - ensure that a physical eraseblock is not bad.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number to check
+ *
+ * This function returns zero if the physical eraseblock is good, %-EINVAL if
+ * it is bad and a negative error code if an error occurred.
+ */
+static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ err = ubi_io_is_bad(ubi, pnum);
+ if (!err)
+ return err;
+
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ dump_stack();
+ return err > 0 ? -EINVAL : err;
+}
+
+/**
+ * self_check_ec_hdr - check if an erase counter header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the erase counter header belongs to
+ * @ec_hdr: the erase counter header to check
+ *
+ * This function returns zero if the erase counter header contains valid
+ * values, and %-EINVAL if not.
+ */
+static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_ec_hdr *ec_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ magic = be32_to_cpu(ec_hdr->magic);
+ if (magic != UBI_EC_HDR_MAGIC) {
+ ubi_err(ubi, "bad magic %#08x, must be %#08x",
+ magic, UBI_EC_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_ec_hdr(ubi, ec_hdr);
+ if (err) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
+ * self_check_peb_ec_hdr - check erase counter header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the erase counter header is all right and
+ * a negative error code if not or if an error occurred.
+ */
+static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto exit;
+
+ crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
+ crc, hdr_crc);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_ec_hdr(ec_hdr);
+ dump_stack();
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = self_check_ec_hdr(ubi, pnum, ec_hdr);
+
+exit:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * self_check_vid_hdr - check that a volume identifier header is all right.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock number the volume identifier header belongs to
+ * @vid_hdr: the volume identifier header to check
+ *
+ * This function returns zero if the volume identifier header is all right, and
+ * %-EINVAL if not.
+ */
+static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
+ const struct ubi_vid_hdr *vid_hdr)
+{
+ int err;
+ uint32_t magic;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ magic = be32_to_cpu(vid_hdr->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
+ magic, pnum, UBI_VID_HDR_MAGIC);
+ goto fail;
+ }
+
+ err = validate_vid_hdr(ubi, vid_hdr);
+ if (err) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ goto fail;
+ }
+
+ return err;
+
+fail:
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ return -EINVAL;
+
+}
+
+/**
+ * self_check_peb_vid_hdr - check volume identifier header.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ *
+ * This function returns zero if the volume identifier header is all right,
+ * and a negative error code if not or if an error occurred.
+ */
+static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+{
+ int err;
+ uint32_t crc, hdr_crc;
+ struct ubi_vid_hdr *vid_hdr;
+ void *p;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ p = (char *)vid_hdr - ubi->vid_hdr_shift;
+ err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+ ubi->vid_hdr_alsize);
+ if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
+ goto exit;
+
+	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+ hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+ if (hdr_crc != crc) {
+ ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_dump_vid_hdr(vid_hdr);
+ dump_stack();
+ err = -EINVAL;
+ goto exit;
+ }
+
+ err = self_check_vid_hdr(ubi, pnum, vid_hdr);
+
+exit:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+}
+
+/**
+ * self_check_write - make sure write succeeded.
+ * @ubi: UBI device description object
+ * @buf: buffer with data which were written
+ * @pnum: physical eraseblock number the data were written to
+ * @offset: offset within the physical eraseblock the data were written to
+ * @len: how many bytes were written
+ *
+ * This function reads data which were recently written and compares them with
+ * the original data buffer - the data have to match. Returns zero if the data
+ * match and a negative error code if not or in case of failure.
+ */
+static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
+{
+ int err, i;
+ size_t read;
+ void *buf1;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ if (!buf1) {
+ ubi_err(ubi, "cannot allocate memory to check writes");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
+ if (err && !mtd_is_bitflip(err))
+ goto out_free;
+
+ for (i = 0; i < len; i++) {
+ uint8_t c = ((uint8_t *)buf)[i];
+ uint8_t c1 = ((uint8_t *)buf1)[i];
+#if !defined(CONFIG_UBI_SILENCE_MSG)
+		/* dump at most 128 bytes, but never read past the buffers */
+		int dump_len = min_t(int, 128, len - i);
+#endif
+
+ if (c == c1)
+ continue;
+
+ ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
+ pnum, offset, len);
+#if !defined(CONFIG_UBI_SILENCE_MSG)
+ ubi_msg(ubi, "data differ at position %d", i);
+ ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
+ buf + i, dump_len, 1);
+ ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1,
+ buf1 + i, dump_len, 1);
+#endif
+ dump_stack();
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ vfree(buf1);
+ return 0;
+
+out_free:
+ vfree(buf1);
+ return err;
+}
+
+/**
+ * ubi_self_check_all_ff - check that a region of flash is empty.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @offset: the starting offset within the physical eraseblock to check
+ * @len: the length of the region to check
+ *
+ * This function returns zero if only 0xFF bytes are present at offset
+ * @offset of the physical eraseblock @pnum, and a negative error code if not
+ * or if an error occurred.
+ */
+int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
+{
+ size_t read;
+ int err;
+ void *buf;
+ loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
+
+ if (!ubi_dbg_chk_io(ubi))
+ return 0;
+
+ buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
+ if (!buf) {
+ ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
+ return 0;
+ }
+
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
+ if (err && !mtd_is_bitflip(err)) {
+ ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
+ goto error;
+ }
+
+ err = ubi_check_pattern(buf, 0xFF, len);
+ if (err == 0) {
+ ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
+ goto fail;
+ }
+
+ vfree(buf);
+ return 0;
+
+fail:
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_msg(ubi, "hex dump of the %d-%d region", offset, offset + len);
+ print_hex_dump("", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
+ err = -EINVAL;
+error:
+ dump_stack();
+ vfree(buf);
+ return err;
+}
diff --git a/roms/u-boot/drivers/mtd/ubi/kapi.c b/roms/u-boot/drivers/mtd/ubi/kapi.c
new file mode 100644
index 000000000..41680cdad
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/kapi.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* This file mostly implements UBI kernel API functions */
+
+#ifndef __UBOOT__
+#include <dm/devres.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
+#include <asm/div64.h>
+#else
+#include <ubi_uboot.h>
+#endif
+#include <linux/err.h>
+
+#include "ubi.h"
+
+/**
+ * ubi_do_get_device_info - get information about UBI device.
+ * @ubi: UBI device description object
+ * @di: the information is stored here
+ *
+ * This function is the same as 'ubi_get_device_info()', but it assumes the UBI
+ * device is locked and cannot disappear.
+ */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di)
+{
+ di->ubi_num = ubi->ubi_num;
+ di->leb_size = ubi->leb_size;
+ di->leb_start = ubi->leb_start;
+ di->min_io_size = ubi->min_io_size;
+ di->max_write_size = ubi->max_write_size;
+ di->ro_mode = ubi->ro_mode;
+#ifndef __UBOOT__
+ di->cdev = ubi->cdev.dev;
+#endif
+}
+EXPORT_SYMBOL_GPL(ubi_do_get_device_info);
+
+/**
+ * ubi_get_device_info - get information about UBI device.
+ * @ubi_num: UBI device number
+ * @di: the information is stored here
+ *
+ * This function returns %0 in case of success, %-EINVAL if the UBI device
+ * number is invalid, and %-ENODEV if there is no such UBI device.
+ */
+int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
+{
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ ubi_do_get_device_info(ubi, di);
+ ubi_put_device(ubi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_get_device_info);
+
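A short usage sketch (hypothetical function, example device number): clients typically query the geometry once and size their I/O buffers from it.

static int ubi0_leb_size_sketch(void)
{
	struct ubi_device_info di;
	int err = ubi_get_device_info(0, &di);

	if (err)
		return err;		/* -EINVAL or -ENODEV */
	/* di.leb_size is the natural buffer size for whole-LEB I/O */
	return di.leb_size;
}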
+/**
+ * ubi_do_get_volume_info - get information about UBI volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @vi: the information is stored here
+ */
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi)
+{
+ vi->vol_id = vol->vol_id;
+ vi->ubi_num = ubi->ubi_num;
+ vi->size = vol->reserved_pebs;
+ vi->used_bytes = vol->used_bytes;
+ vi->vol_type = vol->vol_type;
+ vi->corrupted = vol->corrupted;
+ vi->upd_marker = vol->upd_marker;
+ vi->alignment = vol->alignment;
+ vi->usable_leb_size = vol->usable_leb_size;
+ vi->name_len = vol->name_len;
+ vi->name = vol->name;
+ vi->cdev = vol->cdev.dev;
+}
+
+/**
+ * ubi_get_volume_info - get information about UBI volume.
+ * @desc: volume descriptor
+ * @vi: the information is stored here
+ */
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi)
+{
+ ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi);
+}
+EXPORT_SYMBOL_GPL(ubi_get_volume_info);
+
+/**
+ * ubi_open_volume - open UBI volume.
+ * @ubi_num: UBI device number
+ * @vol_id: volume ID
+ * @mode: open mode
+ *
+ * The @mode parameter specifies if the volume should be opened in read-only
+ * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that
+ * nobody else will be able to open this volume. UBI allows many volume
+ * readers and one writer at a time.
+ *
+ * If a static volume is being opened for the first time since boot, it will be
+ * checked by this function, which means it will be fully read and the CRC
+ * checksum of each logical eraseblock will be checked.
+ *
+ * This function returns volume descriptor in case of success and a negative
+ * error code in case of failure.
+ */
+struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
+{
+ int err;
+ struct ubi_volume_desc *desc;
+ struct ubi_device *ubi;
+ struct ubi_volume *vol;
+
+ dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ if (mode != UBI_READONLY && mode != UBI_READWRITE &&
+ mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * First of all, we have to get the UBI device to prevent its removal.
+ */
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+ err = -EINVAL;
+ goto out_put_ubi;
+ }
+
+ desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
+ if (!desc) {
+ err = -ENOMEM;
+ goto out_put_ubi;
+ }
+
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto out_free;
+
+ spin_lock(&ubi->volumes_lock);
+ vol = ubi->volumes[vol_id];
+ if (!vol)
+ goto out_unlock;
+
+ err = -EBUSY;
+ switch (mode) {
+ case UBI_READONLY:
+ if (vol->exclusive)
+ goto out_unlock;
+ vol->readers += 1;
+ break;
+
+ case UBI_READWRITE:
+ if (vol->exclusive || vol->writers > 0)
+ goto out_unlock;
+ vol->writers += 1;
+ break;
+
+ case UBI_EXCLUSIVE:
+ if (vol->exclusive || vol->writers || vol->readers ||
+ vol->metaonly)
+ goto out_unlock;
+ vol->exclusive = 1;
+ break;
+
+ case UBI_METAONLY:
+ if (vol->metaonly || vol->exclusive)
+ goto out_unlock;
+ vol->metaonly = 1;
+ break;
+ }
+ get_device(&vol->dev);
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ desc->vol = vol;
+ desc->mode = mode;
+
+ mutex_lock(&ubi->ckvol_mutex);
+ if (!vol->checked && !vol->skip_check) {
+ /* This is the first open - check the volume */
+ err = ubi_check_volume(ubi, vol_id);
+ if (err < 0) {
+ mutex_unlock(&ubi->ckvol_mutex);
+ ubi_close_volume(desc);
+ return ERR_PTR(err);
+ }
+ if (err == 1) {
+ ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
+ vol_id, ubi->ubi_num);
+ vol->corrupted = 1;
+ }
+ vol->checked = 1;
+ }
+ mutex_unlock(&ubi->ckvol_mutex);
+
+ return desc;
+
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ module_put(THIS_MODULE);
+out_free:
+ kfree(desc);
+out_put_ubi:
+ ubi_put_device(ubi);
+ ubi_err(ubi, "cannot open device %d, volume %d, error %d",
+ ubi_num, vol_id, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume);
+
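Because failures come back as ERR_PTR-encoded codes, callers must test the returned descriptor with IS_ERR() rather than against NULL. A minimal sketch (device and volume numbers are examples):

static struct ubi_volume_desc *open_vol_sketch(void)
{
	struct ubi_volume_desc *desc = ubi_open_volume(0, 1, UBI_READONLY);

	if (IS_ERR(desc))
		return NULL;	/* e.g. -ENODEV, -EINVAL or -EBUSY */
	return desc;
}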
+/**
+ * ubi_open_volume_nm - open UBI volume by name.
+ * @ubi_num: UBI device number
+ * @name: volume name
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by name.
+ */
+struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
+ int mode)
+{
+ int i, vol_id = -1, len;
+ struct ubi_device *ubi;
+ struct ubi_volume_desc *ret;
+
+ dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode);
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len > UBI_VOL_NAME_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ spin_lock(&ubi->volumes_lock);
+ /* Walk all volumes of this UBI device */
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ struct ubi_volume *vol = ubi->volumes[i];
+
+ if (vol && len == vol->name_len && !strcmp(name, vol->name)) {
+ vol_id = i;
+ break;
+ }
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ if (vol_id >= 0)
+ ret = ubi_open_volume(ubi_num, vol_id, mode);
+ else
+ ret = ERR_PTR(-ENODEV);
+
+ /*
+ * We should put the UBI device even in case of success, because
+ * 'ubi_open_volume()' took a reference as well.
+ */
+ ubi_put_device(ubi);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
+
+#ifndef __UBOOT__
+/**
+ * ubi_open_volume_path - open UBI volume by its character device node path.
+ * @pathname: volume character device node path
+ * @mode: open mode
+ *
+ * This function is similar to 'ubi_open_volume()', but opens a volume by the
+ * path to its character device node.
+ */
+struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
+{
+ int error, ubi_num, vol_id, mod;
+ struct inode *inode;
+ struct path path;
+
+ dbg_gen("open volume %s, mode %d", pathname, mode);
+
+ if (!pathname || !*pathname)
+ return ERR_PTR(-EINVAL);
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return ERR_PTR(error);
+
+ inode = d_backing_inode(path.dentry);
+ mod = inode->i_mode;
+ ubi_num = ubi_major2num(imajor(inode));
+ vol_id = iminor(inode) - 1;
+ path_put(&path);
+
+ if (!S_ISCHR(mod))
+ return ERR_PTR(-EINVAL);
+ if (vol_id >= 0 && ubi_num >= 0)
+ return ubi_open_volume(ubi_num, vol_id, mode);
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(ubi_open_volume_path);
+#endif
+
+/**
+ * ubi_close_volume - close UBI volume.
+ * @desc: volume descriptor
+ */
+void ubi_close_volume(struct ubi_volume_desc *desc)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("close device %d, volume %d, mode %d",
+ ubi->ubi_num, vol->vol_id, desc->mode);
+
+ spin_lock(&ubi->volumes_lock);
+ switch (desc->mode) {
+ case UBI_READONLY:
+ vol->readers -= 1;
+ break;
+ case UBI_READWRITE:
+ vol->writers -= 1;
+ break;
+ case UBI_EXCLUSIVE:
+ vol->exclusive = 0;
+ break;
+ case UBI_METAONLY:
+ vol->metaonly = 0;
+ break;
+ }
+ vol->ref_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ kfree(desc);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(ubi_close_volume);
+
+/**
+ * leb_read_sanity_check - perform sanity checks on read requests.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ *
+ * This function is used by ubi_leb_read() and ubi_leb_read_sg()
+ * to perform sanity checks.
+ */
+static int leb_read_sanity_check(struct ubi_volume_desc *desc, int lnum,
+ int offset, int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 ||
+ lnum >= vol->used_ebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size)
+ return -EINVAL;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ if (vol->used_ebs == 0)
+ /* Empty static UBI volume */
+ return 0;
+ if (lnum == vol->used_ebs - 1 &&
+ offset + len > vol->last_eb_bytes)
+ return -EINVAL;
+ }
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return 0;
+}
+
+/**
+ * ubi_leb_read - read data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @buf: buffer where to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function reads data from offset @offset of logical eraseblock @lnum and
+ * stores the data at @buf. When reading from static volumes, @check specifies
+ * whether the data has to be checked or not. If yes, the whole logical
+ * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC
+ * checksum is per-eraseblock). So checking may substantially slow down the
+ * read speed. The @check argument is ignored for dynamic volumes.
+ *
+ * In case of success, this function returns zero. In case of failure, this
+ * function returns a negative error code.
+ *
+ * %-EBADMSG error code is returned:
+ * o for both static and dynamic volumes if MTD driver has detected a data
+ * integrity problem (unrecoverable ECC checksum mismatch in case of NAND);
+ * o for static volumes in case of data CRC mismatch.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF error code.
+ */
+int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
+ int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
+
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read);
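A read sketch (hypothetical helper and error policy): %-EBADMSG means the data was read but is corrupted, which a caller may want to distinguish from transport errors.

static int read_leb_checked(struct ubi_volume_desc *desc, int lnum,
			    void *buf, int len)
{
	/* check=1: on static volumes, verify the whole-LEB data CRC */
	int err = ubi_leb_read(desc, lnum, buf, 0, len, 1);

	if (err == -EBADMSG) {
		/* example policy only: treat corrupted data as plain -EIO */
		err = -EIO;
	}
	return err;
}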
+
+#ifndef __UBOOT__
+/**
+ * ubi_leb_read_sg - read data into a scatter gather list.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to read from
+ * @sgl: UBI scatter gather list where to store the read data
+ * @offset: offset within the logical eraseblock to read from
+ * @len: how many bytes to read
+ * @check: whether UBI has to check the read data's CRC or not.
+ *
+ * This function works exactly like ubi_leb_read(), but instead of
+ * storing the read data into a buffer it writes them to an UBI scatter
+ * gather list.
+ */
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err, vol_id = vol->vol_id;
+
+ dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ err = leb_read_sanity_check(desc, lnum, offset, len);
+ if (err < 0)
+ return err;
+
+ if (len == 0)
+ return 0;
+
+ err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, offset, len, check);
+ if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
+ ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
+ vol->corrupted = 1;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_leb_read_sg);
+#endif
+
+/**
+ * ubi_leb_write - write data.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to write to
+ * @buf: data to write
+ * @offset: offset within the logical eraseblock where to write
+ * @len: how many bytes to write
+ *
+ * This function writes @len bytes of data from @buf to offset @offset of
+ * logical eraseblock @lnum.
+ *
+ * This function takes care of physical eraseblock write failures. If the
+ * write to the physical eraseblock fails, the logical eraseblock is
+ * re-mapped to another physical eraseblock, the data is recovered, and the
+ * write finishes. UBI has a pool of reserved physical eraseblocks for this.
+ *
+ * If all the data were successfully written, zero is returned. If an error
+ * occurred and UBI has not been able to recover from it, this function returns
+ * a negative error code. Note, in case of an error, it is possible that
+ * something was still written to the flash media, but that may be some
+ * garbage.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int offset, int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 ||
+ offset + len > vol->usable_leb_size ||
+ offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_write);
+
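Since both @offset and @len must be multiples of the minimal I/O unit, writers usually round the payload up first. A sketch (hypothetical helper; the caller must supply a buffer padded to the aligned length, conventionally with 0xFF):

static int write_leb_aligned(struct ubi_volume_desc *desc, int lnum,
			     const void *buf, int data_len, int min_io_size)
{
	int wlen = ALIGN(data_len, min_io_size);

	/* bytes from data_len to wlen are padding supplied by the caller */
	return ubi_leb_write(desc, lnum, buf, 0, wlen);
}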
+/**
+ * ubi_leb_change - change logical eraseblock atomically.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number to change
+ * @buf: data to write
+ * @len: how many bytes to write
+ *
+ * This function changes the contents of a logical eraseblock atomically. @buf
+ * has to contain new logical eraseblock data, and @len - the length of the
+ * data, which has to be aligned. The length may be shorter than the logical
+ * eraseblock size, and the logical eraseblock may be appended to more times
+ * later on. This function guarantees that in case of an unclean reboot the old
+ * contents is preserved. Returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int len)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+ return -EINVAL;
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
+ len > vol->usable_leb_size || len & (ubi->min_io_size - 1))
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (len == 0)
+ return 0;
+
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_change);
+
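A typical use is replacing a small configuration blob in place. A sketch (hypothetical helper; LEB 0 and the pre-aligned length are assumptions):

static int update_config_blob(struct ubi_volume_desc *desc,
			      const void *blob, int aligned_len)
{
	/* if power is cut mid-change, the old LEB 0 contents survive */
	return ubi_leb_change(desc, 0, blob, aligned_len);
}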
+/**
+ * ubi_leb_erase - erase logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and synchronously erases the
+ * correspondent physical eraseblock. Returns zero in case of success and a
+ * negative error code in case of failure.
+ *
+ * If the volume is damaged because of an interrupted update this function just
+ * returns immediately with %-EBADF code.
+ */
+int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int err;
+
+ dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+
+ return ubi_wl_flush(ubi, vol->vol_id, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_erase);
+
+/**
+ * ubi_leb_unmap - un-map logical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function un-maps logical eraseblock @lnum and schedules the
+ * corresponding physical eraseblock for erasure, so that it will eventually be
+ * physically erased in background. This operation is much faster than the
+ * erase operation.
+ *
+ * Unlike erase, the un-map operation does not guarantee that the logical
+ * eraseblock will contain all 0xFF bytes when UBI is initialized again. For
+ * example, if several logical eraseblocks are un-mapped, and an unclean reboot
+ * happens after this, the logical eraseblocks will not necessarily be
+ * un-mapped again when this MTD device is attached. They may actually be
+ * mapped to the same physical eraseblocks again. So, this function has to be
+ * used with care.
+ *
+ * In other words, when un-mapping a logical eraseblock, UBI does not store
+ * any information about this on the flash media, it just marks the logical
+ * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical
+ * eraseblock is physically erased, it will be mapped again to the same logical
+ * eraseblock when the MTD device is attached again.
+ *
+ * The main and obvious use-case of this function is when the contents of a
+ * logical eraseblock has to be re-written. Then it is much more efficient to
+ * first un-map it, then write new data, rather than first erase it, then write
+ * new data. Note, once new data has been written to the logical eraseblock,
+ * UBI guarantees that the old contents has gone forever. In other words, if an
+ * unclean reboot happens after the logical eraseblock has been un-mapped and
+ * then written to, it will contain the last written data.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure. If the volume is damaged because of an interrupted update
+ * this function just returns immediately with %-EBADF code.
+ */
+int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return ubi_eba_unmap_leb(ubi, vol, lnum);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_unmap);
+
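The rewrite pattern recommended above, as a sketch (hypothetical helper): un-map first, then write, instead of a synchronous erase followed by a write.

static int rewrite_leb(struct ubi_volume_desc *desc, int lnum,
		       const void *buf, int len)
{
	int err = ubi_leb_unmap(desc, lnum);

	if (err)
		return err;
	/* the write maps @lnum to a fresh PEB; the old data is gone */
	return ubi_leb_write(desc, lnum, buf, 0, len);
}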
+/**
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function maps an un-mapped logical eraseblock @lnum to a physical
+ * eraseblock. This means, that after a successful invocation of this
+ * function the logical eraseblock @lnum will be empty (contain only %0xFF
+ * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
+ * happens.
+ *
+ * This function returns zero in case of success, %-EBADF if the volume is
+ * damaged because of an interrupted update, %-EBADMSG if the logical
+ * eraseblock is already mapped, and other negative error codes in case of
+ * other failures.
+ */
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+	dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (vol->eba_tbl[lnum] >= 0)
+ return -EBADMSG;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_map);
+
+/**
+ * ubi_is_mapped - check if logical eraseblock is mapped.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ *
+ * This function checks if logical eraseblock @lnum is mapped to a physical
+ * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily
+ * mean it will still be un-mapped after the UBI device is re-attached. The
+ * logical eraseblock may become mapped to the physical eraseblock it was last
+ * mapped to.
+ *
+ * This function returns %1 if the LEB is mapped, %0 if not, and a negative
+ * error code in case of failure. If the volume is damaged because of an
+ * interrupted update this function just returns immediately with %-EBADF error
+ * code.
+ */
+int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum)
+{
+ struct ubi_volume *vol = desc->vol;
+
+ dbg_gen("test LEB %d:%d", vol->vol_id, lnum);
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ return vol->eba_tbl[lnum] >= 0;
+}
+EXPORT_SYMBOL_GPL(ubi_is_mapped);
+
+/**
+ * ubi_sync - synchronize UBI device buffers.
+ * @ubi_num: UBI device to synchronize
+ *
+ * The underlying MTD device may cache data in hardware or in software. This
+ * function ensures the caches are flushed. Returns zero in case of success and
+ * a negative error code in case of failure.
+ */
+int ubi_sync(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ mtd_sync(ubi->mtd);
+ ubi_put_device(ubi);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ubi_sync);
+
+/**
+ * ubi_flush - flush UBI work queue.
+ * @ubi_num: UBI device to flush work queue
+ * @vol_id: volume id to flush for
+ * @lnum: logical eraseblock number to flush for
+ *
+ * This function executes all pending work for a particular volume id / logical
+ * eraseblock number pair. If either value is set to %UBI_ALL, then it acts as
+ * a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_flush(int ubi_num, int vol_id, int lnum)
+{
+ struct ubi_device *ubi;
+ int err = 0;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ err = ubi_wl_flush(ubi, vol_id, lnum);
+ ubi_put_device(ubi);
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_flush);
+
+#ifndef __UBOOT__
+BLOCKING_NOTIFIER_HEAD(ubi_notifiers);
+
+/**
+ * ubi_register_volume_notifier - register a volume notifier.
+ * @nb: the notifier description object
+ * @ignore_existing: if non-zero, do not send "added" notification for all
+ * already existing volumes
+ *
+ * This function registers a volume notifier, which means that
+ * 'nb->notifier_call()' will be invoked when an UBI volume is created,
+ * removed, re-sized, re-named, or updated. The first argument of the function
+ * is the notification type. The second argument is pointer to a
+ * &struct ubi_notification object which describes the notification event.
+ * Using UBI API from the volume notifier is prohibited.
+ *
+ * This function returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing)
+{
+ int err;
+
+ err = blocking_notifier_chain_register(&ubi_notifiers, nb);
+ if (err != 0)
+ return err;
+ if (ignore_existing)
+ return 0;
+
+ /*
+ * We are going to walk all UBI devices and all volumes, and
+ * notify the user about existing volumes by the %UBI_VOLUME_ADDED
+ * event. We have to lock the @ubi_devices_mutex to make sure UBI
+ * devices do not disappear.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ ubi_enumerate_volumes(nb);
+ mutex_unlock(&ubi_devices_mutex);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ubi_register_volume_notifier);
+
+/**
+ * ubi_unregister_volume_notifier - unregister the volume notifier.
+ * @nb: the notifier description object
+ *
+ * This function unregisters volume notifier @nb and returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_unregister_volume_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ubi_notifiers, nb);
+}
+EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier);
+#endif
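A registration sketch for the Linux build (the callback and variable names are hypothetical; %UBI_VOLUME_ADDED and struct ubi_notification are assumed from the UBI client API):

static int vol_event_sketch(struct notifier_block *nb, unsigned long type,
			    void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	if (type == UBI_VOLUME_ADDED)
		pr_info("volume %d appeared on ubi%d\n",
			nt->vi.vol_id, nt->vi.ubi_num);
	return NOTIFY_OK;
}

static struct notifier_block vol_nb_sketch = {
	.notifier_call = vol_event_sketch,
};

/* ubi_register_volume_notifier(&vol_nb_sketch, 0); */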
diff --git a/roms/u-boot/drivers/mtd/ubi/misc.c b/roms/u-boot/drivers/mtd/ubi/misc.c
new file mode 100644
index 000000000..81275deae
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/misc.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/* Here we keep miscellaneous functions which are used all over the UBI code */
+
+#include <log.h>
+#include <malloc.h>
+#include <ubi_uboot.h>
+#include "ubi.h"
+
+/**
+ * calc_data_len - calculate how much real data is stored in a buffer.
+ * @ubi: UBI device description object
+ * @buf: a buffer with the contents of the physical eraseblock
+ * @length: the buffer length
+ *
+ * This function calculates how much "real data" is stored in @buf and returns
+ * the length. Continuous 0xFF bytes at the end of the buffer are not
+ * considered as "real data".
+ */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length)
+{
+ int i;
+
+ ubi_assert(!(length & (ubi->min_io_size - 1)));
+
+ for (i = length - 1; i >= 0; i--)
+ if (((const uint8_t *)buf)[i] != 0xFF)
+ break;
+
+ /* The resulting length must be aligned to the minimum flash I/O size */
+ length = ALIGN(i + 1, ubi->min_io_size);
+ return length;
+}
+
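A worked example (assumed values): with min_io_size = 512 and a 2048-byte buffer whose last non-0xFF byte sits at offset 700, the function returns ALIGN(701, 512) = 1024; the trailing 0xFF run is not counted as data.

static int calc_len_example(const struct ubi_device *ubi)
{
	char buf[2048];

	memset(buf, 0xFF, sizeof(buf));
	buf[700] = 0xA5;	/* last "real data" byte */
	/* with min_io_size == 512 this returns ALIGN(701, 512) == 1024 */
	return ubi_calc_data_len(ubi, buf, sizeof(buf));
}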
+/**
+ * ubi_check_volume - check the contents of a static volume.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to check
+ *
+ * This function checks if static volume @vol_id is corrupted by fully reading
+ * it and checking data CRC. This function returns %0 if the volume is not
+ * corrupted, %1 if it is corrupted and a negative error code in case of
+ * failure. Dynamic volumes are not checked and zero is returned immediately.
+ */
+int ubi_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ void *buf;
+ int err = 0, i;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+
+ if (vol->vol_type != UBI_STATIC_VOLUME)
+ return 0;
+
+ buf = vmalloc(vol->usable_leb_size);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < vol->used_ebs; i++) {
+ int size;
+
+ cond_resched();
+
+ if (i == vol->used_ebs - 1)
+ size = vol->last_eb_bytes;
+ else
+ size = vol->usable_leb_size;
+
+ err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
+ if (err) {
+ if (mtd_is_eccerr(err))
+ err = 1;
+ break;
+ }
+ }
+
+ vfree(buf);
+ return err;
+}
+
+/**
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between current number of PEBs reserved for
+ * bad eraseblock handling and the required level of PEBs that must be
+ * reserved, and if necessary, reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
+ * eraseblock handling.
+ * @ubi: UBI device description object
+ */
+void ubi_calculate_reserved(struct ubi_device *ubi)
+{
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
+}
+
+/**
+ * ubi_check_pattern - check if buffer contains only a certain byte pattern.
+ * @buf: buffer to check
+ * @patt: the pattern to check
+ * @size: buffer size in bytes
+ *
+ * This function returns %1 if there are only @patt bytes in @buf, and %0 if
+ * something else was also found.
+ */
+int ubi_check_pattern(const void *buf, uint8_t patt, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (((const uint8_t *)buf)[i] != patt)
+ return 0;
+ return 1;
+}
diff --git a/roms/u-boot/drivers/mtd/ubi/ubi-media.h b/roms/u-boot/drivers/mtd/ubi/ubi-media.h
new file mode 100644
index 000000000..4af85c424
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/ubi-media.h
@@ -0,0 +1,510 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ * Thomas Gleixner
+ * Frank Haverkamp
+ * Oliver Lohmann
+ * Andreas Arnez
+ */
+
+/*
+ * This file defines the layout of UBI headers and all the other UBI on-flash
+ * data structures.
+ */
+
+#ifndef __UBI_MEDIA_H__
+#define __UBI_MEDIA_H__
+
+#include <asm/byteorder.h>
+
+/* The version of UBI images supported by this implementation */
+#define UBI_VERSION 1
+
+/* The highest erase counter value supported by this implementation */
+#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF
+
+/* The initial CRC32 value used when calculating CRC checksums */
+#define UBI_CRC32_INIT 0xFFFFFFFFU
+
+/* Erase counter header magic number (ASCII "UBI#") */
+#define UBI_EC_HDR_MAGIC 0x55424923
+/* Volume identifier header magic number (ASCII "UBI!") */
+#define UBI_VID_HDR_MAGIC 0x55424921
+
+/*
+ * Volume type constants used in the volume identifier header.
+ *
+ * @UBI_VID_DYNAMIC: dynamic volume
+ * @UBI_VID_STATIC: static volume
+ */
+enum {
+ UBI_VID_DYNAMIC = 1,
+ UBI_VID_STATIC = 2
+};
+
+/*
+ * Volume flags used in the volume table record.
+ *
+ * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
+ * @UBI_VTBL_SKIP_CRC_CHECK_FLG: skip the CRC check done on a static volume at
+ * open time. Should only be set on volumes that
+ * are used by upper layers doing this kind of
+ * check. Main use-case for this flag is
+ * boot-time reduction
+ *
+ * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume
+ * table. UBI automatically re-sizes the volume which has this flag and makes
+ * the volume as large as possible. This means that if after the
+ * initialization UBI finds out that there are available physical eraseblocks
+ * present on the device, it automatically appends all of them to the volume
+ * (the physical eraseblocks reserved for bad eraseblocks handling and other
+ * reserved physical eraseblocks are not taken). So, if there is a volume with
+ * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
+ * eraseblocks will be zero after UBI is loaded, because all of them will be
+ * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
+ * after the volume had been initialized.
+ *
+ * The auto-resize feature is useful for device production purposes. For
+ * example, different NAND flash chips may have different amounts of initial
+ * bad eraseblocks, depending on the particular chip instance. Manufacturers
+ * of NAND chips usually guarantee that the amount of initial bad eraseblocks
+ * does not exceed a certain percentage, e.g. 2%. When one creates an UBI
+ * image which will be flashed to the end devices in production, one does not
+ * know the exact amount of good physical eraseblocks the NAND chip on the
+ * device will have, but this number is required to calculate the volume
+ * sizes and put them into the volume
+ * table of the UBI image. In this case, one of the volumes (e.g., the one
+ * which will store the root file system) is marked as "auto-resizable", and
+ * UBI will adjust its size on the first boot if needed.
+ *
+ * Note, first UBI reserves some amount of physical eraseblocks for bad
+ * eraseblock handling, and then re-sizes the volume, not vice-versa. This
+ * means that the pool of reserved physical eraseblocks will always be present.
+ */
+enum {
+ UBI_VTBL_AUTORESIZE_FLG = 0x01,
+ UBI_VTBL_SKIP_CRC_CHECK_FLG = 0x02,
+};
+
+/*
+ * Compatibility constants used by internal volumes.
+ *
+ * @UBI_COMPAT_DELETE: delete this internal volume before anything is written
+ * to the flash
+ * @UBI_COMPAT_RO: attach this device in read-only mode
+ * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its
+ * physical eraseblocks, don't allow the wear-leveling
+ * sub-system to move them
+ * @UBI_COMPAT_REJECT: reject this UBI image
+ */
+enum {
+ UBI_COMPAT_DELETE = 1,
+ UBI_COMPAT_RO = 2,
+ UBI_COMPAT_PRESERVE = 4,
+ UBI_COMPAT_REJECT = 5
+};
+
+/* Sizes of UBI headers */
+#define UBI_EC_HDR_SIZE sizeof(struct ubi_ec_hdr)
+#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr)
+
+/* Sizes of UBI headers without the ending CRC */
+#define UBI_EC_HDR_SIZE_CRC (UBI_EC_HDR_SIZE - sizeof(__be32))
+#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_ec_hdr - UBI erase counter header.
+ * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
+ * @version: version of UBI implementation which is supposed to accept this
+ * UBI image
+ * @padding1: reserved for future, zeroes
+ * @ec: the erase counter
+ * @vid_hdr_offset: where the VID header starts
+ * @data_offset: where the user data start
+ * @image_seq: image sequence number
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: erase counter header CRC checksum
+ *
+ * The erase counter header takes 64 bytes and has plenty of unused space for
+ * future usage. The unused fields are zeroed. The @version field is used to
+ * indicate the version of UBI implementation which is supposed to be able to
+ * work with this UBI image. If @version is greater than the current UBI
+ * version, the image is rejected. This may be useful in future if something
+ * is changed radically. This field is duplicated in the volume identifier
+ * header.
+ *
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
+ * volume identifier header and user data, relative to the beginning of the
+ * physical eraseblock. These values have to be the same for all physical
+ * eraseblocks.
+ *
+ * The @image_seq field is used to validate a UBI image that has been prepared
+ * for a UBI device. The @image_seq value can be any value, but it must be the
+ * same on all eraseblocks. UBI will ensure that all new erase counter headers
+ * also contain this value, and will check the value when attaching the flash.
+ * One way to make use of @image_seq is to increase its value by one every time
+ * an image is flashed over an existing image, then, if the flashing does not
+ * complete, UBI will detect the error when attaching the media.
+ */
+struct ubi_ec_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be64 ec; /* Warning: the current limit is 31-bit anyway! */
+ __be32 vid_hdr_offset;
+ __be32 data_offset;
+ __be32 image_seq;
+ __u8 padding2[32];
+ __be32 hdr_crc;
+} __packed;
+
+/**
+ * struct ubi_vid_hdr - on-flash UBI volume identifier header.
+ * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
+ * @version: UBI implementation version which is supposed to accept this UBI
+ * image (%UBI_VERSION)
+ * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
+ * @copy_flag: if this logical eraseblock was copied from another physical
+ * eraseblock (for wear-leveling reasons)
+ * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
+ * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
+ * @vol_id: ID of this volume
+ * @lnum: logical eraseblock number
+ * @padding1: reserved for future, zeroes
+ * @data_size: how many bytes of data this logical eraseblock contains
+ * @used_ebs: total number of used logical eraseblocks in this volume
+ * @data_pad: how many bytes at the end of this physical eraseblock are not
+ * used
+ * @data_crc: CRC checksum of the data stored in this logical eraseblock
+ * @padding2: reserved for future, zeroes
+ * @sqnum: sequence number
+ * @padding3: reserved for future, zeroes
+ * @hdr_crc: volume identifier header CRC checksum
+ *
+ * The @sqnum is the value of the global sequence counter at the time when this
+ * VID header was created. The global sequence counter is incremented each time
+ * UBI writes a new VID header to the flash, i.e. when it maps a logical
+ * eraseblock to a new physical eraseblock. The global sequence counter is an
+ * unsigned 64-bit integer and we assume it never overflows. The @sqnum
+ * (sequence number) is used to distinguish between older and newer versions of
+ * logical eraseblocks.
+ *
+ * There are 2 situations when there may be more than one physical eraseblock
+ * corresponding to the same logical eraseblock, i.e., having the same @vol_id
+ * and @lnum values in the volume identifier header. Suppose we have a logical
+ * eraseblock L and it is mapped to the physical eraseblock P.
+ *
+ * 1. Because UBI may erase physical eraseblocks asynchronously, the following
+ * situation is possible: L is asynchronously erased, so P is scheduled for
+ * erasure, then L is written to, i.e. mapped to another physical eraseblock P1,
+ * so P1 is written to, then an unclean reboot happens. Result - there are 2
+ * physical eraseblocks P and P1 corresponding to the same logical eraseblock
+ * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the
+ * flash.
+ *
+ * 2. From time to time UBI moves logical eraseblocks to other physical
+ * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P
+ * to P1, and an unclean reboot happens before P is physically erased, there
+ * are two physical eraseblocks P and P1 corresponding to L and UBI has to
+ * select one of them when the flash is attached. The @sqnum field says which
+ * PEB is the original (obviously P will have lower @sqnum) and the copy. But
+ * it is not enough to select the physical eraseblock with the higher sequence
+ * number, because the unclean reboot could have happened in the middle of the
+ * copying process, so the data in P1 may be corrupted. It is also not enough
+ * to just select the physical eraseblock with the lower sequence number,
+ * because the data there may be old (consider a case if more data was added
+ * to P1 after the copying). Moreover, the unclean reboot may happen when the
+ * erasure of P was just started, so it results in an unstable P, which is
+ * "mostly" OK, but
+ * still has unstable bits.
+ *
+ * UBI uses the @copy_flag field to indicate that this logical eraseblock is a
+ * copy. UBI also calculates data CRC when the data is moved and stores it at
+ * the @data_crc field of the copy (P1). So when UBI needs to pick one physical
+ * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is
+ * examined. If it is cleared, the situation is simple and the newer one is
+ * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC
+ * checksum is correct, this physical eraseblock is selected (P1). Otherwise
+ * the older one (P) is selected. This rule is sketched in the illustrative
+ * helper after the structure definition below.
+ *
+ * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
+ * Internal volumes are not seen from outside and are used for various internal
+ * UBI purposes. In this implementation there is only one internal volume - the
+ * layout volume. Internal volumes are the main mechanism of UBI extensions.
+ * For example, in the future one may introduce a journal internal volume.
+ * Internal volumes have their own reserved range of IDs.
+ *
+ * The @compat field is only used for internal volumes and contains the "degree
+ * of their compatibility". It is always zero for user volumes. This field
+ * provides a mechanism to introduce UBI extensions and to be still compatible
+ * with older UBI binaries. For example, if someone introduced a journal in
+ * the future, they would probably use %UBI_COMPAT_DELETE compatibility for
+ * the journal volume. In this case, older UBI binaries, which know nothing
+ * about the journal volume, would just delete this volume and work perfectly
+ * fine. This is similar to what Ext2fs does when it is fed an Ext3fs image -
+ * it just ignores the Ext3fs journal.
+ *
+ * The @data_crc field contains the CRC checksum of the contents of the logical
+ * eraseblock if this is a static volume. In case of dynamic volumes, it does
+ * not contain the CRC checksum as a rule. The only exception is when the
+ * data of the physical eraseblock was moved by the wear-leveling sub-system,
+ * then the wear-leveling sub-system calculates the data CRC and stores it in
+ * the @data_crc field. And of course, the @copy_flag is %1 in this case.
+ *
+ * The @data_size field is used only for static volumes because UBI has to know
+ * how many bytes of data are stored in this eraseblock. For dynamic volumes,
+ * this field usually contains zero. The only exception is when the data of the
+ * physical eraseblock was moved to another physical eraseblock for
+ * wear-leveling reasons. In this case, UBI calculates the CRC checksum of the
+ * contents and uses both the @data_crc and @data_size fields, with @data_size
+ * holding the size of the moved data.
+ *
+ * The @used_ebs field is used only for static volumes and indicates how many
+ * eraseblocks the data of the volume takes. For dynamic volumes this field is
+ * not used and always contains zero.
+ *
+ * The @data_pad is calculated when volumes are created using the alignment
+ * parameter. So, effectively, the @data_pad field reduces the size of logical
+ * eraseblocks of this volume. This is very handy when one uses block-oriented
+ * software (say, cramfs) on top of the UBI volume.
+ */
+struct ubi_vid_hdr {
+ __be32 magic;
+ __u8 version;
+ __u8 vol_type;
+ __u8 copy_flag;
+ __u8 compat;
+ __be32 vol_id;
+ __be32 lnum;
+ __u8 padding1[4];
+ __be32 data_size;
+ __be32 used_ebs;
+ __be32 data_pad;
+ __be32 data_crc;
+ __u8 padding2[4];
+ __be64 sqnum;
+ __u8 padding3[12];
+ __be32 hdr_crc;
+} __packed;
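+
+/*
+ * Illustrative sketch only (a hypothetical helper, not part of UBI or of the
+ * on-flash format): the selection rule described above, applied to the VID
+ * headers of two PEBs carrying the same @vol_id and @lnum. The @copy_crc_ok
+ * argument stands in for a CRC32 check of the copy's data against its
+ * @data_crc field.
+ */
+static inline const struct ubi_vid_hdr *
+ubi_example_pick_peb(const struct ubi_vid_hdr *old_hdr,
+		     const struct ubi_vid_hdr *new_hdr, int copy_crc_ok)
+{
+	/* The newer PEB is not a wear-leveling copy: it is simply newer */
+	if (!new_hdr->copy_flag)
+		return new_hdr;
+	/* The newer PEB is a copy: trust it only if its data CRC is valid */
+	if (copy_crc_ok)
+		return new_hdr;
+	/* The copy was interrupted: fall back to the original PEB */
+	return old_hdr;
+}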
+
+/* Internal UBI volumes count */
+#define UBI_INT_VOL_COUNT 1
+
+/*
+ * Starting ID of internal volumes: 0x7fffefff.
+ * There is reserved room for 4096 internal volumes.
+ */
+#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096)
+
+/* The layout volume contains the volume table */
+
+#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
+#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
+#define UBI_LAYOUT_VOLUME_ALIGN 1
+#define UBI_LAYOUT_VOLUME_EBS 2
+#define UBI_LAYOUT_VOLUME_NAME "layout volume"
+#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
+
+/* The maximum number of volumes per one UBI device */
+#define UBI_MAX_VOLUMES 128
+
+/* The maximum volume name length */
+#define UBI_VOL_NAME_MAX 127
+
+/* Size of the volume table record */
+#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record)
+
+/* Size of the volume table record without the ending CRC */
+#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32))
+
+/**
+ * struct ubi_vtbl_record - a record in the volume table.
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are unused at the end of the each physical
+ * eraseblock to satisfy the requested alignment
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @upd_marker: if volume update was started but not finished
+ * @name_len: volume name length
+ * @name: the volume name
+ * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
+ * @padding: reserved, zeroes
+ * @crc: a CRC32 checksum of the record
+ *
+ * The volume table records are stored in the volume table, which is stored in
+ * the layout volume. The layout volume consists of 2 logical eraseblocks, each
+ * of which contains a copy of the volume table (i.e., the volume table is
+ * duplicated). The volume table is an array of &struct ubi_vtbl_record
+ * objects indexed by the volume ID.
+ *
+ * If the size of the logical eraseblock is large enough to fit
+ * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES
+ * records. Otherwise, it contains as many records as it can fit (i.e., the
+ * size of the logical eraseblock divided by sizeof(struct ubi_vtbl_record));
+ * this rule is sketched in the illustrative helper after the structure
+ * definition below.
+ *
+ * The @upd_marker flag is used to implement volume update. It is set to %1
+ * before update and set to %0 after the update. So if the update operation was
+ * interrupted, UBI knows that the volume is corrupted.
+ *
+ * The @alignment field is specified when the volume is created and cannot be
+ * later changed. It may be useful, for example, when a block-oriented file
+ * system works on top of UBI. The @data_pad field is calculated using the
+ * logical eraseblock size and @alignment. The alignment must be a multiple of
+ * the minimal flash I/O unit. If @alignment is 1, all the available space of
+ * the physical eraseblocks is used.
+ *
+ * Empty records contain all zeroes and the CRC checksum of those zeroes.
+ */
+struct ubi_vtbl_record {
+ __be32 reserved_pebs;
+ __be32 alignment;
+ __be32 data_pad;
+ __u8 vol_type;
+ __u8 upd_marker;
+ __be16 name_len;
+#ifndef __UBOOT__
+ __u8 name[UBI_VOL_NAME_MAX+1];
+#else
+ char name[UBI_VOL_NAME_MAX+1];
+#endif
+ __u8 flags;
+ __u8 padding[23];
+ __be32 crc;
+} __packed;
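+
+/*
+ * Illustrative sketch only (a hypothetical helper, not provided by UBI): the
+ * slot-count rule described above - the volume table holds as many records as
+ * fit into one logical eraseblock, capped at %UBI_MAX_VOLUMES.
+ */
+static inline int ubi_example_vtbl_slots(int leb_size)
+{
+	int slots = leb_size / UBI_VTBL_RECORD_SIZE;
+
+	return slots < UBI_MAX_VOLUMES ? slots : UBI_MAX_VOLUMES;
+}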
+
+/* UBI fastmap on-flash data structures */
+
+#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
+#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
+
+/* fastmap on-flash data structure format version */
+#define UBI_FM_FMT_VERSION 1
+
+#define UBI_FM_SB_MAGIC 0x7B11D69F
+#define UBI_FM_HDR_MAGIC 0xD4B82EF7
+#define UBI_FM_VHDR_MAGIC 0xFA370ED1
+#define UBI_FM_POOL_MAGIC 0x67AF4D08
+#define UBI_FM_EBA_MAGIC 0xf0c040a8
+
+/* A fastmap super block can be located between PEB 0 and
+ * UBI_FM_MAX_START */
+#define UBI_FM_MAX_START 64
+
+/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
+#define UBI_FM_MAX_BLOCKS 32
+
+/* 5% of the total number of PEBs have to be scanned while attaching
+ * from a fastmap.
+ * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
+ * UBI_FM_MAX_POOL_SIZE */
+#define UBI_FM_MIN_POOL_SIZE 8
+#define UBI_FM_MAX_POOL_SIZE 256
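+
+/*
+ * Illustrative sketch only (a hypothetical helper, not provided by UBI): the
+ * pool sizing rule described above - roughly 5% of all PEBs, clamped to the
+ * [UBI_FM_MIN_POOL_SIZE, UBI_FM_MAX_POOL_SIZE] range.
+ */
+static inline int ubi_example_fm_pool_size(int peb_count)
+{
+	int size = peb_count / 20;	/* 5% of the total number of PEBs */
+
+	if (size < UBI_FM_MIN_POOL_SIZE)
+		size = UBI_FM_MIN_POOL_SIZE;
+	if (size > UBI_FM_MAX_POOL_SIZE)
+		size = UBI_FM_MAX_POOL_SIZE;
+	return size;
+}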
+
+/**
+ * struct ubi_fm_sb - UBI fastmap super block
+ * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
+ * @version: format version of this fastmap
+ * @data_crc: CRC over the fastmap data
+ * @used_blocks: number of PEBs used by this fastmap
+ * @block_loc: an array containing the location of all PEBs of the fastmap
+ * @block_ec: the erase counter of each used PEB
+ * @sqnum: highest sequence number value at the time the fastmap was taken
+ */
+struct ubi_fm_sb {
+ __be32 magic;
+ __u8 version;
+ __u8 padding1[3];
+ __be32 data_crc;
+ __be32 used_blocks;
+ __be32 block_loc[UBI_FM_MAX_BLOCKS];
+ __be32 block_ec[UBI_FM_MAX_BLOCKS];
+ __be64 sqnum;
+ __u8 padding2[32];
+} __packed;
+
+/**
+ * struct ubi_fm_hdr - header of the fastmap data set
+ * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
+ * @free_peb_count: number of free PEBs known by this fastmap
+ * @used_peb_count: number of used PEBs known by this fastmap
+ * @scrub_peb_count: number of PEBs to be scrubbed known by this fastmap
+ * @bad_peb_count: number of bad PEBs known by this fastmap
+ * @erase_peb_count: number of PEBs which have to be erased
+ * @vol_count: number of UBI volumes known by this fastmap
+ */
+struct ubi_fm_hdr {
+ __be32 magic;
+ __be32 free_peb_count;
+ __be32 used_peb_count;
+ __be32 scrub_peb_count;
+ __be32 bad_peb_count;
+ __be32 erase_peb_count;
+ __be32 vol_count;
+ __u8 padding[4];
+} __packed;
+
+/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
+
+/**
+ * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
+ * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
+ * @size: current pool size
+ * @max_size: maximal pool size
+ * @pebs: an array containing the location of all PEBs in this pool
+ */
+struct ubi_fm_scan_pool {
+ __be32 magic;
+ __be16 size;
+ __be16 max_size;
+ __be32 pebs[UBI_FM_MAX_POOL_SIZE];
+ __be32 padding[4];
+} __packed;
+
+/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
+
+/**
+ * struct ubi_fm_ec - stores the erase counter of a PEB
+ * @pnum: PEB number
+ * @ec: erase counter of this PEB
+ */
+struct ubi_fm_ec {
+ __be32 pnum;
+ __be32 ec;
+} __packed;
+
+/**
+ * struct ubi_fm_volhdr - Fastmap volume header.
+ * It identifies the start of an EBA table.
+ * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
+ * @vol_id: volume id of the fastmapped volume
+ * @vol_type: type of the fastmapped volume
+ * @data_pad: data_pad value of the fastmapped volume
+ * @used_ebs: number of used LEBs within this volume
+ * @last_eb_bytes: number of bytes used in the last LEB
+ */
+struct ubi_fm_volhdr {
+ __be32 magic;
+ __be32 vol_id;
+ __u8 vol_type;
+ __u8 padding1[3];
+ __be32 data_pad;
+ __be32 used_ebs;
+ __be32 last_eb_bytes;
+ __u8 padding2[8];
+} __packed;
+
+/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
+
+/**
+ * struct ubi_fm_eba - denotes an association between a PEB and a LEB
+ * @magic: EBA table magic number
+ * @reserved_pebs: number of table entries
+ * @pnum: PEB number of LEB (LEB is the index)
+ */
+struct ubi_fm_eba {
+ __be32 magic;
+ __be32 reserved_pebs;
+ __be32 pnum[0];
+} __packed;
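+
+/*
+ * Illustrative overview only, assembled from the per-structure comments
+ * above: a raw fastmap data set starts with one struct ubi_fm_sb (in the
+ * super block PEB), followed by a struct ubi_fm_hdr, two struct
+ * ubi_fm_scan_pool records, the struct ubi_fm_ec records of the free and used
+ * PEBs, and then, per volume, one struct ubi_fm_volhdr followed by its
+ * struct ubi_fm_eba table.
+ */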
+#endif /* !__UBI_MEDIA_H__ */
diff --git a/roms/u-boot/drivers/mtd/ubi/ubi.h b/roms/u-boot/drivers/mtd/ubi/ubi.h
new file mode 100644
index 000000000..f44960186
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/ubi.h
@@ -0,0 +1,1127 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __UBI_UBI_H__
+#define __UBI_UBI_H__
+
+#ifndef __UBOOT__
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/notifier.h>
+#include <asm/pgtable.h>
+#else
+#include <ubi_uboot.h>
+#endif
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/ubi.h>
+#include "ubi-media.h"
+#include <mtd/ubi-user.h>
+
+/* Maximum number of supported UBI devices */
+#define UBI_MAX_DEVICES 32
+
+/* UBI name used for character devices, sysfs, etc */
+#define UBI_NAME_STR "ubi"
+
+/* Normal UBI messages */
+#ifdef CONFIG_UBI_SILENCE_MSG
+#define ubi_msg(ubi, fmt, ...)
+#else
+#define ubi_msg(ubi, fmt, ...) printk(UBI_NAME_STR "%d: " fmt "\n", \
+ ubi->ubi_num, ##__VA_ARGS__)
+#endif
+
+/* UBI warning messages */
+#define ubi_warn(ubi, fmt, ...) pr_warn(UBI_NAME_STR "%d warning: %s: " fmt "\n", \
+ ubi->ubi_num, __func__, ##__VA_ARGS__)
+/* UBI error messages */
+#define ubi_err(ubi, fmt, ...) pr_err(UBI_NAME_STR "%d error: %s: " fmt "\n", \
+ ubi->ubi_num, __func__, ##__VA_ARGS__)
+
+/* Background thread name pattern */
+#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
+
+/*
+ * This marker in the EBA table means that the LEB is un-mapped.
+ * NOTE! It has to have the same value as %UBI_ALL.
+ */
+#define UBI_LEB_UNMAPPED -1
+
+/*
+ * In case of errors, UBI tries to repeat the operation several times before
+ * returning an error. The below constant defines how many times UBI re-tries.
+ */
+#define UBI_IO_RETRIES 3
+
+/*
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles PEBs are protected from the wear-leveling
+ * worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
+/* The volume ID/LEB number/erase counter is unknown */
+#define UBI_UNKNOWN -1
+
+/*
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number + 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/*
+ * Error codes returned by the I/O sub-system.
+ *
+ * UBI_IO_FF: the read region of flash contains only 0xFFs
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
+ * integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
+ * data integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ *
+ * Note, it is probably better to have bit-flip and ebadmsg as flags which can
+ * be or'ed with other error codes. But this is a big change because there are
+ * many callers, so it is not worth the risk of introducing a bug.
+ */
+enum {
+ UBI_IO_FF = 1,
+ UBI_IO_FF_BITFLIPS,
+ UBI_IO_BAD_HDR,
+ UBI_IO_BAD_HDR_EBADMSG,
+ UBI_IO_BITFLIPS,
+};
+
+/*
+ * Return codes of the 'ubi_eba_copy_leb()' function.
+ *
+ * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
+ * PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ * PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ * PEB
+ * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
+ * PEB
+ * MOVE_TARGET_BITFLIPS: canceled because a bit-flip was detected in the
+ * target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
+ */
+enum {
+ MOVE_CANCEL_RACE = 1,
+ MOVE_SOURCE_RD_ERR,
+ MOVE_TARGET_RD_ERR,
+ MOVE_TARGET_WR_ERR,
+ MOVE_TARGET_BITFLIPS,
+ MOVE_RETRY,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
+/*
+ * Flags for emulate_power_cut in ubi_debug_info
+ *
+ * POWER_CUT_EC_WRITE: Emulate a power cut when writing an EC header
+ * POWER_CUT_VID_WRITE: Emulate a power cut when writing a VID header
+ */
+enum {
+ POWER_CUT_EC_WRITE = 0x01,
+ POWER_CUT_VID_WRITE = 0x02,
+};
+
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL sub-system. Each physical eraseblock
+ * has a corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL sub-system for details.
+ */
+struct ubi_wl_entry {
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+ int ec;
+ int pnum;
+};
+
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ * the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA sub-system to implement per-LEB
+ * locking. When a logical eraseblock is being locked - corresponding
+ * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
+ * See EBA sub-system for details.
+ */
+struct ubi_ltree_entry {
+ struct rb_node rb;
+ int vol_id;
+ int lnum;
+ int users;
+ struct rw_semaphore mutex;
+};
+
+/**
+ * struct ubi_rename_entry - volume re-name description data structure.
+ * @new_name_len: new volume name length
+ * @new_name: new volume name
+ * @remove: if not zero, this volume should be removed, not re-named
+ * @desc: descriptor of the volume
+ * @list: links re-name entries into a list
+ *
+ * This data structure is utilized in the multiple volume re-name code. Namely,
+ * UBI first creates a list of &struct ubi_rename_entry objects from the
+ * &struct ubi_rnvol_req request object, and then utilizes this list to do all
+ * the job.
+ */
+struct ubi_rename_entry {
+ int new_name_len;
+ char new_name[UBI_VOL_NAME_MAX + 1];
+ int remove;
+ struct ubi_volume_desc *desc;
+ struct list_head list;
+};
+
+struct ubi_volume_desc;
+
+/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, this PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs.
+ * If all PEBs within the pool are used, a new fastmap will be written
+ * to the flash and the pool gets refilled with empty PEBs.
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
+
+/**
+ * struct ubi_volume - UBI volume description data structure.
+ * @dev: device object to make use of the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi: reference to the UBI device description object
+ * @vol_id: volume ID
+ * @ref_count: volume reference count
+ * @readers: number of users holding this volume in read-only mode
+ * @writers: number of users holding this volume in read-write mode
+ * @exclusive: whether somebody holds this volume in exclusive mode
+ * @metaonly: whether somebody is altering only meta data of this volume
+ *
+ * @reserved_pebs: how many physical eraseblocks are reserved for this volume
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @usable_leb_size: logical eraseblock size without padding
+ * @used_ebs: how many logical eraseblocks in this volume contain data
+ * @last_eb_bytes: how many bytes are stored in the last logical eraseblock
+ * @used_bytes: how many bytes of data this volume contains
+ * @alignment: volume alignment
+ * @data_pad: how many bytes are not used at the end of physical eraseblocks to
+ * satisfy the requested alignment
+ * @name_len: volume name length
+ * @name: volume name
+ *
+ * @upd_ebs: how many eraseblocks are expected to be updated
+ * @ch_lnum: LEB number which is being changed by the atomic LEB change
+ * operation
+ * @upd_bytes: how many bytes are expected to be received for volume update or
+ * atomic LEB change
+ * @upd_received: how many bytes were already received for volume update or
+ * atomic LEB change
+ * @upd_buf: update buffer which is used to collect update data or data for
+ * atomic LEB change
+ *
+ * @eba_tbl: EBA table of this volume (LEB->PEB mapping)
+ * @skip_check: %1 if CRC check of this static volume should be skipped.
+ * Directly reflects the presence of the
+ * %UBI_VTBL_SKIP_CRC_CHECK_FLG flag in the vtbl entry
+ * @checked: %1 if this static volume was checked
+ * @corrupted: %1 if the volume is corrupted (static volumes only)
+ * @upd_marker: %1 if the update marker is set for this volume
+ * @updating: %1 if the volume is being updated
+ * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
+ * @direct_writes: %1 if direct writes are enabled for this volume
+ *
+ * The @corrupted field indicates that the volume's contents are corrupted.
+ * Since UBI protects only static volumes, this field is not relevant to
+ * dynamic volumes - it is the user's responsibility to ensure their data
+ * integrity.
+ *
+ * The @upd_marker flag indicates that this volume is either being updated at
+ * the moment or is damaged because of an unclean reboot.
+ */
+struct ubi_volume {
+ struct device dev;
+ struct cdev cdev;
+ struct ubi_device *ubi;
+ int vol_id;
+ int ref_count;
+ int readers;
+ int writers;
+ int exclusive;
+ int metaonly;
+
+ int reserved_pebs;
+ int vol_type;
+ int usable_leb_size;
+ int used_ebs;
+#ifndef __UBOOT__
+ int last_eb_bytes;
+#else
+ u32 last_eb_bytes;
+#endif
+ long long used_bytes;
+ int alignment;
+ int data_pad;
+ int name_len;
+ char name[UBI_VOL_NAME_MAX + 1];
+
+ int upd_ebs;
+ int ch_lnum;
+ long long upd_bytes;
+ long long upd_received;
+ void *upd_buf;
+
+ int *eba_tbl;
+ unsigned int skip_check:1;
+ unsigned int checked:1;
+ unsigned int corrupted:1;
+ unsigned int upd_marker:1;
+ unsigned int updating:1;
+ unsigned int changing_leb:1;
+ unsigned int direct_writes:1;
+};
+
+/**
+ * struct ubi_volume_desc - UBI volume descriptor returned when it is opened.
+ * @vol: reference to the corresponding volume description object
+ * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, %UBI_EXCLUSIVE
+ * or %UBI_METAONLY)
+ */
+struct ubi_volume_desc {
+ struct ubi_volume *vol;
+ int mode;
+};
+
+struct ubi_wl_entry;
+
+/**
+ * struct ubi_debug_info - debugging information for an UBI device.
+ *
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @chk_fastmap: if UBI fastmap extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @emulate_power_cut: emulate power cut for testing purposes
+ * @power_cut_counter: count down for writes left until emulated power cut
+ * @power_cut_min: minimum number of writes before emulating a power cut
+ * @power_cut_max: maximum number of writes until emulating a power cut
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_chk_fastmap: debugfs knob to enable UBI fastmap extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
+ * @dfs_emulate_power_cut: debugfs knob to emulate power cuts
+ * @dfs_power_cut_min: debugfs knob for minimum writes before power cut
+ * @dfs_power_cut_max: debugfs knob for maximum writes until power cut
+ */
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int chk_fastmap:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ unsigned int emulate_power_cut:2;
+ unsigned int power_cut_counter;
+ unsigned int power_cut_min;
+ unsigned int power_cut_max;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_chk_fastmap;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
+ struct dentry *dfs_emulate_power_cut;
+ struct dentry *dfs_power_cut_min;
+ struct dentry *dfs_power_cut_max;
+};
+
+/**
+ * struct ubi_device - UBI device description structure
+ * @dev: UBI device object to use the Linux device model
+ * @cdev: character device object to create character device
+ * @ubi_num: UBI device number
+ * @ubi_name: UBI device name
+ * @vol_count: number of volumes in this UBI device
+ * @volumes: volumes of this UBI device
+ * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, @beb_rsvd_pebs,
+ * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
+ * @vol->readers, @vol->writers, @vol->exclusive,
+ * @vol->metaonly, @vol->ref_count, @vol->mapping and
+ * @vol->eba_tbl.
+ * @ref_count: count of references on the UBI device
+ * @image_seq: image sequence number recorded on EC headers
+ *
+ * @rsvd_pebs: count of reserved physical eraseblocks
+ * @avail_pebs: count of available physical eraseblocks
+ * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
+ * handling
+ * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
+ *
+ * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
+ * of UBI initialization
+ * @vtbl_slots: how many slots are available in the volume table
+ * @vtbl_size: size of the volume table in bytes
+ * @vtbl: in-RAM volume table copy
+ * @device_mutex: protects on-flash volume table and serializes volume
+ * creation, deletion, update, re-size, re-name and set
+ * property
+ *
+ * @max_ec: current highest erase counter value
+ * @mean_ec: current mean erase counter value
+ *
+ * @global_sqnum: global sequence number
+ * @ltree_lock: protects the lock tree and @global_sqnum
+ * @ltree: the lock tree
+ * @alc_mutex: serializes "atomic LEB change" operations
+ *
+ * @fm_disabled: non-zero if fastmap is disabled (default)
+ * @fm: in-memory data structure of the currently used fastmap
+ * @fm_pool: in-memory data structure of the fastmap pool
+ * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
+ * sub-system
+ * @fm_protect: serializes ubi_update_fastmap(), protects @fm_buf and makes sure
+ * that critical sections cannot be interrupted by ubi_update_fastmap()
+ * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
+ * @fm_size: fastmap size in bytes
+ * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
+ * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
+ *
+ * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
+ * @free: RB-tree of free physical eraseblocks
+ * @free_count: Contains the number of elements in @free
+ * @scrub: RB-tree of physical eraseblocks which need scrubbing
+ * @pq: protection queue (contains physical eraseblocks which are temporarily
+ * protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ * @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works,
+ * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ * and @fm_wl_pool fields
+ * @move_mutex: serializes eraseblock moves
+ * @work_sem: used to wait for all the scheduled works to finish and prevent
+ * new works from being submitted
+ * @wl_scheduled: non-zero if the wear-leveling was scheduled
+ * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
+ * physical eraseblock
+ * @move_from: physical eraseblock from where the data is being moved
+ * @move_to: physical eraseblock where the data is being moved to
+ * @move_to_put: if the "to" PEB was put
+ * @works: list of pending works
+ * @works_count: count of pending works
+ * @bgt_thread: background thread description object
+ * @thread_enabled: if the background thread is enabled
+ * @bgt_name: background thread name
+ *
+ * @flash_size: underlying MTD device size (in bytes)
+ * @peb_count: count of physical eraseblocks on the MTD device
+ * @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @good_peb_count: count of good physical eraseblocks
+ * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
+ * used by UBI)
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
+ * @min_io_size: minimal input/output unit size of the underlying MTD device
+ * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
+ * @ro_mode: if the UBI device is in read-only mode
+ * @leb_size: logical eraseblock size
+ * @leb_start: starting offset of logical eraseblocks within physical
+ * eraseblocks
+ * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size
+ * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size
+ * @vid_hdr_offset: starting offset of the volume identifier header (might be
+ * unaligned)
+ * @vid_hdr_aloffset: starting offset of the VID header aligned to
+ * @hdrs_min_io_size
+ * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
+ * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
+ * not
+ * @nor_flash: non-zero if working on top of NOR flash
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
+ * @mtd: MTD device descriptor
+ *
+ * @peb_buf: a buffer of PEB size used for different purposes
+ * @buf_mutex: protects @peb_buf
+ * @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
+ */
+struct ubi_device {
+ struct cdev cdev;
+ struct device dev;
+ int ubi_num;
+ char ubi_name[sizeof(UBI_NAME_STR)+5];
+ int vol_count;
+ struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
+ spinlock_t volumes_lock;
+ int ref_count;
+ int image_seq;
+
+ int rsvd_pebs;
+ int avail_pebs;
+ int beb_rsvd_pebs;
+ int beb_rsvd_level;
+ int bad_peb_limit;
+
+ int autoresize_vol_id;
+ int vtbl_slots;
+ int vtbl_size;
+ struct ubi_vtbl_record *vtbl;
+ struct mutex device_mutex;
+
+ int max_ec;
+	/* Note, mean_ec is not updated at run-time - should be fixed */
+ int mean_ec;
+
+ /* EBA sub-system's stuff */
+ unsigned long long global_sqnum;
+ spinlock_t ltree_lock;
+ struct rb_root ltree;
+ struct mutex alc_mutex;
+
+ /* Fastmap stuff */
+ int fm_disabled;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+ struct rw_semaphore fm_eba_sem;
+ struct rw_semaphore fm_protect;
+ void *fm_buf;
+ size_t fm_size;
+#ifndef __UBOOT__
+ struct work_struct fm_work;
+#endif
+ int fm_work_scheduled;
+
+ /* Wear-leveling sub-system's stuff */
+ struct rb_root used;
+ struct rb_root erroneous;
+ struct rb_root free;
+ int free_count;
+ struct rb_root scrub;
+ struct list_head pq[UBI_PROT_QUEUE_LEN];
+ int pq_head;
+ spinlock_t wl_lock;
+ struct mutex move_mutex;
+ struct rw_semaphore work_sem;
+ int wl_scheduled;
+ struct ubi_wl_entry **lookuptbl;
+ struct ubi_wl_entry *move_from;
+ struct ubi_wl_entry *move_to;
+ int move_to_put;
+ struct list_head works;
+ int works_count;
+ struct task_struct *bgt_thread;
+ int thread_enabled;
+ char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2];
+
+ /* I/O sub-system's stuff */
+ long long flash_size;
+ int peb_count;
+ int peb_size;
+ int bad_peb_count;
+ int good_peb_count;
+ int corr_peb_count;
+ int erroneous_peb_count;
+ int max_erroneous;
+ int min_io_size;
+ int hdrs_min_io_size;
+ int ro_mode;
+ int leb_size;
+ int leb_start;
+ int ec_hdr_alsize;
+ int vid_hdr_alsize;
+ int vid_hdr_offset;
+ int vid_hdr_aloffset;
+ int vid_hdr_shift;
+ unsigned int bad_allowed:1;
+ unsigned int nor_flash:1;
+ int max_write_size;
+ struct mtd_info *mtd;
+
+ void *peb_buf;
+ struct mutex buf_mutex;
+ struct mutex ckvol_mutex;
+
+ struct ubi_debug_info dbg;
+};
+
+/**
+ * struct ubi_ainf_peb - attach information about a physical eraseblock.
+ * @ec: erase counter (%UBI_UNKNOWN if it is unknown)
+ * @pnum: physical eraseblock number
+ * @vol_id: ID of the volume this LEB belongs to
+ * @lnum: logical eraseblock number
+ * @scrub: if this physical eraseblock needs scrubbing
+ * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB)
+ * @sqnum: sequence number
+ * @u: unions RB-tree or @list links
+ * @u.rb: link in the per-volume RB-tree of &struct ubi_ainf_peb objects
+ * @u.list: link in one of the eraseblock lists
+ *
+ * One object of this type is allocated for each physical eraseblock when
+ * attaching an MTD device. Note, if this PEB does not belong to any LEB /
+ * volume, the @vol_id and @lnum fields are initialized to %UBI_UNKNOWN.
+ */
+struct ubi_ainf_peb {
+ int ec;
+ int pnum;
+ int vol_id;
+ int lnum;
+ unsigned int scrub:1;
+ unsigned int copy_flag:1;
+ unsigned long long sqnum;
+ union {
+ struct rb_node rb;
+ struct list_head list;
+ } u;
+};
+
+/**
+ * struct ubi_ainf_volume - attaching information about a volume.
+ * @vol_id: volume ID
+ * @highest_lnum: highest logical eraseblock number in this volume
+ * @leb_count: number of logical eraseblocks in this volume
+ * @vol_type: volume type
+ * @used_ebs: number of used logical eraseblocks in this volume (only for
+ * static volumes)
+ * @last_data_size: amount of data in the last logical eraseblock of this
+ * volume (always equivalent to the usable logical eraseblock
+ * size in case of dynamic volumes)
+ * @data_pad: how many bytes at the end of logical eraseblocks of this volume
+ * are not used (due to volume alignment)
+ * @compat: compatibility flags of this volume
+ * @rb: link in the volume RB-tree
+ * @root: root of the RB-tree containing all the eraseblocks belonging to this
+ * volume (&struct ubi_ainf_peb objects)
+ *
+ * One object of this type is allocated for each volume when attaching an MTD
+ * device.
+ */
+struct ubi_ainf_volume {
+ int vol_id;
+ int highest_lnum;
+ int leb_count;
+ int vol_type;
+ int used_ebs;
+ int last_data_size;
+ int data_pad;
+ int compat;
+ struct rb_node rb;
+ struct rb_root root;
+};
+
+/**
+ * struct ubi_attach_info - MTD device attaching information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @corr_peb_count: count of PEBs in the @corr list
+ * @empty_peb_count: count of PEBs which are presumably empty (contain only
+ * 0xFF bytes)
+ * @alien_peb_count: count of PEBs in the @alien list
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked
+ * as bad yet, but which look bad
+ * @vols_found: number of volumes found
+ * @highest_vol_id: highest volume ID
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ * @aeb_slab_cache: slab cache for &struct ubi_ainf_peb objects
+ *
+ * This data structure contains the result of attaching an MTD device and may
+ * be used by other UBI sub-systems to build final UBI data structures, further
+ * error-recovery and so on.
+ */
+struct ubi_attach_info {
+ struct rb_root volumes;
+ struct list_head corr;
+ struct list_head free;
+ struct list_head erase;
+ struct list_head alien;
+ int corr_peb_count;
+ int empty_peb_count;
+ int alien_peb_count;
+ int bad_peb_count;
+ int maybe_bad_peb_count;
+ int vols_found;
+ int highest_vol_id;
+ int is_empty;
+ int min_ec;
+ int max_ec;
+ unsigned long long max_sqnum;
+ int mean_ec;
+ uint64_t ec_sum;
+ int ec_count;
+ struct kmem_cache *aeb_slab_cache;
+};
+
+/**
+ * struct ubi_work - UBI work description data structure.
+ * @list: a link in the list of pending works
+ * @func: worker function
+ * @e: physical eraseblock to erase
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+ * @anchor: produce an anchor PEB to be used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @shutdown argument
+ * is not zero, the worker has to free the resources and exit immediately as
+ * the WL sub-system is shutting down.
+ * The worker has to return zero in case of success and a negative error code
+ * in case of failure. A minimal illustrative worker sketch follows the extern
+ * declarations below.
+ */
+struct ubi_work {
+ struct list_head list;
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
+ /* The below fields are only relevant to erasure works */
+ struct ubi_wl_entry *e;
+ int vol_id;
+ int lnum;
+ int torture;
+ int anchor;
+};
+
+#include "debug.h"
+
+extern struct kmem_cache *ubi_wl_entry_slab;
+extern const struct file_operations ubi_ctrl_cdev_operations;
+extern const struct file_operations ubi_cdev_operations;
+extern const struct file_operations ubi_vol_cdev_operations;
+extern struct class ubi_class;
+extern struct mutex ubi_devices_mutex;
+extern struct blocking_notifier_head ubi_notifiers;
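+
+/*
+ * A minimal illustrative worker sketch (hypothetical, not part of UBI),
+ * matching the contract documented at &struct ubi_work: when @shutdown is
+ * non-zero, free the resources and exit immediately; otherwise do the work
+ * and return zero or a negative error code.
+ */
+static inline int ubi_example_worker(struct ubi_device *ubi,
+				     struct ubi_work *wrk, int shutdown)
+{
+	if (shutdown) {
+		/* The WL sub-system is shutting down: free and bail out */
+		kfree(wrk);
+		return 0;
+	}
+	/* ... operate on wrk->e->pnum here, honouring wrk->torture ... */
+	kfree(wrk);
+	return 0;
+}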
+
+/* attach.c */
+int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
+ int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
+struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
+ int vol_id);
+void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
+struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
+ struct ubi_attach_info *ai);
+int ubi_attach(struct ubi_device *ubi, int force_scan);
+void ubi_destroy_ai(struct ubi_attach_info *ai);
+
+/* vtbl.c */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec);
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list);
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai);
+
+/* vmt.c */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl);
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list);
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+
+/* upd.c */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes);
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req);
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
+
+/* misc.c */
+int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
+ int length);
+int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
+void ubi_calculate_reserved(struct ubi_device *ubi);
+int ubi_check_pattern(const void *buf, uint8_t patt, int size);
+
+/* gluebi.c */
+#ifdef CONFIG_MTD_UBI_GLUEBI
+int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol);
+int ubi_destroy_gluebi(struct ubi_volume *vol);
+void ubi_gluebi_updated(struct ubi_volume *vol);
+#else
+#define ubi_create_gluebi(ubi, vol) 0
+
+static inline int ubi_destroy_gluebi(struct ubi_volume *vol)
+{
+ return 0;
+}
+
+#define ubi_gluebi_updated(vol)
+#endif
+
+/* eba.c */
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum);
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check);
+int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_sgl *sgl, int lnum, int offset, int len,
+ int check);
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ const void *buf, int offset, int len);
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int used_ebs);
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len);
+int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
+ struct ubi_vid_hdr *vid_hdr);
+int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
+int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
+ struct ubi_attach_info *ai_scan);
+
+/* wl.c */
+int ubi_wl_get_peb(struct ubi_device *ubi);
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture);
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum);
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
+void ubi_wl_close(struct ubi_device *ubi);
+int ubi_thread(void *u);
+struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
+int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
+ int lnum, int torture);
+int ubi_is_erase_work(struct ubi_work *wrk);
+void ubi_refill_pools(struct ubi_device *ubi);
+int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
+
+/* io.c */
+int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
+ int len);
+int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
+ int len);
+int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture);
+int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
+int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr, int verbose);
+int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_ec_hdr *ec_hdr);
+int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int verbose);
+int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
+ struct ubi_vid_hdr *vid_hdr);
+
+/* build.c */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024);
+int ubi_detach_mtd_dev(int ubi_num, int anyway);
+struct ubi_device *ubi_get_device(int ubi_num);
+void ubi_put_device(struct ubi_device *ubi);
+struct ubi_device *ubi_get_by_major(int major);
+int ubi_major2num(int major);
+int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol,
+ int ntype);
+int ubi_notify_all(struct ubi_device *ubi, int ntype,
+ struct notifier_block *nb);
+int ubi_enumerate_volumes(struct notifier_block *nb);
+void ubi_free_internal_volumes(struct ubi_device *ubi);
+
+/* kapi.c */
+void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
+void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
+ struct ubi_volume_info *vi);
+/* scan.c */
+int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
+ int pnum, const struct ubi_vid_hdr *vid_hdr);
+
+/* fastmap.c */
+#ifdef CONFIG_MTD_UBI_FASTMAP
+size_t ubi_calc_fm_size(struct ubi_device *ubi);
+int ubi_update_fastmap(struct ubi_device *ubi);
+int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int fm_anchor);
+#else
+static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
+#endif
+
+/* block.c */
+#ifdef CONFIG_MTD_UBI_BLOCK
+int ubiblock_init(void);
+void ubiblock_exit(void);
+int ubiblock_create(struct ubi_volume_info *vi);
+int ubiblock_remove(struct ubi_volume_info *vi);
+#else
+static inline int ubiblock_init(void) { return 0; }
+static inline void ubiblock_exit(void) {}
+static inline int ubiblock_create(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+static inline int ubiblock_remove(struct ubi_volume_info *vi)
+{
+ return -ENOSYS;
+}
+#endif
+
+/*
+ * ubi_for_each_free_peb - walk the UBI free RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop counter
+ */
+#define ubi_for_each_free_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
+
+/*
+ * ubi_for_each_used_peb - walk the UBI used RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop counter
+ */
+#define ubi_for_each_used_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
+
+/*
+ * ubi_for_each_scrub_peb - walk the UBI scrub RB tree.
+ * @ubi: UBI device description object
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ * @tmp_rb: a pointer to a struct rb_node to use as a loop counter
+ */
+#define ubi_for_each_scrub_peb(ubi, e, tmp_rb) \
+ ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
+
+/*
+ * ubi_for_each_protected_peb - walk the UBI protection queue.
+ * @ubi: UBI device description object
+ * @i: an integer used as a counter
+ * @e: a pointer to a ubi_wl_entry to use as cursor
+ */
+#define ubi_for_each_protected_peb(ubi, i, e) \
+ for ((i) = 0; (i) < UBI_PROT_QUEUE_LEN; (i)++) \
+ list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
+
+/*
+ * ubi_rb_for_each_entry - walk an RB-tree.
+ * @rb: a pointer to type 'struct rb_node' to use as a loop counter
+ * @pos: a pointer to RB-tree entry type to use as a loop counter
+ * @root: RB-tree's root
+ * @member: the name of the 'struct rb_node' within the RB-tree entry
+ */
+#define ubi_rb_for_each_entry(rb, pos, root, member) \
+ for (rb = rb_first(root), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
+ rb; \
+ rb = rb_next(rb), \
+ pos = (rb ? container_of(rb, typeof(*pos), member) : NULL))
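+
+/*
+ * Illustrative usage sketch only (a hypothetical helper, not provided by
+ * UBI): counting the entries of the free RB-tree with the iterators above.
+ */
+static inline int ubi_example_count_free_pebs(struct ubi_device *ubi)
+{
+	struct ubi_wl_entry *e;
+	struct rb_node *tmp_rb;
+	int count = 0;
+
+	ubi_for_each_free_peb(ubi, e, tmp_rb)
+		count++;
+	return count;
+}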
+
+/*
+ * ubi_move_aeb_to_list - move a PEB from the volume tree to a list.
+ *
+ * @av: volume attaching information
+ * @aeb: attaching eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_move_aeb_to_list(struct ubi_ainf_volume *av,
+ struct ubi_ainf_peb *aeb,
+ struct list_head *list)
+{
+ rb_erase(&aeb->u.rb, &av->root);
+ list_add_tail(&aeb->u.list, list);
+}
+
+/**
+ * ubi_zalloc_vid_hdr - allocate a volume identifier header object.
+ * @ubi: UBI device description object
+ * @gfp_flags: GFP flags to allocate with
+ *
+ * This function returns a pointer to the newly allocated and zero-filled
+ * volume identifier header object in case of success and %NULL in case of
+ * failure.
+ */
+static inline struct ubi_vid_hdr *
+ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
+{
+ void *vid_hdr;
+
+ vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
+ if (!vid_hdr)
+ return NULL;
+
+ /*
+ * VID headers may be stored at un-aligned flash offsets, so we shift
+ * the pointer.
+ */
+ return vid_hdr + ubi->vid_hdr_shift;
+}
+
+/**
+ * ubi_free_vid_hdr - free a volume identifier header object.
+ * @ubi: UBI device description object
+ * @vid_hdr: the object to free
+ */
+static inline void ubi_free_vid_hdr(const struct ubi_device *ubi,
+ struct ubi_vid_hdr *vid_hdr)
+{
+ void *p = vid_hdr;
+
+ if (!p)
+ return;
+
+ kfree(p - ubi->vid_hdr_shift);
+}
+
+/*
+ * This function is equivalent to 'ubi_io_read()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
+/*
+ * This function is equivalent to 'ubi_io_write()', but @offset is relative to
+ * the beginning of the logical eraseblock, not to the beginning of the
+ * physical eraseblock.
+ */
+static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
+ int pnum, int offset, int len)
+{
+ ubi_assert(offset >= 0);
+ return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len);
+}
+
+/**
+ * ubi_ro_mode - switch to read-only mode.
+ * @ubi: UBI device description object
+ */
+static inline void ubi_ro_mode(struct ubi_device *ubi)
+{
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn(ubi, "switch to read-only mode");
+ dump_stack();
+ }
+}
+
+/**
+ * vol_id2idx - get table index by volume ID.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ */
+static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id)
+{
+ if (vol_id >= UBI_INTERNAL_VOL_START)
+ return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots;
+ else
+ return vol_id;
+}
+
+/**
+ * idx2vol_id - get volume ID by table index.
+ * @ubi: UBI device description object
+ * @idx: table index
+ */
+static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
+{
+ if (idx >= ubi->vtbl_slots)
+ return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START;
+ else
+ return idx;
+}
+
+#ifdef __UBOOT__
+void ubi_do_worker(struct ubi_device *ubi);
+#endif
+#endif /* !__UBI_UBI_H__ */
diff --git a/roms/u-boot/drivers/mtd/ubi/upd.c b/roms/u-boot/drivers/mtd/ubi/upd.c
new file mode 100644
index 000000000..28a362aa1
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/upd.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ *
+ * Jan 2007: Alexander Schmidt, hacked per-volume update.
+ */
+
+/*
+ * This file contains implementation of the volume update and atomic LEB change
+ * functionality.
+ *
+ * The update operation is based on the per-volume update marker which is
+ * stored in the volume table. The update marker is set before the update
+ * starts, and removed after the update has been finished. So if the update was
+ * interrupted by an unclean re-boot or due to some other reasons, the update
+ * marker stays on the flash media and UBI finds it when it attaches the MTD
+ * device next time. If the update marker is set for a volume, the volume is
+ * treated as damaged and most I/O operations are prohibited. Only a new update
+ * operation is allowed.
+ *
+ * Note, in general it is possible to implement the update operation as a
+ * transaction with a roll-back capability.
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <malloc.h>
+#include <linux/uaccess.h>
+#else
+#include <div64.h>
+#include <ubi_uboot.h>
+#endif
+#include <linux/err.h>
+#include <linux/math64.h>
+
+#include "ubi.h"
+
+/**
+ * set_update_marker - set update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function sets the update marker flag for volume @vol. Returns zero
+ * in case of success and a negative error code in case of failure.
+ */
+static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_gen("set update marker for volume %d", vol->vol_id);
+
+ if (vol->upd_marker) {
+ ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
+ dbg_gen("already set");
+ return 0;
+ }
+
+ vtbl_rec = ubi->vtbl[vol->vol_id];
+ vtbl_rec.upd_marker = 1;
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ vol->upd_marker = 1;
+ mutex_unlock(&ubi->device_mutex);
+ return err;
+}
+
+/**
+ * clear_update_marker - clear update marker.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: new data size in bytes
+ *
+ * This function clears the update marker for volume @vol, sets new volume
+ * data size and clears the "corrupted" flag (static volumes only). Returns
+ * zero in case of success and a negative error code in case of failure.
+ */
+static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int err;
+ struct ubi_vtbl_record vtbl_rec;
+
+ dbg_gen("clear update marker for volume %d", vol->vol_id);
+
+ vtbl_rec = ubi->vtbl[vol->vol_id];
+ ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
+ vtbl_rec.upd_marker = 0;
+
+ if (vol->vol_type == UBI_STATIC_VOLUME) {
+ vol->corrupted = 0;
+ vol->used_bytes = bytes;
+ vol->used_ebs = div_u64_rem(bytes, vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
+
+ mutex_lock(&ubi->device_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ vol->upd_marker = 0;
+ mutex_unlock(&ubi->device_mutex);
+ return err;
+}
+
+/**
+ * ubi_start_update - start volume update.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @bytes: update bytes
+ *
+ * This function starts volume update operation. If @bytes is zero, the volume
+ * is just wiped out. Returns zero in case of success and a negative error code
+ * in case of failure.
+ */
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
+{
+ int i, err;
+
+ dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+ ubi_assert(!vol->updating && !vol->changing_leb);
+ vol->updating = 1;
+
+ vol->upd_buf = vmalloc(ubi->leb_size);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ err = set_update_marker(ubi, vol);
+ if (err)
+ return err;
+
+ /* Before updating - wipe out the volume */
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ return err;
+ }
+
+ if (bytes == 0) {
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+
+ err = clear_update_marker(ubi, vol, 0);
+ if (err)
+ return err;
+
+ vfree(vol->upd_buf);
+ vol->updating = 0;
+ return 0;
+ }
+
+ vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
+ vol->upd_bytes = bytes;
+ vol->upd_received = 0;
+ return 0;
+}
+
+/**
+ * ubi_start_leb_change - start atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @req: operation request
+ *
+ * This function starts atomic LEB change operation. Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req)
+{
+ ubi_assert(!vol->updating && !vol->changing_leb);
+
+ dbg_gen("start changing LEB %d:%d, %u bytes",
+ vol->vol_id, req->lnum, req->bytes);
+ if (req->bytes == 0)
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0);
+
+ vol->upd_bytes = req->bytes;
+ vol->upd_received = 0;
+ vol->changing_leb = 1;
+ vol->ch_lnum = req->lnum;
+
+ vol->upd_buf = vmalloc(req->bytes);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * write_leb - write update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @buf: data to write
+ * @len: data size
+ * @used_ebs: how many logical eraseblocks will this volume contain (static
+ * volumes only)
+ *
+ * This function writes update data to the corresponding logical eraseblock.
+ * In case of a dynamic volume, this function checks if the data contains 0xFF
+ * bytes at the end. If so, the trailing 0xFF bytes are cut and not written. So
+ * if the whole buffer contains only 0xFF bytes, the LEB is left unmapped.
+ *
+ * The reason why we skip the trailing 0xFF bytes in case of a dynamic volume
+ * is that we want to make sure that more data may be appended to the logical
+ * eraseblock in the future. Indeed, writing 0xFF bytes may have side effects
+ * and this PEB won't be writable anymore. So if one writes a file-system
+ * image to a UBI volume where 0xFFs mean free space - UBI makes sure this
+ * free space is writable after the update.
+ *
+ * We do not do this for static volumes because they are read-only. It could
+ * not be done anyway, because we have to store the per-LEB CRC and the
+ * correct data length.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int len, int used_ebs)
+{
+ int err;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ int l = ALIGN(len, ubi->min_io_size);
+
+ memset(buf + len, 0xFF, l - len);
+ len = ubi_calc_data_len(ubi, buf, l);
+ if (len == 0) {
+			dbg_gen("all %d bytes contain 0xFF - skip", l);
+ return 0;
+ }
+
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
+ } else {
+ /*
+ * When writing static volume, and this is the last logical
+ * eraseblock, the length (@len) does not have to be aligned to
+ * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()'
+ * function accepts exact (unaligned) length and stores it in
+ * the VID header. And it takes care of proper alignment by
+ * padding the buffer. Here we just make sure the padding will
+ * contain zeros, not random trash.
+ */
+ memset(buf + len, 0, vol->usable_leb_size - len);
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, used_ebs);
+ }
+
+ return err;
+}
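+
+/*
+ * Illustrative sketch (assumed numbers): with min_io_size = 2048 and
+ * len = 5000, the buffer is first padded with 0xFF up to
+ * l = ALIGN(5000, 2048) = 6144. If the last non-0xFF byte sits at offset
+ * 3100, ubi_calc_data_len() trims the write down to
+ * ALIGN(3101, 2048) = 4096 bytes, so the all-0xFF tail is never programmed
+ * and stays writable after the update.
+ */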
+
+/**
+ * ubi_more_update_data - write more update data.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function writes more data to the volume which is being updated. It may
+ * be called an arbitrary number of times until all the update data arrives. This
+ * function returns %0 in case of success, the number of bytes written during the
+ * last call if the whole volume update has been successfully finished, and a
+ * negative error code in case of failure.
+ */
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+#ifndef __UBOOT__
+ int lnum, offs, err = 0, len, to_write = count;
+#else
+ int lnum, err = 0, len, to_write = count;
+ u32 offs;
+#endif
+
+ dbg_gen("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ lnum = div_u64_rem(vol->upd_received, vol->usable_leb_size, &offs);
+ if (vol->upd_received + count > vol->upd_bytes)
+ to_write = count = vol->upd_bytes - vol->upd_received;
+
+ /*
+	 * When updating volumes, we accumulate a whole logical eraseblock of
+ * data and write it at once.
+ */
+ if (offs != 0) {
+ /*
+ * This is a write to the middle of the logical eraseblock. We
+ * copy the data to our update buffer and wait for more data or
+ * flush it if the whole eraseblock is written or the update
+ * is finished.
+ */
+
+ len = vol->usable_leb_size - offs;
+ if (len > count)
+ len = count;
+
+ err = copy_from_user(vol->upd_buf + offs, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (offs + len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ int flush_len = offs + len;
+
+ /*
+ * OK, we gathered either the whole eraseblock or this
+ * is the last chunk, it's time to flush the buffer.
+ */
+ ubi_assert(flush_len <= vol->usable_leb_size);
+ err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
+ vol->upd_ebs);
+ if (err)
+ return err;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ buf += len;
+ lnum += 1;
+ }
+
+ /*
+ * If we've got more to write, let's continue. At this point we know we
+ * are starting from the beginning of an eraseblock.
+ */
+ while (count) {
+ if (count > vol->usable_leb_size)
+ len = vol->usable_leb_size;
+ else
+ len = count;
+
+ err = copy_from_user(vol->upd_buf, buf, len);
+ if (err)
+ return -EFAULT;
+
+ if (len == vol->usable_leb_size ||
+ vol->upd_received + len == vol->upd_bytes) {
+ err = write_leb(ubi, vol, lnum, vol->upd_buf,
+ len, vol->upd_ebs);
+ if (err)
+ break;
+ }
+
+ vol->upd_received += len;
+ count -= len;
+ lnum += 1;
+ buf += len;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ /* The update is finished, clear the update marker */
+ err = clear_update_marker(ubi, vol, vol->upd_bytes);
+ if (err)
+ return err;
+ vol->updating = 0;
+ err = to_write;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
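+
+/*
+ * Illustrative sketch (assumed numbers): how the resume position is split
+ * into an LEB number and an offset above. With usable_leb_size = 129024
+ * and upd_received = 300000,
+ *
+ *	lnum = div_u64_rem(300000, 129024, &offs);
+ *
+ * yields lnum == 2 and offs == 300000 - 2 * 129024 == 41952, i.e. the next
+ * chunk lands 41952 bytes into LEB 2 and is buffered in @upd_buf until the
+ * eraseblock is complete or the update ends.
+ */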
+
+/**
+ * ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function accepts more data for the volume which is under the
+ * "atomic LEB change" operation. It may be called an arbitrary number of times
+ * until all data arrives. This function returns %0 in case of success, the number
+ * of bytes written during the last call if the whole "atomic LEB change"
+ * operation has been successfully finished, and a negative error code in case
+ * of failure.
+ */
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ int err;
+
+ dbg_gen("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (vol->upd_received + count > vol->upd_bytes)
+ count = vol->upd_bytes - vol->upd_received;
+
+ err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
+ if (err)
+ return -EFAULT;
+
+ vol->upd_received += count;
+
+ if (vol->upd_received == vol->upd_bytes) {
+ int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
+
+ memset(vol->upd_buf + vol->upd_bytes, 0xFF,
+ len - vol->upd_bytes);
+ len = ubi_calc_data_len(ubi, vol->upd_buf, len);
+ err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
+ vol->upd_buf, len);
+ if (err)
+ return err;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ vol->changing_leb = 0;
+ err = count;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
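+
+/*
+ * Illustrative sketch (assumed numbers): the final flush above. With
+ * upd_bytes = 5000 and min_io_size = 2048, the buffer is padded with 0xFF
+ * up to ALIGN(5000, 2048) = 6144 bytes. If the last non-0xFF byte is at
+ * offset 3400, ubi_calc_data_len() shrinks the atomic change to
+ * ALIGN(3401, 2048) = 4096 bytes before 'ubi_eba_atomic_leb_change()' is
+ * called.
+ */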
diff --git a/roms/u-boot/drivers/mtd/ubi/vmt.c b/roms/u-boot/drivers/mtd/ubi/vmt.c
new file mode 100644
index 000000000..0bfedd0de
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/vmt.c
@@ -0,0 +1,822 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file contains implementation of volume creation, deletion, updating and
+ * resizing.
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#else
+#include <div64.h>
+#include <ubi_uboot.h>
+#endif
+#include <linux/math64.h>
+
+#include "ubi.h"
+
+static int self_check_volumes(struct ubi_device *ubi);
+
+#ifndef __UBOOT__
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
+static struct device_attribute attr_vol_reserved_ebs =
+ __ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_type =
+ __ATTR(type, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_name =
+ __ATTR(name, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_corrupted =
+ __ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_alignment =
+ __ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_usable_eb_size =
+ __ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_data_bytes =
+ __ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
+static struct device_attribute attr_vol_upd_marker =
+ __ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
+
+/*
+ * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'.
+ *
+ * Consider a situation:
+ * A. process 1 opens a sysfs file related to volume Y, say
+ * /<sysfs>/class/ubi/ubiX_Y/reserved_ebs;
+ * B. process 2 removes volume Y;
+ * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
+ *
+ * In this situation, this function will return %-ENODEV because it will find
+ * out that the volume was removed from the @ubi->volumes array.
+ */
+static ssize_t vol_attribute_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+ struct ubi_device *ubi;
+
+ ubi = ubi_get_device(vol->ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ spin_lock(&ubi->volumes_lock);
+ if (!ubi->volumes[vol->vol_id]) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
+ return -ENODEV;
+ }
+ /* Take a reference to prevent volume removal */
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ if (attr == &attr_vol_reserved_ebs)
+ ret = sprintf(buf, "%d\n", vol->reserved_pebs);
+ else if (attr == &attr_vol_type) {
+ const char *tp;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ tp = "dynamic";
+ else
+ tp = "static";
+ ret = sprintf(buf, "%s\n", tp);
+ } else if (attr == &attr_vol_name)
+ ret = sprintf(buf, "%s\n", vol->name);
+ else if (attr == &attr_vol_corrupted)
+ ret = sprintf(buf, "%d\n", vol->corrupted);
+ else if (attr == &attr_vol_alignment)
+ ret = sprintf(buf, "%d\n", vol->alignment);
+ else if (attr == &attr_vol_usable_eb_size)
+ ret = sprintf(buf, "%d\n", vol->usable_leb_size);
+ else if (attr == &attr_vol_data_bytes)
+ ret = sprintf(buf, "%lld\n", vol->used_bytes);
+ else if (attr == &attr_vol_upd_marker)
+ ret = sprintf(buf, "%d\n", vol->upd_marker);
+ else
+ /* This must be a bug */
+ ret = -EINVAL;
+
+ /* We've done the operation, drop volume and UBI device references */
+ spin_lock(&ubi->volumes_lock);
+ vol->ref_count -= 1;
+ ubi_assert(vol->ref_count >= 0);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
+ return ret;
+}
+
+static struct attribute *volume_dev_attrs[] = {
+ &attr_vol_reserved_ebs.attr,
+ &attr_vol_type.attr,
+ &attr_vol_name.attr,
+ &attr_vol_corrupted.attr,
+ &attr_vol_alignment.attr,
+ &attr_vol_usable_eb_size.attr,
+ &attr_vol_data_bytes.attr,
+ &attr_vol_upd_marker.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(volume_dev);
+#endif
+
+/* Release method for volume devices */
+static void vol_release(struct device *dev)
+{
+ struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+
+ kfree(vol->eba_tbl);
+ kfree(vol);
+}
+
+/**
+ * ubi_create_volume - create volume.
+ * @ubi: UBI device description object
+ * @req: volume creation request
+ *
+ * This function creates the volume described by @req. If @req->vol_id is
+ * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
+ * and saves it in @req->vol_id. Returns zero in case of success and a negative
+ * error code in case of failure. Note, the caller has to have the
+ * @ubi->device_mutex locked.
+ */
+int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
+{
+ int i, err, vol_id = req->vol_id, do_free = 1;
+ struct ubi_volume *vol;
+ struct ubi_vtbl_record vtbl_rec;
+ dev_t dev;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG)
+ vol->skip_check = 1;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ /* Find unused volume ID */
+ dbg_gen("search for vacant volume ID");
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (!ubi->volumes[i]) {
+ vol_id = i;
+ break;
+ }
+
+ if (vol_id == UBI_VOL_NUM_AUTO) {
+ ubi_err(ubi, "out of volume IDs");
+ err = -ENFILE;
+ goto out_unlock;
+ }
+ req->vol_id = vol_id;
+ }
+
+ dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s",
+ ubi->ubi_num, vol_id, (unsigned long long)req->bytes,
+ (int)req->vol_type, req->name);
+
+ /* Ensure that this volume does not exist */
+ err = -EEXIST;
+ if (ubi->volumes[vol_id]) {
+ ubi_err(ubi, "volume %d already exists", vol_id);
+ goto out_unlock;
+ }
+
+ /* Ensure that the name is unique */
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ if (ubi->volumes[i] &&
+ ubi->volumes[i]->name_len == req->name_len &&
+ !strcmp(ubi->volumes[i]->name, req->name)) {
+ ubi_err(ubi, "volume \"%s\" exists (ID %d)",
+ req->name, i);
+ goto out_unlock;
+ }
+
+ /* Calculate how many eraseblocks are requested */
+ vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment;
+ vol->reserved_pebs = div_u64(req->bytes + vol->usable_leb_size - 1,
+ vol->usable_leb_size);
+
+ /* Reserve physical eraseblocks */
+ if (vol->reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs, only %d available",
+ ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ err = -ENOSPC;
+ goto out_unlock;
+ }
+ ubi->avail_pebs -= vol->reserved_pebs;
+ ubi->rsvd_pebs += vol->reserved_pebs;
+ spin_unlock(&ubi->volumes_lock);
+
+ vol->vol_id = vol_id;
+ vol->alignment = req->alignment;
+ vol->data_pad = ubi->leb_size % vol->alignment;
+ vol->vol_type = req->vol_type;
+ vol->name_len = req->name_len;
+ memcpy(vol->name, req->name, vol->name_len);
+ vol->ubi = ubi;
+
+ /*
+ * Finish all pending erases because there may be some LEBs belonging
+ * to the same volume ID.
+ */
+ err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
+ if (err)
+ goto out_acc;
+
+ vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL);
+ if (!vol->eba_tbl) {
+ err = -ENOMEM;
+ goto out_acc;
+ }
+
+ for (i = 0; i < vol->reserved_pebs; i++)
+ vol->eba_tbl[i] = UBI_LEB_UNMAPPED;
+
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ } else {
+ vol->used_ebs = div_u64_rem(vol->used_bytes,
+ vol->usable_leb_size,
+ &vol->last_eb_bytes);
+ if (vol->last_eb_bytes != 0)
+ vol->used_ebs += 1;
+ else
+ vol->last_eb_bytes = vol->usable_leb_size;
+ }
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
+ if (err) {
+ ubi_err(ubi, "cannot add character device");
+ goto out_mapping;
+ }
+
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.devt = dev;
+#ifndef __UBOOT__
+ vol->dev.class = &ubi_class;
+ vol->dev.groups = volume_dev_groups;
+#endif
+
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = device_register(&vol->dev);
+ if (err) {
+ ubi_err(ubi, "cannot register device");
+ goto out_cdev;
+ }
+
+ /* Fill volume table record */
+ memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record));
+ vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs);
+ vtbl_rec.alignment = cpu_to_be32(vol->alignment);
+ vtbl_rec.data_pad = cpu_to_be32(vol->data_pad);
+ vtbl_rec.name_len = cpu_to_be16(vol->name_len);
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME)
+ vtbl_rec.vol_type = UBI_VID_DYNAMIC;
+ else
+ vtbl_rec.vol_type = UBI_VID_STATIC;
+
+ if (vol->skip_check)
+ vtbl_rec.flags |= UBI_VTBL_SKIP_CRC_CHECK_FLG;
+
+ memcpy(vtbl_rec.name, vol->name, vol->name_len);
+
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_sysfs;
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+ ubi->vol_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+ self_check_volumes(ubi);
+ return err;
+
+out_sysfs:
+ /*
+ * We have registered our device, we should not free the volume
+ * description object in this function in case of an error - it is
+ * freed by the release function.
+ *
+ * Get device reference to prevent the release function from being
+ * called just after sysfs has been closed.
+ */
+ do_free = 0;
+ get_device(&vol->dev);
+ device_unregister(&vol->dev);
+out_cdev:
+ cdev_del(&vol->cdev);
+out_mapping:
+ if (do_free)
+ kfree(vol->eba_tbl);
+out_acc:
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= vol->reserved_pebs;
+ ubi->avail_pebs += vol->reserved_pebs;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ if (do_free)
+ kfree(vol);
+ else
+ put_device(&vol->dev);
+ ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
+ return err;
+}
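+
+/*
+ * Illustrative sketch (assumed numbers): the size calculation above. With
+ * leb_size = 126976 and a request asking for alignment = 3000 and
+ * bytes = 1048576,
+ *
+ *	usable_leb_size = 126976 - 126976 % 3000 = 126000
+ *	data_pad        = 126976 % 3000          = 976
+ *	reserved_pebs   = ceil(1048576 / 126000) = 9
+ *
+ * so nine PEBs move from ubi->avail_pebs to ubi->rsvd_pebs for the new
+ * volume.
+ */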
+
+/**
+ * ubi_remove_volume - remove volume.
+ * @desc: volume descriptor
+ * @no_vtbl: do not change volume table if not zero
+ *
+ * This function removes volume described by @desc. The volume has to be opened
+ * in "exclusive" mode. Returns zero in case of success and a negative error
+ * code in case of failure. The caller has to have the @ubi->device_mutex
+ * locked.
+ */
+int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs;
+
+ dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id);
+ ubi_assert(desc->mode == UBI_EXCLUSIVE);
+ ubi_assert(vol == ubi->volumes[vol_id]);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ /*
+ * The volume is busy, probably someone is reading one of its
+ * sysfs files.
+ */
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ ubi->volumes[vol_id] = NULL;
+ spin_unlock(&ubi->volumes_lock);
+
+ if (!no_vtbl) {
+ err = ubi_change_vtbl_record(ubi, vol_id, NULL);
+ if (err)
+ goto out_err;
+ }
+
+ for (i = 0; i < vol->reserved_pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, i);
+ if (err)
+ goto out_err;
+ }
+
+ cdev_del(&vol->cdev);
+ device_unregister(&vol->dev);
+
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= reserved_pebs;
+ ubi->avail_pebs += reserved_pebs;
+ ubi_update_reserved(ubi);
+ ubi->vol_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED);
+ if (!no_vtbl)
+ self_check_volumes(ubi);
+
+ return err;
+
+out_err:
+ ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ return err;
+}
+
+/**
+ * ubi_resize_volume - re-size volume.
+ * @desc: volume descriptor
+ * @reserved_pebs: new size in physical eraseblocks
+ *
+ * This function re-sizes the volume and returns zero in case of success, and a
+ * negative error code in case of failure. The caller has to have the
+ * @ubi->device_mutex locked.
+ */
+int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+{
+ int i, err, pebs, *new_mapping;
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ struct ubi_vtbl_record vtbl_rec;
+ int vol_id = vol->vol_id;
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+	dbg_gen("re-size device %d, volume %d from %d to %d PEBs",
+ ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs);
+
+ if (vol->vol_type == UBI_STATIC_VOLUME &&
+ reserved_pebs < vol->used_ebs) {
+ ubi_err(ubi, "too small size %d, %d LEBs contain data",
+ reserved_pebs, vol->used_ebs);
+ return -EINVAL;
+ }
+
+ /* If the size is the same, we have nothing to do */
+ if (reserved_pebs == vol->reserved_pebs)
+ return 0;
+
+ new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL);
+ if (!new_mapping)
+ return -ENOMEM;
+
+ for (i = 0; i < reserved_pebs; i++)
+ new_mapping[i] = UBI_LEB_UNMAPPED;
+
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ spin_unlock(&ubi->volumes_lock);
+ err = -EBUSY;
+ goto out_free;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ /* Reserve physical eraseblocks */
+ pebs = reserved_pebs - vol->reserved_pebs;
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ if (pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs: requested %d, available %d",
+ pebs, ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ spin_unlock(&ubi->volumes_lock);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= pebs;
+ ubi->rsvd_pebs += pebs;
+ for (i = 0; i < vol->reserved_pebs; i++)
+ new_mapping[i] = vol->eba_tbl[i];
+ kfree(vol->eba_tbl);
+ vol->eba_tbl = new_mapping;
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ /* Change volume table record */
+ vtbl_rec = ubi->vtbl[vol_id];
+ vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ goto out_acc;
+
+ if (pebs < 0) {
+ for (i = 0; i < -pebs; i++) {
+ err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+ if (err)
+ goto out_acc;
+ }
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs += pebs;
+ ubi->avail_pebs -= pebs;
+ ubi_update_reserved(ubi);
+ for (i = 0; i < reserved_pebs; i++)
+ new_mapping[i] = vol->eba_tbl[i];
+ kfree(vol->eba_tbl);
+ vol->eba_tbl = new_mapping;
+ spin_unlock(&ubi->volumes_lock);
+ }
+
+ vol->reserved_pebs = reserved_pebs;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ }
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
+ self_check_volumes(ubi);
+ return err;
+
+out_acc:
+ if (pebs > 0) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ spin_unlock(&ubi->volumes_lock);
+ }
+out_free:
+ kfree(new_mapping);
+ return err;
+}
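+
+/*
+ * Illustrative sketch (assumed numbers): the PEB accounting above. Growing
+ * a volume from 9 to 12 reserved PEBs gives pebs = 3, which is taken from
+ * ubi->avail_pebs; shrinking from 9 to 7 gives pebs = -2, so the two LEBs
+ * past the new end are unmapped and two PEBs are returned to
+ * ubi->avail_pebs.
+ */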
+
+/**
+ * ubi_rename_volumes - re-name UBI volumes.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names or removes volumes specified in the re-name list.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list)
+{
+ int err;
+ struct ubi_rename_entry *re;
+
+ err = ubi_vtbl_rename_volumes(ubi, rename_list);
+ if (err)
+ return err;
+
+ list_for_each_entry(re, rename_list, list) {
+ if (re->remove) {
+ err = ubi_remove_volume(re->desc, 1);
+ if (err)
+ break;
+ } else {
+ struct ubi_volume *vol = re->desc->vol;
+
+ spin_lock(&ubi->volumes_lock);
+ vol->name_len = re->new_name_len;
+ memcpy(vol->name, re->new_name, re->new_name_len + 1);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED);
+ }
+ }
+
+ if (!err)
+ self_check_volumes(ubi);
+ return err;
+}
+
+/**
+ * ubi_add_volume - add volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function adds an existing volume and initializes all its data
+ * structures. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ int err, vol_id = vol->vol_id;
+ dev_t dev;
+
+ dbg_gen("add volume %d", vol_id);
+
+ /* Register character device for the volume */
+ cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ vol->cdev.owner = THIS_MODULE;
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
+ if (err) {
+ ubi_err(ubi, "cannot add character device for volume %d, error %d",
+ vol_id, err);
+ return err;
+ }
+
+ vol->dev.release = vol_release;
+ vol->dev.parent = &ubi->dev;
+ vol->dev.devt = dev;
+#ifndef __UBOOT__
+ vol->dev.class = &ubi_class;
+ vol->dev.groups = volume_dev_groups;
+#endif
+ dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ err = device_register(&vol->dev);
+ if (err)
+ goto out_cdev;
+
+ self_check_volumes(ubi);
+ return err;
+
+out_cdev:
+ cdev_del(&vol->cdev);
+ return err;
+}
+
+/**
+ * ubi_free_volume - free volume.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ *
+ * This function frees all resources for volume @vol but does not remove it.
+ * Used only when the UBI device is detached.
+ */
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
+{
+ dbg_gen("free volume %d", vol->vol_id);
+
+ ubi->volumes[vol->vol_id] = NULL;
+ cdev_del(&vol->cdev);
+ device_unregister(&vol->dev);
+}
+
+/**
+ * self_check_volume - check volume information.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ *
+ * Returns zero if the volume is all right and a negative error code if not.
+ */
+static int self_check_volume(struct ubi_device *ubi, int vol_id)
+{
+ int idx = vol_id2idx(ubi, vol_id);
+ int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker;
+ const struct ubi_volume *vol;
+ long long n;
+ const char *name;
+
+ spin_lock(&ubi->volumes_lock);
+ reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs);
+ vol = ubi->volumes[idx];
+
+ if (!vol) {
+ if (reserved_pebs) {
+ ubi_err(ubi, "no volume info, but volume exists");
+ goto fail;
+ }
+ spin_unlock(&ubi->volumes_lock);
+ return 0;
+ }
+
+ if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
+ vol->name_len < 0) {
+ ubi_err(ubi, "negative values");
+ goto fail;
+ }
+ if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
+ ubi_err(ubi, "bad alignment");
+ goto fail;
+ }
+
+ n = vol->alignment & (ubi->min_io_size - 1);
+ if (vol->alignment != 1 && n) {
+ ubi_err(ubi, "alignment is not multiple of min I/O unit");
+ goto fail;
+ }
+
+ n = ubi->leb_size % vol->alignment;
+ if (vol->data_pad != n) {
+ ubi_err(ubi, "bad data_pad, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
+ vol->vol_type != UBI_STATIC_VOLUME) {
+ ubi_err(ubi, "bad vol_type");
+ goto fail;
+ }
+
+ if (vol->upd_marker && vol->corrupted) {
+ ubi_err(ubi, "update marker and corrupted simultaneously");
+ goto fail;
+ }
+
+ if (vol->reserved_pebs > ubi->good_peb_count) {
+ ubi_err(ubi, "too large reserved_pebs");
+ goto fail;
+ }
+
+ n = ubi->leb_size - vol->data_pad;
+ if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
+ ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
+ goto fail;
+ }
+
+ if (vol->name_len > UBI_VOL_NAME_MAX) {
+ ubi_err(ubi, "too long volume name, max is %d",
+ UBI_VOL_NAME_MAX);
+ goto fail;
+ }
+
+ n = strnlen(vol->name, vol->name_len + 1);
+ if (n != vol->name_len) {
+ ubi_err(ubi, "bad name_len %lld", n);
+ goto fail;
+ }
+
+ n = (long long)vol->used_ebs * vol->usable_leb_size;
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ if (vol->corrupted) {
+ ubi_err(ubi, "corrupted dynamic volume");
+ goto fail;
+ }
+ if (vol->used_ebs != vol->reserved_pebs) {
+ ubi_err(ubi, "bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes != vol->usable_leb_size) {
+ ubi_err(ubi, "bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes != n) {
+ ubi_err(ubi, "bad used_bytes");
+ goto fail;
+ }
+
+ if (vol->skip_check) {
+ ubi_err(ubi, "bad skip_check");
+ goto fail;
+ }
+ } else {
+ if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
+ ubi_err(ubi, "bad used_ebs");
+ goto fail;
+ }
+ if (vol->last_eb_bytes < 0 ||
+ vol->last_eb_bytes > vol->usable_leb_size) {
+ ubi_err(ubi, "bad last_eb_bytes");
+ goto fail;
+ }
+ if (vol->used_bytes < 0 || vol->used_bytes > n ||
+ vol->used_bytes < n - vol->usable_leb_size) {
+ ubi_err(ubi, "bad used_bytes");
+ goto fail;
+ }
+ }
+
+ alignment = be32_to_cpu(ubi->vtbl[vol_id].alignment);
+ data_pad = be32_to_cpu(ubi->vtbl[vol_id].data_pad);
+ name_len = be16_to_cpu(ubi->vtbl[vol_id].name_len);
+ upd_marker = ubi->vtbl[vol_id].upd_marker;
+ name = &ubi->vtbl[vol_id].name[0];
+ if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC)
+ vol_type = UBI_DYNAMIC_VOLUME;
+ else
+ vol_type = UBI_STATIC_VOLUME;
+
+ if (alignment != vol->alignment || data_pad != vol->data_pad ||
+ upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
+ name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
+ ubi_err(ubi, "volume info is different");
+ goto fail;
+ }
+
+ spin_unlock(&ubi->volumes_lock);
+ return 0;
+
+fail:
+ ubi_err(ubi, "self-check failed for volume %d", vol_id);
+ if (vol)
+ ubi_dump_vol_info(vol);
+ ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
+ dump_stack();
+ spin_unlock(&ubi->volumes_lock);
+ return -EINVAL;
+}
+
+/**
+ * self_check_volumes - check information about all volumes.
+ * @ubi: UBI device description object
+ *
+ * Returns zero if all volumes are all right and a negative error code if not.
+ */
+static int self_check_volumes(struct ubi_device *ubi)
+{
+ int i, err = 0;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ err = self_check_volume(ubi, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
diff --git a/roms/u-boot/drivers/mtd/ubi/vtbl.c b/roms/u-boot/drivers/mtd/ubi/vtbl.c
new file mode 100644
index 000000000..a2b5352cb
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/vtbl.c
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ * Copyright (c) Nokia Corporation, 2006, 2007
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+/*
+ * This file includes volume table manipulation code. The volume table is an
+ * on-flash table containing volume meta-data like name, number of reserved
+ * physical eraseblocks, type, etc. The volume table is stored in the so-called
+ * "layout volume".
+ *
+ * The layout volume is an internal volume which is organized as follows. It
+ * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical
+ * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
+ * other. This redundancy guarantees robustness to unclean reboots. The volume
+ * table is basically an array of volume table records. Each record contains
+ * full information about the volume and is protected by a CRC checksum. Note,
+ * nowadays we use the atomic LEB change operation when updating the volume
+ * table, so we do not really need 2 LEBs anymore, but we preserve the older
+ * design for backward compatibility reasons.
+ *
+ * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
+ * erased, and the updated volume table is written back to LEB 0. Then the same
+ * is done for LEB 1. This scheme guarantees recoverability from unclean reboots.
+ *
+ * In this UBI implementation the on-flash volume table does not contain any
+ * information about how much data static volumes contain.
+ *
+ * But it would still be beneficial to store this information in the volume
+ * table. For example, suppose we have a static volume X, and all its physical
+ * eraseblocks became bad for some reason. Suppose we are attaching the
+ * corresponding MTD device, and for some reason we find no logical eraseblocks
+ * corresponding to volume X. According to the volume table, volume X does
+ * exist. So we don't know whether it is just empty or all its physical
+ * eraseblocks went bad. So we cannot warn the user properly.
+ *
+ * The volume table also stores so-called "update marker", which is used for
+ * volume updates. Before updating the volume, the update marker is set, and
+ * after the update operation is finished, the update marker is cleared. So if
+ * the update operation was interrupted (e.g. by an unclean reboot) - the
+ * update marker is still there and we know that the volume's contents are
+ * damaged.
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <asm/div64.h>
+#include <u-boot/crc.h>
+#else
+#include <ubi_uboot.h>
+#include <linux/bug.h>
+#endif
+
+#include <linux/err.h>
+#include "ubi.h"
+
+static void self_vtbl_check(const struct ubi_device *ubi);
+
+/* Empty volume table record */
+static struct ubi_vtbl_record empty_vtbl_record;
+
+/**
+ * ubi_update_layout_vol - helper for updating the layout volume on flash
+ * @ubi: UBI device description object
+ */
+static int ubi_update_layout_vol(struct ubi_device *ubi)
+{
+ struct ubi_volume *layout_vol;
+ int i, err;
+
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
+ ubi->vtbl_size);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_change_vtbl_record - change volume table record.
+ * @ubi: UBI device description object
+ * @idx: table index to change
+ * @vtbl_rec: new volume table record
+ *
+ * This function changes volume table record @idx. If @vtbl_rec is %NULL, an
+ * empty volume table record is written. The caller does not have to calculate
+ * the CRC of the record as it is done by this function. Returns zero in case
+ * of success and a negative error code in case of failure.
+ */
+int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
+ struct ubi_vtbl_record *vtbl_rec)
+{
+ int err;
+ uint32_t crc;
+
+ ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+
+ if (!vtbl_rec)
+ vtbl_rec = &empty_vtbl_record;
+ else {
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
+ err = ubi_update_layout_vol(ubi);
+
+ self_vtbl_check(ubi);
+ return err ? err : 0;
+}
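+
+/*
+ * Illustrative sketch: the CRC computed above covers only the first
+ * UBI_VTBL_RECORD_SIZE_CRC bytes of a record, i.e. everything except the
+ * trailing crc field itself, so a reader can verify a record with
+ *
+ *	crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
+ *	ok  = (be32_to_cpu(vtbl[i].crc) == crc);
+ *
+ * which is exactly what vtbl_check() does below.
+ */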
+
+/**
+ * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
+ * @ubi: UBI device description object
+ * @rename_list: list of &struct ubi_rename_entry objects
+ *
+ * This function re-names multiple volumes specified in @rename_list in the volume
+ * table. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
+ struct list_head *rename_list)
+{
+ struct ubi_rename_entry *re;
+
+ list_for_each_entry(re, rename_list, list) {
+ uint32_t crc;
+ struct ubi_volume *vol = re->desc->vol;
+ struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];
+
+ if (re->remove) {
+ memcpy(vtbl_rec, &empty_vtbl_record,
+ sizeof(struct ubi_vtbl_record));
+ continue;
+ }
+
+ vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
+ memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
+ memset(vtbl_rec->name + re->new_name_len, 0,
+ UBI_VOL_NAME_MAX + 1 - re->new_name_len);
+ crc = crc32(UBI_CRC32_INIT, vtbl_rec,
+ UBI_VTBL_RECORD_SIZE_CRC);
+ vtbl_rec->crc = cpu_to_be32(crc);
+ }
+
+ return ubi_update_layout_vol(ubi);
+}
+
+/**
+ * vtbl_check - check that the volume table is not corrupted and is sensible.
+ * @ubi: UBI device description object
+ * @vtbl: volume table
+ *
+ * This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
+ * and %-EINVAL if it contains inconsistent data.
+ */
+static int vtbl_check(const struct ubi_device *ubi,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
+ int upd_marker, err;
+ uint32_t crc;
+ const char *name;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ alignment = be32_to_cpu(vtbl[i].alignment);
+ data_pad = be32_to_cpu(vtbl[i].data_pad);
+ upd_marker = vtbl[i].upd_marker;
+ vol_type = vtbl[i].vol_type;
+ name_len = be16_to_cpu(vtbl[i].name_len);
+ name = &vtbl[i].name[0];
+
+ crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
+ if (be32_to_cpu(vtbl[i].crc) != crc) {
+ ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
+ i, crc, be32_to_cpu(vtbl[i].crc));
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return 1;
+ }
+
+ if (reserved_pebs == 0) {
+ if (memcmp(&vtbl[i], &empty_vtbl_record,
+ UBI_VTBL_RECORD_SIZE)) {
+ err = 2;
+ goto bad;
+ }
+ continue;
+ }
+
+ if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
+ name_len < 0) {
+ err = 3;
+ goto bad;
+ }
+
+ if (alignment > ubi->leb_size || alignment == 0) {
+ err = 4;
+ goto bad;
+ }
+
+ n = alignment & (ubi->min_io_size - 1);
+ if (alignment != 1 && n) {
+ err = 5;
+ goto bad;
+ }
+
+ n = ubi->leb_size % alignment;
+ if (data_pad != n) {
+ ubi_err(ubi, "bad data_pad, has to be %d", n);
+ err = 6;
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ err = 7;
+ goto bad;
+ }
+
+ if (upd_marker != 0 && upd_marker != 1) {
+ err = 8;
+ goto bad;
+ }
+
+ if (reserved_pebs > ubi->good_peb_count) {
+ ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
+ reserved_pebs, ubi->good_peb_count);
+ err = 9;
+ goto bad;
+ }
+
+ if (name_len > UBI_VOL_NAME_MAX) {
+ err = 10;
+ goto bad;
+ }
+
+ if (name[0] == '\0') {
+ err = 11;
+ goto bad;
+ }
+
+ if (name_len != strnlen(name, name_len + 1)) {
+ err = 12;
+ goto bad;
+ }
+ }
+
+ /* Checks that all names are unique */
+ for (i = 0; i < ubi->vtbl_slots - 1; i++) {
+ for (n = i + 1; n < ubi->vtbl_slots; n++) {
+ int len1 = be16_to_cpu(vtbl[i].name_len);
+ int len2 = be16_to_cpu(vtbl[n].name_len);
+
+ if (len1 > 0 && len1 == len2 &&
+#ifndef __UBOOT__
+ !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
+#else
+ !strncmp((char *)vtbl[i].name, vtbl[n].name, len1)) {
+#endif
+ ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return -EINVAL;
+}
+
+/**
+ * create_vtbl - create a copy of volume table.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @copy: number of the volume table copy
+ * @vtbl: contents of the volume table
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ int copy, void *vtbl)
+{
+ int err, tries = 0;
+ struct ubi_vid_hdr *vid_hdr;
+ struct ubi_ainf_peb *new_aeb;
+
+ dbg_gen("create volume table (copy #%d)", copy + 1);
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+retry:
+ new_aeb = ubi_early_get_peb(ubi, ai);
+ if (IS_ERR(new_aeb)) {
+ err = PTR_ERR(new_aeb);
+ goto out_free;
+ }
+
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
+ vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
+ vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
+ vid_hdr->data_size = vid_hdr->used_ebs =
+ vid_hdr->data_pad = cpu_to_be32(0);
+ vid_hdr->lnum = cpu_to_be32(copy);
+ vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
+
+ /* The EC header is already there, write the VID header */
+ err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vid_hdr);
+ if (err)
+ goto write_error;
+
+ /* Write the layout volume contents */
+ err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
+ if (err)
+ goto write_error;
+
+ /*
+ * And add it to the attaching information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
+ */
+ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+
+write_error:
+ if (err == -EIO && ++tries <= 5) {
+ /*
+ * Probably this physical eraseblock went bad, try to pick
+ * another one.
+ */
+ list_add(&new_aeb->u.list, &ai->erase);
+ goto retry;
+ }
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
+out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return err;
+}
+
+/**
+ * process_lvol - process the layout volume.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ * @av: layout volume attaching information
+ *
+ * This function is responsible for reading the layout volume, ensuring it is
+ * not corrupted, and recovering from corruptions if needed. Returns the volume
+ * table in case of success and a negative error code in case of failure.
+ */
+static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_ainf_volume *av)
+{
+ int err;
+ struct rb_node *rb;
+ struct ubi_ainf_peb *aeb;
+ struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
+ int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};
+
+ /*
+ * UBI goes through the following steps when it changes the layout
+ * volume:
+ * a. erase LEB 0;
+ * b. write new data to LEB 0;
+ * c. erase LEB 1;
+ * d. write new data to LEB 1.
+ *
+ * Before the change, both LEBs contain the same data.
+ *
+	 * Due to unclean reboots, the contents of LEB 0 may be lost, but there
+	 * should be LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is
+	 * not. Similarly, LEB 1 may be lost, but there should be LEB 0. And
+	 * finally, unclean reboots may result in a situation when neither LEB
+	 * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
+	 * 0 contains more recent information.
+	 *
+	 * So the plan is to first check LEB 0. Then
+	 * a. if LEB 0 is OK, it must contain the most recent data; then we
+	 *    compare it with LEB 1, and if they are different, we copy LEB
+	 *    0 to LEB 1;
+	 * b. if LEB 0 is corrupted, LEB 1 has to be OK, and we copy LEB 1
+	 *    to LEB 0.
+ */
+
+ dbg_gen("check layout volume");
+
+ /* Read both LEB 0 and LEB 1 into memory */
+ ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
+ leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
+ if (!leb[aeb->lnum]) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
+ ubi->vtbl_size);
+ if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
+ /*
+ * Scrub the PEB later. Note, -EBADMSG indicates an
+ * uncorrectable ECC error, but we have our own CRC and
+ * the data will be checked later. If the data is OK,
+ * the PEB will be scrubbed (because we set
+ * aeb->scrub). If the data is not OK, the contents of
+ * the PEB will be recovered from the second copy, and
+ * aeb->scrub will be cleared in
+ * 'ubi_add_to_av()'.
+ */
+ aeb->scrub = 1;
+ else if (err)
+ goto out_free;
+ }
+
+ err = -EINVAL;
+ if (leb[0]) {
+ leb_corrupted[0] = vtbl_check(ubi, leb[0]);
+ if (leb_corrupted[0] < 0)
+ goto out_free;
+ }
+
+ if (!leb_corrupted[0]) {
+ /* LEB 0 is OK */
+ if (leb[1])
+ leb_corrupted[1] = memcmp(leb[0], leb[1],
+ ubi->vtbl_size);
+ if (leb_corrupted[1]) {
+ ubi_warn(ubi, "volume table copy #2 is corrupted");
+ err = create_vtbl(ubi, ai, 1, leb[0]);
+ if (err)
+ goto out_free;
+ ubi_msg(ubi, "volume table was restored");
+ }
+
+		/* Both LEB 0 and LEB 1 are OK and consistent */
+ vfree(leb[1]);
+ return leb[0];
+ } else {
+ /* LEB 0 is corrupted or does not exist */
+ if (leb[1]) {
+ leb_corrupted[1] = vtbl_check(ubi, leb[1]);
+ if (leb_corrupted[1] < 0)
+ goto out_free;
+ }
+ if (leb_corrupted[1]) {
+ /* Both LEB 0 and LEB 1 are corrupted */
+ ubi_err(ubi, "both volume tables are corrupted");
+ goto out_free;
+ }
+
+ ubi_warn(ubi, "volume table copy #1 is corrupted");
+ err = create_vtbl(ubi, ai, 0, leb[1]);
+ if (err)
+ goto out_free;
+ ubi_msg(ubi, "volume table was restored");
+
+ vfree(leb[0]);
+ return leb[1];
+ }
+
+out_free:
+ vfree(leb[0]);
+ vfree(leb[1]);
+ return ERR_PTR(err);
+}
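+
+/*
+ * Summary sketch of the recovery decisions in process_lvol():
+ *
+ *	LEB 0 OK,  LEB 1 identical -> use LEB 0, nothing to do
+ *	LEB 0 OK,  LEB 1 differs   -> rewrite copy #2 from LEB 0
+ *	LEB 0 bad, LEB 1 OK        -> rewrite copy #1 from LEB 1
+ *	LEB 0 bad, LEB 1 bad       -> fail with -EINVAL
+ */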
+
+/**
+ * create_empty_lvol - create empty layout volume.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns volume table contents in case of success and a
+ * negative error code in case of failure.
+ */
+static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int i;
+ struct ubi_vtbl_record *vtbl;
+
+ vtbl = vzalloc(ubi->vtbl_size);
+ if (!vtbl)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ubi->vtbl_slots; i++)
+ memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);
+
+ for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
+ int err;
+
+ err = create_vtbl(ubi, ai, i, vtbl);
+ if (err) {
+ vfree(vtbl);
+ return ERR_PTR(err);
+ }
+ }
+
+ return vtbl;
+}
+
+/**
+ * init_volumes - initialize volume information for existing volumes.
+ * @ubi: UBI device description object
+ * @ai: scanning information
+ * @vtbl: volume table
+ *
+ * This function allocates volume description objects for existing volumes.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int init_volumes(struct ubi_device *ubi,
+ const struct ubi_attach_info *ai,
+ const struct ubi_vtbl_record *vtbl)
+{
+ int i, reserved_pebs = 0;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+
+ for (i = 0; i < ubi->vtbl_slots; i++) {
+ cond_resched();
+
+ if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
+ continue; /* Empty record */
+
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ vol->alignment = be32_to_cpu(vtbl[i].alignment);
+ vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
+ vol->upd_marker = vtbl[i].upd_marker;
+ vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
+ UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
+ vol->name_len = be16_to_cpu(vtbl[i].name_len);
+ vol->usable_leb_size = ubi->leb_size - vol->data_pad;
+ memcpy(vol->name, vtbl[i].name, vol->name_len);
+ vol->name[vol->name_len] = '\0';
+ vol->vol_id = i;
+
+ if (vtbl[i].flags & UBI_VTBL_SKIP_CRC_CHECK_FLG)
+ vol->skip_check = 1;
+
+ if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
+ /* Auto re-size flag may be set only for one volume */
+ if (ubi->autoresize_vol_id != -1) {
+ ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
+ kfree(vol);
+ return -EINVAL;
+ }
+
+ ubi->autoresize_vol_id = i;
+ }
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[i] = vol;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+ reserved_pebs += vol->reserved_pebs;
+
+ /*
+		 * In the case of a dynamic volume, UBI knows nothing about how
+		 * much data is stored there. So assume the whole volume is used.
+ */
+ if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->usable_leb_size;
+ vol->used_bytes =
+ (long long)vol->used_ebs * vol->usable_leb_size;
+ continue;
+ }
+
+ /* Static volumes only */
+ av = ubi_find_av(ai, i);
+ if (!av || !av->leb_count) {
+ /*
+ * No eraseblocks belonging to this volume found. We
+ * don't actually know whether this static volume is
+ * completely corrupted or just contains no data. And
+			 * we cannot know this as long as the data size is not
+ * stored on flash. So we just assume the volume is
+ * empty. FIXME: this should be handled.
+ */
+ continue;
+ }
+
+ if (av->leb_count != av->used_ebs) {
+ /*
+ * We found a static volume which misses several
+ * eraseblocks. Treat it as corrupted.
+ */
+ ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
+ av->vol_id, av->used_ebs - av->leb_count);
+ vol->corrupted = 1;
+ continue;
+ }
+
+ vol->used_ebs = av->used_ebs;
+ vol->used_bytes =
+ (long long)(vol->used_ebs - 1) * vol->usable_leb_size;
+ vol->used_bytes += av->last_data_size;
+ vol->last_eb_bytes = av->last_data_size;
+ }
+
+ /* And add the layout volume */
+ vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
+ if (!vol)
+ return -ENOMEM;
+
+ vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
+ vol->vol_type = UBI_DYNAMIC_VOLUME;
+ vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
+ memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
+ vol->usable_leb_size = ubi->leb_size;
+ vol->used_ebs = vol->reserved_pebs;
+ vol->last_eb_bytes = vol->reserved_pebs;
+ vol->used_bytes =
+ (long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
+ vol->vol_id = UBI_LAYOUT_VOLUME_ID;
+ vol->ref_count = 1;
+
+ ubi_assert(!ubi->volumes[i]);
+ ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
+ reserved_pebs += vol->reserved_pebs;
+ ubi->vol_count += 1;
+ vol->ubi = ubi;
+
+ if (reserved_pebs > ubi->avail_pebs) {
+ ubi_err(ubi, "not enough PEBs, required %d, available %d",
+ reserved_pebs, ubi->avail_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ }
+ ubi->rsvd_pebs += reserved_pebs;
+ ubi->avail_pebs -= reserved_pebs;
+
+ return 0;
+}
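+
+/*
+ * Illustrative sketch (assumed numbers): the static-volume accounting in
+ * init_volumes(). For a static volume found with av->used_ebs = 5,
+ * usable_leb_size = 126000 and av->last_data_size = 30000,
+ *
+ *	used_bytes = (5 - 1) * 126000 + 30000 = 534000
+ *
+ * i.e. all full LEBs plus the partially filled last one.
+ */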
+
+/**
+ * check_av - check volume attaching information.
+ * @vol: UBI volume description object
+ * @av: volume attaching information
+ *
+ * This function returns zero if the volume attaching information is consistent
+ * with the data read from the volume table, and %-EINVAL if not.
+ */
+static int check_av(const struct ubi_volume *vol,
+ const struct ubi_ainf_volume *av)
+{
+ int err;
+
+ if (av->highest_lnum >= vol->reserved_pebs) {
+ err = 1;
+ goto bad;
+ }
+ if (av->leb_count > vol->reserved_pebs) {
+ err = 2;
+ goto bad;
+ }
+ if (av->vol_type != vol->vol_type) {
+ err = 3;
+ goto bad;
+ }
+ if (av->used_ebs > vol->reserved_pebs) {
+ err = 4;
+ goto bad;
+ }
+ if (av->data_pad != vol->data_pad) {
+ err = 5;
+ goto bad;
+ }
+ return 0;
+
+bad:
+ ubi_err(vol->ubi, "bad attaching information, error %d", err);
+ ubi_dump_av(av);
+ ubi_dump_vol_info(vol);
+ return -EINVAL;
+}
+
+/**
+ * check_attaching_info - check the attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * Even though we protect on-flash data by CRC checksums, we still don't trust
+ * the media. This function ensures that the attaching information is consistent
+ * with the information read from the volume table. Returns zero if the attaching
+ * information is OK and %-EINVAL if it is not.
+ */
+static int check_attaching_info(const struct ubi_device *ubi,
+ struct ubi_attach_info *ai)
+{
+ int err, i;
+ struct ubi_ainf_volume *av;
+ struct ubi_volume *vol;
+
+ if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
+ ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
+ ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
+ return -EINVAL;
+ }
+
+ if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
+ ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
+ ubi_err(ubi, "too large volume ID %d found",
+ ai->highest_vol_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ cond_resched();
+
+ av = ubi_find_av(ai, i);
+ vol = ubi->volumes[i];
+ if (!vol) {
+ if (av)
+ ubi_remove_av(ai, av);
+ continue;
+ }
+
+ if (vol->reserved_pebs == 0) {
+ ubi_assert(i < ubi->vtbl_slots);
+
+ if (!av)
+ continue;
+
+ /*
+ * During attaching we found a volume which does not
+ * exist according to the information in the volume
+ * table. This must have happened due to an unclean
+ * reboot while the volume was being removed. Discard
+ * these eraseblocks.
+ */
+ ubi_msg(ubi, "finish volume %d removal", av->vol_id);
+ ubi_remove_av(ai, av);
+ } else if (av) {
+ err = check_av(vol, av);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_read_volume_table - read the volume table.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function reads the volume table, checks it, recovers from errors if needed,
+ * or creates it if needed. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int i, err;
+ struct ubi_ainf_volume *av;
+
+ empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
+
+ /*
+ * The number of supported volumes is limited by the eraseblock size
+ * and by the UBI_MAX_VOLUMES constant.
+ */
+ ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
+ if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
+ ubi->vtbl_slots = UBI_MAX_VOLUMES;
+
+ ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
+ ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
+
+ av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
+ if (!av) {
+ /*
+ * No logical eraseblocks belonging to the layout volume were
+		 * found. This could mean that the flash is just empty. In
+		 * this case we create an empty layout volume.
+		 *
+		 * But if the flash is not empty, this must be a corruption or the
+ * MTD device just contains garbage.
+ */
+ if (ai->is_empty) {
+ ubi->vtbl = create_empty_lvol(ubi, ai);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ } else {
+ ubi_err(ubi, "the layout volume was not found");
+ return -EINVAL;
+ }
+ } else {
+ if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
+ /* This must not happen with proper UBI images */
+ ubi_err(ubi, "too many LEBs (%d) in layout volume",
+ av->leb_count);
+ return -EINVAL;
+ }
+
+ ubi->vtbl = process_lvol(ubi, ai, av);
+ if (IS_ERR(ubi->vtbl))
+ return PTR_ERR(ubi->vtbl);
+ }
+
+ ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;
+
+ /*
+ * The layout volume is OK, initialize the corresponding in-RAM data
+ * structures.
+ */
+ err = init_volumes(ubi, ai, ubi->vtbl);
+ if (err)
+ goto out_free;
+
+ /*
+	 * Make sure that the attaching information is consistent with the
+ * information stored in the volume table.
+ */
+ err = check_attaching_info(ubi, ai);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ vfree(ubi->vtbl);
+ for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
+ kfree(ubi->volumes[i]);
+ ubi->volumes[i] = NULL;
+ }
+ return err;
+}
+
+/**
+ * self_vtbl_check - check volume table.
+ * @ubi: UBI device description object
+ */
+static void self_vtbl_check(const struct ubi_device *ubi)
+{
+ if (!ubi_dbg_chk_gen(ubi))
+ return;
+
+ if (vtbl_check(ubi, ubi->vtbl)) {
+ ubi_err(ubi, "self-check failed");
+ BUG();
+ }
+}
diff --git a/roms/u-boot/drivers/mtd/ubi/wl.c b/roms/u-boot/drivers/mtd/ubi/wl.c
new file mode 100644
index 000000000..bae402418
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/wl.c
@@ -0,0 +1,1882 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
+ */
+
+/*
+ * UBI wear-leveling sub-system.
+ *
+ * This sub-system is responsible for wear-leveling. It works in terms of
+ * physical eraseblocks and erase counters and knows nothing about logical
+ * eraseblocks, volumes, etc. From this sub-system's perspective all physical
+ * eraseblocks are of two types - used and free. Used physical eraseblocks are
+ * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
+ * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
+ *
+ * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
+ * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
+ *
+ * When physical eraseblocks are returned to the WL sub-system by means of the
+ * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
+ * done asynchronously in the context of the per-UBI device background thread,
+ * which is also managed by the WL sub-system.
+ *
+ * The wear-leveling is ensured by means of moving the contents of used
+ * physical eraseblocks with low erase counter to free physical eraseblocks
+ * with high erase counter.
+ *
+ * If the WL sub-system fails to erase a physical eraseblock, it marks it as
+ * bad.
+ *
+ * This sub-system is also responsible for scrubbing. If a bit-flip is detected
+ * in a physical eraseblock, it has to be moved. Technically this is the same
+ * as moving it for wear-leveling reasons.
+ *
+ * As it was said, for the UBI sub-system all physical eraseblocks are either
+ * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
+ * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
+ * RB-trees, as well as (temporarily) in the @wl->pq queue.
+ *
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ * o we don't want to move physical eraseblocks just after we have given them
+ * to the user; instead, we first want to let users fill them up with data;
+ *
+ * o there is a chance that the user will put the physical eraseblock very
+ * soon, so it makes sense not to move it for some time, but wait.
+ *
+ * Physical eraseblocks stay protected only for a limited time. But the "time" is
+ * measured in erase cycles in this case. This is implemented with the help of the
+ * protection queue. Eraseblocks are put to the tail of this queue when they
+ * are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines how many (global) erase cycles PEBs stay protected.
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up into several sub-states:
+ * o the WL movement is allowed (@wl->used tree);
+ * o the WL movement is disallowed (@wl->erroneous) because the PEB is
+ * erroneous - e.g., there was a read error;
+ * o the WL movement is temporarily prohibited (@wl->pq queue);
+ * o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
+ *
+ * Note, in this implementation, we keep a small in-RAM object for each physical
+ * eraseblock. This is surely not a scalable solution. But it appears to be good
+ * enough for moderately large flashes and it is simple. In the future, one may
+ * re-work this sub-system and make it more scalable.
+ *
+ * At the moment this sub-system does not utilize the sequence number, which
+ * was introduced relatively recently. But it would be wise to do this because
+ * the sequence number of a logical eraseblock characterizes how old it is. For
+ * example, when we move a PEB with low erase counter, and we need to pick the
+ * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
+ * pick a target PEB with an average EC if our PEB is not very "old". This is
+ * room for future re-work of the WL sub-system.
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#else
+#include <ubi_uboot.h>
+#endif
+
+#include "ubi.h"
+#include "wl.h"
+
+/* Number of physical eraseblocks reserved for wear-leveling purposes */
+#define WL_RESERVED_PEBS 1
+
+/*
+ * Maximum difference between two erase counters. If this threshold is
+ * exceeded, the WL sub-system starts moving data from used physical
+ * eraseblocks with low erase counter to free physical eraseblocks with high
+ * erase counter.
+ */
+#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
+
+/*
+ * When a physical eraseblock is moved, the WL sub-system has to pick the target
+ * physical eraseblock to move to. The simplest way would be just to pick the
+ * one with the highest erase counter. But in certain workloads this could lead
+ * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
+ * situation when the picked physical eraseblock is constantly erased after the
+ * data is written to it. So, we have a constant which limits the highest erase
+ * counter of the free physical eraseblock to pick. Namely, the WL sub-system
+ * does not pick eraseblocks with erase counter greater than the lowest erase
+ * counter plus %WL_FREE_MAX_DIFF.
+ */
+#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
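+
+/*
+ * Illustrative example: with the common default of
+ * CONFIG_MTD_UBI_WL_THRESHOLD = 4096, WL_FREE_MAX_DIFF is 8192, so a free
+ * physical eraseblock is picked as a move target only if its erase counter
+ * is within 8192 of the lowest erase counter in the free tree.
+ */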
+
+/*
+ * Maximum number of consecutive background thread failures which is enough to
+ * switch to read-only mode.
+ */
+#define WL_MAX_FAILURES 32
+
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root);
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
+
+/**
+ * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
+ * @e: the wear-leveling entry to add
+ * @root: the root of the tree
+ *
+ * Note, we use (erase counter, physical eraseblock number) pairs as keys in
+ * the @ubi->used and @ubi->free RB-trees.
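+ * Entries are ordered primarily by erase counter and secondarily by PEB
+ * number; for example, (EC 5, PEB 12) sorts before both (EC 5, PEB 30) and
+ * (EC 6, PEB 1).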
+ */
+static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node **p, *parent = NULL;
+
+ p = &root->rb_node;
+ while (*p) {
+ struct ubi_wl_entry *e1;
+
+ parent = *p;
+ e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
+
+ if (e->ec < e1->ec)
+ p = &(*p)->rb_left;
+ else if (e->ec > e1->ec)
+ p = &(*p)->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ }
+
+ rb_link_node(&e->u.rb, parent, p);
+ rb_insert_color(&e->u.rb, root);
+}
+
+/**
+ * wl_entry_destroy - destroy a wear-leveling entry.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to destroy
+ *
+ * This function destroys a wear leveling entry and removes
+ * the reference from the lookup table.
+ */
+static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ ubi->lookuptbl[e->pnum] = NULL;
+ kmem_cache_free(ubi_wl_entry_slab, e);
+}
+
+/**
+ * do_work - do one pending work.
+ * @ubi: UBI device description object
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int do_work(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_work *wrk;
+
+ cond_resched();
+
+ /*
+ * @ubi->work_sem is used to synchronize with the workers. Workers take
+ * it in read mode, so many of them may be doing work at a time. But
+ * the queue flush code has to be sure the whole queue of works is
+ * done, so it takes the semaphore in write mode.
+ */
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works)) {
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
+ return 0;
+ }
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Call the worker function. Do not touch the work structure
+ * after this call as it will have been freed or reused by that
+ * time by the worker function.
+ */
+ err = wrk->func(ubi, wrk, 0);
+ if (err)
+ ubi_err(ubi, "work failed with error code %d", err);
+ up_read(&ubi->work_sem);
+
+ return err;
+}
+
+/**
+ * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns non-zero if @e is in the @root RB-tree and zero if it
+ * is not.
+ */
+static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
+{
+ struct rb_node *p;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
+
+ if (e->pnum == e1->pnum) {
+ ubi_assert(e == e1);
+ return 1;
+ }
+
+ if (e->ec < e1->ec)
+ p = p->rb_left;
+ else if (e->ec > e1->ec)
+ p = p->rb_right;
+ else {
+ ubi_assert(e->pnum != e1->pnum);
+ if (e->pnum < e1->pnum)
+ p = p->rb_left;
+ else
+ p = p->rb_right;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * prot_queue_add - add physical eraseblock to the protection queue.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to add
+ *
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
+ * has to be held.
+ */
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+ int pq_tail = ubi->pq_head - 1;
+
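+ /*
+ * The protection queue is circular: the tail is the slot just
+ * "behind" the current head, wrapping around at index 0.
+ */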
+ if (pq_tail < 0)
+ pq_tail = UBI_PROT_QUEUE_LEN - 1;
+ ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+ list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+ dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
+}
+
+/**
+ * find_wl_entry - find wear-leveling entry closest to certain erase counter.
+ * @ubi: UBI device description object
+ * @root: the RB-tree where to look for
+ * @diff: maximum possible difference from the smallest erase counter
+ *
+ * This function looks for a wear leveling entry with erase counter closest to
+ * min + @diff, where min is the smallest erase counter.
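+ * For example, if the smallest erase counter in @root is 100 and @diff is
+ * 50, the entry with the largest erase counter below 150 is returned
+ * (leaving aside the fastmap anchor special case handled below).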
+ */
+static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root, int diff)
+{
+ struct rb_node *p;
+ struct ubi_wl_entry *e, *prev_e = NULL;
+ int max;
+
+ e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ max = e->ec + diff;
+
+ p = root->rb_node;
+ while (p) {
+ struct ubi_wl_entry *e1;
+
+ e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
+ if (e1->ec >= max)
+ p = p->rb_left;
+ else {
+ p = p->rb_right;
+ prev_e = e;
+ e = e1;
+ }
+ }
+
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best WL entry
+ * such that fastmap can use the anchor PEB later. */
+ if (prev_e && !ubi->fm_disabled &&
+ !ubi->fm && e->pnum < UBI_FM_MAX_START)
+ return prev_e;
+
+ return e;
+}
+
+/**
+ * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
+ * @ubi: UBI device description object
+ * @root: the RB-tree where to look for
+ *
+ * This function looks for a wear leveling entry with a medium erase counter,
+ * but not greater than or equal to the lowest erase counter plus
+ * %WL_FREE_MAX_DIFF/2.
+ */
+static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
+ struct rb_root *root)
+{
+ struct ubi_wl_entry *e, *first, *last;
+
+ first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
+ last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
+
+ if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
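+ /*
+ * The erase counters across the whole tree are close together,
+ * so any entry will do; the tree root is a cheap pick.
+ */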
+ e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
+
+ /* If no fastmap has been written and this WL entry can be used
+ * as anchor PEB, hold it back and return the second best
+ * WL entry such that fastmap can use the anchor PEB later. */
+ e = may_reserve_for_fm(ubi, e, root);
+ } else
+ e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+
+ return e;
+}
+
+/**
+ * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
+ * refill_wl_user_pool().
+ * @ubi: UBI device description object
+ *
+ * This function returns a wear leveling entry in case of success and
+ * NULL in case of failure.
+ */
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err(ubi, "no free eraseblocks");
+ return NULL;
+ }
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /*
+ * Move the physical eraseblock to the protection queue where it will
+ * be protected from being moved for some time.
+ */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+
+ return e;
+}
+
+/**
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to remove
+ *
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
+ */
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ e = ubi->lookuptbl[pnum];
+ if (!e)
+ return -ENODEV;
+
+ if (self_check_in_pq(ubi, e))
+ return -ENODEV;
+
+ list_del(&e->u.list);
+ dbg_wl("deleted PEB %d from the protection queue", e->pnum);
+ return 0;
+}
+
+/**
+ * sync_erase - synchronously erase a physical eraseblock.
+ * @ubi: UBI device description object
+ * @e: the physical eraseblock to erase
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int torture)
+{
+ int err;
+ struct ubi_ec_hdr *ec_hdr;
+ unsigned long long ec = e->ec;
+
+ dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
+
+ err = self_check_ec(ubi, e->pnum, e->ec);
+ if (err)
+ return -EINVAL;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_sync_erase(ubi, e->pnum, torture);
+ if (err < 0)
+ goto out_free;
+
+ ec += err;
+ if (ec > UBI_MAX_ERASECOUNTER) {
+ /*
+ * Erase counter overflow. Upgrade UBI and use 64-bit
+ * erase counters internally.
+ */
+ ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
+ e->pnum, ec);
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
+
+ ec_hdr->ec = cpu_to_be64(ec);
+
+ err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
+ if (err)
+ goto out_free;
+
+ e->ec = ec;
+ spin_lock(&ubi->wl_lock);
+ if (e->ec > ubi->max_ec)
+ ubi->max_ec = e->ec;
+ spin_unlock(&ubi->wl_lock);
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
+ * @ubi: UBI device description object
+ *
+ * This function is called after each erase operation and removes PEBs from the
+ * head of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
+ */
+static void serve_prot_queue(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e, *tmp;
+ int count;
+
+ /*
+ * There may be several protected physical eraseblocks to remove,
+ * process them all.
+ */
+repeat:
+ count = 0;
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+ dbg_wl("PEB %d EC %d protection over, move to used tree",
+ e->pnum, e->ec);
+
+ list_del(&e->u.list);
+ wl_tree_add(e, &ubi->used);
+ if (count++ > 32) {
+ /*
+ * Let's be nice and avoid holding the spinlock for
+ * too long.
+ */
+ spin_unlock(&ubi->wl_lock);
+ cond_resched();
+ goto repeat;
+ }
+ }
+
+ ubi->pq_head += 1;
+ if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+ ubi->pq_head = 0;
+ ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
+ spin_unlock(&ubi->wl_lock);
+}
+
+#ifdef __UBOOT__
+void ubi_do_worker(struct ubi_device *ubi)
+{
+ int err;
+
+ if (list_empty(&ubi->works) || ubi->ro_mode ||
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi))
+ return;
+
+ spin_lock(&ubi->wl_lock);
+ while (!list_empty(&ubi->works)) {
+ /*
+ * call do_work(), which executes exactly one work item from the queue,
+ * including removing it from the work queue.
+ */
+ spin_unlock(&ubi->wl_lock);
+ err = do_work(ubi);
+ spin_lock(&ubi->wl_lock);
+ if (err) {
+ ubi_err(ubi, "%s: work failed with error code %d",
+ ubi->bgt_name, err);
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+}
+#endif
+
+/**
+ * __schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list. Can only be used if ubi->work_sem is already held in read mode!
+ */
+static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ spin_lock(&ubi->wl_lock);
+ list_add_tail(&wrk->list, &ubi->works);
+ ubi_assert(ubi->works_count >= 0);
+ ubi->works_count += 1;
+#ifndef __UBOOT__
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
+ wake_up_process(ubi->bgt_thread);
+#endif
+ spin_unlock(&ubi->wl_lock);
+}
+
+/**
+ * schedule_ubi_work - schedule a work.
+ * @ubi: UBI device description object
+ * @wrk: the work to schedule
+ *
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
+ */
+static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
+{
+ down_read(&ubi->work_sem);
+ __schedule_ubi_work(ubi, wrk);
+ up_read(&ubi->work_sem);
+}
+
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown);
+
+/**
+ * schedule_erase - schedule an erase work.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and %-ENOMEM in case of
+ * failure.
+ */
+static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ ubi_assert(e);
+
+ dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
+ e->pnum, e->ec, torture);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->func = &erase_worker;
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ schedule_ubi_work(ubi, wl_wrk);
+
+#ifdef __UBOOT__
+ ubi_do_worker(ubi);
+#endif
+ return 0;
+}
+
+/**
+ * do_sync_erase - run the erase worker synchronously.
+ * @ubi: UBI device description object
+ * @e: the WL entry of the physical eraseblock to erase
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @torture: if the physical eraseblock has to be tortured
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
+ int vol_id, int lnum, int torture)
+{
+ struct ubi_work *wl_wrk;
+
+ dbg_wl("sync erase of PEB %i", e->pnum);
+
+ wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wl_wrk)
+ return -ENOMEM;
+
+ wl_wrk->e = e;
+ wl_wrk->vol_id = vol_id;
+ wl_wrk->lnum = lnum;
+ wl_wrk->torture = torture;
+
+ return erase_worker(ubi, wl_wrk, 0);
+}
+
+/**
+ * wear_leveling_worker - wear-leveling worker function.
+ * @ubi: UBI device description object
+ * @wrk: the work object
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL-subsystem is shutting down
+ *
+ * This function moves the contents of a less worn-out physical eraseblock to
+ * a more worn-out one. Returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ int shutdown)
+{
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ int vol_id = -1, lnum = -1;
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ int anchor = wrk->anchor;
+#endif
+ struct ubi_wl_entry *e1, *e2;
+ struct ubi_vid_hdr *vid_hdr;
+
+ kfree(wrk);
+ if (shutdown)
+ return 0;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ mutex_lock(&ubi->move_mutex);
+ spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+ ubi_assert(!ubi->move_to_put);
+
+ if (!ubi->free.rb_node ||
+ (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
+ /*
+ * No free physical eraseblocks? Well, they must be waiting in
+ * the queue to be erased. Cancel movement - it will be
+ * triggered again when a free physical eraseblock appears.
+ *
+ * No used physical eraseblocks? They must be temporarily
+ * protected from being moved. They will be moved to the
+ * @ubi->used tree later and the wear-leveling will be
+ * triggered again.
+ */
+ dbg_wl("cancel WL, a list is empty: free %d, used %d",
+ !ubi->free.rb_node, !ubi->used.rb_node);
+ goto out_cancel;
+ }
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ /* Check whether we need to produce an anchor PEB */
+ if (!anchor)
+ anchor = !anchor_pebs_avalible(&ubi->free);
+
+ if (anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
+ } else if (!ubi->scrub.rb_node) {
+#else
+ if (!ubi->scrub.rb_node) {
+#endif
+ /*
+ * Now pick the least worn-out used physical eraseblock and a
+ * highly worn-out free physical eraseblock. If the erase
+ * counters differ enough, start wear-leveling.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+ dbg_wl("no WL needed: min used EC %d, max free EC %d",
+ e1->ec, e2->ec);
+
+ /* Give the unused PEB back */
+ wl_tree_add(e2, &ubi->free);
+ ubi->free_count++;
+ goto out_cancel;
+ }
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("move PEB %d EC %d to PEB %d EC %d",
+ e1->pnum, e1->ec, e2->pnum, e2->ec);
+ } else {
+ /* Perform scrubbing */
+ scrubbing = 1;
+ e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
+ e2 = get_peb_for_wl(ubi);
+ if (!e2)
+ goto out_cancel;
+
+ self_check_in_wl_tree(ubi, e1, &ubi->scrub);
+ rb_erase(&e1->u.rb, &ubi->scrub);
+ dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
+ }
+
+ ubi->move_from = e1;
+ ubi->move_to = e2;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
+ * We so far do not know which logical eraseblock our physical
+ * eraseblock (@e1) belongs to. We have to read the volume identifier
+ * header first.
+ *
+ * Note, we are protected from this PEB being unmapped and erased. The
+ * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
+ * which is being moved was unmapped.
+ */
+
+ err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ if (err == UBI_IO_FF) {
+ /*
+ * We are trying to move PEB without a VID header. UBI
+ * always writes VID headers shortly after the PEB was
+ * given, so we have a situation when it has not yet
+ * had a chance to write it, because it was preempted.
+ * So add this PEB to the protection queue for now,
+ * because presumably more data will be written there
+ * (including the missing VID header), and then we'll
+ * move it.
+ */
+ dbg_wl("PEB %d has no VID header", e1->pnum);
+ protect = 1;
+ goto out_not_moved;
+ } else if (err == UBI_IO_FF_BITFLIPS) {
+ /*
+ * The same situation as %UBI_IO_FF, but bit-flips were
+ * detected. It is better to schedule this PEB for
+ * scrubbing.
+ */
+ dbg_wl("PEB %d has no VID header but has bit-flips",
+ e1->pnum);
+ scrubbing = 1;
+ goto out_not_moved;
+ }
+
+ ubi_err(ubi, "error %d while reading VID header from PEB %d",
+ err, e1->pnum);
+ goto out_error;
+ }
+
+ vol_id = be32_to_cpu(vid_hdr->vol_id);
+ lnum = be32_to_cpu(vid_hdr->lnum);
+
+ err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
+ if (err) {
+ if (err == MOVE_CANCEL_RACE) {
+ /*
+ * The LEB has not been moved because the volume is
+ * being deleted or the PEB has been put meanwhile. We
+ * should prevent this PEB from being selected for
+ * wear-leveling movement again, so put it to the
+ * protection queue.
+ */
+ protect = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
+ if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
+ err == MOVE_TARGET_RD_ERR) {
+ /*
+ * Target PEB had bit-flips or write error - torture it.
+ */
+ torture = 1;
+ goto out_not_moved;
+ }
+
+ if (err == MOVE_SOURCE_RD_ERR) {
+ /*
+ * An error happened while reading the source PEB. Do
+ * not switch to R/O mode in this case, and give the
+ * upper layers a possibility to recover from this,
+ * e.g. by unmapping corresponding LEB. Instead, just
+ * put this PEB to the @ubi->erroneous list to prevent
+ * UBI from trying to move it over and over again.
+ */
+ if (ubi->erroneous_peb_count > ubi->max_erroneous) {
+ ubi_err(ubi, "too many erroneous eraseblocks (%d)",
+ ubi->erroneous_peb_count);
+ goto out_error;
+ }
+ erroneous = 1;
+ goto out_not_moved;
+ }
+
+ if (err < 0)
+ goto out_error;
+
+ ubi_assert(0);
+ }
+
+ /* The PEB has been successfully moved */
+ if (scrubbing)
+ ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
+ e1->pnum, vol_id, lnum, e2->pnum);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->move_to_put) {
+ wl_tree_add(e2, &ubi->used);
+ e2 = NULL;
+ }
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
+ if (err) {
+ if (e2)
+ wl_entry_destroy(ubi, e2);
+ goto out_ro;
+ }
+
+ if (e2) {
+ /*
+ * Well, the target PEB was put meanwhile, schedule it for
+ * erasure.
+ */
+ dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
+ e2->pnum, vol_id, lnum);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
+ if (err)
+ goto out_ro;
+ }
+
+ dbg_wl("done");
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+ /*
+ * For some reason the LEB was not moved; it might be an error or
+ * something else. @e1 was not changed, so return it back. @e2 might
+ * have been changed, schedule it for erasure.
+ */
+out_not_moved:
+ if (vol_id != -1)
+ dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
+ e1->pnum, vol_id, lnum, e2->pnum, err);
+ else
+ dbg_wl("cancel moving PEB %d to PEB %d (%d)",
+ e1->pnum, e2->pnum, err);
+ spin_lock(&ubi->wl_lock);
+ if (protect)
+ prot_queue_add(ubi, e1);
+ else if (erroneous) {
+ wl_tree_add(e1, &ubi->erroneous);
+ ubi->erroneous_peb_count += 1;
+ } else if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
+ else
+ wl_tree_add(e1, &ubi->used);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
+ if (err)
+ goto out_ro;
+
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+out_error:
+ if (vol_id != -1)
+ ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
+ err, e1->pnum, vol_id, lnum, e2->pnum);
+ else
+ ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
+ spin_lock(&ubi->wl_lock);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ wl_entry_destroy(ubi, e1);
+ wl_entry_destroy(ubi, e2);
+
+out_ro:
+ ubi_ro_mode(ubi);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_assert(err != 0);
+ return err < 0 ? err : -EIO;
+
+out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
+}
+
+/**
+ * ensure_wear_leveling - schedule wear-leveling if it is needed.
+ * @ubi: UBI device description object
+ * @nested: set to non-zero if this function is called from UBI worker
+ *
+ * This function checks if it is time to start wear-leveling and schedules it
+ * if yes. This function returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
+{
+ int err = 0;
+ struct ubi_wl_entry *e1;
+ struct ubi_wl_entry *e2;
+ struct ubi_work *wrk;
+
+ spin_lock(&ubi->wl_lock);
+ if (ubi->wl_scheduled)
+ /* Wear-leveling is already in the work queue */
+ goto out_unlock;
+
+ /*
+ * If the ubi->scrub tree is not empty, scrubbing is needed, and the
+ * WL worker has to be scheduled anyway.
+ */
+ if (!ubi->scrub.rb_node) {
+ if (!ubi->used.rb_node || !ubi->free.rb_node)
+ /* No physical eraseblocks - no deal */
+ goto out_unlock;
+
+ /*
+ * We schedule wear-leveling only if the difference between the
+ * lowest erase counter of used physical eraseblocks and a high
+ * erase counter of free physical eraseblocks is greater than
+ * %UBI_WL_THRESHOLD.
+ */
+ e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+ e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+
+ if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
+ goto out_unlock;
+ dbg_wl("schedule wear-leveling");
+ } else
+ dbg_wl("schedule scrubbing");
+
+ ubi->wl_scheduled = 1;
+ spin_unlock(&ubi->wl_lock);
+
+ wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
+ if (!wrk) {
+ err = -ENOMEM;
+ goto out_cancel;
+ }
+
+ wrk->anchor = 0;
+ wrk->func = &wear_leveling_worker;
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+#ifndef __UBOOT__
+ else
+ schedule_ubi_work(ubi, wrk);
+#else
+ else {
+ schedule_ubi_work(ubi, wrk);
+ ubi_do_worker(ubi);
+ }
+#endif
+ return err;
+
+out_cancel:
+ spin_lock(&ubi->wl_lock);
+ ubi->wl_scheduled = 0;
+out_unlock:
+ spin_unlock(&ubi->wl_lock);
+ return err;
+}
+
+/**
+ * erase_worker - physical eraseblock erase worker function.
+ * @ubi: UBI device description object
+ * @wl_wrk: the work object
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL sub-system is shutting down
+ *
+ * This function erases a physical eraseblock and performs torture testing if
+ * needed. It also takes care of marking the physical eraseblock bad if
+ * needed. Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown)
+{
+ struct ubi_wl_entry *e = wl_wrk->e;
+ int pnum = e->pnum;
+ int vol_id = wl_wrk->vol_id;
+ int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
+
+ if (shutdown) {
+ dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
+ kfree(wl_wrk);
+ wl_entry_destroy(ubi, e);
+ return 0;
+ }
+
+ dbg_wl("erase PEB %d EC %d LEB %d:%d",
+ pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
+
+ err = sync_erase(ubi, e, wl_wrk->torture);
+ if (!err) {
+ /* Fine, we've erased it successfully */
+ kfree(wl_wrk);
+
+ spin_lock(&ubi->wl_lock);
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * One more erase operation has happened, take care about
+ * protected physical eraseblocks.
+ */
+ serve_prot_queue(ubi);
+
+ /* And take care about wear-leveling */
+ err = ensure_wear_leveling(ubi, 1);
+ return err;
+ }
+
+ ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
+ kfree(wl_wrk);
+
+ if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+ err == -EBUSY) {
+ int err1;
+
+ /* Re-schedule the LEB for erasure */
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
+ if (err1) {
+ err = err1;
+ goto out_ro;
+ }
+ return err;
+ }
+
+ wl_entry_destroy(ubi, e);
+ if (err != -EIO)
+ /*
+ * If this is not %-EIO, we have no idea what to do. Scheduling
+ * this physical eraseblock for erasure again would cause
+ * errors again and again. Well, let's switch to R/O mode.
+ */
+ goto out_ro;
+
+ /* It is %-EIO, the PEB went bad */
+
+ if (!ubi->bad_allowed) {
+ ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
+ goto out_ro;
+ }
+
+ spin_lock(&ubi->volumes_lock);
+ if (ubi->beb_rsvd_pebs == 0) {
+ if (ubi->avail_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err(ubi, "no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_msg(ubi, "mark PEB %d as bad", pnum);
+ err = ubi_io_mark_bad(ubi, pnum);
+ if (err)
+ goto out_ro;
+
+ spin_lock(&ubi->volumes_lock);
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The number of reserved PEBs has increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
+ ubi->bad_peb_count += 1;
+ ubi->good_peb_count -= 1;
+ ubi_calculate_reserved(ubi);
+ if (available_consumed)
+ ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
+ ubi_msg(ubi, "%d PEBs left in the reserve",
+ ubi->beb_rsvd_pebs);
+ else
+ ubi_warn(ubi, "last PEB from the reserve was used");
+ spin_unlock(&ubi->volumes_lock);
+
+ return err;
+
+out_ro:
+ if (available_consumed) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->avail_pebs += 1;
+ spin_unlock(&ubi->volumes_lock);
+ }
+ ubi_ro_mode(ubi);
+ return err;
+}
+
+/**
+ * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
+ * @ubi: UBI device description object
+ * @vol_id: the volume ID that last used this PEB
+ * @lnum: the last used logical eraseblock number for the PEB
+ * @pnum: physical eraseblock to return
+ * @torture: if this physical eraseblock has to be tortured
+ *
+ * This function is called to return physical eraseblock @pnum to the pool of
+ * free physical eraseblocks. The @torture flag has to be set if an I/O error
+ * occurred to this @pnum and it has to be tested. This function returns zero
+ * in case of success, and a negative error code in case of failure.
+ */
+int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
+ int pnum, int torture)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+ dbg_wl("PEB %d", pnum);
+ ubi_assert(pnum >= 0);
+ ubi_assert(pnum < ubi->peb_count);
+
+ down_read(&ubi->fm_protect);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (e == ubi->move_from) {
+ /*
+ * User is putting the physical eraseblock which was selected to
+ * be moved. It will be scheduled for erasure in the
+ * wear-leveling worker.
+ */
+ dbg_wl("PEB %d is being moved, wait", pnum);
+ spin_unlock(&ubi->wl_lock);
+
+ /* Wait for the WL worker by taking the @ubi->move_mutex */
+ mutex_lock(&ubi->move_mutex);
+ mutex_unlock(&ubi->move_mutex);
+ goto retry;
+ } else if (e == ubi->move_to) {
+ /*
+ * User is putting the physical eraseblock which was selected
+ * as the target the data is moved to. It may happen if the EBA
+ * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
+ * but the WL sub-system has not put the PEB to the "used" tree
+ * yet, but it is about to do this. So we just set a flag which
+ * will tell the WL worker that the PEB is not needed anymore
+ * and should be scheduled for erasure.
+ */
+ dbg_wl("PEB %d is the target of data moving", pnum);
+ ubi_assert(!ubi->move_to_put);
+ ubi->move_to_put = 1;
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return 0;
+ } else {
+ if (in_wl_tree(e, &ubi->used)) {
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
+ } else if (in_wl_tree(e, &ubi->scrub)) {
+ self_check_in_wl_tree(ubi, e, &ubi->scrub);
+ rb_erase(&e->u.rb, &ubi->scrub);
+ } else if (in_wl_tree(e, &ubi->erroneous)) {
+ self_check_in_wl_tree(ubi, e, &ubi->erroneous);
+ rb_erase(&e->u.rb, &ubi->erroneous);
+ ubi->erroneous_peb_count -= 1;
+ ubi_assert(ubi->erroneous_peb_count >= 0);
+ /* Erroneous PEBs should be tortured */
+ torture = 1;
+ } else {
+ err = prot_queue_del(ubi, e->pnum);
+ if (err) {
+ ubi_err(ubi, "PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_protect);
+ return err;
+ }
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = schedule_erase(ubi, e, vol_id, lnum, torture);
+ if (err) {
+ spin_lock(&ubi->wl_lock);
+ wl_tree_add(e, &ubi->used);
+ spin_unlock(&ubi->wl_lock);
+ }
+
+ up_read(&ubi->fm_protect);
+ return err;
+}
+
+/**
+ * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock to schedule
+ *
+ * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
+ * needs scrubbing. This function schedules a physical eraseblock for
+ * scrubbing, which is done in the background. It returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
+{
+ struct ubi_wl_entry *e;
+
+ ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
+
+retry:
+ spin_lock(&ubi->wl_lock);
+ e = ubi->lookuptbl[pnum];
+ if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
+ in_wl_tree(e, &ubi->erroneous)) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+ }
+
+ if (e == ubi->move_to) {
+ /*
+ * This physical eraseblock was used to move data to. The data
+ * was moved but the PEB was not yet inserted to the proper
+ * tree. We should just wait a little and let the WL worker
+ * proceed.
+ */
+ spin_unlock(&ubi->wl_lock);
+ dbg_wl("the PEB %d is not in proper tree, retry", pnum);
+ yield();
+ goto retry;
+ }
+
+ if (in_wl_tree(e, &ubi->used)) {
+ self_check_in_wl_tree(ubi, e, &ubi->used);
+ rb_erase(&e->u.rb, &ubi->used);
+ } else {
+ int err;
+
+ err = prot_queue_del(ubi, e->pnum);
+ if (err) {
+ ubi_err(ubi, "PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
+
+ wl_tree_add(e, &ubi->scrub);
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+ * Technically scrubbing is the same as wear-leveling, so it is done
+ * by the WL worker.
+ */
+ return ensure_wear_leveling(ubi, 0);
+}
+
+/**
+ * ubi_wl_flush - flush all pending works.
+ * @ubi: UBI device description object
+ * @vol_id: the volume id to flush for
+ * @lnum: the logical eraseblock number to flush for
+ *
+ * This function executes all pending works for a particular volume id /
+ * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
+ * acts as a wildcard for all of the corresponding volume numbers or logical
+ * eraseblock numbers. It returns zero in case of success and a negative error
+ * code in case of failure.
+ */
+int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int err = 0;
+ int found = 1;
+
+ /*
+ * Erase while the pending works queue is not empty, but not more than
+ * the number of currently pending works.
+ */
+ dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
+ vol_id, lnum, ubi->works_count);
+
+ while (found) {
+ struct ubi_work *wrk, *tmp;
+ found = 0;
+
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
+ list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
+ if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
+ (lnum == UBI_ALL || wrk->lnum == lnum)) {
+ list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ spin_unlock(&ubi->wl_lock);
+
+ err = wrk->func(ubi, wrk, 0);
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
+ spin_lock(&ubi->wl_lock);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
+ }
+
+ /*
+ * Make sure all the works which have been done in parallel are
+ * finished.
+ */
+ down_write(&ubi->work_sem);
+ up_write(&ubi->work_sem);
+
+ return err;
+}
+
+/**
+ * tree_destroy - destroy an RB-tree.
+ * @ubi: UBI device description object
+ * @root: the root of the tree to destroy
+ */
+static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
+{
+ struct rb_node *rb;
+ struct ubi_wl_entry *e;
+
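+ /*
+ * Iterative post-order traversal: descend to a leaf, free it, and
+ * clear the link in its parent so freed nodes are never revisited.
+ * This avoids recursion, which is desirable in boot code with
+ * small stacks.
+ */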
+ rb = root->rb_node;
+ while (rb) {
+ if (rb->rb_left)
+ rb = rb->rb_left;
+ else if (rb->rb_right)
+ rb = rb->rb_right;
+ else {
+ e = rb_entry(rb, struct ubi_wl_entry, u.rb);
+
+ rb = rb_parent(rb);
+ if (rb) {
+ if (rb->rb_left == &e->u.rb)
+ rb->rb_left = NULL;
+ else
+ rb->rb_right = NULL;
+ }
+
+ wl_entry_destroy(ubi, e);
+ }
+ }
+}
+
+/**
+ * ubi_thread - UBI background thread.
+ * @u: the UBI device description object pointer
+ */
+int ubi_thread(void *u)
+{
+ int failures = 0;
+ struct ubi_device *ubi = u;
+
+ ubi_msg(ubi, "background thread \"%s\" started, PID %d",
+ ubi->bgt_name, task_pid_nr(current));
+
+ set_freezable();
+ for (;;) {
+ int err;
+
+ if (kthread_should_stop())
+ break;
+
+ if (try_to_freeze())
+ continue;
+
+ spin_lock(&ubi->wl_lock);
+ if (list_empty(&ubi->works) || ubi->ro_mode ||
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&ubi->wl_lock);
+ schedule();
+ continue;
+ }
+ spin_unlock(&ubi->wl_lock);
+
+ err = do_work(ubi);
+ if (err) {
+ ubi_err(ubi, "%s: work failed with error code %d",
+ ubi->bgt_name, err);
+ if (failures++ > WL_MAX_FAILURES) {
+ /*
+ * Too many failures, disable the thread and
+ * switch to read-only mode.
+ */
+ ubi_msg(ubi, "%s: %d consecutive failures",
+ ubi->bgt_name, WL_MAX_FAILURES);
+ ubi_ro_mode(ubi);
+ ubi->thread_enabled = 0;
+ continue;
+ }
+ } else
+ failures = 0;
+
+ cond_resched();
+ }
+
+ dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
+ return 0;
+}
+
+/**
+ * shutdown_work - shutdown all pending works.
+ * @ubi: UBI device description object
+ */
+static void shutdown_work(struct ubi_device *ubi)
+{
+#ifdef CONFIG_MTD_UBI_FASTMAP
+#ifndef __UBOOT__
+ flush_work(&ubi->fm_work);
+#else
+ /* In U-Boot, fastmap work is done synchronously, so there is nothing to flush */
+#endif
+#endif
+ while (!list_empty(&ubi->works)) {
+ struct ubi_work *wrk;
+
+ wrk = list_entry(ubi->works.next, struct ubi_work, list);
+ list_del(&wrk->list);
+ wrk->func(ubi, wrk, 1);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
+ }
+}
+
+/**
+ * ubi_wl_init - initialize the WL sub-system using attaching information.
+ * @ubi: UBI device description object
+ * @ai: attaching information
+ *
+ * This function returns zero in case of success, and a negative error code in
+ * case of failure.
+ */
+int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+{
+ int err, i, reserved_pebs, found_pebs = 0;
+ struct rb_node *rb1, *rb2;
+ struct ubi_ainf_volume *av;
+ struct ubi_ainf_peb *aeb, *tmp;
+ struct ubi_wl_entry *e;
+
+ ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
+ spin_lock_init(&ubi->wl_lock);
+ mutex_init(&ubi->move_mutex);
+ init_rwsem(&ubi->work_sem);
+ ubi->max_ec = ai->max_ec;
+ INIT_LIST_HEAD(&ubi->works);
+
+ sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
+
+ err = -ENOMEM;
+ ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
+ if (!ubi->lookuptbl)
+ return err;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+ INIT_LIST_HEAD(&ubi->pq[i]);
+ ubi->pq_head = 0;
+
+ ubi->free_count = 0;
+ list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+ if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
+ wl_entry_destroy(ubi, e);
+ goto out_free;
+ }
+
+ found_pebs++;
+ }
+
+ list_for_each_entry(aeb, &ai->free, u.list) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi_assert(e->ec >= 0);
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+
+ ubi->lookuptbl[e->pnum] = e;
+
+ found_pebs++;
+ }
+
+ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
+ ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
+ cond_resched();
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ goto out_free;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (!aeb->scrub) {
+ dbg_wl("add PEB %d EC %d to the used tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->used);
+ } else {
+ dbg_wl("add PEB %d EC %d to the scrub tree",
+ e->pnum, e->ec);
+ wl_tree_add(e, &ubi->scrub);
+ }
+
+ found_pebs++;
+ }
+ }
+
+ dbg_wl("found %i PEBs", found_pebs);
+
+ if (ubi->fm) {
+ ubi_assert(ubi->good_peb_count ==
+ found_pebs + ubi->fm->used_blocks);
+
+ for (i = 0; i < ubi->fm->used_blocks; i++) {
+ e = ubi->fm->e[i];
+ ubi->lookuptbl[e->pnum] = e;
+ }
+ } else
+ ubi_assert(ubi->good_peb_count == found_pebs);
+
+ reserved_pebs = WL_RESERVED_PEBS;
+ ubi_fastmap_init(ubi, &reserved_pebs);
+
+ if (ubi->avail_pebs < reserved_pebs) {
+ ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, reserved_pebs);
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
+ goto out_free;
+ }
+ ubi->avail_pebs -= reserved_pebs;
+ ubi->rsvd_pebs += reserved_pebs;
+
+ /* Schedule wear-leveling if needed */
+ err = ensure_wear_leveling(ubi, 0);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ shutdown_work(ubi);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
+ kfree(ubi->lookuptbl);
+ return err;
+}
+
+/**
+ * protection_queue_destroy - destroy the protection queue.
+ * @ubi: UBI device description object
+ */
+static void protection_queue_destroy(struct ubi_device *ubi)
+{
+ int i;
+ struct ubi_wl_entry *e, *tmp;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+ list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+ list_del(&e->u.list);
+ wl_entry_destroy(ubi, e);
+ }
+ }
+}
+
+/**
+ * ubi_wl_close - close the wear-leveling sub-system.
+ * @ubi: UBI device description object
+ */
+void ubi_wl_close(struct ubi_device *ubi)
+{
+ dbg_wl("close the WL sub-system");
+ ubi_fastmap_close(ubi);
+ shutdown_work(ubi);
+ protection_queue_destroy(ubi);
+ tree_destroy(ubi, &ubi->used);
+ tree_destroy(ubi, &ubi->erroneous);
+ tree_destroy(ubi, &ubi->free);
+ tree_destroy(ubi, &ubi->scrub);
+ kfree(ubi->lookuptbl);
+}
+
+/**
+ * self_check_ec - make sure that the erase counter of a PEB is correct.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @ec: the erase counter to check
+ *
+ * This function returns zero if the erase counter of physical eraseblock @pnum
+ * is equivalent to @ec, and a negative error code if not or if an error
+ * occurred.
+ */
+static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
+{
+ int err;
+ long long read_ec;
+ struct ubi_ec_hdr *ec_hdr;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
+ if (!ec_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
+ if (err && err != UBI_IO_BITFLIPS) {
+ /* The header does not have to exist */
+ err = 0;
+ goto out_free;
+ }
+
+ read_ec = be64_to_cpu(ec_hdr->ec);
+ if (ec != read_ec && read_ec - ec > 1) {
+ ubi_err(ubi, "self-check failed for PEB %d", pnum);
+ ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
+ dump_stack();
+ err = 1;
+ } else
+ err = 0;
+
+out_free:
+ kfree(ec_hdr);
+ return err;
+}
+
+/**
+ * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ * @root: the root of the tree
+ *
+ * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
+ * is not.
+ */
+static int self_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e, struct rb_root *root)
+{
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ if (in_wl_tree(e, root))
+ return 0;
+
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
+ e->pnum, e->ec, root);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
+ * self_check_in_pq - check if wear-leveling entry is in the protection
+ * queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
+ */
+static int self_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ struct ubi_wl_entry *p;
+ int i;
+
+ if (!ubi_dbg_chk_gen(ubi))
+ return 0;
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+ list_for_each_entry(p, &ubi->pq[i], u.list)
+ if (p == e)
+ return 0;
+
+ ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
+ e->pnum, e->ec);
+ dump_stack();
+ return -EINVAL;
+}
+#ifndef CONFIG_MTD_UBI_FASTMAP
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ ubi->free_count--;
+ ubi_assert(ubi->free_count >= 0);
+ rb_erase(&e->u.rb, &ubi->free);
+
+ return e;
+}
+
+/**
+ * produce_free_peb - produce a free physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function tries to make a free PEB by means of synchronous execution of
+ * pending works. This may be needed if, for example, the background thread is
+ * disabled. Returns zero in case of success and a negative error code in case
+ * of failure.
+ */
+static int produce_free_peb(struct ubi_device *ubi)
+{
+ int err;
+
+ while (!ubi->free.rb_node && ubi->works_count) {
+ spin_unlock(&ubi->wl_lock);
+
+ dbg_wl("do one work synchronously");
+ err = do_work(ubi);
+
+ spin_lock(&ubi->wl_lock);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_wl_get_peb - get a physical eraseblock.
+ * @ubi: UBI device description object
+ *
+ * This function returns a physical eraseblock in case of success and a
+ * negative error code in case of failure.
+ * Returns with ubi->fm_eba_sem held in read mode!
+ */
+int ubi_wl_get_peb(struct ubi_device *ubi)
+{
+ int err;
+ struct ubi_wl_entry *e;
+
+retry:
+ down_read(&ubi->fm_eba_sem);
+ spin_lock(&ubi->wl_lock);
+ if (!ubi->free.rb_node) {
+ if (ubi->works_count == 0) {
+ ubi_err(ubi, "no free eraseblocks");
+ ubi_assert(list_empty(&ubi->works));
+ spin_unlock(&ubi->wl_lock);
+ return -ENOSPC;
+ }
+
+ err = produce_free_peb(ubi);
+ if (err < 0) {
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->fm_eba_sem);
+ goto retry;
+ }
+ e = wl_get_wle(ubi);
+ prot_queue_add(ubi, e);
+ spin_unlock(&ubi->wl_lock);
+
+ err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
+ ubi->peb_size - ubi->vid_hdr_aloffset);
+ if (err) {
+ ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
+ return err;
+ }
+
+ return e->pnum;
+}
+#else
+#include "fastmap-wl.c"
+#endif
diff --git a/roms/u-boot/drivers/mtd/ubi/wl.h b/roms/u-boot/drivers/mtd/ubi/wl.h
new file mode 100644
index 000000000..662dbe376
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/wl.h
@@ -0,0 +1,34 @@
+#ifndef UBI_WL_H
+#define UBI_WL_H
+#ifdef CONFIG_MTD_UBI_FASTMAP
+static int anchor_pebs_avalible(struct rb_root *root);
+#ifndef __UBOOT__
+static void update_fastmap_work_fn(struct work_struct *wrk);
+#else
+void update_fastmap_work_fn(struct ubi_device *ubi);
+#endif
+static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static void ubi_fastmap_close(struct ubi_device *ubi);
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
+{
+ /* Reserve enough LEBs to store two fastmaps. */
+ *count += (ubi->fm_size / ubi->leb_size) * 2;
+#ifndef __UBOOT__
+ INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
+#endif
+}
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root);
+#else /* !CONFIG_MTD_UBI_FASTMAP */
+static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static inline void ubi_fastmap_close(struct ubi_device *ubi) { }
+static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) { }
+static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
+ struct rb_root *root)
+{
+ return e;
+}
+#endif /* CONFIG_MTD_UBI_FASTMAP */
+#endif /* UBI_WL_H */
diff --git a/roms/u-boot/drivers/mtd/ubispl/Makefile b/roms/u-boot/drivers/mtd/ubispl/Makefile
new file mode 100644
index 000000000..740dbed12
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubispl/Makefile
@@ -0,0 +1 @@
+obj-y += ubispl.o ../ubi/crc32.o
diff --git a/roms/u-boot/drivers/mtd/ubispl/ubi-wrapper.h b/roms/u-boot/drivers/mtd/ubispl/ubi-wrapper.h
new file mode 100644
index 000000000..b58f42dd1
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubispl/ubi-wrapper.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * The parts taken from the kernel implementation are:
+ *
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * UBISPL specific defines:
+ *
+ * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
+ */
+
+/*
+ * Contains various defines copy&pasted from ubi.h and ubi-user.h to make
+ * the upstream fastboot code happy.
+ */
+#ifndef __UBOOT_UBI_WRAPPER_H
+#define __UBOOT_UBI_WRAPPER_H
+
+/*
+ * Error codes returned by the I/O sub-system.
+ *
+ * UBI_IO_FF: the read region of flash contains only 0xFFs
+ * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data
+ * integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC)
+ * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a
+ * data integrity error reported by the MTD driver
+ * (uncorrectable ECC error in case of NAND)
+ * UBI_IO_BITFLIPS: bit-flips were detected and corrected
+ *
+ * UBI_FASTMAP_ANCHOR: u-boot SPL add on to tell the caller that the fastmap
+ * anchor block has been found
+ *
+ * Note, it is probably better to have bit-flip and ebadmsg as flags which can
+ * be or'ed with other error codes. But this is a big change because there are
+ * many callers, so it is not worth the risk of introducing a bug
+ */
+enum {
+ UBI_IO_FF = 1,
+ UBI_IO_FF_BITFLIPS,
+ UBI_IO_BAD_HDR,
+ UBI_IO_BAD_HDR_EBADMSG,
+ UBI_IO_BITFLIPS,
+ UBI_FASTMAP_ANCHOR,
+};
+
+/*
+ * UBI volume type constants.
+ *
+ * @UBI_DYNAMIC_VOLUME: dynamic volume
+ * @UBI_STATIC_VOLUME: static volume
+ */
+enum {
+ UBI_DYNAMIC_VOLUME = 3,
+ UBI_STATIC_VOLUME = 4,
+};
+
+/*
+ * Return codes of the fastmap sub-system
+ *
+ * UBI_NO_FASTMAP: No fastmap super block was found
+ * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
+ */
+enum {
+ UBI_NO_FASTMAP = 1,
+ UBI_BAD_FASTMAP,
+};
+
+/**
+ * struct ubi_fastmap_layout - in-memory fastmap data structure.
+ * @e: PEBs used by the current fastmap
+ * @to_be_tortured: if non-zero, the corresponding PEB has to be tortured
+ * @used_blocks: number of used PEBs
+ * @max_pool_size: maximal size of the user pool
+ * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
+ */
+struct ubi_fastmap_layout {
+ struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
+ int to_be_tortured[UBI_FM_MAX_BLOCKS];
+ int used_blocks;
+ int max_pool_size;
+ int max_wl_pool_size;
+};
+
+/**
+ * struct ubi_fm_pool - in-memory fastmap pool
+ * @pebs: PEBs in this pool
+ * @used: number of used PEBs
+ * @size: total number of PEBs in this pool
+ * @max_size: maximal size of the pool
+ *
+ * A pool gets filled with up to @max_size PEBs.
+ * If all PEBs within the pool are used, a new fastmap will be written
+ * to the flash and the pool gets refilled with empty PEBs.
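+ *
+ * For example, with a @max_size of 8, once all 8 cached PEBs have been
+ * handed out, the next request triggers a fastmap write and a refill of
+ * the pool.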
+ *
+ */
+struct ubi_fm_pool {
+ int pebs[UBI_FM_MAX_POOL_SIZE];
+ int used;
+ int size;
+ int max_size;
+};
+
+#endif
diff --git a/roms/u-boot/drivers/mtd/ubispl/ubispl.c b/roms/u-boot/drivers/mtd/ubispl/ubispl.c
new file mode 100644
index 000000000..03b31f002
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubispl/ubispl.c
@@ -0,0 +1,1139 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
+ *
+ * The parts taken from the kernel implementation are:
+ *
+ * Copyright (c) International Business Machines Corp., 2006
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <linux/bug.h>
+#include <u-boot/crc.h>
+#include <ubispl.h>
+
+#include <linux/bitops.h>
+#include <linux/crc32.h>
+
+#include "ubispl.h"
+
+/**
+ * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
+ * @ubi: UBI device description object
+ */
+static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
+{
+ size_t size;
+
+ size = sizeof(struct ubi_fm_sb) +
+ sizeof(struct ubi_fm_hdr) +
+ sizeof(struct ubi_fm_scan_pool) +
+ sizeof(struct ubi_fm_scan_pool) +
+ (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
+ (sizeof(struct ubi_fm_eba) +
+ (ubi->peb_count * sizeof(__be32))) +
+ sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
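+
+ /* The fastmap always occupies a whole number of LEBs */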
+ return roundup(size, ubi->leb_size);
+}
+
+static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
+ unsigned long from, unsigned long len)
+{
+ return ubi->read(pnum + ubi->peb_offset, from, len, buf);
+}
+
+static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
+{
+ return peb >= ubi->peb_count || peb < 0;
+}
+
+#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
+
+/**
+ * ubi_dump_vtbl_record - dump a &struct ubi_vtbl_record object.
+ * @r: the object to dump
+ * @idx: volume table index
+ */
+void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
+{
+ int name_len = be16_to_cpu(r->name_len);
+
+ ubi_dbg("Volume table record %d dump: size: %d",
+ idx, sizeof(struct ubi_vtbl_record));
+ ubi_dbg("\treserved_pebs %d", be32_to_cpu(r->reserved_pebs));
+ ubi_dbg("\talignment %d", be32_to_cpu(r->alignment));
+ ubi_dbg("\tdata_pad %d", be32_to_cpu(r->data_pad));
+ ubi_dbg("\tvol_type %d", (int)r->vol_type);
+ ubi_dbg("\tupd_marker %d", (int)r->upd_marker);
+ ubi_dbg("\tname_len %d", name_len);
+
+ if (r->name[0] == '\0') {
+ ubi_dbg("\tname NULL");
+ return;
+ }
+
+ if (name_len <= UBI_VOL_NAME_MAX &&
+ strnlen(&r->name[0], name_len + 1) == name_len) {
+ ubi_dbg("\tname %s", &r->name[0]);
+ } else {
+ ubi_dbg("\t1st 5 characters of name: %c%c%c%c%c",
+ r->name[0], r->name[1], r->name[2], r->name[3],
+ r->name[4]);
+ }
+ ubi_dbg("\tcrc %#08x", be32_to_cpu(r->crc));
+}
+
+/* Empty volume table record */
+static struct ubi_vtbl_record empty_vtbl_record;
+
+/**
+ * vtbl_check - check if volume table is not corrupted and sensible.
+ * @ubi: UBI device description object
+ * @vtbl: volume table
+ *
+ * This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
+ * and %-EINVAL if it contains inconsistent data.
+ */
+static int vtbl_check(struct ubi_scan_info *ubi,
+ struct ubi_vtbl_record *vtbl)
+{
+ int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
+ int upd_marker, err;
+ uint32_t crc;
+ const char *name;
+
+ for (i = 0; i < UBI_SPL_VOL_IDS; i++) {
+ reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
+ alignment = be32_to_cpu(vtbl[i].alignment);
+ data_pad = be32_to_cpu(vtbl[i].data_pad);
+ upd_marker = vtbl[i].upd_marker;
+ vol_type = vtbl[i].vol_type;
+ name_len = be16_to_cpu(vtbl[i].name_len);
+ name = &vtbl[i].name[0];
+
+ crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
+ if (be32_to_cpu(vtbl[i].crc) != crc) {
+ ubi_err("bad CRC at record %u: %#08x, not %#08x",
+ i, crc, be32_to_cpu(vtbl[i].crc));
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return 1;
+ }
+
+ if (reserved_pebs == 0) {
+ if (memcmp(&vtbl[i], &empty_vtbl_record,
+ UBI_VTBL_RECORD_SIZE)) {
+ err = 2;
+ goto bad;
+ }
+ continue;
+ }
+
+ if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
+ name_len < 0) {
+ err = 3;
+ goto bad;
+ }
+
+ if (alignment > ubi->leb_size || alignment == 0) {
+ err = 4;
+ goto bad;
+ }
+
+ n = alignment & (CONFIG_SPL_UBI_VID_OFFSET - 1);
+ if (alignment != 1 && n) {
+ err = 5;
+ goto bad;
+ }
+
+ n = ubi->leb_size % alignment;
+ if (data_pad != n) {
+ ubi_err("bad data_pad, has to be %d", n);
+ err = 6;
+ goto bad;
+ }
+
+ if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
+ err = 7;
+ goto bad;
+ }
+
+ if (upd_marker != 0 && upd_marker != 1) {
+ err = 8;
+ goto bad;
+ }
+
+ if (name_len > UBI_VOL_NAME_MAX) {
+ err = 10;
+ goto bad;
+ }
+
+ if (name[0] == '\0') {
+ err = 11;
+ goto bad;
+ }
+
+ if (name_len != strnlen(name, name_len + 1)) {
+ err = 12;
+ goto bad;
+ }
+
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ }
+
+	/* Check that all names are unique */
+ for (i = 0; i < UBI_SPL_VOL_IDS - 1; i++) {
+ for (n = i + 1; n < UBI_SPL_VOL_IDS; n++) {
+ int len1 = be16_to_cpu(vtbl[i].name_len);
+ int len2 = be16_to_cpu(vtbl[n].name_len);
+
+ if (len1 > 0 && len1 == len2 &&
+ !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
+ ubi_err("volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ ubi_dump_vtbl_record(&vtbl[n], n);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+
+bad:
+ ubi_err("volume table check failed: record %d, error %d", i, err);
+ ubi_dump_vtbl_record(&vtbl[i], i);
+ return -EINVAL;
+}
+
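+/*
+ * Read one copy of the volume table from PEB @pnum of the layout
+ * volume and accept it only if vtbl_check() passes.
+ */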
+static int ubi_read_volume_table(struct ubi_scan_info *ubi, u32 pnum)
+{
+ int err = -EINVAL;
+
+ empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);
+
+ err = ubi_io_read(ubi, &ubi->vtbl, pnum, ubi->leb_start,
+ sizeof(struct ubi_vtbl_record) * UBI_SPL_VOL_IDS);
+ if (err && err != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read volume table");
+ goto out;
+ }
+
+ if (!vtbl_check(ubi, ubi->vtbl)) {
+ ubi->vtbl_valid = 1;
+ err = 0;
+ }
+out:
+ return err;
+}
+
+#endif /* CONFIG_SPL_UBI_LOAD_BY_VOLNAME */
+
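+/*
+ * Read and check the VID header of @pnum once. The @scanned and
+ * @corrupt bitmaps cache the result, so repeated calls for the same
+ * PEB do not touch the flash again.
+ */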
+static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
+ struct ubi_vid_hdr *vh, int unused)
+{
+ u32 magic;
+ int res;
+
+ /* No point in rescanning a corrupt block */
+ if (test_bit(pnum, ubi->corrupt))
+ return UBI_IO_BAD_HDR;
+ /*
+ * If the block has been scanned already, no need to rescan
+ */
+ if (test_and_set_bit(pnum, ubi->scanned))
+ return 0;
+
+ res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));
+
+ /*
+ * Bad block, unrecoverable ECC error, skip the block
+ */
+ if (res) {
+ ubi_dbg("Skipping bad or unreadable block %d", pnum);
+ vh->magic = 0;
+ generic_set_bit(pnum, ubi->corrupt);
+ return res;
+ }
+
+	/* Magic number available? */
+ magic = be32_to_cpu(vh->magic);
+ if (magic != UBI_VID_HDR_MAGIC) {
+ generic_set_bit(pnum, ubi->corrupt);
+ if (magic == 0xffffffff)
+ return UBI_IO_FF;
+		ubi_msg("Bad magic in block %d: %08x", pnum, magic);
+ return UBI_IO_BAD_HDR;
+ }
+
+	/* Header CRC correct? */
+ if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
+ be32_to_cpu(vh->hdr_crc)) {
+		ubi_msg("Bad CRC in block %d", pnum);
+ generic_set_bit(pnum, ubi->corrupt);
+ return UBI_IO_BAD_HDR;
+ }
+
+ ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));
+
+ return 0;
+}
+
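+/*
+ * Re-read the VID header of a PEB referenced by the fastmap and check
+ * that it still matches the expected volume id, type and LEB number.
+ */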
+static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
+ struct ubi_vid_hdr *vh,
+ u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
+{
+ int res;
+
+ if (ubi_io_is_bad(ubi, fm_pnum))
+ return -EINVAL;
+
+ res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
+ if (!res) {
+ /* Check volume id, volume type and lnum */
+ if (be32_to_cpu(vh->vol_id) == fm_vol_id &&
+ vh->vol_type == UBI_VID_STATIC &&
+ be32_to_cpu(vh->lnum) == fm_lnum)
+ return 0;
+ ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
+ fm_pnum, fm_vol_id, vh->vol_type,
+ be32_to_cpu(vh->vol_id),
+ fm_lnum, be32_to_cpu(vh->lnum));
+ }
+ return res;
+}
+
+/* Insert the logical block into the volume info */
+static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
+ struct ubi_vid_hdr *vh, u32 vol_id,
+ u32 pnum, u32 lnum)
+{
+ struct ubi_vol_info *vi = ubi->volinfo + vol_id;
+ u32 *ltp;
+
+ /*
+ * If the volume is larger than expected, yell and give up :(
+ */
+ if (lnum >= UBI_MAX_VOL_LEBS) {
+ ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
+ return -EINVAL;
+ }
+
+ ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
+ pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
+ !!test_bit(pnum, ubi->scanned));
+
+ /* Points to the translation entry */
+ ltp = vi->lebs_to_pebs + lnum;
+
+ /* If the block is already assigned, check sqnum */
+ if (__test_and_set_bit(lnum, vi->found)) {
+ u32 cur_pnum = *ltp;
+ struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;
+
+ /*
+	 * If the current block has not yet been scanned, we
+ * need to do that. The other block might be stale or
+ * the current block corrupted and the FM not yet
+ * updated.
+ */
+ if (!test_bit(cur_pnum, ubi->scanned)) {
+ /*
+ * If the scan fails, we use the valid block
+ */
+ if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
+ lnum)) {
+ *ltp = pnum;
+ return 0;
+ }
+ }
+
+ /*
+		 * Should not happen...
+ */
+ if (test_bit(cur_pnum, ubi->corrupt)) {
+ *ltp = pnum;
+ return 0;
+ }
+
+ ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
+ vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
+ be64_to_cpu(vh->sqnum));
+
+ /*
+ * Compare sqnum and take the newer one
+ */
+ if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
+ *ltp = pnum;
+ } else {
+ *ltp = pnum;
+ if (lnum > vi->last_block)
+ vi->last_block = lnum;
+ }
+
+ return 0;
+}
+
+static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
+ u32 pnum)
+{
+ u32 vol_id, lnum;
+ int res;
+
+ if (ubi_io_is_bad(ubi, pnum))
+ return -EINVAL;
+
+ res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (res)
+ return res;
+
+ /* Get volume id */
+ vol_id = be32_to_cpu(vh->vol_id);
+
+ /* If this is the fastmap anchor, return right away */
+ if (vol_id == UBI_FM_SB_VOLUME_ID)
+ return ubi->fm_enabled ? UBI_FASTMAP_ANCHOR : 0;
+
+#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
+ /* If this is a UBI volume table, read it and return */
+ if (vol_id == UBI_LAYOUT_VOLUME_ID && !ubi->vtbl_valid) {
+ res = ubi_read_volume_table(ubi, pnum);
+ return res;
+ }
+#endif
+
+ /* We only care about static volumes with an id < UBI_SPL_VOL_IDS */
+ if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC)
+ return 0;
+
+#ifndef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
+ /* We are only interested in the volumes to load */
+ if (!test_bit(vol_id, ubi->toload))
+ return 0;
+#endif
+ lnum = be32_to_cpu(vh->lnum);
+ return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum);
+}
+
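+/*
+ * Account a PEB described by the fastmap EBA table and, if it belongs
+ * to a static volume we want to load, run it through the regular VID
+ * header scan path.
+ */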
+static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
+ u32 vol_id, u32 vol_type, u32 used)
+{
+ struct ubi_vid_hdr *vh;
+
+ if (ubi_io_is_bad(ubi, pnum))
+ return -EINVAL;
+
+ ubi->fastmap_pebs++;
+
+#ifndef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
+ if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME)
+ return 0;
+
+ /* We are only interested in the volumes to load */
+ if (!test_bit(vol_id, ubi->toload))
+ return 0;
+#endif
+ vh = ubi->blockinfo + pnum;
+
+ return ubi_scan_vid_hdr(ubi, vh, pnum);
+}
+
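+/*
+ * Scan the PEBs in a fastmap pool. Pool members may have been written
+ * after the fastmap was created, so their VID headers have to be read
+ * from flash rather than trusted from the fastmap.
+ */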
+static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
+{
+ struct ubi_vid_hdr *vh;
+ u32 pnum;
+ int i;
+
+ ubi_dbg("Scanning pool size: %d", pool_size);
+
+ for (i = 0; i < pool_size; i++) {
+ pnum = be32_to_cpu(pebs[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
+ return UBI_BAD_FASTMAP;
+ }
+
+ vh = ubi->blockinfo + pnum;
+ /*
+ * We allow the scan to fail here. The loader will notice
+ * and look for a replacement.
+ */
+ ubi_scan_vid_hdr(ubi, vh, pnum);
+ }
+ return 0;
+}
+
+/*
+ * Fastmap code is stolen from the Linux kernel and this stub structure is used
+ * to make it happy.
+ */
+struct ubi_attach_info {
+ int i;
+};
+
+static int ubi_attach_fastmap(struct ubi_scan_info *ubi,
+ struct ubi_attach_info *ai,
+ struct ubi_fastmap_layout *fm)
+{
+ struct ubi_fm_hdr *fmhdr;
+ struct ubi_fm_scan_pool *fmpl1, *fmpl2;
+ struct ubi_fm_ec *fmec;
+ struct ubi_fm_volhdr *fmvhdr;
+ struct ubi_fm_eba *fm_eba;
+ int ret, i, j, pool_size, wl_pool_size;
+ size_t fm_pos = 0, fm_size = ubi->fm_size;
+ void *fm_raw = ubi->fm_buf;
+
+ memset(ubi->fm_used, 0, sizeof(ubi->fm_used));
+
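+	/* The super block is already at the start of fm_buf; skip it */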
+ fm_pos += sizeof(struct ubi_fm_sb);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
+ ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl1);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmpl2);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
+ ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
+ goto fail_bad;
+ }
+
+ pool_size = be16_to_cpu(fmpl1->size);
+ wl_pool_size = be16_to_cpu(fmpl2->size);
+ fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
+ fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
+
+ if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
+ ubi_err("bad pool size: %i", pool_size);
+ goto fail_bad;
+ }
+
+ if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
+ ubi_err("bad WL pool size: %i", wl_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_pool_size < 0) {
+ ubi_err("bad maximal pool size: %i", fm->max_pool_size);
+ goto fail_bad;
+ }
+
+ if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
+ fm->max_wl_pool_size < 0) {
+ ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
+ goto fail_bad;
+ }
+
+ /* read EC values from free list */
+ for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ }
+
+ /* read EC values from used list */
+ for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used);
+ }
+
+ /* read EC values from scrub list */
+ for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ }
+
+ /* read EC values from erase list */
+ for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
+ fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmec);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+ }
+
+ /* Iterate over all volumes and read their EBA table */
+ for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
+ u32 vol_id, vol_type, used, reserved;
+
+ fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fmvhdr);
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
+ ubi_err("bad fastmap vol header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
+ goto fail_bad;
+ }
+
+ vol_id = be32_to_cpu(fmvhdr->vol_id);
+ vol_type = fmvhdr->vol_type;
+ used = be32_to_cpu(fmvhdr->used_ebs);
+
+ fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
+ fm_pos += sizeof(*fm_eba);
+ fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
+ if (fm_pos >= fm_size)
+ goto fail_bad;
+
+ if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
+ ubi_err("bad fastmap EBA header magic: 0x%x, " \
+ "expected: 0x%x",
+ be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
+ goto fail_bad;
+ }
+
+ reserved = be32_to_cpu(fm_eba->reserved_pebs);
+ ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved);
+ for (j = 0; j < reserved; j++) {
+ int pnum = be32_to_cpu(fm_eba->pnum[j]);
+
+ if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
+ continue;
+
+ if (!__test_and_clear_bit(pnum, ubi->fm_used))
+ continue;
+
+ /*
+ * We only handle static volumes so used_ebs
+			 * needs to be handed in. And we do not assign
+			 * the reserved blocks.
+ */
+ if (j >= used)
+ continue;
+
+ ret = assign_aeb_to_av(ubi, pnum, j, vol_id,
+ vol_type, used);
+ if (!ret)
+ continue;
+
+ /*
+ * Nasty: The fastmap claims that the volume
+			 * has one more block than it actually has, but
+			 * that block is always empty and the other blocks
+			 * have the correct number of total LEBs in the
+ * headers. Deal with it.
+ */
+ if (ret != UBI_IO_FF && j != used - 1)
+ goto fail_bad;
+ ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d",
+ vol_id, j, used);
+ }
+ }
+
+ ret = scan_pool(ubi, fmpl1->pebs, pool_size);
+ if (ret)
+ goto fail;
+
+ ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size);
+ if (ret)
+ goto fail;
+
+#ifdef CHECKME
+ /*
+ * If fastmap is leaking PEBs (must not happen), raise a
+ * fat warning and fall back to scanning mode.
+ * We do this here because in ubi_wl_init() it's too late
+ * and we cannot fall back to scanning.
+ */
+ if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
+ ai->bad_peb_count - fm->used_blocks))
+ goto fail_bad;
+#endif
+
+ return 0;
+
+fail_bad:
+ ret = UBI_BAD_FASTMAP;
+fail:
+ return ret;
+}
+
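+/*
+ * Read the fastmap super block from the anchor PEB, pull all fastmap
+ * data blocks into fm_buf, verify the overall data CRC and hand the
+ * buffer to ubi_attach_fastmap().
+ */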
+static int ubi_scan_fastmap(struct ubi_scan_info *ubi,
+ struct ubi_attach_info *ai,
+ int fm_anchor)
+{
+ struct ubi_fm_sb *fmsb, *fmsb2;
+ struct ubi_vid_hdr *vh;
+ struct ubi_fastmap_layout *fm;
+ int i, used_blocks, pnum, ret = 0;
+ size_t fm_size;
+ __be32 crc, tmp_crc;
+ unsigned long long sqnum = 0;
+
+ fmsb = &ubi->fm_sb;
+ fm = &ubi->fm_layout;
+
+ ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
+ if (ret && ret != UBI_IO_BITFLIPS)
+ goto free_fm_sb;
+ else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[0] = 1;
+
+ if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
+ ubi_err("bad super block magic: 0x%x, expected: 0x%x",
+ be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ if (fmsb->version != UBI_FM_FMT_VERSION) {
+ ubi_err("bad fastmap version: %i, expected: %i",
+ fmsb->version, UBI_FM_FMT_VERSION);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ used_blocks = be32_to_cpu(fmsb->used_blocks);
+ if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
+ ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ fm_size = ubi->leb_size * used_blocks;
+ if (fm_size != ubi->fm_size) {
+ ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
+ ubi->fm_size);
+ ret = UBI_BAD_FASTMAP;
+ goto free_fm_sb;
+ }
+
+ vh = &ubi->fm_vh;
+
+ for (i = 0; i < used_blocks; i++) {
+ pnum = be32_to_cpu(fmsb->block_loc[i]);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+#ifdef LATER
+ int image_seq;
+ ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
+ i, pnum);
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ } else if (ret == UBI_IO_BITFLIPS)
+ fm->to_be_tortured[i] = 1;
+
+ image_seq = be32_to_cpu(ech->image_seq);
+ if (!ubi->image_seq)
+ ubi->image_seq = image_seq;
+ /*
+ * Older UBI implementations have image_seq set to zero, so
+ * we shouldn't fail if image_seq == 0.
+ */
+ if (image_seq && (image_seq != ubi->image_seq)) {
+ ubi_err("wrong image seq:%d instead of %d",
+ be32_to_cpu(ech->image_seq), ubi->image_seq);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+#endif
+ ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i)",
+ i, pnum);
+ goto free_hdr;
+ }
+
+ /*
+ * Mainline code rescans the anchor header. We've done
+		 * that already so we merely copy it over.
+ */
+ if (pnum == fm_anchor)
+			memcpy(vh, ubi->blockinfo + pnum, sizeof(*vh));
+
+ if (i == 0) {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
+ ubi_err("bad fastmap anchor vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_SB_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ } else {
+ if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
+ ubi_err("bad fastmap data vol_id: 0x%x," \
+ " expected: 0x%x",
+ be32_to_cpu(vh->vol_id),
+ UBI_FM_DATA_VOLUME_ID);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+ }
+
+ if (sqnum < be64_to_cpu(vh->sqnum))
+ sqnum = be64_to_cpu(vh->sqnum);
+
+ ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
+ ubi->leb_start, ubi->leb_size);
+ if (ret && ret != UBI_IO_BITFLIPS) {
+ ubi_err("unable to read fastmap block# %i (PEB: %i, " \
+ "err: %i)", i, pnum, ret);
+ goto free_hdr;
+ }
+ }
+
+ fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
+ tmp_crc = be32_to_cpu(fmsb2->data_crc);
+ fmsb2->data_crc = 0;
+ crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
+ if (crc != tmp_crc) {
+ ubi_err("fastmap data CRC is invalid");
+ ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ fmsb2->sqnum = sqnum;
+
+ fm->used_blocks = used_blocks;
+
+ ret = ubi_attach_fastmap(ubi, ai, fm);
+ if (ret) {
+ if (ret > 0)
+ ret = UBI_BAD_FASTMAP;
+ goto free_hdr;
+ }
+
+ ubi->fm = fm;
+ ubi->fm_pool.max_size = ubi->fm->max_pool_size;
+ ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
+ ubi_msg("attached by fastmap %uMB %u blocks",
+ ubi->fsize_mb, ubi->peb_count);
+ ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size);
+ ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
+
+out:
+ if (ret)
+ ubi_err("Attach by fastmap failed, doing a full scan!");
+ return ret;
+
+free_hdr:
+free_fm_sb:
+ goto out;
+}
+
+/*
+ * Scan the flash and attempt to attach via fastmap
+ */
+static void ipl_scan(struct ubi_scan_info *ubi)
+{
+ unsigned int pnum;
+ int res;
+
+ /*
+ * Scan first for the fastmap super block
+ */
+ for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
+ res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
+ /*
+		 * We ignore errors here as we are merely scanning
+ * the headers.
+ */
+ if (res != UBI_FASTMAP_ANCHOR)
+ continue;
+
+ /*
+ * If fastmap is disabled, continue scanning. This
+ * might happen because the previous attempt failed or
+ * the caller disabled it right away.
+ */
+ if (!ubi->fm_enabled)
+ continue;
+
+ /*
+ * Try to attach the fastmap, if that fails continue
+ * scanning.
+ */
+ if (!ubi_scan_fastmap(ubi, NULL, pnum))
+ return;
+ /*
+ * Fastmap failed. Clear everything we have and start
+ * over. We are paranoid and do not trust anything.
+ */
+ memset(ubi->volinfo, 0, sizeof(ubi->volinfo));
+ pnum = 0;
+ break;
+ }
+
+ /*
+ * Continue scanning, ignore errors, we might find what we are
+	 * looking for.
+ */
+ for (; pnum < ubi->peb_count; pnum++)
+ ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
+}
+
+/*
+ * Load a logical block of a volume into memory
+ */
+static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr,
+ struct ubi_vol_info *vi, u32 vol_id, u32 lnum,
+ u32 last)
+{
+ struct ubi_vid_hdr *vh, *vrepl;
+ u32 pnum, crc, dlen;
+
+retry:
+ /*
+	 * If this is a fastmap run, we try a full rescan; otherwise
+ * we simply give up.
+ */
+ if (!test_bit(lnum, vi->found)) {
+ ubi_warn("LEB %d of %d is missing", lnum, last);
+ return -EINVAL;
+ }
+
+ pnum = vi->lebs_to_pebs[lnum];
+
+ ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum);
+
+ if (ubi_io_is_bad(ubi, pnum)) {
+		ubi_warn("Corrupted mapping block %d PEB %d", lnum, pnum);
+ return -EINVAL;
+ }
+
+ if (test_bit(pnum, ubi->corrupt))
+ goto find_other;
+
+ /*
+	 * Let's try to read that block
+ */
+ vh = ubi->blockinfo + pnum;
+
+ if (!test_bit(pnum, ubi->scanned)) {
+ ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id,
+ lnum, pnum);
+ if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum))
+ goto find_other;
+ }
+
+ /*
+	 * Check if the total number of blocks is correct
+ */
+ if (be32_to_cpu(vh->used_ebs) != last) {
+		ubi_dbg("Block count mismatch.");
+ ubi_dbg("vh->used_ebs: %d nrblocks: %d",
+ be32_to_cpu(vh->used_ebs), last);
+ generic_set_bit(pnum, ubi->corrupt);
+ goto find_other;
+ }
+
+ /*
+ * Get the data length of this block.
+ */
+ dlen = be32_to_cpu(vh->data_size);
+
+ /*
+ * Read the data into RAM. We ignore the return value
+ * here as the only thing which might go wrong are
+ * bitflips. Try nevertheless.
+ */
+ ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen);
+
+ /* Calculate CRC over the data */
+ crc = crc32(UBI_CRC32_INIT, laddr, dlen);
+
+ if (crc != be32_to_cpu(vh->data_crc)) {
+ ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id,
+ lnum, pnum);
+ generic_set_bit(pnum, ubi->corrupt);
+ goto find_other;
+ }
+
+ /* We are good. Return the data length we read */
+ return dlen;
+
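+	/*
+	 * The mapped PEB was unusable: search all PEBs for another copy
+	 * of this LEB and retry with the newest replacement found.
+	 */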
+find_other:
+ ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum);
+ generic_clear_bit(lnum, vi->found);
+ vrepl = NULL;
+
+ for (pnum = 0; pnum < ubi->peb_count; pnum++) {
+ struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum;
+ u32 t_vol_id = be32_to_cpu(tmp->vol_id);
+ u32 t_lnum = be32_to_cpu(tmp->lnum);
+
+ if (test_bit(pnum, ubi->corrupt))
+ continue;
+
+ if (t_vol_id != vol_id || t_lnum != lnum)
+ continue;
+
+ if (!test_bit(pnum, ubi->scanned)) {
+ ubi_warn("Vol: %u LEB %u PEB %u not yet scanned",
+ vol_id, lnum, pnum);
+ if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum))
+ continue;
+ }
+
+ /*
+		 * We found one. If it's the first, assign it; otherwise
+		 * compare the sqnums and keep the newer one.
+ */
+ generic_set_bit(lnum, vi->found);
+
+ if (!vrepl) {
+ vrepl = tmp;
+ continue;
+ }
+
+ if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum))
+ vrepl = tmp;
+ }
+
+ if (vrepl) {
+ /* Update the vi table */
+ pnum = vrepl - ubi->blockinfo;
+ vi->lebs_to_pebs[lnum] = pnum;
+ ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum);
+ vh = vrepl;
+ }
+ goto retry;
+}
+
+/*
+ * Load a volume into RAM
+ */
+static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr)
+{
+ struct ubi_vol_info *vi;
+ u32 lnum, last, len;
+
+ if (vol_id >= UBI_SPL_VOL_IDS)
+ return -EINVAL;
+
+ len = 0;
+ vi = ubi->volinfo + vol_id;
+ last = vi->last_block + 1;
+
+ /* Read the blocks to RAM, check CRC */
+ for (lnum = 0 ; lnum < last; lnum++) {
+ int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last);
+
+ if (res < 0) {
+ ubi_warn("Failed to load volume %u", vol_id);
+ return res;
+ }
+ /* res is the data length of the read block */
+ laddr += res;
+ len += res;
+ }
+ return len;
+}
+
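+/*
+ * Usage sketch (hypothetical geometry and read callback, illustrative
+ * values only):
+ *
+ *	struct ubispl_info info = {
+ *		.ubi = scratch_area,
+ *		.peb_size = 128 * 1024,
+ *		.vid_offset = 512,
+ *		.leb_start = 2048,
+ *		.peb_count = 1024,
+ *		.peb_offset = 0,
+ *		.fastmap = 1,
+ *		.read = board_flash_read,
+ *	};
+ *	struct ubispl_load lv = { .vol_id = 0, .load_addr = dest };
+ *
+ *	ubispl_load_volumes(&info, &lv, 1);
+ */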
+int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
+ int nrvols)
+{
+ struct ubi_scan_info *ubi = info->ubi;
+ int res, i, fastmap = info->fastmap;
+ u32 fsize;
+
+retry:
+ /*
+	 * We do a partial initialization of @ubi. Cleaning fm_buf is
+ * not necessary.
+ */
+ memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));
+
+ ubi->read = info->read;
+
+ /* Precalculate the offsets */
+ ubi->vid_offset = info->vid_offset;
+ ubi->leb_start = info->leb_start;
+ ubi->leb_size = info->peb_size - ubi->leb_start;
+ ubi->peb_count = info->peb_count;
+ ubi->peb_offset = info->peb_offset;
+
+#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
+ ubi->vtbl_valid = 0;
+#endif
+
+ fsize = info->peb_size * info->peb_count;
+ ubi->fsize_mb = fsize >> 20;
+
+ /* Fastmap init */
+ ubi->fm_size = ubi_calc_fm_size(ubi);
+ ubi->fm_enabled = fastmap;
+
+ for (i = 0; i < nrvols; i++) {
+ struct ubispl_load *lv = lvols + i;
+
+ generic_set_bit(lv->vol_id, ubi->toload);
+ }
+
+ ipl_scan(ubi);
+
+ for (i = 0; i < nrvols; i++) {
+ struct ubispl_load *lv = lvols + i;
+
+#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
+ if (lv->vol_id == -1) {
+ for (int j = 0; j < UBI_SPL_VOL_IDS; j++) {
+ int len = be16_to_cpu(ubi->vtbl[j].name_len);
+
+ if (strncmp(lv->name,
+ ubi->vtbl[j].name,
+ len) == 0) {
+ lv->vol_id = j;
+ break;
+ }
+ }
+ }
+ ubi_msg("Loading VolName %s (VolId #%d)", lv->name, lv->vol_id);
+#else
+ ubi_msg("Loading VolId #%d", lv->vol_id);
+#endif
+ res = ipl_load(ubi, lv->vol_id, lv->load_addr);
+ if (res < 0) {
+ if (fastmap) {
+ fastmap = 0;
+ goto retry;
+ }
+ ubi_warn("Failed");
+ return res;
+ }
+ }
+ return 0;
+}
diff --git a/roms/u-boot/drivers/mtd/ubispl/ubispl.h b/roms/u-boot/drivers/mtd/ubispl/ubispl.h
new file mode 100644
index 000000000..b7cb7fc94
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubispl/ubispl.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
+ */
+
+#ifndef _UBOOT_MTD_UBISPL_H
+#define _UBOOT_MTD_UBISPL_H
+
+#include "../ubi/ubi-media.h"
+#include "ubi-wrapper.h"
+
+/*
+ * The maximum number of volume ids we scan. So you can load volume id
+ * 0 to (CONFIG_SPL_UBI_VOL_IDS - 1)
+ */
+#define UBI_SPL_VOL_IDS CONFIG_SPL_UBI_VOL_IDS
+/*
+ * The size of the read buffer for the fastmap blocks. In theory up to
+ * UBI_FM_MAX_BLOCKS * CONFIG_SPL_UBI_MAX_PEB_SIZE. In practice today
+ * one or two blocks.
+ */
+#define UBI_FM_BUF_SIZE (UBI_FM_MAX_BLOCKS*CONFIG_SPL_UBI_MAX_PEB_SIZE)
+/*
+ * The size of the bitmaps for the attach/scan
+ */
+#define UBI_FM_BM_SIZE ((CONFIG_SPL_UBI_MAX_PEBS / BITS_PER_LONG) + 1)
+/*
+ * The maximum number of logical erase blocks per loadable volume
+ */
+#define UBI_MAX_VOL_LEBS CONFIG_SPL_UBI_MAX_VOL_LEBS
+/*
+ * The bitmap size for the above to denote the found blocks inside the volume
+ */
+#define UBI_VOL_BM_SIZE ((UBI_MAX_VOL_LEBS / BITS_PER_LONG) + 1)
+
+/**
+ * struct ubi_vol_info - UBISPL internal volume representation
+ * @last_block: The last block (highest LEB) found for this volume
+ * @found: Bitmap to mark found LEBs
+ * @lebs_to_pebs: LEB to PEB translation table
+ */
+struct ubi_vol_info {
+ u32 last_block;
+ unsigned long found[UBI_VOL_BM_SIZE];
+ u32 lebs_to_pebs[UBI_MAX_VOL_LEBS];
+};
+
+/**
+ * struct ubi_scan_info - UBISPL internal data for FM attach and full scan
+ *
+ * @read: Read function to access the flash provided by the caller
+ * @peb_count: Number of physical erase blocks in the UBI FLASH area
+ * aka MTD partition.
+ * @peb_offset: Offset of PEB0 in the UBI FLASH area (aka MTD partition)
+ * to the real start of the FLASH in erase blocks.
+ * @fsize_mb: Size of the scanned FLASH area in MB (stats only)
+ * @vid_offset: Offset from the start of a PEB to the VID header
+ * @leb_start: Offset from the start of a PEB to the data area
+ * @leb_size: Size of the data area
+ *
+ * @fastmap_pebs: Counter of PEBs "attached" by fastmap
+ * @fastmap_anchor: The anchor PEB of the fastmap
+ * @fm_sb: The fastmap super block data
+ * @fm_vh: The fastmap VID header
+ * @fm: Pointer to the fastmap layout
+ * @fm_layout: The fastmap layout itself
+ * @fm_pool: The pool of PEBs to scan at fastmap attach time
+ * @fm_wl_pool: The pool of PEBs scheduled for wearleveling
+ *
+ * @fm_enabled: Indicator whether fastmap attachment is enabled.
+ * @fm_used:		Bitmap to indicate the PEBs covered by fastmap
+ * @scanned:		Bitmap to indicate the PEBs whose VID header
+ *			has been physically scanned.
+ * @corrupt: Bitmap to indicate corrupt blocks
+ * @toload: Bitmap to indicate the volumes which should be loaded
+ *
+ * @blockinfo: The vid headers of the scanned blocks
+ * @volinfo: The volume information of the interesting (toload)
+ * volumes
+ * @vtbl_valid:		Flag to indicate whether the volume table is valid
+ * @vtbl: Volume table
+ *
+ * @fm_buf: The large fastmap attach buffer
+ */
+struct ubi_scan_info {
+ ubispl_read_flash read;
+ unsigned int fsize_mb;
+ unsigned int peb_count;
+ unsigned int peb_offset;
+
+ unsigned long vid_offset;
+ unsigned long leb_start;
+ unsigned long leb_size;
+
+ /* Fastmap: The upstream required fields */
+ int fastmap_pebs;
+ int fastmap_anchor;
+ size_t fm_size;
+ struct ubi_fm_sb fm_sb;
+ struct ubi_vid_hdr fm_vh;
+ struct ubi_fastmap_layout *fm;
+ struct ubi_fastmap_layout fm_layout;
+ struct ubi_fm_pool fm_pool;
+ struct ubi_fm_pool fm_wl_pool;
+
+ /* Fastmap: UBISPL specific data */
+ int fm_enabled;
+ unsigned long fm_used[UBI_FM_BM_SIZE];
+ unsigned long scanned[UBI_FM_BM_SIZE];
+ unsigned long corrupt[UBI_FM_BM_SIZE];
+ unsigned long toload[UBI_FM_BM_SIZE];
+
+ /* Data for storing the VID and volume information */
+ struct ubi_vol_info volinfo[UBI_SPL_VOL_IDS];
+ struct ubi_vid_hdr blockinfo[CONFIG_SPL_UBI_MAX_PEBS];
+
+#ifdef CONFIG_SPL_UBI_LOAD_BY_VOLNAME
+ /* Volume table */
+ int vtbl_valid;
+ struct ubi_vtbl_record vtbl[UBI_SPL_VOL_IDS];
+#endif
+ /* The large buffer for the fastmap */
+ uint8_t fm_buf[UBI_FM_BUF_SIZE];
+};
+
+#ifdef CFG_DEBUG
+#define ubi_dbg(fmt, ...) printf("UBI: debug: " fmt "\n", ##__VA_ARGS__)
+#else
+#define ubi_dbg(fmt, ...)
+#endif
+
+#ifdef CONFIG_UBI_SPL_SILENCE_MSG
+#define ubi_msg(fmt, ...)
+#else
+#define ubi_msg(fmt, ...) printf("UBI: " fmt "\n", ##__VA_ARGS__)
+#endif
+/* UBI warning messages */
+#define ubi_warn(fmt, ...) printf("UBI warning: " fmt "\n", ##__VA_ARGS__)
+/* UBI error messages */
+#define ubi_err(fmt, ...) printf("UBI error: " fmt "\n", ##__VA_ARGS__)
+
+#endif