author    Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
committer Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
commit    af1a266670d040d2f4083ff309d732d648afba2a (patch)
tree      2fc46203448ddcc6f81546d379abfaeb323575e9 /roms/u-boot/drivers/spi
parent    e02cda008591317b1625707ff8e115a4841aa889 (diff)
Add submodule dependency files (HEAD, master)
Change-Id: Iaf8d18082d3991dec7c0ebbea540f092188eb4ec
Diffstat (limited to 'roms/u-boot/drivers/spi')
-rw-r--r-- roms/u-boot/drivers/spi/Kconfig | 484
-rw-r--r-- roms/u-boot/drivers/spi/Makefile | 70
-rw-r--r-- roms/u-boot/drivers/spi/altera_spi.c | 209
-rw-r--r-- roms/u-boot/drivers/spi/atcspi200_spi.c | 421
-rw-r--r-- roms/u-boot/drivers/spi/ath79_spi.c | 229
-rw-r--r-- roms/u-boot/drivers/spi/atmel-quadspi.c | 629
-rw-r--r-- roms/u-boot/drivers/spi/atmel_spi.c | 394
-rw-r--r-- roms/u-boot/drivers/spi/atmel_spi.h | 86
-rw-r--r-- roms/u-boot/drivers/spi/bcm63xx_hsspi.c | 409
-rw-r--r-- roms/u-boot/drivers/spi/bcm63xx_spi.c | 428
-rw-r--r-- roms/u-boot/drivers/spi/bcmstb_spi.c | 440
-rw-r--r-- roms/u-boot/drivers/spi/ca_sflash.c | 577
-rw-r--r-- roms/u-boot/drivers/spi/cadence_qspi.c | 362
-rw-r--r-- roms/u-boot/drivers/spi/cadence_qspi.h | 87
-rw-r--r-- roms/u-boot/drivers/spi/cadence_qspi_apb.c | 812
-rw-r--r-- roms/u-boot/drivers/spi/cf_spi.c | 461
-rw-r--r-- roms/u-boot/drivers/spi/davinci_spi.c | 429
-rw-r--r-- roms/u-boot/drivers/spi/designware_spi.c | 782
-rw-r--r-- roms/u-boot/drivers/spi/exynos_spi.c | 434
-rw-r--r-- roms/u-boot/drivers/spi/fsl_dspi.c | 671
-rw-r--r-- roms/u-boot/drivers/spi/fsl_espi.c | 585
-rw-r--r-- roms/u-boot/drivers/spi/fsl_qspi.c | 885
-rw-r--r-- roms/u-boot/drivers/spi/ich.c | 1003
-rw-r--r-- roms/u-boot/drivers/spi/ich.h | 244
-rw-r--r-- roms/u-boot/drivers/spi/kirkwood_spi.c | 359
-rw-r--r-- roms/u-boot/drivers/spi/meson_spifc.c | 322
-rw-r--r-- roms/u-boot/drivers/spi/mpc8xx_spi.c | 217
-rw-r--r-- roms/u-boot/drivers/spi/mpc8xxx_spi.c | 285
-rw-r--r-- roms/u-boot/drivers/spi/mscc_bb_spi.c | 237
-rw-r--r-- roms/u-boot/drivers/spi/mt7620_spi.c | 281
-rw-r--r-- roms/u-boot/drivers/spi/mt7621_spi.c | 309
-rw-r--r-- roms/u-boot/drivers/spi/mtk_snfi_spi.c | 318
-rw-r--r-- roms/u-boot/drivers/spi/mtk_snor.c | 563
-rw-r--r-- roms/u-boot/drivers/spi/mvebu_a3700_spi.c | 332
-rw-r--r-- roms/u-boot/drivers/spi/mxc_spi.c | 703
-rw-r--r-- roms/u-boot/drivers/spi/mxs_spi.c | 496
-rw-r--r-- roms/u-boot/drivers/spi/nxp_fspi.c | 1008
-rw-r--r-- roms/u-boot/drivers/spi/octeon_spi.c | 616
-rw-r--r-- roms/u-boot/drivers/spi/omap3_spi.c | 527
-rw-r--r-- roms/u-boot/drivers/spi/pic32_spi.c | 450
-rw-r--r-- roms/u-boot/drivers/spi/pl022_spi.c | 326
-rw-r--r-- roms/u-boot/drivers/spi/renesas_rpc_spi.c | 472
-rw-r--r-- roms/u-boot/drivers/spi/rk_spi.c | 568
-rw-r--r-- roms/u-boot/drivers/spi/rk_spi.h | 130
-rw-r--r-- roms/u-boot/drivers/spi/sandbox_spi.c | 179
-rw-r--r-- roms/u-boot/drivers/spi/sh_qspi.c | 360
-rw-r--r-- roms/u-boot/drivers/spi/soft_spi.c | 288
-rw-r--r-- roms/u-boot/drivers/spi/spi-emul-uclass.c | 14
-rw-r--r-- roms/u-boot/drivers/spi/spi-mem-nodm.c | 107
-rw-r--r-- roms/u-boot/drivers/spi/spi-mem.c | 537
-rw-r--r-- roms/u-boot/drivers/spi/spi-qup.c | 803
-rw-r--r-- roms/u-boot/drivers/spi/spi-sifive.c | 480
-rw-r--r-- roms/u-boot/drivers/spi/spi-sunxi.c | 638
-rw-r--r-- roms/u-boot/drivers/spi/spi-uclass.c | 555
-rw-r--r-- roms/u-boot/drivers/spi/spi.c | 40
-rw-r--r-- roms/u-boot/drivers/spi/stm32_qspi.c | 552
-rw-r--r-- roms/u-boot/drivers/spi/stm32_spi.c | 624
-rw-r--r-- roms/u-boot/drivers/spi/tegra114_spi.c | 400
-rw-r--r-- roms/u-boot/drivers/spi/tegra20_sflash.c | 361
-rw-r--r-- roms/u-boot/drivers/spi/tegra20_slink.c | 380
-rw-r--r-- roms/u-boot/drivers/spi/tegra210_qspi.c | 431
-rw-r--r-- roms/u-boot/drivers/spi/tegra_spi.h | 11
-rw-r--r-- roms/u-boot/drivers/spi/ti_qspi.c | 511
-rw-r--r-- roms/u-boot/drivers/spi/uniphier_spi.c | 419
-rw-r--r-- roms/u-boot/drivers/spi/xilinx_spi.c | 367
-rw-r--r-- roms/u-boot/drivers/spi/zynq_qspi.c | 674
-rw-r--r-- roms/u-boot/drivers/spi/zynq_spi.c | 379
-rw-r--r-- roms/u-boot/drivers/spi/zynqmp_gqspi.c | 721
68 files changed, 29481 insertions, 0 deletions
diff --git a/roms/u-boot/drivers/spi/Kconfig b/roms/u-boot/drivers/spi/Kconfig
new file mode 100644
index 000000000..1494c9176
--- /dev/null
+++ b/roms/u-boot/drivers/spi/Kconfig
@@ -0,0 +1,484 @@
+menuconfig SPI
+ bool "SPI Support"
+ help
+ The "Serial Peripheral Interface" is a low level synchronous
+ protocol. Chips that support SPI can have data transfer rates
+ up to several tens of Mbit/sec. Chips are addressed with a
+ controller and a chipselect. Most SPI slaves don't support
+ dynamic device discovery; some are even write-only or read-only.
+
+ SPI is widely used by microcontrollers to talk with sensors,
+ eeprom and flash memory, codecs and various other controller
+ chips, analog to digital (and d-to-a) converters, and more.
+ MMC and SD cards can be accessed using SPI protocol; and for
+ DataFlash cards used in MMC sockets, SPI must always be used.
+
+ SPI is one of a family of similar protocols using a four wire
+ interface (select, clock, data in, data out) including Microwire
+ (half duplex), SSP, SSI, and PSP. This driver framework should
+ work with most such devices and controllers.
+
+if SPI
+
+config DM_SPI
+ bool "Enable Driver Model for SPI drivers"
+ depends on DM
+ help
+ Enable driver model for SPI. The SPI slave interface
+ (spi_setup_slave(), spi_xfer(), etc.) is then implemented by
+ the SPI uclass. Drivers provide methods to access the SPI
+ buses that they control. The uclass interface is defined in
+ include/spi.h. The existing spi_slave structure is attached
+ as 'parent data' to every slave on each bus. Slaves
+ typically use driver-private data instead of extending the
+ spi_slave structure.
+
+config SPI_MEM
+ bool "SPI memory extension"
+ help
+ Enable this option if you want to enable the SPI memory extension.
+ This extension is meant to simplify interaction with SPI memories
+ by providing a high-level interface to send memory-like commands
+ (a minimal usage sketch follows this Kconfig diff).
+
+if DM_SPI
+
+config ALTERA_SPI
+ bool "Altera SPI driver"
+ help
+ Enable the Altera SPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this Altera
+ IP core. Please see Altera's "Embedded Peripherals IP User
+ Guide" for details.
+
+config ATCSPI200_SPI
+ bool "Andestech ATCSPI200 SPI driver"
+ help
+ Enable the Andestech ATCSPI200 SPI driver. This driver can be
+ used to access the SPI flash on AE3XX and AE250 platforms embedding
+ this Andestech IP core.
+
+config ATH79_SPI
+ bool "Atheros SPI driver"
+ depends on ARCH_ATH79
+ help
+ Enable the Atheros ar7xxx/ar9xxx SoC SPI driver, which is used
+ to access SPI NOR flash and other SPI peripherals. This driver
+ uses driver model and requires a device tree binding to operate.
+ Please refer to doc/device-tree-bindings/spi/spi-ath79.txt.
+
+config ATMEL_QSPI
+ bool "Atmel Quad SPI Controller"
+ depends on ARCH_AT91
+ help
+ Enable the Atmel Quad SPI controller in master mode. This driver
+ does not support generic SPI. The implementation supports only the
+ spi-mem interface.
+
+config ATMEL_SPI
+ bool "Atmel SPI driver"
+ default y if ARCH_AT91
+ help
+ This enables the driver for the Atmel SPI controller, present on
+ many AT91 (ARM) chips. This driver can be used to access SPI
+ flash devices such as the AT25DF321.
+
+config BCM63XX_HSSPI
+ bool "BCM63XX HSSPI driver"
+ depends on (ARCH_BMIPS || ARCH_BCM68360 || \
+ ARCH_BCM6858 || ARCH_BCM63158)
+ help
+ Enable the BCM6328 HSSPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this Broadcom
+ SPI core.
+
+config BCM63XX_SPI
+ bool "BCM6348 SPI driver"
+ depends on ARCH_BMIPS
+ help
+ Enable the BCM6348/BCM6358 SPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding these Broadcom
+ SPI cores.
+
+config BCMSTB_SPI
+ bool "BCMSTB SPI driver"
+ help
+ Enable the Broadcom set-top box SPI driver. This driver can
+ be used to access the SPI flash on platforms embedding this
+ Broadcom SPI core.
+
+config CORTINA_SFLASH
+ bool "Cortina-Access Serial Flash controller driver"
+ depends on DM_SPI && SPI_MEM
+ help
+ Enable the Cortina-Access Serial Flash controller driver. This driver
+ can be used to access the SPI NOR/NAND flash on platforms embedding this
+ Cortina-Access IP core.
+
+config CADENCE_QSPI
+ bool "Cadence QSPI driver"
+ help
+ Enable the Cadence Quad-SPI (QSPI) driver. This driver can be
+ used to access the SPI NOR flash on platforms embedding this
+ Cadence IP core.
+
+config CF_SPI
+ bool "ColdFire SPI driver"
+ help
+ Enable the ColdFire SPI driver. This driver can be used on
+ some m68k SoCs.
+
+config DAVINCI_SPI
+ bool "Davinci & Keystone SPI driver"
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
+ help
+ Enable the Davinci SPI driver.
+
+config DESIGNWARE_SPI
+ bool "Designware SPI driver"
+ help
+ Enable the Designware SPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this Designware
+ IP core.
+
+config EXYNOS_SPI
+ bool "Samsung Exynos SPI driver"
+ help
+ Enable the Samsung Exynos SPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this Samsung
+ Exynos IP core.
+
+config FSL_DSPI
+ bool "Freescale DSPI driver"
+ help
+ Enable the Freescale DSPI driver. This driver can be used to
+ access the SPI NOR flash and SPI Data flash on platforms embedding
+ this Freescale DSPI IP core. LS102xA and Colibri VF50/VF61 platforms
+ use this driver.
+
+config FSL_QSPI
+ bool "Freescale QSPI driver"
+ imply SPI_FLASH_BAR
+ help
+ Enable the Freescale Quad-SPI (QSPI) driver. This driver can be
+ used to access the SPI NOR flash on platforms embedding this
+ Freescale IP core.
+
+config FSL_QSPI_AHB_FULL_MAP
+ bool "Use full AHB memory map space"
+ depends on FSL_QSPI
+ default y if ARCH_MX6 || ARCH_MX7 || ARCH_MX7ULP || ARCH_IMX8M
+ help
+ Enable the Freescale QSPI driver to use full AHB memory map space for
+ flash access.
+
+config ICH_SPI
+ bool "Intel ICH SPI driver"
+ help
+ Enable the Intel ICH SPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this Intel
+ ICH IP core.
+
+config KIRKWOOD_SPI
+ bool "Marvell Kirkwood SPI Driver"
+ help
+ Enable support for SPI on various Marvell SoCs, such as
+ Kirkwood and Armada 375.
+
+config MESON_SPIFC
+ bool "Amlogic Meson SPI Flash Controller driver"
+ depends on ARCH_MESON
+ help
+ Enable the Amlogic Meson SPI Flash Controller (SPIFC) driver.
+ This driver can be used to access the SPI NOR flash chips on
+ Amlogic Meson SoCs.
+
+config MPC8XX_SPI
+ bool "MPC8XX SPI Driver"
+ depends on MPC8xx
+ help
+ Enable support for SPI on MPC8XX.
+
+config MPC8XXX_SPI
+ bool "MPC8XXX SPI Driver"
+ help
+ Enable support for SPI on the MPC8XXX PowerPC SoCs.
+
+config MSCC_BB_SPI
+ bool "MSCC bitbang SPI driver"
+ depends on SOC_VCOREIII
+ help
+ Enable the MSCC bitbang SPI driver. This driver can be used on
+ MSCC SoCs.
+
+config MT7620_SPI
+ bool "MediaTek MT7620 SPI driver"
+ depends on SOC_MT7620
+ help
+ Enable the MT7620 SPI driver. This driver can be used to access
+ generic SPI devices on MediaTek MT7620 SoC.
+
+config MT7621_SPI
+ bool "MediaTek MT7621 SPI driver"
+ depends on SOC_MT7628
+ help
+ Enable the MT7621 SPI driver. This driver can be used to access
+ the SPI NOR flash on platforms embedding this Ralink / MediaTek
+ SPI core, like MT7621/7628/7688.
+
+config MTK_SNOR
+ bool "Mediatek SPI-NOR controller driver"
+ depends on SPI_MEM
+ help
+ Enable the MediaTek SPI-NOR controller driver. This driver offers
+ better read/write performance with SPI-NOR flash.
+
+config MTK_SNFI_SPI
+ bool "Mediatek SPI memory controller driver"
+ depends on SPI_MEM
+ help
+ Enable the MediaTek SPI memory controller driver. This driver is
+ based on the MediaTek SNFI IP core. It can only be used to access
+ SPI memory devices such as SPI-NOR or SPI-NAND on platforms
+ embedding this IP core, such as the MT7622/MT7629.
+
+config MVEBU_A3700_SPI
+ bool "Marvell Armada 3700 SPI driver"
+ select CLK_ARMADA_3720
+ help
+ Enable the Marvell Armada 3700 SPI driver. This driver can be
+ used to access the SPI NOR flash on platforms embedding this
+ Marvell IP core.
+
+config MXS_SPI
+ bool "MXS SPI Driver"
+ help
+ Enable the MXS SPI controller driver. This driver can be used
+ on the i.MX23 and i.MX28 SoCs.
+
+config NXP_FSPI
+ bool "NXP FlexSPI driver"
+ depends on SPI_MEM
+ help
+ Enable the NXP FlexSPI (FSPI) driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this NXP IP core.
+
+config OCTEON_SPI
+ bool "Octeon SPI driver"
+ depends on DM_PCI && (ARCH_OCTEON || ARCH_OCTEONTX || ARCH_OCTEONTX2)
+ help
+ Enable the Octeon SPI driver. This driver can be used to
+ access the SPI NOR flash on Octeon II/III and OcteonTX/TX2
+ SoC platforms.
+
+config OMAP3_SPI
+ bool "McSPI driver for OMAP"
+ help
+ SPI master controller for OMAP24XX and later Multichannel SPI
+ (McSPI). This driver can be used to access SPI chips on platforms
+ embedding this OMAP3 McSPI IP core.
+
+config PIC32_SPI
+ bool "Microchip PIC32 SPI driver"
+ depends on MACH_PIC32
+ help
+ Enable the Microchip PIC32 SPI driver. This driver can be used
+ to access the SPI NOR flash, MMC-over-SPI on platforms based on
+ Microchip PIC32 family devices.
+
+config PL022_SPI
+ bool "ARM AMBA PL022 SSP controller driver"
+ depends on ARM
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP
+ controller. If you have an embedded system with an AMBA(R)
+ bus and a PL022 controller, say Y or M here.
+
+config SPI_QUP
+ bool "Qualcomm SPI controller with QUP interface"
+ depends on ARCH_IPQ40XX
+ help
+ The Qualcomm Universal Peripheral (QUP) core is an AHB slave that
+ provides a common data path (an output FIFO and an input FIFO)
+ for the serial peripheral interface (SPI) mini-core. In master
+ mode, SPI supports clock rates of up to 50 MHz, up to four chip
+ selects, a programmable data path from 4 to 32 bits, and numerous
+ protocol variants.
+
+config RENESAS_RPC_SPI
+ bool "Renesas RPC SPI driver"
+ depends on RCAR_GEN3 || RZA1
+ imply SPI_FLASH_BAR
+ help
+ Enable the Renesas RPC SPI driver, used to access SPI NOR flash
+ on Renesas RCar Gen3 SoCs. This uses driver model and requires a
+ device tree binding to operate.
+
+config ROCKCHIP_SPI
+ bool "Rockchip SPI driver"
+ help
+ Enable the Rockchip SPI driver, used to access SPI NOR flash and
+ other SPI peripherals (such as the Chrome OS EC) on Rockchip SoCs.
+ This uses driver model and requires a device tree binding to
+ operate.
+
+config SANDBOX_SPI
+ bool "Sandbox SPI driver"
+ depends on SANDBOX && DM
+ help
+ Enable SPI support for sandbox. This is an emulation of a real SPI
+ bus. Devices can be attached to the bus using the device tree
+ which specifies the driver to use. As an example, see this device
+ tree fragment from sandbox.dts. It shows that the SPI bus has a
+ single flash device on chip select 0 which is emulated by the driver
+ for "sandbox,spi-flash", which is in drivers/mtd/spi/sandbox.c.
+
+ spi@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ compatible = "sandbox,spi";
+ cs-gpios = <0>, <&gpio_a 0>;
+ flash@0 {
+ reg = <0>;
+ compatible = "spansion,m25p16", "jedec,spi-nor";
+ spi-max-frequency = <40000000>;
+ sandbox,filename = "spi.bin";
+ };
+ };
+
+config SPI_SIFIVE
+ bool "SiFive SPI driver"
+ help
+ Enable the SiFive SPI controller driver. This driver supports the
+ SiFive SPI IP found on various SiFive SoCs.
+
+ If unsure, say N.
+
+config SOFT_SPI
+ bool "Soft SPI driver"
+ help
+ Enable the soft SPI driver. This driver uses GPIOs to bit-bang
+ (simulate) the SPI protocol.
+
+config SPI_SUNXI
+ bool "Allwinner SoC SPI controllers"
+ default ARCH_SUNXI
+ help
+ Enable the Allwinner SoC SPI controller driver.
+
+ The same controller driver can be reused on all Allwinner SoC
+ variants.
+
+config STM32_QSPI
+ bool "STM32F7 QSPI driver"
+ depends on STM32F4 || STM32F7 || ARCH_STM32MP
+ help
+ Enable the STM32F7 Quad-SPI (QSPI) driver. This driver can be
+ used to access the SPI NOR flash chips on platforms embedding
+ this ST IP core.
+
+config STM32_SPI
+ bool "STM32 SPI driver"
+ depends on ARCH_STM32MP
+ help
+ Enable the STM32 Serial Peripheral Interface (SPI) driver for STM32MP
+ SoCs. This uses driver model and requires a device tree binding to
+ operate.
+
+config TEGRA114_SPI
+ bool "nVidia Tegra114 SPI driver"
+ help
+ Enable the nVidia Tegra114 SPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this nVidia Tegra114
+ IP core.
+
+ This controller is different from the SPI controller on older
+ Tegra SoCs, and its register interface has also changed.
+
+config TEGRA20_SFLASH
+ bool "nVidia Tegra20 Serial Flash controller driver"
+ help
+ Enable the nVidia Tegra20 Serial Flash controller driver. This driver
+ can be used to access the SPI NOR flash on platforms embedding this
+ nVidia Tegra20 IP core.
+
+config TEGRA20_SLINK
+ bool "nVidia Tegra20/Tegra30 SLINK driver"
+ help
+ Enable the nVidia Tegra20/Tegra30 SLINK driver. This driver can
+ be used to access the SPI NOR flash on platforms embedding this
+ nVidia Tegra20/Tegra30 IP cores.
+
+config TEGRA210_QSPI
+ bool "nVidia Tegra210 QSPI driver"
+ help
+ Enable the Tegra Quad-SPI (QSPI) driver for T210. This driver
+ can be used to access SPI chips on platforms embedding this
+ NVIDIA Tegra210 IP core.
+
+config TI_QSPI
+ bool "TI QSPI driver"
+ imply TI_EDMA3
+ help
+ Enable the TI Quad-SPI (QSPI) driver for DRA7xx and AM43xx EVMs.
+ This driver supports single, quad and memory-mapped SPI flash reads.
+
+config UNIPHIER_SPI
+ bool "Socionext UniPhier SPI driver"
+ depends on ARCH_UNIPHIER
+ help
+ Enable the Socionext UniPhier SPI driver. This driver can
+ be used to access SPI chips on platforms embedding this
+ UniPhier IP core.
+
+config XILINX_SPI
+ bool "Xilinx SPI driver"
+ help
+ Enable the Xilinx SPI driver from the Xilinx EDK. This SPI
+ controller supports 8-bit SPI transfers only, with or without FIFO.
+ For more information on the Xilinx SPI register definitions and an
+ overview, see the driver file drivers/spi/xilinx_spi.c.
+
+config ZYNQ_SPI
+ bool "Zynq SPI driver"
+ help
+ Enable the Zynq SPI driver. This driver can be used to
+ access the SPI NOR flash on platforms embedding this Zynq
+ SPI IP core.
+
+config ZYNQ_QSPI
+ bool "Zynq QSPI driver"
+ imply SPI_FLASH_BAR
+ help
+ Enable the Zynq Quad-SPI (QSPI) driver. This driver can be
+ used to access the SPI NOR flash on platforms embedding this
+ Zynq QSPI IP core. This IP is used to connect the flash in
+ 4-bit QSPI, 8-bit dual stacked and shared 4-bit dual parallel
+ configurations.
+
+config ZYNQMP_GQSPI
+ bool "Configure ZynqMP Generic QSPI"
+ help
+ Enable the ZynqMP Generic QSPI controller driver, which is used
+ to communicate with QSPI flash devices.
+
+endif # if DM_SPI
+
+config FSL_ESPI
+ bool "Freescale eSPI driver"
+ imply SPI_FLASH_BAR
+ help
+ Enable the Freescale eSPI driver. This driver can be used to
+ access the SPI interface and SPI NOR flash on platforms embedding
+ this Freescale eSPI IP core.
+
+config SH_QSPI
+ bool "Renesas Quad SPI driver"
+ help
+ Enable the Renesas Quad SPI controller driver. This driver can be
+ used on Renesas SoCs.
+
+config MXC_SPI
+ bool "MXC SPI Driver"
+ help
+ Enable the MXC SPI controller driver. This driver can be used
+ on various i.MX SoCs such as i.MX31/35/51/6/7.
+
+endif # menu "SPI Support"
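The SPI_MEM help above refers to a high-level interface for sending memory-like commands. Assuming the caller already holds a probed struct spi_slave, such a command is typically described with the SPI_MEM_OP macros from include/spi-mem.h and handed to spi_mem_exec_op(), as in the minimal sketch below; the JEDEC-ID opcode, buffer and helper name are illustrative only and not part of this patch.

#include <spi.h>
#include <spi-mem.h>

/* Hypothetical helper: read the 3-byte JEDEC ID (opcode 0x9f) of a flash
 * behind 'slave', using the spi-mem extension instead of raw spi_xfer()
 * calls.
 */
static int example_read_jedec_id(struct spi_slave *slave, u8 *id)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	/* The controller driver (for example atmel-quadspi below) turns
	 * this description into its own register programming.
	 */
	return spi_mem_exec_op(slave, &op);
}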
diff --git a/roms/u-boot/drivers/spi/Makefile b/roms/u-boot/drivers/spi/Makefile
new file mode 100644
index 000000000..cfe4fae1d
--- /dev/null
+++ b/roms/u-boot/drivers/spi/Makefile
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# (C) Copyright 2000-2007
+# Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+
+# There are many options which enable SPI, so make this library available
+ifdef CONFIG_$(SPL_TPL_)DM_SPI
+obj-y += spi-uclass.o
+obj-$(CONFIG_CADENCE_QSPI) += cadence_qspi.o cadence_qspi_apb.o
+obj-$(CONFIG_SANDBOX) += spi-emul-uclass.o
+obj-$(CONFIG_SOFT_SPI) += soft_spi.o
+obj-$(CONFIG_SPI_MEM) += spi-mem.o
+obj-$(CONFIG_TI_QSPI) += ti_qspi.o
+else
+obj-y += spi.o
+obj-$(CONFIG_SPI_MEM) += spi-mem-nodm.o
+endif
+
+obj-$(CONFIG_ALTERA_SPI) += altera_spi.o
+obj-$(CONFIG_ATH79_SPI) += ath79_spi.o
+obj-$(CONFIG_ATMEL_QSPI) += atmel-quadspi.o
+obj-$(CONFIG_ATMEL_SPI) += atmel_spi.o
+obj-$(CONFIG_BCM63XX_HSSPI) += bcm63xx_hsspi.o
+obj-$(CONFIG_BCM63XX_SPI) += bcm63xx_spi.o
+obj-$(CONFIG_BCMSTB_SPI) += bcmstb_spi.o
+obj-$(CONFIG_CF_SPI) += cf_spi.o
+obj-$(CONFIG_CORTINA_SFLASH) += ca_sflash.o
+obj-$(CONFIG_DAVINCI_SPI) += davinci_spi.o
+obj-$(CONFIG_DESIGNWARE_SPI) += designware_spi.o
+obj-$(CONFIG_EXYNOS_SPI) += exynos_spi.o
+obj-$(CONFIG_FSL_DSPI) += fsl_dspi.o
+obj-$(CONFIG_FSL_ESPI) += fsl_espi.o
+obj-$(CONFIG_FSL_QSPI) += fsl_qspi.o
+obj-$(CONFIG_ICH_SPI) += ich.o
+obj-$(CONFIG_KIRKWOOD_SPI) += kirkwood_spi.o
+obj-$(CONFIG_MESON_SPIFC) += meson_spifc.o
+obj-$(CONFIG_MPC8XX_SPI) += mpc8xx_spi.o
+obj-$(CONFIG_MPC8XXX_SPI) += mpc8xxx_spi.o
+obj-$(CONFIG_MTK_SNFI_SPI) += mtk_snfi_spi.o
+obj-$(CONFIG_MTK_SNOR) += mtk_snor.o
+obj-$(CONFIG_MT7620_SPI) += mt7620_spi.o
+obj-$(CONFIG_MT7621_SPI) += mt7621_spi.o
+obj-$(CONFIG_MSCC_BB_SPI) += mscc_bb_spi.o
+obj-$(CONFIG_MVEBU_A3700_SPI) += mvebu_a3700_spi.o
+obj-$(CONFIG_MXC_SPI) += mxc_spi.o
+obj-$(CONFIG_MXS_SPI) += mxs_spi.o
+obj-$(CONFIG_NXP_FSPI) += nxp_fspi.o
+obj-$(CONFIG_ATCSPI200_SPI) += atcspi200_spi.o
+obj-$(CONFIG_OCTEON_SPI) += octeon_spi.o
+obj-$(CONFIG_OMAP3_SPI) += omap3_spi.o
+obj-$(CONFIG_PIC32_SPI) += pic32_spi.o
+obj-$(CONFIG_PL022_SPI) += pl022_spi.o
+obj-$(CONFIG_SPI_QUP) += spi-qup.o
+obj-$(CONFIG_RENESAS_RPC_SPI) += renesas_rpc_spi.o
+obj-$(CONFIG_ROCKCHIP_SPI) += rk_spi.o
+obj-$(CONFIG_SANDBOX_SPI) += sandbox_spi.o
+obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
+obj-$(CONFIG_SPI_SUNXI) += spi-sunxi.o
+obj-$(CONFIG_SH_QSPI) += sh_qspi.o
+obj-$(CONFIG_STM32_QSPI) += stm32_qspi.o
+obj-$(CONFIG_STM32_SPI) += stm32_spi.o
+obj-$(CONFIG_TEGRA114_SPI) += tegra114_spi.o
+obj-$(CONFIG_TEGRA20_SFLASH) += tegra20_sflash.o
+obj-$(CONFIG_TEGRA20_SLINK) += tegra20_slink.o
+obj-$(CONFIG_TEGRA210_QSPI) += tegra210_qspi.o
+obj-$(CONFIG_UNIPHIER_SPI) += uniphier_spi.o
+obj-$(CONFIG_XILINX_SPI) += xilinx_spi.o
+obj-$(CONFIG_ZYNQ_SPI) += zynq_spi.o
+obj-$(CONFIG_ZYNQ_QSPI) += zynq_qspi.o
+obj-$(CONFIG_ZYNQMP_GQSPI) += zynqmp_gqspi.o
diff --git a/roms/u-boot/drivers/spi/altera_spi.c b/roms/u-boot/drivers/spi/altera_spi.c
new file mode 100644
index 000000000..fadc9f396
--- /dev/null
+++ b/roms/u-boot/drivers/spi/altera_spi.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Altera SPI driver
+ *
+ * based on bfin_spi.c
+ * Copyright (c) 2005-2008 Analog Devices Inc.
+ * Copyright (C) 2010 Thomas Chou <thomas@wytron.com.tw>
+ */
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <fdtdec.h>
+#include <spi.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+
+#define ALTERA_SPI_STATUS_RRDY_MSK BIT(7)
+#define ALTERA_SPI_CONTROL_SSO_MSK BIT(10)
+
+#ifndef CONFIG_ALTERA_SPI_IDLE_VAL
+#define CONFIG_ALTERA_SPI_IDLE_VAL 0xff
+#endif
+
+struct altera_spi_regs {
+ u32 rxdata;
+ u32 txdata;
+ u32 status;
+ u32 control;
+ u32 _reserved;
+ u32 slave_sel;
+};
+
+struct altera_spi_plat {
+ struct altera_spi_regs *regs;
+};
+
+struct altera_spi_priv {
+ struct altera_spi_regs *regs;
+};
+
+static void spi_cs_activate(struct udevice *dev, uint cs)
+{
+ struct udevice *bus = dev->parent;
+ struct altera_spi_priv *priv = dev_get_priv(bus);
+ struct altera_spi_regs *const regs = priv->regs;
+
+ writel(1 << cs, &regs->slave_sel);
+ writel(ALTERA_SPI_CONTROL_SSO_MSK, &regs->control);
+}
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct altera_spi_priv *priv = dev_get_priv(bus);
+ struct altera_spi_regs *const regs = priv->regs;
+
+ writel(0, &regs->control);
+ writel(0, &regs->slave_sel);
+}
+
+static int altera_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct altera_spi_priv *priv = dev_get_priv(bus);
+ struct altera_spi_regs *const regs = priv->regs;
+
+ writel(0, &regs->control);
+ writel(0, &regs->slave_sel);
+
+ return 0;
+}
+
+static int altera_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct altera_spi_priv *priv = dev_get_priv(bus);
+ struct altera_spi_regs *const regs = priv->regs;
+
+ writel(0, &regs->slave_sel);
+
+ return 0;
+}
+
+static int altera_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct altera_spi_priv *priv = dev_get_priv(bus);
+ struct altera_spi_regs *const regs = priv->regs;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ /* assume spi core configured to do 8 bit transfers */
+ unsigned int bytes = bitlen / 8;
+ const unsigned char *txp = dout;
+ unsigned char *rxp = din;
+ uint32_t reg, data, start;
+
+ debug("%s: bus:%i cs:%i bitlen:%i bytes:%i flags:%lx\n", __func__,
+ dev_seq(bus), slave_plat->cs, bitlen, bytes, flags);
+
+ if (bitlen == 0)
+ goto done;
+
+ if (bitlen % 8) {
+ flags |= SPI_XFER_END;
+ goto done;
+ }
+
+ /* empty read buffer */
+ if (readl(&regs->status) & ALTERA_SPI_STATUS_RRDY_MSK)
+ readl(&regs->rxdata);
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev, slave_plat->cs);
+
+ while (bytes--) {
+ if (txp)
+ data = *txp++;
+ else
+ data = CONFIG_ALTERA_SPI_IDLE_VAL;
+
+ debug("%s: tx:%x ", __func__, data);
+ writel(data, &regs->txdata);
+
+ start = get_timer(0);
+ while (1) {
+ reg = readl(&regs->status);
+ if (reg & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
+ if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
+ debug("%s: Transmission timed out!\n", __func__);
+ return -1;
+ }
+ }
+
+ data = readl(&regs->rxdata);
+ if (rxp)
+ *rxp++ = data & 0xff;
+
+ debug("rx:%x\n", data);
+ }
+
+done:
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ return 0;
+}
+
+static int altera_spi_set_speed(struct udevice *bus, uint speed)
+{
+ return 0;
+}
+
+static int altera_spi_set_mode(struct udevice *bus, uint mode)
+{
+ return 0;
+}
+
+static int altera_spi_probe(struct udevice *bus)
+{
+ struct altera_spi_plat *plat = dev_get_plat(bus);
+ struct altera_spi_priv *priv = dev_get_priv(bus);
+
+ priv->regs = plat->regs;
+
+ return 0;
+}
+
+static int altera_spi_of_to_plat(struct udevice *bus)
+{
+ struct altera_spi_plat *plat = dev_get_plat(bus);
+
+ plat->regs = map_physmem(dev_read_addr(bus),
+ sizeof(struct altera_spi_regs),
+ MAP_NOCACHE);
+
+ return 0;
+}
+
+static const struct dm_spi_ops altera_spi_ops = {
+ .claim_bus = altera_spi_claim_bus,
+ .release_bus = altera_spi_release_bus,
+ .xfer = altera_spi_xfer,
+ .set_speed = altera_spi_set_speed,
+ .set_mode = altera_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id altera_spi_ids[] = {
+ { .compatible = "altr,spi-1.0" },
+ {}
+};
+
+U_BOOT_DRIVER(altera_spi) = {
+ .name = "altera_spi",
+ .id = UCLASS_SPI,
+ .of_match = altera_spi_ids,
+ .ops = &altera_spi_ops,
+ .of_to_plat = altera_spi_of_to_plat,
+ .plat_auto = sizeof(struct altera_spi_plat),
+ .priv_auto = sizeof(struct altera_spi_priv),
+ .probe = altera_spi_probe,
+};
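The altera_spi_xfer() hook above asserts the chip select on SPI_XFER_BEGIN and drops it on SPI_XFER_END. From the consumer side of the DM_SPI uclass described in the Kconfig help, a command-plus-read sequence looks roughly like the sketch below; the opcode, buffer and helper name are illustrative, and error handling is abbreviated.

#include <spi.h>

/* Hypothetical helper: send a one-byte command and read 'len' bytes back
 * within a single chip-select assertion.
 */
static int example_cmd_read(struct spi_slave *slave, u8 cmd, u8 *buf, int len)
{
	int ret;

	/* Routed by the uclass to the bus driver, e.g. altera_spi_claim_bus() */
	ret = spi_claim_bus(slave);
	if (ret)
		return ret;

	/* First transfer: keep CS asserted after the command byte */
	ret = spi_xfer(slave, 8, &cmd, NULL, SPI_XFER_BEGIN);
	if (!ret)
		/* Second transfer: clock in the reply, then release CS */
		ret = spi_xfer(slave, len * 8, NULL, buf, SPI_XFER_END);

	spi_release_bus(slave);

	return ret;
}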
diff --git a/roms/u-boot/drivers/spi/atcspi200_spi.c b/roms/u-boot/drivers/spi/atcspi200_spi.c
new file mode 100644
index 000000000..775b9ffc2
--- /dev/null
+++ b/roms/u-boot/drivers/spi/atcspi200_spi.c
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Andestech ATCSPI200 SPI controller driver.
+ *
+ * Copyright 2017 Andes Technology, Inc.
+ * Author: Rick Chen (rick@andestech.com)
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <dm.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define MAX_TRANSFER_LEN 512
+#define CHUNK_SIZE 1
+#define SPI_TIMEOUT 0x100000
+#define SPI0_BUS 0
+#define SPI1_BUS 1
+#define SPI0_BASE 0xf0b00000
+#define SPI1_BASE 0xf0f00000
+#define NSPI_MAX_CS_NUM 1
+
+struct atcspi200_spi_regs {
+ u32 rev;
+ u32 reserve1[3];
+ u32 format; /* 0x10 */
+#define DATA_LENGTH(x) ((x-1)<<8)
+ u32 pio;
+ u32 reserve2[2];
+ u32 tctrl; /* 0x20 */
+#define TRAMODE_OFFSET 24
+#define TRAMODE_MASK (0x0F<<TRAMODE_OFFSET)
+#define TRAMODE_WR_SYNC (0<<TRAMODE_OFFSET)
+#define TRAMODE_WO (1<<TRAMODE_OFFSET)
+#define TRAMODE_RO (2<<TRAMODE_OFFSET)
+#define TRAMODE_WR (3<<TRAMODE_OFFSET)
+#define TRAMODE_RW (4<<TRAMODE_OFFSET)
+#define TRAMODE_WDR (5<<TRAMODE_OFFSET)
+#define TRAMODE_RDW (6<<TRAMODE_OFFSET)
+#define TRAMODE_NONE (7<<TRAMODE_OFFSET)
+#define TRAMODE_DW (8<<TRAMODE_OFFSET)
+#define TRAMODE_DR (9<<TRAMODE_OFFSET)
+#define WCNT_OFFSET 12
+#define WCNT_MASK (0x1FF<<WCNT_OFFSET)
+#define RCNT_OFFSET 0
+#define RCNT_MASK (0x1FF<<RCNT_OFFSET)
+ u32 cmd;
+ u32 addr;
+ u32 data;
+ u32 ctrl; /* 0x30 */
+#define TXFTH_OFFSET 16
+#define RXFTH_OFFSET 8
+#define TXDMAEN (1<<4)
+#define RXDMAEN (1<<3)
+#define TXFRST (1<<2)
+#define RXFRST (1<<1)
+#define SPIRST (1<<0)
+ u32 status;
+#define TXFFL (1<<23)
+#define TXEPTY (1<<22)
+#define TXFVE_MASK (0x1F<<16)
+#define RXFEM (1<<14)
+#define RXFVE_OFFSET (8)
+#define RXFVE_MASK (0x1F<<RXFVE_OFFSET)
+#define SPIBSY (1<<0)
+ u32 inten;
+ u32 intsta;
+ u32 timing; /* 0x40 */
+#define SCLK_DIV_MASK 0xFF
+};
+
+struct nds_spi_slave {
+ volatile struct atcspi200_spi_regs *regs;
+ int to;
+ unsigned int freq;
+ ulong clock;
+ unsigned int mode;
+ u8 num_cs;
+ unsigned int mtiming;
+ size_t cmd_len;
+ u8 cmd_buf[16];
+ size_t data_len;
+ size_t tran_len;
+ u8 *din;
+ u8 *dout;
+ unsigned int max_transfer_length;
+};
+
+static int __atcspi200_spi_set_speed(struct nds_spi_slave *ns)
+{
+ u32 tm;
+ u8 div;
+ tm = ns->regs->timing;
+ tm &= ~SCLK_DIV_MASK;
+
+ if (ns->freq >= ns->clock) {
+ div = 0xff;
+ } else {
+ for (div = 0; div < 0xff; div++) {
+ if (ns->freq >= ns->clock / (2 * (div + 1)))
+ break;
+ }
+ }
+
+ tm |= div;
+ ns->regs->timing = tm;
+
+ return 0;
+}
+
+static int __atcspi200_spi_claim_bus(struct nds_spi_slave *ns)
+{
+ unsigned int format = 0;
+
+ ns->regs->ctrl |= (TXFRST | RXFRST | SPIRST);
+ while ((ns->regs->ctrl & (TXFRST | RXFRST | SPIRST)) && (ns->to--))
+ if (!ns->to)
+ return -EINVAL;
+
+ ns->cmd_len = 0;
+ format = ns->mode | DATA_LENGTH(8);
+ ns->regs->format = format;
+ __atcspi200_spi_set_speed(ns);
+
+ return 0;
+}
+
+static int __atcspi200_spi_release_bus(struct nds_spi_slave *ns)
+{
+ /* do nothing */
+ return 0;
+}
+
+static int __atcspi200_spi_start(struct nds_spi_slave *ns)
+{
+ int i, olen = 0;
+ int tc = ns->regs->tctrl;
+
+ tc &= ~(WCNT_MASK | RCNT_MASK | TRAMODE_MASK);
+ if (ns->din && ns->cmd_len)
+ tc |= TRAMODE_WR;
+ else if (ns->din)
+ tc |= TRAMODE_RO;
+ else
+ tc |= TRAMODE_WO;
+
+ if (ns->dout)
+ olen = ns->tran_len;
+ tc |= (ns->cmd_len + olen - 1) << WCNT_OFFSET;
+
+ if (ns->din)
+ tc |= (ns->tran_len - 1) << RCNT_OFFSET;
+
+ ns->regs->tctrl = tc;
+ ns->regs->cmd = 1;
+
+ for (i = 0; i < ns->cmd_len; i++)
+ ns->regs->data = ns->cmd_buf[i];
+
+ return 0;
+}
+
+static int __atcspi200_spi_stop(struct nds_spi_slave *ns)
+{
+ ns->regs->timing = ns->mtiming;
+ while ((ns->regs->status & SPIBSY) && (ns->to--))
+ if (!ns->to)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void __nspi_espi_tx(struct nds_spi_slave *ns, const void *dout)
+{
+ ns->regs->data = *(u8 *)dout;
+}
+
+static int __nspi_espi_rx(struct nds_spi_slave *ns, void *din, unsigned int bytes)
+{
+ *(u8 *)din = ns->regs->data;
+ return bytes;
+}
+
+
+static int __atcspi200_spi_xfer(struct nds_spi_slave *ns,
+ unsigned int bitlen, const void *data_out, void *data_in,
+ unsigned long flags)
+{
+ unsigned int event, rx_bytes;
+ const void *dout = NULL;
+ void *din = NULL;
+ int num_blks, num_chunks, max_tran_len, tran_len;
+ int num_bytes;
+ u8 *cmd_buf = ns->cmd_buf;
+ size_t cmd_len = ns->cmd_len;
+ unsigned long data_len = bitlen / 8;
+ int rf_cnt;
+ int ret = 0, timeout = 0;
+
+ max_tran_len = ns->max_transfer_length;
+ switch (flags) {
+ case SPI_XFER_BEGIN:
+ cmd_len = ns->cmd_len = data_len;
+ memcpy(cmd_buf, data_out, cmd_len);
+ return 0;
+
+ case 0:
+ case SPI_XFER_END:
+ if (bitlen == 0) {
+ return 0;
+ }
+ ns->data_len = data_len;
+ ns->din = (u8 *)data_in;
+ ns->dout = (u8 *)data_out;
+ break;
+
+ case SPI_XFER_BEGIN | SPI_XFER_END:
+ ns->data_len = 0;
+ ns->din = 0;
+ ns->dout = 0;
+ cmd_len = ns->cmd_len = data_len;
+ memcpy(cmd_buf, data_out, cmd_len);
+ data_out = 0;
+ data_len = 0;
+ __atcspi200_spi_start(ns);
+ break;
+ }
+ if (data_out)
+ debug("spi_xfer: data_out %08X(%p) data_in %08X(%p) data_len %lu\n",
+ *(uint *)data_out, data_out, *(uint *)data_in,
+ data_in, data_len);
+ num_chunks = DIV_ROUND_UP(data_len, max_tran_len);
+ din = data_in;
+ dout = data_out;
+ while (num_chunks--) {
+ tran_len = min((size_t)data_len, (size_t)max_tran_len);
+ ns->tran_len = tran_len;
+ num_blks = DIV_ROUND_UP(tran_len, CHUNK_SIZE);
+ num_bytes = tran_len % CHUNK_SIZE;
+ timeout = SPI_TIMEOUT;
+ if (num_bytes == 0)
+ num_bytes = CHUNK_SIZE;
+ __atcspi200_spi_start(ns);
+
+ while (num_blks && (timeout--)) {
+ event = in_le32(&ns->regs->status);
+ if ((event & TXEPTY) && (data_out)) {
+ __nspi_espi_tx(ns, dout);
+ num_blks -= CHUNK_SIZE;
+ dout += CHUNK_SIZE;
+ }
+
+ if ((event & RXFVE_MASK) && (data_in)) {
+ rf_cnt = (event & RXFVE_MASK) >> RXFVE_OFFSET;
+ if (rf_cnt >= CHUNK_SIZE)
+ rx_bytes = CHUNK_SIZE;
+ else if (num_blks == 1 && rf_cnt == num_bytes)
+ rx_bytes = num_bytes;
+ else
+ continue;
+
+ if (__nspi_espi_rx(ns, din, rx_bytes) == rx_bytes) {
+ num_blks -= CHUNK_SIZE;
+ din = (unsigned char *)din + rx_bytes;
+ }
+ }
+
+ if (!timeout) {
+ debug("spi_xfer: %s() timeout\n", __func__);
+ break;
+ }
+ }
+
+ data_len -= tran_len;
+ if (data_len) {
+ ns->cmd_buf[1] += (tran_len >> 16) & 0xff;
+ ns->cmd_buf[2] += (tran_len >> 8) & 0xff;
+ ns->cmd_buf[3] += tran_len & 0xff;
+ ns->data_len = data_len;
+ }
+ ret = __atcspi200_spi_stop(ns);
+ }
+ ret = __atcspi200_spi_stop(ns);
+
+ return ret;
+}
+
+static int atcspi200_spi_set_speed(struct udevice *bus, uint max_hz)
+{
+ struct nds_spi_slave *ns = dev_get_priv(bus);
+
+ debug("%s speed %u\n", __func__, max_hz);
+
+ ns->freq = max_hz;
+ __atcspi200_spi_set_speed(ns);
+
+ return 0;
+}
+
+static int atcspi200_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct nds_spi_slave *ns = dev_get_priv(bus);
+
+ debug("%s mode %u\n", __func__, mode);
+ ns->mode = mode;
+
+ return 0;
+}
+
+static int atcspi200_spi_claim_bus(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *slave_plat =
+ dev_get_parent_plat(dev);
+ struct udevice *bus = dev->parent;
+ struct nds_spi_slave *ns = dev_get_priv(bus);
+
+ if (slave_plat->cs >= ns->num_cs) {
+ printf("Invalid SPI chipselect\n");
+ return -EINVAL;
+ }
+
+ return __atcspi200_spi_claim_bus(ns);
+}
+
+static int atcspi200_spi_release_bus(struct udevice *dev)
+{
+ struct nds_spi_slave *ns = dev_get_priv(dev->parent);
+
+ return __atcspi200_spi_release_bus(ns);
+}
+
+static int atcspi200_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din,
+ unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct nds_spi_slave *ns = dev_get_priv(bus);
+
+ return __atcspi200_spi_xfer(ns, bitlen, dout, din, flags);
+}
+
+static int atcspi200_spi_get_clk(struct udevice *bus)
+{
+ struct nds_spi_slave *ns = dev_get_priv(bus);
+ struct clk clk;
+ ulong clk_rate;
+ int ret;
+
+ ret = clk_get_by_index(bus, 0, &clk);
+ if (ret)
+ return -EINVAL;
+
+ clk_rate = clk_get_rate(&clk);
+ if (!clk_rate)
+ return -EINVAL;
+
+ ns->clock = clk_rate;
+ clk_free(&clk);
+
+ return 0;
+}
+
+static int atcspi200_spi_probe(struct udevice *bus)
+{
+ struct nds_spi_slave *ns = dev_get_priv(bus);
+
+ ns->to = SPI_TIMEOUT;
+ ns->max_transfer_length = MAX_TRANSFER_LEN;
+ ns->mtiming = ns->regs->timing;
+ atcspi200_spi_get_clk(bus);
+
+ return 0;
+}
+
+static int atcspi200_ofdata_to_platadata(struct udevice *bus)
+{
+ struct nds_spi_slave *ns = dev_get_priv(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ ns->regs = map_physmem(dev_read_addr(bus),
+ sizeof(struct atcspi200_spi_regs),
+ MAP_NOCACHE);
+ if (!ns->regs) {
+ printf("%s: could not map device address\n", __func__);
+ return -EINVAL;
+ }
+ ns->num_cs = fdtdec_get_int(blob, node, "num-cs", 4);
+
+ return 0;
+}
+
+static const struct dm_spi_ops atcspi200_spi_ops = {
+ .claim_bus = atcspi200_spi_claim_bus,
+ .release_bus = atcspi200_spi_release_bus,
+ .xfer = atcspi200_spi_xfer,
+ .set_speed = atcspi200_spi_set_speed,
+ .set_mode = atcspi200_spi_set_mode,
+};
+
+static const struct udevice_id atcspi200_spi_ids[] = {
+ { .compatible = "andestech,atcspi200" },
+ { }
+};
+
+U_BOOT_DRIVER(atcspi200_spi) = {
+ .name = "atcspi200_spi",
+ .id = UCLASS_SPI,
+ .of_match = atcspi200_spi_ids,
+ .ops = &atcspi200_spi_ops,
+ .of_to_plat = atcspi200_ofdata_to_platadata,
+ .priv_auto = sizeof(struct nds_spi_slave),
+ .probe = atcspi200_spi_probe,
+};
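In __atcspi200_spi_set_speed() above, the SCLK divider is chosen so that spi_clk / (2 * (div + 1)) does not exceed the requested rate, and 0xff is programmed when the request is at or above the input clock. A small stand-alone restatement of that selection follows, with a worked example in the comment; the clock values and helper name are made up for illustration.

#include <linux/types.h>

/* Worked example: spi_clk = 50 MHz, freq = 10 MHz
 *   div = 0 -> 25 MHz (too fast)
 *   div = 1 -> 12.5 MHz (too fast)
 *   div = 2 -> 8.33 MHz (first rate not above 10 MHz)  => div = 2
 */
static u8 atcspi200_pick_div(unsigned long spi_clk, unsigned long freq)
{
	u8 div;

	if (freq >= spi_clk)
		return 0xff;	/* special value used by the driver above */

	for (div = 0; div < 0xff; div++)
		if (freq >= spi_clk / (2 * (div + 1)))
			break;

	return div;
}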
diff --git a/roms/u-boot/drivers/spi/ath79_spi.c b/roms/u-boot/drivers/spi/ath79_spi.c
new file mode 100644
index 000000000..205567ef5
--- /dev/null
+++ b/roms/u-boot/drivers/spi/ath79_spi.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2015-2016 Wills Wang <wills.wang@live.com>
+ */
+
+#include <common.h>
+#include <clock_legacy.h>
+#include <spi.h>
+#include <dm.h>
+#include <div64.h>
+#include <errno.h>
+#include <time.h>
+#include <asm/io.h>
+#include <asm/addrspace.h>
+#include <asm/types.h>
+#include <dm/pinctrl.h>
+#include <mach/ar71xx_regs.h>
+
+/* CLOCK_DIVIDER = 3 (SPI clock = 200 / 8 ~ 25 MHz) */
+#define ATH79_SPI_CLK_DIV(x) (((x) >> 1) - 1)
+#define ATH79_SPI_RRW_DELAY_FACTOR 12000
+#define ATH79_SPI_MHZ (1000 * 1000)
+
+struct ath79_spi_priv {
+ void __iomem *regs;
+ u32 rrw_delay;
+};
+
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct ath79_spi_priv *priv = dev_get_priv(bus);
+
+ writel(AR71XX_SPI_FS_GPIO, priv->regs + AR71XX_SPI_REG_FS);
+ writel(AR71XX_SPI_IOC_CS_ALL, priv->regs + AR71XX_SPI_REG_IOC);
+}
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct ath79_spi_priv *priv = dev_get_priv(bus);
+
+ writel(AR71XX_SPI_IOC_CS_ALL, priv->regs + AR71XX_SPI_REG_IOC);
+ writel(0, priv->regs + AR71XX_SPI_REG_FS);
+}
+
+static int ath79_spi_claim_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+static int ath79_spi_release_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+static int ath79_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct ath79_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave = dev_get_parent_plat(dev);
+ u8 *rx = din;
+ const u8 *tx = dout;
+ u8 curbyte, curbitlen, restbits;
+ u32 bytes = bitlen / 8;
+ u32 out, in;
+ u64 tick;
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ restbits = (bitlen % 8);
+ if (restbits)
+ bytes++;
+
+ out = AR71XX_SPI_IOC_CS_ALL & ~(AR71XX_SPI_IOC_CS(slave->cs));
+ while (bytes > 0) {
+ bytes--;
+ curbyte = 0;
+ if (tx)
+ curbyte = *tx++;
+
+ if (restbits && !bytes) {
+ curbitlen = restbits;
+ curbyte <<= 8 - restbits;
+ } else {
+ curbitlen = 8;
+ }
+
+ for (curbyte <<= (8 - curbitlen); curbitlen; curbitlen--) {
+ if (curbyte & 0x80)
+ out |= AR71XX_SPI_IOC_DO;
+ else
+ out &= ~(AR71XX_SPI_IOC_DO);
+
+ writel(out, priv->regs + AR71XX_SPI_REG_IOC);
+
+ /* delay for low level */
+ if (priv->rrw_delay) {
+ tick = get_ticks() + priv->rrw_delay;
+ while (get_ticks() < tick)
+ /*NOP*/;
+ }
+
+ writel(out | AR71XX_SPI_IOC_CLK,
+ priv->regs + AR71XX_SPI_REG_IOC);
+
+ /* delay for high level */
+ if (priv->rrw_delay) {
+ tick = get_ticks() + priv->rrw_delay;
+ while (get_ticks() < tick)
+ /*NOP*/;
+ }
+
+ curbyte <<= 1;
+ }
+
+ if (!bytes)
+ writel(out, priv->regs + AR71XX_SPI_REG_IOC);
+
+ in = readl(priv->regs + AR71XX_SPI_REG_RDS);
+ if (rx) {
+ if (restbits && !bytes)
+ *rx++ = (in << (8 - restbits));
+ else
+ *rx++ = in;
+ }
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ return 0;
+}
+
+
+static int ath79_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct ath79_spi_priv *priv = dev_get_priv(bus);
+ u32 val, div = 0;
+ u64 time;
+
+ if (speed)
+ div = get_bus_freq(0) / speed;
+
+ if (div > 63)
+ div = 63;
+
+ if (div < 5)
+ div = 5;
+
+ /* calculate delay */
+ time = get_tbclk();
+ do_div(time, speed / 2);
+ val = get_bus_freq(0) / ATH79_SPI_MHZ;
+ val = ATH79_SPI_RRW_DELAY_FACTOR / val;
+ if (time > val)
+ priv->rrw_delay = time - val + 1;
+ else
+ priv->rrw_delay = 0;
+
+ writel(AR71XX_SPI_FS_GPIO, priv->regs + AR71XX_SPI_REG_FS);
+ clrsetbits_be32(priv->regs + AR71XX_SPI_REG_CTRL,
+ AR71XX_SPI_CTRL_DIV_MASK,
+ ATH79_SPI_CLK_DIV(div));
+ writel(0, priv->regs + AR71XX_SPI_REG_FS);
+ return 0;
+}
+
+static int ath79_spi_set_mode(struct udevice *bus, uint mode)
+{
+ return 0;
+}
+
+static int ath79_spi_probe(struct udevice *bus)
+{
+ struct ath79_spi_priv *priv = dev_get_priv(bus);
+ fdt_addr_t addr;
+
+ addr = dev_read_addr(bus);
+ if (addr == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ priv->regs = map_physmem(addr,
+ AR71XX_SPI_SIZE,
+ MAP_NOCACHE);
+
+ /* Init SPI Hardware, disable remap, set clock */
+ writel(AR71XX_SPI_FS_GPIO, priv->regs + AR71XX_SPI_REG_FS);
+ writel(AR71XX_SPI_CTRL_RD | ATH79_SPI_CLK_DIV(8),
+ priv->regs + AR71XX_SPI_REG_CTRL);
+ writel(0, priv->regs + AR71XX_SPI_REG_FS);
+
+ return 0;
+}
+
+static int ath79_cs_info(struct udevice *bus, uint cs,
+ struct spi_cs_info *info)
+{
+ /* Always allow activity on CS 0/1/2 */
+ if (cs >= 3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct dm_spi_ops ath79_spi_ops = {
+ .claim_bus = ath79_spi_claim_bus,
+ .release_bus = ath79_spi_release_bus,
+ .xfer = ath79_spi_xfer,
+ .set_speed = ath79_spi_set_speed,
+ .set_mode = ath79_spi_set_mode,
+ .cs_info = ath79_cs_info,
+};
+
+static const struct udevice_id ath79_spi_ids[] = {
+ { .compatible = "qca,ar7100-spi" },
+ {}
+};
+
+U_BOOT_DRIVER(ath79_spi) = {
+ .name = "ath79_spi",
+ .id = UCLASS_SPI,
+ .of_match = ath79_spi_ids,
+ .ops = &ath79_spi_ops,
+ .priv_auto = sizeof(struct ath79_spi_priv),
+ .probe = ath79_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/atmel-quadspi.c b/roms/u-boot/drivers/spi/atmel-quadspi.c
new file mode 100644
index 000000000..b1a3aa9a2
--- /dev/null
+++ b/roms/u-boot/drivers/spi/atmel-quadspi.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Atmel QSPI Controller
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ * Copyright (C) 2018 Cryptera A/S
+ *
+ * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
+ */
+
+#include <malloc.h>
+#include <asm/io.h>
+#include <clk.h>
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <mach/clk.h>
+#include <spi.h>
+#include <spi-mem.h>
+
+/* QSPI register offsets */
+#define QSPI_CR 0x0000 /* Control Register */
+#define QSPI_MR 0x0004 /* Mode Register */
+#define QSPI_RD 0x0008 /* Receive Data Register */
+#define QSPI_TD 0x000c /* Transmit Data Register */
+#define QSPI_SR 0x0010 /* Status Register */
+#define QSPI_IER 0x0014 /* Interrupt Enable Register */
+#define QSPI_IDR 0x0018 /* Interrupt Disable Register */
+#define QSPI_IMR 0x001c /* Interrupt Mask Register */
+#define QSPI_SCR 0x0020 /* Serial Clock Register */
+
+#define QSPI_IAR 0x0030 /* Instruction Address Register */
+#define QSPI_ICR 0x0034 /* Instruction Code Register */
+#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
+#define QSPI_IFR 0x0038 /* Instruction Frame Register */
+#define QSPI_RICR 0x003C /* Read Instruction Code Register */
+
+#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
+#define QSPI_SKR 0x0044 /* Scrambling Key Register */
+
+#define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
+#define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
+
+#define QSPI_VERSION 0x00FC /* Version Register */
+
+/* Bitfields in QSPI_CR (Control Register) */
+#define QSPI_CR_QSPIEN BIT(0)
+#define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_LASTXFER BIT(24)
+
+/* Bitfields in QSPI_MR (Mode Register) */
+#define QSPI_MR_SMM BIT(0)
+#define QSPI_MR_LLB BIT(1)
+#define QSPI_MR_WDRBT BIT(2)
+#define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
+#define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
+#define QSPI_MR_CSMODE_LASTXFER (1 << 4)
+#define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
+#define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
+#define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
+#define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
+#define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
+#define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK)
+
+/* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */
+#define QSPI_SR_RDRF BIT(0)
+#define QSPI_SR_TDRE BIT(1)
+#define QSPI_SR_TXEMPTY BIT(2)
+#define QSPI_SR_OVRES BIT(3)
+#define QSPI_SR_CSR BIT(8)
+#define QSPI_SR_CSS BIT(9)
+#define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_QSPIENS BIT(24)
+
+#define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
+
+/* Bitfields in QSPI_SCR (Serial Clock Register) */
+#define QSPI_SCR_CPOL BIT(0)
+#define QSPI_SCR_CPHA BIT(1)
+#define QSPI_SCR_SCBR_MASK GENMASK(15, 8)
+#define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK)
+#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
+#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
+
+/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
+#define QSPI_ICR_INST_MASK GENMASK(7, 0)
+#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
+#define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
+
+/* Bitfields in QSPI_IFR (Instruction Frame Register) */
+#define QSPI_IFR_WIDTH_MASK GENMASK(2, 0)
+#define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0)
+#define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0)
+#define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0)
+#define QSPI_IFR_WIDTH_DUAL_IO (3 << 0)
+#define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
+#define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
+#define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_INSTEN BIT(4)
+#define QSPI_IFR_ADDREN BIT(5)
+#define QSPI_IFR_OPTEN BIT(6)
+#define QSPI_IFR_DATAEN BIT(7)
+#define QSPI_IFR_OPTL_MASK GENMASK(9, 8)
+#define QSPI_IFR_OPTL_1BIT (0 << 8)
+#define QSPI_IFR_OPTL_2BIT (1 << 8)
+#define QSPI_IFR_OPTL_4BIT (2 << 8)
+#define QSPI_IFR_OPTL_8BIT (3 << 8)
+#define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_TFRTYP_MEM BIT(12)
+#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
+#define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
+#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
+
+/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
+#define QSPI_SMR_SCREN BIT(0)
+#define QSPI_SMR_RVDIS BIT(1)
+
+/* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
+#define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
+#define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
+
+/* Bitfields in QSPI_WPSR (Write Protection Status Register) */
+#define QSPI_WPSR_WPVS BIT(0)
+#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
+#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
+
+struct atmel_qspi_caps {
+ bool has_qspick;
+ bool has_ricr;
+};
+
+struct atmel_qspi {
+ void __iomem *regs;
+ void __iomem *mem;
+ resource_size_t mmap_size;
+ const struct atmel_qspi_caps *caps;
+ struct udevice *dev;
+ ulong bus_clk_rate;
+ u32 mr;
+};
+
+struct atmel_qspi_mode {
+ u8 cmd_buswidth;
+ u8 addr_buswidth;
+ u8 data_buswidth;
+ u32 config;
+};
+
+static const struct atmel_qspi_mode atmel_qspi_modes[] = {
+ { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
+ { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
+ { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
+ { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
+ { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
+ { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
+ { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
+};
+
+#ifdef VERBOSE_DEBUG
+static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
+{
+ switch (offset) {
+ case QSPI_CR:
+ return "CR";
+ case QSPI_MR:
+ return "MR";
+ case QSPI_RD:
+ return "RD";
+ case QSPI_TD:
+ return "TD";
+ case QSPI_SR:
+ return "SR";
+ case QSPI_IER:
+ return "IER";
+ case QSPI_IDR:
+ return "IDR";
+ case QSPI_IMR:
+ return "IMR";
+ case QSPI_SCR:
+ return "SCR";
+ case QSPI_IAR:
+ return "IAR";
+ case QSPI_ICR:
+ return "ICR/WICR";
+ case QSPI_IFR:
+ return "IFR";
+ case QSPI_RICR:
+ return "RICR";
+ case QSPI_SMR:
+ return "SMR";
+ case QSPI_SKR:
+ return "SKR";
+ case QSPI_WPMR:
+ return "WPMR";
+ case QSPI_WPSR:
+ return "WPSR";
+ case QSPI_VERSION:
+ return "VERSION";
+ default:
+ snprintf(tmp, sz, "0x%02x", offset);
+ break;
+ }
+
+ return tmp;
+}
+#endif /* VERBOSE_DEBUG */
+
+static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
+{
+ u32 value = readl(aq->regs + offset);
+
+#ifdef VERBOSE_DEBUG
+ char tmp[16];
+
+ dev_vdbg(aq->dev, "read 0x%08x from %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ return value;
+}
+
+static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
+{
+#ifdef VERBOSE_DEBUG
+ char tmp[16];
+
+ dev_vdbg(aq->dev, "write 0x%08x into %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ writel(value, aq->regs + offset);
+}
+
+static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
+ const struct atmel_qspi_mode *mode)
+{
+ if (op->cmd.buswidth != mode->cmd_buswidth)
+ return false;
+
+ if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
+ return false;
+
+ if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
+ return false;
+
+ return true;
+}
+
+static int atmel_qspi_find_mode(const struct spi_mem_op *op)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
+ if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
+ return i;
+
+ return -ENOTSUPP;
+}
+
+static bool atmel_qspi_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ if (atmel_qspi_find_mode(op) < 0)
+ return false;
+
+ /* special case not supported by hardware */
+ if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
+ op->dummy.nbytes == 0)
+ return false;
+
+ return true;
+}
+
+static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
+ const struct spi_mem_op *op, u32 *offset)
+{
+ u32 iar, icr, ifr;
+ u32 dummy_cycles = 0;
+ int mode;
+
+ iar = 0;
+ icr = QSPI_ICR_INST(op->cmd.opcode);
+ ifr = QSPI_IFR_INSTEN;
+
+ mode = atmel_qspi_find_mode(op);
+ if (mode < 0)
+ return mode;
+ ifr |= atmel_qspi_modes[mode].config;
+
+ if (op->dummy.buswidth && op->dummy.nbytes)
+ dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+
+ /*
+ * The controller allows 24 and 32-bit addressing while NAND-flash
+ * requires 16-bit long. Handling 8-bit long addresses is done using
+ * the option field. For the 16-bit addresses, the workaround depends
+ * of the number of requested dummy bits. If there are 8 or more dummy
+ * cycles, the address is shifted and sent with the first dummy byte.
+ * Otherwise opcode is disabled and the first byte of the address
+ * contains the command opcode (works only if the opcode and address
+ * use the same buswidth). The limitation is when the 16-bit address is
+ * used without enough dummy cycles and the opcode is using a different
+ * buswidth than the address.
+ */
+ if (op->addr.buswidth) {
+ switch (op->addr.nbytes) {
+ case 0:
+ break;
+ case 1:
+ ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
+ icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
+ break;
+ case 2:
+ if (dummy_cycles < 8 / op->addr.buswidth) {
+ ifr &= ~QSPI_IFR_INSTEN;
+ ifr |= QSPI_IFR_ADDREN;
+ iar = (op->cmd.opcode << 16) |
+ (op->addr.val & 0xffff);
+ } else {
+ ifr |= QSPI_IFR_ADDREN;
+ iar = (op->addr.val << 8) & 0xffffff;
+ dummy_cycles -= 8 / op->addr.buswidth;
+ }
+ break;
+ case 3:
+ ifr |= QSPI_IFR_ADDREN;
+ iar = op->addr.val & 0xffffff;
+ break;
+ case 4:
+ ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
+ iar = op->addr.val & 0x7ffffff;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ /* offset of the data access in the QSPI memory space */
+ *offset = iar;
+
+ /* Set number of dummy cycles */
+ if (dummy_cycles)
+ ifr |= QSPI_IFR_NBDUM(dummy_cycles);
+
+ /* Set data enable */
+ if (op->data.nbytes)
+ ifr |= QSPI_IFR_DATAEN;
+
+ /*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+ if (aq->mr != QSPI_MR_SMM) {
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
+ }
+
+ /* Clear pending interrupts */
+ (void)atmel_qspi_read(aq, QSPI_SR);
+
+ if (aq->caps->has_ricr) {
+ if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ ifr |= QSPI_IFR_APBTFRTYP_READ;
+
+ /* Set QSPI Instruction Frame registers */
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ atmel_qspi_write(icr, aq, QSPI_RICR);
+ else
+ atmel_qspi_write(icr, aq, QSPI_WICR);
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
+ } else {
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
+
+ /* Set QSPI Instruction Frame registers */
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+ atmel_qspi_write(icr, aq, QSPI_ICR);
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
+ }
+
+ return 0;
+}
+
+static int atmel_qspi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct atmel_qspi *aq = dev_get_priv(slave->dev->parent);
+ u32 sr, imr, offset;
+ int err;
+
+ /*
+ * Check if the address exceeds the MMIO window size. An improvement
+ * would be to add support for regular SPI mode and fall back to it
+ * when the flash memories overrun the controller's memory space.
+ */
+ if (op->addr.val + op->data.nbytes > aq->mmap_size)
+ return -ENOTSUPP;
+
+ err = atmel_qspi_set_cfg(aq, op, &offset);
+ if (err)
+ return err;
+
+ /* Skip to the final steps if there is no data */
+ if (op->data.nbytes) {
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)atmel_qspi_read(aq, QSPI_IFR);
+
+ /* Send/Receive data */
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ memcpy_fromio(op->data.buf.in, aq->mem + offset,
+ op->data.nbytes);
+ else
+ memcpy_toio(aq->mem + offset, op->data.buf.out,
+ op->data.nbytes);
+
+ /* Release the chip-select */
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+ }
+
+ /* Poll INSTruction End and Chip Select Rise flags. */
+ imr = QSPI_SR_INSTRE | QSPI_SR_CSR;
+ return readl_poll_timeout(aq->regs + QSPI_SR, sr, (sr & imr) == imr,
+ 1000000);
+}
+
+static int atmel_qspi_set_speed(struct udevice *bus, uint hz)
+{
+ struct atmel_qspi *aq = dev_get_priv(bus);
+ u32 scr, scbr, mask, new_value;
+
+ /* Compute the QSPI baudrate */
+ scbr = DIV_ROUND_UP(aq->bus_clk_rate, hz);
+ if (scbr > 0)
+ scbr--;
+
+ new_value = QSPI_SCR_SCBR(scbr);
+ mask = QSPI_SCR_SCBR_MASK;
+
+ scr = atmel_qspi_read(aq, QSPI_SCR);
+ if ((scr & mask) == new_value)
+ return 0;
+
+ scr = (scr & ~mask) | new_value;
+ atmel_qspi_write(scr, aq, QSPI_SCR);
+
+ return 0;
+}
+
+static int atmel_qspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct atmel_qspi *aq = dev_get_priv(bus);
+ u32 scr, mask, new_value = 0;
+
+ if (mode & SPI_CPOL)
+ new_value = QSPI_SCR_CPOL;
+ if (mode & SPI_CPHA)
+ new_value = QSPI_SCR_CPHA;
+
+ mask = QSPI_SCR_CPOL | QSPI_SCR_CPHA;
+
+ scr = atmel_qspi_read(aq, QSPI_SCR);
+ if ((scr & mask) == new_value)
+ return 0;
+
+ scr = (scr & ~mask) | new_value;
+ atmel_qspi_write(scr, aq, QSPI_SCR);
+
+ return 0;
+}
+
+static int atmel_qspi_enable_clk(struct udevice *dev)
+{
+ struct atmel_qspi *aq = dev_get_priv(dev);
+ struct clk pclk, qspick;
+ int ret;
+
+ ret = clk_get_by_name(dev, "pclk", &pclk);
+ if (ret)
+ ret = clk_get_by_index(dev, 0, &pclk);
+
+ if (ret) {
+ dev_err(dev, "Missing QSPI peripheral clock\n");
+ return ret;
+ }
+
+ ret = clk_enable(&pclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable QSPI peripheral clock\n");
+ goto free_pclk;
+ }
+
+ if (aq->caps->has_qspick) {
+ /* Get the QSPI system clock */
+ ret = clk_get_by_name(dev, "qspick", &qspick);
+ if (ret) {
+ dev_err(dev, "Missing QSPI peripheral clock\n");
+ goto free_pclk;
+ }
+
+ ret = clk_enable(&qspick);
+ if (ret)
+ dev_err(dev, "Failed to enable QSPI system clock\n");
+ clk_free(&qspick);
+ }
+
+ aq->bus_clk_rate = clk_get_rate(&pclk);
+ if (!aq->bus_clk_rate)
+ ret = -EINVAL;
+
+free_pclk:
+ clk_free(&pclk);
+
+ return ret;
+}
+
+static void atmel_qspi_init(struct atmel_qspi *aq)
+{
+ /* Reset the QSPI controller */
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
+
+ /* Set the QSPI controller by default in Serial Memory Mode */
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
+ aq->mr = QSPI_MR_SMM;
+
+ /* Enable the QSPI controller */
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+}
+
+static int atmel_qspi_probe(struct udevice *dev)
+{
+ struct atmel_qspi *aq = dev_get_priv(dev);
+ struct resource res;
+ int ret;
+
+ aq->caps = (struct atmel_qspi_caps *)dev_get_driver_data(dev);
+ if (!aq->caps) {
+ dev_err(dev, "Could not retrieve QSPI caps\n");
+ return -EINVAL;
+ }
+
+ /* Map the registers */
+ ret = dev_read_resource_byname(dev, "qspi_base", &res);
+ if (ret) {
+ dev_err(dev, "missing registers\n");
+ return ret;
+ }
+
+ aq->regs = devm_ioremap(dev, res.start, resource_size(&res));
+ if (IS_ERR(aq->regs))
+ return PTR_ERR(aq->regs);
+
+ /* Map the AHB memory */
+ ret = dev_read_resource_byname(dev, "qspi_mmap", &res);
+ if (ret) {
+ dev_err(dev, "missing AHB memory\n");
+ return ret;
+ }
+
+ aq->mem = devm_ioremap(dev, res.start, resource_size(&res));
+ if (IS_ERR(aq->mem))
+ return PTR_ERR(aq->mem);
+
+ aq->mmap_size = resource_size(&res);
+
+ ret = atmel_qspi_enable_clk(dev);
+ if (ret)
+ return ret;
+
+ aq->dev = dev;
+
+ atmel_qspi_init(aq);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
+ .supports_op = atmel_qspi_supports_op,
+ .exec_op = atmel_qspi_exec_op,
+};
+
+static const struct dm_spi_ops atmel_qspi_ops = {
+ .set_speed = atmel_qspi_set_speed,
+ .set_mode = atmel_qspi_set_mode,
+ .mem_ops = &atmel_qspi_mem_ops,
+};
+
+static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
+
+static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
+ .has_qspick = true,
+ .has_ricr = true,
+};
+
+static const struct udevice_id atmel_qspi_ids[] = {
+ {
+ .compatible = "atmel,sama5d2-qspi",
+ .data = (ulong)&atmel_sama5d2_qspi_caps,
+ },
+ {
+ .compatible = "microchip,sam9x60-qspi",
+ .data = (ulong)&atmel_sam9x60_qspi_caps,
+ },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(atmel_qspi) = {
+ .name = "atmel_qspi",
+ .id = UCLASS_SPI,
+ .of_match = atmel_qspi_ids,
+ .ops = &atmel_qspi_ops,
+ .priv_auto = sizeof(struct atmel_qspi),
+ .probe = atmel_qspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/atmel_spi.c b/roms/u-boot/drivers/spi/atmel_spi.c
new file mode 100644
index 000000000..702e22535
--- /dev/null
+++ b/roms/u-boot/drivers/spi/atmel_spi.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2007 Atmel Corporation
+ */
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <fdtdec.h>
+#include <spi.h>
+#include <malloc.h>
+#include <wait_bit.h>
+#include <asm/io.h>
+#include <asm/arch/clk.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/at91_spi.h>
+#if CONFIG_IS_ENABLED(DM_GPIO)
+#include <asm/gpio.h>
+#endif
+#include <linux/bitops.h>
+
+/*
+ * Register definitions for the Atmel AT32/AT91 SPI Controller
+ */
+/* Register offsets */
+#define ATMEL_SPI_CR 0x0000
+#define ATMEL_SPI_MR 0x0004
+#define ATMEL_SPI_RDR 0x0008
+#define ATMEL_SPI_TDR 0x000c
+#define ATMEL_SPI_SR 0x0010
+#define ATMEL_SPI_IER 0x0014
+#define ATMEL_SPI_IDR 0x0018
+#define ATMEL_SPI_IMR 0x001c
+#define ATMEL_SPI_CSR(x) (0x0030 + 4 * (x))
+#define ATMEL_SPI_VERSION 0x00fc
+
+/* Bits in CR */
+#define ATMEL_SPI_CR_SPIEN BIT(0)
+#define ATMEL_SPI_CR_SPIDIS BIT(1)
+#define ATMEL_SPI_CR_SWRST BIT(7)
+#define ATMEL_SPI_CR_LASTXFER BIT(24)
+
+/* Bits in MR */
+#define ATMEL_SPI_MR_MSTR BIT(0)
+#define ATMEL_SPI_MR_PS BIT(1)
+#define ATMEL_SPI_MR_PCSDEC BIT(2)
+#define ATMEL_SPI_MR_FDIV BIT(3)
+#define ATMEL_SPI_MR_MODFDIS BIT(4)
+#define ATMEL_SPI_MR_WDRBT BIT(5)
+#define ATMEL_SPI_MR_LLB BIT(7)
+#define ATMEL_SPI_MR_PCS(x) (((x) & 15) << 16)
+#define ATMEL_SPI_MR_DLYBCS(x) ((x) << 24)
+
+/* Bits in RDR */
+#define ATMEL_SPI_RDR_RD(x) (x)
+#define ATMEL_SPI_RDR_PCS(x) ((x) << 16)
+
+/* Bits in TDR */
+#define ATMEL_SPI_TDR_TD(x) (x)
+#define ATMEL_SPI_TDR_PCS(x) ((x) << 16)
+#define ATMEL_SPI_TDR_LASTXFER BIT(24)
+
+/* Bits in SR/IER/IDR/IMR */
+#define ATMEL_SPI_SR_RDRF BIT(0)
+#define ATMEL_SPI_SR_TDRE BIT(1)
+#define ATMEL_SPI_SR_MODF BIT(2)
+#define ATMEL_SPI_SR_OVRES BIT(3)
+#define ATMEL_SPI_SR_ENDRX BIT(4)
+#define ATMEL_SPI_SR_ENDTX BIT(5)
+#define ATMEL_SPI_SR_RXBUFF BIT(6)
+#define ATMEL_SPI_SR_TXBUFE BIT(7)
+#define ATMEL_SPI_SR_NSSR BIT(8)
+#define ATMEL_SPI_SR_TXEMPTY BIT(9)
+#define ATMEL_SPI_SR_SPIENS BIT(16)
+
+/* Bits in CSRx */
+#define ATMEL_SPI_CSRx_CPOL BIT(0)
+#define ATMEL_SPI_CSRx_NCPHA BIT(1)
+#define ATMEL_SPI_CSRx_CSAAT BIT(3)
+#define ATMEL_SPI_CSRx_BITS(x) ((x) << 4)
+#define ATMEL_SPI_CSRx_SCBR(x) ((x) << 8)
+#define ATMEL_SPI_CSRx_SCBR_MAX GENMASK(7, 0)
+#define ATMEL_SPI_CSRx_DLYBS(x) ((x) << 16)
+#define ATMEL_SPI_CSRx_DLYBCT(x) ((x) << 24)
+
+/* Bits in VERSION */
+#define ATMEL_SPI_VERSION_REV(x) ((x) & 0xfff)
+#define ATMEL_SPI_VERSION_MFN(x) ((x) << 16)
+
+/* Constants for CSRx:BITS */
+#define ATMEL_SPI_BITS_8 0
+#define ATMEL_SPI_BITS_9 1
+#define ATMEL_SPI_BITS_10 2
+#define ATMEL_SPI_BITS_11 3
+#define ATMEL_SPI_BITS_12 4
+#define ATMEL_SPI_BITS_13 5
+#define ATMEL_SPI_BITS_14 6
+#define ATMEL_SPI_BITS_15 7
+#define ATMEL_SPI_BITS_16 8
+
+#define MAX_CS_COUNT 4
+
+/* Register access macros */
+#define spi_readl(as, reg) \
+ readl(as->regs + ATMEL_SPI_##reg)
+#define spi_writel(as, reg, value) \
+ writel(value, as->regs + ATMEL_SPI_##reg)
+
+struct atmel_spi_plat {
+ struct at91_spi *regs;
+};
+
+struct atmel_spi_priv {
+ unsigned int freq; /* Default frequency */
+ unsigned int mode;
+ ulong bus_clk_rate;
+#if CONFIG_IS_ENABLED(DM_GPIO)
+ struct gpio_desc cs_gpios[MAX_CS_COUNT];
+#endif
+};
+
+static int atmel_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct atmel_spi_plat *bus_plat = dev_get_plat(bus);
+ struct atmel_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ struct at91_spi *reg_base = bus_plat->regs;
+ u32 cs = slave_plat->cs;
+ u32 freq = priv->freq;
+ u32 scbr, csrx, mode;
+
+ scbr = (priv->bus_clk_rate + freq - 1) / freq;
+ if (scbr > ATMEL_SPI_CSRx_SCBR_MAX)
+ return -EINVAL;
+
+ if (scbr < 1)
+ scbr = 1;
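+
+ /*
+ * Worked example of the divider above (an 84 MHz peripheral clock is
+ * assumed for illustration): a 30 MHz request gives
+ * scbr = (84000000 + 29999999) / 30000000 = 3, so the bus runs at
+ * 84 MHz / 3 = 28 MHz, the fastest rate that does not exceed the request.
+ */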
+
+ csrx = ATMEL_SPI_CSRx_SCBR(scbr);
+ csrx |= ATMEL_SPI_CSRx_BITS(ATMEL_SPI_BITS_8);
+
+ if (!(priv->mode & SPI_CPHA))
+ csrx |= ATMEL_SPI_CSRx_NCPHA;
+ if (priv->mode & SPI_CPOL)
+ csrx |= ATMEL_SPI_CSRx_CPOL;
+
+ writel(csrx, &reg_base->csr[cs]);
+
+ mode = ATMEL_SPI_MR_MSTR |
+ ATMEL_SPI_MR_MODFDIS |
+ ATMEL_SPI_MR_WDRBT |
+ ATMEL_SPI_MR_PCS(~(1 << cs));
+
+ writel(mode, &reg_base->mr);
+
+ writel(ATMEL_SPI_CR_SPIEN, &reg_base->cr);
+
+ return 0;
+}
+
+static int atmel_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct atmel_spi_plat *bus_plat = dev_get_plat(bus);
+
+ writel(ATMEL_SPI_CR_SPIDIS, &bus_plat->regs->cr);
+
+ return 0;
+}
+
+static void atmel_spi_cs_activate(struct udevice *dev)
+{
+#if CONFIG_IS_ENABLED(DM_GPIO)
+ struct udevice *bus = dev_get_parent(dev);
+ struct atmel_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ u32 cs = slave_plat->cs;
+
+ if (!dm_gpio_is_valid(&priv->cs_gpios[cs]))
+ return;
+
+ dm_gpio_set_value(&priv->cs_gpios[cs], 0);
+#endif
+}
+
+static void atmel_spi_cs_deactivate(struct udevice *dev)
+{
+#if CONFIG_IS_ENABLED(DM_GPIO)
+ struct udevice *bus = dev_get_parent(dev);
+ struct atmel_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ u32 cs = slave_plat->cs;
+
+ if (!dm_gpio_is_valid(&priv->cs_gpios[cs]))
+ return;
+
+ dm_gpio_set_value(&priv->cs_gpios[cs], 1);
+#endif
+}
+
+static int atmel_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct atmel_spi_plat *bus_plat = dev_get_plat(bus);
+ struct at91_spi *reg_base = bus_plat->regs;
+
+ u32 len_tx, len_rx, len;
+ u32 status;
+ const u8 *txp = dout;
+ u8 *rxp = din;
+ u8 value;
+
+ if (bitlen == 0)
+ goto out;
+
+ /*
+ * The controller can do non-multiple-of-8 bit
+ * transfers, but this driver currently doesn't support it.
+ *
+ * It's also not clear how such transfers are supposed to be
+ * represented as a stream of bytes; this is a limitation of
+ * the current SPI interface.
+ */
+ if (bitlen % 8) {
+ /* Errors always terminate an ongoing transfer */
+ flags |= SPI_XFER_END;
+ goto out;
+ }
+
+ len = bitlen / 8;
+
+ /*
+ * The controller can do automatic CS control, but it is
+ * somewhat quirky, and it doesn't really buy us much anyway
+ * in the context of U-Boot.
+ */
+ if (flags & SPI_XFER_BEGIN) {
+ atmel_spi_cs_activate(dev);
+
+ /*
+ * Sometimes the RDR is not empty when we get here. In theory
+ * that should not happen, but in practice it does. Read it
+ * here to be on the safe side; that also clears the OVRES
+ * flag, which is required if the following loop exits due to
+ * OVRES.
+ */
+ readl(&reg_base->rdr);
+ }
+
+ for (len_tx = 0, len_rx = 0; len_rx < len; ) {
+ status = readl(&reg_base->sr);
+
+ if (status & ATMEL_SPI_SR_OVRES)
+ return -1;
+
+ if ((len_tx < len) && (status & ATMEL_SPI_SR_TDRE)) {
+ if (txp)
+ value = *txp++;
+ else
+ value = 0;
+ writel(value, &reg_base->tdr);
+ len_tx++;
+ }
+
+ if (status & ATMEL_SPI_SR_RDRF) {
+ value = readl(&reg_base->rdr);
+ if (rxp)
+ *rxp++ = value;
+ len_rx++;
+ }
+ }
+
+out:
+ if (flags & SPI_XFER_END) {
+ /*
+ * Wait until the transfer is completely done before
+ * we deactivate CS.
+ */
+ wait_for_bit_le32(&reg_base->sr,
+ ATMEL_SPI_SR_TXEMPTY, true, 1000, false);
+
+ atmel_spi_cs_deactivate(dev);
+ }
+
+ return 0;
+}
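+
+/*
+ * Caller-side sketch (illustrative only; "slave", "cmd" and "id" are
+ * placeholders): a JEDEC ID read with this driver is one byte written with
+ * SPI_XFER_BEGIN, which asserts the GPIO chip select, followed by a read
+ * with SPI_XFER_END, which releases it again.
+ *
+ * u8 cmd = 0x9f, id[3];
+ *
+ * spi_claim_bus(slave);
+ * spi_xfer(slave, 8, &cmd, NULL, SPI_XFER_BEGIN);
+ * spi_xfer(slave, sizeof(id) * 8, NULL, id, SPI_XFER_END);
+ * spi_release_bus(slave);
+ */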
+
+static int atmel_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct atmel_spi_priv *priv = dev_get_priv(bus);
+
+ priv->freq = speed;
+
+ return 0;
+}
+
+static int atmel_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct atmel_spi_priv *priv = dev_get_priv(bus);
+
+ priv->mode = mode;
+
+ return 0;
+}
+
+static const struct dm_spi_ops atmel_spi_ops = {
+ .claim_bus = atmel_spi_claim_bus,
+ .release_bus = atmel_spi_release_bus,
+ .xfer = atmel_spi_xfer,
+ .set_speed = atmel_spi_set_speed,
+ .set_mode = atmel_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static int atmel_spi_enable_clk(struct udevice *bus)
+{
+ struct atmel_spi_priv *priv = dev_get_priv(bus);
+ struct clk clk;
+ ulong clk_rate;
+ int ret;
+
+ ret = clk_get_by_index(bus, 0, &clk);
+ if (ret)
+ return -EINVAL;
+
+ ret = clk_enable(&clk);
+ if (ret)
+ return ret;
+
+ clk_rate = clk_get_rate(&clk);
+ if (!clk_rate)
+ return -EINVAL;
+
+ priv->bus_clk_rate = clk_rate;
+
+ clk_free(&clk);
+
+ return 0;
+}
+
+static int atmel_spi_probe(struct udevice *bus)
+{
+ struct atmel_spi_plat *bus_plat = dev_get_plat(bus);
+ int ret;
+
+ ret = atmel_spi_enable_clk(bus);
+ if (ret)
+ return ret;
+
+ bus_plat->regs = dev_read_addr_ptr(bus);
+
+#if CONFIG_IS_ENABLED(DM_GPIO)
+ struct atmel_spi_priv *priv = dev_get_priv(bus);
+ int i;
+
+ ret = gpio_request_list_by_name(bus, "cs-gpios", priv->cs_gpios,
+ ARRAY_SIZE(priv->cs_gpios), 0);
+ if (ret < 0) {
+ pr_err("Can't get %s gpios! Error: %d\n", bus->name, ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->cs_gpios); i++) {
+ if (!dm_gpio_is_valid(&priv->cs_gpios[i]))
+ continue;
+
+ dm_gpio_set_dir_flags(&priv->cs_gpios[i],
+ GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
+ }
+#endif
+
+ writel(ATMEL_SPI_CR_SWRST, &bus_plat->regs->cr);
+
+ return 0;
+}
+
+static const struct udevice_id atmel_spi_ids[] = {
+ { .compatible = "atmel,at91rm9200-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(atmel_spi) = {
+ .name = "atmel_spi",
+ .id = UCLASS_SPI,
+ .of_match = atmel_spi_ids,
+ .ops = &atmel_spi_ops,
+ .plat_auto = sizeof(struct atmel_spi_plat),
+ .priv_auto = sizeof(struct atmel_spi_priv),
+ .probe = atmel_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/atmel_spi.h b/roms/u-boot/drivers/spi/atmel_spi.h
new file mode 100644
index 000000000..9663cca5e
--- /dev/null
+++ b/roms/u-boot/drivers/spi/atmel_spi.h
@@ -0,0 +1,86 @@
+/*
+ * Register definitions for the Atmel AT32/AT91 SPI Controller
+ */
+
+/* Register offsets */
+#include <linux/bitops.h>
+#define ATMEL_SPI_CR 0x0000
+#define ATMEL_SPI_MR 0x0004
+#define ATMEL_SPI_RDR 0x0008
+#define ATMEL_SPI_TDR 0x000c
+#define ATMEL_SPI_SR 0x0010
+#define ATMEL_SPI_IER 0x0014
+#define ATMEL_SPI_IDR 0x0018
+#define ATMEL_SPI_IMR 0x001c
+#define ATMEL_SPI_CSR(x) (0x0030 + 4 * (x))
+#define ATMEL_SPI_VERSION 0x00fc
+
+/* Bits in CR */
+#define ATMEL_SPI_CR_SPIEN BIT(0)
+#define ATMEL_SPI_CR_SPIDIS BIT(1)
+#define ATMEL_SPI_CR_SWRST BIT(7)
+#define ATMEL_SPI_CR_LASTXFER BIT(24)
+
+/* Bits in MR */
+#define ATMEL_SPI_MR_MSTR BIT(0)
+#define ATMEL_SPI_MR_PS BIT(1)
+#define ATMEL_SPI_MR_PCSDEC BIT(2)
+#define ATMEL_SPI_MR_FDIV BIT(3)
+#define ATMEL_SPI_MR_MODFDIS BIT(4)
+#define ATMEL_SPI_MR_WDRBT BIT(5)
+#define ATMEL_SPI_MR_LLB BIT(7)
+#define ATMEL_SPI_MR_PCS(x) (((x) & 15) << 16)
+#define ATMEL_SPI_MR_DLYBCS(x) ((x) << 24)
+
+/* Bits in RDR */
+#define ATMEL_SPI_RDR_RD(x) (x)
+#define ATMEL_SPI_RDR_PCS(x) ((x) << 16)
+
+/* Bits in TDR */
+#define ATMEL_SPI_TDR_TD(x) (x)
+#define ATMEL_SPI_TDR_PCS(x) ((x) << 16)
+#define ATMEL_SPI_TDR_LASTXFER BIT(24)
+
+/* Bits in SR/IER/IDR/IMR */
+#define ATMEL_SPI_SR_RDRF BIT(0)
+#define ATMEL_SPI_SR_TDRE BIT(1)
+#define ATMEL_SPI_SR_MODF BIT(2)
+#define ATMEL_SPI_SR_OVRES BIT(3)
+#define ATMEL_SPI_SR_ENDRX BIT(4)
+#define ATMEL_SPI_SR_ENDTX BIT(5)
+#define ATMEL_SPI_SR_RXBUFF BIT(6)
+#define ATMEL_SPI_SR_TXBUFE BIT(7)
+#define ATMEL_SPI_SR_NSSR BIT(8)
+#define ATMEL_SPI_SR_TXEMPTY BIT(9)
+#define ATMEL_SPI_SR_SPIENS BIT(16)
+
+/* Bits in CSRx */
+#define ATMEL_SPI_CSRx_CPOL BIT(0)
+#define ATMEL_SPI_CSRx_NCPHA BIT(1)
+#define ATMEL_SPI_CSRx_CSAAT BIT(3)
+#define ATMEL_SPI_CSRx_BITS(x) ((x) << 4)
+#define ATMEL_SPI_CSRx_SCBR(x) ((x) << 8)
+#define ATMEL_SPI_CSRx_SCBR_MAX GENMASK(7, 0)
+#define ATMEL_SPI_CSRx_DLYBS(x) ((x) << 16)
+#define ATMEL_SPI_CSRx_DLYBCT(x) ((x) << 24)
+
+/* Bits in VERSION */
+#define ATMEL_SPI_VERSION_REV(x) ((x) & 0xfff)
+#define ATMEL_SPI_VERSION_MFN(x) ((x) << 16)
+
+/* Constants for CSRx:BITS */
+#define ATMEL_SPI_BITS_8 0
+#define ATMEL_SPI_BITS_9 1
+#define ATMEL_SPI_BITS_10 2
+#define ATMEL_SPI_BITS_11 3
+#define ATMEL_SPI_BITS_12 4
+#define ATMEL_SPI_BITS_13 5
+#define ATMEL_SPI_BITS_14 6
+#define ATMEL_SPI_BITS_15 7
+#define ATMEL_SPI_BITS_16 8
+
+/* Register access macros */
+#define spi_readl(as, reg) \
+ readl(as->regs + ATMEL_SPI_##reg)
+#define spi_writel(as, reg, value) \
+ writel(value, as->regs + ATMEL_SPI_##reg)
diff --git a/roms/u-boot/drivers/spi/bcm63xx_hsspi.c b/roms/u-boot/drivers/spi/bcm63xx_hsspi.c
new file mode 100644
index 000000000..85108df56
--- /dev/null
+++ b/roms/u-boot/drivers/spi/bcm63xx_hsspi.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Álvaro Fernández Rojas <noltari@gmail.com>
+ *
+ * Derived from linux/drivers/spi/spi-bcm63xx-hsspi.c:
+ * Copyright (C) 2000-2010 Broadcom Corporation
+ * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <reset.h>
+#include <wait_bit.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+
+#define HSSPI_PP 0
+
+#define SPI_MAX_SYNC_CLOCK 30000000
+
+/* SPI Control register */
+#define SPI_CTL_REG 0x000
+#define SPI_CTL_CS_POL_SHIFT 0
+#define SPI_CTL_CS_POL_MASK (0xff << SPI_CTL_CS_POL_SHIFT)
+#define SPI_CTL_CLK_GATE_SHIFT 16
+#define SPI_CTL_CLK_GATE_MASK (1 << SPI_CTL_CLK_GATE_SHIFT)
+#define SPI_CTL_CLK_POL_SHIFT 17
+#define SPI_CTL_CLK_POL_MASK (1 << SPI_CTL_CLK_POL_SHIFT)
+
+/* SPI Interrupts registers */
+#define SPI_IR_STAT_REG 0x008
+#define SPI_IR_ST_MASK_REG 0x00c
+#define SPI_IR_MASK_REG 0x010
+
+#define SPI_IR_CLEAR_ALL 0xff001f1f
+
+/* SPI Ping-Pong Command registers */
+#define SPI_CMD_REG (0x080 + (0x40 * (HSSPI_PP)) + 0x00)
+#define SPI_CMD_OP_SHIFT 0
+#define SPI_CMD_OP_START (0x1 << SPI_CMD_OP_SHIFT)
+#define SPI_CMD_PFL_SHIFT 8
+#define SPI_CMD_PFL_MASK (0x7 << SPI_CMD_PFL_SHIFT)
+#define SPI_CMD_SLAVE_SHIFT 12
+#define SPI_CMD_SLAVE_MASK (0x7 << SPI_CMD_SLAVE_SHIFT)
+
+/* SPI Ping-Pong Status registers */
+#define SPI_STAT_REG (0x080 + (0x40 * (HSSPI_PP)) + 0x04)
+#define SPI_STAT_SRCBUSY_SHIFT 1
+#define SPI_STAT_SRCBUSY_MASK (1 << SPI_STAT_SRCBUSY_SHIFT)
+
+/* SPI Profile Clock registers */
+#define SPI_PFL_CLK_REG(x) (0x100 + (0x20 * (x)) + 0x00)
+#define SPI_PFL_CLK_FREQ_SHIFT 0
+#define SPI_PFL_CLK_FREQ_MASK (0x3fff << SPI_PFL_CLK_FREQ_SHIFT)
+#define SPI_PFL_CLK_RSTLOOP_SHIFT 15
+#define SPI_PFL_CLK_RSTLOOP_MASK (1 << SPI_PFL_CLK_RSTLOOP_SHIFT)
+
+/* SPI Profile Signal registers */
+#define SPI_PFL_SIG_REG(x) (0x100 + (0x20 * (x)) + 0x04)
+#define SPI_PFL_SIG_LATCHRIS_SHIFT 12
+#define SPI_PFL_SIG_LATCHRIS_MASK (1 << SPI_PFL_SIG_LATCHRIS_SHIFT)
+#define SPI_PFL_SIG_LAUNCHRIS_SHIFT 13
+#define SPI_PFL_SIG_LAUNCHRIS_MASK (1 << SPI_PFL_SIG_LAUNCHRIS_SHIFT)
+#define SPI_PFL_SIG_ASYNCIN_SHIFT 16
+#define SPI_PFL_SIG_ASYNCIN_MASK (1 << SPI_PFL_SIG_ASYNCIN_SHIFT)
+
+/* SPI Profile Mode registers */
+#define SPI_PFL_MODE_REG(x) (0x100 + (0x20 * (x)) + 0x08)
+#define SPI_PFL_MODE_FILL_SHIFT 0
+#define SPI_PFL_MODE_FILL_MASK (0xff << SPI_PFL_MODE_FILL_SHIFT)
+#define SPI_PFL_MODE_MDRDSZ_SHIFT 16
+#define SPI_PFL_MODE_MDRDSZ_MASK (1 << SPI_PFL_MODE_MDRDSZ_SHIFT)
+#define SPI_PFL_MODE_MDWRSZ_SHIFT 18
+#define SPI_PFL_MODE_MDWRSZ_MASK (1 << SPI_PFL_MODE_MDWRSZ_SHIFT)
+#define SPI_PFL_MODE_3WIRE_SHIFT 20
+#define SPI_PFL_MODE_3WIRE_MASK (1 << SPI_PFL_MODE_3WIRE_SHIFT)
+
+/* SPI Ping-Pong FIFO registers */
+#define HSSPI_FIFO_SIZE 0x200
+#define HSSPI_FIFO_BASE (0x200 + \
+ (HSSPI_FIFO_SIZE * HSSPI_PP))
+
+/* SPI Ping-Pong FIFO OP register */
+#define HSSPI_FIFO_OP_SIZE 0x2
+#define HSSPI_FIFO_OP_REG (HSSPI_FIFO_BASE + 0x00)
+#define HSSPI_FIFO_OP_BYTES_SHIFT 0
+#define HSSPI_FIFO_OP_BYTES_MASK (0x3ff << HSSPI_FIFO_OP_BYTES_SHIFT)
+#define HSSPI_FIFO_OP_MBIT_SHIFT 11
+#define HSSPI_FIFO_OP_MBIT_MASK (1 << HSSPI_FIFO_OP_MBIT_SHIFT)
+#define HSSPI_FIFO_OP_CODE_SHIFT 13
+#define HSSPI_FIFO_OP_READ_WRITE (1 << HSSPI_FIFO_OP_CODE_SHIFT)
+#define HSSPI_FIFO_OP_CODE_W (2 << HSSPI_FIFO_OP_CODE_SHIFT)
+#define HSSPI_FIFO_OP_CODE_R (3 << HSSPI_FIFO_OP_CODE_SHIFT)
+
+struct bcm63xx_hsspi_priv {
+ void __iomem *regs;
+ ulong clk_rate;
+ uint8_t num_cs;
+ uint8_t cs_pols;
+ uint speed;
+};
+
+static int bcm63xx_hsspi_cs_info(struct udevice *bus, uint cs,
+ struct spi_cs_info *info)
+{
+ struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);
+
+ if (cs >= priv->num_cs) {
+ printf("no cs %u\n", cs);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);
+
+ /* clock polarity */
+ if (mode & SPI_CPOL)
+ setbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_POL_MASK);
+ else
+ clrbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_POL_MASK);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_set_speed(struct udevice *bus, uint speed)
+{
+ struct bcm63xx_hsspi_priv *priv = dev_get_priv(bus);
+
+ priv->speed = speed;
+
+ return 0;
+}
+
+static void bcm63xx_hsspi_activate_cs(struct bcm63xx_hsspi_priv *priv,
+ struct dm_spi_slave_plat *plat)
+{
+ uint32_t clr, set;
+
+ /* profile clock */
+ set = DIV_ROUND_UP(priv->clk_rate, priv->speed);
+ set = DIV_ROUND_UP(2048, set);
+ set &= SPI_PFL_CLK_FREQ_MASK;
+ set |= SPI_PFL_CLK_RSTLOOP_MASK;
+ writel(set, priv->regs + SPI_PFL_CLK_REG(plat->cs));
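+
+ /*
+ * Worked example of the two divisions above (the 400 MHz reference
+ * clock is assumed for illustration): a 20 MHz request gives
+ * DIV_ROUND_UP(400000000, 20000000) = 20 and then
+ * DIV_ROUND_UP(2048, 20) = 103, the value left in the
+ * SPI_PFL_CLK_FREQ field of this chip select's profile.
+ */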
+
+ /* profile signal */
+ set = 0;
+ clr = SPI_PFL_SIG_LAUNCHRIS_MASK |
+ SPI_PFL_SIG_LATCHRIS_MASK |
+ SPI_PFL_SIG_ASYNCIN_MASK;
+
+ /* latch/launch config */
+ if (plat->mode & SPI_CPHA)
+ set |= SPI_PFL_SIG_LAUNCHRIS_MASK;
+ else
+ set |= SPI_PFL_SIG_LATCHRIS_MASK;
+
+ /* async clk */
+ if (priv->speed > SPI_MAX_SYNC_CLOCK)
+ set |= SPI_PFL_SIG_ASYNCIN_MASK;
+
+ clrsetbits_32(priv->regs + SPI_PFL_SIG_REG(plat->cs), clr, set);
+
+ /* global control */
+ set = 0;
+ clr = 0;
+
+ /* invert cs polarity */
+ if (priv->cs_pols & BIT(plat->cs))
+ clr |= BIT(plat->cs);
+ else
+ set |= BIT(plat->cs);
+
+ /* invert dummy cs polarity */
+ if (priv->cs_pols & BIT(!plat->cs))
+ clr |= BIT(!plat->cs);
+ else
+ set |= BIT(!plat->cs);
+
+ clrsetbits_32(priv->regs + SPI_CTL_REG, clr, set);
+}
+
+static void bcm63xx_hsspi_deactivate_cs(struct bcm63xx_hsspi_priv *priv)
+{
+ /* restore cs polarities */
+ clrsetbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CS_POL_MASK,
+ priv->cs_pols);
+}
+
+/*
+ * The BCM63xx HSSPI controller doesn't allow keeping CS active between
+ * transfers because the chip selects are controlled by hardware.
+ * However, it provides a mechanism to prepend write transfers prior to read
+ * transfers (with a maximum prepend of 15 bytes), which is usually enough
+ * for SPI-connected flashes since reading requires prepending a write
+ * transfer of 5 bytes. It also provides a way to invert each CS polarity,
+ * not only between transfers like the older BCM63xx SPI driver, but also
+ * for the rest of the time.
+ *
+ * Instead of using the prepend mechanism, this implementation inverts the
+ * polarity of both the desired CS and a dummy CS when the bus is claimed.
+ * This way, the dummy CS is restored to its inactive value when transfers
+ * are issued and the desired CS is kept at its active value the whole
+ * time. This hack is also used in the upstream Linux driver and allows
+ * keeping CS active between transfers even though the hardware doesn't
+ * support it directly.
+ */
+static int bcm63xx_hsspi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+ size_t data_bytes = bitlen / 8;
+ size_t step_size = HSSPI_FIFO_SIZE;
+ uint16_t opcode = 0;
+ uint32_t val;
+ const uint8_t *tx = dout;
+ uint8_t *rx = din;
+
+ if (flags & SPI_XFER_BEGIN)
+ bcm63xx_hsspi_activate_cs(priv, plat);
+
+ /* fifo operation */
+ if (tx && rx)
+ opcode = HSSPI_FIFO_OP_READ_WRITE;
+ else if (rx)
+ opcode = HSSPI_FIFO_OP_CODE_R;
+ else if (tx)
+ opcode = HSSPI_FIFO_OP_CODE_W;
+
+ if (opcode != HSSPI_FIFO_OP_CODE_R)
+ step_size -= HSSPI_FIFO_OP_SIZE;
+
+ /* dual mode */
+ if ((opcode == HSSPI_FIFO_OP_CODE_R && (plat->mode & SPI_RX_DUAL)) ||
+ (opcode == HSSPI_FIFO_OP_CODE_W && (plat->mode & SPI_TX_DUAL)))
+ opcode |= HSSPI_FIFO_OP_MBIT_MASK;
+
+ /* profile mode */
+ val = SPI_PFL_MODE_FILL_MASK |
+ SPI_PFL_MODE_MDRDSZ_MASK |
+ SPI_PFL_MODE_MDWRSZ_MASK;
+ if (plat->mode & SPI_3WIRE)
+ val |= SPI_PFL_MODE_3WIRE_MASK;
+ writel(val, priv->regs + SPI_PFL_MODE_REG(plat->cs));
+
+ /* transfer loop */
+ while (data_bytes > 0) {
+ size_t curr_step = min(step_size, data_bytes);
+ int ret;
+
+ /* copy tx data */
+ if (tx) {
+ memcpy_toio(priv->regs + HSSPI_FIFO_BASE +
+ HSSPI_FIFO_OP_SIZE, tx, curr_step);
+ tx += curr_step;
+ }
+
+ /* set fifo operation */
+ writew(cpu_to_be16(opcode | (curr_step & HSSPI_FIFO_OP_BYTES_MASK)),
+ priv->regs + HSSPI_FIFO_OP_REG);
+
+ /* issue the transfer */
+ val = SPI_CMD_OP_START;
+ val |= (plat->cs << SPI_CMD_PFL_SHIFT) &
+ SPI_CMD_PFL_MASK;
+ val |= (!plat->cs << SPI_CMD_SLAVE_SHIFT) &
+ SPI_CMD_SLAVE_MASK;
+ writel(val, priv->regs + SPI_CMD_REG);
+
+ /* wait for completion */
+ ret = wait_for_bit_32(priv->regs + SPI_STAT_REG,
+ SPI_STAT_SRCBUSY_MASK, false,
+ 1000, false);
+ if (ret) {
+ printf("interrupt timeout\n");
+ return ret;
+ }
+
+ /* copy rx data */
+ if (rx) {
+ memcpy_fromio(rx, priv->regs + HSSPI_FIFO_BASE,
+ curr_step);
+ rx += curr_step;
+ }
+
+ data_bytes -= curr_step;
+ }
+
+ if (flags & SPI_XFER_END)
+ bcm63xx_hsspi_deactivate_cs(priv);
+
+ return 0;
+}
+
+static const struct dm_spi_ops bcm63xx_hsspi_ops = {
+ .cs_info = bcm63xx_hsspi_cs_info,
+ .set_mode = bcm63xx_hsspi_set_mode,
+ .set_speed = bcm63xx_hsspi_set_speed,
+ .xfer = bcm63xx_hsspi_xfer,
+};
+
+static const struct udevice_id bcm63xx_hsspi_ids[] = {
+ { .compatible = "brcm,bcm6328-hsspi", },
+ { /* sentinel */ }
+};
+
+static int bcm63xx_hsspi_child_pre_probe(struct udevice *dev)
+{
+ struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev->parent);
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+
+ /* check cs */
+ if (plat->cs >= priv->num_cs) {
+ printf("no cs %u\n", plat->cs);
+ return -ENODEV;
+ }
+
+ /* cs polarity */
+ if (plat->mode & SPI_CS_HIGH)
+ priv->cs_pols |= BIT(plat->cs);
+ else
+ priv->cs_pols &= ~BIT(plat->cs);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_probe(struct udevice *dev)
+{
+ struct bcm63xx_hsspi_priv *priv = dev_get_priv(dev);
+ struct reset_ctl rst_ctl;
+ struct clk clk;
+ int ret;
+
+ priv->regs = dev_remap_addr(dev);
+ if (!priv->regs)
+ return -EINVAL;
+
+ priv->num_cs = dev_read_u32_default(dev, "num-cs", 8);
+
+ /* enable clock */
+ ret = clk_get_by_name(dev, "hsspi", &clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(&clk);
+ if (ret < 0 && ret != -ENOSYS)
+ return ret;
+
+ ret = clk_free(&clk);
+ if (ret < 0 && ret != -ENOSYS)
+ return ret;
+
+ /* get clock rate */
+ ret = clk_get_by_name(dev, "pll", &clk);
+ if (ret < 0 && ret != -ENOSYS)
+ return ret;
+
+ priv->clk_rate = clk_get_rate(&clk);
+
+ ret = clk_free(&clk);
+ if (ret < 0 && ret != -ENOSYS)
+ return ret;
+
+ /* perform reset */
+ ret = reset_get_by_index(dev, 0, &rst_ctl);
+ if (ret >= 0) {
+ ret = reset_deassert(&rst_ctl);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = reset_free(&rst_ctl);
+ if (ret < 0)
+ return ret;
+
+ /* initialize hardware */
+ writel(0, priv->regs + SPI_IR_MASK_REG);
+
+ /* clear pending interrupts */
+ writel(SPI_IR_CLEAR_ALL, priv->regs + SPI_IR_STAT_REG);
+
+ /* enable clk gate */
+ setbits_32(priv->regs + SPI_CTL_REG, SPI_CTL_CLK_GATE_MASK);
+
+ /* read default cs polarities */
+ priv->cs_pols = readl(priv->regs + SPI_CTL_REG) &
+ SPI_CTL_CS_POL_MASK;
+
+ return 0;
+}
+
+U_BOOT_DRIVER(bcm63xx_hsspi) = {
+ .name = "bcm63xx_hsspi",
+ .id = UCLASS_SPI,
+ .of_match = bcm63xx_hsspi_ids,
+ .ops = &bcm63xx_hsspi_ops,
+ .priv_auto = sizeof(struct bcm63xx_hsspi_priv),
+ .child_pre_probe = bcm63xx_hsspi_child_pre_probe,
+ .probe = bcm63xx_hsspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/bcm63xx_spi.c b/roms/u-boot/drivers/spi/bcm63xx_spi.c
new file mode 100644
index 000000000..dd5e62b2f
--- /dev/null
+++ b/roms/u-boot/drivers/spi/bcm63xx_spi.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Álvaro Fernández Rojas <noltari@gmail.com>
+ *
+ * Derived from linux/drivers/spi/spi-bcm63xx.c:
+ * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <reset.h>
+#include <wait_bit.h>
+#include <asm/io.h>
+
+/* BCM6348 SPI core */
+#define SPI_6348_CLK 0x06
+#define SPI_6348_CMD 0x00
+#define SPI_6348_CTL 0x40
+#define SPI_6348_CTL_SHIFT 6
+#define SPI_6348_FILL 0x07
+#define SPI_6348_IR_MASK 0x04
+#define SPI_6348_IR_STAT 0x02
+#define SPI_6348_RX 0x80
+#define SPI_6348_RX_SIZE 0x3f
+#define SPI_6348_TX 0x41
+#define SPI_6348_TX_SIZE 0x3f
+
+/* BCM6358 SPI core */
+#define SPI_6358_CLK 0x706
+#define SPI_6358_CMD 0x700
+#define SPI_6358_CTL 0x000
+#define SPI_6358_CTL_SHIFT 14
+#define SPI_6358_FILL 0x707
+#define SPI_6358_IR_MASK 0x702
+#define SPI_6358_IR_STAT 0x704
+#define SPI_6358_RX 0x400
+#define SPI_6358_RX_SIZE 0x220
+#define SPI_6358_TX 0x002
+#define SPI_6358_TX_SIZE 0x21e
+
+/* SPI Clock register */
+#define SPI_CLK_SHIFT 0
+#define SPI_CLK_20MHZ (0 << SPI_CLK_SHIFT)
+#define SPI_CLK_0_391MHZ (1 << SPI_CLK_SHIFT)
+#define SPI_CLK_0_781MHZ (2 << SPI_CLK_SHIFT)
+#define SPI_CLK_1_563MHZ (3 << SPI_CLK_SHIFT)
+#define SPI_CLK_3_125MHZ (4 << SPI_CLK_SHIFT)
+#define SPI_CLK_6_250MHZ (5 << SPI_CLK_SHIFT)
+#define SPI_CLK_12_50MHZ (6 << SPI_CLK_SHIFT)
+#define SPI_CLK_25MHZ (7 << SPI_CLK_SHIFT)
+#define SPI_CLK_MASK (7 << SPI_CLK_SHIFT)
+#define SPI_CLK_SSOFF_SHIFT 3
+#define SPI_CLK_SSOFF_2 (2 << SPI_CLK_SSOFF_SHIFT)
+#define SPI_CLK_SSOFF_MASK (7 << SPI_CLK_SSOFF_SHIFT)
+#define SPI_CLK_BSWAP_SHIFT 7
+#define SPI_CLK_BSWAP_MASK (1 << SPI_CLK_BSWAP_SHIFT)
+
+/* SPI Command register */
+#define SPI_CMD_OP_SHIFT 0
+#define SPI_CMD_OP_START (0x3 << SPI_CMD_OP_SHIFT)
+#define SPI_CMD_SLAVE_SHIFT 4
+#define SPI_CMD_SLAVE_MASK (0xf << SPI_CMD_SLAVE_SHIFT)
+#define SPI_CMD_PREPEND_SHIFT 8
+#define SPI_CMD_PREPEND_BYTES 0xf
+#define SPI_CMD_3WIRE_SHIFT 12
+#define SPI_CMD_3WIRE_MASK (1 << SPI_CMD_3WIRE_SHIFT)
+
+/* SPI Control register */
+#define SPI_CTL_TYPE_FD_RW 0
+#define SPI_CTL_TYPE_HD_W 1
+#define SPI_CTL_TYPE_HD_R 2
+
+/* SPI Interrupt registers */
+#define SPI_IR_DONE_SHIFT 0
+#define SPI_IR_DONE_MASK (1 << SPI_IR_DONE_SHIFT)
+#define SPI_IR_RXOVER_SHIFT 1
+#define SPI_IR_RXOVER_MASK (1 << SPI_IR_RXOVER_SHIFT)
+#define SPI_IR_TXUNDER_SHIFT 2
+#define SPI_IR_TXUNDER_MASK (1 << SPI_IR_TXUNDER_SHIFT)
+#define SPI_IR_TXOVER_SHIFT 3
+#define SPI_IR_TXOVER_MASK (1 << SPI_IR_TXOVER_SHIFT)
+#define SPI_IR_RXUNDER_SHIFT 4
+#define SPI_IR_RXUNDER_MASK (1 << SPI_IR_RXUNDER_SHIFT)
+#define SPI_IR_CLEAR_MASK (SPI_IR_DONE_MASK |\
+ SPI_IR_RXOVER_MASK |\
+ SPI_IR_TXUNDER_MASK |\
+ SPI_IR_TXOVER_MASK |\
+ SPI_IR_RXUNDER_MASK)
+
+enum bcm63xx_regs_spi {
+ SPI_CLK,
+ SPI_CMD,
+ SPI_CTL,
+ SPI_CTL_SHIFT,
+ SPI_FILL,
+ SPI_IR_MASK,
+ SPI_IR_STAT,
+ SPI_RX,
+ SPI_RX_SIZE,
+ SPI_TX,
+ SPI_TX_SIZE,
+};
+
+struct bcm63xx_spi_priv {
+ const unsigned long *regs;
+ void __iomem *base;
+ size_t tx_bytes;
+ uint8_t num_cs;
+};
+
+#define SPI_CLK_CNT 8
+static const unsigned bcm63xx_spi_freq_table[SPI_CLK_CNT][2] = {
+ { 25000000, SPI_CLK_25MHZ },
+ { 20000000, SPI_CLK_20MHZ },
+ { 12500000, SPI_CLK_12_50MHZ },
+ { 6250000, SPI_CLK_6_250MHZ },
+ { 3125000, SPI_CLK_3_125MHZ },
+ { 1563000, SPI_CLK_1_563MHZ },
+ { 781000, SPI_CLK_0_781MHZ },
+ { 391000, SPI_CLK_0_391MHZ }
+};
+
+static int bcm63xx_spi_cs_info(struct udevice *bus, uint cs,
+ struct spi_cs_info *info)
+{
+ struct bcm63xx_spi_priv *priv = dev_get_priv(bus);
+
+ if (cs >= priv->num_cs) {
+ printf("no cs %u\n", cs);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bcm63xx_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct bcm63xx_spi_priv *priv = dev_get_priv(bus);
+ const unsigned long *regs = priv->regs;
+
+ if (mode & SPI_LSB_FIRST)
+ setbits_8(priv->base + regs[SPI_CLK], SPI_CLK_BSWAP_MASK);
+ else
+ clrbits_8(priv->base + regs[SPI_CLK], SPI_CLK_BSWAP_MASK);
+
+ return 0;
+}
+
+static int bcm63xx_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct bcm63xx_spi_priv *priv = dev_get_priv(bus);
+ const unsigned long *regs = priv->regs;
+ uint8_t clk_cfg;
+ int i;
+
+ /* default to lowest clock configuration */
+ clk_cfg = SPI_CLK_0_391MHZ;
+
+ /* find the closest clock configuration */
+ for (i = 0; i < SPI_CLK_CNT; i++) {
+ if (speed >= bcm63xx_spi_freq_table[i][0]) {
+ clk_cfg = bcm63xx_spi_freq_table[i][1];
+ break;
+ }
+ }
+
+ /* write clock configuration */
+ clrsetbits_8(priv->base + regs[SPI_CLK],
+ SPI_CLK_SSOFF_MASK | SPI_CLK_MASK,
+ clk_cfg | SPI_CLK_SSOFF_2);
+
+ return 0;
+}
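+
+/*
+ * Worked example of the table lookup above (requested rates are
+ * illustrative): a 25 MHz request matches the first entry, while a 10 MHz
+ * request falls past the 25/20/12.5 MHz entries and selects
+ * SPI_CLK_6_250MHZ, i.e. the fastest configuration that does not exceed
+ * the request.
+ */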
+
+/*
+ * The BCM63xx SPI controller doesn't allow keeping CS active between
+ * transfers because the chip select is controlled by hardware.
+ * However, it provides a mechanism to prepend write transfers prior to read
+ * transfers (with a maximum prepend of 15 bytes), which is usually enough
+ * for SPI-connected flashes since reading requires prepending a write
+ * transfer of 5 bytes.
+ *
+ * This implementation takes advantage of the prepend mechanism and combines
+ * multiple transfers into a single one where possible (single/multiple
+ * write transfer(s) followed by a final read/write transfer).
+ * However, it's not possible to buffer reads, which means that read
+ * transfers should always come last.
+ * Note that combining write transfers into a single one is plain buffering
+ * and doesn't require the prepend mechanism.
+ */
+static int bcm63xx_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct bcm63xx_spi_priv *priv = dev_get_priv(dev->parent);
+ const unsigned long *regs = priv->regs;
+ size_t data_bytes = bitlen / 8;
+
+ if (flags & SPI_XFER_BEGIN) {
+ /* clear prepends */
+ priv->tx_bytes = 0;
+
+ /* initialize hardware */
+ writeb_be(0, priv->base + regs[SPI_IR_MASK]);
+ }
+
+ if (din) {
+ /* buffering reads not possible since cs is hw controlled */
+ if (!(flags & SPI_XFER_END)) {
+ printf("unable to buffer reads\n");
+ return -EINVAL;
+ }
+
+ /* check rx size */
+ if (data_bytes > regs[SPI_RX_SIZE]) {
+ printf("max rx bytes exceeded\n");
+ return -EMSGSIZE;
+ }
+ }
+
+ if (dout) {
+ /* check tx size */
+ if (priv->tx_bytes + data_bytes > regs[SPI_TX_SIZE]) {
+ printf("max tx bytes exceeded\n");
+ return -EMSGSIZE;
+ }
+
+ /* copy tx data */
+ memcpy_toio(priv->base + regs[SPI_TX] + priv->tx_bytes,
+ dout, data_bytes);
+ priv->tx_bytes += data_bytes;
+ }
+
+ if (flags & SPI_XFER_END) {
+ struct dm_spi_slave_plat *plat =
+ dev_get_parent_plat(dev);
+ uint16_t val, cmd;
+ int ret;
+
+ /* determine control config */
+ if (dout && !din) {
+ /* buffered write transfers */
+ val = priv->tx_bytes;
+ val |= (SPI_CTL_TYPE_HD_W << regs[SPI_CTL_SHIFT]);
+ priv->tx_bytes = 0;
+ } else {
+ if (dout && din && (flags & SPI_XFER_ONCE)) {
+ /* full duplex read/write */
+ val = data_bytes;
+ val |= (SPI_CTL_TYPE_FD_RW <<
+ regs[SPI_CTL_SHIFT]);
+ priv->tx_bytes = 0;
+ } else {
+ /* prepended write transfer */
+ val = data_bytes;
+ val |= (SPI_CTL_TYPE_HD_R <<
+ regs[SPI_CTL_SHIFT]);
+ if (priv->tx_bytes > SPI_CMD_PREPEND_BYTES) {
+ printf("max prepend bytes exceeded\n");
+ return -EMSGSIZE;
+ }
+ }
+ }
+
+ if (regs[SPI_CTL_SHIFT] >= 8)
+ writew_be(val, priv->base + regs[SPI_CTL]);
+ else
+ writeb_be(val, priv->base + regs[SPI_CTL]);
+
+ /* clear interrupts */
+ writeb_be(SPI_IR_CLEAR_MASK, priv->base + regs[SPI_IR_STAT]);
+
+ /* issue the transfer */
+ cmd = SPI_CMD_OP_START;
+ cmd |= (plat->cs << SPI_CMD_SLAVE_SHIFT) & SPI_CMD_SLAVE_MASK;
+ cmd |= (priv->tx_bytes << SPI_CMD_PREPEND_SHIFT);
+ if (plat->mode & SPI_3WIRE)
+ cmd |= SPI_CMD_3WIRE_MASK;
+ writew_be(cmd, priv->base + regs[SPI_CMD]);
+
+ /* enable interrupts */
+ writeb_be(SPI_IR_DONE_MASK, priv->base + regs[SPI_IR_MASK]);
+
+ ret = wait_for_bit_8(priv->base + regs[SPI_IR_STAT],
+ SPI_IR_DONE_MASK, true, 1000, false);
+ if (ret) {
+ printf("interrupt timeout\n");
+ return ret;
+ }
+
+ /* copy rx data */
+ if (din)
+ memcpy_fromio(din, priv->base + regs[SPI_RX],
+ data_bytes);
+ }
+
+ return 0;
+}
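+
+/*
+ * Caller-side sketch of the prepend path described above (the opcode,
+ * address and buffers are placeholders): a 0x0b fast read buffers its
+ * 5 header bytes as a write, and the final read transfer then issues the
+ * whole operation with those bytes in the prepend area, well under the
+ * 15-byte limit.
+ *
+ * u8 hdr[5] = { 0x0b, addr >> 16, addr >> 8, addr, 0x00 };
+ *
+ * spi_xfer(slave, sizeof(hdr) * 8, hdr, NULL, SPI_XFER_BEGIN);
+ * spi_xfer(slave, len * 8, NULL, buf, SPI_XFER_END);
+ */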
+
+static const struct dm_spi_ops bcm63xx_spi_ops = {
+ .cs_info = bcm63xx_spi_cs_info,
+ .set_mode = bcm63xx_spi_set_mode,
+ .set_speed = bcm63xx_spi_set_speed,
+ .xfer = bcm63xx_spi_xfer,
+};
+
+static const unsigned long bcm6348_spi_regs[] = {
+ [SPI_CLK] = SPI_6348_CLK,
+ [SPI_CMD] = SPI_6348_CMD,
+ [SPI_CTL] = SPI_6348_CTL,
+ [SPI_CTL_SHIFT] = SPI_6348_CTL_SHIFT,
+ [SPI_FILL] = SPI_6348_FILL,
+ [SPI_IR_MASK] = SPI_6348_IR_MASK,
+ [SPI_IR_STAT] = SPI_6348_IR_STAT,
+ [SPI_RX] = SPI_6348_RX,
+ [SPI_RX_SIZE] = SPI_6348_RX_SIZE,
+ [SPI_TX] = SPI_6348_TX,
+ [SPI_TX_SIZE] = SPI_6348_TX_SIZE,
+};
+
+static const unsigned long bcm6358_spi_regs[] = {
+ [SPI_CLK] = SPI_6358_CLK,
+ [SPI_CMD] = SPI_6358_CMD,
+ [SPI_CTL] = SPI_6358_CTL,
+ [SPI_CTL_SHIFT] = SPI_6358_CTL_SHIFT,
+ [SPI_FILL] = SPI_6358_FILL,
+ [SPI_IR_MASK] = SPI_6358_IR_MASK,
+ [SPI_IR_STAT] = SPI_6358_IR_STAT,
+ [SPI_RX] = SPI_6358_RX,
+ [SPI_RX_SIZE] = SPI_6358_RX_SIZE,
+ [SPI_TX] = SPI_6358_TX,
+ [SPI_TX_SIZE] = SPI_6358_TX_SIZE,
+};
+
+static const struct udevice_id bcm63xx_spi_ids[] = {
+ {
+ .compatible = "brcm,bcm6348-spi",
+ .data = (ulong)&bcm6348_spi_regs,
+ }, {
+ .compatible = "brcm,bcm6358-spi",
+ .data = (ulong)&bcm6358_spi_regs,
+ }, { /* sentinel */ }
+};
+
+static int bcm63xx_spi_child_pre_probe(struct udevice *dev)
+{
+ struct bcm63xx_spi_priv *priv = dev_get_priv(dev->parent);
+ const unsigned long *regs = priv->regs;
+ struct spi_slave *slave = dev_get_parent_priv(dev);
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+
+ /* check cs */
+ if (plat->cs >= priv->num_cs) {
+ printf("no cs %u\n", plat->cs);
+ return -ENODEV;
+ }
+
+ /* max read/write sizes */
+ slave->max_read_size = regs[SPI_RX_SIZE];
+ slave->max_write_size = regs[SPI_TX_SIZE];
+
+ return 0;
+}
+
+static int bcm63xx_spi_probe(struct udevice *dev)
+{
+ struct bcm63xx_spi_priv *priv = dev_get_priv(dev);
+ const unsigned long *regs =
+ (const unsigned long *)dev_get_driver_data(dev);
+ struct reset_ctl rst_ctl;
+ struct clk clk;
+ int ret;
+
+ priv->base = dev_remap_addr(dev);
+ if (!priv->base)
+ return -EINVAL;
+
+ priv->regs = regs;
+ priv->num_cs = dev_read_u32_default(dev, "num-cs", 8);
+
+ /* enable clock */
+ ret = clk_get_by_index(dev, 0, &clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(&clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_free(&clk);
+ if (ret < 0)
+ return ret;
+
+ /* perform reset */
+ ret = reset_get_by_index(dev, 0, &rst_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = reset_deassert(&rst_ctl);
+ if (ret < 0)
+ return ret;
+
+ ret = reset_free(&rst_ctl);
+ if (ret < 0)
+ return ret;
+
+ /* initialize hardware */
+ writeb_be(0, priv->base + regs[SPI_IR_MASK]);
+
+ /* set fill register */
+ writeb_be(0xff, priv->base + regs[SPI_FILL]);
+
+ return 0;
+}
+
+U_BOOT_DRIVER(bcm63xx_spi) = {
+ .name = "bcm63xx_spi",
+ .id = UCLASS_SPI,
+ .of_match = bcm63xx_spi_ids,
+ .ops = &bcm63xx_spi_ops,
+ .priv_auto = sizeof(struct bcm63xx_spi_priv),
+ .child_pre_probe = bcm63xx_spi_child_pre_probe,
+ .probe = bcm63xx_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/bcmstb_spi.c b/roms/u-boot/drivers/spi/bcmstb_spi.c
new file mode 100644
index 000000000..503c47a27
--- /dev/null
+++ b/roms/u-boot/drivers/spi/bcmstb_spi.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2018 Cisco Systems, Inc.
+ *
+ * Author: Thomas Fitzsimmons <fitzsim@fitzsim.org>
+ */
+
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <command.h>
+#include <config.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <time.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define SPBR_MIN 8
+#define BITS_PER_WORD 8
+
+#define NUM_TXRAM 32
+#define NUM_RXRAM 32
+#define NUM_CDRAM 16
+
+/* hif_mspi register structure. */
+struct bcmstb_hif_mspi_regs {
+ u32 spcr0_lsb; /* 0x000 */
+ u32 spcr0_msb; /* 0x004 */
+ u32 spcr1_lsb; /* 0x008 */
+ u32 spcr1_msb; /* 0x00c */
+ u32 newqp; /* 0x010 */
+ u32 endqp; /* 0x014 */
+ u32 spcr2; /* 0x018 */
+ u32 reserved0; /* 0x01c */
+ u32 mspi_status; /* 0x020 */
+ u32 cptqp; /* 0x024 */
+ u32 spcr3; /* 0x028 */
+ u32 revision; /* 0x02c */
+ u32 reserved1[4]; /* 0x030 */
+ u32 txram[NUM_TXRAM]; /* 0x040 */
+ u32 rxram[NUM_RXRAM]; /* 0x0c0 */
+ u32 cdram[NUM_CDRAM]; /* 0x140 */
+ u32 write_lock; /* 0x180 */
+};
+
+/* hif_mspi masks. */
+#define HIF_MSPI_SPCR2_CONT_AFTER_CMD_MASK 0x00000080
+#define HIF_MSPI_SPCR2_SPE_MASK 0x00000040
+#define HIF_MSPI_SPCR2_SPIFIE_MASK 0x00000020
+#define HIF_MSPI_WRITE_LOCK_WRITE_LOCK_MASK 0x00000001
+
+/* bspi offsets. */
+#define BSPI_MAST_N_BOOT_CTRL 0x008
+
+/* bspi_raf is not used in this driver. */
+
+/* hif_spi_intr2 offsets and masks. */
+#define HIF_SPI_INTR2_CPU_CLEAR 0x08
+#define HIF_SPI_INTR2_CPU_MASK_SET 0x10
+#define HIF_SPI_INTR2_CPU_MASK_CLEAR 0x14
+#define HIF_SPI_INTR2_CPU_SET_MSPI_DONE_MASK 0x00000020
+
+/* SPI transfer timeout in milliseconds. */
+#define HIF_MSPI_WAIT 10
+
+enum bcmstb_base_type {
+ HIF_MSPI,
+ BSPI,
+ HIF_SPI_INTR2,
+ CS_REG,
+ BASE_LAST,
+};
+
+struct bcmstb_spi_plat {
+ void *base[4];
+};
+
+struct bcmstb_spi_priv {
+ struct bcmstb_hif_mspi_regs *regs;
+ void *bspi;
+ void *hif_spi_intr2;
+ void *cs_reg;
+ int default_cs;
+ int curr_cs;
+ uint tx_slot;
+ uint rx_slot;
+ u8 saved_cmd[NUM_CDRAM];
+ uint saved_cmd_len;
+ void *saved_din_addr;
+};
+
+static int bcmstb_spi_of_to_plat(struct udevice *bus)
+{
+ struct bcmstb_spi_plat *plat = dev_get_plat(bus);
+ const void *fdt = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+ int ret = 0;
+ int i = 0;
+ struct fdt_resource resource = { 0 };
+ char *names[BASE_LAST] = { "hif_mspi", "bspi", "hif_spi_intr2",
+ "cs_reg" };
+ const phys_addr_t defaults[BASE_LAST] = { BCMSTB_HIF_MSPI_BASE,
+ BCMSTB_BSPI_BASE,
+ BCMSTB_HIF_SPI_INTR2,
+ BCMSTB_CS_REG };
+
+ for (i = 0; i < BASE_LAST; i++) {
+ plat->base[i] = (void *)defaults[i];
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ names[i], &resource);
+ if (ret) {
+ printf("%s: Assuming BCMSTB SPI %s address 0x%p\n",
+ __func__, names[i], (void *)defaults[i]);
+ } else {
+ plat->base[i] = (void *)resource.start;
+ debug("BCMSTB SPI %s address: 0x%p\n",
+ names[i], (void *)plat->base[i]);
+ }
+ }
+
+ return 0;
+}
+
+static void bcmstb_spi_hw_set_parms(struct bcmstb_spi_priv *priv)
+{
+ writel(SPBR_MIN, &priv->regs->spcr0_lsb);
+ writel(BITS_PER_WORD << 2 | SPI_MODE_3, &priv->regs->spcr0_msb);
+}
+
+static void bcmstb_spi_enable_interrupt(void *base, u32 mask)
+{
+ void *reg = base + HIF_SPI_INTR2_CPU_MASK_CLEAR;
+
+ writel(readl(reg) | mask, reg);
+ readl(reg);
+}
+
+static void bcmstb_spi_disable_interrupt(void *base, u32 mask)
+{
+ void *reg = base + HIF_SPI_INTR2_CPU_MASK_SET;
+
+ writel(readl(reg) | mask, reg);
+ readl(reg);
+}
+
+static void bcmstb_spi_clear_interrupt(void *base, u32 mask)
+{
+ void *reg = base + HIF_SPI_INTR2_CPU_CLEAR;
+
+ writel(readl(reg) | mask, reg);
+ readl(reg);
+}
+
+static int bcmstb_spi_probe(struct udevice *bus)
+{
+ struct bcmstb_spi_plat *plat = dev_get_plat(bus);
+ struct bcmstb_spi_priv *priv = dev_get_priv(bus);
+
+ priv->regs = plat->base[HIF_MSPI];
+ priv->bspi = plat->base[BSPI];
+ priv->hif_spi_intr2 = plat->base[HIF_SPI_INTR2];
+ priv->cs_reg = plat->base[CS_REG];
+ priv->default_cs = 0;
+ priv->curr_cs = -1;
+ priv->tx_slot = 0;
+ priv->rx_slot = 0;
+ memset(priv->saved_cmd, 0, NUM_CDRAM);
+ priv->saved_cmd_len = 0;
+ priv->saved_din_addr = NULL;
+
+ debug("spi_xfer: tx regs: 0x%p\n", &priv->regs->txram[0]);
+ debug("spi_xfer: rx regs: 0x%p\n", &priv->regs->rxram[0]);
+
+ /* Disable BSPI. */
+ writel(1, priv->bspi + BSPI_MAST_N_BOOT_CTRL);
+ readl(priv->bspi + BSPI_MAST_N_BOOT_CTRL);
+
+ /* Set up interrupts. */
+ bcmstb_spi_disable_interrupt(priv->hif_spi_intr2, 0xffffffff);
+ bcmstb_spi_clear_interrupt(priv->hif_spi_intr2, 0xffffffff);
+ bcmstb_spi_enable_interrupt(priv->hif_spi_intr2,
+ HIF_SPI_INTR2_CPU_SET_MSPI_DONE_MASK);
+
+ /* Set up control registers. */
+ writel(0, &priv->regs->spcr1_lsb);
+ writel(0, &priv->regs->spcr1_msb);
+ writel(0, &priv->regs->newqp);
+ writel(0, &priv->regs->endqp);
+ writel(HIF_MSPI_SPCR2_SPIFIE_MASK, &priv->regs->spcr2);
+ writel(0, &priv->regs->spcr3);
+
+ bcmstb_spi_hw_set_parms(priv);
+
+ return 0;
+}
+
+static void bcmstb_spi_submit(struct bcmstb_spi_priv *priv, bool done)
+{
+ debug("WR NEWQP: %d\n", 0);
+ writel(0, &priv->regs->newqp);
+
+ debug("WR ENDQP: %d\n", priv->tx_slot - 1);
+ writel(priv->tx_slot - 1, &priv->regs->endqp);
+
+ if (done) {
+ debug("WR CDRAM[%d]: %02x\n", priv->tx_slot - 1,
+ readl(&priv->regs->cdram[priv->tx_slot - 1]) & ~0x80);
+ writel(readl(&priv->regs->cdram[priv->tx_slot - 1]) & ~0x80,
+ &priv->regs->cdram[priv->tx_slot - 1]);
+ }
+
+ /* Force chip select first time. */
+ if (priv->curr_cs != priv->default_cs) {
+ debug("spi_xfer: switching chip select to %d\n",
+ priv->default_cs);
+ writel((readl(priv->cs_reg) & ~0xff) | (1 << priv->default_cs),
+ priv->cs_reg);
+ readl(priv->cs_reg);
+ udelay(10);
+ priv->curr_cs = priv->default_cs;
+ }
+
+ debug("WR WRITE_LOCK: %02x\n", 1);
+ writel((readl(&priv->regs->write_lock) &
+ ~HIF_MSPI_WRITE_LOCK_WRITE_LOCK_MASK) | 1,
+ &priv->regs->write_lock);
+ readl(&priv->regs->write_lock);
+
+ debug("WR SPCR2: %02x\n",
+ HIF_MSPI_SPCR2_SPIFIE_MASK |
+ HIF_MSPI_SPCR2_SPE_MASK |
+ HIF_MSPI_SPCR2_CONT_AFTER_CMD_MASK);
+ writel(HIF_MSPI_SPCR2_SPIFIE_MASK |
+ HIF_MSPI_SPCR2_SPE_MASK |
+ HIF_MSPI_SPCR2_CONT_AFTER_CMD_MASK,
+ &priv->regs->spcr2);
+}
+
+static int bcmstb_spi_wait(struct bcmstb_spi_priv *priv)
+{
+ u32 start_time = get_timer(0);
+ u32 status = readl(&priv->regs->mspi_status);
+
+ while (!(status & 1)) {
+ if (get_timer(start_time) > HIF_MSPI_WAIT)
+ return -ETIMEDOUT;
+ status = readl(&priv->regs->mspi_status);
+ }
+
+ writel(readl(&priv->regs->mspi_status) & ~1, &priv->regs->mspi_status);
+ bcmstb_spi_clear_interrupt(priv->hif_spi_intr2,
+ HIF_SPI_INTR2_CPU_SET_MSPI_DONE_MASK);
+
+ return 0;
+}
+
+static int bcmstb_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ uint len = bitlen / 8;
+ uint tx_len = len;
+ uint rx_len = len;
+ const u8 *out_bytes = (u8 *)dout;
+ u8 *in_bytes = (u8 *)din;
+ struct udevice *bus = dev_get_parent(dev);
+ struct bcmstb_spi_priv *priv = dev_get_priv(bus);
+ struct bcmstb_hif_mspi_regs *regs = priv->regs;
+
+ debug("spi_xfer: %d, t: 0x%p, r: 0x%p, f: %lx\n",
+ len, dout, din, flags);
+ debug("spi_xfer: chip select: %x\n", readl(priv->cs_reg) & 0xff);
+ debug("spi_xfer: tx addr: 0x%p\n", &regs->txram[0]);
+ debug("spi_xfer: rx addr: 0x%p\n", &regs->rxram[0]);
+ debug("spi_xfer: cd addr: 0x%p\n", &regs->cdram[0]);
+
+ if (flags & SPI_XFER_END) {
+ debug("spi_xfer: clearing saved din address: 0x%p\n",
+ priv->saved_din_addr);
+ priv->saved_din_addr = NULL;
+ priv->saved_cmd_len = 0;
+ memset(priv->saved_cmd, 0, NUM_CDRAM);
+ }
+
+ if (bitlen == 0)
+ return 0;
+
+ if (bitlen % 8) {
+ printf("%s: Non-byte-aligned transfer\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (flags & ~(SPI_XFER_BEGIN | SPI_XFER_END)) {
+ printf("%s: Unsupported flags: %lx\n", __func__, flags);
+ return -EOPNOTSUPP;
+ }
+
+ if (flags & SPI_XFER_BEGIN) {
+ priv->tx_slot = 0;
+ priv->rx_slot = 0;
+
+ if (out_bytes && len > NUM_CDRAM) {
+ printf("%s: Unable to save transfer\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (out_bytes && !(flags & SPI_XFER_END)) {
+ /*
+ * This is the start of a transmit operation
+ * that will need repeating if the calling
+ * code polls for the result. Save it for
+ * subsequent transmission.
+ */
+ debug("spi_xfer: saving command: %x, %d\n",
+ out_bytes[0], len);
+ priv->saved_cmd_len = len;
+ memcpy(priv->saved_cmd, out_bytes, priv->saved_cmd_len);
+ }
+ }
+
+ if (!(flags & (SPI_XFER_BEGIN | SPI_XFER_END))) {
+ if (priv->saved_din_addr == din) {
+ /*
+ * The caller is polling for status. Repeat
+ * the last transmission.
+ */
+ int ret = 0;
+
+ debug("spi_xfer: Making recursive call\n");
+ ret = bcmstb_spi_xfer(dev, priv->saved_cmd_len * 8,
+ priv->saved_cmd, NULL,
+ SPI_XFER_BEGIN);
+ if (ret) {
+ printf("%s: Recursive call failed\n", __func__);
+ return ret;
+ }
+ } else {
+ debug("spi_xfer: saving din address: 0x%p\n", din);
+ priv->saved_din_addr = din;
+ }
+ }
+
+ while (rx_len > 0) {
+ priv->rx_slot = priv->tx_slot;
+
+ while (priv->tx_slot < NUM_CDRAM && tx_len > 0) {
+ bcmstb_spi_hw_set_parms(priv);
+ debug("WR TXRAM[%d]: %02x\n", priv->tx_slot,
+ out_bytes ? out_bytes[len - tx_len] : 0xff);
+ writel(out_bytes ? out_bytes[len - tx_len] : 0xff,
+ &regs->txram[priv->tx_slot << 1]);
+ debug("WR CDRAM[%d]: %02x\n", priv->tx_slot, 0x8e);
+ writel(0x8e, &regs->cdram[priv->tx_slot]);
+ priv->tx_slot++;
+ tx_len--;
+ if (!in_bytes)
+ rx_len--;
+ }
+
+ debug("spi_xfer: early return clauses: %d, %d, %d\n",
+ len <= NUM_CDRAM,
+ !in_bytes,
+ (flags & (SPI_XFER_BEGIN |
+ SPI_XFER_END)) == SPI_XFER_BEGIN);
+ if (len <= NUM_CDRAM &&
+ !in_bytes &&
+ (flags & (SPI_XFER_BEGIN | SPI_XFER_END)) == SPI_XFER_BEGIN)
+ return 0;
+
+ bcmstb_spi_submit(priv, tx_len == 0);
+
+ if (bcmstb_spi_wait(priv) == -ETIMEDOUT) {
+ printf("%s: Timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ priv->tx_slot %= NUM_CDRAM;
+
+ if (in_bytes) {
+ while (priv->rx_slot < NUM_CDRAM && rx_len > 0) {
+ in_bytes[len - rx_len] =
+ readl(&regs->rxram[(priv->rx_slot << 1)
+ + 1])
+ & 0xff;
+ debug("RD RXRAM[%d]: %02x\n",
+ priv->rx_slot, in_bytes[len - rx_len]);
+ priv->rx_slot++;
+ rx_len--;
+ }
+ }
+ }
+
+ if (flags & SPI_XFER_END) {
+ debug("WR WRITE_LOCK: %02x\n", 0);
+ writel((readl(&priv->regs->write_lock) &
+ ~HIF_MSPI_WRITE_LOCK_WRITE_LOCK_MASK) | 0,
+ &priv->regs->write_lock);
+ readl(&priv->regs->write_lock);
+ }
+
+ return 0;
+}
+
+static int bcmstb_spi_set_speed(struct udevice *dev, uint speed)
+{
+ return 0;
+}
+
+static int bcmstb_spi_set_mode(struct udevice *dev, uint mode)
+{
+ return 0;
+}
+
+static const struct dm_spi_ops bcmstb_spi_ops = {
+ .xfer = bcmstb_spi_xfer,
+ .set_speed = bcmstb_spi_set_speed,
+ .set_mode = bcmstb_spi_set_mode,
+};
+
+static const struct udevice_id bcmstb_spi_id[] = {
+ { .compatible = "brcm,spi-brcmstb" },
+ { }
+};
+
+U_BOOT_DRIVER(bcmstb_spi) = {
+ .name = "bcmstb_spi",
+ .id = UCLASS_SPI,
+ .of_match = bcmstb_spi_id,
+ .ops = &bcmstb_spi_ops,
+ .of_to_plat = bcmstb_spi_of_to_plat,
+ .probe = bcmstb_spi_probe,
+ .plat_auto = sizeof(struct bcmstb_spi_plat),
+ .priv_auto = sizeof(struct bcmstb_spi_priv),
+};
diff --git a/roms/u-boot/drivers/spi/ca_sflash.c b/roms/u-boot/drivers/spi/ca_sflash.c
new file mode 100644
index 000000000..38bddd386
--- /dev/null
+++ b/roms/u-boot/drivers/spi/ca_sflash.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Cortina SPI-FLASH Controller
+ *
+ * Copyright (C) 2020 Cortina Access Inc. All Rights Reserved.
+ *
+ * Author: PengPeng Chen <pengpeng.chen@cortina-access.com>
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <clk.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <linux/compat.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/sizes.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <reset.h>
+#include <asm/global_data.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct ca_sflash_regs {
+ u32 idr; /* 0x00:Flash word ID Register */
+ u32 tc; /* 0x04:Flash Timeout Counter Register */
+ u32 sr; /* 0x08:Flash Status Register */
+ u32 tr; /* 0x0C:Flash Type Register */
+ u32 asr; /* 0x10:Flash ACCESS START/BUSY Register */
+ u32 isr; /* 0x14:Flash Interrupt Status Register */
+ u32 imr; /* 0x18:Flash Interrupt Mask Register */
+ u32 fcr; /* 0x1C:NAND Flash FIFO Control Register */
+ u32 ffsr; /* 0x20:Flash FIFO Status Register */
+ u32 ffar; /* 0x24:Flash FIFO ADDRESS Register */
+ u32 ffmar; /* 0x28:Flash FIFO MATCHING ADDRESS Register */
+ u32 ffdr; /* 0x2C:Flash FIFO Data Register */
+ u32 ar; /* 0x30:Serial Flash Access Register */
+ u32 ear; /* 0x34:Serial Flash Extend Access Register */
+ u32 adr; /* 0x38:Serial Flash ADdress Register */
+ u32 dr; /* 0x3C:Serial Flash Data Register */
+ u32 tmr; /* 0x40:Serial Flash Timing Register */
+};
+
+/*
+ * FLASH_TYPE
+ */
+#define CA_FLASH_TR_PIN BIT(15)
+#define CA_FLASH_TR_TYPE_MSK GENMASK(14, 12)
+#define CA_FLASH_TR_TYPE(tp) (((tp) << 12) & CA_FLASH_TR_TYPE_MSK)
+#define CA_FLASH_TR_WIDTH BIT(11)
+#define CA_FLASH_TR_SIZE_MSK GENMASK(10, 9)
+#define CA_FLASH_TR_SIZE(sz) (((sz) << 9) & CA_FLASH_TR_SIZE_MSK)
+
+/*
+ * FLASH_FLASH_ACCESS_START
+ */
+#define CA_FLASH_ASR_IND_START_EN BIT(1)
+#define CA_FLASH_ASR_DMA_START_EN BIT(3)
+#define CA_FLASH_ASR_WR_ACCESS_EN BIT(9)
+
+/*
+ * FLASH_FLASH_INTERRUPT
+ */
+#define CA_FLASH_ISR_REG_IRQ BIT(1)
+#define CA_FLASH_ISR_FIFO_IRQ BIT(2)
+
+/*
+ * FLASH_SF_ACCESS
+ */
+#define CA_SF_AR_OP_MSK GENMASK(7, 0)
+#define CA_SF_AR_OP(op) (((op) << 0) & CA_SF_AR_OP_MSK)
+#define CA_SF_AR_ACCODE_MSK GENMASK(11, 8)
+#define CA_SF_AR_ACCODE(ac) (((ac) << 8) & CA_SF_AR_ACCODE_MSK)
+#define CA_SF_AR_FORCE_TERM BIT(12)
+#define CA_SF_AR_FORCE_BURST BIT(13)
+#define CA_SF_AR_AUTO_MODE_EN BIT(15)
+#define CA_SF_AR_CHIP_EN_ALT BIT(16)
+#define CA_SF_AR_HI_SPEED_RD BIT(17)
+#define CA_SF_AR_MIO_INF_DC BIT(24)
+#define CA_SF_AR_MIO_INF_AC BIT(25)
+#define CA_SF_AR_MIO_INF_CC BIT(26)
+#define CA_SF_AR_DDR_MSK GENMASK(29, 28)
+#define CA_SF_AR_DDR(ddr) (((ddr) << 28) & CA_SF_AR_DDR_MSK)
+#define CA_SF_AR_MIO_INF_MSK GENMASK(31, 30)
+#define CA_SF_AR_MIO_INF(io) (((io) << 30) & CA_SF_AR_MIO_INF_MSK)
+
+/*
+ * FLASH_SF_EXT_ACCESS
+ */
+#define CA_SF_EAR_OP_MSK GENMASK(7, 0)
+#define CA_SF_EAR_OP(op) (((op) << 0) & CA_SF_EAR_OP_MSK)
+#define CA_SF_EAR_DATA_CNT_MSK GENMASK(20, 8)
+#define CA_SF_EAR_DATA_CNT(cnt) (((cnt) << 8) & CA_SF_EAR_DATA_CNT_MSK)
+#define CA_SF_EAR_DATA_CNT_MAX (4096)
+#define CA_SF_EAR_ADDR_CNT_MSK GENMASK(23, 21)
+#define CA_SF_EAR_ADDR_CNT(cnt) (((cnt) << 21) & CA_SF_EAR_ADDR_CNT_MSK)
+#define CA_SF_EAR_ADDR_CNT_MAX (5)
+#define CA_SF_EAR_DUMY_CNT_MSK GENMASK(29, 24)
+#define CA_SF_EAR_DUMY_CNT(cnt) (((cnt) << 24) & CA_SF_EAR_DUMY_CNT_MSK)
+#define CA_SF_EAR_DUMY_CNT_MAX (32)
+#define CA_SF_EAR_DRD_CMD_EN BIT(31)
+
+/*
+ * FLASH_SF_ADDRESS
+ */
+#define CA_SF_ADR_REG_MSK GENMASK(31, 0)
+#define CA_SF_ADR_REG(addr) (((addr) << 0) & CA_SF_ADR_REG_MSK)
+
+/*
+ * FLASH_SF_DATA
+ */
+#define CA_SF_DR_REG_MSK GENMASK(31, 0)
+#define CA_SF_DR_REG(addr) (((addr) << 0) & CA_SF_DR_REG_MSK)
+
+/*
+ * FLASH_SF_TIMING
+ */
+#define CA_SF_TMR_IDLE_MSK GENMASK(7, 0)
+#define CA_SF_TMR_IDLE(idle) (((idle) << 0) & CA_SF_TMR_IDLE_MSK)
+#define CA_SF_TMR_HOLD_MSK GENMASK(15, 8)
+#define CA_SF_TMR_HOLD(hold) (((hold) << 8) & CA_SF_TMR_HOLD_MSK)
+#define CA_SF_TMR_SETUP_MSK GENMASK(23, 16)
+#define CA_SF_TMR_SETUP(setup) (((setup) << 16) & CA_SF_TMR_SETUP_MSK)
+#define CA_SF_TMR_CLK_MSK GENMASK(26, 24)
+#define CA_SF_TMR_CLK(clk) (((clk) << 24) & CA_SF_TMR_CLK_MSK)
+
+#define CA_SFLASH_IND_WRITE 0
+#define CA_SFLASH_IND_READ 1
+#define CA_SFLASH_MEM_MAP 3
+#define CA_SFLASH_FIFO_TIMEOUT_US 30000
+#define CA_SFLASH_BUSY_TIMEOUT_US 40000
+
+#define CA_SF_AC_OP 0x00
+#define CA_SF_AC_OP_1_DATA 0x01
+#define CA_SF_AC_OP_2_DATA 0x02
+#define CA_SF_AC_OP_3_DATA 0x03
+#define CA_SF_AC_OP_4_DATA 0x04
+#define CA_SF_AC_OP_3_ADDR 0x05
+#define CA_SF_AC_OP_4_ADDR (CA_SF_AC_OP_3_ADDR)
+#define CA_SF_AC_OP_3_ADDR_1_DATA 0x06
+#define CA_SF_AC_OP_4_ADDR_1_DATA (CA_SF_AC_OP_3_ADDR_1_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_2_DATA 0x07
+#define CA_SF_AC_OP_4_ADDR_2_DATA (CA_SF_AC_OP_3_ADDR_2_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_3_DATA 0x08
+#define CA_SF_AC_OP_4_ADDR_3_DATA (CA_SF_AC_OP_3_ADDR_3_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_4_DATA 0x09
+#define CA_SF_AC_OP_4_ADDR_4_DATA (CA_SF_AC_OP_3_ADDR_4_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_X_1_DATA 0x0A
+#define CA_SF_AC_OP_4_ADDR_X_1_DATA (CA_SF_AC_OP_3_ADDR_X_1_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_X_2_DATA 0x0B
+#define CA_SF_AC_OP_4_ADDR_X_2_DATA (CA_SF_AC_OP_3_ADDR_X_2_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_X_3_DATA 0x0C
+#define CA_SF_AC_OP_4_ADDR_X_3_DATA (CA_SF_AC_OP_3_ADDR_X_3_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_X_4_DATA 0x0D
+#define CA_SF_AC_OP_4_ADDR_X_4_DATA (CA_SF_AC_OP_3_ADDR_X_4_DATA << 2)
+#define CA_SF_AC_OP_3_ADDR_4X_1_DATA 0x0E
+#define CA_SF_AC_OP_4_ADDR_4X_1_DATA (CA_SF_AC_OP_3_ADDR_4X_1_DATA << 2)
+#define CA_SF_AC_OP_EXTEND 0x0F
+
+#define CA_SF_ACCESS_MIO_SINGLE 0
+#define CA_SF_ACCESS_MIO_DUAL 1
+#define CA_SF_ACCESS_MIO_QUARD 2
+
+enum access_type {
+ RD_ACCESS,
+ WR_ACCESS,
+};
+
+struct ca_sflash_priv {
+ struct ca_sflash_regs *regs;
+ u8 rx_width;
+ u8 tx_width;
+};
+
+/*
+ * This function doesn't do anything except help with debugging
+ */
+static int ca_sflash_claim_bus(struct udevice *dev)
+{
+ debug("%s:\n", __func__);
+ return 0;
+}
+
+static int ca_sflash_release_bus(struct udevice *dev)
+{
+ debug("%s:\n", __func__);
+ return 0;
+}
+
+static int ca_sflash_set_speed(struct udevice *dev, uint speed)
+{
+ debug("%s:\n", __func__);
+ return 0;
+}
+
+static int ca_sflash_set_mode(struct udevice *dev, uint mode)
+{
+ struct ca_sflash_priv *priv = dev_get_priv(dev);
+
+ if (mode & SPI_RX_QUAD)
+ priv->rx_width = 4;
+ else if (mode & SPI_RX_DUAL)
+ priv->rx_width = 2;
+ else
+ priv->rx_width = 1;
+
+ if (mode & SPI_TX_QUAD)
+ priv->tx_width = 4;
+ else if (mode & SPI_TX_DUAL)
+ priv->tx_width = 2;
+ else
+ priv->tx_width = 1;
+
+ debug("%s: mode=%d, rx_width=%d, tx_width=%d\n",
+ __func__, mode, priv->rx_width, priv->tx_width);
+
+ return 0;
+}
+
+static int _ca_sflash_wait_for_not_busy(struct ca_sflash_priv *priv)
+{
+ u32 asr;
+
+ if (readl_poll_timeout(&priv->regs->asr, asr,
+ !(asr & CA_FLASH_ASR_IND_START_EN),
+ CA_SFLASH_BUSY_TIMEOUT_US)) {
+ pr_err("busy timeout (stat:%#x)\n", asr);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int _ca_sflash_wait_cmd(struct ca_sflash_priv *priv,
+ enum access_type type)
+{
+ if (type == WR_ACCESS) {
+ /* Enable write access and start the sflash indirect access */
+ clrsetbits_le32(&priv->regs->asr, GENMASK(31, 0),
+ CA_FLASH_ASR_WR_ACCESS_EN
+ | CA_FLASH_ASR_IND_START_EN);
+ } else if (type == RD_ACCESS) {
+ /* Start the sflash indirect access */
+ clrsetbits_le32(&priv->regs->asr, GENMASK(31, 0),
+ CA_FLASH_ASR_IND_START_EN);
+ } else {
+ printf("%s: !error access type.\n", __func__);
+ return -1;
+ }
+
+ /* Wait until the action (rd/wr) has completed */
+ return _ca_sflash_wait_for_not_busy(priv);
+}
+
+static int _ca_sflash_read(struct ca_sflash_priv *priv,
+ u8 *buf, unsigned int data_len)
+{
+ u32 reg_data;
+ int len;
+
+ len = data_len;
+ while (len >= 4) {
+ if (_ca_sflash_wait_cmd(priv, RD_ACCESS))
+ return -1;
+ reg_data = readl(&priv->regs->dr);
+ *buf++ = reg_data & 0xFF;
+ *buf++ = (reg_data >> 8) & 0xFF;
+ *buf++ = (reg_data >> 16) & 0xFF;
+ *buf++ = (reg_data >> 24) & 0xFF;
+ len -= 4;
+ debug("%s: reg_data=%#08x\n",
+ __func__, reg_data);
+ }
+
+ if (len > 0) {
+ if (_ca_sflash_wait_cmd(priv, RD_ACCESS))
+ return -1;
+ reg_data = readl(&priv->regs->dr);
+ debug("%s: reg_data=%#08x\n",
+ __func__, reg_data);
+ }
+
+ switch (len) {
+ case 3:
+ *buf++ = reg_data & 0xFF;
+ *buf++ = (reg_data >> 8) & 0xFF;
+ *buf++ = (reg_data >> 16) & 0xFF;
+ break;
+ case 2:
+ *buf++ = reg_data & 0xFF;
+ *buf++ = (reg_data >> 8) & 0xFF;
+ break;
+ case 1:
+ *buf++ = reg_data & 0xFF;
+ break;
+ case 0:
+ break;
+ default:
+ printf("%s: error data_length %d!\n", __func__, len);
+ }
+
+ return 0;
+}
+
+static int _ca_sflash_mio_set(struct ca_sflash_priv *priv,
+ u8 width)
+{
+ if (width == 4) {
+ setbits_le32(&priv->regs->ar,
+ CA_SF_AR_MIO_INF_DC
+ | CA_SF_AR_MIO_INF(CA_SF_ACCESS_MIO_QUAD)
+ | CA_SF_AR_FORCE_BURST);
+ } else if (width == 2) {
+ setbits_le32(&priv->regs->ar,
+ CA_SF_AR_MIO_INF_DC
+ | CA_SF_AR_MIO_INF(CA_SF_ACCESS_MIO_DUAL)
+ | CA_SF_AR_FORCE_BURST);
+ } else if (width == 1) {
+ setbits_le32(&priv->regs->ar,
+ CA_SF_AR_MIO_INF(CA_SF_ACCESS_MIO_SINGLE)
+ | CA_SF_AR_FORCE_BURST);
+ } else {
+ printf("%s: error rx/tx width %d!\n", __func__, width);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int _ca_sflash_write(struct ca_sflash_priv *priv,
+ u8 *buf, unsigned int data_len)
+{
+ u32 reg_data;
+ int len;
+
+ len = data_len;
+ while (len > 0) {
+ reg_data = buf[0]
+ | (buf[1] << 8)
+ | (buf[2] << 16)
+ | (buf[3] << 24);
+
+ debug("%s: reg_data=%#08x\n",
+ __func__, reg_data);
+ /* Fill data */
+ clrsetbits_le32(&priv->regs->dr, GENMASK(31, 0), reg_data);
+
+ if (_ca_sflash_wait_cmd(priv, WR_ACCESS))
+ return -1;
+
+ len -= 4;
+ buf += 4;
+ }
+
+ return 0;
+}
+
+static int _ca_sflash_access_data(struct ca_sflash_priv *priv,
+ struct spi_mem_op *op)
+{
+ int total_cnt;
+ unsigned int len;
+ unsigned int data_cnt = op->data.nbytes;
+ u64 addr_offset = op->addr.val;
+ u8 addr_cnt = op->addr.nbytes;
+ u8 *data_buf = NULL;
+ u8 *buf = NULL;
+
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ data_buf = (u8 *)op->data.buf.in;
+ else
+ data_buf = (u8 *)op->data.buf.out;
+
+ if (data_cnt > CA_SF_EAR_DATA_CNT_MAX)
+ buf = malloc(CA_SF_EAR_DATA_CNT_MAX);
+ else
+ buf = malloc(data_cnt);
+ if (!buf)
+ return -1;
+
+ total_cnt = data_cnt;
+ while (total_cnt > 0) {
+ /* Fill address */
+ if (addr_cnt > 0)
+ clrsetbits_le32(&priv->regs->adr,
+ GENMASK(31, 0), (u32)addr_offset);
+
+ if (total_cnt > CA_SF_EAR_DATA_CNT_MAX) {
+ len = CA_SF_EAR_DATA_CNT_MAX;
+ addr_offset += CA_SF_EAR_DATA_CNT_MAX;
+ /* Clear start bit before next bulk read */
+ clrbits_le32(&priv->regs->asr, GENMASK(31, 0));
+ } else {
+ len = total_cnt;
+ }
+
+ memset(buf, 0, len);
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (_ca_sflash_read(priv, buf, len))
+ break;
+ memcpy(data_buf, buf, len);
+ } else {
+ memcpy(buf, data_buf, len);
+ if (_ca_sflash_write(priv, buf, len))
+ break;
+ }
+
+ total_cnt -= len;
+ data_buf += len;
+ }
+ if (buf)
+ free(buf);
+
+ return total_cnt > 0 ? -1 : 0;
+}
+
+static int _ca_sflash_issue_cmd(struct ca_sflash_priv *priv,
+ struct spi_mem_op *op, u8 opcode)
+{
+ u8 dummy_cnt = op->dummy.nbytes;
+ u8 addr_cnt = op->addr.nbytes;
+ u8 mio_width;
+ unsigned int data_cnt = op->data.nbytes;
+ u64 addr_offset = op->addr.val;
+
+ /* Set the access register */
+ clrsetbits_le32(&priv->regs->ar,
+ GENMASK(31, 0), CA_SF_AR_ACCODE(opcode));
+
+ if (opcode == CA_SF_AC_OP_EXTEND) { /* read_data, write_data */
+ if (data_cnt > 6) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ mio_width = priv->rx_width;
+ else
+ mio_width = priv->tx_width;
+ if (_ca_sflash_mio_set(priv, mio_width))
+ return -1;
+ }
+ debug("%s: FLASH ACCESS reg=%#08x\n",
+ __func__, readl(&priv->regs->ar));
+
+ /* Use command in extend_access register */
+ clrsetbits_le32(&priv->regs->ear,
+ GENMASK(31, 0), CA_SF_EAR_OP(op->cmd.opcode)
+ | CA_SF_EAR_DUMY_CNT(dummy_cnt * 8 - 1)
+ | CA_SF_EAR_ADDR_CNT(addr_cnt - 1)
+ | CA_SF_EAR_DATA_CNT(4 - 1)
+ | CA_SF_EAR_DRD_CMD_EN);
+ debug("%s: FLASH EXT ACCESS reg=%#08x\n",
+ __func__, readl(&priv->regs->ear));
+
+ if (_ca_sflash_access_data(priv, op))
+ return -1;
+ } else { /* reset_op, wr_enable, wr_disable */
+ setbits_le32(&priv->regs->ar,
+ CA_SF_AR_OP(op->cmd.opcode));
+ debug("%s: FLASH ACCESS reg=%#08x\n",
+ __func__, readl(&priv->regs->ar));
+
+ if (opcode == CA_SF_AC_OP_4_ADDR) { /* erase_op */
+ /* Configure address length */
+ if (addr_cnt > 3) /* 4 Bytes address */
+ setbits_le32(&priv->regs->tr,
+ CA_FLASH_TR_SIZE(2));
+ else /* 3 Bytes address */
+ clrbits_le32(&priv->regs->tr,
+ CA_FLASH_TR_SIZE_MSK);
+
+ /* Fill address */
+ if (addr_cnt > 0)
+ clrsetbits_le32(&priv->regs->adr,
+ GENMASK(31, 0),
+ (u32)addr_offset);
+ }
+
+ if (_ca_sflash_wait_cmd(priv, RD_ACCESS))
+ return -1;
+ }
+ /* Wait 10us before issuing any other command */
+ udelay(10);
+
+ return 0;
+}
+
+static int ca_sflash_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct ca_sflash_priv *priv = dev_get_priv(slave->dev->parent);
+ u8 opcode;
+
+ debug("%s: cmd:%#02x addr.val:%#llx addr.len:%#x data.len:%#x data.dir:%#x\n",
+ __func__, op->cmd.opcode, op->addr.val,
+ op->addr.nbytes, op->data.nbytes, op->data.dir);
+
+ if (op->data.nbytes == 0 && op->addr.nbytes == 0) {
+ opcode = CA_SF_AC_OP;
+ } else if (op->data.nbytes == 0 && op->addr.nbytes > 0) {
+ opcode = CA_SF_AC_OP_4_ADDR;
+ } else if (op->data.nbytes > 0) {
+ opcode = CA_SF_AC_OP_EXTEND;
+ } else {
+ printf("%s: can't support cmd.opcode:(%#02x) type currently!\n",
+ __func__, op->cmd.opcode);
+ return -1;
+ }
+
+ return _ca_sflash_issue_cmd(priv, (struct spi_mem_op *)op, opcode);
+}
+
+static void ca_sflash_init(struct ca_sflash_priv *priv)
+{
+ /* Set FLASH_TYPE as serial flash, value: 0x0400 */
+ clrsetbits_le32(&priv->regs->tr,
+ GENMASK(31, 0), CA_FLASH_TR_SIZE(2));
+ debug("%s: FLASH_TYPE reg=%#x\n",
+ __func__, readl(&priv->regs->tr));
+
+ /* Minimize flash timing, value: 0x07010101 */
+ clrsetbits_le32(&priv->regs->tmr,
+ GENMASK(31, 0),
+ CA_SF_TMR_CLK(0x07)
+ | CA_SF_TMR_SETUP(0x01)
+ | CA_SF_TMR_HOLD(0x01)
+ | CA_SF_TMR_IDLE(0x01));
+ debug("%s: FLASH_TIMING reg=%#x\n",
+ __func__, readl(&priv->regs->tmr));
+}
+
+static int ca_sflash_probe(struct udevice *dev)
+{
+ struct ca_sflash_priv *priv = dev_get_priv(dev);
+ struct resource res;
+ int ret;
+
+ /* Map the registers */
+ ret = dev_read_resource_byname(dev, "sflash-regs", &res);
+ if (ret) {
+ dev_err(dev, "can't get regs base addresses(ret = %d)!\n", ret);
+ return ret;
+ }
+ priv->regs = devm_ioremap(dev, res.start, resource_size(&res));
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ ca_sflash_init(priv);
+
+ printf("SFLASH: Controller probed ready\n");
+ return 0;
+}
+
+static const struct spi_controller_mem_ops ca_sflash_mem_ops = {
+ .exec_op = ca_sflash_exec_op,
+};
+
+static const struct dm_spi_ops ca_sflash_ops = {
+ .claim_bus = ca_sflash_claim_bus,
+ .release_bus = ca_sflash_release_bus,
+ .set_speed = ca_sflash_set_speed,
+ .set_mode = ca_sflash_set_mode,
+ .mem_ops = &ca_sflash_mem_ops,
+};
+
+static const struct udevice_id ca_sflash_ids[] = {
+ {.compatible = "cortina,ca-sflash"},
+ {}
+};
+
+U_BOOT_DRIVER(ca_sflash) = {
+ .name = "ca_sflash",
+ .id = UCLASS_SPI,
+ .of_match = ca_sflash_ids,
+ .ops = &ca_sflash_ops,
+ .priv_auto = sizeof(struct ca_sflash_priv),
+ .probe = ca_sflash_probe,
+};
diff --git a/roms/u-boot/drivers/spi/cadence_qspi.c b/roms/u-boot/drivers/spi/cadence_qspi.c
new file mode 100644
index 000000000..67980431b
--- /dev/null
+++ b/roms/u-boot/drivers/spi/cadence_qspi.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012
+ * Altera Corporation <www.altera.com>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <log.h>
+#include <asm-generic/io.h>
+#include <dm.h>
+#include <fdtdec.h>
+#include <malloc.h>
+#include <reset.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <dm/device_compat.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/sizes.h>
+#include "cadence_qspi.h"
+
+#define CQSPI_STIG_READ 0
+#define CQSPI_STIG_WRITE 1
+#define CQSPI_READ 2
+#define CQSPI_WRITE 3
+
+static int cadence_spi_write_speed(struct udevice *bus, uint hz)
+{
+ struct cadence_spi_plat *plat = dev_get_plat(bus);
+ struct cadence_spi_priv *priv = dev_get_priv(bus);
+
+ cadence_qspi_apb_config_baudrate_div(priv->regbase,
+ plat->ref_clk_hz, hz);
+
+ /* Reconfigure delay timing if speed is changed. */
+ cadence_qspi_apb_delay(priv->regbase, plat->ref_clk_hz, hz,
+ plat->tshsl_ns, plat->tsd2d_ns,
+ plat->tchsh_ns, plat->tslch_ns);
+
+ return 0;
+}
+
+static int cadence_spi_read_id(void *reg_base, u8 len, u8 *idcode)
+{
+ struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9F, 1),
+ SPI_MEM_OP_NO_ADDR,
+ SPI_MEM_OP_NO_DUMMY,
+ SPI_MEM_OP_DATA_IN(len, idcode, 1));
+
+ return cadence_qspi_apb_command_read(reg_base, &op);
+}
+
+/* Calibration sequence to determine the read data capture delay register */
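+/*
+ * Worked example (assumed flash/ID behaviour): if the JEDEC ID reads back
+ * correctly only for capture delays 2 through 5, range_lo = 2, range_hi = 5
+ * and the code settles on a delay of (2 + 5) / 2 = 3.
+ */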
+static int spi_calibration(struct udevice *bus, uint hz)
+{
+ struct cadence_spi_priv *priv = dev_get_priv(bus);
+ void *base = priv->regbase;
+ unsigned int idcode = 0, temp = 0;
+ int err = 0, i, range_lo = -1, range_hi = -1;
+
+ /* start with slowest clock (1 MHz) */
+ cadence_spi_write_speed(bus, 1000000);
+
+ /* configure the read data capture delay register to 0 */
+ cadence_qspi_apb_readdata_capture(base, 1, 0);
+
+ /* Enable QSPI */
+ cadence_qspi_apb_controller_enable(base);
+
+ /* read the ID which will be our golden value */
+ err = cadence_spi_read_id(base, 3, (u8 *)&idcode);
+ if (err) {
+ puts("SF: Calibration failed (read)\n");
+ return err;
+ }
+
+ /* use back the intended clock and find low range */
+ cadence_spi_write_speed(bus, hz);
+ for (i = 0; i < CQSPI_READ_CAPTURE_MAX_DELAY; i++) {
+ /* Disable QSPI */
+ cadence_qspi_apb_controller_disable(base);
+
+ /* reconfigure the read data capture delay register */
+ cadence_qspi_apb_readdata_capture(base, 1, i);
+
+ /* Enable back QSPI */
+ cadence_qspi_apb_controller_enable(base);
+
+ /* issue a RDID to get the ID value */
+ err = cadence_spi_read_id(base, 3, (u8 *)&temp);
+ if (err) {
+ puts("SF: Calibration failed (read)\n");
+ return err;
+ }
+
+ /* search for range lo */
+ if (range_lo == -1 && temp == idcode) {
+ range_lo = i;
+ continue;
+ }
+
+ /* search for range hi */
+ if (range_lo != -1 && temp != idcode) {
+ range_hi = i - 1;
+ break;
+ }
+ range_hi = i;
+ }
+
+ if (range_lo == -1) {
+ puts("SF: Calibration failed (low range)\n");
+ return err;
+ }
+
+ /* Disable QSPI for subsequent initialization */
+ cadence_qspi_apb_controller_disable(base);
+
+ /* configure the final value for read data capture delay register */
+ cadence_qspi_apb_readdata_capture(base, 1, (range_hi + range_lo) / 2);
+ debug("SF: Read data capture delay calibrated to %i (%i - %i)\n",
+ (range_hi + range_lo) / 2, range_lo, range_hi);
+
+ /* just to ensure we do once only when speed or chip select change */
+ priv->qspi_calibrated_hz = hz;
+ priv->qspi_calibrated_cs = spi_chip_select(bus);
+
+ return 0;
+}
+
+static int cadence_spi_set_speed(struct udevice *bus, uint hz)
+{
+ struct cadence_spi_plat *plat = dev_get_plat(bus);
+ struct cadence_spi_priv *priv = dev_get_priv(bus);
+ int err;
+
+ if (hz > plat->max_hz)
+ hz = plat->max_hz;
+
+ /* Disable QSPI */
+ cadence_qspi_apb_controller_disable(priv->regbase);
+
+ /*
+ * Re-run calibration whenever the requested SCLK speed or the chip
+ * select differs from the values used for the previous calibration
+ */
+ if (priv->previous_hz != hz ||
+ priv->qspi_calibrated_hz != hz ||
+ priv->qspi_calibrated_cs != spi_chip_select(bus)) {
+ err = spi_calibration(bus, hz);
+ if (err)
+ return err;
+
+ /* prevent calibration run when same as previous request */
+ priv->previous_hz = hz;
+ }
+
+ /* Enable QSPI */
+ cadence_qspi_apb_controller_enable(priv->regbase);
+
+ debug("%s: speed=%d\n", __func__, hz);
+
+ return 0;
+}
+
+static int cadence_spi_probe(struct udevice *bus)
+{
+ struct cadence_spi_plat *plat = dev_get_plat(bus);
+ struct cadence_spi_priv *priv = dev_get_priv(bus);
+ struct clk clk;
+ int ret;
+
+ priv->regbase = plat->regbase;
+ priv->ahbbase = plat->ahbbase;
+
+ if (plat->ref_clk_hz == 0) {
+ ret = clk_get_by_index(bus, 0, &clk);
+ if (ret) {
+#ifdef CONFIG_CQSPI_REF_CLK
+ plat->ref_clk_hz = CONFIG_CQSPI_REF_CLK;
+#else
+ return ret;
+#endif
+ } else {
+ plat->ref_clk_hz = clk_get_rate(&clk);
+ clk_free(&clk);
+ if (IS_ERR_VALUE(plat->ref_clk_hz))
+ return plat->ref_clk_hz;
+ }
+ }
+
+ ret = reset_get_bulk(bus, &priv->resets);
+ if (ret)
+ dev_warn(bus, "Can't get reset: %d\n", ret);
+ else
+ reset_deassert_bulk(&priv->resets);
+
+ if (!priv->qspi_is_init) {
+ cadence_qspi_apb_controller_init(plat);
+ priv->qspi_is_init = 1;
+ }
+
+ return 0;
+}
+
+static int cadence_spi_remove(struct udevice *dev)
+{
+ struct cadence_spi_priv *priv = dev_get_priv(dev);
+
+ return reset_release_bulk(&priv->resets);
+}
+
+static int cadence_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct cadence_spi_plat *plat = dev_get_plat(bus);
+ struct cadence_spi_priv *priv = dev_get_priv(bus);
+
+ /* Disable QSPI */
+ cadence_qspi_apb_controller_disable(priv->regbase);
+
+ /* Set SPI mode */
+ cadence_qspi_apb_set_clk_mode(priv->regbase, mode);
+
+ /* Enable Direct Access Controller */
+ if (plat->use_dac_mode)
+ cadence_qspi_apb_dac_mode_enable(priv->regbase);
+
+ /* Enable QSPI */
+ cadence_qspi_apb_controller_enable(priv->regbase);
+
+ return 0;
+}
+
+static int cadence_spi_mem_exec_op(struct spi_slave *spi,
+ const struct spi_mem_op *op)
+{
+ struct udevice *bus = spi->dev->parent;
+ struct cadence_spi_plat *plat = dev_get_plat(bus);
+ struct cadence_spi_priv *priv = dev_get_priv(bus);
+ void *base = priv->regbase;
+ int err = 0;
+ u32 mode;
+
+ /* Set Chip select */
+ cadence_qspi_apb_chipselect(base, spi_chip_select(spi->dev),
+ plat->is_decoded_cs);
+
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
+ if (!op->addr.nbytes)
+ mode = CQSPI_STIG_READ;
+ else
+ mode = CQSPI_READ;
+ } else {
+ if (!op->addr.nbytes || !op->data.buf.out)
+ mode = CQSPI_STIG_WRITE;
+ else
+ mode = CQSPI_WRITE;
+ }
+
+ switch (mode) {
+ case CQSPI_STIG_READ:
+ err = cadence_qspi_apb_command_read(base, op);
+ break;
+ case CQSPI_STIG_WRITE:
+ err = cadence_qspi_apb_command_write(base, op);
+ break;
+ case CQSPI_READ:
+ err = cadence_qspi_apb_read_setup(plat, op);
+ if (!err)
+ err = cadence_qspi_apb_read_execute(plat, op);
+ break;
+ case CQSPI_WRITE:
+ err = cadence_qspi_apb_write_setup(plat, op);
+ if (!err)
+ err = cadence_qspi_apb_write_execute(plat, op);
+ break;
+ default:
+ err = -1;
+ break;
+ }
+
+ return err;
+}
+
+static int cadence_spi_of_to_plat(struct udevice *bus)
+{
+ struct cadence_spi_plat *plat = dev_get_plat(bus);
+ ofnode subnode;
+
+ plat->regbase = (void *)devfdt_get_addr_index(bus, 0);
+ plat->ahbbase = (void *)devfdt_get_addr_size_index(bus, 1,
+ &plat->ahbsize);
+ plat->is_decoded_cs = dev_read_bool(bus, "cdns,is-decoded-cs");
+ plat->fifo_depth = dev_read_u32_default(bus, "cdns,fifo-depth", 128);
+ plat->fifo_width = dev_read_u32_default(bus, "cdns,fifo-width", 4);
+ plat->trigger_address = dev_read_u32_default(bus,
+ "cdns,trigger-address",
+ 0);
+ /* Use DAC mode only when MMIO window is at least 8M wide */
+ if (plat->ahbsize >= SZ_8M)
+ plat->use_dac_mode = true;
+
+ /* All other parameters are embedded in the child node */
+ subnode = dev_read_first_subnode(bus);
+ if (!ofnode_valid(subnode)) {
+ printf("Error: subnode with SPI flash config missing!\n");
+ return -ENODEV;
+ }
+
+ /* Use 500 KHz as a suitable default */
+ plat->max_hz = ofnode_read_u32_default(subnode, "spi-max-frequency",
+ 500000);
+
+ /* Read other parameters from DT */
+ plat->page_size = ofnode_read_u32_default(subnode, "page-size", 256);
+ plat->block_size = ofnode_read_u32_default(subnode, "block-size", 16);
+ plat->tshsl_ns = ofnode_read_u32_default(subnode, "cdns,tshsl-ns",
+ 200);
+ plat->tsd2d_ns = ofnode_read_u32_default(subnode, "cdns,tsd2d-ns",
+ 255);
+ plat->tchsh_ns = ofnode_read_u32_default(subnode, "cdns,tchsh-ns", 20);
+ plat->tslch_ns = ofnode_read_u32_default(subnode, "cdns,tslch-ns", 20);
+
+ debug("%s: regbase=%p ahbbase=%p max-frequency=%d page-size=%d\n",
+ __func__, plat->regbase, plat->ahbbase, plat->max_hz,
+ plat->page_size);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops cadence_spi_mem_ops = {
+ .exec_op = cadence_spi_mem_exec_op,
+};
+
+static const struct dm_spi_ops cadence_spi_ops = {
+ .set_speed = cadence_spi_set_speed,
+ .set_mode = cadence_spi_set_mode,
+ .mem_ops = &cadence_spi_mem_ops,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id cadence_spi_ids[] = {
+ { .compatible = "cdns,qspi-nor" },
+ { .compatible = "ti,am654-ospi" },
+ { }
+};
+
+U_BOOT_DRIVER(cadence_spi) = {
+ .name = "cadence_spi",
+ .id = UCLASS_SPI,
+ .of_match = cadence_spi_ids,
+ .ops = &cadence_spi_ops,
+ .of_to_plat = cadence_spi_of_to_plat,
+ .plat_auto = sizeof(struct cadence_spi_plat),
+ .priv_auto = sizeof(struct cadence_spi_priv),
+ .probe = cadence_spi_probe,
+ .remove = cadence_spi_remove,
+ .flags = DM_FLAG_OS_PREPARE,
+};
diff --git a/roms/u-boot/drivers/spi/cadence_qspi.h b/roms/u-boot/drivers/spi/cadence_qspi.h
new file mode 100644
index 000000000..64c586760
--- /dev/null
+++ b/roms/u-boot/drivers/spi/cadence_qspi.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2012
+ * Altera Corporation <www.altera.com>
+ */
+
+#ifndef __CADENCE_QSPI_H__
+#define __CADENCE_QSPI_H__
+
+#include <reset.h>
+
+#define CQSPI_IS_ADDR(cmd_len) ((cmd_len) > 1 ? 1 : 0)
+
+#define CQSPI_NO_DECODER_MAX_CS 4
+#define CQSPI_DECODER_MAX_CS 16
+#define CQSPI_READ_CAPTURE_MAX_DELAY 16
+
+struct cadence_spi_plat {
+ unsigned int ref_clk_hz;
+ unsigned int max_hz;
+ void *regbase;
+ void *ahbbase;
+ bool is_decoded_cs;
+ u32 fifo_depth;
+ u32 fifo_width;
+ u32 trigger_address;
+ fdt_addr_t ahbsize;
+ bool use_dac_mode;
+
+ /* Flash parameters */
+ u32 page_size;
+ u32 block_size;
+ u32 tshsl_ns;
+ u32 tsd2d_ns;
+ u32 tchsh_ns;
+ u32 tslch_ns;
+};
+
+struct cadence_spi_priv {
+ void *regbase;
+ void *ahbbase;
+ size_t cmd_len;
+ u8 cmd_buf[32];
+ size_t data_len;
+
+ int qspi_is_init;
+ unsigned int qspi_calibrated_hz;
+ unsigned int qspi_calibrated_cs;
+ unsigned int previous_hz;
+
+ struct reset_ctl_bulk resets;
+};
+
+/* Functions call declaration */
+void cadence_qspi_apb_controller_init(struct cadence_spi_plat *plat);
+void cadence_qspi_apb_controller_enable(void *reg_base_addr);
+void cadence_qspi_apb_controller_disable(void *reg_base_addr);
+void cadence_qspi_apb_dac_mode_enable(void *reg_base);
+
+int cadence_qspi_apb_command_read(void *reg_base_addr,
+ const struct spi_mem_op *op);
+int cadence_qspi_apb_command_write(void *reg_base_addr,
+ const struct spi_mem_op *op);
+
+int cadence_qspi_apb_read_setup(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op);
+int cadence_qspi_apb_read_execute(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op);
+int cadence_qspi_apb_write_setup(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op);
+int cadence_qspi_apb_write_execute(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op);
+
+void cadence_qspi_apb_chipselect(void *reg_base,
+ unsigned int chip_select, unsigned int decoder_enable);
+void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode);
+void cadence_qspi_apb_config_baudrate_div(void *reg_base,
+ unsigned int ref_clk_hz, unsigned int sclk_hz);
+void cadence_qspi_apb_delay(void *reg_base,
+ unsigned int ref_clk, unsigned int sclk_hz,
+ unsigned int tshsl_ns, unsigned int tsd2d_ns,
+ unsigned int tchsh_ns, unsigned int tslch_ns);
+void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy);
+void cadence_qspi_apb_readdata_capture(void *reg_base,
+ unsigned int bypass, unsigned int delay);
+
+#endif /* __CADENCE_QSPI_H__ */
diff --git a/roms/u-boot/drivers/spi/cadence_qspi_apb.c b/roms/u-boot/drivers/spi/cadence_qspi_apb.c
new file mode 100644
index 000000000..b051f462e
--- /dev/null
+++ b/roms/u-boot/drivers/spi/cadence_qspi_apb.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright (C) 2012 Altera Corporation <www.altera.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * - Neither the name of the Altera Corporation nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL ALTERA CORPORATION BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/io.h>
+#include <dma.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <wait_bit.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <malloc.h>
+#include "cadence_qspi.h"
+
+#define CQSPI_REG_POLL_US 1 /* 1us */
+#define CQSPI_REG_RETRY 10000
+#define CQSPI_POLL_IDLE_RETRY 3
+
+/* Transfer mode */
+#define CQSPI_INST_TYPE_SINGLE 0
+#define CQSPI_INST_TYPE_DUAL 1
+#define CQSPI_INST_TYPE_QUAD 2
+#define CQSPI_INST_TYPE_OCTAL 3
+
+#define CQSPI_STIG_DATA_LEN_MAX 8
+
+#define CQSPI_DUMMY_CLKS_PER_BYTE 8
+#define CQSPI_DUMMY_BYTES_MAX 4
+
+/****************************************************************************
+ * Controller's configuration and status register (offset from QSPI_BASE)
+ ****************************************************************************/
+#define CQSPI_REG_CONFIG 0x00
+#define CQSPI_REG_CONFIG_ENABLE BIT(0)
+#define CQSPI_REG_CONFIG_CLK_POL BIT(1)
+#define CQSPI_REG_CONFIG_CLK_PHA BIT(2)
+#define CQSPI_REG_CONFIG_DIRECT BIT(7)
+#define CQSPI_REG_CONFIG_DECODE BIT(9)
+#define CQSPI_REG_CONFIG_XIP_IMM BIT(18)
+#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
+#define CQSPI_REG_CONFIG_BAUD_LSB 19
+#define CQSPI_REG_CONFIG_IDLE_LSB 31
+#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
+#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
+
+#define CQSPI_REG_RD_INSTR 0x04
+#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
+#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
+#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
+#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
+#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
+#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F
+
+#define CQSPI_REG_WR_INSTR 0x08
+#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
+#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16
+
+#define CQSPI_REG_DELAY 0x0C
+#define CQSPI_REG_DELAY_TSLCH_LSB 0
+#define CQSPI_REG_DELAY_TCHSH_LSB 8
+#define CQSPI_REG_DELAY_TSD2D_LSB 16
+#define CQSPI_REG_DELAY_TSHSL_LSB 24
+#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
+#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
+#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
+#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF
+
+#define CQSPI_REG_RD_DATA_CAPTURE 0x10
+#define CQSPI_REG_RD_DATA_CAPTURE_BYPASS BIT(0)
+#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB 1
+#define CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK 0xF
+
+#define CQSPI_REG_SIZE 0x14
+#define CQSPI_REG_SIZE_ADDRESS_LSB 0
+#define CQSPI_REG_SIZE_PAGE_LSB 4
+#define CQSPI_REG_SIZE_BLOCK_LSB 16
+#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
+#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
+#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F
+
+#define CQSPI_REG_SRAMPARTITION 0x18
+#define CQSPI_REG_INDIRECTTRIGGER 0x1C
+
+#define CQSPI_REG_REMAP 0x24
+#define CQSPI_REG_MODE_BIT 0x28
+
+#define CQSPI_REG_SDRAMLEVEL 0x2C
+#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
+#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
+#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
+#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF
+
+#define CQSPI_REG_IRQSTATUS 0x40
+#define CQSPI_REG_IRQMASK 0x44
+
+#define CQSPI_REG_INDIRECTRD 0x60
+#define CQSPI_REG_INDIRECTRD_START BIT(0)
+#define CQSPI_REG_INDIRECTRD_CANCEL BIT(1)
+#define CQSPI_REG_INDIRECTRD_INPROGRESS BIT(2)
+#define CQSPI_REG_INDIRECTRD_DONE BIT(5)
+
+#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
+#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
+#define CQSPI_REG_INDIRECTRDBYTES 0x6C
+
+#define CQSPI_REG_CMDCTRL 0x90
+#define CQSPI_REG_CMDCTRL_EXECUTE BIT(0)
+#define CQSPI_REG_CMDCTRL_INPROGRESS BIT(1)
+#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
+#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
+#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
+#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
+#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
+#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
+#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
+#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F
+#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
+#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
+#define CQSPI_REG_CMDCTRL_OPCODE_MASK 0xFF
+
+#define CQSPI_REG_INDIRECTWR 0x70
+#define CQSPI_REG_INDIRECTWR_START BIT(0)
+#define CQSPI_REG_INDIRECTWR_CANCEL BIT(1)
+#define CQSPI_REG_INDIRECTWR_INPROGRESS BIT(2)
+#define CQSPI_REG_INDIRECTWR_DONE BIT(5)
+
+#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
+#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
+#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+
+#define CQSPI_REG_CMDADDRESS 0x94
+#define CQSPI_REG_CMDREADDATALOWER 0xA0
+#define CQSPI_REG_CMDREADDATAUPPER 0xA4
+#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
+#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC
+
+#define CQSPI_REG_IS_IDLE(base) \
+ ((readl(base + CQSPI_REG_CONFIG) >> \
+ CQSPI_REG_CONFIG_IDLE_LSB) & 0x1)
+
+#define CQSPI_GET_RD_SRAM_LEVEL(reg_base) \
+ (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
+ CQSPI_REG_SDRAMLEVEL_RD_LSB) & CQSPI_REG_SDRAMLEVEL_RD_MASK)
+
+#define CQSPI_GET_WR_SRAM_LEVEL(reg_base) \
+ (((readl(reg_base + CQSPI_REG_SDRAMLEVEL)) >> \
+ CQSPI_REG_SDRAMLEVEL_WR_LSB) & CQSPI_REG_SDRAMLEVEL_WR_MASK)
+
+void cadence_qspi_apb_controller_enable(void *reg_base)
+{
+ unsigned int reg;
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENABLE;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+void cadence_qspi_apb_controller_disable(void *reg_base)
+{
+ unsigned int reg;
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_ENABLE;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+void cadence_qspi_apb_dac_mode_enable(void *reg_base)
+{
+ unsigned int reg;
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DIRECT;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+}
+
+/* Return 1 if idle, otherwise return 0 (busy). */
+static unsigned int cadence_qspi_wait_idle(void *reg_base)
+{
+ unsigned int start, count = 0;
+ /* timeout in unit of ms */
+ unsigned int timeout = 5000;
+
+ start = get_timer(0);
+ for ( ; get_timer(start) < timeout ; ) {
+ if (CQSPI_REG_IS_IDLE(reg_base))
+ count++;
+ else
+ count = 0;
+ /*
+ * Ensure the QSPI controller is in true idle state after
+ * reading back the same idle status consecutively
+ */
+ if (count >= CQSPI_POLL_IDLE_RETRY)
+ return 1;
+ }
+
+ /* Timeout, still in busy mode. */
+ printf("QSPI: QSPI is still busy after poll for %d times.\n",
+ CQSPI_REG_RETRY);
+ return 0;
+}
+
+void cadence_qspi_apb_readdata_capture(void *reg_base,
+ unsigned int bypass, unsigned int delay)
+{
+ unsigned int reg;
+ cadence_qspi_apb_controller_disable(reg_base);
+
+ reg = readl(reg_base + CQSPI_REG_RD_DATA_CAPTURE);
+
+ if (bypass)
+ reg |= CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
+ else
+ reg &= ~CQSPI_REG_RD_DATA_CAPTURE_BYPASS;
+
+ reg &= ~(CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK
+ << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB);
+
+ reg |= (delay & CQSPI_REG_RD_DATA_CAPTURE_DELAY_MASK)
+ << CQSPI_REG_RD_DATA_CAPTURE_DELAY_LSB;
+
+ writel(reg, reg_base + CQSPI_REG_RD_DATA_CAPTURE);
+
+ cadence_qspi_apb_controller_enable(reg_base);
+}
+
+void cadence_qspi_apb_config_baudrate_div(void *reg_base,
+ unsigned int ref_clk_hz, unsigned int sclk_hz)
+{
+ unsigned int reg;
+ unsigned int div;
+
+ cadence_qspi_apb_controller_disable(reg_base);
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
+
+ /*
+ * The baud_div field in the config reg is 4 bits, and the ref clock is
+ * divided by 2 * (baud_div + 1). Round up the divider to ensure the
+ * SPI clock rate is less than or equal to the requested clock rate.
+ */
+ div = DIV_ROUND_UP(ref_clk_hz, sclk_hz * 2) - 1;
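+ /*
+ * Worked example (assumed clocks): ref_clk_hz = 400 MHz and
+ * sclk_hz = 50 MHz give div = DIV_ROUND_UP(400M, 100M) - 1 = 3,
+ * i.e. an actual SCLK of 400 MHz / (2 * (3 + 1)) = 50 MHz.
+ */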
+
+ /* ensure the baud rate doesn't exceed the max value */
+ if (div > CQSPI_REG_CONFIG_BAUD_MASK)
+ div = CQSPI_REG_CONFIG_BAUD_MASK;
+
+ debug("%s: ref_clk %dHz sclk %dHz Div 0x%x, actual %dHz\n", __func__,
+ ref_clk_hz, sclk_hz, div, ref_clk_hz / (2 * (div + 1)));
+
+ reg |= (div << CQSPI_REG_CONFIG_BAUD_LSB);
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ cadence_qspi_apb_controller_enable(reg_base);
+}
+
+void cadence_qspi_apb_set_clk_mode(void *reg_base, uint mode)
+{
+ unsigned int reg;
+
+ cadence_qspi_apb_controller_disable(reg_base);
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg &= ~(CQSPI_REG_CONFIG_CLK_POL | CQSPI_REG_CONFIG_CLK_PHA);
+
+ if (mode & SPI_CPOL)
+ reg |= CQSPI_REG_CONFIG_CLK_POL;
+ if (mode & SPI_CPHA)
+ reg |= CQSPI_REG_CONFIG_CLK_PHA;
+
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ cadence_qspi_apb_controller_enable(reg_base);
+}
+
+void cadence_qspi_apb_chipselect(void *reg_base,
+ unsigned int chip_select, unsigned int decoder_enable)
+{
+ unsigned int reg;
+
+ cadence_qspi_apb_controller_disable(reg_base);
+
+ debug("%s : chipselect %d decode %d\n", __func__, chip_select,
+ decoder_enable);
+
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ /* decoder */
+ if (decoder_enable) {
+ reg |= CQSPI_REG_CONFIG_DECODE;
+ } else {
+ reg &= ~CQSPI_REG_CONFIG_DECODE;
+ /* Convert CS when the decoder is not used:
+ * CS0 to 4b'1110
+ * CS1 to 4b'1101
+ * CS2 to 4b'1011
+ * CS3 to 4b'0111
+ */
+ chip_select = 0xF & ~(1 << chip_select);
+ }
+
+ reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
+ reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
+ << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ cadence_qspi_apb_controller_enable(reg_base);
+}
+
+void cadence_qspi_apb_delay(void *reg_base,
+ unsigned int ref_clk, unsigned int sclk_hz,
+ unsigned int tshsl_ns, unsigned int tsd2d_ns,
+ unsigned int tchsh_ns, unsigned int tslch_ns)
+{
+ unsigned int ref_clk_ns;
+ unsigned int sclk_ns;
+ unsigned int tshsl, tchsh, tslch, tsd2d;
+ unsigned int reg;
+
+ cadence_qspi_apb_controller_disable(reg_base);
+
+ /* Convert to ns. */
+ ref_clk_ns = DIV_ROUND_UP(1000000000, ref_clk);
+
+ /* Convert to ns. */
+ sclk_ns = DIV_ROUND_UP(1000000000, sclk_hz);
+
+ /* The controller adds additional delay to the value programmed in the reg */
+ if (tshsl_ns >= sclk_ns + ref_clk_ns)
+ tshsl_ns -= sclk_ns + ref_clk_ns;
+ if (tchsh_ns >= sclk_ns + 3 * ref_clk_ns)
+ tchsh_ns -= sclk_ns + 3 * ref_clk_ns;
+ tshsl = DIV_ROUND_UP(tshsl_ns, ref_clk_ns);
+ tchsh = DIV_ROUND_UP(tchsh_ns, ref_clk_ns);
+ tslch = DIV_ROUND_UP(tslch_ns, ref_clk_ns);
+ tsd2d = DIV_ROUND_UP(tsd2d_ns, ref_clk_ns);
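+
+ /*
+ * Illustrative numbers (assuming ref_clk = 400 MHz, sclk = 50 MHz and
+ * tshsl_ns = 200): ref_clk_ns = 3, sclk_ns = 20, so tshsl_ns is first
+ * reduced to 200 - (20 + 3) = 177 and then rounded up to
+ * tshsl = DIV_ROUND_UP(177, 3) = 59 ref-clock cycles.
+ */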
+
+ reg = ((tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
+ << CQSPI_REG_DELAY_TSHSL_LSB);
+ reg |= ((tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
+ << CQSPI_REG_DELAY_TCHSH_LSB);
+ reg |= ((tslch & CQSPI_REG_DELAY_TSLCH_MASK)
+ << CQSPI_REG_DELAY_TSLCH_LSB);
+ reg |= ((tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
+ << CQSPI_REG_DELAY_TSD2D_LSB);
+ writel(reg, reg_base + CQSPI_REG_DELAY);
+
+ cadence_qspi_apb_controller_enable(reg_base);
+}
+
+void cadence_qspi_apb_controller_init(struct cadence_spi_plat *plat)
+{
+ unsigned reg;
+
+ cadence_qspi_apb_controller_disable(plat->regbase);
+
+ /* Configure the device size and address bytes */
+ reg = readl(plat->regbase + CQSPI_REG_SIZE);
+ /* Clear the previous value */
+ reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
+ reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
+ reg |= (plat->page_size << CQSPI_REG_SIZE_PAGE_LSB);
+ reg |= (plat->block_size << CQSPI_REG_SIZE_BLOCK_LSB);
+ writel(reg, plat->regbase + CQSPI_REG_SIZE);
+
+ /* Configure the remap address register, no remap */
+ writel(0, plat->regbase + CQSPI_REG_REMAP);
+
+ /* Indirect mode configurations */
+ writel(plat->fifo_depth / 2, plat->regbase + CQSPI_REG_SRAMPARTITION);
+
+ /* Disable all interrupts */
+ writel(0, plat->regbase + CQSPI_REG_IRQMASK);
+
+ cadence_qspi_apb_controller_enable(plat->regbase);
+}
+
+static int cadence_qspi_apb_exec_flash_cmd(void *reg_base,
+ unsigned int reg)
+{
+ unsigned int retry = CQSPI_REG_RETRY;
+
+ /* Write the CMDCTRL without start execution. */
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+ /* Start execute */
+ reg |= CQSPI_REG_CMDCTRL_EXECUTE;
+ writel(reg, reg_base + CQSPI_REG_CMDCTRL);
+
+ while (retry--) {
+ reg = readl(reg_base + CQSPI_REG_CMDCTRL);
+ if ((reg & CQSPI_REG_CMDCTRL_INPROGRESS) == 0)
+ break;
+ udelay(1);
+ }
+
+ if (!retry) {
+ printf("QSPI: flash command execution timeout\n");
+ return -EIO;
+ }
+
+ /* Polling QSPI idle status. */
+ if (!cadence_qspi_wait_idle(reg_base))
+ return -EIO;
+
+ return 0;
+}
+
+/* For command RDID, RDSR. */
+int cadence_qspi_apb_command_read(void *reg_base, const struct spi_mem_op *op)
+{
+ unsigned int reg;
+ unsigned int read_len;
+ int status;
+ unsigned int rxlen = op->data.nbytes;
+ void *rxbuf = op->data.buf.in;
+
+ if (rxlen > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
+ printf("QSPI: Invalid input arguments rxlen %u\n", rxlen);
+ return -EINVAL;
+ }
+
+ reg = op->cmd.opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
+
+ /* 0 means 1 byte. */
+ reg |= (((rxlen - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+ status = cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
+ if (status != 0)
+ return status;
+
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
+
+ /* Put the read value into rx_buf */
+ read_len = (rxlen > 4) ? 4 : rxlen;
+ memcpy(rxbuf, &reg, read_len);
+ rxbuf += read_len;
+
+ if (rxlen > 4) {
+ reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
+
+ read_len = rxlen - read_len;
+ memcpy(rxbuf, &reg, read_len);
+ }
+ return 0;
+}
+
+/* For commands: WRSR, WREN, WRDI, CHIP_ERASE, BE, etc. */
+int cadence_qspi_apb_command_write(void *reg_base, const struct spi_mem_op *op)
+{
+ unsigned int reg = 0;
+ unsigned int wr_data;
+ unsigned int wr_len;
+ unsigned int txlen = op->data.nbytes;
+ const void *txbuf = op->data.buf.out;
+ u32 addr;
+
+ /* Reorder address to SPI bus order if only transferring address */
+ if (!txlen) {
+ addr = cpu_to_be32(op->addr.val);
+ if (op->addr.nbytes == 3)
+ addr >>= 8;
+ txbuf = &addr;
+ txlen = op->addr.nbytes;
+ }
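+
+ /*
+ * Example (little-endian CPU, illustrative values): for
+ * op->addr.val = 0x123456 and op->addr.nbytes = 3, cpu_to_be32()
+ * stores the bytes as 00 12 34 56 and the >> 8 shifts the in-memory
+ * layout to 12 34 56 00, so the three address bytes 12 34 56 are
+ * sent first.
+ */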
+
+ if (txlen > CQSPI_STIG_DATA_LEN_MAX) {
+ printf("QSPI: Invalid input arguments txlen %u\n", txlen);
+ return -EINVAL;
+ }
+
+ reg |= op->cmd.opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
+
+ if (txlen) {
+ /* writing data = yes */
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
+ reg |= ((txlen - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
+
+ wr_len = txlen > 4 ? 4 : txlen;
+ memcpy(&wr_data, txbuf, wr_len);
+ writel(wr_data, reg_base +
+ CQSPI_REG_CMDWRITEDATALOWER);
+
+ if (txlen > 4) {
+ txbuf += wr_len;
+ wr_len = txlen - wr_len;
+ memcpy(&wr_data, txbuf, wr_len);
+ writel(wr_data, reg_base +
+ CQSPI_REG_CMDWRITEDATAUPPER);
+ }
+ }
+
+ /* Execute the command */
+ return cadence_qspi_apb_exec_flash_cmd(reg_base, reg);
+}
+
+/* Opcode + Address (3/4 bytes) + dummy bytes (0-4 bytes) */
+int cadence_qspi_apb_read_setup(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op)
+{
+ unsigned int reg;
+ unsigned int rd_reg;
+ unsigned int dummy_clk;
+ unsigned int dummy_bytes = op->dummy.nbytes;
+
+ /* Setup the indirect trigger address */
+ writel(plat->trigger_address,
+ plat->regbase + CQSPI_REG_INDIRECTTRIGGER);
+
+ /* Configure the opcode */
+ rd_reg = op->cmd.opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
+
+ if (op->data.buswidth == 8)
+ /* Instruction and address at DQ0, data at DQ0-7. */
+ rd_reg |= CQSPI_INST_TYPE_OCTAL << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+ else if (op->data.buswidth == 4)
+ /* Instruction and address at DQ0, data at DQ0-3. */
+ rd_reg |= CQSPI_INST_TYPE_QUAD << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
+
+ writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTRDSTARTADDR);
+
+ if (dummy_bytes) {
+ if (dummy_bytes > CQSPI_DUMMY_BYTES_MAX)
+ dummy_bytes = CQSPI_DUMMY_BYTES_MAX;
+
+ /* Convert to clock cycles. */
+ dummy_clk = dummy_bytes * CQSPI_DUMMY_CLKS_PER_BYTE;
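+ /* e.g. one dummy byte translates to 8 dummy clock cycles */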
+
+ if (dummy_clk)
+ rd_reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
+ << CQSPI_REG_RD_INSTR_DUMMY_LSB;
+ }
+
+ writel(rd_reg, plat->regbase + CQSPI_REG_RD_INSTR);
+
+ /* set device size */
+ reg = readl(plat->regbase + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (op->addr.nbytes - 1);
+ writel(reg, plat->regbase + CQSPI_REG_SIZE);
+ return 0;
+}
+
+static u32 cadence_qspi_get_rd_sram_level(struct cadence_spi_plat *plat)
+{
+ u32 reg = readl(plat->regbase + CQSPI_REG_SDRAMLEVEL);
+ reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
+ return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
+}
+
+static int cadence_qspi_wait_for_data(struct cadence_spi_plat *plat)
+{
+ unsigned int timeout = 10000;
+ u32 reg;
+
+ while (timeout--) {
+ reg = cadence_qspi_get_rd_sram_level(plat);
+ if (reg)
+ return reg;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int
+cadence_qspi_apb_indirect_read_execute(struct cadence_spi_plat *plat,
+ unsigned int n_rx, u8 *rxbuf)
+{
+ unsigned int remaining = n_rx;
+ unsigned int bytes_to_read = 0;
+ int ret;
+
+ writel(n_rx, plat->regbase + CQSPI_REG_INDIRECTRDBYTES);
+
+ /* Start the indirect read transfer */
+ writel(CQSPI_REG_INDIRECTRD_START,
+ plat->regbase + CQSPI_REG_INDIRECTRD);
+
+ while (remaining > 0) {
+ ret = cadence_qspi_wait_for_data(plat);
+ if (ret < 0) {
+ printf("Indirect write timed out (%i)\n", ret);
+ goto failrd;
+ }
+
+ bytes_to_read = ret;
+
+ while (bytes_to_read != 0) {
+ bytes_to_read *= plat->fifo_width;
+ bytes_to_read = bytes_to_read > remaining ?
+ remaining : bytes_to_read;
+ /*
+ * Handle non-4-byte aligned access to avoid
+ * data abort.
+ */
+ if (((uintptr_t)rxbuf % 4) || (bytes_to_read % 4))
+ readsb(plat->ahbbase, rxbuf, bytes_to_read);
+ else
+ readsl(plat->ahbbase, rxbuf,
+ bytes_to_read >> 2);
+ rxbuf += bytes_to_read;
+ remaining -= bytes_to_read;
+ bytes_to_read = cadence_qspi_get_rd_sram_level(plat);
+ }
+ }
+
+ /* Check indirect done status */
+ ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTRD,
+ CQSPI_REG_INDIRECTRD_DONE, 1, 10, 0);
+ if (ret) {
+ printf("Indirect read completion error (%i)\n", ret);
+ goto failrd;
+ }
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE,
+ plat->regbase + CQSPI_REG_INDIRECTRD);
+
+ return 0;
+
+failrd:
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTRD_CANCEL,
+ plat->regbase + CQSPI_REG_INDIRECTRD);
+ return ret;
+}
+
+int cadence_qspi_apb_read_execute(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op)
+{
+ u64 from = op->addr.val;
+ void *buf = op->data.buf.in;
+ size_t len = op->data.nbytes;
+
+ if (plat->use_dac_mode && (from + len < plat->ahbsize)) {
+ if (len < 256 ||
+ dma_memcpy(buf, plat->ahbbase + from, len) < 0) {
+ memcpy_fromio(buf, plat->ahbbase + from, len);
+ }
+ if (!cadence_qspi_wait_idle(plat->regbase))
+ return -EIO;
+ return 0;
+ }
+
+ return cadence_qspi_apb_indirect_read_execute(plat, len, buf);
+}
+
+/* Opcode + Address (3/4 bytes) */
+int cadence_qspi_apb_write_setup(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op)
+{
+ unsigned int reg;
+
+ /* Setup the indirect trigger address */
+ writel(plat->trigger_address,
+ plat->regbase + CQSPI_REG_INDIRECTTRIGGER);
+
+ /* Configure the opcode */
+ reg = op->cmd.opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
+ writel(reg, plat->regbase + CQSPI_REG_WR_INSTR);
+
+ writel(op->addr.val, plat->regbase + CQSPI_REG_INDIRECTWRSTARTADDR);
+
+ reg = readl(plat->regbase + CQSPI_REG_SIZE);
+ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
+ reg |= (op->addr.nbytes - 1);
+ writel(reg, plat->regbase + CQSPI_REG_SIZE);
+ return 0;
+}
+
+static int
+cadence_qspi_apb_indirect_write_execute(struct cadence_spi_plat *plat,
+ unsigned int n_tx, const u8 *txbuf)
+{
+ unsigned int page_size = plat->page_size;
+ unsigned int remaining = n_tx;
+ const u8 *bb_txbuf = txbuf;
+ void *bounce_buf = NULL;
+ unsigned int write_bytes;
+ int ret;
+
+ /*
+ * Use bounce buffer for non 32 bit aligned txbuf to avoid data
+ * aborts
+ */
+ if ((uintptr_t)txbuf % 4) {
+ bounce_buf = malloc(n_tx);
+ if (!bounce_buf)
+ return -ENOMEM;
+ memcpy(bounce_buf, txbuf, n_tx);
+ bb_txbuf = bounce_buf;
+ }
+
+ /* Configure the indirect write transfer bytes */
+ writel(n_tx, plat->regbase + CQSPI_REG_INDIRECTWRBYTES);
+
+ /* Start the indirect write transfer */
+ writel(CQSPI_REG_INDIRECTWR_START,
+ plat->regbase + CQSPI_REG_INDIRECTWR);
+
+ while (remaining > 0) {
+ write_bytes = remaining > page_size ? page_size : remaining;
+ writesl(plat->ahbbase, bb_txbuf, write_bytes >> 2);
+ if (write_bytes % 4)
+ writesb(plat->ahbbase,
+ bb_txbuf + rounddown(write_bytes, 4),
+ write_bytes % 4);
+
+ ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_SDRAMLEVEL,
+ CQSPI_REG_SDRAMLEVEL_WR_MASK <<
+ CQSPI_REG_SDRAMLEVEL_WR_LSB, 0, 10, 0);
+ if (ret) {
+ printf("Indirect write timed out (%i)\n", ret);
+ goto failwr;
+ }
+
+ bb_txbuf += write_bytes;
+ remaining -= write_bytes;
+ }
+
+ /* Check indirect done status */
+ ret = wait_for_bit_le32(plat->regbase + CQSPI_REG_INDIRECTWR,
+ CQSPI_REG_INDIRECTWR_DONE, 1, 10, 0);
+ if (ret) {
+ printf("Indirect write completion error (%i)\n", ret);
+ goto failwr;
+ }
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTWR_DONE,
+ plat->regbase + CQSPI_REG_INDIRECTWR);
+ if (bounce_buf)
+ free(bounce_buf);
+ return 0;
+
+failwr:
+ /* Cancel the indirect write */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL,
+ plat->regbase + CQSPI_REG_INDIRECTWR);
+ if (bounce_buf)
+ free(bounce_buf);
+ return ret;
+}
+
+int cadence_qspi_apb_write_execute(struct cadence_spi_plat *plat,
+ const struct spi_mem_op *op)
+{
+ u32 to = op->addr.val;
+ const void *buf = op->data.buf.out;
+ size_t len = op->data.nbytes;
+
+ if (plat->use_dac_mode && (to + len < plat->ahbsize)) {
+ memcpy_toio(plat->ahbbase + to, buf, len);
+ if (!cadence_qspi_wait_idle(plat->regbase))
+ return -EIO;
+ return 0;
+ }
+
+ return cadence_qspi_apb_indirect_write_execute(plat, len, buf);
+}
+
+void cadence_qspi_apb_enter_xip(void *reg_base, char xip_dummy)
+{
+ unsigned int reg;
+
+ /* enter XiP mode immediately and enable direct mode */
+ reg = readl(reg_base + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_ENABLE;
+ reg |= CQSPI_REG_CONFIG_DIRECT;
+ reg |= CQSPI_REG_CONFIG_XIP_IMM;
+ writel(reg, reg_base + CQSPI_REG_CONFIG);
+
+ /* keep the XiP mode */
+ writel(xip_dummy, reg_base + CQSPI_REG_MODE_BIT);
+
+ /* Enable mode bit at devrd */
+ reg = readl(reg_base + CQSPI_REG_RD_INSTR);
+ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+ writel(reg, reg_base + CQSPI_REG_RD_INSTR);
+}
diff --git a/roms/u-boot/drivers/spi/cf_spi.c b/roms/u-boot/drivers/spi/cf_spi.c
new file mode 100644
index 000000000..6511c0e0e
--- /dev/null
+++ b/roms/u-boot/drivers/spi/cf_spi.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ *
+ * (C) Copyright 2000-2003
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * Copyright (C) 2004-2009 Freescale Semiconductor, Inc.
+ * TsiChung Liew (Tsi-Chung.Liew@freescale.com)
+ *
+ * Support for DM and DT, non-DM code removed.
+ * Copyright (C) 2018 Angelo Dureghello <angelo@sysam.it>
+ *
+ * TODO: fsl_dspi.c should work as a driver for the DSPI module.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <asm/global_data.h>
+#include <dm/platform_data/spi_coldfire.h>
+#include <spi.h>
+#include <malloc.h>
+#include <asm/coldfire/dspi.h>
+#include <asm/io.h>
+
+struct coldfire_spi_priv {
+ struct dspi *regs;
+ uint baudrate;
+ int mode;
+ int charbit;
+};
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#ifndef CONFIG_SPI_IDLE_VAL
+#if defined(CONFIG_SPI_MMC)
+#define CONFIG_SPI_IDLE_VAL 0xFFFF
+#else
+#define CONFIG_SPI_IDLE_VAL 0x0
+#endif
+#endif
+
+/*
+ * DSPI specific mode
+ *
+ * bit 31 - 28: Transfer size 3 to 16 bits
+ * 27 - 26: PCS to SCK delay prescaler
+ * 25 - 24: After SCK delay prescaler
+ * 23 - 22: Delay after transfer prescaler
+ * 21 : Allow overwrite for bit 31-22 and bit 20-8
+ * 20 : Double baud rate
+ * 19 - 16: PCS to SCK delay scaler
+ * 15 - 12: After SCK delay scaler
+ * 11 - 8: Delay after transfer scaler
+ * 7 - 0: SPI_CPHA, SPI_CPOL, SPI_LSB_FIRST
+ */
+#define SPI_MODE_MOD 0x00200000
+#define SPI_MODE_DBLRATE 0x00100000
+
+#define SPI_MODE_XFER_SZ_MASK 0xf0000000
+#define SPI_MODE_DLY_PRE_MASK 0x0fc00000
+#define SPI_MODE_DLY_SCA_MASK 0x000fff00
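+
+/*
+ * Illustration (assuming DSPI_CTAR_TRSZ() occupies CTAR bits 30-27, as the
+ * ">> 1" in coldfire_spi_set_mode() implies): a mode word of
+ * SPI_MODE_MOD | 0xf0000000 requests 16-bit frames, while adding
+ * SPI_MODE_DBLRATE makes coldfire_spi_set_speed() set DSPI_CTAR_DBR.
+ */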
+
+#define MCF_FRM_SZ_16BIT DSPI_CTAR_TRSZ(0xf)
+#define MCF_DSPI_SPEED_BESTMATCH 0x7FFFFFFF
+#define MCF_DSPI_MAX_CTAR_REGS 8
+
+/* Default values */
+#define MCF_DSPI_DEFAULT_SCK_FREQ 10000000
+#define MCF_DSPI_DEFAULT_MAX_CS 4
+#define MCF_DSPI_DEFAULT_MODE 0
+
+#define MCF_DSPI_DEFAULT_CTAR (DSPI_CTAR_TRSZ(7) | \
+ DSPI_CTAR_PCSSCK_1CLK | \
+ DSPI_CTAR_PASC(0) | \
+ DSPI_CTAR_PDT(0) | \
+ DSPI_CTAR_CSSCK(0) | \
+ DSPI_CTAR_ASC(0) | \
+ DSPI_CTAR_DT(1) | \
+ DSPI_CTAR_BR(6))
+
+#define MCF_CTAR_MODE_MASK (MCF_FRM_SZ_16BIT | \
+ DSPI_CTAR_PCSSCK(3) | \
+ DSPI_CTAR_PASC_7CLK | \
+ DSPI_CTAR_PDT(3) | \
+ DSPI_CTAR_CSSCK(0x0f) | \
+ DSPI_CTAR_ASC(0x0f) | \
+ DSPI_CTAR_DT(0x0f))
+
+#define setup_ctrl(ctrl, cs) (((ctrl) & 0xFF000000) | ((1 << (cs)) << 16))
+
+static inline void cfspi_tx(struct coldfire_spi_priv *cfspi,
+ u32 ctrl, u16 data)
+{
+ /*
+ * Need to check fifo level here
+ */
+ while ((readl(&cfspi->regs->sr) & 0x0000F000) >= 0x4000)
+ ;
+
+ writel(ctrl | data, &cfspi->regs->tfr);
+}
+
+static inline u16 cfspi_rx(struct coldfire_spi_priv *cfspi)
+{
+
+ while ((readl(&cfspi->regs->sr) & 0x000000F0) == 0)
+ ;
+
+ return readw(&cfspi->regs->rfr);
+}
+
+static int coldfire_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct coldfire_spi_priv *cfspi = dev_get_priv(bus);
+ struct dspi *dspi = cfspi->regs;
+ struct dm_spi_slave_plat *slave_plat =
+ dev_get_parent_plat(dev);
+
+ if ((in_be32(&dspi->sr) & DSPI_SR_TXRXS) != DSPI_SR_TXRXS)
+ return -1;
+
+ /* Clear FIFO and resume transfer */
+ clrbits_be32(&dspi->mcr, DSPI_MCR_CTXF | DSPI_MCR_CRXF);
+
+ dspi_chip_select(slave_plat->cs);
+
+ return 0;
+}
+
+static int coldfire_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct coldfire_spi_priv *cfspi = dev_get_priv(bus);
+ struct dspi *dspi = cfspi->regs;
+ struct dm_spi_slave_plat *slave_plat =
+ dev_get_parent_plat(dev);
+
+ /* Clear FIFO */
+ clrbits_be32(&dspi->mcr, DSPI_MCR_CTXF | DSPI_MCR_CRXF);
+
+ dspi_chip_unselect(slave_plat->cs);
+
+ return 0;
+}
+
+static int coldfire_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din,
+ unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct coldfire_spi_priv *cfspi = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ u16 *spi_rd16 = NULL, *spi_wr16 = NULL;
+ u8 *spi_rd = NULL, *spi_wr = NULL;
+ static u32 ctrl;
+ uint len = bitlen >> 3;
+
+ if (cfspi->charbit == 16) {
+ bitlen >>= 1;
+ spi_wr16 = (u16 *)dout;
+ spi_rd16 = (u16 *)din;
+ } else {
+ spi_wr = (u8 *)dout;
+ spi_rd = (u8 *)din;
+ }
+
+ if ((flags & SPI_XFER_BEGIN) == SPI_XFER_BEGIN)
+ ctrl |= DSPI_TFR_CONT;
+
+ ctrl = setup_ctrl(ctrl, slave_plat->cs);
+
+ if (len > 1) {
+ int tmp_len = len - 1;
+
+ while (tmp_len--) {
+ if (dout) {
+ if (cfspi->charbit == 16)
+ cfspi_tx(cfspi, ctrl, *spi_wr16++);
+ else
+ cfspi_tx(cfspi, ctrl, *spi_wr++);
+ cfspi_rx(cfspi);
+ }
+
+ if (din) {
+ cfspi_tx(cfspi, ctrl, CONFIG_SPI_IDLE_VAL);
+ if (cfspi->charbit == 16)
+ *spi_rd16++ = cfspi_rx(cfspi);
+ else
+ *spi_rd++ = cfspi_rx(cfspi);
+ }
+ }
+
+ len = 1; /* remaining byte */
+ }
+
+ if (flags & SPI_XFER_END)
+ ctrl &= ~DSPI_TFR_CONT;
+
+ if (len) {
+ if (dout) {
+ if (cfspi->charbit == 16)
+ cfspi_tx(cfspi, ctrl, *spi_wr16);
+ else
+ cfspi_tx(cfspi, ctrl, *spi_wr);
+ cfspi_rx(cfspi);
+ }
+
+ if (din) {
+ cfspi_tx(cfspi, ctrl, CONFIG_SPI_IDLE_VAL);
+ if (cfspi->charbit == 16)
+ *spi_rd16 = cfspi_rx(cfspi);
+ else
+ *spi_rd = cfspi_rx(cfspi);
+ }
+ } else {
+ /* dummy read */
+ cfspi_tx(cfspi, ctrl, CONFIG_SPI_IDLE_VAL);
+ cfspi_rx(cfspi);
+ }
+
+ return 0;
+}
+
+static int coldfire_spi_set_speed(struct udevice *bus, uint max_hz)
+{
+ struct coldfire_spi_priv *cfspi = dev_get_priv(bus);
+ struct dspi *dspi = cfspi->regs;
+ int prescaler[] = { 2, 3, 5, 7 };
+ int scaler[] = {
+ 2, 4, 6, 8,
+ 16, 32, 64, 128,
+ 256, 512, 1024, 2048,
+ 4096, 8192, 16384, 32768
+ };
+ int i, j, pbrcnt, brcnt, diff, tmp, dbr = 0;
+ int best_i, best_j, bestmatch = MCF_DSPI_SPEED_BESTMATCH, baud_speed;
+ u32 bus_setup;
+
+ cfspi->baudrate = max_hz;
+
+ /* Read current setup */
+ bus_setup = readl(&dspi->ctar[dev_seq(bus)]);
+
+ tmp = (prescaler[3] * scaler[15]);
+ /* Maximum and minimum baudrate it can handle */
+ if ((cfspi->baudrate > (gd->bus_clk >> 1)) ||
+ (cfspi->baudrate < (gd->bus_clk / tmp))) {
+ printf("Exceed baudrate limitation: Max %d - Min %d\n",
+ (int)(gd->bus_clk >> 1), (int)(gd->bus_clk / tmp));
+ return -1;
+ }
+
+ /* Activate double baud rate when the rate exceeds 1/4 of the bus clock */
+ if ((bus_setup & DSPI_CTAR_DBR) ||
+ (cfspi->baudrate > (gd->bus_clk / (prescaler[0] * scaler[0])))) {
+ bus_setup |= DSPI_CTAR_DBR;
+ dbr = 1;
+ }
+
+ /* Overwrite default value set in platform configuration file */
+ if (cfspi->mode & SPI_MODE_MOD) {
+ /*
+ * Check to see if it is enabled by default in platform
+ * config, or manual setting passed by mode parameter
+ */
+ if (cfspi->mode & SPI_MODE_DBLRATE) {
+ bus_setup |= DSPI_CTAR_DBR;
+ dbr = 1;
+ }
+ }
+
+ pbrcnt = sizeof(prescaler) / sizeof(int);
+ brcnt = sizeof(scaler) / sizeof(int);
+
+ /* Baud rate calculation - pick the closest value; may not be an exact match */
+ for (best_i = 0, best_j = 0, i = 0; i < pbrcnt; i++) {
+ baud_speed = gd->bus_clk / prescaler[i];
+ for (j = 0; j < brcnt; j++) {
+ tmp = (baud_speed / scaler[j]) * (1 + dbr);
+
+ if (tmp > cfspi->baudrate)
+ diff = tmp - cfspi->baudrate;
+ else
+ diff = cfspi->baudrate - tmp;
+
+ if (diff < bestmatch) {
+ bestmatch = diff;
+ best_i = i;
+ best_j = j;
+ }
+ }
+ }
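+
+ /*
+ * Worked example (assuming gd->bus_clk = 80 MHz, a requested rate of
+ * 10 MHz and dbr = 0): prescaler 2 with scaler 4 gives
+ * 80 MHz / 2 / 4 = 10 MHz, an exact match, so best_i = 0, best_j = 1.
+ */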
+
+ bus_setup &= ~(DSPI_CTAR_PBR(0x03) | DSPI_CTAR_BR(0x0f));
+ bus_setup |= (DSPI_CTAR_PBR(best_i) | DSPI_CTAR_BR(best_j));
+ writel(bus_setup, &dspi->ctar[dev_seq(bus)]);
+
+ return 0;
+}
+
+static int coldfire_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct coldfire_spi_priv *cfspi = dev_get_priv(bus);
+ struct dspi *dspi = cfspi->regs;
+ u32 bus_setup = 0;
+
+ cfspi->mode = mode;
+
+ if (cfspi->mode & SPI_CPOL)
+ bus_setup |= DSPI_CTAR_CPOL;
+ if (cfspi->mode & SPI_CPHA)
+ bus_setup |= DSPI_CTAR_CPHA;
+ if (cfspi->mode & SPI_LSB_FIRST)
+ bus_setup |= DSPI_CTAR_LSBFE;
+
+ /* Overwrite default value set in platform configuration file */
+ if (cfspi->mode & SPI_MODE_MOD) {
+ if ((cfspi->mode & SPI_MODE_XFER_SZ_MASK) == 0)
+ bus_setup |=
+ readl(&dspi->ctar[dev_seq(bus)]) & MCF_FRM_SZ_16BIT;
+ else
+ bus_setup |=
+ ((cfspi->mode & SPI_MODE_XFER_SZ_MASK) >> 1);
+
+ /* PSCSCK, PASC, PDT */
+ bus_setup |= (cfspi->mode & SPI_MODE_DLY_PRE_MASK) >> 4;
+ /* CSSCK, ASC, DT */
+ bus_setup |= (cfspi->mode & SPI_MODE_DLY_SCA_MASK) >> 4;
+ } else {
+ bus_setup |=
+ (readl(&dspi->ctar[dev_seq(bus)]) & MCF_CTAR_MODE_MASK);
+ }
+
+ cfspi->charbit =
+ ((readl(&dspi->ctar[dev_seq(bus)]) & MCF_FRM_SZ_16BIT) ==
+ MCF_FRM_SZ_16BIT) ? 16 : 8;
+
+ setbits_be32(&dspi->ctar[dev_seq(bus)], bus_setup);
+
+ return 0;
+}
+
+static int coldfire_spi_probe(struct udevice *bus)
+{
+ struct coldfire_spi_plat *plat = dev_get_plat(bus);
+ struct coldfire_spi_priv *cfspi = dev_get_priv(bus);
+ struct dspi *dspi = cfspi->regs;
+ int i;
+
+ cfspi->regs = (struct dspi *)plat->regs_addr;
+
+ cfspi->baudrate = plat->speed_hz;
+ cfspi->mode = plat->mode;
+
+ for (i = 0; i < MCF_DSPI_MAX_CTAR_REGS; i++) {
+ unsigned int ctar = 0;
+
+ if (plat->ctar[i][0] == 0)
+ break;
+
+ ctar = DSPI_CTAR_TRSZ(plat->ctar[i][0]) |
+ DSPI_CTAR_PCSSCK(plat->ctar[i][1]) |
+ DSPI_CTAR_PASC(plat->ctar[i][2]) |
+ DSPI_CTAR_PDT(plat->ctar[i][3]) |
+ DSPI_CTAR_CSSCK(plat->ctar[i][4]) |
+ DSPI_CTAR_ASC(plat->ctar[i][5]) |
+ DSPI_CTAR_DT(plat->ctar[i][6]) |
+ DSPI_CTAR_BR(plat->ctar[i][7]);
+
+ writel(ctar, &cfspi->regs->ctar[i]);
+ }
+
+ /* Default CTARs */
+ for (i = 0; i < MCF_DSPI_MAX_CTAR_REGS; i++)
+ writel(MCF_DSPI_DEFAULT_CTAR, &dspi->ctar[i]);
+
+ dspi->mcr = DSPI_MCR_MSTR | DSPI_MCR_CSIS7 | DSPI_MCR_CSIS6 |
+ DSPI_MCR_CSIS5 | DSPI_MCR_CSIS4 | DSPI_MCR_CSIS3 |
+ DSPI_MCR_CSIS2 | DSPI_MCR_CSIS1 | DSPI_MCR_CSIS0 |
+ DSPI_MCR_CRXF | DSPI_MCR_CTXF;
+
+ return 0;
+}
+
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+static int coldfire_dspi_of_to_plat(struct udevice *bus)
+{
+ fdt_addr_t addr;
+ struct coldfire_spi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+ int *ctar, len;
+
+ addr = dev_read_addr(bus);
+ if (addr == FDT_ADDR_T_NONE)
+ return -ENOMEM;
+
+ plat->regs_addr = addr;
+
+ plat->num_cs = fdtdec_get_int(blob, node, "num-cs",
+ MCF_DSPI_DEFAULT_MAX_CS);
+
+ plat->speed_hz = fdtdec_get_int(blob, node, "spi-max-frequency",
+ MCF_DSPI_DEFAULT_SCK_FREQ);
+
+ plat->mode = fdtdec_get_int(blob, node, "spi-mode",
+ MCF_DSPI_DEFAULT_MODE);
+
+ memset(plat->ctar, 0, sizeof(plat->ctar));
+
+ ctar = (int *)fdt_getprop(blob, node, "ctar-params", &len);
+
+ if (ctar && len) {
+ int i, q, ctar_regs;
+
+ ctar_regs = len / sizeof(unsigned int) / MAX_CTAR_FIELDS;
+
+ if (ctar_regs > MAX_CTAR_REGS)
+ ctar_regs = MAX_CTAR_REGS;
+
+ for (i = 0; i < ctar_regs; i++) {
+ for (q = 0; q < MAX_CTAR_FIELDS; q++)
+ plat->ctar[i][q] = *ctar++;
+ }
+ }
+
+ debug("DSPI: regs=%pa, max-frequency=%d, num-cs=%d, mode=%d\n",
+ (void *)plat->regs_addr,
+ plat->speed_hz, plat->num_cs, plat->mode);
+
+ return 0;
+}
+
+static const struct udevice_id coldfire_spi_ids[] = {
+ { .compatible = "fsl,mcf-dspi" },
+ { }
+};
+#endif
+
+static const struct dm_spi_ops coldfire_spi_ops = {
+ .claim_bus = coldfire_spi_claim_bus,
+ .release_bus = coldfire_spi_release_bus,
+ .xfer = coldfire_spi_xfer,
+ .set_speed = coldfire_spi_set_speed,
+ .set_mode = coldfire_spi_set_mode,
+};
+
+U_BOOT_DRIVER(coldfire_spi) = {
+ .name = "spi_coldfire",
+ .id = UCLASS_SPI,
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .of_match = coldfire_spi_ids,
+ .of_to_plat = coldfire_dspi_of_to_plat,
+ .plat_auto = sizeof(struct coldfire_spi_plat),
+#endif
+ .probe = coldfire_spi_probe,
+ .ops = &coldfire_spi_ops,
+ .priv_auto = sizeof(struct coldfire_spi_priv),
+};
diff --git a/roms/u-boot/drivers/spi/davinci_spi.c b/roms/u-boot/drivers/spi/davinci_spi.c
new file mode 100644
index 000000000..15557a623
--- /dev/null
+++ b/roms/u-boot/drivers/spi/davinci_spi.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Driver for SPI controller on DaVinci. Based on atmel_spi.c
+ * by Atmel Corporation
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ */
+
+#include <common.h>
+#include <log.h>
+#include <spi.h>
+#include <malloc.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+#include <dm.h>
+#include <dm/platform_data/spi_davinci.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+/* SPIGCR0 */
+#define SPIGCR0_SPIENA_MASK 0x1
+#define SPIGCR0_SPIRST_MASK 0x0
+
+/* SPIGCR1 */
+#define SPIGCR1_CLKMOD_MASK BIT(1)
+#define SPIGCR1_MASTER_MASK BIT(0)
+#define SPIGCR1_SPIENA_MASK BIT(24)
+
+/* SPIPC0 */
+#define SPIPC0_DIFUN_MASK BIT(11) /* SIMO */
+#define SPIPC0_DOFUN_MASK BIT(10) /* SOMI */
+#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
+#define SPIPC0_EN0FUN_MASK BIT(0)
+
+/* SPIFMT0 */
+#define SPIFMT_SHIFTDIR_SHIFT 20
+#define SPIFMT_POLARITY_SHIFT 17
+#define SPIFMT_PHASE_SHIFT 16
+#define SPIFMT_PRESCALE_SHIFT 8
+
+/* SPIDAT1 */
+#define SPIDAT1_CSHOLD_SHIFT 28
+#define SPIDAT1_CSNR_SHIFT 16
+
+/* SPIDELAY */
+#define SPI_C2TDELAY_SHIFT 24
+#define SPI_T2CDELAY_SHIFT 16
+
+/* SPIBUF */
+#define SPIBUF_RXEMPTY_MASK BIT(31)
+#define SPIBUF_TXFULL_MASK BIT(29)
+
+/* SPIDEF */
+#define SPIDEF_CSDEF0_MASK BIT(0)
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* davinci spi register set */
+struct davinci_spi_regs {
+ dv_reg gcr0; /* 0x00 */
+ dv_reg gcr1; /* 0x04 */
+ dv_reg int0; /* 0x08 */
+ dv_reg lvl; /* 0x0c */
+ dv_reg flg; /* 0x10 */
+ dv_reg pc0; /* 0x14 */
+ dv_reg pc1; /* 0x18 */
+ dv_reg pc2; /* 0x1c */
+ dv_reg pc3; /* 0x20 */
+ dv_reg pc4; /* 0x24 */
+ dv_reg pc5; /* 0x28 */
+ dv_reg rsvd[3];
+ dv_reg dat0; /* 0x38 */
+ dv_reg dat1; /* 0x3c */
+ dv_reg buf; /* 0x40 */
+ dv_reg emu; /* 0x44 */
+ dv_reg delay; /* 0x48 */
+ dv_reg def; /* 0x4c */
+ dv_reg fmt0; /* 0x50 */
+ dv_reg fmt1; /* 0x54 */
+ dv_reg fmt2; /* 0x58 */
+ dv_reg fmt3; /* 0x5c */
+ dv_reg intvec0; /* 0x60 */
+ dv_reg intvec1; /* 0x64 */
+};
+
+/* davinci spi slave */
+struct davinci_spi_slave {
+ struct davinci_spi_regs *regs;
+ unsigned int freq; /* current SPI bus frequency */
+ unsigned int mode; /* current SPI mode used */
+ u8 num_cs; /* total no. of CS available */
+ u8 cur_cs; /* CS of current slave */
+ bool half_duplex; /* true, if master is half-duplex only */
+};
+
+/*
+ * This function needs to act like a macro to avoid pipeline reloads in the
+ * loops below. Use always_inline. This gains us about 160KiB/s and the bloat
+ * appears to be zero bytes (da830).
+ */
+__attribute__((always_inline))
+static inline u32 davinci_spi_xfer_data(struct davinci_spi_slave *ds, u32 data)
+{
+ u32 buf_reg_val;
+
+ /* send out data */
+ writel(data, &ds->regs->dat1);
+
+ /* wait for the data to clock in/out */
+ while ((buf_reg_val = readl(&ds->regs->buf)) & SPIBUF_RXEMPTY_MASK)
+ ;
+
+ return buf_reg_val;
+}
+
+static int davinci_spi_read(struct davinci_spi_slave *ds, unsigned int len,
+ u8 *rxp, unsigned long flags)
+{
+ unsigned int data1_reg_val;
+
+ /* enable CS hold, CS[n] and clear the data bits */
+ data1_reg_val = ((1 << SPIDAT1_CSHOLD_SHIFT) |
+ (ds->cur_cs << SPIDAT1_CSNR_SHIFT));
+
+ /* wait till TXFULL is deasserted */
+ while (readl(&ds->regs->buf) & SPIBUF_TXFULL_MASK)
+ ;
+
+ /* preload the TX buffer to avoid clock starvation */
+ writel(data1_reg_val, &ds->regs->dat1);
+
+ /* keep reading 1 byte until only 1 byte left */
+ while ((len--) > 1)
+ *rxp++ = davinci_spi_xfer_data(ds, data1_reg_val);
+
+ /* clear CS hold when we reach the end */
+ if (flags & SPI_XFER_END)
+ data1_reg_val &= ~(1 << SPIDAT1_CSHOLD_SHIFT);
+
+ /* read the last byte */
+ *rxp = davinci_spi_xfer_data(ds, data1_reg_val);
+
+ return 0;
+}
+
+static int davinci_spi_write(struct davinci_spi_slave *ds, unsigned int len,
+ const u8 *txp, unsigned long flags)
+{
+ unsigned int data1_reg_val;
+
+ /* enable CS hold and clear the data bits */
+ data1_reg_val = ((1 << SPIDAT1_CSHOLD_SHIFT) |
+ (ds->cur_cs << SPIDAT1_CSNR_SHIFT));
+
+ /* wait till TXFULL is deasserted */
+ while (readl(&ds->regs->buf) & SPIBUF_TXFULL_MASK)
+ ;
+
+ /* preload the TX buffer to avoid clock starvation */
+ if (len > 2) {
+ writel(data1_reg_val | *txp++, &ds->regs->dat1);
+ len--;
+ }
+
+ /* keep writing 1 byte until only 1 byte left */
+ while ((len--) > 1)
+ davinci_spi_xfer_data(ds, data1_reg_val | *txp++);
+
+ /* clear CS hold when we reach the end */
+ if (flags & SPI_XFER_END)
+ data1_reg_val &= ~(1 << SPIDAT1_CSHOLD_SHIFT);
+
+ /* write the last byte */
+ davinci_spi_xfer_data(ds, data1_reg_val | *txp);
+
+ return 0;
+}
+
+static int davinci_spi_read_write(struct davinci_spi_slave *ds,
+				  unsigned int len, u8 *rxp, const u8 *txp,
+				  unsigned long flags)
+{
+ unsigned int data1_reg_val;
+
+ /* enable CS hold and clear the data bits */
+ data1_reg_val = ((1 << SPIDAT1_CSHOLD_SHIFT) |
+ (ds->cur_cs << SPIDAT1_CSNR_SHIFT));
+
+ /* wait till TXFULL is deasserted */
+ while (readl(&ds->regs->buf) & SPIBUF_TXFULL_MASK)
+ ;
+
+ /* keep reading and writing 1 byte until only 1 byte left */
+ while ((len--) > 1)
+ *rxp++ = davinci_spi_xfer_data(ds, data1_reg_val | *txp++);
+
+ /* clear CS hold when we reach the end */
+ if (flags & SPI_XFER_END)
+ data1_reg_val &= ~(1 << SPIDAT1_CSHOLD_SHIFT);
+
+ /* read and write the last byte */
+ *rxp = davinci_spi_xfer_data(ds, data1_reg_val | *txp);
+
+ return 0;
+}
+
+static int __davinci_spi_claim_bus(struct davinci_spi_slave *ds, int cs)
+{
+ unsigned int mode = 0, scalar;
+
+ /* Enable the SPI hardware */
+ writel(SPIGCR0_SPIRST_MASK, &ds->regs->gcr0);
+ udelay(1000);
+ writel(SPIGCR0_SPIENA_MASK, &ds->regs->gcr0);
+
+ /* Set master mode, powered up and not activated */
+ writel(SPIGCR1_MASTER_MASK | SPIGCR1_CLKMOD_MASK, &ds->regs->gcr1);
+
+ /* CS, CLK, SIMO and SOMI are functional pins */
+ writel(((1 << cs) | SPIPC0_CLKFUN_MASK |
+ SPIPC0_DOFUN_MASK | SPIPC0_DIFUN_MASK), &ds->regs->pc0);
+
+ /* setup format */
+ scalar = ((CONFIG_SYS_SPI_CLK / ds->freq) - 1) & 0xFF;
+
+ /*
+	 * Use the following format:
+ * character length = 8,
+ * MSB shifted out first
+ */
+ if (ds->mode & SPI_CPOL)
+ mode |= SPI_CPOL;
+ if (!(ds->mode & SPI_CPHA))
+ mode |= SPI_CPHA;
+ writel(8 | (scalar << SPIFMT_PRESCALE_SHIFT) |
+ (mode << SPIFMT_PHASE_SHIFT), &ds->regs->fmt0);
+
+ /*
+ * Including a minor delay. No science here. Should be good even with
+ * no delay
+ */
+ writel((50 << SPI_C2TDELAY_SHIFT) |
+ (50 << SPI_T2CDELAY_SHIFT), &ds->regs->delay);
+
+ /* default chip select register */
+ writel(SPIDEF_CSDEF0_MASK, &ds->regs->def);
+
+ /* no interrupts */
+ writel(0, &ds->regs->int0);
+ writel(0, &ds->regs->lvl);
+
+ /* enable SPI */
+ writel((readl(&ds->regs->gcr1) | SPIGCR1_SPIENA_MASK), &ds->regs->gcr1);
+
+ return 0;
+}
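+
+/*
+ * Illustrative only: a rough worked example of the prescaler computed in
+ * __davinci_spi_claim_bus() above, assuming CONFIG_SYS_SPI_CLK = 150 MHz
+ * and ds->freq = 25 MHz (both values hypothetical):
+ *
+ *	scalar = ((150000000 / 25000000) - 1) & 0xFF;	// 5
+ *
+ * which presumably yields SCK = CONFIG_SYS_SPI_CLK / (scalar + 1) = 25 MHz.
+ */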
+
+static int __davinci_spi_release_bus(struct davinci_spi_slave *ds)
+{
+ /* Disable the SPI hardware */
+ writel(SPIGCR0_SPIRST_MASK, &ds->regs->gcr0);
+
+ return 0;
+}
+
+static int __davinci_spi_xfer(struct davinci_spi_slave *ds,
+ unsigned int bitlen, const void *dout, void *din,
+ unsigned long flags)
+{
+ unsigned int len;
+
+ if (bitlen == 0)
+ /* Finish any previously submitted transfers */
+ goto out;
+
+ /*
+ * It's not clear how non-8-bit-aligned transfers are supposed to be
+ * represented as a stream of bytes...this is a limitation of
+ * the current SPI interface - here we terminate on receiving such a
+ * transfer request.
+ */
+ if (bitlen % 8) {
+ /* Errors always terminate an ongoing transfer */
+ flags |= SPI_XFER_END;
+ goto out;
+ }
+
+ len = bitlen / 8;
+
+ if (!dout)
+ return davinci_spi_read(ds, len, din, flags);
+ if (!din)
+ return davinci_spi_write(ds, len, dout, flags);
+ if (!ds->half_duplex)
+ return davinci_spi_read_write(ds, len, din, dout, flags);
+
+ printf("SPI full duplex not supported\n");
+ flags |= SPI_XFER_END;
+
+out:
+ if (flags & SPI_XFER_END) {
+ u8 dummy = 0;
+ davinci_spi_write(ds, 1, &dummy, flags);
+ }
+ return 0;
+}
+
+static int davinci_spi_set_speed(struct udevice *bus, uint max_hz)
+{
+ struct davinci_spi_slave *ds = dev_get_priv(bus);
+
+ debug("%s speed %u\n", __func__, max_hz);
+ if (max_hz > CONFIG_SYS_SPI_CLK / 2)
+ return -EINVAL;
+
+ ds->freq = max_hz;
+
+ return 0;
+}
+
+static int davinci_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct davinci_spi_slave *ds = dev_get_priv(bus);
+
+ debug("%s mode %u\n", __func__, mode);
+ ds->mode = mode;
+
+ return 0;
+}
+
+static int davinci_spi_claim_bus(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *slave_plat =
+ dev_get_parent_plat(dev);
+ struct udevice *bus = dev->parent;
+ struct davinci_spi_slave *ds = dev_get_priv(bus);
+
+ if (slave_plat->cs >= ds->num_cs) {
+ printf("Invalid SPI chipselect\n");
+ return -EINVAL;
+ }
+ ds->half_duplex = slave_plat->mode & SPI_PREAMBLE;
+
+ return __davinci_spi_claim_bus(ds, slave_plat->cs);
+}
+
+static int davinci_spi_release_bus(struct udevice *dev)
+{
+ struct davinci_spi_slave *ds = dev_get_priv(dev->parent);
+
+ return __davinci_spi_release_bus(ds);
+}
+
+static int davinci_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din,
+ unsigned long flags)
+{
+ struct dm_spi_slave_plat *slave =
+ dev_get_parent_plat(dev);
+ struct udevice *bus = dev->parent;
+ struct davinci_spi_slave *ds = dev_get_priv(bus);
+
+ if (slave->cs >= ds->num_cs) {
+ printf("Invalid SPI chipselect\n");
+ return -EINVAL;
+ }
+ ds->cur_cs = slave->cs;
+
+ return __davinci_spi_xfer(ds, bitlen, dout, din, flags);
+}
+
+static const struct dm_spi_ops davinci_spi_ops = {
+ .claim_bus = davinci_spi_claim_bus,
+ .release_bus = davinci_spi_release_bus,
+ .xfer = davinci_spi_xfer,
+ .set_speed = davinci_spi_set_speed,
+ .set_mode = davinci_spi_set_mode,
+};
+
+static int davinci_spi_probe(struct udevice *bus)
+{
+ struct davinci_spi_slave *ds = dev_get_priv(bus);
+ struct davinci_spi_plat *plat = dev_get_plat(bus);
+ ds->regs = plat->regs;
+ ds->num_cs = plat->num_cs;
+
+ return 0;
+}
+
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+static int davinci_ofdata_to_platadata(struct udevice *bus)
+{
+ struct davinci_spi_plat *plat = dev_get_plat(bus);
+ fdt_addr_t addr;
+
+ addr = dev_read_addr(bus);
+ if (addr == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ plat->regs = (struct davinci_spi_regs *)addr;
+ plat->num_cs = fdtdec_get_int(gd->fdt_blob, dev_of_offset(bus), "num-cs", 4);
+
+ return 0;
+}
+
+static const struct udevice_id davinci_spi_ids[] = {
+ { .compatible = "ti,keystone-spi" },
+ { .compatible = "ti,dm6441-spi" },
+ { .compatible = "ti,da830-spi" },
+ { }
+};
+#endif
+
+U_BOOT_DRIVER(davinci_spi) = {
+ .name = "davinci_spi",
+ .id = UCLASS_SPI,
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .of_match = davinci_spi_ids,
+ .of_to_plat = davinci_ofdata_to_platadata,
+ .plat_auto = sizeof(struct davinci_spi_plat),
+#endif
+ .probe = davinci_spi_probe,
+ .ops = &davinci_spi_ops,
+ .priv_auto = sizeof(struct davinci_spi_slave),
+};
diff --git a/roms/u-boot/drivers/spi/designware_spi.c b/roms/u-boot/drivers/spi/designware_spi.c
new file mode 100644
index 000000000..742121140
--- /dev/null
+++ b/roms/u-boot/drivers/spi/designware_spi.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Designware master SPI core controller driver
+ *
+ * Copyright (C) 2014 Stefan Roese <sr@denx.de>
+ * Copyright (C) 2020 Sean Anderson <seanga2@gmail.com>
+ *
+ * Very loosely based on the Linux driver:
+ * drivers/spi/spi-dw.c, which is:
+ * Copyright (c) 2009, Intel Corporation.
+ */
+
+#define LOG_CATEGORY UCLASS_SPI
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <log.h>
+#include <malloc.h>
+#include <reset.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <asm/io.h>
+#include <asm-generic/gpio.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <linux/iopoll.h>
+#include <linux/sizes.h>
+
+/* Register offsets */
+#define DW_SPI_CTRLR0 0x00
+#define DW_SPI_CTRLR1 0x04
+#define DW_SPI_SSIENR 0x08
+#define DW_SPI_MWCR 0x0c
+#define DW_SPI_SER 0x10
+#define DW_SPI_BAUDR 0x14
+#define DW_SPI_TXFTLR 0x18
+#define DW_SPI_RXFTLR 0x1c
+#define DW_SPI_TXFLR 0x20
+#define DW_SPI_RXFLR 0x24
+#define DW_SPI_SR 0x28
+#define DW_SPI_IMR 0x2c
+#define DW_SPI_ISR 0x30
+#define DW_SPI_RISR 0x34
+#define DW_SPI_TXOICR 0x38
+#define DW_SPI_RXOICR 0x3c
+#define DW_SPI_RXUICR 0x40
+#define DW_SPI_MSTICR 0x44
+#define DW_SPI_ICR 0x48
+#define DW_SPI_DMACR 0x4c
+#define DW_SPI_DMATDLR 0x50
+#define DW_SPI_DMARDLR 0x54
+#define DW_SPI_IDR 0x58
+#define DW_SPI_VERSION 0x5c
+#define DW_SPI_DR 0x60
+
+/* Bit fields in CTRLR0 */
+/*
+ * Only present when SSI_MAX_XFER_SIZE=16. This is the default, and the only
+ * option before version 3.23a.
+ */
+#define CTRLR0_DFS_MASK GENMASK(3, 0)
+
+#define CTRLR0_FRF_MASK GENMASK(5, 4)
+#define CTRLR0_FRF_SPI 0x0
+#define CTRLR0_FRF_SSP 0x1
+#define CTRLR0_FRF_MICROWIRE 0x2
+#define CTRLR0_FRF_RESV 0x3
+
+#define CTRLR0_MODE_MASK GENMASK(7, 6)
+#define CTRLR0_MODE_SCPH 0x1
+#define CTRLR0_MODE_SCPOL 0x2
+
+#define CTRLR0_TMOD_MASK GENMASK(9, 8)
+#define CTRLR0_TMOD_TR 0x0 /* xmit & recv */
+#define CTRLR0_TMOD_TO 0x1 /* xmit only */
+#define CTRLR0_TMOD_RO 0x2 /* recv only */
+#define CTRLR0_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define CTRLR0_SLVOE_OFFSET 10
+#define CTRLR0_SRL_OFFSET 11
+#define CTRLR0_CFS_MASK GENMASK(15, 12)
+
+/* Only present when SSI_MAX_XFER_SIZE=32 */
+#define CTRLR0_DFS_32_MASK GENMASK(20, 16)
+
+/* The next field is only present on versions after 4.00a */
+#define CTRLR0_SPI_FRF_MASK GENMASK(22, 21)
+#define CTRLR0_SPI_FRF_BYTE 0x0
+#define CTRLR0_SPI_FRF_DUAL 0x1
+#define CTRLR0_SPI_FRF_QUAD 0x2
+
+/* Bit fields in CTRLR0 based on DWC_ssi_databook.pdf v1.01a */
+#define DWC_SSI_CTRLR0_DFS_MASK GENMASK(4, 0)
+#define DWC_SSI_CTRLR0_FRF_MASK GENMASK(7, 6)
+#define DWC_SSI_CTRLR0_MODE_MASK GENMASK(9, 8)
+#define DWC_SSI_CTRLR0_TMOD_MASK GENMASK(11, 10)
+#define DWC_SSI_CTRLR0_SRL_OFFSET 13
+#define DWC_SSI_CTRLR0_SPI_FRF_MASK GENMASK(23, 22)
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK GENMASK(6, 0) /* cover 7 bits */
+#define SR_BUSY BIT(0)
+#define SR_TF_NOT_FULL BIT(1)
+#define SR_TF_EMPT BIT(2)
+#define SR_RF_NOT_EMPT BIT(3)
+#define SR_RF_FULL BIT(4)
+#define SR_TX_ERR BIT(5)
+#define SR_DCOL BIT(6)
+
+#define RX_TIMEOUT 1000 /* timeout in ms */
+
+struct dw_spi_plat {
+ s32 frequency; /* Default clock frequency, -1 for none */
+ void __iomem *regs;
+};
+
+struct dw_spi_priv {
+ struct clk clk;
+ struct reset_ctl_bulk resets;
+ struct gpio_desc cs_gpio; /* External chip-select gpio */
+
+ u32 (*update_cr0)(struct dw_spi_priv *priv);
+
+ void __iomem *regs;
+ unsigned long bus_clk_rate;
+ unsigned int freq; /* Default frequency */
+ unsigned int mode;
+
+ const void *tx;
+ const void *tx_end;
+ void *rx;
+ void *rx_end;
+ u32 fifo_len; /* depth of the FIFO buffer */
+ u32 max_xfer; /* Maximum transfer size (in bits) */
+
+ int bits_per_word;
+ int len;
+ u8 cs; /* chip select pin */
+ u8 tmode; /* TR/TO/RO/EEPROM */
+ u8 type; /* SPI/SSP/MicroWire */
+};
+
+static inline u32 dw_read(struct dw_spi_priv *priv, u32 offset)
+{
+ return __raw_readl(priv->regs + offset);
+}
+
+static inline void dw_write(struct dw_spi_priv *priv, u32 offset, u32 val)
+{
+ __raw_writel(val, priv->regs + offset);
+}
+
+static u32 dw_spi_dw16_update_cr0(struct dw_spi_priv *priv)
+{
+ return FIELD_PREP(CTRLR0_DFS_MASK, priv->bits_per_word - 1)
+ | FIELD_PREP(CTRLR0_FRF_MASK, priv->type)
+ | FIELD_PREP(CTRLR0_MODE_MASK, priv->mode)
+ | FIELD_PREP(CTRLR0_TMOD_MASK, priv->tmode);
+}
+
+static u32 dw_spi_dw32_update_cr0(struct dw_spi_priv *priv)
+{
+ return FIELD_PREP(CTRLR0_DFS_32_MASK, priv->bits_per_word - 1)
+ | FIELD_PREP(CTRLR0_FRF_MASK, priv->type)
+ | FIELD_PREP(CTRLR0_MODE_MASK, priv->mode)
+ | FIELD_PREP(CTRLR0_TMOD_MASK, priv->tmode);
+}
+
+static u32 dw_spi_dwc_update_cr0(struct dw_spi_priv *priv)
+{
+ return FIELD_PREP(DWC_SSI_CTRLR0_DFS_MASK, priv->bits_per_word - 1)
+ | FIELD_PREP(DWC_SSI_CTRLR0_FRF_MASK, priv->type)
+ | FIELD_PREP(DWC_SSI_CTRLR0_MODE_MASK, priv->mode)
+ | FIELD_PREP(DWC_SSI_CTRLR0_TMOD_MASK, priv->tmode);
+}
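+
+/*
+ * Illustrative only: for an assumed configuration of 8 bits per word,
+ * SPI mode 3 (SPI_CPOL | SPI_CPHA = 0x3), frame format SPI (type = 0) and
+ * full-duplex transfers (CTRLR0_TMOD_TR = 0), dw_spi_dw16_update_cr0()
+ * above composes
+ *
+ *	cr0 = FIELD_PREP(CTRLR0_DFS_MASK, 7)	// 0x007
+ *	    | FIELD_PREP(CTRLR0_MODE_MASK, 3)	// 0x0c0
+ *	    = 0xc7
+ *
+ * with the FRF and TMOD fields both zero.
+ */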
+
+static int dw_spi_apb_init(struct udevice *bus, struct dw_spi_priv *priv)
+{
+ /* If we read zeros from DFS, then we need to use DFS_32 instead */
+ dw_write(priv, DW_SPI_SSIENR, 0);
+ dw_write(priv, DW_SPI_CTRLR0, 0xffffffff);
+ if (FIELD_GET(CTRLR0_DFS_MASK, dw_read(priv, DW_SPI_CTRLR0))) {
+ priv->max_xfer = 16;
+ priv->update_cr0 = dw_spi_dw16_update_cr0;
+ } else {
+ priv->max_xfer = 32;
+ priv->update_cr0 = dw_spi_dw32_update_cr0;
+ }
+
+ return 0;
+}
+
+static int dw_spi_dwc_init(struct udevice *bus, struct dw_spi_priv *priv)
+{
+ priv->max_xfer = 32;
+ priv->update_cr0 = dw_spi_dwc_update_cr0;
+ return 0;
+}
+
+static int request_gpio_cs(struct udevice *bus)
+{
+#if CONFIG_IS_ENABLED(DM_GPIO) && !defined(CONFIG_SPL_BUILD)
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ /* External chip select gpio line is optional */
+ ret = gpio_request_by_name(bus, "cs-gpios", 0, &priv->cs_gpio,
+ GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
+ if (ret == -ENOENT)
+ return 0;
+
+ if (ret < 0) {
+ dev_err(bus, "Couldn't request gpio! (error %d)\n", ret);
+ return ret;
+ }
+
+ if (dm_gpio_is_valid(&priv->cs_gpio)) {
+ dm_gpio_set_dir_flags(&priv->cs_gpio,
+ GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
+ }
+
+ dev_dbg(bus, "Using external gpio for CS management\n");
+#endif
+ return 0;
+}
+
+static int dw_spi_of_to_plat(struct udevice *bus)
+{
+ struct dw_spi_plat *plat = dev_get_plat(bus);
+
+ plat->regs = dev_read_addr_ptr(bus);
+ if (!plat->regs)
+ return -EINVAL;
+
+ /* Use 500KHz as a suitable default */
+ plat->frequency = dev_read_u32_default(bus, "spi-max-frequency",
+ 500000);
+
+ if (dev_read_bool(bus, "spi-slave"))
+ return -EINVAL;
+
+ dev_info(bus, "max-frequency=%d\n", plat->frequency);
+
+ return request_gpio_cs(bus);
+}
+
+/* Restart the controller, disable all interrupts, clean rx fifo */
+static void spi_hw_init(struct udevice *bus, struct dw_spi_priv *priv)
+{
+ dw_write(priv, DW_SPI_SSIENR, 0);
+ dw_write(priv, DW_SPI_IMR, 0xff);
+ dw_write(priv, DW_SPI_SSIENR, 1);
+
+ /*
+ * Try to detect the FIFO depth if not set by interface driver,
+ * the depth could be from 2 to 256 from HW spec
+ */
+ if (!priv->fifo_len) {
+ u32 fifo;
+
+ for (fifo = 1; fifo < 256; fifo++) {
+ dw_write(priv, DW_SPI_TXFTLR, fifo);
+ if (fifo != dw_read(priv, DW_SPI_TXFTLR))
+ break;
+ }
+
+ priv->fifo_len = (fifo == 1) ? 0 : fifo;
+ dw_write(priv, DW_SPI_TXFTLR, 0);
+ }
+ dev_dbg(bus, "fifo_len=%d\n", priv->fifo_len);
+}
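+
+/*
+ * Illustrative only: a sketch of how the detection loop in spi_hw_init()
+ * above behaves. If writes of 1..15 to DW_SPI_TXFTLR read back unchanged
+ * but a write of 16 does not, the loop breaks with fifo == 16 and
+ * priv->fifo_len is set to 16; if even the first write (fifo == 1) fails
+ * to stick, fifo_len stays 0.
+ */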
+
+/*
+ * We define the dw_spi_get_clk function as 'weak' because some targets
+ * (like SOCFPGA_GEN5 and SOCFPGA_ARRIA10) don't use the standard clock API
+ * and instead implement dw_spi_get_clk in their own clock manager.
+ */
+__weak int dw_spi_get_clk(struct udevice *bus, ulong *rate)
+{
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ ret = clk_get_by_index(bus, 0, &priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(&priv->clk);
+ if (ret && ret != -ENOSYS && ret != -ENOTSUPP)
+ return ret;
+
+ *rate = clk_get_rate(&priv->clk);
+ if (!*rate)
+ goto err_rate;
+
+ dev_dbg(bus, "Got clock via device tree: %lu Hz\n", *rate);
+
+ return 0;
+
+err_rate:
+ clk_disable(&priv->clk);
+ clk_free(&priv->clk);
+
+ return -EINVAL;
+}
+
+static int dw_spi_reset(struct udevice *bus)
+{
+ int ret;
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+
+ ret = reset_get_bulk(bus, &priv->resets);
+ if (ret) {
+ /*
+		 * Return 0 if the error is due to !CONFIG_DM_RESET or the
+		 * reset DT property not being present.
+ */
+ if (ret == -ENOENT || ret == -ENOTSUPP)
+ return 0;
+
+ dev_warn(bus, "Couldn't find/assert reset device (error %d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = reset_deassert_bulk(&priv->resets);
+ if (ret) {
+ reset_release_bulk(&priv->resets);
+ dev_err(bus, "Failed to de-assert reset for SPI (error %d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+typedef int (*dw_spi_init_t)(struct udevice *bus, struct dw_spi_priv *priv);
+
+static int dw_spi_probe(struct udevice *bus)
+{
+ dw_spi_init_t init = (dw_spi_init_t)dev_get_driver_data(bus);
+ struct dw_spi_plat *plat = dev_get_plat(bus);
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+ u32 version;
+
+ priv->regs = plat->regs;
+ priv->freq = plat->frequency;
+
+ ret = dw_spi_get_clk(bus, &priv->bus_clk_rate);
+ if (ret)
+ return ret;
+
+ ret = dw_spi_reset(bus);
+ if (ret)
+ return ret;
+
+ if (!init)
+ return -EINVAL;
+ ret = init(bus, priv);
+ if (ret)
+ return ret;
+
+ version = dw_read(priv, DW_SPI_VERSION);
+ dev_dbg(bus, "ssi_version_id=%c.%c%c%c ssi_max_xfer_size=%u\n",
+ version >> 24, version >> 16, version >> 8, version,
+ priv->max_xfer);
+
+ /* Currently only bits_per_word == 8 supported */
+ priv->bits_per_word = 8;
+
+ priv->tmode = 0; /* Tx & Rx */
+
+ /* Basic HW init */
+ spi_hw_init(bus, priv);
+
+ return 0;
+}
+
+/* Return the max entries we can fill into tx fifo */
+static inline u32 tx_max(struct dw_spi_priv *priv)
+{
+ u32 tx_left, tx_room, rxtx_gap;
+
+ tx_left = (priv->tx_end - priv->tx) / (priv->bits_per_word >> 3);
+ tx_room = priv->fifo_len - dw_read(priv, DW_SPI_TXFLR);
+
+ /*
+	 * Another concern is the tx/rx mismatch: we considered using
+	 * (priv->fifo_len - rxflr - txflr) as the maximum for tx, but it
+	 * doesn't cover data that has already left the tx/rx FIFOs and is
+	 * sitting in the shift registers, so the limit is tracked in
+	 * software instead.
+ */
+ rxtx_gap = ((priv->rx_end - priv->rx) - (priv->tx_end - priv->tx)) /
+ (priv->bits_per_word >> 3);
+
+ return min3(tx_left, tx_room, (u32)(priv->fifo_len - rxtx_gap));
+}
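+
+/*
+ * Illustrative only: with an assumed fifo_len of 16 and 10 words already
+ * written but not yet read back (rxtx_gap == 10), tx_max() above allows at
+ * most 16 - 10 = 6 new words to be queued, even if TXFLR reports more room,
+ * so the RX FIFO cannot overflow.
+ */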
+
+/* Return the max entries we should read out of rx fifo */
+static inline u32 rx_max(struct dw_spi_priv *priv)
+{
+ u32 rx_left = (priv->rx_end - priv->rx) / (priv->bits_per_word >> 3);
+
+ return min_t(u32, rx_left, dw_read(priv, DW_SPI_RXFLR));
+}
+
+static void dw_writer(struct dw_spi_priv *priv)
+{
+ u32 max = tx_max(priv);
+ u32 txw = 0xFFFFFFFF;
+
+ while (max--) {
+ /* Set the tx word if the transfer's original "tx" is not null */
+ if (priv->tx_end - priv->len) {
+ if (priv->bits_per_word == 8)
+ txw = *(u8 *)(priv->tx);
+ else
+ txw = *(u16 *)(priv->tx);
+ }
+ dw_write(priv, DW_SPI_DR, txw);
+ log_content("tx=0x%02x\n", txw);
+ priv->tx += priv->bits_per_word >> 3;
+ }
+}
+
+static void dw_reader(struct dw_spi_priv *priv)
+{
+ u32 max = rx_max(priv);
+ u16 rxw;
+
+ while (max--) {
+ rxw = dw_read(priv, DW_SPI_DR);
+ log_content("rx=0x%02x\n", rxw);
+
+ /* Care about rx if the transfer's original "rx" is not null */
+ if (priv->rx_end - priv->len) {
+ if (priv->bits_per_word == 8)
+ *(u8 *)(priv->rx) = rxw;
+ else
+ *(u16 *)(priv->rx) = rxw;
+ }
+ priv->rx += priv->bits_per_word >> 3;
+ }
+}
+
+static int poll_transfer(struct dw_spi_priv *priv)
+{
+ do {
+ dw_writer(priv);
+ dw_reader(priv);
+ } while (priv->rx_end > priv->rx);
+
+ return 0;
+}
+
+/*
+ * We define the external_cs_manage function as 'weak' because some targets
+ * (like MSCC Ocelot) don't control the external CS pin using a GPIO
+ * controller. These SoCs use dedicated registers to control the SPI
+ * pins (and especially the CS) in software.
+ */
+__weak void external_cs_manage(struct udevice *dev, bool on)
+{
+#if CONFIG_IS_ENABLED(DM_GPIO) && !defined(CONFIG_SPL_BUILD)
+ struct dw_spi_priv *priv = dev_get_priv(dev->parent);
+
+ if (!dm_gpio_is_valid(&priv->cs_gpio))
+ return;
+
+ dm_gpio_set_value(&priv->cs_gpio, on ? 1 : 0);
+#endif
+}
+
+static int dw_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+ const u8 *tx = dout;
+ u8 *rx = din;
+ int ret = 0;
+ u32 cr0 = 0;
+ u32 val;
+ u32 cs;
+
+ /* spi core configured to do 8 bit transfers */
+ if (bitlen % 8) {
+ dev_err(dev, "Non byte aligned SPI transfer.\n");
+ return -1;
+ }
+
+ /* Start the transaction if necessary. */
+ if (flags & SPI_XFER_BEGIN)
+ external_cs_manage(dev, false);
+
+ if (rx && tx)
+ priv->tmode = CTRLR0_TMOD_TR;
+ else if (rx)
+ priv->tmode = CTRLR0_TMOD_RO;
+ else
+ /*
+		 * In transmit-only mode (CTRLR0_TMOD_TO) the input FIFO never
+		 * gets any data, which breaks our logic in poll_transfer() above.
+ */
+ priv->tmode = CTRLR0_TMOD_TR;
+
+ cr0 = priv->update_cr0(priv);
+
+ priv->len = bitlen >> 3;
+
+ priv->tx = (void *)tx;
+ priv->tx_end = priv->tx + priv->len;
+ priv->rx = rx;
+ priv->rx_end = priv->rx + priv->len;
+
+ /* Disable controller before writing control registers */
+ dw_write(priv, DW_SPI_SSIENR, 0);
+
+ dev_dbg(dev, "cr0=%08x rx=%p tx=%p len=%d [bytes]\n", cr0, rx, tx,
+ priv->len);
+ /* Reprogram cr0 only if changed */
+ if (dw_read(priv, DW_SPI_CTRLR0) != cr0)
+ dw_write(priv, DW_SPI_CTRLR0, cr0);
+
+ /*
+	 * Configure the desired SS (slave select 0...3) in the controller.
+ * The DW SPI controller will activate and deactivate this CS
+ * automatically. So no cs_activate() etc is needed in this driver.
+ */
+ cs = spi_chip_select(dev);
+ dw_write(priv, DW_SPI_SER, 1 << cs);
+
+ /* Enable controller after writing control registers */
+ dw_write(priv, DW_SPI_SSIENR, 1);
+
+ /* Start transfer in a polling loop */
+ ret = poll_transfer(priv);
+
+ /*
+ * Wait for current transmit operation to complete.
+	 * Otherwise, any data still sitting in the Tx FIFO can be
+	 * silently flushed, i.e. dropped when the controller is disabled
+	 * (by writing 0 to DW_SPI_SSIENR) at the beginning of the next
+	 * transfer.
+ */
+ if (readl_poll_timeout(priv->regs + DW_SPI_SR, val,
+ (val & SR_TF_EMPT) && !(val & SR_BUSY),
+ RX_TIMEOUT * 1000)) {
+ ret = -ETIMEDOUT;
+ }
+
+ /* Stop the transaction if necessary */
+ if (flags & SPI_XFER_END)
+ external_cs_manage(dev, true);
+
+ return ret;
+}
+
+/*
+ * This function is necessary for reading SPI flash with the native CS
+ * c.f. https://lkml.org/lkml/2015/12/23/132
+ */
+static int dw_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
+{
+ bool read = op->data.dir == SPI_MEM_DATA_IN;
+ int pos, i, ret = 0;
+ struct udevice *bus = slave->dev->parent;
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+ u8 op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+ u8 op_buf[op_len];
+ u32 cr0;
+
+ if (read)
+ priv->tmode = CTRLR0_TMOD_EPROMREAD;
+ else
+ priv->tmode = CTRLR0_TMOD_TO;
+
+ cr0 = priv->update_cr0(priv);
+ dev_dbg(bus, "cr0=%08x buf=%p len=%u [bytes]\n", cr0, op->data.buf.in,
+ op->data.nbytes);
+
+ dw_write(priv, DW_SPI_SSIENR, 0);
+ dw_write(priv, DW_SPI_CTRLR0, cr0);
+ if (read)
+ dw_write(priv, DW_SPI_CTRLR1, op->data.nbytes - 1);
+ dw_write(priv, DW_SPI_SSIENR, 1);
+
+ /* From spi_mem_exec_op */
+ pos = 0;
+ op_buf[pos++] = op->cmd.opcode;
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++)
+ op_buf[pos + i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ pos += op->addr.nbytes;
+ }
+ if (op->dummy.nbytes)
+ memset(op_buf + pos, 0xff, op->dummy.nbytes);
+
+ external_cs_manage(slave->dev, false);
+
+ priv->tx = &op_buf;
+ priv->tx_end = priv->tx + op_len;
+ priv->rx = NULL;
+ priv->rx_end = NULL;
+ while (priv->tx != priv->tx_end)
+ dw_writer(priv);
+
+ /*
+ * XXX: The following are tight loops! Enabling debug messages may cause
+ * them to fail because we are not reading/writing the fifo fast enough.
+ */
+ if (read) {
+ priv->rx = op->data.buf.in;
+ priv->rx_end = priv->rx + op->data.nbytes;
+
+ dw_write(priv, DW_SPI_SER, 1 << spi_chip_select(slave->dev));
+ while (priv->rx != priv->rx_end)
+ dw_reader(priv);
+ } else {
+ u32 val;
+
+ priv->tx = op->data.buf.out;
+ priv->tx_end = priv->tx + op->data.nbytes;
+
+ /* Fill up the write fifo before starting the transfer */
+ dw_writer(priv);
+ dw_write(priv, DW_SPI_SER, 1 << spi_chip_select(slave->dev));
+ while (priv->tx != priv->tx_end)
+ dw_writer(priv);
+
+ if (readl_poll_timeout(priv->regs + DW_SPI_SR, val,
+ (val & SR_TF_EMPT) && !(val & SR_BUSY),
+ RX_TIMEOUT * 1000)) {
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ dw_write(priv, DW_SPI_SER, 0);
+ external_cs_manage(slave->dev, true);
+
+ dev_dbg(bus, "%u bytes xfered\n", op->data.nbytes);
+ return ret;
+}
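+
+/*
+ * Illustrative only: for a hypothetical spi-mem read op with opcode 0x03,
+ * a 3-byte address 0x010203 and no dummy bytes, dw_spi_exec_op() above
+ * builds
+ *
+ *	op_buf[] = { 0x03, 0x01, 0x02, 0x03 };
+ *
+ * programs DW_SPI_CTRLR1 with op->data.nbytes - 1 and then drains the
+ * payload with dw_reader().
+ */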
+
+/* The size of CTRLR1 limits data transfers to 64K */
+static int dw_spi_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
+{
+ op->data.nbytes = min(op->data.nbytes, (unsigned int)SZ_64K);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops dw_spi_mem_ops = {
+ .exec_op = dw_spi_exec_op,
+ .adjust_op_size = dw_spi_adjust_op_size,
+};
+
+static int dw_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct dw_spi_plat *plat = dev_get_plat(bus);
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+ u16 clk_div;
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+
+ /* Disable controller before writing control registers */
+ dw_write(priv, DW_SPI_SSIENR, 0);
+
+	/* clk_div doesn't support odd numbers */
+ clk_div = priv->bus_clk_rate / speed;
+ clk_div = (clk_div + 1) & 0xfffe;
+ dw_write(priv, DW_SPI_BAUDR, clk_div);
+
+ /* Enable controller after writing control registers */
+ dw_write(priv, DW_SPI_SSIENR, 1);
+
+ priv->freq = speed;
+ dev_dbg(bus, "speed=%d clk_div=%d\n", priv->freq, clk_div);
+
+ return 0;
+}
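+
+/*
+ * Illustrative only (assumed clock values): with bus_clk_rate = 100 MHz and
+ * a requested speed of 30 MHz, dw_spi_set_speed() above computes
+ *
+ *	clk_div = 100000000 / 30000000;		// 3
+ *	clk_div = (clk_div + 1) & 0xfffe;	// 4
+ *
+ * so BAUDR is written with 4 and the actual SCK is 25 MHz.
+ */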
+
+static int dw_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+
+ /*
+ * Can't set mode yet. Since this depends on if rx, tx, or
+ * rx & tx is requested. So we have to defer this to the
+ * real transfer function.
+ */
+ priv->mode = mode;
+ dev_dbg(bus, "mode=%d\n", priv->mode);
+
+ return 0;
+}
+
+static int dw_spi_remove(struct udevice *bus)
+{
+ struct dw_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ ret = reset_release_bulk(&priv->resets);
+ if (ret)
+ return ret;
+
+#if CONFIG_IS_ENABLED(CLK)
+ ret = clk_disable(&priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_free(&priv->clk);
+ if (ret)
+ return ret;
+#endif
+ return 0;
+}
+
+static const struct dm_spi_ops dw_spi_ops = {
+ .xfer = dw_spi_xfer,
+ .mem_ops = &dw_spi_mem_ops,
+ .set_speed = dw_spi_set_speed,
+ .set_mode = dw_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id dw_spi_ids[] = {
+ /* Generic compatible strings */
+
+ { .compatible = "snps,dw-apb-ssi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "snps,dw-apb-ssi-3.20a", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "snps,dw-apb-ssi-3.22a", .data = (ulong)dw_spi_apb_init },
+ /* First version with SSI_MAX_XFER_SIZE */
+ { .compatible = "snps,dw-apb-ssi-3.23a", .data = (ulong)dw_spi_apb_init },
+ /* First version with Dual/Quad SPI; unused by this driver */
+ { .compatible = "snps,dw-apb-ssi-4.00a", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "snps,dw-apb-ssi-4.01", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "snps,dwc-ssi-1.01a", .data = (ulong)dw_spi_dwc_init },
+
+ /* Compatible strings for specific SoCs */
+
+ /*
+ * Both the Cyclone V and Arria V share a device tree and have the same
+ * version of this device. This compatible string is used for those
+ * devices, and is not used for SoCFPGAs in general.
+ */
+ { .compatible = "altr,socfpga-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "altr,socfpga-arria10-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "canaan,kendryte-k210-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "canaan,kendryte-k210-ssi", .data = (ulong)dw_spi_dwc_init },
+ { .compatible = "intel,stratix10-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "intel,agilex-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "mscc,ocelot-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "mscc,jaguar2-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "snps,axs10x-spi", .data = (ulong)dw_spi_apb_init },
+ { .compatible = "snps,hsdk-spi", .data = (ulong)dw_spi_apb_init },
+ { }
+};
+
+U_BOOT_DRIVER(dw_spi) = {
+ .name = "dw_spi",
+ .id = UCLASS_SPI,
+ .of_match = dw_spi_ids,
+ .ops = &dw_spi_ops,
+ .of_to_plat = dw_spi_of_to_plat,
+ .plat_auto = sizeof(struct dw_spi_plat),
+ .priv_auto = sizeof(struct dw_spi_priv),
+ .probe = dw_spi_probe,
+ .remove = dw_spi_remove,
+};
diff --git a/roms/u-boot/drivers/spi/exynos_spi.c b/roms/u-boot/drivers/spi/exynos_spi.c
new file mode 100644
index 000000000..1bcc3ad31
--- /dev/null
+++ b/roms/u-boot/drivers/spi/exynos_spi.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2012 SAMSUNG Electronics
+ * Padmavathi Venna <padma.v@samsung.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <fdtdec.h>
+#include <time.h>
+#include <asm/arch/clk.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/cpu.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/pinmux.h>
+#include <asm/arch/spi.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct exynos_spi_plat {
+ enum periph_id periph_id;
+ s32 frequency; /* Default clock frequency, -1 for none */
+ struct exynos_spi *regs;
+ uint deactivate_delay_us; /* Delay to wait after deactivate */
+};
+
+struct exynos_spi_priv {
+ struct exynos_spi *regs;
+ unsigned int freq; /* Default frequency */
+ unsigned int mode;
+ enum periph_id periph_id; /* Peripheral ID for this device */
+ unsigned int fifo_size;
+ int skip_preamble;
+ ulong last_transaction_us; /* Time of last transaction end */
+};
+
+/**
+ * Flush spi tx, rx fifos and reset the SPI controller
+ *
+ * @param regs Pointer to SPI registers
+ */
+static void spi_flush_fifo(struct exynos_spi *regs)
+{
+ clrsetbits_le32(&regs->ch_cfg, SPI_CH_HS_EN, SPI_CH_RST);
+ clrbits_le32(&regs->ch_cfg, SPI_CH_RST);
+ setbits_le32(&regs->ch_cfg, SPI_TX_CH_ON | SPI_RX_CH_ON);
+}
+
+static void spi_get_fifo_levels(struct exynos_spi *regs,
+ int *rx_lvl, int *tx_lvl)
+{
+ uint32_t spi_sts = readl(&regs->spi_sts);
+
+ *rx_lvl = (spi_sts >> SPI_RX_LVL_OFFSET) & SPI_FIFO_LVL_MASK;
+ *tx_lvl = (spi_sts >> SPI_TX_LVL_OFFSET) & SPI_FIFO_LVL_MASK;
+}
+
+/**
+ * If there's something to transfer, do a software reset and set a
+ * transaction size.
+ *
+ * @param regs SPI peripheral registers
+ * @param count Number of bytes to transfer
+ * @param step Number of bytes to transfer in each packet (1 or 4)
+ */
+static void spi_request_bytes(struct exynos_spi *regs, int count, int step)
+{
+ debug("%s: regs=%p, count=%d, step=%d\n", __func__, regs, count, step);
+
+	/* For word access we need to swap bytes */
+ if (step == 4) {
+ setbits_le32(&regs->mode_cfg,
+ SPI_MODE_CH_WIDTH_WORD | SPI_MODE_BUS_WIDTH_WORD);
+ count /= 4;
+ setbits_le32(&regs->swap_cfg, SPI_TX_SWAP_EN | SPI_RX_SWAP_EN |
+ SPI_TX_BYTE_SWAP | SPI_RX_BYTE_SWAP |
+ SPI_TX_HWORD_SWAP | SPI_RX_HWORD_SWAP);
+ } else {
+ /* Select byte access and clear the swap configuration */
+ clrbits_le32(&regs->mode_cfg,
+ SPI_MODE_CH_WIDTH_WORD | SPI_MODE_BUS_WIDTH_WORD);
+ writel(0, &regs->swap_cfg);
+ }
+
+ assert(count && count < (1 << 16));
+ setbits_le32(&regs->ch_cfg, SPI_CH_RST);
+ clrbits_le32(&regs->ch_cfg, SPI_CH_RST);
+
+ writel(count | SPI_PACKET_CNT_EN, &regs->pkt_cnt);
+}
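+
+/*
+ * Illustrative only: for a 64-byte transfer where the buffers and length are
+ * word-aligned (step == 4), spi_request_bytes() above enables word-wide
+ * channel/bus access plus byte and half-word swapping, and programs pkt_cnt
+ * with 64 / 4 = 16 packets; an unaligned transfer keeps step == 1 and
+ * programs 64 byte-wide packets instead.
+ */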
+
+static int spi_rx_tx(struct exynos_spi_priv *priv, int todo,
+ void **dinp, void const **doutp, unsigned long flags)
+{
+ struct exynos_spi *regs = priv->regs;
+ uchar *rxp = *dinp;
+ const uchar *txp = *doutp;
+ int rx_lvl, tx_lvl;
+ uint out_bytes, in_bytes;
+ int toread;
+ unsigned start = get_timer(0);
+ int stopping;
+ int step;
+
+ out_bytes = in_bytes = todo;
+
+ stopping = priv->skip_preamble && (flags & SPI_XFER_END) &&
+ !(priv->mode & SPI_SLAVE);
+
+ /*
+ * Try to transfer words if we can. This helps read performance at
+ * SPI clock speeds above about 20MHz.
+ */
+ step = 1;
+ if (!((todo | (uintptr_t)rxp | (uintptr_t)txp) & 3) &&
+ !priv->skip_preamble)
+ step = 4;
+
+ /*
+ * If there's something to send, do a software reset and set a
+ * transaction size.
+ */
+ spi_request_bytes(regs, todo, step);
+
+ /*
+ * Bytes are transmitted/received in pairs. Wait to receive all the
+ * data because then transmission will be done as well.
+ */
+ toread = in_bytes;
+
+ while (in_bytes) {
+ int temp;
+
+ /* Keep the fifos full/empty. */
+ spi_get_fifo_levels(regs, &rx_lvl, &tx_lvl);
+
+ /*
+ * Don't completely fill the txfifo, since we don't want our
+ * rxfifo to overflow, and it may already contain data.
+ */
+ while (tx_lvl < priv->fifo_size/2 && out_bytes) {
+ if (!txp)
+ temp = -1;
+ else if (step == 4)
+ temp = *(uint32_t *)txp;
+ else
+ temp = *txp;
+ writel(temp, &regs->tx_data);
+ out_bytes -= step;
+ if (txp)
+ txp += step;
+ tx_lvl += step;
+ }
+ if (rx_lvl >= step) {
+ while (rx_lvl >= step) {
+ temp = readl(&regs->rx_data);
+ if (priv->skip_preamble) {
+ if (temp == SPI_PREAMBLE_END_BYTE) {
+ priv->skip_preamble = 0;
+ stopping = 0;
+ }
+ } else {
+ if (rxp || stopping) {
+ if (step == 4)
+ *(uint32_t *)rxp = temp;
+ else
+ *rxp = temp;
+ rxp += step;
+ }
+ in_bytes -= step;
+ }
+ toread -= step;
+ rx_lvl -= step;
+ }
+ } else if (!toread) {
+ /*
+ * We have run out of input data, but haven't read
+ * enough bytes after the preamble yet. Read some more,
+ * and make sure that we transmit dummy bytes too, to
+ * keep things going.
+ */
+ assert(!out_bytes);
+ out_bytes = in_bytes;
+ toread = in_bytes;
+ txp = NULL;
+ spi_request_bytes(regs, toread, step);
+ }
+ if (priv->skip_preamble && get_timer(start) > 100) {
+ debug("SPI timeout: in_bytes=%d, out_bytes=%d, ",
+ in_bytes, out_bytes);
+ return -ETIMEDOUT;
+ }
+ }
+
+ *dinp = rxp;
+ *doutp = txp;
+
+ return 0;
+}
+
+/**
+ * Activate the CS by driving it LOW
+ *
+ * @param dev	Slave device whose chip select should be activated
+ */
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct exynos_spi_plat *pdata = dev_get_plat(bus);
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+
+ /* If it's too soon to do another transaction, wait */
+ if (pdata->deactivate_delay_us &&
+ priv->last_transaction_us) {
+ ulong delay_us; /* The delay completed so far */
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < pdata->deactivate_delay_us)
+ udelay(pdata->deactivate_delay_us - delay_us);
+ }
+
+ clrbits_le32(&priv->regs->cs_reg, SPI_SLAVE_SIG_INACT);
+ debug("Activate CS, bus '%s'\n", bus->name);
+ priv->skip_preamble = priv->mode & SPI_PREAMBLE;
+}
+
+/**
+ * Deactivate the CS by driving it HIGH
+ *
+ * @param dev	Slave device whose chip select should be deactivated
+ */
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct exynos_spi_plat *pdata = dev_get_plat(bus);
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+
+ setbits_le32(&priv->regs->cs_reg, SPI_SLAVE_SIG_INACT);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (pdata->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+
+ debug("Deactivate CS, bus '%s'\n", bus->name);
+}
+
+static int exynos_spi_of_to_plat(struct udevice *bus)
+{
+ struct exynos_spi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ plat->regs = dev_read_addr_ptr(bus);
+ plat->periph_id = pinmux_decode_periph_id(blob, node);
+
+ if (plat->periph_id == PERIPH_ID_NONE) {
+ debug("%s: Invalid peripheral ID %d\n", __func__,
+ plat->periph_id);
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ /* Use 500KHz as a suitable default */
+ plat->frequency = fdtdec_get_int(blob, node, "spi-max-frequency",
+ 500000);
+ plat->deactivate_delay_us = fdtdec_get_int(blob, node,
+ "spi-deactivate-delay", 0);
+ debug("%s: regs=%p, periph_id=%d, max-frequency=%d, deactivate_delay=%d\n",
+ __func__, plat->regs, plat->periph_id, plat->frequency,
+ plat->deactivate_delay_us);
+
+ return 0;
+}
+
+static int exynos_spi_probe(struct udevice *bus)
+{
+ struct exynos_spi_plat *plat = dev_get_plat(bus);
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+
+ priv->regs = plat->regs;
+ if (plat->periph_id == PERIPH_ID_SPI1 ||
+ plat->periph_id == PERIPH_ID_SPI2)
+ priv->fifo_size = 64;
+ else
+ priv->fifo_size = 256;
+
+ priv->skip_preamble = 0;
+ priv->last_transaction_us = timer_get_us();
+ priv->freq = plat->frequency;
+ priv->periph_id = plat->periph_id;
+
+ return 0;
+}
+
+static int exynos_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+
+ exynos_pinmux_config(priv->periph_id, PINMUX_FLAG_NONE);
+ spi_flush_fifo(priv->regs);
+
+ writel(SPI_FB_DELAY_180, &priv->regs->fb_clk);
+
+ return 0;
+}
+
+static int exynos_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+
+ spi_flush_fifo(priv->regs);
+
+ return 0;
+}
+
+static int exynos_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+ int upto, todo;
+ int bytelen;
+ int ret = 0;
+
+ /* spi core configured to do 8 bit transfers */
+ if (bitlen % 8) {
+ debug("Non byte aligned SPI transfer.\n");
+ return -1;
+ }
+
+ /* Start the transaction, if necessary. */
+ if ((flags & SPI_XFER_BEGIN))
+ spi_cs_activate(dev);
+
+ /*
+	 * The Exynos SPI packet counter limits each transaction to 65535
+	 * transfers. To keep things simple, allow a maximum of 65532 bytes.
+	 * We could allow more in word mode, but the performance difference
+	 * is small.
+ */
+ bytelen = bitlen / 8;
+ for (upto = 0; !ret && upto < bytelen; upto += todo) {
+ todo = min(bytelen - upto, (1 << 16) - 4);
+ ret = spi_rx_tx(priv, todo, &din, &dout, flags);
+ if (ret)
+ break;
+ }
+
+ /* Stop the transaction, if necessary. */
+ if ((flags & SPI_XFER_END) && !(priv->mode & SPI_SLAVE)) {
+ spi_cs_deactivate(dev);
+ if (priv->skip_preamble) {
+ assert(!priv->skip_preamble);
+			debug("Failed to complete preamble transaction\n");
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
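+
+/*
+ * Illustrative only: a 1 MiB read (bitlen = 8 * 1048576) is split by the
+ * loop in exynos_spi_xfer() above into chunks of at most (1 << 16) - 4 =
+ * 65532 bytes, i.e. 16 full chunks followed by a final 64-byte chunk, so
+ * spi_rx_tx() is called 17 times.
+ */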
+
+static int exynos_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct exynos_spi_plat *plat = dev_get_plat(bus);
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+ ret = set_spi_clk(priv->periph_id, speed);
+ if (ret)
+ return ret;
+ priv->freq = speed;
+ debug("%s: regs=%p, speed=%d\n", __func__, priv->regs, priv->freq);
+
+ return 0;
+}
+
+static int exynos_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct exynos_spi_priv *priv = dev_get_priv(bus);
+ uint32_t reg;
+
+ reg = readl(&priv->regs->ch_cfg);
+ reg &= ~(SPI_CH_CPHA_B | SPI_CH_CPOL_L);
+
+ if (mode & SPI_CPHA)
+ reg |= SPI_CH_CPHA_B;
+
+ if (mode & SPI_CPOL)
+ reg |= SPI_CH_CPOL_L;
+
+ writel(reg, &priv->regs->ch_cfg);
+ priv->mode = mode;
+ debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops exynos_spi_ops = {
+ .claim_bus = exynos_spi_claim_bus,
+ .release_bus = exynos_spi_release_bus,
+ .xfer = exynos_spi_xfer,
+ .set_speed = exynos_spi_set_speed,
+ .set_mode = exynos_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id exynos_spi_ids[] = {
+ { .compatible = "samsung,exynos-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(exynos_spi) = {
+ .name = "exynos_spi",
+ .id = UCLASS_SPI,
+ .of_match = exynos_spi_ids,
+ .ops = &exynos_spi_ops,
+ .of_to_plat = exynos_spi_of_to_plat,
+ .plat_auto = sizeof(struct exynos_spi_plat),
+ .priv_auto = sizeof(struct exynos_spi_priv),
+ .probe = exynos_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/fsl_dspi.c b/roms/u-boot/drivers/spi/fsl_dspi.c
new file mode 100644
index 000000000..8fe3508c6
--- /dev/null
+++ b/roms/u-boot/drivers/spi/fsl_dspi.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2000-2003
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * Copyright (C) 2004-2009, 2015 Freescale Semiconductor, Inc.
+ * TsiChung Liew (Tsi-Chung.Liew@freescale.com)
+ * Chao Fu (B44548@freescale.com)
+ * Haikun Wang (B53464@freescale.com)
+ */
+
+#include <asm/global_data.h>
+#include <linux/math64.h>
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <spi.h>
+#include <malloc.h>
+#include <asm/io.h>
+#include <fdtdec.h>
+#ifndef CONFIG_M68K
+#include <asm/arch/clock.h>
+#endif
+#include <fsl_dspi.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+/* linux/include/time.h */
+#define NSEC_PER_SEC 1000000000L
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* fsl_dspi_plat flags */
+#define DSPI_FLAG_REGMAP_ENDIAN_BIG BIT(0)
+
+/* idle data value */
+#define DSPI_IDLE_VAL 0x0
+
+/* maximum number of chipselect signals */
+#define FSL_DSPI_MAX_CHIPSELECT 6
+
+/* default SCK frequency, unit: HZ */
+#define FSL_DSPI_DEFAULT_SCK_FREQ 10000000
+
+/* tx/rx data wait timeout value, unit: us */
+#define DSPI_TXRX_WAIT_TIMEOUT 1000000
+
+/* CTAR register pre-configure value */
+#define DSPI_CTAR_DEFAULT_VALUE (DSPI_CTAR_TRSZ(7) | \
+ DSPI_CTAR_PCSSCK_1CLK | \
+ DSPI_CTAR_PASC(0) | \
+ DSPI_CTAR_PDT(0) | \
+ DSPI_CTAR_CSSCK(0) | \
+ DSPI_CTAR_ASC(0) | \
+ DSPI_CTAR_DT(0))
+
+/* CTAR register pre-configure mask */
+#define DSPI_CTAR_SET_MODE_MASK (DSPI_CTAR_TRSZ(15) | \
+ DSPI_CTAR_PCSSCK(3) | \
+ DSPI_CTAR_PASC(3) | \
+ DSPI_CTAR_PDT(3) | \
+ DSPI_CTAR_CSSCK(15) | \
+ DSPI_CTAR_ASC(15) | \
+ DSPI_CTAR_DT(15))
+
+/**
+ * struct fsl_dspi_plat - platform data for Freescale DSPI
+ *
+ * @flags: Flags for DSPI DSPI_FLAG_...
+ * @speed_hz: Default SCK frequency
+ * @num_chipselect: Number of DSPI chipselect signals
+ * @regs_addr: Base address of DSPI registers
+ */
+struct fsl_dspi_plat {
+ uint flags;
+ uint speed_hz;
+ uint num_chipselect;
+ fdt_addr_t regs_addr;
+};
+
+/**
+ * struct fsl_dspi_priv - private data for Freescale DSPI
+ *
+ * @flags: Flags for DSPI DSPI_FLAG_...
+ * @mode: SPI mode to use for slave device (see SPI mode flags)
+ * @mcr_val: MCR register configure value
+ * @bus_clk: DSPI input clk frequency
+ * @speed_hz: Default SCK frequency
+ * @charbit: How many bits in every transfer
+ * @num_chipselect: Number of DSPI chipselect signals
+ * @ctar_val: CTAR register configure value of per chipselect slave device
+ * @regs:	Pointer to the DSPI register structure for I/O access
+ */
+struct fsl_dspi_priv {
+ uint flags;
+ uint mode;
+ uint mcr_val;
+ uint bus_clk;
+ uint speed_hz;
+ uint charbit;
+ uint num_chipselect;
+ uint ctar_val[FSL_DSPI_MAX_CHIPSELECT];
+ struct dspi *regs;
+};
+
+__weak void cpu_dspi_port_conf(void)
+{
+}
+
+__weak int cpu_dspi_claim_bus(uint bus, uint cs)
+{
+ return 0;
+}
+
+__weak void cpu_dspi_release_bus(uint bus, uint cs)
+{
+}
+
+static uint dspi_read32(uint flags, uint *addr)
+{
+ return flags & DSPI_FLAG_REGMAP_ENDIAN_BIG ?
+ in_be32(addr) : in_le32(addr);
+}
+
+static void dspi_write32(uint flags, uint *addr, uint val)
+{
+ flags & DSPI_FLAG_REGMAP_ENDIAN_BIG ?
+ out_be32(addr, val) : out_le32(addr, val);
+}
+
+static void dspi_halt(struct fsl_dspi_priv *priv, u8 halt)
+{
+ uint mcr_val;
+
+ mcr_val = dspi_read32(priv->flags, &priv->regs->mcr);
+
+ if (halt)
+ mcr_val |= DSPI_MCR_HALT;
+ else
+ mcr_val &= ~DSPI_MCR_HALT;
+
+ dspi_write32(priv->flags, &priv->regs->mcr, mcr_val);
+}
+
+static void fsl_dspi_init_mcr(struct fsl_dspi_priv *priv, uint cfg_val)
+{
+ /* halt DSPI module */
+ dspi_halt(priv, 1);
+
+ dspi_write32(priv->flags, &priv->regs->mcr, cfg_val);
+
+ /* resume module */
+ dspi_halt(priv, 0);
+
+ priv->mcr_val = cfg_val;
+}
+
+static void fsl_dspi_cfg_cs_active_state(struct fsl_dspi_priv *priv,
+ uint cs, uint state)
+{
+ uint mcr_val;
+
+ dspi_halt(priv, 1);
+
+ mcr_val = dspi_read32(priv->flags, &priv->regs->mcr);
+ if (state & SPI_CS_HIGH)
+ /* CSx inactive state is low */
+ mcr_val &= ~DSPI_MCR_PCSIS(cs);
+ else
+ /* CSx inactive state is high */
+ mcr_val |= DSPI_MCR_PCSIS(cs);
+ dspi_write32(priv->flags, &priv->regs->mcr, mcr_val);
+
+ dspi_halt(priv, 0);
+}
+
+static int fsl_dspi_cfg_ctar_mode(struct fsl_dspi_priv *priv,
+ uint cs, uint mode)
+{
+ uint bus_setup;
+
+ bus_setup = dspi_read32(priv->flags, &priv->regs->ctar[0]);
+
+ bus_setup &= ~DSPI_CTAR_SET_MODE_MASK;
+ bus_setup |= priv->ctar_val[cs];
+ bus_setup &= ~(DSPI_CTAR_CPOL | DSPI_CTAR_CPHA | DSPI_CTAR_LSBFE);
+
+ if (mode & SPI_CPOL)
+ bus_setup |= DSPI_CTAR_CPOL;
+ if (mode & SPI_CPHA)
+ bus_setup |= DSPI_CTAR_CPHA;
+ if (mode & SPI_LSB_FIRST)
+ bus_setup |= DSPI_CTAR_LSBFE;
+
+ dspi_write32(priv->flags, &priv->regs->ctar[0], bus_setup);
+
+ priv->charbit =
+ ((dspi_read32(priv->flags, &priv->regs->ctar[0]) &
+ DSPI_CTAR_TRSZ(15)) == DSPI_CTAR_TRSZ(15)) ? 16 : 8;
+
+ return 0;
+}
+
+static void fsl_dspi_clr_fifo(struct fsl_dspi_priv *priv)
+{
+ uint mcr_val;
+
+ dspi_halt(priv, 1);
+ mcr_val = dspi_read32(priv->flags, &priv->regs->mcr);
+ /* flush RX and TX FIFO */
+ mcr_val |= (DSPI_MCR_CTXF | DSPI_MCR_CRXF);
+ dspi_write32(priv->flags, &priv->regs->mcr, mcr_val);
+ dspi_halt(priv, 0);
+}
+
+static void dspi_tx(struct fsl_dspi_priv *priv, u32 ctrl, u16 data)
+{
+ int timeout = DSPI_TXRX_WAIT_TIMEOUT;
+
+ /* wait for empty entries in TXFIFO or timeout */
+ while (DSPI_SR_TXCTR(dspi_read32(priv->flags, &priv->regs->sr)) >= 4 &&
+ timeout--)
+ udelay(1);
+
+ if (timeout >= 0)
+ dspi_write32(priv->flags, &priv->regs->tfr, (ctrl | data));
+ else
+ debug("dspi_tx: waiting timeout!\n");
+}
+
+static u16 dspi_rx(struct fsl_dspi_priv *priv)
+{
+ int timeout = DSPI_TXRX_WAIT_TIMEOUT;
+
+ /* wait for valid entries in RXFIFO or timeout */
+ while (DSPI_SR_RXCTR(dspi_read32(priv->flags, &priv->regs->sr)) == 0 &&
+ timeout--)
+ udelay(1);
+
+ if (timeout >= 0)
+ return (u16)DSPI_RFR_RXDATA(
+ dspi_read32(priv->flags, &priv->regs->rfr));
+ else {
+ debug("dspi_rx: waiting timeout!\n");
+ return (u16)(~0);
+ }
+}
+
+static int dspi_xfer(struct fsl_dspi_priv *priv, uint cs, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ u16 *spi_rd16 = NULL, *spi_wr16 = NULL;
+ u8 *spi_rd = NULL, *spi_wr = NULL;
+ static u32 ctrl;
+ uint len = bitlen >> 3;
+
+ if (priv->charbit == 16) {
+ bitlen >>= 1;
+ spi_wr16 = (u16 *)dout;
+ spi_rd16 = (u16 *)din;
+ } else {
+ spi_wr = (u8 *)dout;
+ spi_rd = (u8 *)din;
+ }
+
+ if ((flags & SPI_XFER_BEGIN) == SPI_XFER_BEGIN)
+ ctrl |= DSPI_TFR_CONT;
+
+ ctrl = ctrl & DSPI_TFR_CONT;
+ ctrl = ctrl | DSPI_TFR_CTAS(0) | DSPI_TFR_PCS(cs);
+
+ if (len > 1) {
+ int tmp_len = len - 1;
+ while (tmp_len--) {
+ if ((dout != NULL) && (din != NULL)) {
+ if (priv->charbit == 16) {
+ dspi_tx(priv, ctrl, *spi_wr16++);
+ *spi_rd16++ = dspi_rx(priv);
+ }
+ else {
+ dspi_tx(priv, ctrl, *spi_wr++);
+ *spi_rd++ = dspi_rx(priv);
+ }
+ }
+
+ else if (dout != NULL) {
+ if (priv->charbit == 16)
+ dspi_tx(priv, ctrl, *spi_wr16++);
+ else
+ dspi_tx(priv, ctrl, *spi_wr++);
+ dspi_rx(priv);
+ }
+
+ else if (din != NULL) {
+ dspi_tx(priv, ctrl, DSPI_IDLE_VAL);
+ if (priv->charbit == 16)
+ *spi_rd16++ = dspi_rx(priv);
+ else
+ *spi_rd++ = dspi_rx(priv);
+ }
+ }
+
+ len = 1; /* remaining byte */
+ }
+
+ if ((flags & SPI_XFER_END) == SPI_XFER_END)
+ ctrl &= ~DSPI_TFR_CONT;
+
+ if (len) {
+ if ((dout != NULL) && (din != NULL)) {
+ if (priv->charbit == 16) {
+ dspi_tx(priv, ctrl, *spi_wr16++);
+ *spi_rd16++ = dspi_rx(priv);
+ }
+ else {
+ dspi_tx(priv, ctrl, *spi_wr++);
+ *spi_rd++ = dspi_rx(priv);
+ }
+ }
+
+ else if (dout != NULL) {
+ if (priv->charbit == 16)
+ dspi_tx(priv, ctrl, *spi_wr16);
+ else
+ dspi_tx(priv, ctrl, *spi_wr);
+ dspi_rx(priv);
+ }
+
+ else if (din != NULL) {
+ dspi_tx(priv, ctrl, DSPI_IDLE_VAL);
+ if (priv->charbit == 16)
+ *spi_rd16 = dspi_rx(priv);
+ else
+ *spi_rd = dspi_rx(priv);
+ }
+ } else {
+ /* dummy read */
+ dspi_tx(priv, ctrl, DSPI_IDLE_VAL);
+ dspi_rx(priv);
+ }
+
+ return 0;
+}
+
+/**
+ * Calculate the divider between the input clock frequency and the expected
+ * SCK frequency.
+ * Formula: SCK = (clkrate / pbr) * ((1 + dbr) / br)
+ * dbr: uses the default value 0
+ *
+ * @pbr: return Baud Rate Prescaler value
+ * @br: return Baud Rate Scaler value
+ * @speed_hz: expected SCK frequency
+ * @clkrate: input clk frequency
+ */
+static int fsl_dspi_hz_to_spi_baud(int *pbr, int *br,
+ int speed_hz, uint clkrate)
+{
+ /* Valid baud rate pre-scaler values */
+ int pbr_tbl[4] = {2, 3, 5, 7};
+ int brs[16] = {2, 4, 6, 8,
+ 16, 32, 64, 128,
+ 256, 512, 1024, 2048,
+ 4096, 8192, 16384, 32768};
+ int temp, i = 0, j = 0;
+
+ temp = clkrate / speed_hz;
+
+ for (i = 0; i < ARRAY_SIZE(pbr_tbl); i++)
+ for (j = 0; j < ARRAY_SIZE(brs); j++) {
+ if (pbr_tbl[i] * brs[j] >= temp) {
+ *pbr = i;
+ *br = j;
+ return 0;
+ }
+ }
+
+	debug("Cannot find a valid baud rate, speed_hz is %d, ", speed_hz);
+	debug("clkrate is %d, using the max prescaler value.\n", clkrate);
+
+ *pbr = ARRAY_SIZE(pbr_tbl) - 1;
+ *br = ARRAY_SIZE(brs) - 1;
+ return -EINVAL;
+}
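+
+/*
+ * Illustrative only: assuming a 100 MHz DSPI input clock (the real clkrate
+ * comes from mxc_get_clock()/gd->bus_clk) and speed_hz = 10 MHz,
+ * fsl_dspi_hz_to_spi_baud() above computes temp = 10 and picks the first
+ * (pbr, br) pair with pbr_tbl[i] * brs[j] >= 10, i.e. 2 * 6 = 12, so
+ * *pbr = 0 and *br = 2, giving SCK = 100 MHz / 12 ~= 8.33 MHz with DBR = 0.
+ */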
+
+static void ns_delay_scale(unsigned char *psc, unsigned char *sc, int delay_ns,
+ unsigned long clkrate)
+{
+ int scale_needed, scale, minscale = INT_MAX;
+ int pscale_tbl[4] = {1, 3, 5, 7};
+ u32 remainder;
+ int i, j;
+
+ scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
+ &remainder);
+ if (remainder)
+ scale_needed++;
+
+ for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
+ for (j = 0; j <= DSPI_CTAR_SCALE_BITS; j++) {
+ scale = pscale_tbl[i] * (2 << j);
+ if (scale >= scale_needed) {
+ if (scale < minscale) {
+ minscale = scale;
+ *psc = i;
+ *sc = j;
+ }
+ break;
+ }
+ }
+
+ if (minscale == INT_MAX) {
+ pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld, using max prescaler value",
+ delay_ns, clkrate);
+ *psc = ARRAY_SIZE(pscale_tbl) - 1;
+ *sc = DSPI_CTAR_SCALE_BITS;
+ }
+}
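+
+/*
+ * Illustrative only: assuming a 66 MHz DSPI input clock and delay_ns = 100,
+ * ns_delay_scale() above needs scale_needed = ceil(100 * 66e6 / 1e9) = 7;
+ * the smallest pscale_tbl[i] * (2 << j) covering that is 1 * 8, so
+ * *psc = 0 and *sc = 2, for an actual delay of 8 / 66 MHz ~= 121 ns.
+ */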
+
+static int fsl_dspi_cfg_speed(struct fsl_dspi_priv *priv, uint speed)
+{
+ int ret;
+ uint bus_setup;
+ int best_i, best_j, bus_clk;
+
+ bus_clk = priv->bus_clk;
+
+ debug("DSPI set_speed: expected SCK speed %u, bus_clk %u.\n",
+ speed, bus_clk);
+
+ bus_setup = dspi_read32(priv->flags, &priv->regs->ctar[0]);
+ bus_setup &= ~(DSPI_CTAR_DBR | DSPI_CTAR_PBR(0x3) | DSPI_CTAR_BR(0xf));
+
+ ret = fsl_dspi_hz_to_spi_baud(&best_i, &best_j, speed, bus_clk);
+ if (ret) {
+ speed = priv->speed_hz;
+ debug("DSPI set_speed use default SCK rate %u.\n", speed);
+ fsl_dspi_hz_to_spi_baud(&best_i, &best_j, speed, bus_clk);
+ }
+
+ bus_setup |= (DSPI_CTAR_PBR(best_i) | DSPI_CTAR_BR(best_j));
+ dspi_write32(priv->flags, &priv->regs->ctar[0], bus_setup);
+
+ priv->speed_hz = speed;
+
+ return 0;
+}
+
+static int fsl_dspi_child_pre_probe(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ struct fsl_dspi_priv *priv = dev_get_priv(dev->parent);
+ u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ unsigned char pcssck = 0, cssck = 0;
+ unsigned char pasc = 0, asc = 0;
+
+ if (slave_plat->cs >= priv->num_chipselect) {
+ debug("DSPI invalid chipselect number %d(max %d)!\n",
+ slave_plat->cs, priv->num_chipselect - 1);
+ return -EINVAL;
+ }
+
+ ofnode_read_u32(dev_ofnode(dev), "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+ ofnode_read_u32(dev_ofnode(dev), "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+
+ /* Set PCS to SCK delay scale values */
+ ns_delay_scale(&pcssck, &cssck, cs_sck_delay, priv->bus_clk);
+
+ /* Set After SCK delay scale values */
+ ns_delay_scale(&pasc, &asc, sck_cs_delay, priv->bus_clk);
+
+ priv->ctar_val[slave_plat->cs] = DSPI_CTAR_DEFAULT_VALUE |
+ DSPI_CTAR_PCSSCK(pcssck) |
+ DSPI_CTAR_PASC(pasc);
+
+ debug("DSPI pre_probe slave device on CS %u, max_hz %u, mode 0x%x.\n",
+ slave_plat->cs, slave_plat->max_hz, slave_plat->mode);
+
+ return 0;
+}
+
+static int fsl_dspi_probe(struct udevice *bus)
+{
+ struct fsl_dspi_plat *plat = dev_get_plat(bus);
+ struct fsl_dspi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_bus *dm_spi_bus;
+ uint mcr_cfg_val;
+
+ dm_spi_bus = dev_get_uclass_priv(bus);
+
+	/* CPU-specific pin muxing configuration */
+ cpu_dspi_port_conf();
+
+ /* get input clk frequency */
+ priv->regs = (struct dspi *)plat->regs_addr;
+ priv->flags = plat->flags;
+#ifdef CONFIG_M68K
+ priv->bus_clk = gd->bus_clk;
+#else
+ priv->bus_clk = mxc_get_clock(MXC_DSPI_CLK);
+#endif
+ priv->num_chipselect = plat->num_chipselect;
+ priv->speed_hz = plat->speed_hz;
+	/* frame data length in bits, default 8 bits */
+ priv->charbit = 8;
+
+ dm_spi_bus->max_hz = plat->speed_hz;
+
+ /* default: all CS signals inactive state is high */
+ mcr_cfg_val = DSPI_MCR_MSTR | DSPI_MCR_PCSIS_MASK |
+ DSPI_MCR_CRXF | DSPI_MCR_CTXF;
+ fsl_dspi_init_mcr(priv, mcr_cfg_val);
+
+ debug("%s probe done, bus-num %d.\n", bus->name, dev_seq(bus));
+
+ return 0;
+}
+
+static int fsl_dspi_claim_bus(struct udevice *dev)
+{
+ uint sr_val;
+ struct fsl_dspi_priv *priv;
+ struct udevice *bus = dev->parent;
+ struct dm_spi_slave_plat *slave_plat =
+ dev_get_parent_plat(dev);
+
+ priv = dev_get_priv(bus);
+
+ /* processor-specific preparation work */
+ cpu_dspi_claim_bus(dev_seq(bus), slave_plat->cs);
+
+ /* configure transfer mode */
+ fsl_dspi_cfg_ctar_mode(priv, slave_plat->cs, priv->mode);
+
+ /* configure active state of CSX */
+ fsl_dspi_cfg_cs_active_state(priv, slave_plat->cs,
+ priv->mode);
+
+ fsl_dspi_clr_fifo(priv);
+
+ /* check module TX and RX status */
+ sr_val = dspi_read32(priv->flags, &priv->regs->sr);
+ if ((sr_val & DSPI_SR_TXRXS) != DSPI_SR_TXRXS) {
+ debug("DSPI RX/TX not ready!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fsl_dspi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct fsl_dspi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat =
+ dev_get_parent_plat(dev);
+
+ /* halt module */
+ dspi_halt(priv, 1);
+
+ /* processor-specific release work */
+ cpu_dspi_release_bus(dev_seq(bus), slave_plat->cs);
+
+ return 0;
+}
+
+/**
+ * This function doesn't do anything except help with debugging
+ */
+static int fsl_dspi_bind(struct udevice *bus)
+{
+ debug("%s assigned seq %d.\n", bus->name, dev_seq(bus));
+ return 0;
+}
+
+static int fsl_dspi_of_to_plat(struct udevice *bus)
+{
+ fdt_addr_t addr;
+ struct fsl_dspi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ if (fdtdec_get_bool(blob, node, "big-endian"))
+ plat->flags |= DSPI_FLAG_REGMAP_ENDIAN_BIG;
+
+ plat->num_chipselect =
+ fdtdec_get_int(blob, node, "num-cs", FSL_DSPI_MAX_CHIPSELECT);
+
+ addr = dev_read_addr(bus);
+ if (addr == FDT_ADDR_T_NONE) {
+ debug("DSPI: Can't get base address or size\n");
+ return -ENOMEM;
+ }
+ plat->regs_addr = addr;
+
+ plat->speed_hz = fdtdec_get_int(blob,
+ node, "spi-max-frequency", FSL_DSPI_DEFAULT_SCK_FREQ);
+
+ debug("DSPI: regs=%pa, max-frequency=%d, endianess=%s, num-cs=%d\n",
+ &plat->regs_addr, plat->speed_hz,
+ plat->flags & DSPI_FLAG_REGMAP_ENDIAN_BIG ? "be" : "le",
+ plat->num_chipselect);
+
+ return 0;
+}
+
+static int fsl_dspi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct fsl_dspi_priv *priv;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ struct udevice *bus;
+
+ bus = dev->parent;
+ priv = dev_get_priv(bus);
+
+ return dspi_xfer(priv, slave_plat->cs, bitlen, dout, din, flags);
+}
+
+static int fsl_dspi_set_speed(struct udevice *bus, uint speed)
+{
+ struct fsl_dspi_priv *priv = dev_get_priv(bus);
+
+ return fsl_dspi_cfg_speed(priv, speed);
+}
+
+static int fsl_dspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct fsl_dspi_priv *priv = dev_get_priv(bus);
+
+ debug("DSPI set_mode: mode 0x%x.\n", mode);
+
+ /*
+ * Chip-select-specific configuration is kept in priv->ctar_val, but the
+ * correct chip-select number is not known at this point, so only the mode
+ * value is stored here. The real configuration is done in claim_bus.
+ */
+ priv->mode = mode;
+
+ return 0;
+}
+
+static const struct dm_spi_ops fsl_dspi_ops = {
+ .claim_bus = fsl_dspi_claim_bus,
+ .release_bus = fsl_dspi_release_bus,
+ .xfer = fsl_dspi_xfer,
+ .set_speed = fsl_dspi_set_speed,
+ .set_mode = fsl_dspi_set_mode,
+};
+
+static const struct udevice_id fsl_dspi_ids[] = {
+ { .compatible = "fsl,vf610-dspi" },
+ { }
+};
+
+U_BOOT_DRIVER(fsl_dspi) = {
+ .name = "fsl_dspi",
+ .id = UCLASS_SPI,
+ .of_match = fsl_dspi_ids,
+ .ops = &fsl_dspi_ops,
+ .of_to_plat = fsl_dspi_of_to_plat,
+ .plat_auto = sizeof(struct fsl_dspi_plat),
+ .priv_auto = sizeof(struct fsl_dspi_priv),
+ .probe = fsl_dspi_probe,
+ .child_pre_probe = fsl_dspi_child_pre_probe,
+ .bind = fsl_dspi_bind,
+};
diff --git a/roms/u-boot/drivers/spi/fsl_espi.c b/roms/u-boot/drivers/spi/fsl_espi.c
new file mode 100644
index 000000000..387b54715
--- /dev/null
+++ b/roms/u-boot/drivers/spi/fsl_espi.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * eSPI controller driver.
+ *
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright 2020 NXP
+ * Author: Mingkai Hu (Mingkai.hu@freescale.com)
+ * Chuanhua Han (chuanhua.han@nxp.com)
+ */
+
+#include <common.h>
+#include <log.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+#include <malloc.h>
+#include <spi.h>
+#include <asm/global_data.h>
+#include <asm/immap_85xx.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <dm/platform_data/fsl_espi.h>
+
+struct fsl_spi_slave {
+ struct spi_slave slave;
+ ccsr_espi_t *espi;
+ u32 speed_hz;
+ unsigned int cs;
+ unsigned int div16;
+ unsigned int pm;
+ int tx_timeout;
+ unsigned int mode;
+ size_t cmd_len;
+ u8 cmd_buf[16];
+ size_t data_len;
+ unsigned int max_transfer_length;
+};
+
+#define to_fsl_spi_slave(s) container_of(s, struct fsl_spi_slave, slave)
+#define US_PER_SECOND 1000000UL
+
+/* default SCK frequency, unit: HZ */
+#define FSL_ESPI_DEFAULT_SCK_FREQ 10000000
+
+#define ESPI_MAX_CS_NUM 4
+#define ESPI_FIFO_WIDTH_BIT 32
+
+#define ESPI_EV_RNE BIT(9)
+#define ESPI_EV_TNF BIT(8)
+#define ESPI_EV_DON BIT(14)
+#define ESPI_EV_TXE BIT(15)
+#define ESPI_EV_RFCNT_SHIFT 24
+#define ESPI_EV_RFCNT_MASK (0x3f << ESPI_EV_RFCNT_SHIFT)
+
+#define ESPI_MODE_EN BIT(31) /* Enable interface */
+#define ESPI_MODE_TXTHR(x) ((x) << 8) /* Tx FIFO threshold */
+#define ESPI_MODE_RXTHR(x) ((x) << 0) /* Rx FIFO threshold */
+
+#define ESPI_COM_CS(x) ((x) << 30)
+#define ESPI_COM_TRANLEN(x) ((x) << 0)
+
+#define ESPI_CSMODE_CI_INACTIVEHIGH BIT(31)
+#define ESPI_CSMODE_CP_BEGIN_EDGCLK BIT(30)
+#define ESPI_CSMODE_REV_MSB_FIRST BIT(29)
+#define ESPI_CSMODE_DIV16 BIT(28)
+#define ESPI_CSMODE_PM(x) ((x) << 24)
+#define ESPI_CSMODE_POL_ASSERTED_LOW BIT(20)
+#define ESPI_CSMODE_LEN(x) ((x) << 16)
+#define ESPI_CSMODE_CSBEF(x) ((x) << 12)
+#define ESPI_CSMODE_CSAFT(x) ((x) << 8)
+#define ESPI_CSMODE_CSCG(x) ((x) << 3)
+
+#define ESPI_CSMODE_INIT_VAL (ESPI_CSMODE_POL_ASSERTED_LOW | \
+ ESPI_CSMODE_CSBEF(0) | ESPI_CSMODE_CSAFT(0) | \
+ ESPI_CSMODE_CSCG(1))
+
+#define ESPI_MAX_DATA_TRANSFER_LEN 0xFFF0
+
+void fsl_spi_cs_activate(struct spi_slave *slave, uint cs)
+{
+ struct fsl_spi_slave *fsl = to_fsl_spi_slave(slave);
+ ccsr_espi_t *espi = fsl->espi;
+ unsigned int com = 0;
+ size_t data_len = fsl->data_len;
+
+ com &= ~(ESPI_COM_CS(0x3) | ESPI_COM_TRANLEN(0xFFFF));
+ com |= ESPI_COM_CS(cs);
+ com |= ESPI_COM_TRANLEN(data_len - 1);
+ out_be32(&espi->com, com);
+}
+
+void fsl_spi_cs_deactivate(struct spi_slave *slave)
+{
+ struct fsl_spi_slave *fsl = to_fsl_spi_slave(slave);
+ ccsr_espi_t *espi = fsl->espi;
+
+ /* clear the RXCNT and TXCNT */
+ out_be32(&espi->mode, in_be32(&espi->mode) & (~ESPI_MODE_EN));
+ out_be32(&espi->mode, in_be32(&espi->mode) | ESPI_MODE_EN);
+}
+
+static void fsl_espi_tx(struct fsl_spi_slave *fsl, const void *dout)
+{
+ ccsr_espi_t *espi = fsl->espi;
+ unsigned int tmpdout, event;
+ int tmp_tx_timeout;
+
+ if (dout)
+ tmpdout = *(u32 *)dout;
+ else
+ tmpdout = 0;
+
+ out_be32(&espi->tx, tmpdout);
+ out_be32(&espi->event, ESPI_EV_TNF);
+ debug("***spi_xfer:...%08x written\n", tmpdout);
+
+ tmp_tx_timeout = fsl->tx_timeout;
+ /* Wait for eSPI transmit to go out */
+ while (tmp_tx_timeout--) {
+ event = in_be32(&espi->event);
+ if (event & ESPI_EV_DON || event & ESPI_EV_TXE) {
+ out_be32(&espi->event, ESPI_EV_TXE);
+ break;
+ }
+ udelay(1);
+ }
+
+ if (tmp_tx_timeout < 0)
+ debug("***spi_xfer:...Tx timeout! event = %08x\n", event);
+}
+
+static int fsl_espi_rx(struct fsl_spi_slave *fsl, void *din,
+ unsigned int bytes)
+{
+ ccsr_espi_t *espi = fsl->espi;
+ unsigned int tmpdin, rx_times;
+ unsigned char *buf, *p_cursor;
+
+ if (bytes <= 0)
+ return 0;
+
+ rx_times = DIV_ROUND_UP(bytes, 4);
+ buf = (unsigned char *)malloc(4 * rx_times);
+ if (!buf) {
+ debug("SF: Failed to malloc memory.\n");
+ return -1;
+ }
+ p_cursor = buf;
+ while (rx_times--) {
+ tmpdin = in_be32(&espi->rx);
+ debug("***spi_xfer:...%08x readed\n", tmpdin);
+ *(u32 *)p_cursor = tmpdin;
+ p_cursor += 4;
+ }
+
+ if (din)
+ memcpy(din, buf, bytes);
+
+ free(buf);
+ out_be32(&espi->event, ESPI_EV_RNE);
+
+ return bytes;
+}
+
+void espi_release_bus(struct fsl_spi_slave *fsl)
+{
+ /* Disable the SPI hardware */
+ out_be32(&fsl->espi->mode,
+ in_be32(&fsl->espi->mode) & (~ESPI_MODE_EN));
+}
+
+int espi_xfer(struct fsl_spi_slave *fsl, uint cs, unsigned int bitlen,
+ const void *data_out, void *data_in, unsigned long flags)
+{
+ struct spi_slave *slave = &fsl->slave;
+ ccsr_espi_t *espi = fsl->espi;
+ unsigned int event, rx_bytes;
+ const void *dout = NULL;
+ void *din = NULL;
+ int len = 0;
+ int num_blks, num_chunks, max_tran_len, tran_len;
+ int num_bytes;
+ unsigned char *buffer = NULL;
+ size_t buf_len;
+ u8 *cmd_buf = fsl->cmd_buf;
+ size_t cmd_len = fsl->cmd_len;
+ size_t data_len = bitlen / 8;
+ size_t rx_offset = 0;
+ int rf_cnt;
+
+ max_tran_len = fsl->max_transfer_length;
+ switch (flags) {
+ case SPI_XFER_BEGIN:
+ cmd_len = data_len;
+ fsl->cmd_len = cmd_len;
+ memcpy(cmd_buf, data_out, cmd_len);
+ return 0;
+ case 0:
+ case SPI_XFER_END:
+ if (bitlen == 0) {
+ fsl_spi_cs_deactivate(slave);
+ return 0;
+ }
+ buf_len = 2 * cmd_len + min(data_len, (size_t)max_tran_len);
+ len = cmd_len + data_len;
+ rx_offset = cmd_len;
+ buffer = (unsigned char *)malloc(buf_len);
+ if (!buffer) {
+ debug("SF: Failed to malloc memory.\n");
+ return 1;
+ }
+ memcpy(buffer, cmd_buf, cmd_len);
+ if (data_in == NULL)
+ memcpy(buffer + cmd_len, data_out, data_len);
+ break;
+ case SPI_XFER_BEGIN | SPI_XFER_END:
+ len = data_len;
+ buffer = (unsigned char *)malloc(len * 2);
+ if (!buffer) {
+ debug("SF: Failed to malloc memory.\n");
+ return 1;
+ }
+ memcpy(buffer, data_out, len);
+ rx_offset = len;
+ cmd_len = 0;
+ break;
+ }
+
+ debug("spi_xfer: data_out %08X(%p) data_in %08X(%p) len %u\n",
+ *(uint *)data_out, data_out, *(uint *)data_in, data_in, len);
+
+ num_chunks = DIV_ROUND_UP(data_len, max_tran_len);
+ while (num_chunks--) {
+ if (data_in)
+ din = buffer + rx_offset;
+ dout = buffer;
+ tran_len = min(data_len, (size_t)max_tran_len);
+ num_blks = DIV_ROUND_UP(tran_len + cmd_len, 4);
+ num_bytes = (tran_len + cmd_len) % 4;
+ fsl->data_len = tran_len + cmd_len;
+ fsl_spi_cs_activate(slave, cs);
+
+ /* Clear all eSPI events */
+ out_be32(&espi->event, 0xffffffff);
+ /* handle data in 32-bit chunks */
+ while (num_blks) {
+ event = in_be32(&espi->event);
+ if (event & ESPI_EV_TNF) {
+ fsl_espi_tx(fsl, dout);
+ /* Set up the next iteration */
+ if (len > 4) {
+ len -= 4;
+ dout += 4;
+ }
+ }
+
+ event = in_be32(&espi->event);
+ if (event & ESPI_EV_RNE) {
+ rf_cnt = ((event & ESPI_EV_RFCNT_MASK)
+ >> ESPI_EV_RFCNT_SHIFT);
+ if (rf_cnt >= 4)
+ rx_bytes = 4;
+ else if (num_blks == 1 && rf_cnt == num_bytes)
+ rx_bytes = num_bytes;
+ else
+ continue;
+ if (fsl_espi_rx(fsl, din, rx_bytes)
+ == rx_bytes) {
+ num_blks--;
+ if (din)
+ din = (unsigned char *)din
+ + rx_bytes;
+ }
+ }
+ }
+ if (data_in) {
+ memcpy(data_in, buffer + 2 * cmd_len, tran_len);
+ if (*buffer == 0x0b) {
+ data_in += tran_len;
+ data_len -= tran_len;
+ *(int *)buffer += tran_len;
+ }
+ }
+ fsl_spi_cs_deactivate(slave);
+ }
+
+ free(buffer);
+ return 0;
+}
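+
+/*
+ * Editorial note on the flag handling above: a typical flash command is
+ * issued in two calls. The SPI_XFER_BEGIN call only caches the command
+ * bytes in fsl->cmd_buf, and the matching SPI_XFER_END call prepends them
+ * and clocks the whole command + data sequence out in chunks of at most
+ * max_transfer_length bytes.
+ */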
+
+void espi_claim_bus(struct fsl_spi_slave *fsl, unsigned int cs)
+{
+ ccsr_espi_t *espi = fsl->espi;
+ unsigned char pm = fsl->pm;
+ unsigned int mode = fsl->mode;
+ unsigned int div16 = fsl->div16;
+ int i;
+
+ /* Enable eSPI interface */
+ out_be32(&espi->mode, ESPI_MODE_RXTHR(3)
+ | ESPI_MODE_TXTHR(4) | ESPI_MODE_EN);
+
+ out_be32(&espi->event, 0xffffffff); /* Clear all eSPI events */
+ out_be32(&espi->mask, 0x00000000); /* Mask all eSPI interrupts */
+
+ /* Init CS mode interface */
+ for (i = 0; i < ESPI_MAX_CS_NUM; i++)
+ out_be32(&espi->csmode[i], ESPI_CSMODE_INIT_VAL);
+
+ out_be32(&espi->csmode[cs], in_be32(&espi->csmode[cs]) &
+ ~(ESPI_CSMODE_PM(0xF) | ESPI_CSMODE_DIV16
+ | ESPI_CSMODE_CI_INACTIVEHIGH | ESPI_CSMODE_CP_BEGIN_EDGCLK
+ | ESPI_CSMODE_REV_MSB_FIRST | ESPI_CSMODE_LEN(0xF)));
+
+ /* Set eSPI BRG clock source */
+ out_be32(&espi->csmode[cs], in_be32(&espi->csmode[cs])
+ | ESPI_CSMODE_PM(pm) | div16);
+
+ /* Set eSPI mode */
+ if (mode & SPI_CPHA)
+ out_be32(&espi->csmode[cs], in_be32(&espi->csmode[cs])
+ | ESPI_CSMODE_CP_BEGIN_EDGCLK);
+ if (mode & SPI_CPOL)
+ out_be32(&espi->csmode[cs], in_be32(&espi->csmode[cs])
+ | ESPI_CSMODE_CI_INACTIVEHIGH);
+
+ /* Character bit order: msb first */
+ out_be32(&espi->csmode[cs], in_be32(&espi->csmode[cs])
+ | ESPI_CSMODE_REV_MSB_FIRST);
+
+ /* Character length in bits, 0x3 to 0xf, i.e. 4 bits to 16 bits */
+ out_be32(&espi->csmode[cs], in_be32(&espi->csmode[cs])
+ | ESPI_CSMODE_LEN(7));
+}
+
+void espi_setup_slave(struct fsl_spi_slave *fsl)
+{
+ unsigned int max_hz;
+ sys_info_t sysinfo;
+ unsigned long spibrg = 0;
+ unsigned long spi_freq = 0;
+ unsigned char pm = 0;
+
+ max_hz = fsl->speed_hz;
+
+ get_sys_info(&sysinfo);
+ spibrg = sysinfo.freq_systembus / 2;
+ fsl->div16 = 0;
+ if ((spibrg / max_hz) > 32) {
+ fsl->div16 = ESPI_CSMODE_DIV16;
+ pm = spibrg / (max_hz * 16 * 2);
+ if (pm > 16) {
+ pm = 16;
+ debug("max_hz is too low: %d Hz, %ld Hz is used.\n",
+ max_hz, spibrg / (32 * 16));
+ }
+ } else {
+ pm = spibrg / (max_hz * 2);
+ }
+ if (pm)
+ pm--;
+ fsl->pm = pm;
+
+ if (fsl->div16)
+ spi_freq = spibrg / ((pm + 1) * 2 * 16);
+ else
+ spi_freq = spibrg / ((pm + 1) * 2);
+
+ /* set tx_timeout to 10 times the time one eSPI FIFO entry needs to go out */
+ fsl->tx_timeout = DIV_ROUND_UP((US_PER_SECOND * ESPI_FIFO_WIDTH_BIT
+ * 10), spi_freq);
+}
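+
+/*
+ * Illustrative numbers (editorial, assuming a hypothetical spi_freq of
+ * 10 MHz): tx_timeout = DIV_ROUND_UP(1000000 * 32 * 10, 10000000) = 32,
+ * so fsl_espi_tx() polls the event register for up to 32 us before it
+ * reports a Tx timeout.
+ */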
+
+#if !CONFIG_IS_ENABLED(DM_SPI)
+int spi_cs_is_valid(unsigned int bus, unsigned int cs)
+{
+ return bus == 0 && cs < ESPI_MAX_CS_NUM;
+}
+
+struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
+ unsigned int max_hz, unsigned int mode)
+{
+ struct fsl_spi_slave *fsl;
+
+ if (!spi_cs_is_valid(bus, cs))
+ return NULL;
+
+ fsl = spi_alloc_slave(struct fsl_spi_slave, bus, cs);
+ if (!fsl)
+ return NULL;
+
+ fsl->espi = (void *)(CONFIG_SYS_MPC85xx_ESPI_ADDR);
+ fsl->mode = mode;
+ fsl->max_transfer_length = ESPI_MAX_DATA_TRANSFER_LEN;
+ fsl->speed_hz = max_hz;
+
+ espi_setup_slave(fsl);
+
+ return &fsl->slave;
+}
+
+void spi_free_slave(struct spi_slave *slave)
+{
+ struct fsl_spi_slave *fsl = to_fsl_spi_slave(slave);
+
+ free(fsl);
+}
+
+int spi_claim_bus(struct spi_slave *slave)
+{
+ struct fsl_spi_slave *fsl = to_fsl_spi_slave(slave);
+
+ espi_claim_bus(fsl, slave->cs);
+
+ return 0;
+}
+
+void spi_release_bus(struct spi_slave *slave)
+{
+ struct fsl_spi_slave *fsl = to_fsl_spi_slave(slave);
+
+ espi_release_bus(fsl);
+}
+
+int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
+ void *din, unsigned long flags)
+{
+ struct fsl_spi_slave *fsl = (struct fsl_spi_slave *)slave;
+
+ return espi_xfer(fsl, slave->cs, bitlen, dout, din, flags);
+}
+#else
+static void __espi_set_speed(struct fsl_spi_slave *fsl)
+{
+ espi_setup_slave(fsl);
+
+ /* Set eSPI BRG clock source */
+ out_be32(&fsl->espi->csmode[fsl->cs],
+ in_be32(&fsl->espi->csmode[fsl->cs])
+ | ESPI_CSMODE_PM(fsl->pm) | fsl->div16);
+}
+
+static void __espi_set_mode(struct fsl_spi_slave *fsl)
+{
+ /* Set eSPI mode */
+ if (fsl->mode & SPI_CPHA)
+ out_be32(&fsl->espi->csmode[fsl->cs],
+ in_be32(&fsl->espi->csmode[fsl->cs])
+ | ESPI_CSMODE_CP_BEGIN_EDGCLK);
+ if (fsl->mode & SPI_CPOL)
+ out_be32(&fsl->espi->csmode[fsl->cs],
+ in_be32(&fsl->espi->csmode[fsl->cs])
+ | ESPI_CSMODE_CI_INACTIVEHIGH);
+}
+
+static int fsl_espi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct fsl_spi_slave *fsl = dev_get_priv(bus);
+
+ espi_claim_bus(fsl, fsl->cs);
+
+ return 0;
+}
+
+static int fsl_espi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct fsl_spi_slave *fsl = dev_get_priv(bus);
+
+ espi_release_bus(fsl);
+
+ return 0;
+}
+
+static int fsl_espi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct fsl_spi_slave *fsl = dev_get_priv(bus);
+
+ return espi_xfer(fsl, fsl->cs, bitlen, dout, din, flags);
+}
+
+static int fsl_espi_set_speed(struct udevice *bus, uint speed)
+{
+ struct fsl_spi_slave *fsl = dev_get_priv(bus);
+
+ debug("%s speed %u\n", __func__, speed);
+ fsl->speed_hz = speed;
+
+ __espi_set_speed(fsl);
+
+ return 0;
+}
+
+static int fsl_espi_set_mode(struct udevice *bus, uint mode)
+{
+ struct fsl_spi_slave *fsl = dev_get_priv(bus);
+
+ debug("%s mode %u\n", __func__, mode);
+ fsl->mode = mode;
+
+ __espi_set_mode(fsl);
+
+ return 0;
+}
+
+static int fsl_espi_child_pre_probe(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ struct udevice *bus = dev->parent;
+ struct fsl_spi_slave *fsl = dev_get_priv(bus);
+
+ debug("%s cs %u\n", __func__, slave_plat->cs);
+ fsl->cs = slave_plat->cs;
+
+ return 0;
+}
+
+static int fsl_espi_probe(struct udevice *bus)
+{
+ struct fsl_espi_plat *plat = dev_get_plat(bus);
+ struct fsl_spi_slave *fsl = dev_get_priv(bus);
+
+ fsl->espi = (ccsr_espi_t *)((u32)plat->regs_addr);
+ fsl->max_transfer_length = ESPI_MAX_DATA_TRANSFER_LEN;
+ fsl->speed_hz = plat->speed_hz;
+
+ debug("%s probe done, bus-num %d.\n", bus->name, dev_seq(bus));
+
+ return 0;
+}
+
+static const struct dm_spi_ops fsl_espi_ops = {
+ .claim_bus = fsl_espi_claim_bus,
+ .release_bus = fsl_espi_release_bus,
+ .xfer = fsl_espi_xfer,
+ .set_speed = fsl_espi_set_speed,
+ .set_mode = fsl_espi_set_mode,
+};
+
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+static int fsl_espi_of_to_plat(struct udevice *bus)
+{
+ fdt_addr_t addr;
+ struct fsl_espi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ addr = dev_read_addr(bus);
+ if (addr == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ plat->regs_addr = lower_32_bits(addr);
+ plat->speed_hz = fdtdec_get_int(blob, node, "spi-max-frequency",
+ FSL_ESPI_DEFAULT_SCK_FREQ);
+
+ debug("ESPI: regs=%p, max-frequency=%d\n",
+ &plat->regs_addr, plat->speed_hz);
+
+ return 0;
+}
+
+static const struct udevice_id fsl_espi_ids[] = {
+ { .compatible = "fsl,mpc8536-espi" },
+ { }
+};
+#endif
+
+U_BOOT_DRIVER(fsl_espi) = {
+ .name = "fsl_espi",
+ .id = UCLASS_SPI,
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .of_match = fsl_espi_ids,
+ .of_to_plat = fsl_espi_of_to_plat,
+#endif
+ .ops = &fsl_espi_ops,
+ .plat_auto = sizeof(struct fsl_espi_plat),
+ .priv_auto = sizeof(struct fsl_spi_slave),
+ .probe = fsl_espi_probe,
+ .child_pre_probe = fsl_espi_child_pre_probe,
+};
+#endif
diff --git a/roms/u-boot/drivers/spi/fsl_qspi.c b/roms/u-boot/drivers/spi/fsl_qspi.c
new file mode 100644
index 000000000..3f97730ba
--- /dev/null
+++ b/roms/u-boot/drivers/spi/fsl_qspi.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Freescale QuadSPI driver.
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ * Copyright (C) 2018 Bootlin
+ * Copyright (C) 2018 exceet electronics GmbH
+ * Copyright (C) 2018 Kontron Electronics GmbH
+ * Copyright 2019-2020 NXP
+ *
+ * This driver is a port of the Linux Freescale QSPI driver taken from the
+ * v5.5-rc1 tag, with the following information.
+ *
+ * Transition to SPI MEM interface:
+ * Authors:
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ * Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
+ * Suresh Gupta <suresh.gupta@nxp.com>
+ *
+ * Based on the original fsl-quadspi.c spi-nor driver.
+ * Transition to spi-mem in spi-fsl-qspi.c
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <log.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <asm/global_data.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+#include <linux/sizes.h>
+#include <linux/iopoll.h>
+#include <linux/iopoll.h>
+#include <linux/sizes.h>
+#include <linux/err.h>
+#include <asm/io.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * The driver only uses one single LUT entry, that is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (15).
+ */
+#define SEQID_LUT 15
+#define SEQID_LUT_AHB 14
+
+/* Registers used by the driver */
+#define QUADSPI_MCR 0x00
+#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16)
+#define QUADSPI_MCR_MDIS_MASK BIT(14)
+#define QUADSPI_MCR_CLR_TXF_MASK BIT(11)
+#define QUADSPI_MCR_CLR_RXF_MASK BIT(10)
+#define QUADSPI_MCR_DDR_EN_MASK BIT(7)
+#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2)
+#define QUADSPI_MCR_SWRSTHD_MASK BIT(1)
+#define QUADSPI_MCR_SWRSTSD_MASK BIT(0)
+
+#define QUADSPI_IPCR 0x08
+#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
+#define QUADSPI_FLSHCR 0x0c
+#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0)
+#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8)
+#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16)
+
+#define QUADSPI_BUF3CR 0x1c
+#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
+#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
+#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8)
+
+#define QUADSPI_BFGENCR 0x20
+#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12)
+
+#define QUADSPI_BUF0IND 0x30
+#define QUADSPI_BUF1IND 0x34
+#define QUADSPI_BUF2IND 0x38
+#define QUADSPI_SFAR 0x100
+
+#define QUADSPI_SMPR 0x108
+#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16)
+#define QUADSPI_SMPR_FSDLY_MASK BIT(6)
+#define QUADSPI_SMPR_FSPHS_MASK BIT(5)
+#define QUADSPI_SMPR_HSENA_MASK BIT(0)
+
+#define QUADSPI_RBCT 0x110
+#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0)
+#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8)
+
+#define QUADSPI_TBDR 0x154
+
+#define QUADSPI_SR 0x15c
+#define QUADSPI_SR_IP_ACC_MASK BIT(1)
+#define QUADSPI_SR_AHB_ACC_MASK BIT(2)
+
+#define QUADSPI_FR 0x160
+#define QUADSPI_FR_TFF_MASK BIT(0)
+
+#define QUADSPI_RSER 0x164
+#define QUADSPI_RSER_TFIE BIT(0)
+
+#define QUADSPI_SPTRCLR 0x16c
+#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
+#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
+
+#define QUADSPI_SFA1AD 0x180
+#define QUADSPI_SFA2AD 0x184
+#define QUADSPI_SFB1AD 0x188
+#define QUADSPI_SFB2AD 0x18c
+#define QUADSPI_RBDR(x) (0x200 + ((x) * 4))
+
+#define QUADSPI_LUTKEY 0x300
+#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define QUADSPI_LCKCR 0x304
+#define QUADSPI_LCKER_LOCK BIT(0)
+#define QUADSPI_LCKER_UNLOCK BIT(1)
+
+#define QUADSPI_LUT_BASE 0x310
+#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define QUADSPI_LUT_REG(idx) \
+ (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
+
+#define QUADSPI_AHB_LUT_OFFSET (SEQID_LUT_AHB * 4 * 4)
+#define QUADSPI_AHB_LUT_REG(idx) \
+ (QUADSPI_LUT_BASE + QUADSPI_AHB_LUT_OFFSET + (idx) * 4)
+
+/* Instruction set for the LUT register */
+#define LUT_STOP 0
+#define LUT_CMD 1
+#define LUT_ADDR 2
+#define LUT_DUMMY 3
+#define LUT_MODE 4
+#define LUT_MODE2 5
+#define LUT_MODE4 6
+#define LUT_FSL_READ 7
+#define LUT_FSL_WRITE 8
+#define LUT_JMP_ON_CS 9
+#define LUT_ADDR_DDR 10
+#define LUT_MODE_DDR 11
+#define LUT_MODE2_DDR 12
+#define LUT_MODE4_DDR 13
+#define LUT_FSL_READ_DDR 14
+#define LUT_FSL_WRITE_DDR 15
+#define LUT_DATA_LEARN 16
+
+/*
+ * The PAD definitions for LUT register.
+ *
+ * The pad stands for the number of IO lines [0:3].
+ * For example, the quad read needs four IO lines,
+ * so you should use LUT_PAD(4).
+ */
+#define LUT_PAD(x) (fls(x) - 1)
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
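+
+/*
+ * Editorial example (not part of the original patch): a single-wire
+ * fast-read opcode 0x0b followed by a 32-bit address, as used by the
+ * CONFIG_FSL_QSPI_AHB_FULL_MAP path below, occupies the first two
+ * instruction slots of lutval[0]:
+ *
+ *   LUT_DEF(0, LUT_CMD,  LUT_PAD(1), 0x0b) = 0x0000040b
+ *   LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 0x20) = 0x08200000
+ *
+ * so lutval[0] ends up as 0x0820040b.
+ */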
+
+/* Controller needs driver to swap endianness */
+#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0)
+
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK BIT(1)
+
+/*
+ * TKT253890, the controller needs the driver to fill the txfifo with
+ * 16 bytes at least to trigger a data transfer, even though the extra
+ * data won't be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890 BIT(2)
+
+/* TKT245618, the controller cannot wake up from wait mode */
+#define QUADSPI_QUIRK_TKT245618 BIT(3)
+
+/*
+ * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
+ * internally. No need to add it when setting SFXXAD and SFAR registers
+ */
+#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
+
+/*
+ * Controller uses TDH bits in register QUADSPI_FLSHCR.
+ * They need to be set in accordance with the DDR/SDR mode.
+ */
+#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5)
+
+/*
+ * Controller only has two CS on flash A, no flash B port
+ */
+#define QUADSPI_QUIRK_SINGLE_BUS BIT(6)
+
+struct fsl_qspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static const struct fsl_qspi_devtype_data vybrid_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6sx_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx7d_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx6ul_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data imx7ulp_data = {
+ .rxfifo = SZ_64,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_128,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
+ QUADSPI_QUIRK_USE_TDH_SETTING | QUADSPI_QUIRK_SINGLE_BUS,
+ .little_endian = true,
+};
+
+static const struct fsl_qspi_devtype_data ls1021a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = 0,
+ .little_endian = false,
+};
+
+static const struct fsl_qspi_devtype_data ls2080a_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_64,
+ .ahb_buf_size = SZ_1K,
+ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
+ .little_endian = true,
+};
+
+struct fsl_qspi {
+ struct udevice *dev;
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ u32 memmap_size;
+ const struct fsl_qspi_devtype_data *devtype_data;
+ int selected;
+};
+
+static inline int needs_swap_endian(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
+}
+
+static inline int needs_4x_clock(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
+}
+
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
+}
+
+static inline int needs_amba_base_offset(struct fsl_qspi *q)
+{
+ return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
+}
+
+static inline int needs_tdh_setting(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
+}
+
+static inline int needs_single_bus(struct fsl_qspi *q)
+{
+ return q->devtype_data->quirks & QUADSPI_QUIRK_SINGLE_BUS;
+}
+
+/*
+ * An IC bug makes it necessary to rearrange the 32-bit data.
+ * Later chips, such as IMX6SLX, have fixed this bug.
+ */
+static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
+{
+ return needs_swap_endian(q) ? __swab32(a) : a;
+}
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The QSPI controller's endianness is independent of the CPU core's
+ * endianness, so even on a little-endian core the QSPI registers may be
+ * either big-endian or little-endian.
+ */
+static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ out_le32(addr, val);
+ else
+ out_be32(addr, val);
+}
+
+static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
+{
+ if (q->devtype_data->little_endian)
+ return in_le32(addr);
+
+ return in_be32(addr);
+}
+
+static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool fsl_qspi_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = dev_get_priv(slave->dev->parent);
+ int ret;
+
+ ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+ /*
+ * The number of instructions needed for the op needs to fit
+ * into a single LUT entry.
+ */
+ if (op->addr.nbytes +
+ (op->dummy.nbytes ? 1 : 0) +
+ (op->data.nbytes ? 1 : 0) > 6)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
+ if (op->dummy.nbytes &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > q->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > q->devtype_data->txfifo)
+ return false;
+
+ return spi_mem_default_supports_op(slave, op);
+}
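+
+/*
+ * Editorial note: as a concrete example, a quad-output read (opcode 0x6b,
+ * three address bytes on one wire, one dummy byte, data on four wires)
+ * satisfies the buswidth, LUT-slot and dummy-cycle checks above; the data
+ * phase itself is expected to be sized via fsl_qspi_adjust_op_size() below.
+ */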
+
+static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
+ if (op->addr.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
+ LUT_PAD(op->addr.buswidth),
+ (op->addr.nbytes == 4) ? 0x20 : 0x18);
+ lutidx++;
+ }
+ } else {
+ /*
+ * For some unknown reason, using LUT_ADDR doesn't work in some
+ * cases (at least with only one byte long addresses), so
+ * let's use LUT_MODE to write the address bytes one by one.
+ */
+ for (i = 0; i < op->addr.nbytes; i++) {
+ u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
+ LUT_PAD(op->addr.buswidth),
+ addrbyte);
+ lutidx++;
+ }
+ }
+
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ LUT_PAD(op->dummy.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_FSL_READ : LUT_FSL_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
+
+ dev_dbg(q->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n",
+ op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
+
+ if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN &&
+ op->addr.nbytes) {
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ qspi_writel(q, lutval[i], base + QUADSPI_AHB_LUT_REG(i));
+ }
+ }
+
+ /* lock LUT */
+ qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
+ qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
+}
+
+/*
+ * If we have changed the content of the flash by writing or erasing, or if we
+ * read from flash with a different offset into the page buffer, we need to
+ * invalidate the AHB buffer. If we do not do so, we may read out the wrong
+ * data. The spec tells us to reset the AHB domain and the serial flash
+ * domain at the same time.
+ */
+static void fsl_qspi_invalidate(struct fsl_qspi *q)
+{
+ u32 reg;
+
+ reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
+ reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+
+ /*
+ * The minimum delay is 1 AHB + 2 SFCK clocks;
+ * a delay of 1 us is enough.
+ */
+ udelay(1);
+
+ reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
+ qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
+}
+
+static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_slave *slave)
+{
+ struct dm_spi_slave_plat *plat =
+ dev_get_parent_plat(slave->dev);
+
+ if (q->selected == plat->cs)
+ return;
+
+ q->selected = plat->cs;
+ fsl_qspi_invalidate(q);
+}
+
+static u32 fsl_qspi_memsize_per_cs(struct fsl_qspi *q)
+{
+ if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
+ if (needs_single_bus(q))
+ return q->memmap_size / 2;
+ else
+ return q->memmap_size / 4;
+ } else {
+ return ALIGN(q->devtype_data->ahb_buf_size, 0x400);
+ }
+}
+
+static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ void __iomem *ahb_read_addr = q->ahb_addr;
+
+ if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
+ if (op->addr.nbytes)
+ ahb_read_addr += op->addr.val;
+ }
+
+ memcpy_fromio(op->data.buf.in,
+ ahb_read_addr + q->selected * fsl_qspi_memsize_per_cs(q),
+ op->data.nbytes);
+}
+
+static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ memcpy(&val, op->data.buf.out + i, 4);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (i < op->data.nbytes) {
+ memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
+ val = fsl_qspi_endian_xchg(q, val);
+ qspi_writel(q, val, base + QUADSPI_TBDR);
+ }
+
+ if (needs_fill_txfifo(q)) {
+ for (i = op->data.nbytes; i < 16; i += 4)
+ qspi_writel(q, 0, base + QUADSPI_TBDR);
+ }
+}
+
+static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int i;
+ u8 *buf = op->data.buf.in;
+ u32 val;
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, 4);
+ }
+
+ if (i < op->data.nbytes) {
+ val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
+ val = fsl_qspi_endian_xchg(q, val);
+ memcpy(buf + i, &val, op->data.nbytes - i);
+ }
+}
+
+static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
+ u32 mask, u32 delay_us, u32 timeout_us)
+{
+ u32 reg;
+
+ if (!q->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ return readl_poll_timeout(base, reg, !(reg & mask), timeout_us);
+}
+
+static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
+{
+ void __iomem *base = q->iobase;
+ int err = 0;
+
+ /*
+ * Always start the sequence at the same index since we update
+ * the LUT at each exec_op() call. Also specify the DATA length,
+ * since it has not been specified in the LUT.
+ */
+ qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
+ base + QUADSPI_IPCR);
+
+ /* wait for the controller to become ready */
+ err = fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR,
+ (QUADSPI_SR_IP_ACC_MASK |
+ QUADSPI_SR_AHB_ACC_MASK),
+ 10, 1000);
+
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ fsl_qspi_read_rxfifo(q, op);
+
+ return err;
+}
+
+static int fsl_qspi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = dev_get_priv(slave->dev->parent);
+ void __iomem *base = q->iobase;
+ u32 addr_offset = 0;
+ int err = 0;
+
+ /* wait for the controller to become ready */
+ fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
+ QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
+
+ fsl_qspi_select_mem(q, slave);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP)) {
+ if (op->addr.nbytes)
+ addr_offset += op->addr.val;
+ }
+
+ qspi_writel(q,
+ q->selected * fsl_qspi_memsize_per_cs(q) + addr_offset,
+ base + QUADSPI_SFAR);
+
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
+ QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
+ base + QUADSPI_MCR);
+
+ qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
+ base + QUADSPI_SPTRCLR);
+
+ fsl_qspi_prepare_lut(q, op);
+
+ /*
+ * If we have large chunks of data, we read them through the AHB bus
+ * by accessing the mapped memory. In all other cases we use
+ * IP commands to access the flash.
+ */
+ if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN) {
+ fsl_qspi_read_ahb(q, op);
+ } else {
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
+ QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
+
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ fsl_qspi_fill_txfifo(q, op);
+
+ err = fsl_qspi_do_op(q, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ fsl_qspi_invalidate(q);
+
+ return err;
+}
+
+static int fsl_qspi_adjust_op_size(struct spi_slave *slave,
+ struct spi_mem_op *op)
+{
+ struct fsl_qspi *q = dev_get_priv(slave->dev->parent);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > q->devtype_data->txfifo)
+ op->data.nbytes = q->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > q->devtype_data->ahb_buf_size)
+ op->data.nbytes = q->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
+
+ return 0;
+}
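+
+/*
+ * Worked example (editorial, using the rxfifo = 128 / ahb_buf_size = 1 KiB
+ * values from the tables above): a 300-byte read stays below ahb_buf_size
+ * but exceeds rxfifo - 4, so it is rounded down to ALIGN_DOWN(300, 8) = 296
+ * bytes; a 2 KiB read is clamped to 1 KiB; writes are always clamped to the
+ * TX FIFO size.
+ */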
+
+static int fsl_qspi_default_setup(struct fsl_qspi *q)
+{
+ void __iomem *base = q->iobase;
+ u32 reg, addr_offset = 0, memsize_cs;
+
+ /* Reset the module */
+ qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+ base + QUADSPI_MCR);
+ udelay(1);
+
+ /* Disable the module */
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
+ /*
+ * Previous boot stages (BootROM, bootloader) might have used DDR
+ * mode and did not clear the TDH bits. As we currently use SDR mode
+ * only, clear the TDH bits if necessary.
+ */
+ if (needs_tdh_setting(q))
+ qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
+ ~QUADSPI_FLSHCR_TDH_MASK,
+ base + QUADSPI_FLSHCR);
+
+ reg = qspi_readl(q, base + QUADSPI_SMPR);
+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+ | QUADSPI_SMPR_HSENA_MASK
+ | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
+
+ /* We only use buffer 3 for AHB reads */
+ qspi_writel(q, 0, base + QUADSPI_BUF0IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF1IND);
+ qspi_writel(q, 0, base + QUADSPI_BUF2IND);
+
+ if (IS_ENABLED(CONFIG_FSL_QSPI_AHB_FULL_MAP))
+ qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT_AHB),
+ q->iobase + QUADSPI_BFGENCR);
+ else
+ qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
+ q->iobase + QUADSPI_BFGENCR);
+
+ qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
+ qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
+ QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
+ base + QUADSPI_BUF3CR);
+
+ if (needs_amba_base_offset(q))
+ addr_offset = q->memmap_phy;
+
+ /*
+ * In HW there can be a maximum of four chips on two buses with
+ * two chip selects on each bus. We use four chip selects in SW
+ * to differentiate between the four chips.
+ * Each chip select gets a window of memsize_cs bytes, and SFA1AD, SFA2AD,
+ * SFB1AD and SFB2AD are set to the end of the respective window.
+ */
+ memsize_cs = fsl_qspi_memsize_per_cs(q);
+ qspi_writel(q, memsize_cs + addr_offset,
+ base + QUADSPI_SFA1AD);
+ qspi_writel(q, memsize_cs * 2 + addr_offset,
+ base + QUADSPI_SFA2AD);
+ if (!needs_single_bus(q)) {
+ qspi_writel(q, memsize_cs * 3 + addr_offset,
+ base + QUADSPI_SFB1AD);
+ qspi_writel(q, memsize_cs * 4 + addr_offset,
+ base + QUADSPI_SFB2AD);
+ }
+
+ q->selected = -1;
+
+ /* Enable the module */
+ qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
+ base + QUADSPI_MCR);
+ return 0;
+}
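+
+/*
+ * Illustrative layout (editorial, assuming a hypothetical 64 MiB mapping):
+ * with CONFIG_FSL_QSPI_AHB_FULL_MAP enabled and both buses in use,
+ * memsize_cs = 16 MiB, so SFA1AD..SFB2AD are programmed to 16/32/48/64 MiB
+ * (plus memmap_phy on parts that need the AMBA base added). Without the
+ * full map, each chip select only gets an ahb_buf_size-sized window.
+ */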
+
+static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
+ .adjust_op_size = fsl_qspi_adjust_op_size,
+ .supports_op = fsl_qspi_supports_op,
+ .exec_op = fsl_qspi_exec_op,
+};
+
+static int fsl_qspi_probe(struct udevice *bus)
+{
+ struct dm_spi_bus *dm_bus = dev_get_uclass_priv(bus);
+ struct fsl_qspi *q = dev_get_priv(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+ struct fdt_resource res;
+ int ret;
+
+ q->dev = bus;
+ q->devtype_data = (struct fsl_qspi_devtype_data *)
+ dev_get_driver_data(bus);
+
+ /* find the resources */
+ ret = fdt_get_named_resource(blob, node, "reg", "reg-names", "QuadSPI",
+ &res);
+ if (ret) {
+ dev_err(bus, "Can't get regs base addresses(ret = %d)!\n", ret);
+ return -ENOMEM;
+ }
+
+ q->iobase = map_physmem(res.start, res.end - res.start, MAP_NOCACHE);
+
+ ret = fdt_get_named_resource(blob, node, "reg", "reg-names",
+ "QuadSPI-memory", &res);
+ if (ret) {
+ dev_err(bus, "Can't get AMBA base addresses(ret = %d)!\n", ret);
+ return -ENOMEM;
+ }
+
+ q->ahb_addr = map_physmem(res.start, res.end - res.start, MAP_NOCACHE);
+ q->memmap_phy = res.start;
+ q->memmap_size = res.end - res.start;
+
+ dm_bus->max_hz = fdtdec_get_int(blob, node, "spi-max-frequency",
+ 66000000);
+
+ fsl_qspi_default_setup(q);
+
+ return 0;
+}
+
+static int fsl_qspi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ return 0;
+}
+
+static int fsl_qspi_claim_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+static int fsl_qspi_release_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+static int fsl_qspi_set_speed(struct udevice *bus, uint speed)
+{
+ return 0;
+}
+
+static int fsl_qspi_set_mode(struct udevice *bus, uint mode)
+{
+ return 0;
+}
+
+static const struct dm_spi_ops fsl_qspi_ops = {
+ .claim_bus = fsl_qspi_claim_bus,
+ .release_bus = fsl_qspi_release_bus,
+ .xfer = fsl_qspi_xfer,
+ .set_speed = fsl_qspi_set_speed,
+ .set_mode = fsl_qspi_set_mode,
+ .mem_ops = &fsl_qspi_mem_ops,
+};
+
+static const struct udevice_id fsl_qspi_ids[] = {
+ { .compatible = "fsl,vf610-qspi", .data = (ulong)&vybrid_data, },
+ { .compatible = "fsl,imx6sx-qspi", .data = (ulong)&imx6sx_data, },
+ { .compatible = "fsl,imx6ul-qspi", .data = (ulong)&imx6ul_data, },
+ { .compatible = "fsl,imx7d-qspi", .data = (ulong)&imx7d_data, },
+ { .compatible = "fsl,imx7ulp-qspi", .data = (ulong)&imx7ulp_data, },
+ { .compatible = "fsl,ls1021a-qspi", .data = (ulong)&ls1021a_data, },
+ { .compatible = "fsl,ls1088a-qspi", .data = (ulong)&ls2080a_data, },
+ { .compatible = "fsl,ls2080a-qspi", .data = (ulong)&ls2080a_data, },
+ { }
+};
+
+U_BOOT_DRIVER(fsl_qspi) = {
+ .name = "fsl_qspi",
+ .id = UCLASS_SPI,
+ .of_match = fsl_qspi_ids,
+ .ops = &fsl_qspi_ops,
+ .priv_auto = sizeof(struct fsl_qspi),
+ .probe = fsl_qspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/ich.c b/roms/u-boot/drivers/spi/ich.c
new file mode 100644
index 000000000..1cd410493
--- /dev/null
+++ b/roms/u-boot/drivers/spi/ich.c
@@ -0,0 +1,1003 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2011-12 The Chromium OS Authors.
+ *
+ * This file is derived from the flashrom project.
+ */
+
+#define LOG_CATEGORY UCLASS_SPI
+
+#include <common.h>
+#include <bootstage.h>
+#include <div64.h>
+#include <dm.h>
+#include <dt-structs.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <pch.h>
+#include <pci.h>
+#include <pci_ids.h>
+#include <spi.h>
+#include <spi_flash.h>
+#include <spi-mem.h>
+#include <spl.h>
+#include <asm/fast_spi.h>
+#include <asm/io.h>
+#include <dm/uclass-internal.h>
+#include <asm/mtrr.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/sizes.h>
+
+#include "ich.h"
+
+#ifdef DEBUG_TRACE
+#define debug_trace(fmt, args...) debug(fmt, ##args)
+#else
+#define debug_trace(x, args...)
+#endif
+
+static u8 ich_readb(struct ich_spi_priv *priv, int reg)
+{
+ u8 value = readb(priv->base + reg);
+
+ debug_trace("read %2.2x from %4.4x\n", value, reg);
+
+ return value;
+}
+
+static u16 ich_readw(struct ich_spi_priv *priv, int reg)
+{
+ u16 value = readw(priv->base + reg);
+
+ debug_trace("read %4.4x from %4.4x\n", value, reg);
+
+ return value;
+}
+
+static u32 ich_readl(struct ich_spi_priv *priv, int reg)
+{
+ u32 value = readl(priv->base + reg);
+
+ debug_trace("read %8.8x from %4.4x\n", value, reg);
+
+ return value;
+}
+
+static void ich_writeb(struct ich_spi_priv *priv, u8 value, int reg)
+{
+ writeb(value, priv->base + reg);
+ debug_trace("wrote %2.2x to %4.4x\n", value, reg);
+}
+
+static void ich_writew(struct ich_spi_priv *priv, u16 value, int reg)
+{
+ writew(value, priv->base + reg);
+ debug_trace("wrote %4.4x to %4.4x\n", value, reg);
+}
+
+static void ich_writel(struct ich_spi_priv *priv, u32 value, int reg)
+{
+ writel(value, priv->base + reg);
+ debug_trace("wrote %8.8x to %4.4x\n", value, reg);
+}
+
+static void write_reg(struct ich_spi_priv *priv, const void *value,
+ int dest_reg, uint32_t size)
+{
+ memcpy_toio(priv->base + dest_reg, value, size);
+}
+
+static void read_reg(struct ich_spi_priv *priv, int src_reg, void *value,
+ uint32_t size)
+{
+ memcpy_fromio(value, priv->base + src_reg, size);
+}
+
+static void ich_set_bbar(struct ich_spi_priv *ctlr, uint32_t minaddr)
+{
+ const uint32_t bbar_mask = 0x00ffff00;
+ uint32_t ichspi_bbar;
+
+ if (ctlr->bbar) {
+ minaddr &= bbar_mask;
+ ichspi_bbar = ich_readl(ctlr, ctlr->bbar) & ~bbar_mask;
+ ichspi_bbar |= minaddr;
+ ich_writel(ctlr, ichspi_bbar, ctlr->bbar);
+ }
+}
+
+/* @return true if the SPI flash supports the 33MHz speed */
+static bool ich9_can_do_33mhz(struct udevice *dev)
+{
+ struct ich_spi_priv *priv = dev_get_priv(dev);
+ u32 fdod, speed;
+
+ if (!CONFIG_IS_ENABLED(PCI))
+ return false;
+ /* Observe SPI Descriptor Component Section 0 */
+ dm_pci_write_config32(priv->pch, 0xb0, 0x1000);
+
+ /* Extract the Write/Erase SPI Frequency from descriptor */
+ dm_pci_read_config32(priv->pch, 0xb4, &fdod);
+
+ /* Bits 23:21 have the fast read clock frequency, 0=20MHz, 1=33MHz */
+ speed = (fdod >> 21) & 7;
+
+ return speed == 1;
+}
+
+static void spi_lock_down(struct ich_spi_plat *plat, void *sbase)
+{
+ if (plat->ich_version == ICHV_7) {
+ struct ich7_spi_regs *ich7_spi = sbase;
+
+ setbits_le16(&ich7_spi->spis, SPIS_LOCK);
+ } else if (plat->ich_version == ICHV_9) {
+ struct ich9_spi_regs *ich9_spi = sbase;
+
+ setbits_le16(&ich9_spi->hsfs, HSFS_FLOCKDN);
+ }
+}
+
+static bool spi_lock_status(struct ich_spi_plat *plat, void *sbase)
+{
+ int lock = 0;
+
+ if (plat->ich_version == ICHV_7) {
+ struct ich7_spi_regs *ich7_spi = sbase;
+
+ lock = readw(&ich7_spi->spis) & SPIS_LOCK;
+ } else if (plat->ich_version == ICHV_9) {
+ struct ich9_spi_regs *ich9_spi = sbase;
+
+ lock = readw(&ich9_spi->hsfs) & HSFS_FLOCKDN;
+ }
+
+ return lock != 0;
+}
+
+static int spi_setup_opcode(struct ich_spi_priv *ctlr, struct spi_trans *trans,
+ bool lock)
+{
+ uint16_t optypes;
+ uint8_t opmenu[ctlr->menubytes];
+
+ if (!lock) {
+ /* The lock is off, so just use index 0. */
+ ich_writeb(ctlr, trans->opcode, ctlr->opmenu);
+ optypes = ich_readw(ctlr, ctlr->optype);
+ optypes = (optypes & 0xfffc) | (trans->type & 0x3);
+ ich_writew(ctlr, optypes, ctlr->optype);
+ return 0;
+ } else {
+ /* The lock is on. See if what we need is on the menu. */
+ uint8_t optype;
+ uint16_t opcode_index;
+
+ /* Write Enable is handled as atomic prefix */
+ if (trans->opcode == SPI_OPCODE_WREN)
+ return 0;
+
+ read_reg(ctlr, ctlr->opmenu, opmenu, sizeof(opmenu));
+ for (opcode_index = 0; opcode_index < ctlr->menubytes;
+ opcode_index++) {
+ if (opmenu[opcode_index] == trans->opcode)
+ break;
+ }
+
+ if (opcode_index == ctlr->menubytes) {
+ debug("ICH SPI: Opcode %x not found\n", trans->opcode);
+ return -EINVAL;
+ }
+
+ optypes = ich_readw(ctlr, ctlr->optype);
+ optype = (optypes >> (opcode_index * 2)) & 0x3;
+
+ if (optype != trans->type) {
+ debug("ICH SPI: Transaction doesn't fit type %d\n",
+ optype);
+ return -ENOSPC;
+ }
+ return opcode_index;
+ }
+}
+
+/*
+ * Wait for up to 6s until the status register bit(s) turn 1 (in case
+ * wait_til_set below is true) or 0. In case the wait was for the bit(s) to
+ * set, write those bits back, which clears them again.
+ *
+ * Return the last read status value on success or -ETIMEDOUT on failure.
+ */
+static int ich_status_poll(struct ich_spi_priv *ctlr, u16 bitmask,
+ int wait_til_set)
+{
+ int timeout = 600000; /* This will result in 6s */
+ u16 status = 0;
+
+ while (timeout--) {
+ status = ich_readw(ctlr, ctlr->status);
+ if (wait_til_set ^ ((status & bitmask) == 0)) {
+ if (wait_til_set) {
+ ich_writew(ctlr, status & bitmask,
+ ctlr->status);
+ }
+ return status;
+ }
+ udelay(10);
+ }
+ debug("ICH SPI: SCIP timeout, read %x, expected %x, wts %x %x\n",
+ status, bitmask, wait_til_set, status & bitmask);
+
+ return -ETIMEDOUT;
+}
+
+static void ich_spi_config_opcode(struct udevice *dev)
+{
+ struct ich_spi_priv *ctlr = dev_get_priv(dev);
+
+ /*
+ * PREOP, OPTYPE, OPMENU1/OPMENU2 registers can be locked down
+ * to prevent accidental or intentional writes. Before they get
+ * locked down, these registers should be initialized properly.
+ */
+ ich_writew(ctlr, SPI_OPPREFIX, ctlr->preop);
+ ich_writew(ctlr, SPI_OPTYPE, ctlr->optype);
+ ich_writel(ctlr, SPI_OPMENU_LOWER, ctlr->opmenu);
+ ich_writel(ctlr, SPI_OPMENU_UPPER, ctlr->opmenu + sizeof(u32));
+}
+
+static int ich_spi_exec_op_swseq(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct udevice *bus = dev_get_parent(slave->dev);
+ struct ich_spi_plat *plat = dev_get_plat(bus);
+ struct ich_spi_priv *ctlr = dev_get_priv(bus);
+ uint16_t control;
+ int16_t opcode_index;
+ int with_address;
+ int status;
+ struct spi_trans *trans = &ctlr->trans;
+ bool lock = spi_lock_status(plat, ctlr->base);
+ int ret = 0;
+
+ trans->in = NULL;
+ trans->out = NULL;
+ trans->type = 0xFF;
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ trans->in = op->data.buf.in;
+ trans->bytesin = op->data.nbytes;
+ } else {
+ trans->out = op->data.buf.out;
+ trans->bytesout = op->data.nbytes;
+ }
+ }
+
+ if (trans->opcode != op->cmd.opcode)
+ trans->opcode = op->cmd.opcode;
+
+ if (lock && trans->opcode == SPI_OPCODE_WRDIS)
+ return 0;
+
+ if (trans->opcode == SPI_OPCODE_WREN) {
+ /*
+ * Treat Write Enable as Atomic Pre-Op if possible
+ * in order to prevent the Management Engine from
+ * issuing a transaction between WREN and DATA.
+ */
+ if (!lock)
+ ich_writew(ctlr, trans->opcode, ctlr->preop);
+ return 0;
+ }
+
+ ret = ich_status_poll(ctlr, SPIS_SCIP, 0);
+ if (ret < 0)
+ return ret;
+
+ if (plat->ich_version == ICHV_7)
+ ich_writew(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
+ else
+ ich_writeb(ctlr, SPIS_CDS | SPIS_FCERR, ctlr->status);
+
+ /* Try to guess spi transaction type */
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->addr.nbytes)
+ trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
+ else
+ trans->type = SPI_OPCODE_TYPE_WRITE_NO_ADDRESS;
+ } else {
+ if (op->addr.nbytes)
+ trans->type = SPI_OPCODE_TYPE_READ_WITH_ADDRESS;
+ else
+ trans->type = SPI_OPCODE_TYPE_READ_NO_ADDRESS;
+ }
+ /* Special erase case handling */
+ if (op->addr.nbytes && !op->data.buswidth)
+ trans->type = SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS;
+
+ opcode_index = spi_setup_opcode(ctlr, trans, lock);
+ if (opcode_index < 0)
+ return -EINVAL;
+
+ if (op->addr.nbytes) {
+ trans->offset = op->addr.val;
+ with_address = 1;
+ }
+
+ if (ctlr->speed && ctlr->max_speed >= 33000000) {
+ int byte;
+
+ byte = ich_readb(ctlr, ctlr->speed);
+ if (ctlr->cur_speed >= 33000000)
+ byte |= SSFC_SCF_33MHZ;
+ else
+ byte &= ~SSFC_SCF_33MHZ;
+ ich_writeb(ctlr, byte, ctlr->speed);
+ }
+
+ /* Preset control fields */
+ control = SPIC_SCGO | ((opcode_index & 0x07) << 4);
+
+ /* Issue atomic preop cycle if needed */
+ if (ich_readw(ctlr, ctlr->preop))
+ control |= SPIC_ACS;
+
+ if (!trans->bytesout && !trans->bytesin) {
+ /* SPI addresses are 24 bit only */
+ if (with_address) {
+ ich_writel(ctlr, trans->offset & 0x00FFFFFF,
+ ctlr->addr);
+ }
+ /*
+ * This is a 'no data' command (like Write Enable); its
+ * bytesout size was 1, decremented to zero while executing
+ * spi_setup_opcode() above. Tell the chip to send the
+ * command.
+ */
+ ich_writew(ctlr, control, ctlr->control);
+
+ /* wait for the result */
+ status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
+ if (status < 0)
+ return status;
+
+ if (status & SPIS_FCERR) {
+ debug("ICH SPI: Command transaction error\n");
+ return -EIO;
+ }
+
+ return 0;
+ }
+
+ while (trans->bytesout || trans->bytesin) {
+ uint32_t data_length;
+
+ /* SPI addresses are 24 bit only */
+ ich_writel(ctlr, trans->offset & 0x00FFFFFF, ctlr->addr);
+
+ if (trans->bytesout)
+ data_length = min(trans->bytesout, ctlr->databytes);
+ else
+ data_length = min(trans->bytesin, ctlr->databytes);
+
+ /* Program data into FDATA0 to N */
+ if (trans->bytesout) {
+ write_reg(ctlr, trans->out, ctlr->data, data_length);
+ trans->bytesout -= data_length;
+ }
+
+ /* Add proper control fields' values */
+ control &= ~((ctlr->databytes - 1) << 8);
+ control |= SPIC_DS;
+ control |= (data_length - 1) << 8;
+
+ /* write it */
+ ich_writew(ctlr, control, ctlr->control);
+
+ /* Wait for Cycle Done Status or Flash Cycle Error */
+ status = ich_status_poll(ctlr, SPIS_CDS | SPIS_FCERR, 1);
+ if (status < 0)
+ return status;
+
+ if (status & SPIS_FCERR) {
+ debug("ICH SPI: Data transaction error %x\n", status);
+ return -EIO;
+ }
+
+ if (trans->bytesin) {
+ read_reg(ctlr, ctlr->data, trans->in, data_length);
+ trans->bytesin -= data_length;
+ }
+ }
+
+ /* Clear atomic preop now that xfer is done */
+ if (!lock)
+ ich_writew(ctlr, 0, ctlr->preop);
+
+ return 0;
+}
+
+/*
+ * Ensure read/write xfer len is not greater than SPIBAR_FDATA_FIFO_SIZE and
+ * that the operation does not cross page boundary.
+ */
+static uint get_xfer_len(u32 offset, int len, int page_size)
+{
+ uint xfer_len = min(len, SPIBAR_FDATA_FIFO_SIZE);
+ uint bytes_left = ALIGN(offset, page_size) - offset;
+
+ if (bytes_left)
+ xfer_len = min(xfer_len, bytes_left);
+
+ return xfer_len;
+}
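+
+/*
+ * Worked example (editorial): for offset = 0x1fc, len = 64 and a 256-byte
+ * page, ALIGN(offset, page_size) - offset = 4, so only 4 bytes go out in
+ * the first transfer; the caller then advances offset and the next chunk
+ * starts page-aligned and can use the full FIFO-sized transfer.
+ */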
+
+/* Fill FDATAn FIFO in preparation for a write transaction */
+static void fill_xfer_fifo(struct fast_spi_regs *regs, const void *data,
+ uint len)
+{
+ memcpy(regs->fdata, data, len);
+}
+
+/* Drain FDATAn FIFO after a read transaction populates data */
+static void drain_xfer_fifo(struct fast_spi_regs *regs, void *dest, uint len)
+{
+ memcpy(dest, regs->fdata, len);
+}
+
+/* Fire up a transfer using the hardware sequencer */
+static void start_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
+ uint offset, uint len)
+{
+ /* Make sure all W1C status bits get cleared */
+ u32 hsfsts;
+
+ hsfsts = readl(&regs->hsfsts_ctl);
+ hsfsts &= ~(HSFSTS_FCYCLE_MASK | HSFSTS_FDBC_MASK);
+ hsfsts |= HSFSTS_AEL | HSFSTS_FCERR | HSFSTS_FDONE;
+
+ /* Set up transaction parameters */
+ hsfsts |= hsfsts_cycle << HSFSTS_FCYCLE_SHIFT;
+ hsfsts |= ((len - 1) << HSFSTS_FDBC_SHIFT) & HSFSTS_FDBC_MASK;
+ hsfsts |= HSFSTS_FGO;
+
+ writel(offset, &regs->faddr);
+ writel(hsfsts, &regs->hsfsts_ctl);
+}
+
+static int wait_for_hwseq_xfer(struct fast_spi_regs *regs, uint offset)
+{
+ ulong start;
+ u32 hsfsts;
+
+ start = get_timer(0);
+ do {
+ hsfsts = readl(&regs->hsfsts_ctl);
+ if (hsfsts & HSFSTS_FCERR) {
+ debug("SPI transaction error at offset %x HSFSTS = %08x\n",
+ offset, hsfsts);
+ return -EIO;
+ }
+ if (hsfsts & HSFSTS_AEL)
+ return -EPERM;
+
+ if (hsfsts & HSFSTS_FDONE)
+ return 0;
+ } while (get_timer(start) < SPIBAR_HWSEQ_XFER_TIMEOUT_MS);
+
+ debug("SPI transaction timeout at offset %x HSFSTS = %08x, timer %d\n",
+ offset, hsfsts, (uint)get_timer(start));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * exec_sync_hwseq_xfer() - Execute flash transfer by hardware sequencing
+ *
+ * This waits until the transfer completes or times out
+ *
+ * @regs: SPI registers
+ * @hsfsts_cycle: Cycle type (enum hsfsts_cycle_t)
+ * @offset: Offset to access
+ * @len: Number of bytes to transfer (can be 0)
+ * @return 0 if OK, -EIO on flash-cycle error (FCERR), -EPERM on access error
+ * (AEL), -ETIMEDOUT on timeout
+ */
+static int exec_sync_hwseq_xfer(struct fast_spi_regs *regs, uint hsfsts_cycle,
+ uint offset, uint len)
+{
+ start_hwseq_xfer(regs, hsfsts_cycle, offset, len);
+
+ return wait_for_hwseq_xfer(regs, offset);
+}
+
+static int ich_spi_exec_op_hwseq(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct spi_flash *flash = dev_get_uclass_priv(slave->dev);
+ struct udevice *bus = dev_get_parent(slave->dev);
+ struct ich_spi_priv *priv = dev_get_priv(bus);
+ struct fast_spi_regs *regs = priv->base;
+ uint page_size;
+ uint offset;
+ int cycle;
+ uint len;
+ bool out;
+ int ret;
+ u8 *buf;
+
+ offset = op->addr.val;
+ len = op->data.nbytes;
+
+ switch (op->cmd.opcode) {
+ case SPINOR_OP_RDID:
+ cycle = HSFSTS_CYCLE_RDID;
+ break;
+ case SPINOR_OP_READ_FAST:
+ cycle = HSFSTS_CYCLE_READ;
+ break;
+ case SPINOR_OP_PP:
+ cycle = HSFSTS_CYCLE_WRITE;
+ break;
+ case SPINOR_OP_WREN:
+ /* Nothing needs to be done */
+ return 0;
+ case SPINOR_OP_WRSR:
+ cycle = HSFSTS_CYCLE_WR_STATUS;
+ break;
+ case SPINOR_OP_RDSR:
+ cycle = HSFSTS_CYCLE_RD_STATUS;
+ break;
+ case SPINOR_OP_WRDI:
+ return 0; /* ignore */
+ case SPINOR_OP_BE_4K:
+ cycle = HSFSTS_CYCLE_4K_ERASE;
+ ret = exec_sync_hwseq_xfer(regs, cycle, offset, 0);
+ return ret;
+ default:
+ debug("Unknown cycle %x\n", op->cmd.opcode);
+ return -EINVAL;
+ };
+
+ out = op->data.dir == SPI_MEM_DATA_OUT;
+ buf = out ? (u8 *)op->data.buf.out : op->data.buf.in;
+ page_size = flash->page_size ? : 256;
+
+ while (len) {
+ uint xfer_len = get_xfer_len(offset, len, page_size);
+
+ if (out)
+ fill_xfer_fifo(regs, buf, xfer_len);
+
+ ret = exec_sync_hwseq_xfer(regs, cycle, offset, xfer_len);
+ if (ret)
+ return ret;
+
+ if (!out)
+ drain_xfer_fifo(regs, buf, xfer_len);
+
+ offset += xfer_len;
+ buf += xfer_len;
+ len -= xfer_len;
+ }
+
+ return 0;
+}
+
+static int ich_spi_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
+{
+ struct udevice *bus = dev_get_parent(slave->dev);
+ struct ich_spi_plat *plat = dev_get_plat(bus);
+ int ret;
+
+ bootstage_start(BOOTSTAGE_ID_ACCUM_SPI, "fast_spi");
+ if (plat->hwseq)
+ ret = ich_spi_exec_op_hwseq(slave, op);
+ else
+ ret = ich_spi_exec_op_swseq(slave, op);
+ bootstage_accum(BOOTSTAGE_ID_ACCUM_SPI);
+
+ return ret;
+}
+
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+/**
+ * ich_spi_get_basics() - Get basic information about the ICH device
+ *
+ * This works without probing any devices if requested.
+ *
+ * @bus: SPI controller to use
+ * @can_probe: true if this function is allowed to probe the PCH
+ * @pchp: Returns a pointer to the pch, or NULL if not found
+ * @ich_versionp: Returns ICH version detected on success
+ * @mmio_basep: Returns the address of the SPI registers on success
+ * @return 0 if OK, -EPROTOTYPE if the PCH could not be found, -EAGAIN if
+ * the function cannot succeed without probing, possibly another error if
+ * pch_get_spi_base() fails
+ */
+static int ich_spi_get_basics(struct udevice *bus, bool can_probe,
+ struct udevice **pchp,
+ enum ich_version *ich_versionp, ulong *mmio_basep)
+{
+ struct udevice *pch = NULL;
+ int ret = 0;
+
+ /* Find a PCH if there is one */
+ if (can_probe) {
+ pch = dev_get_parent(bus);
+ if (device_get_uclass_id(pch) != UCLASS_PCH) {
+ uclass_first_device(UCLASS_PCH, &pch);
+ if (!pch)
+ return log_msg_ret("uclass", -EPROTOTYPE);
+ }
+ }
+
+ *ich_versionp = dev_get_driver_data(bus);
+ if (*ich_versionp == ICHV_APL)
+ *mmio_basep = dm_pci_read_bar32(bus, 0);
+ else if (pch)
+ ret = pch_get_spi_base(pch, mmio_basep);
+ else
+ return -EAGAIN;
+ *pchp = pch;
+
+ return ret;
+}
+#endif
+
+/**
+ * ich_get_mmap_bus() - Handle the get_mmap() method for a bus
+ *
+ * There are several cases to consider:
+ * 1. Using of-platdata, in which case we have the BDF and can access the
+ * registers by reading the BAR
+ * 2. Not using of-platdata, but still with a SPI controller that is on its own
+ *    PCI BDF. In this case we read the BDF from the parent plat and again get
+ * the registers by reading the BAR
+ * 3. Using a SPI controller that is a child of the PCH, in which case we try
+ * to find the registers by asking the PCH. This only works if the PCH has
+ * been probed (which it will be if the bus is probed since parents are
+ * probed before children), since the PCH may not have a PCI address until
+ * its parent (the PCI bus itself) has been probed. If you are using this
+ * method then you should make sure the SPI bus is probed.
+ *
+ * The first two cases are useful in early init. The last one is more useful
+ * afterwards.
+ */
+static int ich_get_mmap_bus(struct udevice *bus, ulong *map_basep,
+ uint *map_sizep, uint *offsetp)
+{
+ pci_dev_t spi_bdf;
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+ if (device_is_on_pci_bus(bus)) {
+ struct pci_child_plat *pplat;
+
+ pplat = dev_get_parent_plat(bus);
+ spi_bdf = pplat->devfn;
+ } else {
+ enum ich_version ich_version;
+ struct fast_spi_regs *regs;
+ struct udevice *pch;
+ ulong mmio_base;
+ int ret;
+
+ ret = ich_spi_get_basics(bus, device_active(bus), &pch,
+ &ich_version, &mmio_base);
+ if (ret)
+ return log_msg_ret("basics", ret);
+ regs = (struct fast_spi_regs *)mmio_base;
+
+ return fast_spi_get_bios_mmap_regs(regs, map_basep, map_sizep,
+ offsetp);
+ }
+#else
+ struct ich_spi_plat *plat = dev_get_plat(bus);
+
+ /*
+ * We cannot rely on plat->bdf being set up yet since this method can
+ * be called before the device is probed. Use the of-platdata directly
+ * instead.
+ */
+ spi_bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
+#endif
+
+ return fast_spi_get_bios_mmap(spi_bdf, map_basep, map_sizep, offsetp);
+}
+
+static int ich_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
+ uint *offsetp)
+{
+ struct udevice *bus = dev_get_parent(dev);
+
+ return ich_get_mmap_bus(bus, map_basep, map_sizep, offsetp);
+}
+
+static int ich_spi_adjust_size(struct spi_slave *slave, struct spi_mem_op *op)
+{
+ unsigned int page_offset;
+ int addr = op->addr.val;
+ unsigned int byte_count = op->data.nbytes;
+
+ if (hweight32(ICH_BOUNDARY) == 1) {
+ page_offset = addr & (ICH_BOUNDARY - 1);
+ } else {
+ u64 aux = addr;
+
+ page_offset = do_div(aux, ICH_BOUNDARY);
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (slave->max_read_size) {
+ op->data.nbytes = min(ICH_BOUNDARY - page_offset,
+ slave->max_read_size);
+ }
+ } else if (slave->max_write_size) {
+ op->data.nbytes = min(ICH_BOUNDARY - page_offset,
+ slave->max_write_size);
+ }
+
+ op->data.nbytes = min(op->data.nbytes, byte_count);
+
+ return 0;
+}
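The clamping in ich_spi_adjust_size() is easiest to see with numbers. The sketch below mirrors the read path only, with ICH_BOUNDARY assumed to be 0x1000 (as defined in ich.h below) and a made-up 64-byte max_read_size; it is an illustration, not driver code.

#include <stdio.h>

#define ICH_BOUNDARY 0x1000

/* Read-path clamping, as in ich_spi_adjust_size() for SPI_MEM_DATA_IN */
static unsigned int adjust_read(unsigned int addr, unsigned int nbytes,
				unsigned int max_read_size)
{
	unsigned int page_offset = addr & (ICH_BOUNDARY - 1);
	unsigned int byte_count = nbytes;

	if (max_read_size)
		nbytes = ICH_BOUNDARY - page_offset < max_read_size ?
			 ICH_BOUNDARY - page_offset : max_read_size;

	return nbytes < byte_count ? nbytes : byte_count;
}

int main(void)
{
	/* a 0x200-byte read at 0xf80 with a 64-byte limit is cut to 64 bytes */
	printf("%u\n", adjust_read(0xf80, 0x200, 64));	/* prints 64 */
	/* near the 4KiB boundary the boundary wins: 0xff0 -> 16 bytes */
	printf("%u\n", adjust_read(0xff0, 0x200, 64));	/* prints 16 */
	return 0;
}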
+
+static int ich_protect_lockdown(struct udevice *dev)
+{
+ struct ich_spi_plat *plat = dev_get_plat(dev);
+ struct ich_spi_priv *priv = dev_get_priv(dev);
+ int ret = -ENOSYS;
+
+ /* Disable the BIOS write protect so write commands are allowed */
+ if (priv->pch)
+ ret = pch_set_spi_protect(priv->pch, false);
+ if (ret == -ENOSYS) {
+ u8 bios_cntl;
+
+ bios_cntl = ich_readb(priv, priv->bcr);
+ bios_cntl &= ~BIT(5); /* clear Enable InSMM_STS (EISS) */
+ bios_cntl |= 1; /* Write Protect Disable (WPD) */
+ ich_writeb(priv, bios_cntl, priv->bcr);
+ } else if (ret) {
+ debug("%s: Failed to disable write-protect: err=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Lock down SPI controller settings if required */
+ if (plat->lockdown) {
+ ich_spi_config_opcode(dev);
+ spi_lock_down(plat, priv->base);
+ }
+
+ return 0;
+}
+
+static int ich_init_controller(struct udevice *dev,
+ struct ich_spi_plat *plat,
+ struct ich_spi_priv *ctlr)
+{
+ if (spl_phase() == PHASE_TPL) {
+ struct ich_spi_plat *plat = dev_get_plat(dev);
+ int ret;
+
+ ret = fast_spi_early_init(plat->bdf, plat->mmio_base);
+ if (ret)
+ return ret;
+ }
+
+ ctlr->base = (void *)plat->mmio_base;
+ if (plat->ich_version == ICHV_7) {
+ struct ich7_spi_regs *ich7_spi = ctlr->base;
+
+ ctlr->opmenu = offsetof(struct ich7_spi_regs, opmenu);
+ ctlr->menubytes = sizeof(ich7_spi->opmenu);
+ ctlr->optype = offsetof(struct ich7_spi_regs, optype);
+ ctlr->addr = offsetof(struct ich7_spi_regs, spia);
+ ctlr->data = offsetof(struct ich7_spi_regs, spid);
+ ctlr->databytes = sizeof(ich7_spi->spid);
+ ctlr->status = offsetof(struct ich7_spi_regs, spis);
+ ctlr->control = offsetof(struct ich7_spi_regs, spic);
+ ctlr->bbar = offsetof(struct ich7_spi_regs, bbar);
+ ctlr->preop = offsetof(struct ich7_spi_regs, preop);
+ } else if (plat->ich_version == ICHV_9) {
+ struct ich9_spi_regs *ich9_spi = ctlr->base;
+
+ ctlr->opmenu = offsetof(struct ich9_spi_regs, opmenu);
+ ctlr->menubytes = sizeof(ich9_spi->opmenu);
+ ctlr->optype = offsetof(struct ich9_spi_regs, optype);
+ ctlr->addr = offsetof(struct ich9_spi_regs, faddr);
+ ctlr->data = offsetof(struct ich9_spi_regs, fdata);
+ ctlr->databytes = sizeof(ich9_spi->fdata);
+ ctlr->status = offsetof(struct ich9_spi_regs, ssfs);
+ ctlr->control = offsetof(struct ich9_spi_regs, ssfc);
+ ctlr->speed = ctlr->control + 2;
+ ctlr->bbar = offsetof(struct ich9_spi_regs, bbar);
+ ctlr->preop = offsetof(struct ich9_spi_regs, preop);
+ ctlr->bcr = offsetof(struct ich9_spi_regs, bcr);
+ ctlr->pr = &ich9_spi->pr[0];
+ } else if (plat->ich_version == ICHV_APL) {
+ } else {
+ debug("ICH SPI: Unrecognised ICH version %d\n",
+ plat->ich_version);
+ return -EINVAL;
+ }
+
+ /* Work out the maximum speed we can support */
+ ctlr->max_speed = 20000000;
+ if (plat->ich_version == ICHV_9 && ich9_can_do_33mhz(dev))
+ ctlr->max_speed = 33000000;
+ debug("ICH SPI: Version ID %d detected at %lx, speed %ld\n",
+ plat->ich_version, plat->mmio_base, ctlr->max_speed);
+
+ ich_set_bbar(ctlr, 0);
+
+ return 0;
+}
+
+static int ich_cache_bios_region(struct udevice *dev)
+{
+ ulong map_base;
+ uint map_size;
+ uint offset;
+ ulong base;
+ int ret;
+
+ ret = ich_get_mmap_bus(dev, &map_base, &map_size, &offset);
+ if (ret)
+ return ret;
+
+ /* Don't use WRBACK since we are not supposed to write to SPI flash */
+ base = SZ_4G - map_size;
+ mtrr_set_next_var(MTRR_TYPE_WRPROT, base, map_size);
+ log_debug("BIOS cache base=%lx, size=%x\n", base, (uint)map_size);
+
+ return 0;
+}
+
+static int ich_spi_probe(struct udevice *dev)
+{
+ struct ich_spi_plat *plat = dev_get_plat(dev);
+ struct ich_spi_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ ret = ich_init_controller(dev, plat, priv);
+ if (ret)
+ return ret;
+
+ if (spl_phase() == PHASE_TPL) {
+ /* Cache the BIOS to speed things up */
+ ret = ich_cache_bios_region(dev);
+ if (ret)
+ return ret;
+ } else {
+ ret = ich_protect_lockdown(dev);
+ if (ret)
+ return ret;
+ }
+ priv->cur_speed = priv->max_speed;
+
+ return 0;
+}
+
+static int ich_spi_remove(struct udevice *bus)
+{
+ /*
+ * Configure SPI controller so that the Linux MTD driver can fully
+ * access the SPI NOR chip
+ */
+ ich_spi_config_opcode(bus);
+
+ return 0;
+}
+
+static int ich_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct ich_spi_priv *priv = dev_get_priv(bus);
+
+ priv->cur_speed = speed;
+
+ return 0;
+}
+
+static int ich_spi_set_mode(struct udevice *bus, uint mode)
+{
+ debug("%s: mode=%d\n", __func__, mode);
+
+ return 0;
+}
+
+static int ich_spi_child_pre_probe(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct ich_spi_plat *plat = dev_get_plat(bus);
+ struct ich_spi_priv *priv = dev_get_priv(bus);
+ struct spi_slave *slave = dev_get_parent_priv(dev);
+
+ /*
+ * Yes this controller can only write a small number of bytes at
+	 * once! The limit is typically 64 bytes. For hardware sequencing,
+	 * a loop is used to get around this.
+ */
+ if (!plat->hwseq)
+ slave->max_write_size = priv->databytes;
+ /*
+ * ICH 7 SPI controller only supports array read command
+ * and byte program command for SST flash
+ */
+ if (plat->ich_version == ICHV_7)
+ slave->mode = SPI_RX_SLOW | SPI_TX_BYTE;
+
+ return 0;
+}
+
+static int ich_spi_of_to_plat(struct udevice *dev)
+{
+ struct ich_spi_plat *plat = dev_get_plat(dev);
+
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct ich_spi_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ ret = ich_spi_get_basics(dev, true, &priv->pch, &plat->ich_version,
+ &plat->mmio_base);
+ if (ret)
+ return log_msg_ret("basics", ret);
+ plat->lockdown = dev_read_bool(dev, "intel,spi-lock-down");
+ /*
+ * Use an int so that the property is present in of-platdata even
+ * when false.
+ */
+ plat->hwseq = dev_read_u32_default(dev, "intel,hardware-seq", 0);
+#else
+ plat->ich_version = ICHV_APL;
+ plat->mmio_base = plat->dtplat.early_regs[0];
+ plat->bdf = pci_ofplat_get_devfn(plat->dtplat.reg[0]);
+ plat->hwseq = plat->dtplat.intel_hardware_seq;
+#endif
+ debug("%s: mmio_base=%lx\n", __func__, plat->mmio_base);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops ich_controller_mem_ops = {
+ .adjust_op_size = ich_spi_adjust_size,
+ .supports_op = NULL,
+ .exec_op = ich_spi_exec_op,
+};
+
+static const struct dm_spi_ops ich_spi_ops = {
+ /* xfer is not supported */
+ .set_speed = ich_spi_set_speed,
+ .set_mode = ich_spi_set_mode,
+ .mem_ops = &ich_controller_mem_ops,
+ .get_mmap = ich_get_mmap,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id ich_spi_ids[] = {
+ { .compatible = "intel,ich7-spi", ICHV_7 },
+ { .compatible = "intel,ich9-spi", ICHV_9 },
+ { .compatible = "intel,fast-spi", ICHV_APL },
+ { }
+};
+
+U_BOOT_DRIVER(intel_fast_spi) = {
+ .name = "intel_fast_spi",
+ .id = UCLASS_SPI,
+ .of_match = ich_spi_ids,
+ .ops = &ich_spi_ops,
+ .of_to_plat = ich_spi_of_to_plat,
+ .plat_auto = sizeof(struct ich_spi_plat),
+ .priv_auto = sizeof(struct ich_spi_priv),
+ .child_pre_probe = ich_spi_child_pre_probe,
+ .probe = ich_spi_probe,
+ .remove = ich_spi_remove,
+ .flags = DM_FLAG_OS_PREPARE,
+};
diff --git a/roms/u-boot/drivers/spi/ich.h b/roms/u-boot/drivers/spi/ich.h
new file mode 100644
index 000000000..8fd150d44
--- /dev/null
+++ b/roms/u-boot/drivers/spi/ich.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2011 The Chromium OS Authors.
+ *
+ * This file is derived from the flashrom project.
+ */
+
+#ifndef _ICH_H_
+#define _ICH_H_
+
+#include <linux/bitops.h>
+struct ich7_spi_regs {
+ uint16_t spis;
+ uint16_t spic;
+ uint32_t spia;
+ uint64_t spid[8];
+ uint64_t _pad;
+ uint32_t bbar;
+ uint16_t preop;
+ uint16_t optype;
+ uint8_t opmenu[8];
+} __packed;
+
+struct ich9_spi_regs {
+ uint32_t bfpr; /* 0x00 */
+ uint16_t hsfs;
+ uint16_t hsfc;
+ uint32_t faddr;
+ uint32_t _reserved0;
+ uint32_t fdata[16]; /* 0x10 */
+ uint32_t frap; /* 0x50 */
+ uint32_t freg[5];
+ uint32_t _reserved1[3];
+ uint32_t pr[5]; /* 0x74 */
+ uint32_t _reserved2[2];
+ uint8_t ssfs; /* 0x90 */
+ uint8_t ssfc[3];
+ uint16_t preop; /* 0x94 */
+ uint16_t optype;
+ uint8_t opmenu[8]; /* 0x98 */
+ uint32_t bbar;
+ uint8_t _reserved3[12];
+ uint32_t fdoc; /* 0xb0 */
+ uint32_t fdod;
+ uint8_t _reserved4[8];
+ uint32_t afc; /* 0xc0 */
+ uint32_t lvscc;
+ uint32_t uvscc;
+ uint8_t _reserved5[4];
+ uint32_t fpb; /* 0xd0 */
+ uint8_t _reserved6[28];
+ uint32_t srdl; /* 0xf0 */
+ uint32_t srdc;
+ uint32_t scs;
+ uint32_t bcr;
+} __packed;
+
+enum {
+ SPIS_SCIP = 0x0001,
+ SPIS_GRANT = 0x0002,
+ SPIS_CDS = 0x0004,
+ SPIS_FCERR = 0x0008,
+ SSFS_AEL = 0x0010,
+ SPIS_LOCK = 0x8000,
+ SPIS_RESERVED_MASK = 0x7ff0,
+ SSFS_RESERVED_MASK = 0x7fe2
+};
+
+enum {
+ SPIC_SCGO = 0x000002,
+ SPIC_ACS = 0x000004,
+ SPIC_SPOP = 0x000008,
+ SPIC_DBC = 0x003f00,
+ SPIC_DS = 0x004000,
+ SPIC_SME = 0x008000,
+ SSFC_SCF_MASK = 0x070000,
+ SSFC_RESERVED = 0xf80000,
+
+	/* Mask for the speed byte, bits 23:16 of SSFC */
+ SSFC_SCF_33MHZ = 0x01,
+};
+
+enum {
+ HSFS_FDONE = 0x0001,
+ HSFS_FCERR = 0x0002,
+ HSFS_AEL = 0x0004,
+ HSFS_BERASE_MASK = 0x0018,
+ HSFS_BERASE_SHIFT = 3,
+ HSFS_SCIP = 0x0020,
+ HSFS_FDOPSS = 0x2000,
+ HSFS_FDV = 0x4000,
+ HSFS_FLOCKDN = 0x8000
+};
+
+enum {
+ HSFC_FGO = 0x0001,
+ HSFC_FCYCLE_MASK = 0x0006,
+ HSFC_FCYCLE_SHIFT = 1,
+ HSFC_FDBC_MASK = 0x3f00,
+ HSFC_FDBC_SHIFT = 8,
+ HSFC_FSMIE = 0x8000
+};
+
+struct spi_trans {
+ uint8_t cmd;
+ const uint8_t *out;
+ uint32_t bytesout;
+ uint8_t *in;
+ uint32_t bytesin;
+ uint8_t type;
+ uint8_t opcode;
+ uint32_t offset;
+};
+
+#define SPI_OPCODE_WRSR 0x01
+#define SPI_OPCODE_PAGE_PROGRAM 0x02
+#define SPI_OPCODE_READ 0x03
+#define SPI_OPCODE_WRDIS 0x04
+#define SPI_OPCODE_RDSR 0x05
+#define SPI_OPCODE_WREN 0x06
+#define SPI_OPCODE_FAST_READ 0x0b
+#define SPI_OPCODE_ERASE_SECT 0x20
+#define SPI_OPCODE_READ_ID 0x9f
+#define SPI_OPCODE_ERASE_BLOCK 0xd8
+
+#define SPI_OPCODE_TYPE_READ_NO_ADDRESS 0
+#define SPI_OPCODE_TYPE_WRITE_NO_ADDRESS 1
+#define SPI_OPCODE_TYPE_READ_WITH_ADDRESS 2
+#define SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS 3
+
+#define SPI_OPMENU_0 SPI_OPCODE_WRSR
+#define SPI_OPTYPE_0 SPI_OPCODE_TYPE_WRITE_NO_ADDRESS
+
+#define SPI_OPMENU_1 SPI_OPCODE_PAGE_PROGRAM
+#define SPI_OPTYPE_1 SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS
+
+#define SPI_OPMENU_2 SPI_OPCODE_READ
+#define SPI_OPTYPE_2 SPI_OPCODE_TYPE_READ_WITH_ADDRESS
+
+#define SPI_OPMENU_3 SPI_OPCODE_RDSR
+#define SPI_OPTYPE_3 SPI_OPCODE_TYPE_READ_NO_ADDRESS
+
+#define SPI_OPMENU_4 SPI_OPCODE_ERASE_SECT
+#define SPI_OPTYPE_4 SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS
+
+#define SPI_OPMENU_5 SPI_OPCODE_READ_ID
+#define SPI_OPTYPE_5 SPI_OPCODE_TYPE_READ_NO_ADDRESS
+
+#define SPI_OPMENU_6 SPI_OPCODE_ERASE_BLOCK
+#define SPI_OPTYPE_6 SPI_OPCODE_TYPE_WRITE_WITH_ADDRESS
+
+#define SPI_OPMENU_7 SPI_OPCODE_FAST_READ
+#define SPI_OPTYPE_7 SPI_OPCODE_TYPE_READ_WITH_ADDRESS
+
+#define SPI_OPPREFIX ((SPI_OPCODE_WREN << 8) | SPI_OPCODE_WREN)
+#define SPI_OPTYPE ((SPI_OPTYPE_7 << 14) | (SPI_OPTYPE_6 << 12) | \
+ (SPI_OPTYPE_5 << 10) | (SPI_OPTYPE_4 << 8) | \
+ (SPI_OPTYPE_3 << 6) | (SPI_OPTYPE_2 << 4) | \
+ (SPI_OPTYPE_1 << 2) | (SPI_OPTYPE_0 << 0))
+#define SPI_OPMENU_UPPER ((SPI_OPMENU_7 << 24) | (SPI_OPMENU_6 << 16) | \
+ (SPI_OPMENU_5 << 8) | (SPI_OPMENU_4 << 0))
+#define SPI_OPMENU_LOWER ((SPI_OPMENU_3 << 24) | (SPI_OPMENU_2 << 16) | \
+ (SPI_OPMENU_1 << 8) | (SPI_OPMENU_0 << 0))
+
+#define ICH_BOUNDARY 0x1000
+
+#define HSFSTS_FDBC_SHIFT 24
+#define HSFSTS_FDBC_MASK (0x3f << HSFSTS_FDBC_SHIFT)
+#define HSFSTS_WET BIT(21)
+#define HSFSTS_FCYCLE_SHIFT 17
+#define HSFSTS_FCYCLE_MASK (0xf << HSFSTS_FCYCLE_SHIFT)
+
+/* Supported flash cycle types */
+enum hsfsts_cycle_t {
+ HSFSTS_CYCLE_READ = 0,
+ HSFSTS_CYCLE_WRITE = 2,
+ HSFSTS_CYCLE_4K_ERASE,
+ HSFSTS_CYCLE_64K_ERASE,
+ HSFSTS_CYCLE_RDSFDP,
+ HSFSTS_CYCLE_RDID,
+ HSFSTS_CYCLE_WR_STATUS,
+ HSFSTS_CYCLE_RD_STATUS,
+};
+
+#define HSFSTS_FGO BIT(16)
+#define HSFSTS_FLOCKDN BIT(15)
+#define HSFSTS_FDV BIT(14)
+#define HSFSTS_FDOPSS BIT(13)
+#define HSFSTS_WRSDIS BIT(11)
+#define HSFSTS_SAF_CE BIT(8)
+#define HSFSTS_SAF_ACTIVE BIT(7)
+#define HSFSTS_SAF_LE BIT(6)
+#define HSFSTS_SCIP BIT(5)
+#define HSFSTS_SAF_DLE BIT(4)
+#define HSFSTS_SAF_ERROR BIT(3)
+#define HSFSTS_AEL BIT(2)
+#define HSFSTS_FCERR BIT(1)
+#define HSFSTS_FDONE BIT(0)
+#define HSFSTS_W1C_BITS 0xff
+
+/* Maximum bytes of data that can fit in FDATAn (0x10) registers */
+#define SPIBAR_FDATA_FIFO_SIZE 0x40
+
+#define SPIBAR_HWSEQ_XFER_TIMEOUT_MS 5000
+
+enum ich_version {
+ ICHV_7,
+ ICHV_9,
+ ICHV_APL,
+};
+
+struct ich_spi_priv {
+ int opmenu;
+ int menubytes;
+ void *base; /* Base of register set */
+ int preop;
+ int optype;
+ int addr;
+ int data;
+ unsigned databytes;
+ int status;
+ int control;
+ int bbar;
+ int bcr;
+ uint32_t *pr; /* only for ich9 */
+ int speed; /* pointer to speed control */
+	ulong max_speed;	/* Maximum bus speed in Hz */
+ ulong cur_speed; /* Current bus speed */
+ struct spi_trans trans; /* current transaction in progress */
+ struct udevice *pch; /* PCH, used to control SPI access */
+};
+
+struct ich_spi_plat {
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct dtd_intel_fast_spi dtplat;
+#endif
+ enum ich_version ich_version; /* Controller version, 7 or 9 */
+ bool lockdown; /* lock down controller settings? */
+ ulong mmio_base; /* Base of MMIO registers */
+ pci_dev_t bdf; /* PCI address used by of-platdata */
+ bool hwseq; /* Use hardware sequencing (not s/w) */
+};
+
+#endif /* _ICH_H_ */
diff --git a/roms/u-boot/drivers/spi/kirkwood_spi.c b/roms/u-boot/drivers/spi/kirkwood_spi.c
new file mode 100644
index 000000000..bc5da0a1e
--- /dev/null
+++ b/roms/u-boot/drivers/spi/kirkwood_spi.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2009
+ * Marvell Semiconductor <www.marvell.com>
+ * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
+ *
+ * Derived from drivers/spi/mpc8xxx_spi.c
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/io.h>
+#include <asm/arch/soc.h>
+#ifdef CONFIG_ARCH_KIRKWOOD
+#include <asm/arch/mpp.h>
+#endif
+#include <asm/arch-mvebu/spi.h>
+
+struct mvebu_spi_dev {
+ bool is_errata_50mhz_ac;
+};
+
+struct mvebu_spi_plat {
+ struct kwspi_registers *spireg;
+ bool is_errata_50mhz_ac;
+};
+
+struct mvebu_spi_priv {
+ struct kwspi_registers *spireg;
+};
+
+static void _spi_cs_activate(struct kwspi_registers *reg)
+{
+ setbits_le32(&reg->ctrl, KWSPI_CSN_ACT);
+}
+
+static void _spi_cs_deactivate(struct kwspi_registers *reg)
+{
+ clrbits_le32(&reg->ctrl, KWSPI_CSN_ACT);
+}
+
+static int _spi_xfer(struct kwspi_registers *reg, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ unsigned int tmpdout, tmpdin;
+ int tm, isread = 0;
+
+ debug("spi_xfer: dout %p din %p bitlen %u\n", dout, din, bitlen);
+
+ if (flags & SPI_XFER_BEGIN)
+ _spi_cs_activate(reg);
+
+ /*
+ * handle data in 8-bit chunks
+ * TBD: 2byte xfer mode to be enabled
+ */
+ clrsetbits_le32(&reg->cfg, KWSPI_XFERLEN_MASK, KWSPI_XFERLEN_1BYTE);
+
+ while (bitlen > 4) {
+ debug("loopstart bitlen %d\n", bitlen);
+ tmpdout = 0;
+
+ /* Shift data so it's msb-justified */
+ if (dout)
+ tmpdout = *(u32 *)dout & 0xff;
+
+ clrbits_le32(&reg->irq_cause, KWSPI_SMEMRDIRQ);
+ writel(tmpdout, &reg->dout); /* Write the data out */
+ debug("*** spi_xfer: ... %08x written, bitlen %d\n",
+ tmpdout, bitlen);
+
+ /*
+ * Wait for SPI transmit to get out
+ * or time out (1 second = 1000 ms)
+ * The NE event must be read and cleared first
+ */
+ for (tm = 0, isread = 0; tm < KWSPI_TIMEOUT; ++tm) {
+ if (readl(&reg->irq_cause) & KWSPI_SMEMRDIRQ) {
+ isread = 1;
+ tmpdin = readl(&reg->din);
+ debug("spi_xfer: din %p..%08x read\n",
+ din, tmpdin);
+
+ if (din) {
+ *((u8 *)din) = (u8)tmpdin;
+ din += 1;
+ }
+ if (dout)
+ dout += 1;
+ bitlen -= 8;
+ }
+ if (isread)
+ break;
+ }
+ if (tm >= KWSPI_TIMEOUT)
+ printf("*** spi_xfer: Time out during SPI transfer\n");
+
+ debug("loopend bitlen %d\n", bitlen);
+ }
+
+ if (flags & SPI_XFER_END)
+ _spi_cs_deactivate(reg);
+
+ return 0;
+}
+
+static int mvebu_spi_set_speed(struct udevice *bus, uint hz)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct dm_spi_bus *spi = dev_get_uclass_priv(bus);
+ struct kwspi_registers *reg = plat->spireg;
+ u32 data, divider;
+ unsigned int spr, sppr;
+
+ if (spi->max_hz && (hz > spi->max_hz)) {
+ debug("%s: limit speed to the max_hz of the bus %d\n",
+ __func__, spi->max_hz);
+ hz = spi->max_hz;
+ }
+
+ /*
+	 * Calculate spi clock prescaler using max_hz.
+ * SPPR is SPI Baud Rate Pre-selection, it holds bits 5 and 7:6 in
+ * SPI Interface Configuration Register;
+ * SPR is SPI Baud Rate Selection, it holds bits 3:0 in SPI Interface
+ * Configuration Register.
+ * The SPR together with the SPPR define the SPI CLK frequency as
+ * follows:
+ * SPI actual frequency = core_clk / (SPR * (2 ^ SPPR))
+ */
+ divider = DIV_ROUND_UP(CONFIG_SYS_TCLK, hz);
+ if (divider < 16) {
+ /* This is the easy case, divider is less than 16 */
+ spr = divider;
+ sppr = 0;
+
+ } else {
+ unsigned int two_pow_sppr;
+ /*
+ * Find the highest bit set in divider. This and the
+ * three next bits define SPR (apart from rounding).
+ * SPPR is then the number of zero bits that must be
+ * appended:
+ */
+ sppr = fls(divider) - 4;
+
+ /*
+ * As SPR only has 4 bits, we have to round divider up
+ * to the next multiple of 2 ** sppr.
+ */
+ two_pow_sppr = 1 << sppr;
+ divider = (divider + two_pow_sppr - 1) & -two_pow_sppr;
+
+ /*
+ * recalculate sppr as rounding up divider might have
+ * increased it enough to change the position of the
+ * highest set bit. In this case the bit that now
+ * doesn't make it into SPR is 0, so there is no need to
+ * round again.
+ */
+ sppr = fls(divider) - 4;
+ spr = divider >> sppr;
+
+ /*
+ * Now do range checking. SPR is constructed to have a
+ * width of 4 bits, so this is fine for sure. So we
+ * still need to check for sppr to fit into 3 bits:
+ */
+ if (sppr > 7)
+ return -EINVAL;
+ }
+
+ data = ((sppr & 0x6) << 5) | ((sppr & 0x1) << 4) | spr;
+
+ /* program spi clock prescaler using max_hz */
+ writel(KWSPI_ADRLEN_3BYTE | data, &reg->cfg);
+ debug("data = 0x%08x\n", data);
+
+ return 0;
+}
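The SPR/SPPR arithmetic above can be checked host-side. The sketch below re-implements the same steps with fls() written out and CONFIG_SYS_TCLK assumed to be 250 MHz purely for illustration.

#include <stdio.h>

#define TCLK 250000000u	/* assumed core clock for the example */

static int fls_u32(unsigned int x)	/* highest set bit, 1-based; 0 if x == 0 */
{
	int n = 0;

	while (x) {
		x >>= 1;
		n++;
	}
	return n;
}

/* Same divider selection as mvebu_spi_set_speed() */
static int calc(unsigned int hz, unsigned int *sprp, unsigned int *spprp)
{
	unsigned int divider = (TCLK + hz - 1) / hz;
	unsigned int spr, sppr;

	if (divider < 16) {
		spr = divider;
		sppr = 0;
	} else {
		sppr = fls_u32(divider) - 4;
		divider = (divider + (1u << sppr) - 1) & ~((1u << sppr) - 1);
		sppr = fls_u32(divider) - 4;
		spr = divider >> sppr;
		if (sppr > 7)
			return -1;
	}
	*sprp = spr;
	*spprp = sppr;
	return 0;
}

int main(void)
{
	unsigned int spr, sppr;

	if (!calc(1000000, &spr, &sppr))	/* ask for 1 MHz */
		printf("SPR=%u SPPR=%u -> %u Hz\n", spr, sppr,
		       TCLK / (spr << sppr));	/* SPR=8 SPPR=5 -> 976562 Hz */
	return 0;
}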
+
+static void mvebu_spi_50mhz_ac_timing_erratum(struct udevice *bus, uint mode)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct kwspi_registers *reg = plat->spireg;
+ u32 data;
+
+ /*
+ * Erratum description: (Erratum NO. FE-9144572) The device
+ * SPI interface supports frequencies of up to 50 MHz.
+ * However, due to this erratum, when the device core clock is
+ * 250 MHz and the SPI interfaces is configured for 50MHz SPI
+	 * 250 MHz and the SPI interface is configured for a 50 MHz SPI
+	 * clock with CPOL=CPHA=1, data corruption might occur on
+	 * reads from the SPI device.
+ * Work in one of the following configurations:
+ * 1. Set CPOL=CPHA=0 in "SPI Interface Configuration
+ * Register".
+ * 2. Set TMISO_SAMPLE value to 0x2 in "SPI Timing Parameters 1
+ * Register" before setting the interface.
+ */
+ data = readl(&reg->timing1);
+ data &= ~KW_SPI_TMISO_SAMPLE_MASK;
+
+ if (CONFIG_SYS_TCLK == 250000000 &&
+ mode & SPI_CPOL &&
+ mode & SPI_CPHA)
+ data |= KW_SPI_TMISO_SAMPLE_2;
+ else
+ data |= KW_SPI_TMISO_SAMPLE_1;
+
+ writel(data, &reg->timing1);
+}
+
+static int mvebu_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct kwspi_registers *reg = plat->spireg;
+ u32 data = readl(&reg->cfg);
+
+ data &= ~(KWSPI_CPHA | KWSPI_CPOL | KWSPI_RXLSBF | KWSPI_TXLSBF);
+
+ if (mode & SPI_CPHA)
+ data |= KWSPI_CPHA;
+ if (mode & SPI_CPOL)
+ data |= KWSPI_CPOL;
+ if (mode & SPI_LSB_FIRST)
+ data |= (KWSPI_RXLSBF | KWSPI_TXLSBF);
+
+ writel(data, &reg->cfg);
+
+ if (plat->is_errata_50mhz_ac)
+ mvebu_spi_50mhz_ac_timing_erratum(bus, mode);
+
+ return 0;
+}
+
+static int mvebu_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+
+ return _spi_xfer(plat->spireg, bitlen, dout, din, flags);
+}
+
+__attribute__((weak)) int mvebu_board_spi_claim_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+static int mvebu_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+
+ /* Configure the chip-select in the CTRL register */
+ clrsetbits_le32(&plat->spireg->ctrl,
+ KWSPI_CS_MASK << KWSPI_CS_SHIFT,
+ spi_chip_select(dev) << KWSPI_CS_SHIFT);
+
+ return mvebu_board_spi_claim_bus(dev);
+}
+
+__attribute__((weak)) int mvebu_board_spi_release_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+static int mvebu_spi_release_bus(struct udevice *dev)
+{
+ return mvebu_board_spi_release_bus(dev);
+}
+
+static int mvebu_spi_probe(struct udevice *bus)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct kwspi_registers *reg = plat->spireg;
+
+ writel(KWSPI_SMEMRDY, &reg->ctrl);
+ writel(KWSPI_SMEMRDIRQ, &reg->irq_cause);
+ writel(KWSPI_IRQMASK, &reg->irq_mask);
+
+ return 0;
+}
+
+static int mvebu_spi_of_to_plat(struct udevice *bus)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ const struct mvebu_spi_dev *drvdata =
+ (struct mvebu_spi_dev *)dev_get_driver_data(bus);
+
+ plat->spireg = dev_read_addr_ptr(bus);
+ plat->is_errata_50mhz_ac = drvdata->is_errata_50mhz_ac;
+
+ return 0;
+}
+
+static const struct dm_spi_ops mvebu_spi_ops = {
+ .claim_bus = mvebu_spi_claim_bus,
+ .release_bus = mvebu_spi_release_bus,
+ .xfer = mvebu_spi_xfer,
+ .set_speed = mvebu_spi_set_speed,
+ .set_mode = mvebu_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct mvebu_spi_dev armada_spi_dev_data = {
+ .is_errata_50mhz_ac = false,
+};
+
+static const struct mvebu_spi_dev armada_xp_spi_dev_data = {
+ .is_errata_50mhz_ac = false,
+};
+
+static const struct mvebu_spi_dev armada_375_spi_dev_data = {
+ .is_errata_50mhz_ac = false,
+};
+
+static const struct mvebu_spi_dev armada_380_spi_dev_data = {
+ .is_errata_50mhz_ac = true,
+};
+
+static const struct udevice_id mvebu_spi_ids[] = {
+ {
+ .compatible = "marvell,orion-spi",
+ .data = (ulong)&armada_spi_dev_data,
+ },
+ {
+ .compatible = "marvell,armada-375-spi",
+ .data = (ulong)&armada_375_spi_dev_data
+ },
+ {
+ .compatible = "marvell,armada-380-spi",
+ .data = (ulong)&armada_380_spi_dev_data
+ },
+ {
+ .compatible = "marvell,armada-xp-spi",
+ .data = (ulong)&armada_xp_spi_dev_data
+ },
+ { }
+};
+
+U_BOOT_DRIVER(mvebu_spi) = {
+ .name = "mvebu_spi",
+ .id = UCLASS_SPI,
+ .of_match = mvebu_spi_ids,
+ .ops = &mvebu_spi_ops,
+ .of_to_plat = mvebu_spi_of_to_plat,
+ .plat_auto = sizeof(struct mvebu_spi_plat),
+ .priv_auto = sizeof(struct mvebu_spi_priv),
+ .probe = mvebu_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/meson_spifc.c b/roms/u-boot/drivers/spi/meson_spifc.c
new file mode 100644
index 000000000..d99a15140
--- /dev/null
+++ b/roms/u-boot/drivers/spi/meson_spifc.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * Amlogic Meson SPI Flash Controller driver
+ */
+
+#include <common.h>
+#include <log.h>
+#include <spi.h>
+#include <clk.h>
+#include <dm.h>
+#include <regmap.h>
+#include <errno.h>
+#include <asm/io.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+
+/* register map */
+#define REG_CMD 0x00
+#define REG_ADDR 0x04
+#define REG_CTRL 0x08
+#define REG_CTRL1 0x0c
+#define REG_STATUS 0x10
+#define REG_CTRL2 0x14
+#define REG_CLOCK 0x18
+#define REG_USER 0x1c
+#define REG_USER1 0x20
+#define REG_USER2 0x24
+#define REG_USER3 0x28
+#define REG_USER4 0x2c
+#define REG_SLAVE 0x30
+#define REG_SLAVE1 0x34
+#define REG_SLAVE2 0x38
+#define REG_SLAVE3 0x3c
+#define REG_C0 0x40
+#define REG_B8 0x60
+#define REG_MAX 0x7c
+
+/* register fields */
+#define CMD_USER BIT(18)
+#define CTRL_ENABLE_AHB BIT(17)
+#define CLOCK_SOURCE BIT(31)
+#define CLOCK_DIV_SHIFT 12
+#define CLOCK_DIV_MASK (0x3f << CLOCK_DIV_SHIFT)
+#define CLOCK_CNT_HIGH_SHIFT 6
+#define CLOCK_CNT_HIGH_MASK (0x3f << CLOCK_CNT_HIGH_SHIFT)
+#define CLOCK_CNT_LOW_SHIFT 0
+#define CLOCK_CNT_LOW_MASK (0x3f << CLOCK_CNT_LOW_SHIFT)
+#define USER_DIN_EN_MS BIT(0)
+#define USER_CMP_MODE BIT(2)
+#define USER_CLK_NOT_INV BIT(7)
+#define USER_UC_DOUT_SEL BIT(27)
+#define USER_UC_DIN_SEL BIT(28)
+#define USER_UC_MASK ((BIT(5) - 1) << 27)
+#define USER1_BN_UC_DOUT_SHIFT 17
+#define USER1_BN_UC_DOUT_MASK (0xff << 16)
+#define USER1_BN_UC_DIN_SHIFT 8
+#define USER1_BN_UC_DIN_MASK (0xff << 8)
+#define USER4_CS_POL_HIGH BIT(23)
+#define USER4_IDLE_CLK_HIGH BIT(29)
+#define USER4_CS_ACT BIT(30)
+#define SLAVE_TRST_DONE BIT(4)
+#define SLAVE_OP_MODE BIT(30)
+#define SLAVE_SW_RST BIT(31)
+
+#define SPIFC_BUFFER_SIZE 64
+
+struct meson_spifc_priv {
+ struct regmap *regmap;
+ struct clk clk;
+};
+
+/**
+ * meson_spifc_drain_buffer() - copy data from device buffer to memory
+ * @spifc: the Meson SPI device
+ * @buf: the destination buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_drain_buffer(struct meson_spifc_priv *spifc,
+ u8 *buf, int len)
+{
+ u32 data;
+ int i = 0;
+
+ while (i < len) {
+ regmap_read(spifc->regmap, REG_C0 + i, &data);
+
+ if (len - i >= 4) {
+ *((u32 *)buf) = data;
+ buf += 4;
+ } else {
+ memcpy(buf, &data, len - i);
+ break;
+ }
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_fill_buffer() - copy data from memory to device buffer
+ * @spifc: the Meson SPI device
+ * @buf: the source buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_fill_buffer(struct meson_spifc_priv *spifc,
+ const u8 *buf, int len)
+{
+ u32 data = 0;
+ int i = 0;
+
+ while (i < len) {
+ if (len - i >= 4)
+ data = *(u32 *)buf;
+ else
+ memcpy(&data, buf, len - i);
+
+ regmap_write(spifc->regmap, REG_C0 + i, data);
+
+ buf += 4;
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_txrx() - transfer a chunk of data
+ * @spifc: the Meson SPI device
+ * @dout: data buffer for TX
+ * @din: data buffer for RX
+ * @offset: offset of the data to transfer
+ * @len: length of the data to transfer
+ * @last_xfer: whether this is the last transfer of the message
+ * @last_chunk: whether this is the last chunk of the transfer
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_txrx(struct meson_spifc_priv *spifc,
+ const u8 *dout, u8 *din, int offset,
+ int len, bool last_xfer, bool last_chunk)
+{
+ bool keep_cs = true;
+ u32 data;
+ int ret;
+
+ if (dout)
+ meson_spifc_fill_buffer(spifc, dout + offset, len);
+
+ /* enable DOUT stage */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_UC_MASK,
+ USER_UC_DOUT_SEL);
+ regmap_write(spifc->regmap, REG_USER1,
+ (8 * len - 1) << USER1_BN_UC_DOUT_SHIFT);
+
+ /* enable data input during DOUT */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_DIN_EN_MS,
+ USER_DIN_EN_MS);
+
+ if (last_chunk && last_xfer)
+ keep_cs = false;
+
+ regmap_update_bits(spifc->regmap, REG_USER4, USER4_CS_ACT,
+ keep_cs ? USER4_CS_ACT : 0);
+
+ /* clear transition done bit */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_TRST_DONE, 0);
+ /* start transfer */
+ regmap_update_bits(spifc->regmap, REG_CMD, CMD_USER, CMD_USER);
+
+ /* wait for the current operation to terminate */
+ ret = regmap_read_poll_timeout(spifc->regmap, REG_SLAVE, data,
+ (data & SLAVE_TRST_DONE),
+ 0, 5 * CONFIG_SYS_HZ);
+
+ if (!ret && din)
+ meson_spifc_drain_buffer(spifc, din + offset, len);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_xfer() - perform a single transfer
+ * @dev: the SPI controller device
+ * @bitlen: length of the transfer
+ * @dout: data buffer for TX
+ * @din: data buffer for RX
+ * @flags: transfer flags
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_xfer(struct udevice *slave, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct meson_spifc_priv *spifc = dev_get_priv(slave->parent);
+ int blen = bitlen / 8;
+ int len, done = 0, ret = 0;
+
+ if (bitlen % 8)
+ return -EINVAL;
+
+ debug("xfer len %d (%d) dout %p din %p\n", bitlen, blen, dout, din);
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB, 0);
+
+ while (done < blen && !ret) {
+ len = min_t(int, blen - done, SPIFC_BUFFER_SIZE);
+ ret = meson_spifc_txrx(spifc, dout, din, done, len,
+ flags & SPI_XFER_END,
+ done + len >= blen);
+ done += len;
+ }
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB,
+ CTRL_ENABLE_AHB);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_set_speed() - program the clock divider
+ * @dev: the SPI controller device
+ * @speed: desired speed in Hz
+ */
+static int meson_spifc_set_speed(struct udevice *dev, uint speed)
+{
+ struct meson_spifc_priv *spifc = dev_get_priv(dev);
+ unsigned long parent, value;
+ int n;
+
+ parent = clk_get_rate(&spifc->clk);
+ n = max_t(int, parent / speed - 1, 1);
+
+ debug("parent %lu, speed %u, n %d\n", parent, speed, n);
+
+ value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK;
+ value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK;
+ value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) &
+ CLOCK_CNT_HIGH_MASK;
+
+ regmap_write(spifc->regmap, REG_CLOCK, value);
+
+ return 0;
+}
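As a worked example of the REG_CLOCK encoding, the sketch below repeats the calculation host-side with an assumed 166 MHz parent clock and a requested 25 MHz SCK; the field masks are copied from the register definitions earlier in this file.

#include <stdio.h>
#include <stdint.h>

#define CLOCK_DIV_SHIFT		12
#define CLOCK_DIV_MASK		(0x3f << CLOCK_DIV_SHIFT)
#define CLOCK_CNT_HIGH_SHIFT	6
#define CLOCK_CNT_HIGH_MASK	(0x3f << CLOCK_CNT_HIGH_SHIFT)
#define CLOCK_CNT_LOW_SHIFT	0
#define CLOCK_CNT_LOW_MASK	(0x3f << CLOCK_CNT_LOW_SHIFT)

int main(void)
{
	unsigned long parent = 166000000, speed = 25000000;	/* assumed values */
	int n = parent / speed - 1;
	uint32_t value;

	if (n < 1)
		n = 1;

	value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK;
	value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK;
	value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) &
		 CLOCK_CNT_HIGH_MASK;

	printf("REG_CLOCK = 0x%08x (n=%d)\n", value, n);	/* 0x00005085, n=5 */
	return 0;
}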
+
+/**
+ * meson_spifc_set_mode() - set up the SPI bus mode
+ * @dev: the SPI controller device
+ * @mode: desired mode bitfield
+ * Return: 0 on success, -ENODEV on error
+ */
+static int meson_spifc_set_mode(struct udevice *dev, uint mode)
+{
+ struct meson_spifc_priv *spifc = dev_get_priv(dev);
+
+ if (mode & (SPI_CPHA | SPI_RX_QUAD | SPI_RX_DUAL |
+ SPI_TX_QUAD | SPI_TX_DUAL))
+ return -ENODEV;
+
+ regmap_update_bits(spifc->regmap, REG_USER, USER_CLK_NOT_INV,
+ mode & SPI_CPOL ? USER_CLK_NOT_INV : 0);
+
+ regmap_update_bits(spifc->regmap, REG_USER4, USER4_CS_POL_HIGH,
+ mode & SPI_CS_HIGH ? USER4_CS_POL_HIGH : 0);
+
+ return 0;
+}
+
+/**
+ * meson_spifc_hw_init() - reset and initialize the SPI controller
+ * @spifc: the Meson SPI device
+ */
+static void meson_spifc_hw_init(struct meson_spifc_priv *spifc)
+{
+ /* reset device */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_SW_RST,
+ SLAVE_SW_RST);
+ /* disable compatible mode */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_CMP_MODE, 0);
+ /* set master mode */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_OP_MODE, 0);
+}
+
+static const struct dm_spi_ops meson_spifc_ops = {
+ .xfer = meson_spifc_xfer,
+ .set_speed = meson_spifc_set_speed,
+ .set_mode = meson_spifc_set_mode,
+};
+
+static int meson_spifc_probe(struct udevice *dev)
+{
+ struct meson_spifc_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ ret = regmap_init_mem(dev_ofnode(dev), &priv->regmap);
+ if (ret)
+ return ret;
+
+ ret = clk_get_by_index(dev, 0, &priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(&priv->clk);
+ if (ret)
+ return ret;
+
+ meson_spifc_hw_init(priv);
+
+ return 0;
+}
+
+static const struct udevice_id meson_spifc_ids[] = {
+ { .compatible = "amlogic,meson-gxbb-spifc", },
+ { }
+};
+
+U_BOOT_DRIVER(meson_spifc) = {
+ .name = "meson_spifc",
+ .id = UCLASS_SPI,
+ .of_match = meson_spifc_ids,
+ .ops = &meson_spifc_ops,
+ .probe = meson_spifc_probe,
+ .priv_auto = sizeof(struct meson_spifc_priv),
+};
diff --git a/roms/u-boot/drivers/spi/mpc8xx_spi.c b/roms/u-boot/drivers/spi/mpc8xx_spi.c
new file mode 100644
index 000000000..0026ad23e
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mpc8xx_spi.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2001 Navin Boppuri / Prashant Patel
+ * <nboppuri@trinetcommunication.com>,
+ * <pmpatel@trinetcommunication.com>
+ * Copyright (c) 2001 Gerd Mennchen <Gerd.Mennchen@icn.siemens.de>
+ * Copyright (c) 2001 Wolfgang Denk, DENX Software Engineering, <wd@denx.de>.
+ */
+
+/*
+ * MPC8xx CPM SPI interface.
+ *
+ * Parts of this code are probably not portable and/or specific to
+ * the board which I used for the tests. Please send fixes/complaints
+ * to wd@denx.de
+ *
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <mpc8xx.h>
+#include <spi.h>
+#include <linux/delay.h>
+
+#include <asm/cpm_8xx.h>
+#include <asm/io.h>
+
+#define CPM_SPI_BASE_RX CPM_SPI_BASE
+#define CPM_SPI_BASE_TX (CPM_SPI_BASE + sizeof(cbd_t))
+
+#define MAX_BUFFER 0x104
+
+static int mpc8xx_spi_probe(struct udevice *dev)
+{
+ immap_t __iomem *immr = (immap_t __iomem *)CONFIG_SYS_IMMR;
+ cpm8xx_t __iomem *cp = &immr->im_cpm;
+ spi_t __iomem *spi = (spi_t __iomem *)&cp->cp_dparam[PROFF_SPI];
+ cbd_t __iomem *tbdf, *rbdf;
+
+ /* Disable relocation */
+ out_be16(&spi->spi_rpbase, 0);
+
+/* 1 */
+ /* ------------------------------------------------
+ * Initialize Port B SPI pins -> page 34-8 MPC860UM
+ * (we are only in Master Mode !)
+ * ------------------------------------------------ */
+
+ /* --------------------------------------------
+ * GPIO or per. Function
+ * PBPAR[28] = 1 [0x00000008] -> PERI: (SPIMISO)
+ * PBPAR[29] = 1 [0x00000004] -> PERI: (SPIMOSI)
+ * PBPAR[30] = 1 [0x00000002] -> PERI: (SPICLK)
+ * PBPAR[31] = 0 [0x00000001] -> GPIO: (CS for PCUE/CCM-EEPROM)
+ * -------------------------------------------- */
+ clrsetbits_be32(&cp->cp_pbpar, 0x00000001, 0x0000000E); /* set bits */
+
+ /* ----------------------------------------------
+ * In/Out or per. Function 0/1
+ * PBDIR[28] = 1 [0x00000008] -> PERI1: SPIMISO
+ * PBDIR[29] = 1 [0x00000004] -> PERI1: SPIMOSI
+ * PBDIR[30] = 1 [0x00000002] -> PERI1: SPICLK
+ * PBDIR[31] = 1 [0x00000001] -> GPIO OUT: CS for PCUE/CCM-EEPROM
+ * ---------------------------------------------- */
+ setbits_be32(&cp->cp_pbdir, 0x0000000F);
+
+ /* ----------------------------------------------
+ * open drain or active output
+ * PBODR[28] = 1 [0x00000008] -> open drain: SPIMISO
+ * PBODR[29] = 0 [0x00000004] -> active output SPIMOSI
+ * PBODR[30] = 0 [0x00000002] -> active output: SPICLK
+ * PBODR[31] = 0 [0x00000001] -> active output GPIO OUT: CS for PCUE/CCM
+ * ---------------------------------------------- */
+
+ clrsetbits_be16(&cp->cp_pbodr, 0x00000007, 0x00000008);
+
+ /* Initialize the parameter ram.
+ * We need to make sure many things are initialized to zero
+ */
+ out_be32(&spi->spi_rstate, 0);
+ out_be32(&spi->spi_rdp, 0);
+ out_be16(&spi->spi_rbptr, 0);
+ out_be16(&spi->spi_rbc, 0);
+ out_be32(&spi->spi_rxtmp, 0);
+ out_be32(&spi->spi_tstate, 0);
+ out_be32(&spi->spi_tdp, 0);
+ out_be16(&spi->spi_tbptr, 0);
+ out_be16(&spi->spi_tbc, 0);
+ out_be32(&spi->spi_txtmp, 0);
+
+/* 3 */
+ /* Set up the SPI parameters in the parameter ram */
+ out_be16(&spi->spi_rbase, CPM_SPI_BASE_RX);
+ out_be16(&spi->spi_tbase, CPM_SPI_BASE_TX);
+
+ /***********IMPORTANT******************/
+
+ /*
+ * Setting transmit and receive buffer descriptor pointers
+ * initially to rbase and tbase. Only the microcode patches
+ * documentation talks about initializing this pointer. This
+	 * is missing from the sample I2C driver. If you don't
+ * initialize these pointers, the kernel hangs.
+ */
+ out_be16(&spi->spi_rbptr, CPM_SPI_BASE_RX);
+ out_be16(&spi->spi_tbptr, CPM_SPI_BASE_TX);
+
+/* 4 */
+ /* Init SPI Tx + Rx Parameters */
+ while (in_be16(&cp->cp_cpcr) & CPM_CR_FLG)
+ ;
+
+ out_be16(&cp->cp_cpcr, mk_cr_cmd(CPM_CR_CH_SPI, CPM_CR_INIT_TRX) |
+ CPM_CR_FLG);
+ while (in_be16(&cp->cp_cpcr) & CPM_CR_FLG)
+ ;
+
+/* 5 */
+ /* Set SDMA configuration register */
+ out_be32(&immr->im_siu_conf.sc_sdcr, 0x0001);
+
+/* 6 */
+ /* Set to big endian. */
+ out_8(&spi->spi_tfcr, SMC_EB);
+ out_8(&spi->spi_rfcr, SMC_EB);
+
+/* 7 */
+ /* Set maximum receive size. */
+ out_be16(&spi->spi_mrblr, MAX_BUFFER);
+
+/* 8 + 9 */
+ /* tx and rx buffer descriptors */
+ tbdf = (cbd_t __iomem *)&cp->cp_dpmem[CPM_SPI_BASE_TX];
+ rbdf = (cbd_t __iomem *)&cp->cp_dpmem[CPM_SPI_BASE_RX];
+
+ clrbits_be16(&tbdf->cbd_sc, BD_SC_READY);
+ clrbits_be16(&rbdf->cbd_sc, BD_SC_EMPTY);
+
+/* 10 + 11 */
+ out_8(&cp->cp_spim, 0); /* Mask all SPI events */
+ out_8(&cp->cp_spie, SPI_EMASK); /* Clear all SPI events */
+
+ return 0;
+}
+
+static int mpc8xx_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ immap_t __iomem *immr = (immap_t __iomem *)CONFIG_SYS_IMMR;
+ cpm8xx_t __iomem *cp = &immr->im_cpm;
+ cbd_t __iomem *tbdf, *rbdf;
+ int tm;
+ size_t count = (bitlen + 7) / 8;
+
+ if (count > MAX_BUFFER)
+ return -EINVAL;
+
+ tbdf = (cbd_t __iomem *)&cp->cp_dpmem[CPM_SPI_BASE_TX];
+ rbdf = (cbd_t __iomem *)&cp->cp_dpmem[CPM_SPI_BASE_RX];
+
+ /* Set CS for device */
+ clrbits_be32(&cp->cp_pbdat, 0x0001);
+
+ /* Setting tx bd status and data length */
+ out_be32(&tbdf->cbd_bufaddr, (ulong)dout);
+ out_be16(&tbdf->cbd_sc, BD_SC_READY | BD_SC_LAST | BD_SC_WRAP);
+ out_be16(&tbdf->cbd_datlen, count);
+
+ /* Setting rx bd status and data length */
+ out_be32(&rbdf->cbd_bufaddr, (ulong)din);
+ out_be16(&rbdf->cbd_sc, BD_SC_EMPTY | BD_SC_WRAP);
+ out_be16(&rbdf->cbd_datlen, 0); /* rx length has no significance */
+
+ clrsetbits_be16(&cp->cp_spmode, ~SPMODE_LOOP, SPMODE_REV | SPMODE_MSTR |
+ SPMODE_EN | SPMODE_LEN(8) | SPMODE_PM(0x8));
+ out_8(&cp->cp_spim, 0); /* Mask all SPI events */
+ out_8(&cp->cp_spie, SPI_EMASK); /* Clear all SPI events */
+
+ /* start spi transfer */
+ setbits_8(&cp->cp_spcom, SPI_STR); /* Start transmit */
+
+ /* --------------------------------
+ * Wait for SPI transmit to get out
+ * or time out (1 second = 1000 ms)
+ * -------------------------------- */
+ for (tm = 0; tm < 1000; ++tm) {
+ if (in_8(&cp->cp_spie) & SPI_TXB) /* Tx Buffer Empty */
+ break;
+ if ((in_be16(&tbdf->cbd_sc) & BD_SC_READY) == 0)
+ break;
+ udelay(1000);
+ }
+ if (tm >= 1000)
+ printf("*** spi_xfer: Time out while xferring to/from SPI!\n");
+
+ /* Clear CS for device */
+ setbits_be32(&cp->cp_pbdat, 0x0001);
+
+ return count;
+}
+
+static const struct dm_spi_ops mpc8xx_spi_ops = {
+ .xfer = mpc8xx_spi_xfer,
+};
+
+static const struct udevice_id mpc8xx_spi_ids[] = {
+ { .compatible = "fsl,mpc8xx-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(mpc8xx_spi) = {
+ .name = "mpc8xx_spi",
+ .id = UCLASS_SPI,
+ .of_match = mpc8xx_spi_ids,
+ .ops = &mpc8xx_spi_ops,
+ .probe = mpc8xx_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/mpc8xxx_spi.c b/roms/u-boot/drivers/spi/mpc8xxx_spi.c
new file mode 100644
index 000000000..6869d60d9
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mpc8xxx_spi.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2006 Ben Warren, Qstreams Networks Inc.
+ * With help from the common/soft_spi and arch/powerpc/cpu/mpc8260 drivers
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/mpc8xxx_spi.h>
+#include <asm-generic/gpio.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+enum {
+ SPI_EV_NE = BIT(31 - 22), /* Receiver Not Empty */
+ SPI_EV_NF = BIT(31 - 23), /* Transmitter Not Full */
+};
+
+enum {
+ SPI_MODE_LOOP = BIT(31 - 1), /* Loopback mode */
+ SPI_MODE_CI = BIT(31 - 2), /* Clock invert */
+ SPI_MODE_CP = BIT(31 - 3), /* Clock phase */
+ SPI_MODE_DIV16 = BIT(31 - 4), /* Divide clock source by 16 */
+ SPI_MODE_REV = BIT(31 - 5), /* Reverse mode - MSB first */
+ SPI_MODE_MS = BIT(31 - 6), /* Always master */
+ SPI_MODE_EN = BIT(31 - 7), /* Enable interface */
+
+ SPI_MODE_LEN_MASK = 0xf00000,
+ SPI_MODE_LEN_SHIFT = 20,
+ SPI_MODE_PM_SHIFT = 16,
+ SPI_MODE_PM_MASK = 0xf0000,
+
+ SPI_COM_LST = BIT(31 - 9),
+};
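The SPI_EV_* and SPI_MODE_* values above use PowerPC big-endian bit numbering, where bit 0 is the most significant bit; BIT(31 - n) converts that notation into a conventional mask. A short standalone check:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	/* big-endian bit 22 (Receiver Not Empty) is mask 0x200 */
	printf("SPI_EV_NE = 0x%08x\n", BIT(31 - 22));
	/* big-endian bit 23 (Transmitter Not Full) is mask 0x100 */
	printf("SPI_EV_NF = 0x%08x\n", BIT(31 - 23));
	return 0;
}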
+
+struct mpc8xxx_priv {
+ spi8xxx_t *spi;
+ struct gpio_desc gpios[16];
+ int cs_count;
+ ulong clk_rate;
+};
+
+#define SPI_TIMEOUT 1000
+
+static int mpc8xxx_spi_of_to_plat(struct udevice *dev)
+{
+ struct mpc8xxx_priv *priv = dev_get_priv(dev);
+ struct clk clk;
+ int ret;
+
+ priv->spi = (spi8xxx_t *)dev_read_addr(dev);
+
+ ret = gpio_request_list_by_name(dev, "gpios", priv->gpios,
+ ARRAY_SIZE(priv->gpios), GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
+ if (ret < 0)
+ return -EINVAL;
+
+ priv->cs_count = ret;
+
+ ret = clk_get_by_index(dev, 0, &clk);
+ if (ret) {
+ dev_err(dev, "%s: clock not defined\n", __func__);
+ return ret;
+ }
+
+ priv->clk_rate = clk_get_rate(&clk);
+ if (!priv->clk_rate) {
+ dev_err(dev, "%s: failed to get clock rate\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mpc8xxx_spi_probe(struct udevice *dev)
+{
+ struct mpc8xxx_priv *priv = dev_get_priv(dev);
+ spi8xxx_t *spi = priv->spi;
+
+ /*
+ * SPI pins on the MPC83xx are not muxed, so all we do is initialize
+ * some registers
+ */
+ out_be32(&priv->spi->mode, SPI_MODE_REV | SPI_MODE_MS);
+
+ /* set len to 8 bits */
+ setbits_be32(&spi->mode, (8 - 1) << SPI_MODE_LEN_SHIFT);
+
+ setbits_be32(&spi->mode, SPI_MODE_EN);
+
+ /* Clear all SPI events */
+ setbits_be32(&priv->spi->event, 0xffffffff);
+ /* Mask all SPI interrupts */
+ clrbits_be32(&priv->spi->mask, 0xffffffff);
+ /* LST bit doesn't do anything, so disregard */
+ out_be32(&priv->spi->com, 0);
+
+ return 0;
+}
+
+static void mpc8xxx_spi_cs_activate(struct udevice *dev)
+{
+ struct mpc8xxx_priv *priv = dev_get_priv(dev->parent);
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+
+ dm_gpio_set_value(&priv->gpios[plat->cs], 1);
+}
+
+static void mpc8xxx_spi_cs_deactivate(struct udevice *dev)
+{
+ struct mpc8xxx_priv *priv = dev_get_priv(dev->parent);
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+
+ dm_gpio_set_value(&priv->gpios[plat->cs], 0);
+}
+
+static int mpc8xxx_spi_xfer(struct udevice *dev, uint bitlen,
+ const void *dout, void *din, ulong flags)
+{
+ struct udevice *bus = dev->parent;
+ struct mpc8xxx_priv *priv = dev_get_priv(bus);
+ spi8xxx_t *spi = priv->spi;
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+ u32 tmpdin = 0, tmpdout = 0, n;
+ const u8 *cout = dout;
+ u8 *cin = din;
+
+ debug("%s: slave %s:%u dout %08X din %08X bitlen %u\n", __func__,
+ bus->name, plat->cs, (uint)dout, (uint)din, bitlen);
+ if (plat->cs >= priv->cs_count) {
+ dev_err(dev, "chip select index %d too large (cs_count=%d)\n",
+ plat->cs, priv->cs_count);
+ return -EINVAL;
+ }
+ if (bitlen % 8) {
+ printf("*** spi_xfer: bitlen must be multiple of 8\n");
+ return -ENOTSUPP;
+ }
+
+ if (flags & SPI_XFER_BEGIN)
+ mpc8xxx_spi_cs_activate(dev);
+
+ /* Clear all SPI events */
+ setbits_be32(&spi->event, 0xffffffff);
+ n = bitlen / 8;
+
+ /* Handle data in 8-bit chunks */
+ while (n--) {
+ ulong start;
+
+ if (cout)
+ tmpdout = *cout++;
+
+ /* Write the data out */
+ out_be32(&spi->tx, tmpdout);
+
+ debug("*** %s: ... %08x written\n", __func__, tmpdout);
+
+ /*
+ * Wait for SPI transmit to get out
+ * or time out (1 second = 1000 ms)
+ * The NE event must be read and cleared first
+ */
+ start = get_timer(0);
+ do {
+ u32 event = in_be32(&spi->event);
+ bool have_ne = event & SPI_EV_NE;
+ bool have_nf = event & SPI_EV_NF;
+
+ if (!have_ne)
+ continue;
+
+ tmpdin = in_be32(&spi->rx);
+ setbits_be32(&spi->event, SPI_EV_NE);
+
+ if (cin)
+ *cin++ = tmpdin;
+
+ /*
+ * Only bail when we've had both NE and NF events.
+ * This will cause timeouts on RO devices, so maybe
+ * in the future put an arbitrary delay after writing
+ * the device. Arbitrary delays suck, though...
+ */
+ if (have_nf)
+ break;
+
+ mdelay(1);
+ } while (get_timer(start) < SPI_TIMEOUT);
+
+ if (get_timer(start) >= SPI_TIMEOUT) {
+ debug("*** %s: Time out during SPI transfer\n",
+ __func__);
+ return -ETIMEDOUT;
+ }
+
+ debug("*** %s: transfer ended. Value=%08x\n", __func__, tmpdin);
+ }
+
+ if (flags & SPI_XFER_END)
+ mpc8xxx_spi_cs_deactivate(dev);
+
+ return 0;
+}
+
+static int mpc8xxx_spi_set_speed(struct udevice *dev, uint speed)
+{
+ struct mpc8xxx_priv *priv = dev_get_priv(dev);
+ spi8xxx_t *spi = priv->spi;
+ u32 bits, mask, div16, pm;
+ u32 mode;
+ ulong clk;
+
+ clk = priv->clk_rate;
+ if (clk / 64 > speed) {
+ div16 = SPI_MODE_DIV16;
+ clk /= 16;
+ } else {
+ div16 = 0;
+ }
+ pm = (clk - 1)/(4*speed) + 1;
+ if (pm > 16) {
+ dev_err(dev, "requested speed %u too small\n", speed);
+ return -EINVAL;
+ }
+ pm--;
+
+ bits = div16 | (pm << SPI_MODE_PM_SHIFT);
+ mask = SPI_MODE_DIV16 | SPI_MODE_PM_MASK;
+ mode = in_be32(&spi->mode);
+ if ((mode & mask) != bits) {
+ /* Must clear mode[EN] while changing speed. */
+ mode &= ~(mask | SPI_MODE_EN);
+ out_be32(&spi->mode, mode);
+ mode |= bits;
+ out_be32(&spi->mode, mode);
+ mode |= SPI_MODE_EN;
+ out_be32(&spi->mode, mode);
+ }
+
+ debug("requested speed %u, set speed to %lu/(%s4*%u) == %lu\n",
+ speed, priv->clk_rate, div16 ? "16*" : "", pm + 1,
+ clk/(4*(pm + 1)));
+
+ return 0;
+}
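The prescaler selection in mpc8xxx_spi_set_speed() can be traced with concrete numbers; the sketch below runs the same arithmetic host-side with an assumed 133 MHz input clock and a 1 MHz request.

#include <stdio.h>

int main(void)
{
	unsigned long clk = 133333333, speed = 1000000;	/* assumed example values */
	unsigned int div16 = 0, pm;

	if (clk / 64 > speed) {		/* too fast for PM alone: use the /16 pre-divider */
		div16 = 1;
		clk /= 16;
	}
	pm = (clk - 1) / (4 * speed) + 1;	/* smallest PM+1 giving <= requested speed */
	if (pm > 16)
		return 1;		/* requested speed too small for this clock */
	pm--;

	printf("div16=%u PM=%u -> %lu Hz\n", div16, pm, clk / (4 * (pm + 1)));
	/* prints: div16=1 PM=2 -> 694444 Hz */
	return 0;
}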
+
+static int mpc8xxx_spi_set_mode(struct udevice *dev, uint mode)
+{
+ /* TODO(mario.six@gdsys.cc): Using SPI_CPHA (for clock phase) and
+ * SPI_CPOL (for clock polarity) should work
+ */
+ return 0;
+}
+
+static const struct dm_spi_ops mpc8xxx_spi_ops = {
+ .xfer = mpc8xxx_spi_xfer,
+ .set_speed = mpc8xxx_spi_set_speed,
+ .set_mode = mpc8xxx_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id mpc8xxx_spi_ids[] = {
+ { .compatible = "fsl,spi" },
+ { }
+};
+
+U_BOOT_DRIVER(mpc8xxx_spi) = {
+ .name = "mpc8xxx_spi",
+ .id = UCLASS_SPI,
+ .of_match = mpc8xxx_spi_ids,
+ .ops = &mpc8xxx_spi_ops,
+ .of_to_plat = mpc8xxx_spi_of_to_plat,
+ .probe = mpc8xxx_spi_probe,
+ .priv_auto = sizeof(struct mpc8xxx_priv),
+};
diff --git a/roms/u-boot/drivers/spi/mscc_bb_spi.c b/roms/u-boot/drivers/spi/mscc_bb_spi.c
new file mode 100644
index 000000000..2a01ea061
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mscc_bb_spi.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi SoCs spi driver
+ *
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/gpio.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+struct mscc_bb_priv {
+ void __iomem *regs;
+ u32 deactivate_delay_us;
+ bool cs_active; /* State flag as to whether CS is asserted */
+ int cs_num;
+ u32 svalue; /* Value to start transfer with */
+ u32 clk1; /* Clock value start */
+ u32 clk2; /* Clock value 2nd phase */
+};
+
+/* Delay 24 instructions for this particular application */
+#define hold_time_delay() mscc_vcoreiii_nop_delay(3)
+
+static int mscc_bb_spi_cs_activate(struct mscc_bb_priv *priv, int mode, int cs)
+{
+ if (!priv->cs_active) {
+ int cpha = mode & SPI_CPHA;
+ u32 cs_value;
+
+ priv->cs_num = cs;
+
+ if (cpha) {
+ /* Initial clock starts SCK=1 */
+ priv->clk1 = ICPU_SW_MODE_SW_SPI_SCK;
+ priv->clk2 = 0;
+ } else {
+ /* Initial clock starts SCK=0 */
+ priv->clk1 = 0;
+ priv->clk2 = ICPU_SW_MODE_SW_SPI_SCK;
+ }
+
+ /* Enable bitbang, SCK_OE, SDO_OE */
+ priv->svalue = (ICPU_SW_MODE_SW_PIN_CTRL_MODE | /* Bitbang */
+ ICPU_SW_MODE_SW_SPI_SCK_OE | /* SCK_OE */
+ ICPU_SW_MODE_SW_SPI_SDO_OE); /* SDO OE */
+
+ /* Add CS */
+ if (cs >= 0) {
+ cs_value =
+ ICPU_SW_MODE_SW_SPI_CS_OE(BIT(cs)) |
+ ICPU_SW_MODE_SW_SPI_CS(BIT(cs));
+ } else {
+ cs_value = 0;
+ }
+
+ priv->svalue |= cs_value;
+
+ /* Enable the CS in HW, Initial clock value */
+ writel(priv->svalue | priv->clk2, priv->regs);
+
+ priv->cs_active = true;
+ debug("Activated CS%d\n", priv->cs_num);
+ }
+
+ return 0;
+}
+
+static int mscc_bb_spi_cs_deactivate(struct mscc_bb_priv *priv, int deact_delay)
+{
+ if (priv->cs_active) {
+ /* Keep driving the CLK to its current value while
+ * actively deselecting CS.
+ */
+ u32 value = readl(priv->regs);
+
+ value &= ~ICPU_SW_MODE_SW_SPI_CS_M;
+ writel(value, priv->regs);
+ hold_time_delay();
+
+ /* Stop driving the clock, but keep CS with nCS == 1 */
+ value &= ~ICPU_SW_MODE_SW_SPI_SCK_OE;
+ writel(value, priv->regs);
+
+ /* Deselect hold time delay */
+ if (deact_delay)
+ udelay(deact_delay);
+
+ /* Drop everything */
+ writel(0, priv->regs);
+
+ priv->cs_active = false;
+ debug("Deactivated CS%d\n", priv->cs_num);
+ }
+
+ return 0;
+}
+
+int mscc_bb_spi_claim_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+int mscc_bb_spi_release_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+int mscc_bb_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+ struct mscc_bb_priv *priv = dev_get_priv(bus);
+ u32 i, count;
+ const u8 *txd = dout;
+ u8 *rxd = din;
+
+ debug("spi_xfer: slave %s:%s cs%d mode %d, dout %p din %p bitlen %u\n",
+ dev->parent->name, dev->name, plat->cs, plat->mode, dout,
+ din, bitlen);
+
+ if (flags & SPI_XFER_BEGIN)
+ mscc_bb_spi_cs_activate(priv, plat->mode, plat->cs);
+
+ count = bitlen / 8;
+ for (i = 0; i < count; i++) {
+ u32 rx = 0, mask = 0x80, value;
+
+ while (mask) {
+ /* Initial condition: CLK is low. */
+ value = priv->svalue;
+ if (txd && txd[i] & mask)
+ value |= ICPU_SW_MODE_SW_SPI_SDO;
+
+ /* Drive data while taking CLK low. The device
+ * we're accessing will sample on the
+ * following rising edge and will output data
+			 * on this edge for us to sample at the
+ * end of this loop.
+ */
+ writel(value | priv->clk1, priv->regs);
+
+ /* Wait for t_setup. All devices do have a
+ * setup-time, so we always insert some delay
+ * here. Some devices have a very long
+ * setup-time, which can be adjusted by the
+ * user through vcoreiii_device->delay.
+ */
+ hold_time_delay();
+
+ /* Drive the clock high. */
+ writel(value | priv->clk2, priv->regs);
+
+ /* Wait for t_hold. See comment about t_setup
+ * above.
+ */
+ hold_time_delay();
+
+ /* We sample as close to the next falling edge
+ * as possible.
+ */
+ value = readl(priv->regs);
+ if (value & ICPU_SW_MODE_SW_SPI_SDI)
+ rx |= mask;
+ mask >>= 1;
+ }
+ if (rxd) {
+ debug("Read 0x%02x\n", rx);
+ rxd[i] = (u8)rx;
+ }
+ debug("spi_xfer: byte %d/%d\n", i + 1, count);
+ }
+
+ debug("spi_xfer: done\n");
+
+ if (flags & SPI_XFER_END)
+ mscc_bb_spi_cs_deactivate(priv, priv->deactivate_delay_us);
+
+ return 0;
+}
+
+int mscc_bb_spi_set_speed(struct udevice *dev, unsigned int speed)
+{
+ /* Accept any speed */
+ return 0;
+}
+
+int mscc_bb_spi_set_mode(struct udevice *dev, unsigned int mode)
+{
+ return 0;
+}
+
+static const struct dm_spi_ops mscc_bb_ops = {
+ .claim_bus = mscc_bb_spi_claim_bus,
+ .release_bus = mscc_bb_spi_release_bus,
+ .xfer = mscc_bb_spi_xfer,
+ .set_speed = mscc_bb_spi_set_speed,
+ .set_mode = mscc_bb_spi_set_mode,
+};
+
+static const struct udevice_id mscc_bb_ids[] = {
+ { .compatible = "mscc,luton-bb-spi" },
+ { }
+};
+
+static int mscc_bb_spi_probe(struct udevice *bus)
+{
+ struct mscc_bb_priv *priv = dev_get_priv(bus);
+
+ debug("%s: loaded, priv %p\n", __func__, priv);
+
+ priv->regs = (void __iomem *)dev_read_addr(bus);
+
+ priv->deactivate_delay_us =
+ dev_read_u32_default(bus, "spi-deactivate-delay", 0);
+
+ priv->cs_active = false;
+
+ return 0;
+}
+
+U_BOOT_DRIVER(mscc_bb) = {
+ .name = "mscc_bb",
+ .id = UCLASS_SPI,
+ .of_match = mscc_bb_ids,
+ .ops = &mscc_bb_ops,
+ .priv_auto = sizeof(struct mscc_bb_priv),
+ .probe = mscc_bb_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/mt7620_spi.c b/roms/u-boot/drivers/spi/mt7620_spi.c
new file mode 100644
index 000000000..6554e3716
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mt7620_spi.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ *
+ * Generic SPI driver for MediaTek MT7620 SoC
+ */
+
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <spi.h>
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+
+#define MT7620_SPI_NUM_CS 2
+#define MT7620_SPI_MASTER1_OFF 0x00
+#define MT7620_SPI_MASTER2_OFF 0x40
+
+/* SPI_STAT */
+#define SPI_BUSY BIT(0)
+
+/* SPI_CFG */
+#define MSB_FIRST BIT(8)
+#define SPI_CLK_POL BIT(6)
+#define RX_CLK_EDGE BIT(5)
+#define TX_CLK_EDGE BIT(4)
+#define SPI_CLK_S 0
+#define SPI_CLK_M GENMASK(2, 0)
+
+/* SPI_CTL */
+#define START_WR BIT(2)
+#define START_RD BIT(1)
+#define SPI_HIGH BIT(0)
+
+#define SPI_ARB 0xf0
+#define ARB_EN BIT(31)
+
+#define POLLING_SCALE 10
+#define POLLING_FRAC_USEC 100
+
+struct mt7620_spi_master_regs {
+ u32 stat;
+ u32 reserved0[3];
+ u32 cfg;
+ u32 ctl;
+ u32 reserved1[2];
+ u32 data;
+};
+
+struct mt7620_spi {
+ void __iomem *regs;
+ struct mt7620_spi_master_regs *m[MT7620_SPI_NUM_CS];
+ unsigned int sys_freq;
+ u32 wait_us;
+ uint mode;
+ uint speed;
+};
+
+static void mt7620_spi_master_setup(struct mt7620_spi *ms, int cs)
+{
+ u32 rate, prescale, freq, tmo, cfg;
+
+ /* Calculate the clock divisor */
+ rate = DIV_ROUND_UP(ms->sys_freq, ms->speed);
+ rate = roundup_pow_of_two(rate);
+
+ prescale = ilog2(rate / 2);
+ if (prescale > 6)
+ prescale = 6;
+
+ /* Calculate the real clock, and usecs for one byte transaction */
+ freq = ms->sys_freq >> (prescale + 1);
+ tmo = DIV_ROUND_UP(8 * 1000000, freq);
+
+ /* 10 times tolerance plus 100us */
+ ms->wait_us = POLLING_SCALE * tmo + POLLING_FRAC_USEC;
+
+ /* set SPI_CFG */
+ cfg = prescale << SPI_CLK_S;
+
+ switch (ms->mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ cfg |= TX_CLK_EDGE;
+ break;
+ case SPI_MODE_1:
+ cfg |= RX_CLK_EDGE;
+ break;
+ case SPI_MODE_2:
+ cfg |= SPI_CLK_POL | RX_CLK_EDGE;
+ break;
+ case SPI_MODE_3:
+ cfg |= SPI_CLK_POL | TX_CLK_EDGE;
+ break;
+ }
+
+ if (!(ms->mode & SPI_LSB_FIRST))
+ cfg |= MSB_FIRST;
+
+ writel(cfg, &ms->m[cs]->cfg);
+
+ writel(SPI_HIGH, &ms->m[cs]->ctl);
+}
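A host-side sketch of the divisor selection above, using hypothetical numbers (40 MHz system clock, 1 MHz requested speed); the kernel helpers DIV_ROUND_UP(), roundup_pow_of_two() and ilog2() are open-coded so the snippet compiles standalone.

#include <stdio.h>

int main(void)
{
	unsigned int sys_freq = 40000000, speed = 1000000;
	unsigned int rate, prescale = 0, p = 1, freq;

	rate = (sys_freq + speed - 1) / speed;		/* DIV_ROUND_UP -> 40 */

	while (p < rate)				/* roundup_pow_of_two -> 64 */
		p <<= 1;
	rate = p;

	while ((rate / 2) >> (prescale + 1))		/* ilog2(rate / 2) -> 5 */
		prescale++;
	if (prescale > 6)
		prescale = 6;

	freq = sys_freq >> (prescale + 1);		/* 40 MHz / 64 = 625 kHz */
	printf("prescale=%u, effective SCK=%u Hz\n", prescale, freq);
	return 0;
}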
+
+static void mt7620_spi_set_cs(struct mt7620_spi *ms, int cs, bool enable)
+{
+ if (enable)
+ mt7620_spi_master_setup(ms, cs);
+
+ if (ms->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ if (enable)
+ clrbits_32(&ms->m[cs]->ctl, SPI_HIGH);
+ else
+ setbits_32(&ms->m[cs]->ctl, SPI_HIGH);
+}
+
+static int mt7620_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct mt7620_spi *ms = dev_get_priv(bus);
+
+ ms->mode = mode;
+
+ /* Mode 0 is buggy; force mode 3 instead */
+ if ((mode & SPI_MODE_3) == SPI_MODE_0)
+ ms->mode |= SPI_MODE_3;
+
+ return 0;
+}
+
+static int mt7620_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct mt7620_spi *ms = dev_get_priv(bus);
+
+ ms->speed = speed;
+
+ return 0;
+}
+
+static inline int mt7620_spi_busy_poll(struct mt7620_spi *ms, int cs)
+{
+ u32 val;
+
+ return readl_poll_timeout(&ms->m[cs]->stat, val, !(val & SPI_BUSY),
+ ms->wait_us);
+}
+
+static int mt7620_spi_read(struct mt7620_spi *ms, int cs, u8 *buf, size_t len)
+{
+ int ret;
+
+ while (len) {
+ setbits_32(&ms->m[cs]->ctl, START_RD);
+
+ ret = mt7620_spi_busy_poll(ms, cs);
+ if (ret)
+ return ret;
+
+ *buf++ = (u8)readl(&ms->m[cs]->data);
+
+ len--;
+ }
+
+ return 0;
+}
+
+static int mt7620_spi_write(struct mt7620_spi *ms, int cs, const u8 *buf,
+ size_t len)
+{
+ int ret;
+
+ while (len) {
+ writel(*buf++, &ms->m[cs]->data);
+ setbits_32(&ms->m[cs]->ctl, START_WR);
+
+ ret = mt7620_spi_busy_poll(ms, cs);
+ if (ret)
+ return ret;
+
+ len--;
+ }
+
+ return 0;
+}
+
+static int mt7620_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct mt7620_spi *ms = dev_get_priv(bus);
+ int total_size = bitlen >> 3;
+ int cs, ret = 0;
+
+ /*
+ * This driver only supports half-duplex, so complain and bail out
+ * upon full-duplex messages
+ */
+ if (dout && din) {
+ dev_err(dev, "mt7620_spi: Only half-duplex is supported\n");
+ return -EIO;
+ }
+
+ cs = spi_chip_select(dev);
+ if (cs < 0 || cs >= MT7620_SPI_NUM_CS) {
+ dev_err(dev, "mt7620_spi: Invalid chip select %d\n", cs);
+ return -EINVAL;
+ }
+
+ if (flags & SPI_XFER_BEGIN)
+ mt7620_spi_set_cs(ms, cs, true);
+
+ if (din)
+ ret = mt7620_spi_read(ms, cs, din, total_size);
+ else if (dout)
+ ret = mt7620_spi_write(ms, cs, dout, total_size);
+
+ if (ret)
+ dev_err(dev, "mt7620_spi: %s transaction timeout\n",
+ din ? "read" : "write");
+
+ if (flags & SPI_XFER_END)
+ mt7620_spi_set_cs(ms, cs, false);
+
+ return ret;
+}
+
+static int mt7620_spi_probe(struct udevice *dev)
+{
+ struct mt7620_spi *ms = dev_get_priv(dev);
+ struct clk clk;
+ int ret;
+
+ ms->regs = dev_remap_addr(dev);
+ if (!ms->regs)
+ return -EINVAL;
+
+ ms->m[0] = ms->regs + MT7620_SPI_MASTER1_OFF;
+ ms->m[1] = ms->regs + MT7620_SPI_MASTER2_OFF;
+
+ ret = clk_get_by_index(dev, 0, &clk);
+ if (ret < 0) {
+ dev_err(dev, "mt7620_spi: Please provide a clock!\n");
+ return ret;
+ }
+
+ clk_enable(&clk);
+
+ ms->sys_freq = clk_get_rate(&clk);
+ if (!ms->sys_freq) {
+ dev_err(dev, "mt7620_spi: Please provide a valid bus clock!\n");
+ return -EINVAL;
+ }
+
+ writel(ARB_EN, ms->regs + SPI_ARB);
+
+ return 0;
+}
+
+static const struct dm_spi_ops mt7620_spi_ops = {
+ .set_mode = mt7620_spi_set_mode,
+ .set_speed = mt7620_spi_set_speed,
+ .xfer = mt7620_spi_xfer,
+};
+
+static const struct udevice_id mt7620_spi_ids[] = {
+ { .compatible = "mediatek,mt7620-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(mt7620_spi) = {
+ .name = "mt7620_spi",
+ .id = UCLASS_SPI,
+ .of_match = mt7620_spi_ids,
+ .ops = &mt7620_spi_ops,
+ .priv_auto = sizeof(struct mt7620_spi),
+ .probe = mt7620_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/mt7621_spi.c b/roms/u-boot/drivers/spi/mt7621_spi.c
new file mode 100644
index 000000000..eb0931747
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mt7621_spi.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Stefan Roese <sr@denx.de>
+ *
+ * Derived from the Linux driver version drivers/spi/spi-mt7621.c
+ * Copyright (C) 2011 Sergiy <piratfm@gmail.com>
+ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <spi.h>
+#include <wait_bit.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+
+#define MT7621_RX_FIFO_LEN 32
+#define MT7621_TX_FIFO_LEN 36
+
+#define MT7621_SPI_TRANS 0x00
+#define MT7621_SPI_TRANS_START BIT(8)
+#define MT7621_SPI_TRANS_BUSY BIT(16)
+#define TRANS_ADDR_SZ GENMASK(20, 19)
+#define TRANS_ADDR_SZ_SHIFT 19
+#define TRANS_MOSI_BCNT GENMASK(3, 0)
+#define TRANS_MOSI_BCNT_SHIFT 0
+
+#define MT7621_SPI_OPCODE 0x04
+#define MT7621_SPI_DATA0 0x08
+#define MT7621_SPI_DATA4 0x18
+#define MT7621_SPI_MASTER 0x28
+#define MT7621_SPI_MOREBUF 0x2c
+#define MT7621_SPI_POLAR 0x38
+
+#define MT7621_LSB_FIRST BIT(3)
+#define MT7621_CPOL BIT(4)
+#define MT7621_CPHA BIT(5)
+
+#define MASTER_MORE_BUFMODE BIT(2)
+#define MASTER_RS_CLK_SEL GENMASK(27, 16)
+#define MASTER_RS_CLK_SEL_SHIFT 16
+#define MASTER_RS_SLAVE_SEL GENMASK(31, 29)
+
+#define MOREBUF_CMD_CNT GENMASK(29, 24)
+#define MOREBUF_CMD_CNT_SHIFT 24
+#define MOREBUF_MISO_CNT GENMASK(20, 12)
+#define MOREBUF_MISO_CNT_SHIFT 12
+#define MOREBUF_MOSI_CNT GENMASK(8, 0)
+#define MOREBUF_MOSI_CNT_SHIFT 0
+
+struct mt7621_spi {
+ void __iomem *base;
+ unsigned int sys_freq;
+};
+
+static void mt7621_spi_set_cs(struct mt7621_spi *rs, int cs, int enable)
+{
+ debug("%s: cs#%d -> %s\n", __func__, cs, enable ? "enable" : "disable");
+
+ if (enable) {
+ setbits_le32(rs->base + MT7621_SPI_MASTER,
+ MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE);
+ iowrite32(BIT(cs), rs->base + MT7621_SPI_POLAR);
+ } else {
+ iowrite32(0, rs->base + MT7621_SPI_POLAR);
+ iowrite32((2 << TRANS_ADDR_SZ_SHIFT) |
+ (1 << TRANS_MOSI_BCNT_SHIFT),
+ rs->base + MT7621_SPI_TRANS);
+ clrbits_le32(rs->base + MT7621_SPI_MASTER,
+ MASTER_RS_SLAVE_SEL | MASTER_MORE_BUFMODE);
+ }
+}
+
+static int mt7621_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct mt7621_spi *rs = dev_get_priv(bus);
+ u32 reg;
+
+ debug("%s: mode=0x%08x\n", __func__, mode);
+ reg = ioread32(rs->base + MT7621_SPI_MASTER);
+
+ reg &= ~MT7621_LSB_FIRST;
+ if (mode & SPI_LSB_FIRST)
+ reg |= MT7621_LSB_FIRST;
+
+ reg &= ~(MT7621_CPHA | MT7621_CPOL);
+ switch (mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ break;
+ case SPI_MODE_1:
+ reg |= MT7621_CPHA;
+ break;
+ case SPI_MODE_2:
+ reg |= MT7621_CPOL;
+ break;
+ case SPI_MODE_3:
+ reg |= MT7621_CPOL | MT7621_CPHA;
+ break;
+ }
+ iowrite32(reg, rs->base + MT7621_SPI_MASTER);
+
+ return 0;
+}
+
+static int mt7621_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct mt7621_spi *rs = dev_get_priv(bus);
+ u32 rate;
+ u32 reg;
+
+ debug("%s: speed=%d\n", __func__, speed);
+ rate = DIV_ROUND_UP(rs->sys_freq, speed);
+ debug("rate:%u\n", rate);
+
+ if (rate > 4097)
+ return -EINVAL;
+
+ if (rate < 2)
+ rate = 2;
+
+ reg = ioread32(rs->base + MT7621_SPI_MASTER);
+ reg &= ~MASTER_RS_CLK_SEL;
+ reg |= (rate - 2) << MASTER_RS_CLK_SEL_SHIFT;
+ iowrite32(reg, rs->base + MT7621_SPI_MASTER);
+
+ return 0;
+}
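A worked example of the clock-select computation above, with hypothetical numbers (220 MHz system clock, 10 MHz requested speed); the implied SCK assumes the hardware divides by clk_sel + 2, which is what the rate - 2 encoding suggests.

#include <stdio.h>

int main(void)
{
	unsigned int sys_freq = 220000000, speed = 10000000;
	unsigned int rate = (sys_freq + speed - 1) / speed;	/* DIV_ROUND_UP -> 22 */

	if (rate > 4097)
		return 1;	/* the driver returns -EINVAL here */
	if (rate < 2)
		rate = 2;

	/* MASTER_RS_CLK_SEL field = rate - 2 = 20; implied SCK = 220 MHz / 22 */
	printf("clk_sel=%u, implied SCK=%u Hz\n", rate - 2, sys_freq / rate);
	return 0;
}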
+
+static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
+{
+ int ret;
+
+ ret = wait_for_bit_le32(rs->base + MT7621_SPI_TRANS,
+ MT7621_SPI_TRANS_BUSY, 0, 10, 0);
+ if (ret)
+ pr_err("Timeout in %s!\n", __func__);
+
+ return ret;
+}
+
+static int mt7621_spi_read(struct mt7621_spi *rs, u8 *buf, size_t len)
+{
+ size_t rx_len;
+ int i, ret = 0;
+ u32 val = 0;
+
+ while (len) {
+ rx_len = min_t(size_t, len, MT7621_RX_FIFO_LEN);
+
+ iowrite32((rx_len * 8) << MOREBUF_MISO_CNT_SHIFT,
+ rs->base + MT7621_SPI_MOREBUF);
+ iowrite32(MT7621_SPI_TRANS_START, rs->base + MT7621_SPI_TRANS);
+
+ ret = mt7621_spi_wait_till_ready(rs);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < rx_len; i++) {
+ if ((i % 4) == 0)
+ val = ioread32(rs->base + MT7621_SPI_DATA0 + i);
+ *buf++ = val & 0xff;
+ val >>= 8;
+ }
+
+ len -= rx_len;
+ }
+
+ return ret;
+}
+
+static int mt7621_spi_write(struct mt7621_spi *rs, const u8 *buf, size_t len)
+{
+ size_t tx_len, opcode_len, dido_len;
+ int i, ret;
+ u32 val;
+
+ while (len) {
+ tx_len = min_t(size_t, len, MT7621_TX_FIFO_LEN);
+
+ opcode_len = min_t(size_t, tx_len, 4);
+ dido_len = tx_len - opcode_len;
+
+ val = 0;
+ for (i = 0; i < opcode_len; i++) {
+ val <<= 8;
+ val |= *buf++;
+ }
+
+ iowrite32(val, rs->base + MT7621_SPI_OPCODE);
+
+ val = 0;
+ for (i = 0; i < dido_len; i++) {
+ val |= (*buf++) << ((i % 4) * 8);
+
+ if ((i % 4 == 3) || (i == dido_len - 1)) {
+ iowrite32(val, rs->base + MT7621_SPI_DATA0 +
+ (i & ~3));
+ val = 0;
+ }
+ }
+
+ iowrite32(((opcode_len * 8) << MOREBUF_CMD_CNT_SHIFT) |
+ ((dido_len * 8) << MOREBUF_MOSI_CNT_SHIFT),
+ rs->base + MT7621_SPI_MOREBUF);
+ iowrite32(MT7621_SPI_TRANS_START, rs->base + MT7621_SPI_TRANS);
+
+ ret = mt7621_spi_wait_till_ready(rs);
+ if (ret)
+ return ret;
+
+ len -= tx_len;
+ }
+
+ return 0;
+}
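A small host-side sketch of how the write path above packs a hypothetical 6-byte buffer into the OPCODE and DATA0 FIFO registers: the first four bytes go MSB-first into SPI_OPCODE, the remainder is packed little-endian into SPI_DATA0.

#include <stdio.h>

int main(void)
{
	const unsigned char buf[] = { 0x02, 0xAA, 0xBB, 0xCC, 0x11, 0x22 };
	unsigned int opcode = 0, data0 = 0;
	int i;

	for (i = 0; i < 4; i++)			/* first 4 bytes, MSB first */
		opcode = (opcode << 8) | buf[i];

	for (i = 0; i < 2; i++)			/* remainder, little-endian */
		data0 |= (unsigned int)buf[4 + i] << (i * 8);

	/* prints SPI_OPCODE=0x02aabbcc SPI_DATA0=0x00002211 */
	printf("SPI_OPCODE=0x%08x SPI_DATA0=0x%08x\n", opcode, data0);
	return 0;
}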
+
+static int mt7621_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct mt7621_spi *rs = dev_get_priv(bus);
+ int total_size = bitlen >> 3;
+ int ret = 0;
+
+ debug("%s: dout=%p, din=%p, len=%x, flags=%lx\n", __func__, dout, din,
+ total_size, flags);
+
+ /*
+ * This driver only supports half-duplex, so complain and bail out
+ * upon full-duplex messages
+ */
+ if (dout && din) {
+ printf("Only half-duplex SPI transfer supported\n");
+ return -EIO;
+ }
+
+ mt7621_spi_wait_till_ready(rs);
+
+ /*
+ * Set CS active at the start of an SPI message. The message can
+ * be split across multiple calls to this xfer function.
+ */
+ if (flags & SPI_XFER_BEGIN)
+ mt7621_spi_set_cs(rs, spi_chip_select(dev), 1);
+
+ if (din)
+ ret = mt7621_spi_read(rs, din, total_size);
+ else if (dout)
+ ret = mt7621_spi_write(rs, dout, total_size);
+
+ if (flags & SPI_XFER_END)
+ mt7621_spi_set_cs(rs, spi_chip_select(dev), 0);
+
+ return ret;
+}
+
+static int mt7621_spi_probe(struct udevice *dev)
+{
+ struct mt7621_spi *rs = dev_get_priv(dev);
+ struct clk clk;
+ int ret;
+
+ rs->base = dev_remap_addr(dev);
+ if (!rs->base)
+ return -EINVAL;
+
+ ret = clk_get_by_index(dev, 0, &clk);
+ if (ret < 0) {
+ printf("Please provide a clock!\n");
+ return ret;
+ }
+
+ clk_enable(&clk);
+
+ rs->sys_freq = clk_get_rate(&clk);
+ if (!rs->sys_freq) {
+ printf("Please provide a valid clock!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct dm_spi_ops mt7621_spi_ops = {
+ .set_mode = mt7621_spi_set_mode,
+ .set_speed = mt7621_spi_set_speed,
+ .xfer = mt7621_spi_xfer,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id mt7621_spi_ids[] = {
+ { .compatible = "ralink,mt7621-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(mt7621_spi) = {
+ .name = "mt7621_spi",
+ .id = UCLASS_SPI,
+ .of_match = mt7621_spi_ids,
+ .ops = &mt7621_spi_ops,
+ .priv_auto = sizeof(struct mt7621_spi),
+ .probe = mt7621_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/mtk_snfi_spi.c b/roms/u-boot/drivers/spi/mtk_snfi_spi.c
new file mode 100644
index 000000000..b6ab5fa3a
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mtk_snfi_spi.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Weijie Gao <weijie.gao@mediatek.com>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <errno.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <stdbool.h>
+#include <watchdog.h>
+#include <dm/pinctrl.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#define SNFI_MAC_CTL 0x500
+#define MAC_XIO_SEL BIT(4)
+#define SF_MAC_EN BIT(3)
+#define SF_TRIG BIT(2)
+#define WIP_READY BIT(1)
+#define WIP BIT(0)
+
+#define SNFI_MAC_OUTL 0x504
+#define SNFI_MAC_INL 0x508
+
+#define SNFI_MISC_CTL 0x538
+#define SW_RST BIT(28)
+#define FIFO_RD_LTC_SHIFT 25
+#define FIFO_RD_LTC GENMASK(26, 25)
+#define LATCH_LAT_SHIFT 8
+#define LATCH_LAT GENMASK(9, 8)
+#define CS_DESELECT_CYC_SHIFT 0
+#define CS_DESELECT_CYC GENMASK(4, 0)
+
+#define SNF_STA_CTL1 0x550
+#define SPI_STATE GENMASK(3, 0)
+
+#define SNFI_GPRAM_OFFSET 0x800
+#define SNFI_GPRAM_SIZE 0x80
+
+#define SNFI_POLL_INTERVAL 500000
+#define SNFI_RST_POLL_INTERVAL 1000000
+
+struct mtk_snfi_priv {
+ void __iomem *base;
+
+ struct clk nfi_clk;
+ struct clk pad_clk;
+};
+
+static int mtk_snfi_adjust_op_size(struct spi_slave *slave,
+ struct spi_mem_op *op)
+{
+ u32 nbytes;
+
+ /*
+ * When there is input data, it will be appended after the output
+ * data in the GPRAM. So the total size of either pure output data
+ * or the output+input data must not exceed the GPRAM size.
+ */
+
+ nbytes = sizeof(op->cmd.opcode) + op->addr.nbytes +
+ op->dummy.nbytes;
+
+ if (nbytes + op->data.nbytes <= SNFI_GPRAM_SIZE)
+ return 0;
+
+ if (nbytes >= SNFI_GPRAM_SIZE)
+ return -ENOTSUPP;
+
+ op->data.nbytes = SNFI_GPRAM_SIZE - nbytes;
+
+ return 0;
+}
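A worked example of the clamping above, for a hypothetical 1-byte opcode, 4-byte address, 256-byte read; the spi-mem core then issues the remaining bytes as follow-up operations.

#include <stdio.h>

#define SNFI_GPRAM_SIZE 0x80

int main(void)
{
	unsigned int header = 1 + 4 + 0;	/* opcode + address + dummy bytes */
	unsigned int data = 256;		/* requested read length */

	if (header + data > SNFI_GPRAM_SIZE)
		data = SNFI_GPRAM_SIZE - header;	/* clamped to 123 bytes */

	printf("adjusted data length: %u bytes\n", data);
	return 0;
}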
+
+static bool mtk_snfi_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ if (op->cmd.buswidth > 1 || op->addr.buswidth > 1 ||
+ op->dummy.buswidth > 1 || op->data.buswidth > 1)
+ return false;
+
+ return true;
+}
+
+static int mtk_snfi_mac_trigger(struct mtk_snfi_priv *priv,
+ struct udevice *bus, u32 outlen, u32 inlen)
+{
+ int ret;
+ u32 val;
+
+#ifdef CONFIG_PINCTRL
+ pinctrl_select_state(bus, "snfi");
+#endif
+
+ writel(SF_MAC_EN, priv->base + SNFI_MAC_CTL);
+ writel(outlen, priv->base + SNFI_MAC_OUTL);
+ writel(inlen, priv->base + SNFI_MAC_INL);
+
+ writel(SF_MAC_EN | SF_TRIG, priv->base + SNFI_MAC_CTL);
+
+ ret = readl_poll_timeout(priv->base + SNFI_MAC_CTL, val,
+ val & WIP_READY, SNFI_POLL_INTERVAL);
+ if (ret) {
+ printf("%s: timed out waiting for WIP_READY\n", __func__);
+ goto cleanup;
+ }
+
+ ret = readl_poll_timeout(priv->base + SNFI_MAC_CTL, val,
+ !(val & WIP), SNFI_POLL_INTERVAL);
+ if (ret)
+ printf("%s: timed out waiting for WIP cleared\n", __func__);
+
+ writel(0, priv->base + SNFI_MAC_CTL);
+
+cleanup:
+#ifdef CONFIG_PINCTRL
+ pinctrl_select_state(bus, "default");
+#endif
+
+ return ret;
+}
+
+static int mtk_snfi_mac_reset(struct mtk_snfi_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ setbits_32(priv->base + SNFI_MISC_CTL, SW_RST);
+
+ ret = readl_poll_timeout(priv->base + SNF_STA_CTL1, val,
+ !(val & SPI_STATE), SNFI_POLL_INTERVAL);
+ if (ret)
+ printf("%s: failed to reset snfi mac\n", __func__);
+
+ writel((2 << FIFO_RD_LTC_SHIFT) |
+ (10 << CS_DESELECT_CYC_SHIFT),
+ priv->base + SNFI_MISC_CTL);
+
+ return ret;
+}
+
+static void mtk_snfi_copy_to_gpram(struct mtk_snfi_priv *priv,
+ const void *data, size_t len)
+{
+ void __iomem *gpram = priv->base + SNFI_GPRAM_OFFSET;
+ size_t i, n = (len + sizeof(u32) - 1) / sizeof(u32);
+ const u32 *buff = data;
+
+ /*
+ * The output data is always copied to the beginning of the GPRAM.
+ * Word writes are used for better performance.
+ *
+ * Trailing bytes in the last word are don't-care.
+ */
+
+ for (i = 0; i < n; i++)
+ writel(buff[i], gpram + i * sizeof(u32));
+}
+
+static void mtk_snfi_copy_from_gpram(struct mtk_snfi_priv *priv, u8 *cache,
+ void *data, size_t pos, size_t len)
+{
+ void __iomem *gpram = priv->base + SNFI_GPRAM_OFFSET;
+ u32 *buff = (u32 *)cache;
+ size_t i, off, end;
+
+ /* Start position in the buffer */
+ off = pos & (sizeof(u32) - 1);
+
+ /* End position for copy */
+ end = (len + pos + sizeof(u32) - 1) & (~(sizeof(u32) - 1));
+
+ /* Start position for copy */
+ pos &= ~(sizeof(u32) - 1);
+
+ /*
+ * Read word-aligned data from the GPRAM into the buffer first.
+ * Word reads are used for better performance.
+ */
+ i = 0;
+ while (pos < end) {
+ buff[i++] = readl(gpram + pos);
+ pos += sizeof(u32);
+ }
+
+ /* Copy rx data */
+ memcpy(data, cache + off, len);
+}
+
+static int mtk_snfi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct udevice *bus = dev_get_parent(slave->dev);
+ struct mtk_snfi_priv *priv = dev_get_priv(bus);
+ u8 gpram_cache[SNFI_GPRAM_SIZE];
+ u32 i, len = 0, inlen = 0;
+ int addr_sh;
+ int ret;
+
+ WATCHDOG_RESET();
+
+ ret = mtk_snfi_mac_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Put opcode */
+ gpram_cache[len++] = op->cmd.opcode;
+
+ /* Put address */
+ addr_sh = (op->addr.nbytes - 1) * 8;
+ while (addr_sh >= 0) {
+ gpram_cache[len++] = (op->addr.val >> addr_sh) & 0xff;
+ addr_sh -= 8;
+ }
+
+ /* Put dummy bytes */
+ for (i = 0; i < op->dummy.nbytes; i++)
+ gpram_cache[len++] = 0;
+
+ /* Put output data */
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) {
+ memcpy(gpram_cache + len, op->data.buf.out, op->data.nbytes);
+ len += op->data.nbytes;
+ }
+
+ /* Copy final output data to GPRAM */
+ mtk_snfi_copy_to_gpram(priv, gpram_cache, len);
+
+ /* Start one SPI transaction */
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ inlen = op->data.nbytes;
+
+ ret = mtk_snfi_mac_trigger(priv, bus, len, inlen);
+ if (ret)
+ return ret;
+
+ /* Copy input data from GPRAM */
+ if (inlen)
+ mtk_snfi_copy_from_gpram(priv, gpram_cache, op->data.buf.in,
+ len, inlen);
+
+ return 0;
+}
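As a hedged usage sketch, this is roughly how a spi-mem client could issue a JEDEC ID read (opcode 0x9f) through this controller; the slave pointer and error handling are assumed to come from the usual uclass plumbing, and the helper name is hypothetical.

#include <spi.h>
#include <spi-mem.h>

/* Hypothetical helper, not part of the driver: read a JEDEC ID through the
 * generic spi-mem path, which ends up in mtk_snfi_exec_op() above.
 */
static int snfi_read_id_example(struct spi_slave *slave, u8 id[3])
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	/* spi-mem calls adjust_op_size()/supports_op()/exec_op() for us */
	return spi_mem_exec_op(slave, &op);
}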
+
+static int mtk_snfi_spi_probe(struct udevice *bus)
+{
+ struct mtk_snfi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ priv->base = dev_read_addr_ptr(bus);
+ if (!priv->base)
+ return -EINVAL;
+
+ ret = clk_get_by_name(bus, "nfi_clk", &priv->nfi_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_get_by_name(bus, "pad_clk", &priv->pad_clk);
+ if (ret < 0)
+ return ret;
+
+ clk_enable(&priv->nfi_clk);
+ clk_enable(&priv->pad_clk);
+
+ return 0;
+}
+
+static int mtk_snfi_set_speed(struct udevice *bus, uint speed)
+{
+ /*
+ * The SNFI does not have a bus clock divider.
+ * The bus clock is set in dts (pad_clk, UNIVPLL2_D8 = 50MHz).
+ */
+
+ return 0;
+}
+
+static int mtk_snfi_set_mode(struct udevice *bus, uint mode)
+{
+ /* The SNFI supports only mode 0 */
+
+ if (mode)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops mtk_snfi_mem_ops = {
+ .adjust_op_size = mtk_snfi_adjust_op_size,
+ .supports_op = mtk_snfi_supports_op,
+ .exec_op = mtk_snfi_exec_op,
+};
+
+static const struct dm_spi_ops mtk_snfi_spi_ops = {
+ .mem_ops = &mtk_snfi_mem_ops,
+ .set_speed = mtk_snfi_set_speed,
+ .set_mode = mtk_snfi_set_mode,
+};
+
+static const struct udevice_id mtk_snfi_spi_ids[] = {
+ { .compatible = "mediatek,mtk-snfi-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(mtk_snfi_spi) = {
+ .name = "mtk_snfi_spi",
+ .id = UCLASS_SPI,
+ .of_match = mtk_snfi_spi_ids,
+ .ops = &mtk_snfi_spi_ops,
+ .priv_auto = sizeof(struct mtk_snfi_priv),
+ .probe = mtk_snfi_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/mtk_snor.c b/roms/u-boot/drivers/spi/mtk_snor.c
new file mode 100644
index 000000000..04f588a75
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mtk_snor.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek SPI-NOR controller driver
+//
+// Copyright (C) 2020 SkyLake Huang <SkyLake.Huang@mediatek.com>
+//
+// Some parts are based on drivers/spi/spi-mtk-nor.c of linux version
+
+#include <clk.h>
+#include <common.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <dm/device.h>
+#include <dm/device_compat.h>
+#include <dm/devres.h>
+#include <dm/pinctrl.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <stdbool.h>
+#include <watchdog.h>
+#include <linux/dma-mapping.h>
+
+#define DRIVER_NAME "mtk-spi-nor"
+
+#define MTK_NOR_REG_CMD 0x00
+#define MTK_NOR_CMD_WRSR BIT(5)
+#define MTK_NOR_CMD_WRITE BIT(4)
+#define MTK_NOR_CMD_PROGRAM BIT(2)
+#define MTK_NOR_CMD_RDSR BIT(1)
+#define MTK_NOR_CMD_READ BIT(0)
+#define MTK_NOR_CMD_MASK GENMASK(5, 0)
+
+#define MTK_NOR_REG_PRG_CNT 0x04
+#define MTK_NOR_REG_RDSR 0x08
+#define MTK_NOR_REG_RDATA 0x0c
+
+#define MTK_NOR_REG_RADR0 0x10
+#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
+#define MTK_NOR_REG_RADR3 0xc8
+
+#define MTK_NOR_REG_WDATA 0x1c
+
+#define MTK_NOR_REG_PRGDATA0 0x20
+#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
+#define MTK_NOR_REG_PRGDATA_MAX 5
+
+#define MTK_NOR_REG_SHIFT0 0x38
+#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
+#define MTK_NOR_REG_SHIFT_MAX 9
+
+#define MTK_NOR_REG_CFG1 0x60
+#define MTK_NOR_FAST_READ BIT(0)
+
+#define MTK_NOR_REG_CFG2 0x64
+#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
+#define MTK_NOR_WR_BUF_EN BIT(0)
+
+#define MTK_NOR_REG_PP_DATA 0x98
+
+#define MTK_NOR_REG_IRQ_STAT 0xa8
+#define MTK_NOR_REG_IRQ_EN 0xac
+#define MTK_NOR_IRQ_DMA BIT(7)
+#define MTK_NOR_IRQ_WRSR BIT(5)
+#define MTK_NOR_IRQ_MASK GENMASK(7, 0)
+
+#define MTK_NOR_REG_CFG3 0xb4
+#define MTK_NOR_DISABLE_WREN BIT(7)
+#define MTK_NOR_DISABLE_SR_POLL BIT(5)
+
+#define MTK_NOR_REG_WP 0xc4
+#define MTK_NOR_ENABLE_SF_CMD 0x30
+
+#define MTK_NOR_REG_BUSCFG 0xcc
+#define MTK_NOR_4B_ADDR BIT(4)
+#define MTK_NOR_QUAD_ADDR BIT(3)
+#define MTK_NOR_QUAD_READ BIT(2)
+#define MTK_NOR_DUAL_ADDR BIT(1)
+#define MTK_NOR_DUAL_READ BIT(0)
+#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)
+
+#define MTK_NOR_REG_DMA_CTL 0x718
+#define MTK_NOR_DMA_START BIT(0)
+
+#define MTK_NOR_REG_DMA_FADR 0x71c
+#define MTK_NOR_REG_DMA_DADR 0x720
+#define MTK_NOR_REG_DMA_END_DADR 0x724
+
+#define MTK_NOR_PRG_MAX_SIZE 6
+// Read DMA source/destination addresses have to be 16-byte aligned
+#define MTK_NOR_DMA_ALIGN 16
+#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
+// and we allocate a bounce buffer if destination address isn't aligned.
+#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE
+
+// Buffered page program can do one 128-byte transfer
+#define MTK_NOR_PP_SIZE 128
+
+#define CLK_TO_US(priv, clkcnt) DIV_ROUND_UP(clkcnt, (priv)->spi_freq / 1000000)
+
+#define MTK_NOR_UNLOCK_ALL 0x0
+
+struct mtk_snor_priv {
+ struct device *dev;
+ void __iomem *base;
+ u8 *buffer;
+ struct clk spi_clk;
+ struct clk ctlr_clk;
+ unsigned int spi_freq;
+ bool wbuf_en;
+};
+
+static inline void mtk_snor_rmw(struct mtk_snor_priv *priv, u32 reg, u32 set,
+ u32 clr)
+{
+ u32 val = readl(priv->base + reg);
+
+ val &= ~clr;
+ val |= set;
+ writel(val, priv->base + reg);
+}
+
+static inline int mtk_snor_cmd_exec(struct mtk_snor_priv *priv, u32 cmd,
+ ulong clk)
+{
+ unsigned long long delay = CLK_TO_US(priv, clk);
+ u32 reg;
+ int ret;
+
+ writel(cmd, priv->base + MTK_NOR_REG_CMD);
+ delay = (delay + 1) * 200;
+ ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CMD, reg,
+ !(reg & cmd), delay);
+ if (ret < 0)
+ dev_err(priv->dev, "command %u timeout.\n", cmd);
+ return ret;
+}
+
+static void mtk_snor_set_addr(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ u32 addr = op->addr.val;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ writeb(addr & 0xff, priv->base + MTK_NOR_REG_RADR(i));
+ addr >>= 8;
+ }
+ if (op->addr.nbytes == 4) {
+ writeb(addr & 0xff, priv->base + MTK_NOR_REG_RADR3);
+ mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
+ } else {
+ mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
+ }
+}
+
+static bool need_bounce(const struct spi_mem_op *op)
+{
+ return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
+}
+
+static int mtk_snor_adjust_op_size(struct spi_slave *slave,
+ struct spi_mem_op *op)
+{
+ if (!op->data.nbytes)
+ return 0;
+
+ if (op->addr.nbytes == 3 || op->addr.nbytes == 4) {
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ // limit size to prevent timeout calculation overflow
+ if (op->data.nbytes > 0x400000)
+ op->data.nbytes = 0x400000;
+ if (op->addr.val & MTK_NOR_DMA_ALIGN_MASK ||
+ op->data.nbytes < MTK_NOR_DMA_ALIGN)
+ op->data.nbytes = 1;
+ else if (!need_bounce(op))
+ op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
+ else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
+ op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
+ return 0;
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes >= MTK_NOR_PP_SIZE)
+ op->data.nbytes = MTK_NOR_PP_SIZE;
+ else
+ op->data.nbytes = 1;
+ return 0;
+ }
+ }
+
+ return 0;
+}
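A host-side sketch of the read-size rules above, using a hypothetical aligned flash offset and an oversized request; buffer alignment then decides between the DMA and bounce-buffer paths in the real driver.

#include <stdio.h>

#define MTK_NOR_DMA_ALIGN	16
#define MTK_NOR_DMA_ALIGN_MASK	(MTK_NOR_DMA_ALIGN - 1)

int main(void)
{
	unsigned int addr = 0x1000;		/* 16-byte aligned flash offset */
	unsigned int nbytes = 0x500007;		/* oversized, unaligned length */

	if (nbytes > 0x400000)			/* keep timeout arithmetic sane */
		nbytes = 0x400000;

	if ((addr & MTK_NOR_DMA_ALIGN_MASK) || nbytes < MTK_NOR_DMA_ALIGN)
		nbytes = 1;			/* single-byte PIO fallback */
	else
		nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;	/* DMA-friendly length */

	printf("adjusted read length: 0x%x\n", nbytes);	/* 0x400000 */
	return 0;
}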
+
+static bool mtk_snor_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ /* This controller only supports 1-1-1 write mode */
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ (op->cmd.buswidth != 1 || op->data.buswidth != 1))
+ return false;
+
+ return true;
+}
+
+static void mtk_snor_setup_bus(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ u32 reg = 0;
+
+ if (op->addr.nbytes == 4)
+ reg |= MTK_NOR_4B_ADDR;
+
+ if (op->data.buswidth == 4) {
+ reg |= MTK_NOR_QUAD_READ;
+ writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA(4));
+ if (op->addr.buswidth == 4)
+ reg |= MTK_NOR_QUAD_ADDR;
+ } else if (op->data.buswidth == 2) {
+ reg |= MTK_NOR_DUAL_READ;
+ writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA(3));
+ if (op->addr.buswidth == 2)
+ reg |= MTK_NOR_DUAL_ADDR;
+ } else {
+ if (op->cmd.opcode == 0x0b)
+ mtk_snor_rmw(priv, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ,
+ 0);
+ else
+ mtk_snor_rmw(priv, MTK_NOR_REG_CFG1, 0,
+ MTK_NOR_FAST_READ);
+ }
+ mtk_snor_rmw(priv, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
+}
+
+static int mtk_snor_dma_exec(struct mtk_snor_priv *priv, u32 from,
+ unsigned int length, dma_addr_t dma_addr)
+{
+ int ret = 0;
+ ulong delay;
+ u32 reg;
+
+ writel(from, priv->base + MTK_NOR_REG_DMA_FADR);
+ writel(dma_addr, priv->base + MTK_NOR_REG_DMA_DADR);
+ writel(dma_addr + length, priv->base + MTK_NOR_REG_DMA_END_DADR);
+
+ mtk_snor_rmw(priv, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
+
+ delay = CLK_TO_US(priv, (length + 5) * BITS_PER_BYTE);
+
+ delay = (delay + 1) * 100;
+ ret = readl_poll_timeout(priv->base + MTK_NOR_REG_DMA_CTL, reg,
+ !(reg & MTK_NOR_DMA_START), delay);
+
+ if (ret < 0)
+ dev_err(priv->dev, "dma read timeout.\n");
+
+ return ret;
+}
+
+static int mtk_snor_read_bounce(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ unsigned int rdlen;
+ int ret;
+
+ if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
+ rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) &
+ ~MTK_NOR_DMA_ALIGN_MASK;
+ else
+ rdlen = op->data.nbytes;
+
+ ret = mtk_snor_dma_exec(priv, op->addr.val, rdlen,
+ (dma_addr_t)priv->buffer);
+
+ if (!ret)
+ memcpy(op->data.buf.in, priv->buffer, op->data.nbytes);
+
+ return ret;
+}
+
+static int mtk_snor_read_dma(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ int ret;
+ dma_addr_t dma_addr;
+
+ if (need_bounce(op))
+ return mtk_snor_read_bounce(priv, op);
+
+ dma_addr = dma_map_single(op->data.buf.in, op->data.nbytes,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(priv->dev, dma_addr))
+ return -EINVAL;
+
+ ret = mtk_snor_dma_exec(priv, op->addr.val, op->data.nbytes, dma_addr);
+
+ dma_unmap_single(dma_addr, op->data.nbytes, DMA_FROM_DEVICE);
+
+ return ret;
+}
+
+static int mtk_snor_read_pio(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ u8 *buf = op->data.buf.in;
+ int ret;
+
+ ret = mtk_snor_cmd_exec(priv, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
+ if (!ret)
+ buf[0] = readb(priv->base + MTK_NOR_REG_RDATA);
+ return ret;
+}
+
+static int mtk_snor_write_buffer_enable(struct mtk_snor_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ if (priv->wbuf_en)
+ return 0;
+
+ val = readl(priv->base + MTK_NOR_REG_CFG2);
+ writel(val | MTK_NOR_WR_BUF_EN, priv->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CFG2, val,
+ val & MTK_NOR_WR_BUF_EN, 10000);
+ if (!ret)
+ priv->wbuf_en = true;
+ return ret;
+}
+
+static int mtk_snor_write_buffer_disable(struct mtk_snor_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ if (!priv->wbuf_en)
+ return 0;
+ val = readl(priv->base + MTK_NOR_REG_CFG2);
+ writel(val & ~MTK_NOR_WR_BUF_EN, priv->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(priv->base + MTK_NOR_REG_CFG2, val,
+ !(val & MTK_NOR_WR_BUF_EN), 10000);
+ if (!ret)
+ priv->wbuf_en = false;
+ return ret;
+}
+
+static int mtk_snor_pp_buffered(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ u32 val;
+ int ret, i;
+
+ ret = mtk_snor_write_buffer_enable(priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < op->data.nbytes; i += 4) {
+ val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
+ buf[i];
+ writel(val, priv->base + MTK_NOR_REG_PP_DATA);
+ }
+ mtk_snor_cmd_exec(priv, MTK_NOR_CMD_WRITE,
+ (op->data.nbytes + 5) * BITS_PER_BYTE);
+ return mtk_snor_write_buffer_disable(priv);
+}
+
+static int mtk_snor_pp_unbuffered(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ int ret;
+
+ ret = mtk_snor_write_buffer_disable(priv);
+ if (ret < 0)
+ return ret;
+ writeb(buf[0], priv->base + MTK_NOR_REG_WDATA);
+ return mtk_snor_cmd_exec(priv, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
+}
+
+static int mtk_snor_cmd_program(struct mtk_snor_priv *priv,
+ const struct spi_mem_op *op)
+{
+ u32 tx_len = 0;
+ u32 trx_len = 0;
+ int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
+ void __iomem *reg;
+ u8 *txbuf;
+ int tx_cnt = 0;
+ u8 *rxbuf = op->data.buf.in;
+ int i = 0;
+
+ tx_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+ trx_len = tx_len + op->data.nbytes;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ tx_len += op->data.nbytes;
+
+ txbuf = kmalloc_array(tx_len, sizeof(u8), GFP_KERNEL);
+ if (!txbuf)
+ return -ENOMEM;
+ memset(txbuf, 0x0, tx_len * sizeof(u8));
+
+ /* Join all bytes to be transferred */
+ txbuf[tx_cnt] = op->cmd.opcode;
+ tx_cnt++;
+ for (i = op->addr.nbytes; i > 0; i--, tx_cnt++)
+ txbuf[tx_cnt] = ((u8 *)&op->addr.val)[i - 1];
+ for (i = op->dummy.nbytes; i > 0; i--, tx_cnt++)
+ txbuf[tx_cnt] = 0x0;
+ if (op->data.dir == SPI_MEM_DATA_OUT)
+ for (i = op->data.nbytes; i > 0; i--, tx_cnt++)
+ txbuf[tx_cnt] = ((u8 *)op->data.buf.out)[i - 1];
+
+ for (i = MTK_NOR_REG_PRGDATA_MAX; i >= 0; i--)
+ writeb(0, priv->base + MTK_NOR_REG_PRGDATA(i));
+
+ for (i = 0; i < tx_len; i++, reg_offset--)
+ writeb(txbuf[i], priv->base + MTK_NOR_REG_PRGDATA(reg_offset));
+
+ kfree(txbuf);
+
+ writel(trx_len * BITS_PER_BYTE, priv->base + MTK_NOR_REG_PRG_CNT);
+
+ mtk_snor_cmd_exec(priv, MTK_NOR_CMD_PROGRAM, trx_len * BITS_PER_BYTE);
+
+ reg_offset = op->data.nbytes - 1;
+ for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
+ reg = priv->base + MTK_NOR_REG_SHIFT(reg_offset);
+ rxbuf[i] = readb(reg);
+ }
+
+ return 0;
+}
+
+static int mtk_snor_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct udevice *bus = dev_get_parent(slave->dev);
+ struct mtk_snor_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ if (op->data.dir == SPI_MEM_NO_DATA || op->addr.nbytes == 0) {
+ return mtk_snor_cmd_program(priv, op);
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ mtk_snor_set_addr(priv, op);
+ writeb(op->cmd.opcode, priv->base + MTK_NOR_REG_PRGDATA0);
+ if (op->data.nbytes == MTK_NOR_PP_SIZE)
+ return mtk_snor_pp_buffered(priv, op);
+ return mtk_snor_pp_unbuffered(priv, op);
+ } else if (op->data.dir == SPI_MEM_DATA_IN) {
+ ret = mtk_snor_write_buffer_disable(priv);
+ if (ret < 0)
+ return ret;
+ mtk_snor_setup_bus(priv, op);
+ if (op->data.nbytes == 1) {
+ mtk_snor_set_addr(priv, op);
+ return mtk_snor_read_pio(priv, op);
+ } else {
+ return mtk_snor_read_dma(priv, op);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static int mtk_snor_probe(struct udevice *bus)
+{
+ struct mtk_snor_priv *priv = dev_get_priv(bus);
+ u8 *buffer;
+ int ret;
+ u32 reg;
+
+ priv->base = (void __iomem *)devfdt_get_addr(bus);
+ if (!priv->base)
+ return -EINVAL;
+
+ ret = clk_get_by_name(bus, "spi", &priv->spi_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_get_by_name(bus, "sf", &priv->ctlr_clk);
+ if (ret < 0)
+ return ret;
+
+ buffer = devm_kmalloc(bus, MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
+ GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+ if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
+ buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
+ ~MTK_NOR_DMA_ALIGN_MASK);
+ priv->buffer = buffer;
+
+ clk_enable(&priv->spi_clk);
+ clk_enable(&priv->ctlr_clk);
+
+ priv->spi_freq = clk_get_rate(&priv->spi_clk);
+ printf("spi frequency: %d Hz\n", priv->spi_freq);
+
+ /* With this setting, we issue one command at a time to
+ * accommodate the SPI-mem framework.
+ */
+ writel(MTK_NOR_ENABLE_SF_CMD, priv->base + MTK_NOR_REG_WP);
+ mtk_snor_rmw(priv, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
+ mtk_snor_rmw(priv, MTK_NOR_REG_CFG3,
+ MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
+
+ /* Unlock all blocks using the write status command.
+ * SPI-MEM has not implemented an unlock procedure for MXIC devices
+ * yet, so this may be removed later.
+ */
+ writel(2 * BITS_PER_BYTE, priv->base + MTK_NOR_REG_PRG_CNT);
+ writel(MTK_NOR_UNLOCK_ALL, priv->base + MTK_NOR_REG_PRGDATA(5));
+ writel(MTK_NOR_IRQ_WRSR, priv->base + MTK_NOR_REG_IRQ_EN);
+ writel(MTK_NOR_CMD_WRSR, priv->base + MTK_NOR_REG_CMD);
+ ret = readl_poll_timeout(priv->base + MTK_NOR_REG_IRQ_STAT, reg,
+ !(reg & MTK_NOR_IRQ_WRSR),
+ ((3 * BITS_PER_BYTE) + 1) * 200);
+
+ return 0;
+}
+
+static int mtk_snor_set_speed(struct udevice *bus, uint speed)
+{
+ /* MTK's SNOR controller does not have a bus clock divider.
+ * The maximum bus clock is set up in the device tree.
+ */
+
+ return 0;
+}
+
+static int mtk_snor_set_mode(struct udevice *bus, uint mode)
+{
+ /* The bus mode is set up later, for each transfer. */
+ return 0;
+}
+
+static const struct spi_controller_mem_ops mtk_snor_mem_ops = {
+ .adjust_op_size = mtk_snor_adjust_op_size,
+ .supports_op = mtk_snor_supports_op,
+ .exec_op = mtk_snor_exec_op
+};
+
+static const struct dm_spi_ops mtk_snor_ops = {
+ .mem_ops = &mtk_snor_mem_ops,
+ .set_speed = mtk_snor_set_speed,
+ .set_mode = mtk_snor_set_mode,
+};
+
+static const struct udevice_id mtk_snor_ids[] = {
+ { .compatible = "mediatek,mtk-snor" },
+ {}
+};
+
+U_BOOT_DRIVER(mtk_snor) = {
+ .name = "mtk_snor",
+ .id = UCLASS_SPI,
+ .of_match = mtk_snor_ids,
+ .ops = &mtk_snor_ops,
+ .priv_auto = sizeof(struct mtk_snor_priv),
+ .probe = mtk_snor_probe,
+};
diff --git a/roms/u-boot/drivers/spi/mvebu_a3700_spi.c b/roms/u-boot/drivers/spi/mvebu_a3700_spi.c
new file mode 100644
index 000000000..b1dce048a
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mvebu_a3700_spi.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2015 Marvell International Ltd.
+ *
+ * Copyright (C) 2016 Stefan Roese <sr@denx.de>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <clk.h>
+#include <wait_bit.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <asm/gpio.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define MVEBU_SPI_A3700_XFER_RDY BIT(1)
+#define MVEBU_SPI_A3700_FIFO_FLUSH BIT(9)
+#define MVEBU_SPI_A3700_BYTE_LEN BIT(5)
+#define MVEBU_SPI_A3700_CLK_PHA BIT(6)
+#define MVEBU_SPI_A3700_CLK_POL BIT(7)
+#define MVEBU_SPI_A3700_FIFO_EN BIT(17)
+#define MVEBU_SPI_A3700_SPI_EN_0 BIT(16)
+#define MVEBU_SPI_A3700_CLK_PRESCALE_MASK 0x1f
+
+#define MAX_CS_COUNT 4
+
+/* SPI registers */
+struct spi_reg {
+ u32 ctrl; /* 0x10600 */
+ u32 cfg; /* 0x10604 */
+ u32 dout; /* 0x10608 */
+ u32 din; /* 0x1060c */
+};
+
+struct mvebu_spi_plat {
+ struct spi_reg *spireg;
+ struct clk clk;
+ struct gpio_desc cs_gpios[MAX_CS_COUNT];
+};
+
+static void spi_cs_activate(struct mvebu_spi_plat *plat, int cs)
+{
+ if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs]))
+ dm_gpio_set_value(&plat->cs_gpios[cs], 1);
+ else
+ setbits_le32(&plat->spireg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
+}
+
+static void spi_cs_deactivate(struct mvebu_spi_plat *plat, int cs)
+{
+ if (CONFIG_IS_ENABLED(DM_GPIO) && dm_gpio_is_valid(&plat->cs_gpios[cs]))
+ dm_gpio_set_value(&plat->cs_gpios[cs], 0);
+ else
+ clrbits_le32(&plat->spireg->ctrl, MVEBU_SPI_A3700_SPI_EN_0 << cs);
+}
+
+/**
+ * spi_legacy_shift_byte() - triggers the real SPI transfer
+ * @bytelen: Indicate how many bytes to transfer.
+ * @dout: Buffer address of what to send.
+ * @din: Buffer address of where to receive.
+ *
+ * This function triggers the real SPI transfer in legacy mode. It
+ * shifts out the byte buffer from @dout and shifts the received bytes
+ * into @din, if necessary.
+ *
+ * This function assumes that only one byte is shifted at a time.
+ * However, it is not its responsibility to set the transfer type to
+ * one byte, and it does not guarantee that it will work if the
+ * transfer type becomes two bytes. See spi_set_legacy() for details.
+ *
+ * In legacy mode, simply writing to the SPI_DOUT register triggers
+ * the transfer.
+ *
+ * If @dout == NULL, which means no actual data needs to be sent out,
+ * the function shifts out 0x00 in order to shift in data.
+ * The XFER_RDY flag is checked every time before accessing the
+ * SPI_DOUT and SPI_DIN registers.
+ *
+ * The number of transfers to be triggered is decided by @bytelen.
+ *
+ * Return: 0 - success
+ * -ETIMEDOUT - XFER_RDY flag timeout
+ */
+static int spi_legacy_shift_byte(struct spi_reg *reg, unsigned int bytelen,
+ const void *dout, void *din)
+{
+ const u8 *dout_8;
+ u8 *din_8;
+ int ret;
+
+ /* Use 0x00 as dummy dout */
+ const u8 dummy_dout = 0x0;
+ u32 pending_dout = 0x0;
+
+ /* dout_8: pointer of current dout */
+ dout_8 = dout;
+ /* din_8: pointer of current din */
+ din_8 = din;
+
+ while (bytelen) {
+ ret = wait_for_bit_le32(&reg->ctrl,
+ MVEBU_SPI_A3700_XFER_RDY,
+ true, 100, false);
+ if (ret)
+ return ret;
+
+ if (dout)
+ pending_dout = (u32)*dout_8;
+ else
+ pending_dout = (u32)dummy_dout;
+
+ /* Trigger the xfer */
+ writel(pending_dout, &reg->dout);
+
+ if (din) {
+ ret = wait_for_bit_le32(&reg->ctrl,
+ MVEBU_SPI_A3700_XFER_RDY,
+ true, 100, false);
+ if (ret)
+ return ret;
+
+ /* Read what is transferred in */
+ *din_8 = (u8)readl(&reg->din);
+ }
+
+ /* Don't increment the current pointer if NULL */
+ if (dout)
+ dout_8++;
+ if (din)
+ din_8++;
+
+ bytelen--;
+ }
+
+ return 0;
+}
+
+static int mvebu_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct spi_reg *reg = plat->spireg;
+ unsigned int bytelen;
+ int ret;
+
+ bytelen = bitlen / 8;
+
+ if (dout && din)
+ debug("This is a duplex transfer.\n");
+
+ /* Activate CS */
+ if (flags & SPI_XFER_BEGIN) {
+ debug("SPI: activate cs.\n");
+ spi_cs_activate(plat, spi_chip_select(dev));
+ }
+
+ /* Send and/or receive */
+ if (dout || din) {
+ ret = spi_legacy_shift_byte(reg, bytelen, dout, din);
+ if (ret)
+ return ret;
+ }
+
+ /* Deactivate CS */
+ if (flags & SPI_XFER_END) {
+ ret = wait_for_bit_le32(&reg->ctrl,
+ MVEBU_SPI_A3700_XFER_RDY,
+ true, 100, false);
+ if (ret)
+ return ret;
+
+ debug("SPI: deactivate cs.\n");
+ spi_cs_deactivate(plat, spi_chip_select(dev));
+ }
+
+ return 0;
+}
+
+static int mvebu_spi_set_speed(struct udevice *bus, uint hz)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct spi_reg *reg = plat->spireg;
+ u32 data, prescale;
+
+ data = readl(&reg->cfg);
+
+ prescale = DIV_ROUND_UP(clk_get_rate(&plat->clk), hz);
+ if (prescale > 0xf)
+ prescale = 0x10 + (prescale + 1) / 2;
+ prescale = min(prescale, 0x1fu);
+
+ data &= ~MVEBU_SPI_A3700_CLK_PRESCALE_MASK;
+ data |= prescale & MVEBU_SPI_A3700_CLK_PRESCALE_MASK;
+
+ writel(data, &reg->cfg);
+
+ return 0;
+}
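A sketch of the prescaler-field computation above, with a hypothetical 160 MHz input clock; requests beyond the encodable range simply saturate at 0x1f.

#include <stdio.h>

static unsigned int a3700_prescale(unsigned int clk, unsigned int hz)
{
	unsigned int prescale = (clk + hz - 1) / hz;	/* DIV_ROUND_UP */

	if (prescale > 0xf)
		prescale = 0x10 + (prescale + 1) / 2;
	if (prescale > 0x1f)
		prescale = 0x1f;

	return prescale;
}

int main(void)
{
	printf("20 MHz -> 0x%x\n", a3700_prescale(160000000, 20000000));	/* 0x8 */
	printf("5 MHz  -> 0x%x\n", a3700_prescale(160000000, 5000000));	/* 0x1f */
	return 0;
}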
+
+static int mvebu_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct spi_reg *reg = plat->spireg;
+
+ /*
+ * Set SPI polarity
+ * 0: Serial interface clock is low when inactive
+ * 1: Serial interface clock is high when inactive
+ */
+ if (mode & SPI_CPOL)
+ setbits_le32(&reg->cfg, MVEBU_SPI_A3700_CLK_POL);
+ else
+ clrbits_le32(&reg->cfg, MVEBU_SPI_A3700_CLK_POL);
+ if (mode & SPI_CPHA)
+ setbits_le32(&reg->cfg, MVEBU_SPI_A3700_CLK_PHA);
+ else
+ clrbits_le32(&reg->cfg, MVEBU_SPI_A3700_CLK_PHA);
+
+ return 0;
+}
+
+static int mvebu_spi_probe(struct udevice *bus)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ struct spi_reg *reg = plat->spireg;
+ u32 data;
+ int ret;
+
+ /*
+ * Set the SPI controller to work in legacy mode, which means
+ * using only the DO pin (I/O 1) for data out and the DI pin
+ * (I/O 0) for data in.
+ */
+
+ /* Flush read/write FIFO */
+ data = readl(&reg->cfg);
+ writel(data | MVEBU_SPI_A3700_FIFO_FLUSH, &reg->cfg);
+ ret = wait_for_bit_le32(&reg->cfg, MVEBU_SPI_A3700_FIFO_FLUSH,
+ false, 1000, false);
+ if (ret)
+ return ret;
+
+ /* Disable FIFO mode */
+ data &= ~MVEBU_SPI_A3700_FIFO_EN;
+
+ /* Always shift 1 byte at a time */
+ data &= ~MVEBU_SPI_A3700_BYTE_LEN;
+
+ writel(data, &reg->cfg);
+
+ /* Set up CS GPIOs in device tree, if any */
+ if (CONFIG_IS_ENABLED(DM_GPIO) && gpio_get_list_count(bus, "cs-gpios") > 0) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(plat->cs_gpios); i++) {
+ ret = gpio_request_by_name(bus, "cs-gpios", i, &plat->cs_gpios[i], 0);
+ if (ret < 0 || !dm_gpio_is_valid(&plat->cs_gpios[i])) {
+ /* Use the native CS function for this line */
+ continue;
+ }
+
+ ret = dm_gpio_set_dir_flags(&plat->cs_gpios[i],
+ GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
+ if (ret) {
+ dev_err(bus, "Setting cs %d error\n", i);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mvebu_spi_of_to_plat(struct udevice *bus)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+ int ret;
+
+ plat->spireg = dev_read_addr_ptr(bus);
+
+ ret = clk_get_by_index(bus, 0, &plat->clk);
+ if (ret) {
+ dev_err(bus, "cannot get clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mvebu_spi_remove(struct udevice *bus)
+{
+ struct mvebu_spi_plat *plat = dev_get_plat(bus);
+
+ clk_free(&plat->clk);
+
+ return 0;
+}
+
+static const struct dm_spi_ops mvebu_spi_ops = {
+ .xfer = mvebu_spi_xfer,
+ .set_speed = mvebu_spi_set_speed,
+ .set_mode = mvebu_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id mvebu_spi_ids[] = {
+ { .compatible = "marvell,armada-3700-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(mvebu_spi) = {
+ .name = "mvebu_spi",
+ .id = UCLASS_SPI,
+ .of_match = mvebu_spi_ids,
+ .ops = &mvebu_spi_ops,
+ .of_to_plat = mvebu_spi_of_to_plat,
+ .plat_auto = sizeof(struct mvebu_spi_plat),
+ .probe = mvebu_spi_probe,
+ .remove = mvebu_spi_remove,
+};
diff --git a/roms/u-boot/drivers/spi/mxc_spi.c b/roms/u-boot/drivers/spi/mxc_spi.c
new file mode 100644
index 000000000..f3dddbdbd
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mxc_spi.c
@@ -0,0 +1,703 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2008, Guennadi Liakhovetski <lg@denx.de>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/global_data.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <asm/arch/imx-regs.h>
+#include <asm/arch/clock.h>
+#include <asm/mach-imx/spi.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* MX35 and older is CSPI */
+#if defined(CONFIG_MX25) || defined(CONFIG_MX31) || defined(CONFIG_MX35)
+#define MXC_CSPI
+struct cspi_regs {
+ u32 rxdata;
+ u32 txdata;
+ u32 ctrl;
+ u32 intr;
+ u32 dma;
+ u32 stat;
+ u32 period;
+ u32 test;
+};
+
+#define MXC_CSPICTRL_EN BIT(0)
+#define MXC_CSPICTRL_MODE BIT(1)
+#define MXC_CSPICTRL_XCH BIT(2)
+#define MXC_CSPICTRL_SMC BIT(3)
+#define MXC_CSPICTRL_POL BIT(4)
+#define MXC_CSPICTRL_PHA BIT(5)
+#define MXC_CSPICTRL_SSCTL BIT(6)
+#define MXC_CSPICTRL_SSPOL BIT(7)
+#define MXC_CSPICTRL_DATARATE(x) (((x) & 0x7) << 16)
+#define MXC_CSPICTRL_RXOVF BIT(6)
+#define MXC_CSPIPERIOD_32KHZ BIT(15)
+#define MAX_SPI_BYTES 4
+#if defined(CONFIG_MX25) || defined(CONFIG_MX35)
+#define MXC_CSPICTRL_CHIPSELECT(x) (((x) & 0x3) << 12)
+#define MXC_CSPICTRL_BITCOUNT(x) (((x) & 0xfff) << 20)
+#define MXC_CSPICTRL_TC BIT(7)
+#define MXC_CSPICTRL_MAXBITS 0xfff
+#else /* MX31 */
+#define MXC_CSPICTRL_CHIPSELECT(x) (((x) & 0x3) << 24)
+#define MXC_CSPICTRL_BITCOUNT(x) (((x) & 0x1f) << 8)
+#define MXC_CSPICTRL_TC BIT(8)
+#define MXC_CSPICTRL_MAXBITS 0x1f
+#endif
+
+#else /* MX51 and newer is ECSPI */
+#define MXC_ECSPI
+struct cspi_regs {
+ u32 rxdata;
+ u32 txdata;
+ u32 ctrl;
+ u32 cfg;
+ u32 intr;
+ u32 dma;
+ u32 stat;
+ u32 period;
+};
+
+#define MXC_CSPICTRL_EN BIT(0)
+#define MXC_CSPICTRL_MODE BIT(1)
+#define MXC_CSPICTRL_XCH BIT(2)
+#define MXC_CSPICTRL_MODE_MASK (0xf << 4)
+#define MXC_CSPICTRL_CHIPSELECT(x) (((x) & 0x3) << 12)
+#define MXC_CSPICTRL_BITCOUNT(x) (((x) & 0xfff) << 20)
+#define MXC_CSPICTRL_PREDIV(x) (((x) & 0xF) << 12)
+#define MXC_CSPICTRL_POSTDIV(x) (((x) & 0xF) << 8)
+#define MXC_CSPICTRL_SELCHAN(x) (((x) & 0x3) << 18)
+#define MXC_CSPICTRL_MAXBITS 0xfff
+#define MXC_CSPICTRL_TC BIT(7)
+#define MXC_CSPICTRL_RXOVF BIT(6)
+#define MXC_CSPIPERIOD_32KHZ BIT(15)
+#define MAX_SPI_BYTES 32
+
+/* Bit position inside CTRL register to be associated with SS */
+#define MXC_CSPICTRL_CHAN 18
+
+/* Bit position inside CON register to be associated with SS */
+#define MXC_CSPICON_PHA 0 /* SCLK phase control */
+#define MXC_CSPICON_POL 4 /* SCLK polarity */
+#define MXC_CSPICON_SSPOL 12 /* SS polarity */
+#define MXC_CSPICON_CTL 20 /* inactive state of SCLK */
+#endif
+
+#ifdef CONFIG_MX27
+/* The i.MX27 datasheet documents a completely wrong register layout and
+ * register definitions; the correct ones are in Freescale's Linux driver.
+ */
+
+#error "i.MX27 CSPI not supported due to drastic differences in register definitions" \
+"See linux mxc_spi driver from Freescale for details."
+#endif
+
+__weak int board_spi_cs_gpio(unsigned bus, unsigned cs)
+{
+ return -1;
+}
+
+#define OUT MXC_GPIO_DIRECTION_OUT
+
+#define reg_read readl
+#define reg_write(a, v) writel(v, a)
+
+#if !defined(CONFIG_SYS_SPI_MXC_WAIT)
+#define CONFIG_SYS_SPI_MXC_WAIT (CONFIG_SYS_HZ/100) /* 10 ms */
+#endif
+
+#define MAX_CS_COUNT 4
+
+struct mxc_spi_slave {
+ struct spi_slave slave;
+ unsigned long base;
+ u32 ctrl_reg;
+#if defined(MXC_ECSPI)
+ u32 cfg_reg;
+#endif
+ int gpio;
+ int ss_pol;
+ unsigned int max_hz;
+ unsigned int mode;
+ struct gpio_desc ss;
+ struct gpio_desc cs_gpios[MAX_CS_COUNT];
+ struct udevice *dev;
+};
+
+static inline struct mxc_spi_slave *to_mxc_spi_slave(struct spi_slave *slave)
+{
+ return container_of(slave, struct mxc_spi_slave, slave);
+}
+
+static void mxc_spi_cs_activate(struct mxc_spi_slave *mxcs)
+{
+#if CONFIG_IS_ENABLED(DM_SPI)
+ struct udevice *dev = mxcs->dev;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ u32 cs = slave_plat->cs;
+
+ if (!dm_gpio_is_valid(&mxcs->cs_gpios[cs]))
+ return;
+
+ dm_gpio_set_value(&mxcs->cs_gpios[cs], 1);
+#else
+ if (mxcs->gpio > 0)
+ gpio_set_value(mxcs->gpio, mxcs->ss_pol);
+#endif
+}
+
+static void mxc_spi_cs_deactivate(struct mxc_spi_slave *mxcs)
+{
+#if CONFIG_IS_ENABLED(DM_SPI)
+ struct udevice *dev = mxcs->dev;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ u32 cs = slave_plat->cs;
+
+ if (!dm_gpio_is_valid(&mxcs->cs_gpios[cs]))
+ return;
+
+ dm_gpio_set_value(&mxcs->cs_gpios[cs], 0);
+#else
+ if (mxcs->gpio > 0)
+ gpio_set_value(mxcs->gpio, !(mxcs->ss_pol));
+#endif
+}
+
+u32 get_cspi_div(u32 div)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (div <= (4 << i))
+ return i;
+ }
+ return i;
+}
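A small standalone sketch of get_cspi_div(): it returns the smallest i with div <= 4 << i, so the resulting SCLK is clk_src / (4 << i), matching the debug print in spi_cfg_mxc(). The example numbers are hypothetical.

#include <stdio.h>

static unsigned int cspi_div_field(unsigned int div)
{
	unsigned int i;

	for (i = 0; i < 8; i++)
		if (div <= (4u << i))
			return i;
	return i;
}

int main(void)
{
	/* Hypothetical 66 MHz CSPI clock, 1 MHz requested speed */
	unsigned int div = (66000000 + 1000000 - 1) / 1000000;	/* 66 */
	unsigned int field = cspi_div_field(div);		/* 5 */

	printf("field=%u, real clk=%u Hz\n", field, 66000000 / (4u << field));
	return 0;
}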
+
+#ifdef MXC_CSPI
+static s32 spi_cfg_mxc(struct mxc_spi_slave *mxcs, unsigned int cs)
+{
+ unsigned int ctrl_reg;
+ u32 clk_src;
+ u32 div;
+ unsigned int max_hz = mxcs->max_hz;
+ unsigned int mode = mxcs->mode;
+
+ clk_src = mxc_get_clock(MXC_CSPI_CLK);
+
+ div = DIV_ROUND_UP(clk_src, max_hz);
+ div = get_cspi_div(div);
+
+ debug("clk %d Hz, div %d, real clk %d Hz\n",
+ max_hz, div, clk_src / (4 << div));
+
+ ctrl_reg = MXC_CSPICTRL_CHIPSELECT(cs) |
+ MXC_CSPICTRL_BITCOUNT(MXC_CSPICTRL_MAXBITS) |
+ MXC_CSPICTRL_DATARATE(div) |
+ MXC_CSPICTRL_EN |
+#ifdef CONFIG_MX35
+ MXC_CSPICTRL_SSCTL |
+#endif
+ MXC_CSPICTRL_MODE;
+
+ if (mode & SPI_CPHA)
+ ctrl_reg |= MXC_CSPICTRL_PHA;
+ if (mode & SPI_CPOL)
+ ctrl_reg |= MXC_CSPICTRL_POL;
+ if (mode & SPI_CS_HIGH)
+ ctrl_reg |= MXC_CSPICTRL_SSPOL;
+ mxcs->ctrl_reg = ctrl_reg;
+
+ return 0;
+}
+#endif
+
+#ifdef MXC_ECSPI
+static s32 spi_cfg_mxc(struct mxc_spi_slave *mxcs, unsigned int cs)
+{
+ u32 clk_src = mxc_get_clock(MXC_CSPI_CLK);
+ s32 reg_ctrl, reg_config;
+ u32 ss_pol = 0, sclkpol = 0, sclkpha = 0, sclkctl = 0;
+ u32 pre_div = 0, post_div = 0;
+ struct cspi_regs *regs = (struct cspi_regs *)mxcs->base;
+ unsigned int max_hz = mxcs->max_hz;
+ unsigned int mode = mxcs->mode;
+
+ /*
+ * Reset the SPI block and set all CSs to master mode; if toggling
+ * between slave and master mode we might see a glitch on the
+ * clock line.
+ */
+ reg_ctrl = MXC_CSPICTRL_MODE_MASK;
+ reg_write(&regs->ctrl, reg_ctrl);
+ reg_ctrl |= MXC_CSPICTRL_EN;
+ reg_write(&regs->ctrl, reg_ctrl);
+
+ if (clk_src > max_hz) {
+ pre_div = (clk_src - 1) / max_hz;
+ /* fls(1) = 1, fls(0x80000000) = 32, fls(16) = 5 */
+ post_div = fls(pre_div);
+ if (post_div > 4) {
+ post_div -= 4;
+ if (post_div >= 16) {
+ printf("Error: no divider for the freq: %d\n",
+ max_hz);
+ return -1;
+ }
+ pre_div >>= post_div;
+ } else {
+ post_div = 0;
+ }
+ }
+
+ debug("pre_div = %d, post_div=%d\n", pre_div, post_div);
+ reg_ctrl = (reg_ctrl & ~MXC_CSPICTRL_SELCHAN(3)) |
+ MXC_CSPICTRL_SELCHAN(cs);
+ reg_ctrl = (reg_ctrl & ~MXC_CSPICTRL_PREDIV(0x0F)) |
+ MXC_CSPICTRL_PREDIV(pre_div);
+ reg_ctrl = (reg_ctrl & ~MXC_CSPICTRL_POSTDIV(0x0F)) |
+ MXC_CSPICTRL_POSTDIV(post_div);
+
+ if (mode & SPI_CS_HIGH)
+ ss_pol = 1;
+
+ if (mode & SPI_CPOL) {
+ sclkpol = 1;
+ sclkctl = 1;
+ }
+
+ if (mode & SPI_CPHA)
+ sclkpha = 1;
+
+ reg_config = reg_read(&regs->cfg);
+
+ /*
+ * Configuration register setup
+ * The MX51 supports different setup for each SS
+ */
+ reg_config = (reg_config & ~(1 << (cs + MXC_CSPICON_SSPOL))) |
+ (ss_pol << (cs + MXC_CSPICON_SSPOL));
+ reg_config = (reg_config & ~(1 << (cs + MXC_CSPICON_POL))) |
+ (sclkpol << (cs + MXC_CSPICON_POL));
+ reg_config = (reg_config & ~(1 << (cs + MXC_CSPICON_CTL))) |
+ (sclkctl << (cs + MXC_CSPICON_CTL));
+ reg_config = (reg_config & ~(1 << (cs + MXC_CSPICON_PHA))) |
+ (sclkpha << (cs + MXC_CSPICON_PHA));
+
+ debug("reg_ctrl = 0x%x\n", reg_ctrl);
+ reg_write(&regs->ctrl, reg_ctrl);
+ debug("reg_config = 0x%x\n", reg_config);
+ reg_write(&regs->cfg, reg_config);
+
+ /* save config register and control register */
+ mxcs->ctrl_reg = reg_ctrl;
+ mxcs->cfg_reg = reg_config;
+
+ /* clear interrupt reg */
+ reg_write(&regs->intr, 0);
+ reg_write(&regs->stat, MXC_CSPICTRL_TC | MXC_CSPICTRL_RXOVF);
+
+ return 0;
+}
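A host-side sketch of the ECSPI pre/post divider selection above, using a hypothetical 60 MHz reference clock and a 1 MHz target; the divider-overflow check is omitted for brevity, and the SCLK comment assumes the usual ECSPI clk / ((PREDIV + 1) * 2^POSTDIV) relation.

#include <stdio.h>

static unsigned int fls_example(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int clk_src = 60000000, max_hz = 1000000;
	unsigned int pre_div = 0, post_div = 0;

	if (clk_src > max_hz) {
		pre_div = (clk_src - 1) / max_hz;	/* 59 */
		post_div = fls_example(pre_div);	/* 6 */
		if (post_div > 4) {
			post_div -= 4;			/* 2 */
			pre_div >>= post_div;		/* 59 >> 2 = 14 */
		} else {
			post_div = 0;
		}
	}

	/* SCLK = clk_src / ((pre_div + 1) << post_div) = 60 MHz / 60 = 1 MHz */
	printf("pre_div=%u post_div=%u SCLK=%u Hz\n", pre_div, post_div,
	       clk_src / ((pre_div + 1) << post_div));
	return 0;
}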
+#endif
+
+int spi_xchg_single(struct mxc_spi_slave *mxcs, unsigned int bitlen,
+ const u8 *dout, u8 *din, unsigned long flags)
+{
+ int nbytes = DIV_ROUND_UP(bitlen, 8);
+ u32 data, cnt, i;
+ struct cspi_regs *regs = (struct cspi_regs *)mxcs->base;
+ u32 ts;
+ int status;
+
+ debug("%s: bitlen %d dout 0x%lx din 0x%lx\n",
+ __func__, bitlen, (ulong)dout, (ulong)din);
+
+ mxcs->ctrl_reg = (mxcs->ctrl_reg &
+ ~MXC_CSPICTRL_BITCOUNT(MXC_CSPICTRL_MAXBITS)) |
+ MXC_CSPICTRL_BITCOUNT(bitlen - 1);
+
+ reg_write(&regs->ctrl, mxcs->ctrl_reg | MXC_CSPICTRL_EN);
+#ifdef MXC_ECSPI
+ reg_write(&regs->cfg, mxcs->cfg_reg);
+#endif
+
+ /* Clear interrupt register */
+ reg_write(&regs->stat, MXC_CSPICTRL_TC | MXC_CSPICTRL_RXOVF);
+
+ /*
+ * The SPI controller works only with 32-bit words; check
+ * whether less than a word is sent.
+ * Access to the FIFO is 32 bit only.
+ */
+ if (bitlen % 32) {
+ data = 0;
+ cnt = (bitlen % 32) / 8;
+ if (dout) {
+ for (i = 0; i < cnt; i++) {
+ data = (data << 8) | (*dout++ & 0xFF);
+ }
+ }
+ debug("Sending SPI 0x%x\n", data);
+
+ reg_write(&regs->txdata, data);
+ nbytes -= cnt;
+ }
+
+ data = 0;
+
+ while (nbytes > 0) {
+ data = 0;
+ if (dout) {
+ /* Buffer is not 32-bit aligned */
+ if ((unsigned long)dout & 0x03) {
+ data = 0;
+ for (i = 0; i < 4; i++)
+ data = (data << 8) | (*dout++ & 0xFF);
+ } else {
+ data = *(u32 *)dout;
+ data = cpu_to_be32(data);
+ dout += 4;
+ }
+ }
+ debug("Sending SPI 0x%x\n", data);
+ reg_write(&regs->txdata, data);
+ nbytes -= 4;
+ }
+
+ /* FIFO is written, now starts the transfer setting the XCH bit */
+ reg_write(&regs->ctrl, mxcs->ctrl_reg |
+ MXC_CSPICTRL_EN | MXC_CSPICTRL_XCH);
+
+ ts = get_timer(0);
+ status = reg_read(&regs->stat);
+ /* Wait until the TC (Transfer completed) bit is set */
+ while ((status & MXC_CSPICTRL_TC) == 0) {
+ if (get_timer(ts) > CONFIG_SYS_SPI_MXC_WAIT) {
+ printf("spi_xchg_single: Timeout!\n");
+ return -1;
+ }
+ status = reg_read(&regs->stat);
+ }
+
+ /* Transfer completed, clear any pending request */
+ reg_write(&regs->stat, MXC_CSPICTRL_TC | MXC_CSPICTRL_RXOVF);
+
+ nbytes = DIV_ROUND_UP(bitlen, 8);
+
+ cnt = nbytes % 32;
+
+ if (bitlen % 32) {
+ data = reg_read(&regs->rxdata);
+ cnt = (bitlen % 32) / 8;
+ data = cpu_to_be32(data) >> ((sizeof(data) - cnt) * 8);
+ debug("SPI Rx unaligned: 0x%x\n", data);
+ if (din) {
+ memcpy(din, &data, cnt);
+ din += cnt;
+ }
+ nbytes -= cnt;
+ }
+
+ while (nbytes > 0) {
+ u32 tmp;
+ tmp = reg_read(&regs->rxdata);
+ data = cpu_to_be32(tmp);
+ debug("SPI Rx: 0x%x 0x%x\n", tmp, data);
+ cnt = min_t(u32, nbytes, sizeof(data));
+ if (din) {
+ memcpy(din, &data, cnt);
+ din += cnt;
+ }
+ nbytes -= cnt;
+ }
+
+ return 0;
+
+}
+
+static int mxc_spi_xfer_internal(struct mxc_spi_slave *mxcs,
+ unsigned int bitlen, const void *dout,
+ void *din, unsigned long flags)
+{
+ int n_bytes = DIV_ROUND_UP(bitlen, 8);
+ int n_bits;
+ int ret;
+ u32 blk_size;
+ u8 *p_outbuf = (u8 *)dout;
+ u8 *p_inbuf = (u8 *)din;
+
+ if (!mxcs)
+ return -EINVAL;
+
+ if (flags & SPI_XFER_BEGIN)
+ mxc_spi_cs_activate(mxcs);
+
+ while (n_bytes > 0) {
+ if (n_bytes < MAX_SPI_BYTES)
+ blk_size = n_bytes;
+ else
+ blk_size = MAX_SPI_BYTES;
+
+ n_bits = blk_size * 8;
+
+ ret = spi_xchg_single(mxcs, n_bits, p_outbuf, p_inbuf, 0);
+
+ if (ret)
+ return ret;
+ if (dout)
+ p_outbuf += blk_size;
+ if (din)
+ p_inbuf += blk_size;
+ n_bytes -= blk_size;
+ }
+
+ if (flags & SPI_XFER_END) {
+ mxc_spi_cs_deactivate(mxcs);
+ }
+
+ return 0;
+}
+
+static int mxc_spi_claim_bus_internal(struct mxc_spi_slave *mxcs, int cs)
+{
+ struct cspi_regs *regs = (struct cspi_regs *)mxcs->base;
+ int ret;
+
+ reg_write(&regs->rxdata, 1);
+ udelay(1);
+ ret = spi_cfg_mxc(mxcs, cs);
+ if (ret) {
+ printf("mxc_spi: cannot setup SPI controller\n");
+ return ret;
+ }
+ reg_write(&regs->period, MXC_CSPIPERIOD_32KHZ);
+ reg_write(&regs->intr, 0);
+
+ return 0;
+}
+
+#if !CONFIG_IS_ENABLED(DM_SPI)
+int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
+ void *din, unsigned long flags)
+{
+ struct mxc_spi_slave *mxcs = to_mxc_spi_slave(slave);
+
+ return mxc_spi_xfer_internal(mxcs, bitlen, dout, din, flags);
+}
+
+/*
+ * Some SPI devices require active chip-select over multiple
+ * transactions, we achieve this using a GPIO. Still, the SPI
+ * controller has to be configured to use one of its own chipselects.
+ * To use this feature you have to implement board_spi_cs_gpio() to assign
+ * a gpio value for each cs (-1 if cs doesn't need to use gpio).
+ * The chip select passed in must still be one of this controller's own,
+ * otherwise unused, chip selects between 0 and 3.
+ */
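+/*
+ * A board-side implementation might look roughly like the sketch below
+ * (example values only; the GPIO number is purely illustrative):
+ *
+ * int board_spi_cs_gpio(unsigned int bus, unsigned int cs)
+ * {
+ *	if (bus == 0 && cs == 0)
+ *		return IMX_GPIO_NR(3, 19);
+ *	return -1;
+ * }
+ */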
+static int setup_cs_gpio(struct mxc_spi_slave *mxcs,
+ unsigned int bus, unsigned int cs)
+{
+ int ret;
+
+ mxcs->gpio = board_spi_cs_gpio(bus, cs);
+ if (mxcs->gpio == -1)
+ return 0;
+
+ gpio_request(mxcs->gpio, "spi-cs");
+ ret = gpio_direction_output(mxcs->gpio, !(mxcs->ss_pol));
+ if (ret) {
+ printf("mxc_spi: cannot setup gpio %d\n", mxcs->gpio);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static unsigned long spi_bases[] = {
+ MXC_SPI_BASE_ADDRESSES
+};
+
+struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
+ unsigned int max_hz, unsigned int mode)
+{
+ struct mxc_spi_slave *mxcs;
+ int ret;
+
+ if (bus >= ARRAY_SIZE(spi_bases))
+ return NULL;
+
+ if (max_hz == 0) {
+ printf("Error: desired clock is 0\n");
+ return NULL;
+ }
+
+ mxcs = spi_alloc_slave(struct mxc_spi_slave, bus, cs);
+ if (!mxcs) {
+ puts("mxc_spi: SPI Slave not allocated !\n");
+ return NULL;
+ }
+
+ mxcs->ss_pol = (mode & SPI_CS_HIGH) ? 1 : 0;
+
+ ret = setup_cs_gpio(mxcs, bus, cs);
+ if (ret < 0) {
+ free(mxcs);
+ return NULL;
+ }
+
+ mxcs->base = spi_bases[bus];
+ mxcs->max_hz = max_hz;
+ mxcs->mode = mode;
+
+ return &mxcs->slave;
+}
+
+void spi_free_slave(struct spi_slave *slave)
+{
+ struct mxc_spi_slave *mxcs = to_mxc_spi_slave(slave);
+
+ free(mxcs);
+}
+
+int spi_claim_bus(struct spi_slave *slave)
+{
+ struct mxc_spi_slave *mxcs = to_mxc_spi_slave(slave);
+
+ return mxc_spi_claim_bus_internal(mxcs, slave->cs);
+}
+
+void spi_release_bus(struct spi_slave *slave)
+{
+ /* TODO: Shut the controller down */
+}
+#else
+
+static int mxc_spi_probe(struct udevice *bus)
+{
+ struct mxc_spi_slave *mxcs = dev_get_plat(bus);
+ int node = dev_of_offset(bus);
+ const void *blob = gd->fdt_blob;
+ int ret;
+ int i;
+
+ ret = gpio_request_list_by_name(bus, "cs-gpios", mxcs->cs_gpios,
+ ARRAY_SIZE(mxcs->cs_gpios), 0);
+ if (ret < 0) {
+ pr_err("Can't get %s gpios! Error: %d", bus->name, ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mxcs->cs_gpios); i++) {
+ if (!dm_gpio_is_valid(&mxcs->cs_gpios[i]))
+ continue;
+
+ ret = dm_gpio_set_dir_flags(&mxcs->cs_gpios[i],
+ GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
+ if (ret) {
+ dev_err(bus, "Setting cs %d error\n", i);
+ return ret;
+ }
+ }
+
+ mxcs->base = dev_read_addr(bus);
+ if (mxcs->base == FDT_ADDR_T_NONE)
+ return -ENODEV;
+
+#if CONFIG_IS_ENABLED(CLK)
+ struct clk clk;
+ ret = clk_get_by_index(bus, 0, &clk);
+ if (ret)
+ return ret;
+
+ clk_enable(&clk);
+
+ mxcs->max_hz = clk_get_rate(&clk);
+#else
+ mxcs->max_hz = fdtdec_get_int(blob, node, "spi-max-frequency",
+ 20000000);
+#endif
+
+ return 0;
+}
+
+static int mxc_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct mxc_spi_slave *mxcs = dev_get_plat(dev->parent);
+
+ return mxc_spi_xfer_internal(mxcs, bitlen, dout, din, flags);
+}
+
+static int mxc_spi_claim_bus(struct udevice *dev)
+{
+ struct mxc_spi_slave *mxcs = dev_get_plat(dev->parent);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ mxcs->dev = dev;
+
+ return mxc_spi_claim_bus_internal(mxcs, slave_plat->cs);
+}
+
+static int mxc_spi_release_bus(struct udevice *dev)
+{
+ return 0;
+}
+
+static int mxc_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct mxc_spi_slave *mxcs = dev_get_plat(bus);
+
+ mxcs->max_hz = speed;
+
+ return 0;
+}
+
+static int mxc_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct mxc_spi_slave *mxcs = dev_get_plat(bus);
+
+ mxcs->mode = mode;
+ mxcs->ss_pol = (mode & SPI_CS_HIGH) ? 1 : 0;
+
+ return 0;
+}
+
+static const struct dm_spi_ops mxc_spi_ops = {
+ .claim_bus = mxc_spi_claim_bus,
+ .release_bus = mxc_spi_release_bus,
+ .xfer = mxc_spi_xfer,
+ .set_speed = mxc_spi_set_speed,
+ .set_mode = mxc_spi_set_mode,
+};
+
+static const struct udevice_id mxc_spi_ids[] = {
+ { .compatible = "fsl,imx51-ecspi" },
+ { }
+};
+
+U_BOOT_DRIVER(mxc_spi) = {
+ .name = "mxc_spi",
+ .id = UCLASS_SPI,
+ .of_match = mxc_spi_ids,
+ .ops = &mxc_spi_ops,
+ .plat_auto = sizeof(struct mxc_spi_slave),
+ .probe = mxc_spi_probe,
+};
+#endif
diff --git a/roms/u-boot/drivers/spi/mxs_spi.c b/roms/u-boot/drivers/spi/mxs_spi.c
new file mode 100644
index 000000000..d41352a0b
--- /dev/null
+++ b/roms/u-boot/drivers/spi/mxs_spi.c
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Freescale i.MX28 SPI driver
+ *
+ * Copyright (C) 2019 DENX Software Engineering
+ * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
+ *
+ * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
+ * on behalf of DENX Software Engineering GmbH
+ *
+ * NOTE: This driver only supports the SPI-controller chipselects,
+ * GPIO driven chipselects are not supported.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dt-structs.h>
+#include <cpu_func.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <spi.h>
+#include <asm/cache.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/imx-regs.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/mach-imx/dma.h>
+
+#define MXS_SPI_MAX_TIMEOUT 1000000
+#define MXS_SPI_PORT_OFFSET 0x2000
+#define MXS_SSP_CHIPSELECT_MASK 0x00300000
+#define MXS_SSP_CHIPSELECT_SHIFT 20
+
+#define MXSSSP_SMALL_TRANSFER 512
+
+/* Base numbers of i.MX2[38] clk for ssp0 IP block */
+#define MXS_SSP_IMX23_CLKID_SSP0 33
+#define MXS_SSP_IMX28_CLKID_SSP0 46
+
+struct mxs_spi_plat {
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct dtd_fsl_imx23_spi dtplat;
+#endif
+ s32 frequency; /* Default clock frequency, -1 for none */
+ fdt_addr_t base; /* SPI IP block base address */
+ int num_cs; /* Number of CSes supported */
+ int dma_id; /* ID of the DMA channel */
+ int clk_id; /* ID of the SSP clock */
+};
+
+struct mxs_spi_priv {
+ struct mxs_ssp_regs *regs;
+ unsigned int dma_channel;
+ unsigned int max_freq;
+ unsigned int clk_id;
+ unsigned int mode;
+};
+
+static void mxs_spi_start_xfer(struct mxs_ssp_regs *ssp_regs)
+{
+ writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_set);
+ writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_clr);
+}
+
+static void mxs_spi_end_xfer(struct mxs_ssp_regs *ssp_regs)
+{
+ writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_clr);
+ writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_set);
+}
+
+static int mxs_spi_xfer_pio(struct mxs_spi_priv *priv,
+ char *data, int length, int write,
+ unsigned long flags)
+{
+ struct mxs_ssp_regs *ssp_regs = priv->regs;
+
+ if (flags & SPI_XFER_BEGIN)
+ mxs_spi_start_xfer(ssp_regs);
+
+ while (length--) {
+ /* We transfer 1 byte */
+#if defined(CONFIG_MX23)
+ writel(SSP_CTRL0_XFER_COUNT_MASK, &ssp_regs->hw_ssp_ctrl0_clr);
+ writel(1, &ssp_regs->hw_ssp_ctrl0_set);
+#elif defined(CONFIG_MX28)
+ writel(1, &ssp_regs->hw_ssp_xfer_size);
+#endif
+
+ if ((flags & SPI_XFER_END) && !length)
+ mxs_spi_end_xfer(ssp_regs);
+
+ if (write)
+ writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_clr);
+ else
+ writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_set);
+
+ writel(SSP_CTRL0_RUN, &ssp_regs->hw_ssp_ctrl0_set);
+
+ if (mxs_wait_mask_set(&ssp_regs->hw_ssp_ctrl0_reg,
+ SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
+ printf("MXS SPI: Timeout waiting for start\n");
+ return -ETIMEDOUT;
+ }
+
+ if (write)
+ writel(*data++, &ssp_regs->hw_ssp_data);
+
+ writel(SSP_CTRL0_DATA_XFER, &ssp_regs->hw_ssp_ctrl0_set);
+
+ if (!write) {
+ if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_status_reg,
+ SSP_STATUS_FIFO_EMPTY, MXS_SPI_MAX_TIMEOUT)) {
+ printf("MXS SPI: Timeout waiting for data\n");
+ return -ETIMEDOUT;
+ }
+
+ *data = readl(&ssp_regs->hw_ssp_data);
+ data++;
+ }
+
+ if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_ctrl0_reg,
+ SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
+ printf("MXS SPI: Timeout waiting for finish\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static int mxs_spi_xfer_dma(struct mxs_spi_priv *priv,
+ char *data, int length, int write,
+ unsigned long flags)
+{
+ struct mxs_ssp_regs *ssp_regs = priv->regs;
+ const int xfer_max_sz = 0xff00;
+ const int desc_count = DIV_ROUND_UP(length, xfer_max_sz) + 1;
+ struct mxs_dma_desc *dp;
+ uint32_t ctrl0;
+ uint32_t cache_data_count;
+ const uint32_t dstart = (uint32_t)data;
+ int dmach;
+ int tl;
+ int ret = 0;
+
+#if defined(CONFIG_MX23)
+ const int mxs_spi_pio_words = 1;
+#elif defined(CONFIG_MX28)
+ const int mxs_spi_pio_words = 4;
+#endif
+
+ ALLOC_CACHE_ALIGN_BUFFER(struct mxs_dma_desc, desc, desc_count);
+
+ memset(desc, 0, sizeof(struct mxs_dma_desc) * desc_count);
+
+ ctrl0 = readl(&ssp_regs->hw_ssp_ctrl0);
+ ctrl0 |= SSP_CTRL0_DATA_XFER;
+
+ if (flags & SPI_XFER_BEGIN)
+ ctrl0 |= SSP_CTRL0_LOCK_CS;
+ if (!write)
+ ctrl0 |= SSP_CTRL0_READ;
+
+ if (length % ARCH_DMA_MINALIGN)
+ cache_data_count = roundup(length, ARCH_DMA_MINALIGN);
+ else
+ cache_data_count = length;
+
+ /* Flush data to DRAM so DMA can pick them up */
+ if (write)
+ flush_dcache_range(dstart, dstart + cache_data_count);
+
+ /* Invalidate the area, so no writeback into the RAM races with DMA */
+ invalidate_dcache_range(dstart, dstart + cache_data_count);
+
+ dmach = priv->dma_channel;
+
+ dp = desc;
+ while (length) {
+ dp->address = (dma_addr_t)dp;
+ dp->cmd.address = (dma_addr_t)data;
+
+ /*
+ * This is correct, even though it does indeed look insane.
+ * I hereby have to, wholeheartedly, thank Freescale Inc.,
+ * for always inventing insane hardware and keeping me busy
+ * and employed ;-)
+ */
+ if (write)
+ dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
+ else
+ dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;
+
+ /*
+ * The DMA controller can transfer large chunks (64 kB) at a
+ * time by setting the transfer length to 0. Setting tl to
+ * 0x10000 will overflow below and make .data contain 0.
+ * Otherwise, 0xff00 is the transfer maximum.
+ */
+ if (length >= 0x10000)
+ tl = 0x10000;
+ else
+ tl = min(length, xfer_max_sz);
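+ /*
+ * For example, length = 70000 yields tl = 0x10000 for the first
+ * descriptor (written as 0 in the 16-bit BYTES field, i.e. 64 kB)
+ * and tl = 70000 - 65536 = 4464 for the second.
+ */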
+
+ dp->cmd.data |=
+ ((tl & 0xffff) << MXS_DMA_DESC_BYTES_OFFSET) |
+ (mxs_spi_pio_words << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
+ MXS_DMA_DESC_HALT_ON_TERMINATE |
+ MXS_DMA_DESC_TERMINATE_FLUSH;
+
+ data += tl;
+ length -= tl;
+
+ if (!length) {
+ dp->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM;
+
+ if (flags & SPI_XFER_END) {
+ ctrl0 &= ~SSP_CTRL0_LOCK_CS;
+ ctrl0 |= SSP_CTRL0_IGNORE_CRC;
+ }
+ }
+
+ /*
+ * Write CTRL0, CMD0, CMD1 and XFER_SIZE registers in
+ * case of MX28, write only CTRL0 in case of MX23 due
+ * to the difference in register layout. It is utterly
+ * essential that the XFER_SIZE register is written on
+ * a per-descriptor basis with the same size as is the
+ * descriptor!
+ */
+ dp->cmd.pio_words[0] = ctrl0;
+#ifdef CONFIG_MX28
+ dp->cmd.pio_words[1] = 0;
+ dp->cmd.pio_words[2] = 0;
+ dp->cmd.pio_words[3] = tl;
+#endif
+
+ mxs_dma_desc_append(dmach, dp);
+
+ dp++;
+ }
+
+ if (mxs_dma_go(dmach))
+ ret = -EINVAL;
+
+ /* The data arrived into DRAM, invalidate cache over them */
+ if (!write)
+ invalidate_dcache_range(dstart, dstart + cache_data_count);
+
+ return ret;
+}
+
+int mxs_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct mxs_spi_priv *priv = dev_get_priv(bus);
+ struct mxs_ssp_regs *ssp_regs = priv->regs;
+ int len = bitlen / 8;
+ char dummy;
+ int write = 0;
+ char *data = NULL;
+ int dma = 1;
+
+ if (bitlen == 0) {
+ if (flags & SPI_XFER_END) {
+ din = (void *)&dummy;
+ len = 1;
+ } else
+ return 0;
+ }
+
+ /* Half-duplex only */
+ if (din && dout)
+ return -EINVAL;
+ /* No data */
+ if (!din && !dout)
+ return 0;
+
+ if (dout) {
+ data = (char *)dout;
+ write = 1;
+ } else if (din) {
+ data = (char *)din;
+ write = 0;
+ }
+
+ /*
+ * Check for alignment, if the buffer is aligned, do DMA transfer,
+ * PIO otherwise. This is a temporary workaround until proper bounce
+ * buffer is in place.
+ */
+ if (dma) {
+ if (((uint32_t)data) & (ARCH_DMA_MINALIGN - 1))
+ dma = 0;
+ if (((uint32_t)len) & (ARCH_DMA_MINALIGN - 1))
+ dma = 0;
+ }
+
+ if (!dma || (len < MXSSSP_SMALL_TRANSFER)) {
+ writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_clr);
+ return mxs_spi_xfer_pio(priv, data, len, write, flags);
+ } else {
+ writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_set);
+ return mxs_spi_xfer_dma(priv, data, len, write, flags);
+ }
+}
+
+static int mxs_spi_probe(struct udevice *bus)
+{
+ struct mxs_spi_plat *plat = dev_get_plat(bus);
+ struct mxs_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ debug("%s: probe\n", __func__);
+
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct dtd_fsl_imx23_spi *dtplat = &plat->dtplat;
+ struct phandle_1_arg *p1a = &dtplat->clocks[0];
+
+ priv->regs = (struct mxs_ssp_regs *)dtplat->reg[0];
+ priv->dma_channel = dtplat->dmas[1];
+ priv->clk_id = p1a->arg[0];
+ priv->max_freq = dtplat->spi_max_frequency;
+ plat->num_cs = dtplat->num_cs;
+
+ debug("OF_PLATDATA: regs: 0x%x max freq: %d clkid: %d\n",
+ (unsigned int)priv->regs, priv->max_freq, priv->clk_id);
+#else
+ priv->regs = (struct mxs_ssp_regs *)plat->base;
+ priv->max_freq = plat->frequency;
+
+ priv->dma_channel = plat->dma_id;
+ priv->clk_id = plat->clk_id;
+#endif
+
+ mxs_reset_block(&priv->regs->hw_ssp_ctrl0_reg);
+
+ ret = mxs_dma_init_channel(priv->dma_channel);
+ if (ret) {
+ printf("%s: DMA init channel error %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mxs_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct mxs_spi_priv *priv = dev_get_priv(bus);
+ struct mxs_ssp_regs *ssp_regs = priv->regs;
+ int cs = spi_chip_select(dev);
+
+ /*
+ * i.MX28 supports up to 3 CS (SSn0, SSn1, SSn2)
+ * To select one, the following tuple (WAIT_FOR_IRQ, WAIT_FOR_CMD) is used,
+ * where:
+ *
+ * WAIT_FOR_IRQ is bit 21 of HW_SSP_CTRL0
+ * WAIT_FOR_CMD is bit 20 (#defined as MXS_SSP_CHIPSELECT_SHIFT here) of
+ * HW_SSP_CTRL0
+ * SSn0 b00
+ * SSn1 b01
+ * SSn2 b10 (which requires setting WAIT_FOR_IRQ)
+ *
+ * However, for now the i.MX28 SPI driver only supports the first two
+ * chip selects (SSn0 and SSn1).
+ */
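+ /*
+ * For example, cs = 1 clears CLKGATE and bit 20 below and then sets
+ * bit 20 again, selecting SSn1; cs = 0 leaves bit 20 clear (SSn0).
+ */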
+
+ /* Ungate SSP clock and set active CS */
+ clrsetbits_le32(&ssp_regs->hw_ssp_ctrl0,
+ BIT(MXS_SSP_CHIPSELECT_SHIFT) |
+ SSP_CTRL0_CLKGATE, (cs << MXS_SSP_CHIPSELECT_SHIFT));
+
+ return 0;
+}
+
+static int mxs_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct mxs_spi_priv *priv = dev_get_priv(bus);
+ struct mxs_ssp_regs *ssp_regs = priv->regs;
+
+ /* Gate SSP clock */
+ setbits_le32(&ssp_regs->hw_ssp_ctrl0, SSP_CTRL0_CLKGATE);
+
+ return 0;
+}
+
+static int mxs_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct mxs_spi_priv *priv = dev_get_priv(bus);
+#ifdef CONFIG_MX28
+ int clkid = priv->clk_id - MXS_SSP_IMX28_CLKID_SSP0;
+#else /* CONFIG_MX23 */
+ int clkid = priv->clk_id - MXS_SSP_IMX23_CLKID_SSP0;
+#endif
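+ /*
+ * For example, on i.MX28 an SSP2 clock id of 48 gives clkid = 48 - 46
+ * = 2, selecting the SSP2 bus clock (the SSP clock ids are assumed to
+ * be consecutive).
+ */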
+ if (speed > priv->max_freq)
+ speed = priv->max_freq;
+
+ debug("%s speed: %u [Hz] clkid: %d\n", __func__, speed, clkid);
+ mxs_set_ssp_busclock(clkid, speed / 1000);
+
+ return 0;
+}
+
+static int mxs_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct mxs_spi_priv *priv = dev_get_priv(bus);
+ struct mxs_ssp_regs *ssp_regs = priv->regs;
+ u32 reg;
+
+ priv->mode = mode;
+ debug("%s: mode 0x%x\n", __func__, mode);
+
+ reg = SSP_CTRL1_SSP_MODE_SPI | SSP_CTRL1_WORD_LENGTH_EIGHT_BITS;
+ reg |= (priv->mode & SPI_CPOL) ? SSP_CTRL1_POLARITY : 0;
+ reg |= (priv->mode & SPI_CPHA) ? SSP_CTRL1_PHASE : 0;
+ writel(reg, &ssp_regs->hw_ssp_ctrl1);
+
+ /* Single bit SPI support */
+ writel(SSP_CTRL0_BUS_WIDTH_ONE_BIT, &ssp_regs->hw_ssp_ctrl0);
+
+ return 0;
+}
+
+static const struct dm_spi_ops mxs_spi_ops = {
+ .claim_bus = mxs_spi_claim_bus,
+ .release_bus = mxs_spi_release_bus,
+ .xfer = mxs_spi_xfer,
+ .set_speed = mxs_spi_set_speed,
+ .set_mode = mxs_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+static int mxs_of_to_plat(struct udevice *bus)
+{
+ struct mxs_spi_plat *plat = dev_get_plat(bus);
+ u32 prop[2];
+ int ret;
+
+ plat->base = dev_read_addr(bus);
+ plat->frequency =
+ dev_read_u32_default(bus, "spi-max-frequency", 40000000);
+ plat->num_cs = dev_read_u32_default(bus, "num-cs", 2);
+
+ ret = dev_read_u32_array(bus, "dmas", prop, ARRAY_SIZE(prop));
+ if (ret) {
+ printf("%s: Reading 'dmas' property failed!\n", __func__);
+ return ret;
+ }
+ plat->dma_id = prop[1];
+
+ ret = dev_read_u32_array(bus, "clocks", prop, ARRAY_SIZE(prop));
+ if (ret) {
+ printf("%s: Reading 'clocks' property failed!\n", __func__);
+ return ret;
+ }
+ plat->clk_id = prop[1];
+
+ debug("%s: base=0x%x, max-frequency=%d num-cs=%d dma_id=%d clk_id=%d\n",
+ __func__, (uint)plat->base, plat->frequency, plat->num_cs,
+ plat->dma_id, plat->clk_id);
+
+ return 0;
+}
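+
+/*
+ * The node parsed above might look roughly like the following sketch
+ * (base address, DMA channel and clock index are example values only):
+ *
+ * ssp0: spi@80010000 {
+ *	compatible = "fsl,imx28-spi";
+ *	reg = <0x80010000 0x2000>;
+ *	dmas = <&dma_apbh 4>;
+ *	clocks = <&clks 46>;
+ *	spi-max-frequency = <40000000>;
+ *	num-cs = <2>;
+ * };
+ */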
+
+static const struct udevice_id mxs_spi_ids[] = {
+ { .compatible = "fsl,imx23-spi" },
+ { .compatible = "fsl,imx28-spi" },
+ { }
+};
+#endif
+
+U_BOOT_DRIVER(fsl_imx23_spi) = {
+ .name = "fsl_imx23_spi",
+ .id = UCLASS_SPI,
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .of_match = mxs_spi_ids,
+ .of_to_plat = mxs_of_to_plat,
+#endif
+ .plat_auto = sizeof(struct mxs_spi_plat),
+ .ops = &mxs_spi_ops,
+ .priv_auto = sizeof(struct mxs_spi_priv),
+ .probe = mxs_spi_probe,
+};
+
+DM_DRIVER_ALIAS(fsl_imx23_spi, fsl_imx28_spi)
diff --git a/roms/u-boot/drivers/spi/nxp_fspi.c b/roms/u-boot/drivers/spi/nxp_fspi.c
new file mode 100644
index 000000000..6c5bad4c2
--- /dev/null
+++ b/roms/u-boot/drivers/spi/nxp_fspi.c
@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NXP FlexSPI(FSPI) controller driver.
+ *
+ * Copyright (c) 2019 Michael Walle <michael@walle.cc>
+ * Copyright (c) 2019 NXP
+ *
+ * This driver was originally ported from the linux kernel v5.4-rc3, which had
+ * the following notes:
+ *
+ * FlexSPI is a flexible SPI host controller which supports two SPI
+ * channels and up to 4 external devices. Each channel supports
+ * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional
+ * data lines).
+ *
+ * The FlexSPI controller is driven by the LUT (Look-up Table) registers.
+ * LUT registers are a look-up table for sequences of instructions.
+ * A valid sequence consists of four LUT registers.
+ * Maximum 32 LUT sequences can be programmed simultaneously.
+ *
+ * LUTs are being created at run-time based on the commands passed
+ * from the spi-mem framework, thus using single LUT index.
+ *
+ * Software triggered Flash read/write access by IP Bus.
+ *
+ * Memory mapped read access by AHB Bus.
+ *
+ * Based on SPI MEM interface and spi-fsl-qspi.c driver.
+ *
+ * Author:
+ * Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>
+ * Boris Brezillon <bbrezillon@kernel.org>
+ * Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/iopoll.h>
+#include <linux/bug.h>
+#include <linux/err.h>
+
+/*
+ * The driver only uses one single LUT entry, that is updated on
+ * each call of exec_op(). Index 0 is preset at boot with a basic
+ * read operation, so let's use the last entry (31).
+ */
+#define SEQID_LUT 31
+
+/* Registers used by the driver */
+#define FSPI_MCR0 0x00
+#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
+#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR0_LEARN_EN BIT(15)
+#define FSPI_MCR0_SCRFRUN_EN BIT(14)
+#define FSPI_MCR0_OCTCOMB_EN BIT(13)
+#define FSPI_MCR0_DOZE_EN BIT(12)
+#define FSPI_MCR0_HSEN BIT(11)
+#define FSPI_MCR0_SERCLKDIV BIT(8)
+#define FSPI_MCR0_ATDF_EN BIT(7)
+#define FSPI_MCR0_ARDF_EN BIT(6)
+#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4)
+#define FSPI_MCR0_END_CFG(x) ((x) << 2)
+#define FSPI_MCR0_MDIS BIT(1)
+#define FSPI_MCR0_SWRST BIT(0)
+
+#define FSPI_MCR1 0x04
+#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16)
+#define FSPI_MCR1_AHB_TIMEOUT(x) (x)
+
+#define FSPI_MCR2 0x08
+#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24)
+#define FSPI_MCR2_SAMEDEVICEEN BIT(15)
+#define FSPI_MCR2_CLRLRPHS BIT(14)
+#define FSPI_MCR2_ABRDATSZ BIT(8)
+#define FSPI_MCR2_ABRLEARN BIT(7)
+#define FSPI_MCR2_ABR_READ BIT(6)
+#define FSPI_MCR2_ABRWRITE BIT(5)
+#define FSPI_MCR2_ABRDUMMY BIT(4)
+#define FSPI_MCR2_ABR_MODE BIT(3)
+#define FSPI_MCR2_ABRCADDR BIT(2)
+#define FSPI_MCR2_ABRRADDR BIT(1)
+#define FSPI_MCR2_ABR_CMD BIT(0)
+
+#define FSPI_AHBCR 0x0c
+#define FSPI_AHBCR_RDADDROPT BIT(6)
+#define FSPI_AHBCR_PREF_EN BIT(5)
+#define FSPI_AHBCR_BUFF_EN BIT(4)
+#define FSPI_AHBCR_CACH_EN BIT(3)
+#define FSPI_AHBCR_CLRTXBUF BIT(2)
+#define FSPI_AHBCR_CLRRXBUF BIT(1)
+#define FSPI_AHBCR_PAR_EN BIT(0)
+
+#define FSPI_INTEN 0x10
+#define FSPI_INTEN_SCLKSBWR BIT(9)
+#define FSPI_INTEN_SCLKSBRD BIT(8)
+#define FSPI_INTEN_DATALRNFL BIT(7)
+#define FSPI_INTEN_IPTXWE BIT(6)
+#define FSPI_INTEN_IPRXWA BIT(5)
+#define FSPI_INTEN_AHBCMDERR BIT(4)
+#define FSPI_INTEN_IPCMDERR BIT(3)
+#define FSPI_INTEN_AHBCMDGE BIT(2)
+#define FSPI_INTEN_IPCMDGE BIT(1)
+#define FSPI_INTEN_IPCMDDONE BIT(0)
+
+#define FSPI_INTR 0x14
+#define FSPI_INTR_SCLKSBWR BIT(9)
+#define FSPI_INTR_SCLKSBRD BIT(8)
+#define FSPI_INTR_DATALRNFL BIT(7)
+#define FSPI_INTR_IPTXWE BIT(6)
+#define FSPI_INTR_IPRXWA BIT(5)
+#define FSPI_INTR_AHBCMDERR BIT(4)
+#define FSPI_INTR_IPCMDERR BIT(3)
+#define FSPI_INTR_AHBCMDGE BIT(2)
+#define FSPI_INTR_IPCMDGE BIT(1)
+#define FSPI_INTR_IPCMDDONE BIT(0)
+
+#define FSPI_LUTKEY 0x18
+#define FSPI_LUTKEY_VALUE 0x5AF05AF0
+
+#define FSPI_LCKCR 0x1C
+
+#define FSPI_LCKER_LOCK 0x1
+#define FSPI_LCKER_UNLOCK 0x2
+
+#define FSPI_BUFXCR_INVALID_MSTRID 0xE
+#define FSPI_AHBRX_BUF0CR0 0x20
+#define FSPI_AHBRX_BUF1CR0 0x24
+#define FSPI_AHBRX_BUF2CR0 0x28
+#define FSPI_AHBRX_BUF3CR0 0x2C
+#define FSPI_AHBRX_BUF4CR0 0x30
+#define FSPI_AHBRX_BUF5CR0 0x34
+#define FSPI_AHBRX_BUF6CR0 0x38
+#define FSPI_AHBRX_BUF7CR0 0x3C
+#define FSPI_AHBRXBUF0CR7_PREF BIT(31)
+
+#define FSPI_AHBRX_BUF0CR1 0x40
+#define FSPI_AHBRX_BUF1CR1 0x44
+#define FSPI_AHBRX_BUF2CR1 0x48
+#define FSPI_AHBRX_BUF3CR1 0x4C
+#define FSPI_AHBRX_BUF4CR1 0x50
+#define FSPI_AHBRX_BUF5CR1 0x54
+#define FSPI_AHBRX_BUF6CR1 0x58
+#define FSPI_AHBRX_BUF7CR1 0x5C
+
+#define FSPI_FLSHA1CR0 0x60
+#define FSPI_FLSHA2CR0 0x64
+#define FSPI_FLSHB1CR0 0x68
+#define FSPI_FLSHB2CR0 0x6C
+#define FSPI_FLSHXCR0_SZ_KB 10
+#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB)
+
+#define FSPI_FLSHA1CR1 0x70
+#define FSPI_FLSHA2CR1 0x74
+#define FSPI_FLSHB1CR1 0x78
+#define FSPI_FLSHB2CR1 0x7C
+#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16)
+#define FSPI_FLSHXCR1_CAS(x) ((x) << 11)
+#define FSPI_FLSHXCR1_WA BIT(10)
+#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5)
+#define FSPI_FLSHXCR1_TCSS(x) (x)
+
+#define FSPI_FLSHA1CR2 0x80
+#define FSPI_FLSHA2CR2 0x84
+#define FSPI_FLSHB1CR2 0x88
+#define FSPI_FLSHB2CR2 0x8C
+#define FSPI_FLSHXCR2_CLRINSP BIT(24)
+#define FSPI_FLSHXCR2_AWRWAIT BIT(16)
+#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
+#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
+#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
+#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
+
+#define FSPI_IPCR0 0xA0
+
+#define FSPI_IPCR1 0xA4
+#define FSPI_IPCR1_IPAREN BIT(31)
+#define FSPI_IPCR1_SEQNUM_SHIFT 24
+#define FSPI_IPCR1_SEQID_SHIFT 16
+#define FSPI_IPCR1_IDATSZ(x) (x)
+
+#define FSPI_IPCMD 0xB0
+#define FSPI_IPCMD_TRG BIT(0)
+
+#define FSPI_DLPR 0xB4
+
+#define FSPI_IPRXFCR 0xB8
+#define FSPI_IPRXFCR_CLR BIT(0)
+#define FSPI_IPRXFCR_DMA_EN BIT(1)
+#define FSPI_IPRXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_IPTXFCR 0xBC
+#define FSPI_IPTXFCR_CLR BIT(0)
+#define FSPI_IPTXFCR_DMA_EN BIT(1)
+#define FSPI_IPTXFCR_WMRK(x) ((x) << 2)
+
+#define FSPI_DLLACR 0xC0
+#define FSPI_DLLACR_OVRDEN BIT(8)
+
+#define FSPI_DLLBCR 0xC4
+#define FSPI_DLLBCR_OVRDEN BIT(8)
+
+#define FSPI_STS0 0xE0
+#define FSPI_STS0_DLPHB(x) ((x) << 8)
+#define FSPI_STS0_DLPHA(x) ((x) << 4)
+#define FSPI_STS0_CMD_SRC(x) ((x) << 2)
+#define FSPI_STS0_ARB_IDLE BIT(1)
+#define FSPI_STS0_SEQ_IDLE BIT(0)
+
+#define FSPI_STS1 0xE4
+#define FSPI_STS1_IP_ERRCD(x) ((x) << 24)
+#define FSPI_STS1_IP_ERRID(x) ((x) << 16)
+#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8)
+#define FSPI_STS1_AHB_ERRID(x) (x)
+
+#define FSPI_AHBSPNST 0xEC
+#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16)
+#define FSPI_AHBSPNST_BUFID(x) ((x) << 1)
+#define FSPI_AHBSPNST_ACTIVE BIT(0)
+
+#define FSPI_IPRXFSTS 0xF0
+#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16)
+#define FSPI_IPRXFSTS_FILL(x) (x)
+
+#define FSPI_IPTXFSTS 0xF4
+#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16)
+#define FSPI_IPTXFSTS_FILL(x) (x)
+
+#define FSPI_RFDR 0x100
+#define FSPI_TFDR 0x180
+
+#define FSPI_LUT_BASE 0x200
+#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+#define FSPI_LUT_REG(idx) \
+ (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
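+
+/*
+ * With SEQID_LUT = 31 the LUT offset is 31 * 4 * 4 = 0x1F0, so for
+ * example FSPI_LUT_REG(0) = 0x200 + 0x1F0 = 0x3F0.
+ */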
+
+/* register map end */
+
+/* Instruction set for the LUT register. */
+#define LUT_STOP 0x00
+#define LUT_CMD 0x01
+#define LUT_ADDR 0x02
+#define LUT_CADDR_SDR 0x03
+#define LUT_MODE 0x04
+#define LUT_MODE2 0x05
+#define LUT_MODE4 0x06
+#define LUT_MODE8 0x07
+#define LUT_NXP_WRITE 0x08
+#define LUT_NXP_READ 0x09
+#define LUT_LEARN_SDR 0x0A
+#define LUT_DATSZ_SDR 0x0B
+#define LUT_DUMMY 0x0C
+#define LUT_DUMMY_RWDS_SDR 0x0D
+#define LUT_JMP_ON_CS 0x1F
+#define LUT_CMD_DDR 0x21
+#define LUT_ADDR_DDR 0x22
+#define LUT_CADDR_DDR 0x23
+#define LUT_MODE_DDR 0x24
+#define LUT_MODE2_DDR 0x25
+#define LUT_MODE4_DDR 0x26
+#define LUT_MODE8_DDR 0x27
+#define LUT_WRITE_DDR 0x28
+#define LUT_READ_DDR 0x29
+#define LUT_LEARN_DDR 0x2A
+#define LUT_DATSZ_DDR 0x2B
+#define LUT_DUMMY_DDR 0x2C
+#define LUT_DUMMY_RWDS_DDR 0x2D
+
+/*
+ * Calculate number of required PAD bits for LUT register.
+ *
+ * The pad stands for the number of IO lines [0:7].
+ * For example, the octal read needs eight IO lines,
+ * so you should use LUT_PAD(8). This macro
+ * returns 3, i.e. use eight (2^3) IO lines for the read.
+ */
+#define LUT_PAD(x) (fls(x) - 1)
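+
+/* For example: LUT_PAD(1) = 0, LUT_PAD(2) = 1, LUT_PAD(4) = 2, LUT_PAD(8) = 3. */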
+
+/*
+ * Macro for constructing the LUT entries with the following
+ * register layout:
+ *
+ * ---------------------------------------------------
+ * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
+ * ---------------------------------------------------
+ */
+#define PAD_SHIFT 8
+#define INSTR_SHIFT 10
+#define OPRND_SHIFT 16
+
+/* Macros for constructing the LUT register. */
+#define LUT_DEF(idx, ins, pad, opr) \
+ ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
+ (opr)) << (((idx) % 2) * OPRND_SHIFT))
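+
+/*
+ * For example, packing a single-bit-wide command opcode 0x6B at index 0
+ * and a 24-bit address phase at index 1 into one LUT word:
+ *
+ * LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x6B) | LUT_DEF(1, LUT_ADDR, LUT_PAD(1), 24)
+ *
+ * gives 0x046B in the low half-word and 0x0818 in the high half-word,
+ * i.e. 0x0818046B.
+ */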
+
+#define POLL_TOUT 5000
+#define NXP_FSPI_MAX_CHIPSELECT 4
+
+struct nxp_fspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+ unsigned int ahb_buf_size;
+ unsigned int quirks;
+ bool little_endian;
+};
+
+static const struct nxp_fspi_devtype_data lx2160a_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+static const struct nxp_fspi_devtype_data imx8mm_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+struct nxp_fspi {
+ struct udevice *dev;
+ void __iomem *iobase;
+ void __iomem *ahb_addr;
+ u32 memmap_phy;
+ u32 memmap_phy_size;
+ struct clk clk, clk_en;
+ const struct nxp_fspi_devtype_data *devtype_data;
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The FSPI controller's endianness is independent of
+ * the CPU core's endianness. So even though the CPU
+ * core is little-endian, the FSPI controller may use
+ * big-endian or little-endian register access.
+ */
+static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ out_le32(addr, val);
+ else
+ out_be32(addr, val);
+}
+
+static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr)
+{
+ if (f->devtype_data->little_endian)
+ return in_le32(addr);
+ else
+ return in_be32(addr);
+}
+
+static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width)
+{
+ switch (width) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+static bool nxp_fspi_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f;
+ struct udevice *bus;
+ int ret;
+
+ bus = slave->dev->parent;
+ f = dev_get_priv(bus);
+
+ ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth);
+
+ if (op->addr.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth);
+
+ if (op->dummy.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth);
+
+ if (op->data.nbytes)
+ ret |= nxp_fspi_check_buswidth(f, op->data.buswidth);
+
+ if (ret)
+ return false;
+
+ /*
+ * The number of address bytes should be equal to or less than 4 bytes.
+ */
+ if (op->addr.nbytes > 4)
+ return false;
+
+ /*
+ * If the requested address value is greater than the controller's
+ * assigned memory-mapped space, return an error as it does not fit
+ * in the assigned address range.
+ */
+ if (op->addr.val >= f->memmap_phy_size)
+ return false;
+
+ /* Max 64 dummy clock cycles supported */
+ if (op->dummy.buswidth &&
+ (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
+ return false;
+
+ /* Max data length, check controller limits and alignment */
+ if (op->data.dir == SPI_MEM_DATA_IN &&
+ (op->data.nbytes > f->devtype_data->ahb_buf_size ||
+ (op->data.nbytes > f->devtype_data->rxfifo - 4 &&
+ !IS_ALIGNED(op->data.nbytes, 8))))
+ return false;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT &&
+ op->data.nbytes > f->devtype_data->txfifo)
+ return false;
+
+ return true;
+}
+
+/* Instead of busy looping invoke readl_poll_sleep_timeout functionality. */
+static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base,
+ u32 mask, u32 delay_us,
+ u32 timeout_us, bool c)
+{
+ u32 reg;
+
+ if (!f->devtype_data->little_endian)
+ mask = (u32)cpu_to_be32(mask);
+
+ if (c)
+ return readl_poll_sleep_timeout(base, reg, (reg & mask),
+ delay_us, timeout_us);
+ else
+ return readl_poll_sleep_timeout(base, reg, !(reg & mask),
+ delay_us, timeout_us);
+}
+
+/*
+ * If the slave device content is changed by a write or erase, the AHB
+ * buffer needs to be invalidated. This is achieved by resetting the
+ * controller via the MCR0[SWRESET] bit.
+ */
+static inline void nxp_fspi_invalid(struct nxp_fspi *f)
+{
+ u32 reg;
+ int ret;
+
+ reg = fspi_readl(f, f->iobase + FSPI_MCR0);
+ fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0);
+
+ /* w1c register, wait until it clears */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+}
+
+static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ u32 lutval[4] = {};
+ int lutidx = 1, i;
+
+ /* cmd */
+ lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
+ op->cmd.opcode);
+
+ /* addr bytes */
+ if (op->addr.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
+ LUT_PAD(op->addr.buswidth),
+ op->addr.nbytes * 8);
+ lutidx++;
+ }
+
+ /* dummy bytes, if needed */
+ if (op->dummy.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
+ /*
+ * Due to a FlexSPI controller limitation, the number of PADs for
+ * the dummy phase must be programmed equal to the data buswidth.
+ */
+ LUT_PAD(op->data.buswidth),
+ op->dummy.nbytes * 8 /
+ op->dummy.buswidth);
+ lutidx++;
+ }
+
+ /* read/write data bytes */
+ if (op->data.nbytes) {
+ lutval[lutidx / 2] |= LUT_DEF(lutidx,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ LUT_NXP_READ : LUT_NXP_WRITE,
+ LUT_PAD(op->data.buswidth),
+ 0);
+ lutidx++;
+ }
+
+ /* stop condition. */
+ lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
+
+ /* unlock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
+
+ /* fill LUT */
+ for (i = 0; i < ARRAY_SIZE(lutval); i++)
+ fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
+
+ dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n",
+ op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);
+
+ /* lock LUT */
+ fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
+ fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR);
+}
+
+#if CONFIG_IS_ENABLED(CLK)
+static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
+{
+ int ret;
+
+ ret = clk_enable(&f->clk_en);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(&f->clk);
+ if (ret) {
+ clk_disable(&f->clk_en);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
+{
+ clk_disable(&f->clk);
+ clk_disable(&f->clk_en);
+}
+#endif
+
+/*
+ * In the FlexSPI controller, flash access is based on the value of the
+ * FSPI_FLSHXXCR0 register and the start base address of the slave device.
+ *
+ * (Higher address)
+ * -------- <-- FLSHB2CR0
+ * | B2 |
+ * | |
+ * B2 start address --> -------- <-- FLSHB1CR0
+ * | B1 |
+ * | |
+ * B1 start address --> -------- <-- FLSHA2CR0
+ * | A2 |
+ * | |
+ * A2 start address --> -------- <-- FLSHA1CR0
+ * | A1 |
+ * | |
+ * A1 start address --> -------- (Lower address)
+ *
+ *
+ * Start base address defines the starting address range for given CS and
+ * FSPI_FLSHXXCR0 defines the size of the slave device connected at given CS.
+ *
+ * However, different targets have different numbers of chip selects:
+ * some have only a single CS, or two CS covering the controller's full
+ * memory-mapped space.
+ * Thus, the implementation is kept independent of the size and number
+ * of connected slave devices, and the controller's memory-mapped space
+ * size is assigned as the size of the connected slave device.
+ * Mark FLSHxxCR0 as zero initially and then assign value only to the selected
+ * chip-select Flash configuration register.
+ *
+ * For example, to access CS2 (B1), the FLSHB1CR0 register is set to the
+ * memory-mapped size of the controller.
+ * The FLSHxxCR0 registers of the remaining chip selects remain zero.
+ *
+ */
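+/*
+ * For instance, with a 256 MiB memory-mapped window, size_kb below is
+ * 0x10000000 >> 10 = 0x40000 (256 * 1024 KiB); selecting chip_select 1
+ * writes it to FSPI_FLSHA1CR0 + 4, i.e. FSPI_FLSHA2CR0.
+ */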
+static void nxp_fspi_select_mem(struct nxp_fspi *f, int chip_select)
+{
+ u64 size_kb;
+
+ /* Reset FLSHxxCR0 registers */
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0);
+ fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0);
+
+ /* Assign controller memory mapped space as size, KBytes, of flash. */
+ size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size);
+
+ fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 +
+ 4 * chip_select);
+
+ dev_dbg(f->dev, "Slave device [CS:%x] selected\n", chip_select);
+}
+
+static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ u32 len = op->data.nbytes;
+
+ /* Read out the data directly from the AHB buffer. */
+ memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len);
+}
+
+static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ u8 *buf = (u8 *)op->data.buf.out;
+
+ /* clear the TX FIFO. */
+ fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR);
+
+ /*
+ * The default watermark level is 8 bytes, hence in a single
+ * write request the controller can write at most 8 bytes of data.
+ */
+
+ for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) {
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ fspi_writel(f, *(u32 *)(buf + i), base + FSPI_TFDR);
+ fspi_writel(f, *(u32 *)(buf + i + 4), base + FSPI_TFDR + 4);
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+
+ if (i < op->data.nbytes) {
+ u32 data = 0;
+ int j;
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
+ memcpy(&data, buf + i + j, 4);
+ fspi_writel(f, data, base + FSPI_TFDR + j);
+ }
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+ }
+}
+
+static void nxp_fspi_read_rxfifo(struct nxp_fspi *f,
+ const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int i, ret;
+ int len = op->data.nbytes;
+ u8 *buf = (u8 *)op->data.buf.in;
+
+ /*
+ * The default watermark level is 8 bytes, hence in a single
+ * read request the controller can read at most 8 bytes of data.
+ */
+ for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) {
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR);
+ *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+ }
+
+ if (i < len) {
+ u32 tmp;
+ int size, j;
+
+ buf = op->data.buf.in + i;
+ /* Wait for RXFIFO available */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPRXWA, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+ len = op->data.nbytes - i;
+ for (j = 0; j < op->data.nbytes - i; j += 4) {
+ tmp = fspi_readl(f, base + FSPI_RFDR + j);
+ size = min(len, 4);
+ memcpy(buf + j, &tmp, size);
+ len -= size;
+ }
+ }
+
+ /* invalidate the RXFIFO */
+ fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR);
+ /* move the FIFO pointer */
+ fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
+}
+
+static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
+{
+ void __iomem *base = f->iobase;
+ int seqnum = 0;
+ int err = 0;
+ u32 reg;
+
+ reg = fspi_readl(f, base + FSPI_IPRXFCR);
+ /* invalidate the RXFIFO first */
+ reg &= ~FSPI_IPRXFCR_DMA_EN;
+ reg = reg | FSPI_IPRXFCR_CLR;
+ fspi_writel(f, reg, base + FSPI_IPRXFCR);
+
+ fspi_writel(f, op->addr.val, base + FSPI_IPCR0);
+ /*
+ * Always start the sequence at the same index since we update
+ * the LUT at each exec_op() call. Also specify the DATA
+ * length, since it has not been specified in the LUT.
+ */
+ fspi_writel(f, op->data.nbytes |
+ (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
+ (seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
+ base + FSPI_IPCR1);
+
+ /* Trigger the LUT now. */
+ fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD);
+
+ /* Wait for the completion. */
+ err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
+ FSPI_STS0_ARB_IDLE, 1, 1000 * 1000, true);
+
+ /* Invoke IP data read, if request is of data read. */
+ if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
+ nxp_fspi_read_rxfifo(f, op);
+
+ return err;
+}
+
+static int nxp_fspi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct nxp_fspi *f;
+ struct udevice *bus;
+ int err = 0;
+
+ bus = slave->dev->parent;
+ f = dev_get_priv(bus);
+
+ /* Wait for the controller to become ready. */
+ err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
+ FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
+ WARN_ON(err);
+
+ nxp_fspi_prepare_lut(f, op);
+ /*
+ * If we have large chunks of data, we read them through the AHB bus
+ * by accessing the mapped memory. In all other cases we use
+ * IP commands to access the flash.
+ */
+ if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
+ op->data.dir == SPI_MEM_DATA_IN) {
+ nxp_fspi_read_ahb(f, op);
+ } else {
+ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
+ nxp_fspi_fill_txfifo(f, op);
+
+ err = nxp_fspi_do_op(f, op);
+ }
+
+ /* Invalidate the data in the AHB buffer. */
+ nxp_fspi_invalid(f);
+
+ return err;
+}
+
+static int nxp_fspi_adjust_op_size(struct spi_slave *slave,
+ struct spi_mem_op *op)
+{
+ struct nxp_fspi *f;
+ struct udevice *bus;
+
+ bus = slave->dev->parent;
+ f = dev_get_priv(bus);
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes > f->devtype_data->txfifo)
+ op->data.nbytes = f->devtype_data->txfifo;
+ } else {
+ if (op->data.nbytes > f->devtype_data->ahb_buf_size)
+ op->data.nbytes = f->devtype_data->ahb_buf_size;
+ else if (op->data.nbytes > (f->devtype_data->rxfifo - 4))
+ op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
+ }
+
+ return 0;
+}
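+
+/*
+ * With the FIFO sizes used above (512-byte RX FIFO, 1 KiB TX FIFO, 2 KiB
+ * AHB buffer), a 3000-byte read is clipped to 2048 bytes, a 1001-byte
+ * read is rounded down to 1000 bytes, and a 2000-byte write is clipped
+ * to 1024 bytes.
+ */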
+
+static int nxp_fspi_default_setup(struct nxp_fspi *f)
+{
+ void __iomem *base = f->iobase;
+ int ret, i;
+ u32 reg;
+
+#if CONFIG_IS_ENABLED(CLK)
+ /* Disable and unprepare the clock to avoid glitches reaching the controller */
+ nxp_fspi_clk_disable_unprep(f);
+
+ /* the default frequency, we will change it later if necessary. */
+ ret = clk_set_rate(&f->clk, 20000000);
+ if (ret < 0)
+ return ret;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return ret;
+#endif
+
+ /* Reset the module */
+ /* w1c register, wait until it clears */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
+ FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
+ WARN_ON(ret);
+
+ /* Disable the module */
+ fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0);
+
+ /* Reset the DLL register to default value */
+ fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR);
+ fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
+
+ /* enable module */
+ fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF),
+ base + FSPI_MCR0);
+
+ /*
+ * Disable same device enable bit and configure all slave devices
+ * independently.
+ */
+ reg = fspi_readl(f, f->iobase + FSPI_MCR2);
+ reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN);
+ fspi_writel(f, reg, base + FSPI_MCR2);
+
+ /* AHB configuration for access buffer 0~7. */
+ for (i = 0; i < 7; i++)
+ fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);
+
+ /*
+ * Set ADATSZ with the maximum AHB buffer size to improve the read
+ * performance.
+ */
+ fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 |
+ FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0);
+
+ /* prefetch and no start address alignment limitation */
+ fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
+ base + FSPI_AHBCR);
+
+ /* AHB Read - Set lut sequence ID for all CS. */
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
+ fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);
+
+ return 0;
+}
+
+static int nxp_fspi_probe(struct udevice *bus)
+{
+ struct nxp_fspi *f = dev_get_priv(bus);
+
+ f->devtype_data =
+ (struct nxp_fspi_devtype_data *)dev_get_driver_data(bus);
+ nxp_fspi_default_setup(f);
+
+ return 0;
+}
+
+static int nxp_fspi_claim_bus(struct udevice *dev)
+{
+ struct nxp_fspi *f;
+ struct udevice *bus;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ bus = dev->parent;
+ f = dev_get_priv(bus);
+
+ nxp_fspi_select_mem(f, slave_plat->cs);
+
+ return 0;
+}
+
+static int nxp_fspi_set_speed(struct udevice *bus, uint speed)
+{
+#if CONFIG_IS_ENABLED(CLK)
+ struct nxp_fspi *f = dev_get_priv(bus);
+ int ret;
+
+ nxp_fspi_clk_disable_unprep(f);
+
+ ret = clk_set_rate(&f->clk, speed);
+ if (ret < 0)
+ return ret;
+
+ ret = nxp_fspi_clk_prep_enable(f);
+ if (ret)
+ return ret;
+#endif
+ return 0;
+}
+
+static int nxp_fspi_set_mode(struct udevice *bus, uint mode)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+static int nxp_fspi_of_to_plat(struct udevice *bus)
+{
+ struct nxp_fspi *f = dev_get_priv(bus);
+#if CONFIG_IS_ENABLED(CLK)
+ int ret;
+#endif
+
+ fdt_addr_t iobase;
+ fdt_addr_t iobase_size;
+ fdt_addr_t ahb_addr;
+ fdt_addr_t ahb_size;
+
+ f->dev = bus;
+
+ iobase = devfdt_get_addr_size_name(bus, "fspi_base", &iobase_size);
+ if (iobase == FDT_ADDR_T_NONE) {
+ dev_err(bus, "fspi_base regs missing\n");
+ return -ENODEV;
+ }
+ f->iobase = map_physmem(iobase, iobase_size, MAP_NOCACHE);
+
+ ahb_addr = devfdt_get_addr_size_name(bus, "fspi_mmap", &ahb_size);
+ if (ahb_addr == FDT_ADDR_T_NONE) {
+ dev_err(bus, "fspi_mmap regs missing\n");
+ return -ENODEV;
+ }
+ f->ahb_addr = map_physmem(ahb_addr, ahb_size, MAP_NOCACHE);
+ f->memmap_phy_size = ahb_size;
+
+#if CONFIG_IS_ENABLED(CLK)
+ ret = clk_get_by_name(bus, "fspi_en", &f->clk_en);
+ if (ret) {
+ dev_err(bus, "failed to get fspi_en clock\n");
+ return ret;
+ }
+
+ ret = clk_get_by_name(bus, "fspi", &f->clk);
+ if (ret) {
+ dev_err(bus, "failed to get fspi clock\n");
+ return ret;
+ }
+#endif
+
+ dev_dbg(bus, "iobase=<0x%llx>, ahb_addr=<0x%llx>\n", iobase, ahb_addr);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
+ .adjust_op_size = nxp_fspi_adjust_op_size,
+ .supports_op = nxp_fspi_supports_op,
+ .exec_op = nxp_fspi_exec_op,
+};
+
+static const struct dm_spi_ops nxp_fspi_ops = {
+ .claim_bus = nxp_fspi_claim_bus,
+ .set_speed = nxp_fspi_set_speed,
+ .set_mode = nxp_fspi_set_mode,
+ .mem_ops = &nxp_fspi_mem_ops,
+};
+
+static const struct udevice_id nxp_fspi_ids[] = {
+ { .compatible = "nxp,lx2160a-fspi", .data = (ulong)&lx2160a_data, },
+ { .compatible = "nxp,imx8mm-fspi", .data = (ulong)&imx8mm_data, },
+ { }
+};
+
+U_BOOT_DRIVER(nxp_fspi) = {
+ .name = "nxp_fspi",
+ .id = UCLASS_SPI,
+ .of_match = nxp_fspi_ids,
+ .ops = &nxp_fspi_ops,
+ .of_to_plat = nxp_fspi_of_to_plat,
+ .priv_auto = sizeof(struct nxp_fspi),
+ .probe = nxp_fspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/octeon_spi.c b/roms/u-boot/drivers/spi/octeon_spi.c
new file mode 100644
index 000000000..6ac66d2f9
--- /dev/null
+++ b/roms/u-boot/drivers/spi/octeon_spi.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell International Ltd.
+ */
+
+#include <clk.h>
+#include <dm.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <watchdog.h>
+#include <asm/io.h>
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
+
+#define OCTEON_SPI_MAX_BYTES 9
+#define OCTEON_SPI_MAX_CLOCK_HZ 50000000
+
+#define OCTEON_SPI_NUM_CS 4
+
+#define OCTEON_SPI_CS_VALID(cs) ((cs) < OCTEON_SPI_NUM_CS)
+
+#define MPI_CFG 0x0000
+#define MPI_STS 0x0008
+#define MPI_TX 0x0010
+#define MPI_XMIT 0x0018
+#define MPI_WIDE_DAT 0x0040
+#define MPI_IO_CTL 0x0048
+#define MPI_DAT(X) (0x0080 + ((X) << 3))
+#define MPI_WIDE_BUF(X) (0x0800 + ((X) << 3))
+#define MPI_CYA_CFG 0x1000
+#define MPI_CLKEN 0x1080
+
+#define MPI_CFG_ENABLE BIT_ULL(0)
+#define MPI_CFG_IDLELO BIT_ULL(1)
+#define MPI_CFG_CLK_CONT BIT_ULL(2)
+#define MPI_CFG_WIREOR BIT_ULL(3)
+#define MPI_CFG_LSBFIRST BIT_ULL(4)
+#define MPI_CFG_CS_STICKY BIT_ULL(5)
+#define MPI_CFG_CSHI BIT_ULL(7)
+#define MPI_CFG_IDLECLKS GENMASK_ULL(9, 8)
+#define MPI_CFG_TRITX BIT_ULL(10)
+#define MPI_CFG_CSLATE BIT_ULL(11)
+#define MPI_CFG_CSENA0 BIT_ULL(12)
+#define MPI_CFG_CSENA1 BIT_ULL(13)
+#define MPI_CFG_CSENA2 BIT_ULL(14)
+#define MPI_CFG_CSENA3 BIT_ULL(15)
+#define MPI_CFG_CLKDIV GENMASK_ULL(28, 16)
+#define MPI_CFG_LEGACY_DIS BIT_ULL(31)
+#define MPI_CFG_IOMODE GENMASK_ULL(35, 34)
+#define MPI_CFG_TB100_EN BIT_ULL(49)
+
+#define MPI_DAT_DATA GENMASK_ULL(7, 0)
+
+#define MPI_STS_BUSY BIT_ULL(0)
+#define MPI_STS_MPI_INTR BIT_ULL(1)
+#define MPI_STS_RXNUM GENMASK_ULL(12, 8)
+
+#define MPI_TX_TOTNUM GENMASK_ULL(4, 0)
+#define MPI_TX_TXNUM GENMASK_ULL(12, 8)
+#define MPI_TX_LEAVECS BIT_ULL(16)
+#define MPI_TX_CSID GENMASK_ULL(21, 20)
+
+#define MPI_XMIT_TOTNUM GENMASK_ULL(10, 0)
+#define MPI_XMIT_TXNUM GENMASK_ULL(30, 20)
+#define MPI_XMIT_BUF_SEL BIT_ULL(59)
+#define MPI_XMIT_LEAVECS BIT_ULL(60)
+#define MPI_XMIT_CSID GENMASK_ULL(62, 61)
+
+/* Used on Octeon TX2 */
+void board_acquire_flash_arb(bool acquire);
+
+/* Local driver data structure */
+struct octeon_spi {
+ void __iomem *base; /* Register base address */
+ struct clk clk;
+ u32 clkdiv; /* Clock divisor for device speed */
+};
+
+static u64 octeon_spi_set_mpicfg(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *slave = dev_get_parent_plat(dev);
+ struct udevice *bus = dev_get_parent(dev);
+ struct octeon_spi *priv = dev_get_priv(bus);
+ u64 mpi_cfg;
+ uint max_speed = slave->max_hz;
+ bool cpha, cpol;
+
+ if (!max_speed)
+ max_speed = 12500000;
+ if (max_speed > OCTEON_SPI_MAX_CLOCK_HZ)
+ max_speed = OCTEON_SPI_MAX_CLOCK_HZ;
+
+ debug("\n slave params %d %d %d\n", slave->cs,
+ slave->max_hz, slave->mode);
+ cpha = !!(slave->mode & SPI_CPHA);
+ cpol = !!(slave->mode & SPI_CPOL);
+
+ mpi_cfg = FIELD_PREP(MPI_CFG_CLKDIV, priv->clkdiv & 0x1fff) |
+ FIELD_PREP(MPI_CFG_CSHI, !!(slave->mode & SPI_CS_HIGH)) |
+ FIELD_PREP(MPI_CFG_LSBFIRST, !!(slave->mode & SPI_LSB_FIRST)) |
+ FIELD_PREP(MPI_CFG_WIREOR, !!(slave->mode & SPI_3WIRE)) |
+ FIELD_PREP(MPI_CFG_IDLELO, cpha != cpol) |
+ FIELD_PREP(MPI_CFG_CSLATE, cpha) |
+ MPI_CFG_CSENA0 | MPI_CFG_CSENA1 |
+ MPI_CFG_CSENA2 | MPI_CFG_CSENA3 |
+ MPI_CFG_ENABLE;
+
+ debug("\n mpi_cfg %llx\n", mpi_cfg);
+ return mpi_cfg;
+}
+
+/**
+ * Wait until the SPI bus is ready
+ *
+ * @param dev SPI device to wait for
+ */
+static void octeon_spi_wait_ready(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct octeon_spi *priv = dev_get_priv(bus);
+ void *base = priv->base;
+ u64 mpi_sts;
+
+ do {
+ mpi_sts = readq(base + MPI_STS);
+ WATCHDOG_RESET();
+ } while (mpi_sts & MPI_STS_BUSY);
+
+ debug("%s(%s)\n", __func__, dev->name);
+}
+
+/**
+ * Claim the bus for a slave device
+ *
+ * @param dev SPI slave device claiming the bus
+ *
+ * @return 0 for success, -EINVAL if chip select is invalid
+ */
+static int octeon_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct octeon_spi *priv = dev_get_priv(bus);
+ void *base = priv->base;
+ u64 mpi_cfg;
+
+ debug("\n\n%s(%s)\n", __func__, dev->name);
+ if (!OCTEON_SPI_CS_VALID(spi_chip_select(dev)))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2))
+ board_acquire_flash_arb(true);
+
+ mpi_cfg = readq(base + MPI_CFG);
+ mpi_cfg &= ~MPI_CFG_TRITX;
+ mpi_cfg |= MPI_CFG_ENABLE;
+ writeq(mpi_cfg, base + MPI_CFG);
+ mpi_cfg = readq(base + MPI_CFG);
+ udelay(5); /* Wait for the bus to settle */
+
+ return 0;
+}
+
+/**
+ * Release the bus to a slave device
+ *
+ * @param dev SPI slave device releasing the bus
+ *
+ * @return 0 for success, -EINVAL if chip select is invalid
+ */
+static int octeon_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct octeon_spi *priv = dev_get_priv(bus);
+ void *base = priv->base;
+ u64 mpi_cfg;
+
+ debug("%s(%s)\n\n", __func__, dev->name);
+ if (!OCTEON_SPI_CS_VALID(spi_chip_select(dev)))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_ARCH_OCTEONTX2))
+ board_acquire_flash_arb(false);
+
+ mpi_cfg = readq(base + MPI_CFG);
+ mpi_cfg &= ~MPI_CFG_ENABLE;
+ writeq(mpi_cfg, base + MPI_CFG);
+ mpi_cfg = readq(base + MPI_CFG);
+ udelay(1);
+
+ return 0;
+}
+
+static int octeon_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct octeon_spi *priv = dev_get_priv(bus);
+ void *base = priv->base;
+ u64 mpi_tx;
+ u64 mpi_cfg;
+ u64 wide_dat = 0;
+ int len = bitlen / 8;
+ int i;
+ const u8 *tx_data = dout;
+ u8 *rx_data = din;
+ int cs = spi_chip_select(dev);
+
+ if (!OCTEON_SPI_CS_VALID(cs))
+ return -EINVAL;
+
+ debug("\n %s(%s, %u, %p, %p, 0x%lx), cs: %d\n",
+ __func__, dev->name, bitlen, dout, din, flags, cs);
+
+ mpi_cfg = octeon_spi_set_mpicfg(dev);
+ if (mpi_cfg != readq(base + MPI_CFG)) {
+ writeq(mpi_cfg, base + MPI_CFG);
+ mpi_cfg = readq(base + MPI_CFG);
+ udelay(10);
+ }
+
+ debug("\n mpi_cfg upd %llx\n", mpi_cfg);
+
+ /*
+ * Start by writing and reading 8 bytes at a time. While we can support
+ * up to 10, it's easier to just use 8 with the MPI_WIDE_DAT register.
+ */
+ while (len > 8) {
+ if (tx_data) {
+ wide_dat = get_unaligned((u64 *)tx_data);
+ debug(" tx: %016llx \t", (unsigned long long)wide_dat);
+ tx_data += 8;
+ writeq(wide_dat, base + MPI_WIDE_DAT);
+ }
+
+ mpi_tx = FIELD_PREP(MPI_TX_CSID, cs) |
+ FIELD_PREP(MPI_TX_LEAVECS, 1) |
+ FIELD_PREP(MPI_TX_TXNUM, tx_data ? 8 : 0) |
+ FIELD_PREP(MPI_TX_TOTNUM, 8);
+ writeq(mpi_tx, base + MPI_TX);
+
+ octeon_spi_wait_ready(dev);
+
+ debug("\n ");
+
+ if (rx_data) {
+ wide_dat = readq(base + MPI_WIDE_DAT);
+ debug(" rx: %016llx\t", (unsigned long long)wide_dat);
+ *(u64 *)rx_data = wide_dat;
+ rx_data += 8;
+ }
+ len -= 8;
+ }
+
+ debug("\n ");
+
+ /* Write and read the rest of the data */
+ if (tx_data) {
+ for (i = 0; i < len; i++) {
+ debug(" tx: %02x\n", *tx_data);
+ writeq(*tx_data++, base + MPI_DAT(i));
+ }
+ }
+
+ mpi_tx = FIELD_PREP(MPI_TX_CSID, cs) |
+ FIELD_PREP(MPI_TX_LEAVECS, !(flags & SPI_XFER_END)) |
+ FIELD_PREP(MPI_TX_TXNUM, tx_data ? len : 0) |
+ FIELD_PREP(MPI_TX_TOTNUM, len);
+ writeq(mpi_tx, base + MPI_TX);
+
+ octeon_spi_wait_ready(dev);
+
+ debug("\n ");
+
+ if (rx_data) {
+ for (i = 0; i < len; i++) {
+ *rx_data = readq(base + MPI_DAT(i)) & 0xff;
+ debug(" rx: %02x\n", *rx_data);
+ rx_data++;
+ }
+ }
+
+ return 0;
+}
+
+static int octeontx2_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct octeon_spi *priv = dev_get_priv(bus);
+ void *base = priv->base;
+ u64 mpi_xmit;
+ u64 mpi_cfg;
+ u64 wide_dat = 0;
+ int len = bitlen / 8;
+ int rem;
+ int i;
+ const u8 *tx_data = dout;
+ u8 *rx_data = din;
+ int cs = spi_chip_select(dev);
+
+ if (!OCTEON_SPI_CS_VALID(cs))
+ return -EINVAL;
+
+ debug("\n %s(%s, %u, %p, %p, 0x%lx), cs: %d\n",
+ __func__, dev->name, bitlen, dout, din, flags, cs);
+
+ mpi_cfg = octeon_spi_set_mpicfg(dev);
+
+ mpi_cfg |= MPI_CFG_TRITX | MPI_CFG_LEGACY_DIS | MPI_CFG_CS_STICKY |
+ MPI_CFG_TB100_EN;
+
+ mpi_cfg &= ~MPI_CFG_IOMODE;
+ if (flags & (SPI_TX_DUAL | SPI_RX_DUAL))
+ mpi_cfg |= FIELD_PREP(MPI_CFG_IOMODE, 2);
+ if (flags & (SPI_TX_QUAD | SPI_RX_QUAD))
+ mpi_cfg |= FIELD_PREP(MPI_CFG_IOMODE, 3);
+
+ if (mpi_cfg != readq(base + MPI_CFG)) {
+ writeq(mpi_cfg, base + MPI_CFG);
+ mpi_cfg = readq(base + MPI_CFG);
+ udelay(10);
+ }
+
+ debug("\n mpi_cfg upd %llx\n\n", mpi_cfg);
+
+ /* Start by writing or reading 1024 bytes at a time. */
+ while (len > 1024) {
+ if (tx_data) {
+ /* 8 bytes per iteration */
+ for (i = 0; i < 128; i++) {
+ wide_dat = get_unaligned((u64 *)tx_data);
+ debug(" tx: %016llx \t",
+ (unsigned long long)wide_dat);
+ if ((i % 4) == 3)
+ debug("\n");
+ tx_data += 8;
+ writeq(wide_dat, base + MPI_WIDE_BUF(i));
+ }
+ }
+
+ mpi_xmit = FIELD_PREP(MPI_XMIT_CSID, cs) | MPI_XMIT_LEAVECS |
+ FIELD_PREP(MPI_XMIT_TXNUM, tx_data ? 1024 : 0) |
+ FIELD_PREP(MPI_XMIT_TOTNUM, 1024);
+ writeq(mpi_xmit, base + MPI_XMIT);
+
+ octeon_spi_wait_ready(dev);
+
+ debug("\n ");
+
+ if (rx_data) {
+ /* 8 bytes per iteration */
+ for (i = 0; i < 128; i++) {
+ wide_dat = readq(base + MPI_WIDE_BUF(i));
+ debug(" rx: %016llx\t",
+ (unsigned long long)wide_dat);
+ if ((i % 4) == 3)
+ debug("\n");
+ *(u64 *)rx_data = wide_dat;
+ rx_data += 8;
+ }
+ }
+ len -= 1024;
+ }
+
+ if (tx_data) {
+ rem = len % 8;
+ /* 8 bytes per iteration */
+ for (i = 0; i < len / 8; i++) {
+ wide_dat = get_unaligned((u64 *)tx_data);
+ debug(" tx: %016llx \t",
+ (unsigned long long)wide_dat);
+ if ((i % 4) == 3)
+ debug("\n");
+ tx_data += 8;
+ writeq(wide_dat, base + MPI_WIDE_BUF(i));
+ }
+ if (rem) {
+ memcpy(&wide_dat, tx_data, rem);
+ debug(" rtx: %016llx\t", wide_dat);
+ writeq(wide_dat, base + MPI_WIDE_BUF(i));
+ }
+ }
+
+ mpi_xmit = FIELD_PREP(MPI_XMIT_CSID, cs) |
+ FIELD_PREP(MPI_XMIT_LEAVECS, !(flags & SPI_XFER_END)) |
+ FIELD_PREP(MPI_XMIT_TXNUM, tx_data ? len : 0) |
+ FIELD_PREP(MPI_XMIT_TOTNUM, len);
+ writeq(mpi_xmit, base + MPI_XMIT);
+
+ octeon_spi_wait_ready(dev);
+
+ debug("\n ");
+
+ if (rx_data) {
+ rem = len % 8;
+ /* 8 bytes per iteration */
+ for (i = 0; i < len / 8; i++) {
+ wide_dat = readq(base + MPI_WIDE_BUF(i));
+ debug(" rx: %016llx\t",
+ (unsigned long long)wide_dat);
+ if ((i % 4) == 3)
+ debug("\n");
+ *(u64 *)rx_data = wide_dat;
+ rx_data += 8;
+ }
+ if (rem) {
+ wide_dat = readq(base + MPI_WIDE_BUF(i));
+ debug(" rrx: %016llx\t",
+ (unsigned long long)wide_dat);
+ memcpy(rx_data, &wide_dat, rem);
+ rx_data += rem;
+ }
+ }
+
+ return 0;
+}
+
+static bool octeon_spi_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ /* For now, support only the following buswidth combinations:
+ * 1-1-1
+ * 1-1-2 1-2-2
+ * 1-1-4 1-4-4
+ */
+ if (op->cmd.buswidth != 1)
+ return false;
+ return true;
+}
+
+static int octeon_spi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ unsigned long flags = SPI_XFER_BEGIN;
+ const void *tx;
+ void *rx;
+ u8 opcode, *buf;
+ u8 *addr;
+ int i, temp, ret;
+
+ if (op->cmd.buswidth != 1)
+ return -ENOTSUPP;
+
+ /* Send CMD */
+ i = 0;
+ opcode = op->cmd.opcode;
+
+ if (!op->data.nbytes && !op->addr.nbytes && !op->dummy.nbytes)
+ flags |= SPI_XFER_END;
+
+ ret = octeontx2_spi_xfer(slave->dev, 8, (void *)&opcode, NULL, flags);
+ if (ret < 0)
+ return ret;
+
+ /* Send Address and dummy */
+ if (op->addr.nbytes) {
+ /* Alloc buffer for address+dummy */
+ buf = (u8 *)calloc(1, op->addr.nbytes + op->dummy.nbytes);
+ if (!buf) {
+ printf("%s Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ addr = (u8 *)&op->addr.val;
+ for (temp = 0; temp < op->addr.nbytes; temp++)
+ buf[i++] = *(u8 *)(addr + op->addr.nbytes - 1 - temp);
+ for (temp = 0; temp < op->dummy.nbytes; temp++)
+ buf[i++] = 0xff;
+ if (op->addr.buswidth == 2)
+ flags |= SPI_RX_DUAL;
+ if (op->addr.buswidth == 4)
+ flags |= SPI_RX_QUAD;
+
+ if (!op->data.nbytes)
+ flags |= SPI_XFER_END;
+ ret = octeontx2_spi_xfer(slave->dev, i * 8, (void *)buf, NULL,
+ flags);
+ free(buf);
+ if (ret < 0)
+ return ret;
+ }
+ if (!op->data.nbytes)
+ return 0;
+
+ /* Send/Receive Data */
+ flags |= SPI_XFER_END;
+ if (op->data.buswidth == 2)
+ flags |= SPI_RX_DUAL;
+ if (op->data.buswidth == 4)
+ flags |= SPI_RX_QUAD;
+
+ rx = (op->data.dir == SPI_MEM_DATA_IN) ? op->data.buf.in : NULL;
+ tx = (op->data.dir == SPI_MEM_DATA_OUT) ? op->data.buf.out : NULL;
+
+ ret = octeontx2_spi_xfer(slave->dev, (op->data.nbytes * 8), tx, rx,
+ flags);
+ return ret;
+}
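+/*
+ * Illustrative flow for the exec_op above: a quad-output fast read
+ * (e.g. opcode 0x6b) is carried out as up to three transfers: the
+ * opcode byte with SPI_XFER_BEGIN, then the address sent MSB first
+ * plus 0xff dummy bytes, and finally the data phase with SPI_RX_QUAD
+ * and SPI_XFER_END so the chip select is released.
+ */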
+
+static const struct spi_controller_mem_ops octeontx2_spi_mem_ops = {
+ .supports_op = octeon_spi_supports_op,
+ .exec_op = octeon_spi_exec_op,
+};
+
+/**
+ * Set the speed of the SPI bus
+ *
+ * @param bus bus to set
+ * @param max_hz maximum speed supported
+ */
+static int octeon_spi_set_speed(struct udevice *bus, uint max_hz)
+{
+ struct octeon_spi *priv = dev_get_priv(bus);
+ ulong clk_rate;
+ u32 calc_hz;
+
+ if (max_hz > OCTEON_SPI_MAX_CLOCK_HZ)
+ max_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+
+ if (device_is_compatible(bus, "cavium,thunderx-spi"))
+ clk_rate = 100000000;
+ else
+ clk_rate = clk_get_rate(&priv->clk);
+ if (IS_ERR_VALUE(clk_rate))
+ return -EINVAL;
+
+ debug("%s(%s, %u, %lu)\n", __func__, bus->name, max_hz, clk_rate);
+
+ priv->clkdiv = clk_rate / (2 * max_hz);
+ while (1) {
+ calc_hz = clk_rate / (2 * priv->clkdiv);
+ if (calc_hz <= max_hz)
+ break;
+ priv->clkdiv += 1;
+ }
+
+ if (priv->clkdiv > 8191)
+ return -EINVAL;
+
+ debug("%s: clkdiv=%d\n", __func__, priv->clkdiv);
+
+ return 0;
+}
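+/*
+ * Worked example for the divider search above (numbers illustrative):
+ * with clk_rate = 100 MHz and max_hz = 30 MHz the first guess is
+ * clkdiv = 100 MHz / (2 * 30 MHz) = 1, i.e. calc_hz = 50 MHz, which is
+ * too fast, so clkdiv is bumped to 2 and SCLK becomes 100 MHz / 4 =
+ * 25 MHz, the highest rate not exceeding the request. Values of clkdiv
+ * above 8191 are rejected, matching the range check in the code.
+ */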
+
+static int octeon_spi_set_mode(struct udevice *bus, uint mode)
+{
+	/* Nothing to do here: the mode bits are applied when each transfer sets up MPI_CFG */
+ return 0;
+}
+
+static struct dm_spi_ops octeon_spi_ops = {
+ .claim_bus = octeon_spi_claim_bus,
+ .release_bus = octeon_spi_release_bus,
+ .set_speed = octeon_spi_set_speed,
+ .set_mode = octeon_spi_set_mode,
+ .xfer = octeon_spi_xfer,
+};
+
+static int octeon_spi_probe(struct udevice *dev)
+{
+ struct octeon_spi *priv = dev_get_priv(dev);
+ int ret;
+
+ /* Octeon TX & TX2 use PCI based probing */
+ if (device_is_compatible(dev, "cavium,thunder-8190-spi")) {
+ pci_dev_t bdf = dm_pci_get_bdf(dev);
+
+ debug("SPI PCI device: %x\n", bdf);
+ priv->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
+ PCI_REGION_MEM);
+ /* Add base offset */
+ priv->base += 0x1000;
+
+ /*
+ * Octeon TX2 needs a different xfer function and supports
+ * mem_ops
+ */
+ if (device_is_compatible(dev, "cavium,thunderx-spi")) {
+ octeon_spi_ops.xfer = octeontx2_spi_xfer;
+ octeon_spi_ops.mem_ops = &octeontx2_spi_mem_ops;
+ }
+ } else {
+ priv->base = dev_remap_addr(dev);
+ }
+
+ ret = clk_get_by_index(dev, 0, &priv->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(&priv->clk);
+ if (ret)
+ return ret;
+
+ debug("SPI bus %s %d at %p\n", dev->name, dev_seq(dev), priv->base);
+
+ return 0;
+}
+
+static const struct udevice_id octeon_spi_ids[] = {
+ /* MIPS Octeon */
+ { .compatible = "cavium,octeon-3010-spi" },
+ /* ARM Octeon TX / TX2 */
+ { .compatible = "cavium,thunder-8190-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(octeon_spi) = {
+ .name = "spi_octeon",
+ .id = UCLASS_SPI,
+ .of_match = octeon_spi_ids,
+ .probe = octeon_spi_probe,
+ .priv_auto = sizeof(struct octeon_spi),
+ .ops = &octeon_spi_ops,
+};
diff --git a/roms/u-boot/drivers/spi/omap3_spi.c b/roms/u-boot/drivers/spi/omap3_spi.c
new file mode 100644
index 000000000..c69f8fee6
--- /dev/null
+++ b/roms/u-boot/drivers/spi/omap3_spi.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Jagan Teki <jteki@openedev.com>
+ * Christophe Ricard <christophe.ricard@gmail.com>
+ *
+ * Copyright (C) 2010 Dirk Behme <dirk.behme@googlemail.com>
+ *
+ * Driver for McSPI controller on OMAP3. Based on davinci_spi.c
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * Parts taken from linux/drivers/spi/omap2_mcspi.c
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ *
+ * Modified by Ruslan Araslanov <ruslan.araslanov@vitecmm.com>
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <spi.h>
+#include <malloc.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <omap3_spi.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct omap2_mcspi_platform_config {
+ unsigned int regs_offset;
+};
+
+struct omap3_spi_priv {
+ struct mcspi *regs;
+ unsigned int cs;
+ unsigned int freq;
+ unsigned int mode;
+ unsigned int wordlen;
+ unsigned int pin_dir:1;
+
+ bool bus_claimed;
+};
+
+static void omap3_spi_write_chconf(struct omap3_spi_priv *priv, int val)
+{
+ writel(val, &priv->regs->channel[priv->cs].chconf);
+	/* Flush posted writes so the update takes effect immediately */
+ readl(&priv->regs->channel[priv->cs].chconf);
+}
+
+static void omap3_spi_set_enable(struct omap3_spi_priv *priv, int enable)
+{
+ writel(enable, &priv->regs->channel[priv->cs].chctrl);
+	/* Flush posted writes so the update takes effect immediately */
+ readl(&priv->regs->channel[priv->cs].chctrl);
+}
+
+static int omap3_spi_write(struct omap3_spi_priv *priv, unsigned int len,
+ const void *txp, unsigned long flags)
+{
+ ulong start;
+ int i, chconf;
+
+ chconf = readl(&priv->regs->channel[priv->cs].chconf);
+
+ /* Enable the channel */
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_EN);
+
+ chconf &= ~(OMAP3_MCSPI_CHCONF_TRM_MASK | OMAP3_MCSPI_CHCONF_WL_MASK);
+ chconf |= (priv->wordlen - 1) << 7;
+ chconf |= OMAP3_MCSPI_CHCONF_TRM_TX_ONLY;
+ chconf |= OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+
+ for (i = 0; i < len; i++) {
+ /* wait till TX register is empty (TXS == 1) */
+ start = get_timer(0);
+ while (!(readl(&priv->regs->channel[priv->cs].chstat) &
+ OMAP3_MCSPI_CHSTAT_TXS)) {
+ if (get_timer(start) > SPI_WAIT_TIMEOUT) {
+ printf("SPI TXS timed out, status=0x%08x\n",
+ readl(&priv->regs->channel[priv->cs].chstat));
+ return -1;
+ }
+ }
+ /* Write the data */
+ unsigned int *tx = &priv->regs->channel[priv->cs].tx;
+ if (priv->wordlen > 16)
+ writel(((u32 *)txp)[i], tx);
+ else if (priv->wordlen > 8)
+ writel(((u16 *)txp)[i], tx);
+ else
+ writel(((u8 *)txp)[i], tx);
+ }
+
+	/* Wait for the transfer to finish */
+ while ((readl(&priv->regs->channel[priv->cs].chstat) &
+ (OMAP3_MCSPI_CHSTAT_EOT | OMAP3_MCSPI_CHSTAT_TXS)) !=
+ (OMAP3_MCSPI_CHSTAT_EOT | OMAP3_MCSPI_CHSTAT_TXS))
+ ;
+
+	/* Disable the channel, otherwise the next immediate RX would be affected */
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_DIS);
+
+	if (flags & SPI_XFER_END) {
+ chconf &= ~OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+ }
+ return 0;
+}
+
+static int omap3_spi_read(struct omap3_spi_priv *priv, unsigned int len,
+ void *rxp, unsigned long flags)
+{
+ int i, chconf;
+ ulong start;
+
+ chconf = readl(&priv->regs->channel[priv->cs].chconf);
+
+ /* Enable the channel */
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_EN);
+
+ chconf &= ~(OMAP3_MCSPI_CHCONF_TRM_MASK | OMAP3_MCSPI_CHCONF_WL_MASK);
+ chconf |= (priv->wordlen - 1) << 7;
+ chconf |= OMAP3_MCSPI_CHCONF_TRM_RX_ONLY;
+ chconf |= OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+
+ writel(0, &priv->regs->channel[priv->cs].tx);
+
+ for (i = 0; i < len; i++) {
+ start = get_timer(0);
+ /* Wait till RX register contains data (RXS == 1) */
+ while (!(readl(&priv->regs->channel[priv->cs].chstat) &
+ OMAP3_MCSPI_CHSTAT_RXS)) {
+ if (get_timer(start) > SPI_WAIT_TIMEOUT) {
+ printf("SPI RXS timed out, status=0x%08x\n",
+ readl(&priv->regs->channel[priv->cs].chstat));
+ return -1;
+ }
+ }
+
+		/* Disable the channel to prevent further receiving */
+ if (i == (len - 1))
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_DIS);
+
+ /* Read the data */
+ unsigned int *rx = &priv->regs->channel[priv->cs].rx;
+ if (priv->wordlen > 16)
+ ((u32 *)rxp)[i] = readl(rx);
+ else if (priv->wordlen > 8)
+ ((u16 *)rxp)[i] = (u16)readl(rx);
+ else
+ ((u8 *)rxp)[i] = (u8)readl(rx);
+ }
+
+ if (flags & SPI_XFER_END) {
+ chconf &= ~OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+ }
+
+ return 0;
+}
+
+/* McSPI transmit-receive mode */
+static int omap3_spi_txrx(struct omap3_spi_priv *priv, unsigned int len,
+ const void *txp, void *rxp, unsigned long flags)
+{
+ ulong start;
+ int chconf, i = 0;
+
+ chconf = readl(&priv->regs->channel[priv->cs].chconf);
+
+	/* Enable the SPI channel */
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_EN);
+
+	/* Set transmit-receive mode */
+ chconf &= ~(OMAP3_MCSPI_CHCONF_TRM_MASK | OMAP3_MCSPI_CHCONF_WL_MASK);
+ chconf |= (priv->wordlen - 1) << 7;
+ chconf |= OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+
+	/* Shift one word in and out at a time */
+	for (i = 0; i < len; i++) {
+		/* Write: wait for TX empty (TXS == 1) */
+ start = get_timer(0);
+ while (!(readl(&priv->regs->channel[priv->cs].chstat) &
+ OMAP3_MCSPI_CHSTAT_TXS)) {
+ if (get_timer(start) > SPI_WAIT_TIMEOUT) {
+ printf("SPI TXS timed out, status=0x%08x\n",
+ readl(&priv->regs->channel[priv->cs].chstat));
+ return -1;
+ }
+ }
+ /* Write the data */
+ unsigned int *tx = &priv->regs->channel[priv->cs].tx;
+ if (priv->wordlen > 16)
+ writel(((u32 *)txp)[i], tx);
+ else if (priv->wordlen > 8)
+ writel(((u16 *)txp)[i], tx);
+ else
+ writel(((u8 *)txp)[i], tx);
+
+		/* Read: wait for RX to contain data (RXS == 1) */
+ start = get_timer(0);
+ while (!(readl(&priv->regs->channel[priv->cs].chstat) &
+ OMAP3_MCSPI_CHSTAT_RXS)) {
+ if (get_timer(start) > SPI_WAIT_TIMEOUT) {
+ printf("SPI RXS timed out, status=0x%08x\n",
+ readl(&priv->regs->channel[priv->cs].chstat));
+ return -1;
+ }
+ }
+ /* Read the data */
+ unsigned int *rx = &priv->regs->channel[priv->cs].rx;
+ if (priv->wordlen > 16)
+ ((u32 *)rxp)[i] = readl(rx);
+ else if (priv->wordlen > 8)
+ ((u16 *)rxp)[i] = (u16)readl(rx);
+ else
+ ((u8 *)rxp)[i] = (u8)readl(rx);
+ }
+ /* Disable the channel */
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_DIS);
+
+	/* If the transfer must be terminated, release the forced chip select */
+ if (flags & SPI_XFER_END) {
+ chconf &= ~OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+ }
+
+ return 0;
+}
+
+static int _spi_xfer(struct omap3_spi_priv *priv, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ unsigned int len;
+ int ret = -1;
+
+ if (priv->wordlen < 4 || priv->wordlen > 32) {
+ printf("omap3_spi: invalid wordlen %d\n", priv->wordlen);
+ return -1;
+ }
+
+ if (bitlen % priv->wordlen)
+ return -1;
+
+ len = bitlen / priv->wordlen;
+
+ if (bitlen == 0) { /* only change CS */
+ int chconf = readl(&priv->regs->channel[priv->cs].chconf);
+
+ if (flags & SPI_XFER_BEGIN) {
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_EN);
+ chconf |= OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+ }
+ if (flags & SPI_XFER_END) {
+ chconf &= ~OMAP3_MCSPI_CHCONF_FORCE;
+ omap3_spi_write_chconf(priv, chconf);
+ omap3_spi_set_enable(priv, OMAP3_MCSPI_CHCTRL_DIS);
+ }
+ ret = 0;
+ } else {
+ if (dout != NULL && din != NULL)
+ ret = omap3_spi_txrx(priv, len, dout, din, flags);
+ else if (dout != NULL)
+ ret = omap3_spi_write(priv, len, dout, flags);
+ else if (din != NULL)
+ ret = omap3_spi_read(priv, len, din, flags);
+ }
+ return ret;
+}
+
+static void _omap3_spi_set_speed(struct omap3_spi_priv *priv)
+{
+ uint32_t confr, div = 0;
+
+ confr = readl(&priv->regs->channel[priv->cs].chconf);
+
+ /* Calculate clock divisor. Valid range: 0x0 - 0xC ( /1 - /4096 ) */
+ if (priv->freq) {
+ while (div <= 0xC && (OMAP3_MCSPI_MAX_FREQ / (1 << div))
+ > priv->freq)
+ div++;
+ } else {
+ div = 0xC;
+ }
+
+ /* set clock divisor */
+ confr &= ~OMAP3_MCSPI_CHCONF_CLKD_MASK;
+ confr |= div << 2;
+
+ omap3_spi_write_chconf(priv, confr);
+}
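+/*
+ * Worked example, assuming the 48 MHz reference implied by
+ * OMAP3_MCSPI_MAX_FREQ: a 10 MHz request walks div through 48, 24 and
+ * 12 MHz (all too fast) and stops at div = 3, so the channel clock is
+ * 48 MHz / 2^3 = 6 MHz; a zero/unset frequency falls back to the
+ * largest divider (0xC, i.e. /4096).
+ */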
+
+static void _omap3_spi_set_mode(struct omap3_spi_priv *priv)
+{
+ uint32_t confr;
+
+ confr = readl(&priv->regs->channel[priv->cs].chconf);
+
+ /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
+ * REVISIT: this controller could support SPI_3WIRE mode.
+ */
+ if (priv->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+ confr &= ~(OMAP3_MCSPI_CHCONF_IS|OMAP3_MCSPI_CHCONF_DPE1);
+ confr |= OMAP3_MCSPI_CHCONF_DPE0;
+ } else {
+ confr &= ~OMAP3_MCSPI_CHCONF_DPE0;
+ confr |= OMAP3_MCSPI_CHCONF_IS|OMAP3_MCSPI_CHCONF_DPE1;
+ }
+
+ /* set SPI mode 0..3 */
+ confr &= ~(OMAP3_MCSPI_CHCONF_POL | OMAP3_MCSPI_CHCONF_PHA);
+ if (priv->mode & SPI_CPHA)
+ confr |= OMAP3_MCSPI_CHCONF_PHA;
+ if (priv->mode & SPI_CPOL)
+ confr |= OMAP3_MCSPI_CHCONF_POL;
+
+ /* set chipselect polarity; manage with FORCE */
+ if (!(priv->mode & SPI_CS_HIGH))
+ confr |= OMAP3_MCSPI_CHCONF_EPOL; /* active-low; normal */
+ else
+ confr &= ~OMAP3_MCSPI_CHCONF_EPOL;
+
+ /* Transmit & receive mode */
+ confr &= ~OMAP3_MCSPI_CHCONF_TRM_MASK;
+
+ omap3_spi_write_chconf(priv, confr);
+}
+
+static void _omap3_spi_set_wordlen(struct omap3_spi_priv *priv)
+{
+ unsigned int confr;
+
+ /* McSPI individual channel configuration */
+ confr = readl(&priv->regs->channel[priv->cs].chconf);
+
+ /* wordlength */
+ confr &= ~OMAP3_MCSPI_CHCONF_WL_MASK;
+ confr |= (priv->wordlen - 1) << 7;
+
+ omap3_spi_write_chconf(priv, confr);
+}
+
+static void spi_reset(struct mcspi *regs)
+{
+ unsigned int tmp;
+
+ writel(OMAP3_MCSPI_SYSCONFIG_SOFTRESET, &regs->sysconfig);
+ do {
+ tmp = readl(&regs->sysstatus);
+ } while (!(tmp & OMAP3_MCSPI_SYSSTATUS_RESETDONE));
+
+ writel(OMAP3_MCSPI_SYSCONFIG_AUTOIDLE |
+ OMAP3_MCSPI_SYSCONFIG_ENAWAKEUP |
+ OMAP3_MCSPI_SYSCONFIG_SMARTIDLE, &regs->sysconfig);
+
+ writel(OMAP3_MCSPI_WAKEUPENABLE_WKEN, &regs->wakeupenable);
+}
+
+static void _omap3_spi_claim_bus(struct omap3_spi_priv *priv)
+{
+ unsigned int conf;
+ /*
+ * setup when switching from (reset default) slave mode
+ * to single-channel master mode
+ */
+ conf = readl(&priv->regs->modulctrl);
+ conf &= ~(OMAP3_MCSPI_MODULCTRL_STEST | OMAP3_MCSPI_MODULCTRL_MS);
+ conf |= OMAP3_MCSPI_MODULCTRL_SINGLE;
+
+ writel(conf, &priv->regs->modulctrl);
+
+ priv->bus_claimed = true;
+}
+
+static int omap3_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct omap3_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ priv->cs = slave_plat->cs;
+ if (!priv->freq)
+ priv->freq = slave_plat->max_hz;
+
+ _omap3_spi_claim_bus(priv);
+ _omap3_spi_set_speed(priv);
+ _omap3_spi_set_mode(priv);
+
+ return 0;
+}
+
+static int omap3_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct omap3_spi_priv *priv = dev_get_priv(bus);
+
+ writel(OMAP3_MCSPI_MODULCTRL_MS, &priv->regs->modulctrl);
+
+ priv->bus_claimed = false;
+
+ return 0;
+}
+
+static int omap3_spi_set_wordlen(struct udevice *dev, unsigned int wordlen)
+{
+ struct udevice *bus = dev->parent;
+ struct omap3_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ priv->cs = slave_plat->cs;
+ priv->wordlen = wordlen;
+ _omap3_spi_set_wordlen(priv);
+
+ return 0;
+}
+
+static int omap3_spi_probe(struct udevice *dev)
+{
+ struct omap3_spi_priv *priv = dev_get_priv(dev);
+ struct omap3_spi_plat *plat = dev_get_plat(dev);
+
+ priv->regs = plat->regs;
+ priv->pin_dir = plat->pin_dir;
+ priv->wordlen = SPI_DEFAULT_WORDLEN;
+
+ spi_reset(priv->regs);
+
+ return 0;
+}
+
+static int omap3_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct omap3_spi_priv *priv = dev_get_priv(bus);
+
+ return _spi_xfer(priv, bitlen, dout, din, flags);
+}
+
+static int omap3_spi_set_speed(struct udevice *dev, unsigned int speed)
+{
+ struct omap3_spi_priv *priv = dev_get_priv(dev);
+
+ priv->freq = speed;
+ if (priv->bus_claimed)
+ _omap3_spi_set_speed(priv);
+
+ return 0;
+}
+
+static int omap3_spi_set_mode(struct udevice *dev, uint mode)
+{
+ struct omap3_spi_priv *priv = dev_get_priv(dev);
+
+ priv->mode = mode;
+
+ if (priv->bus_claimed)
+ _omap3_spi_set_mode(priv);
+
+ return 0;
+}
+
+static const struct dm_spi_ops omap3_spi_ops = {
+ .claim_bus = omap3_spi_claim_bus,
+ .release_bus = omap3_spi_release_bus,
+ .set_wordlen = omap3_spi_set_wordlen,
+ .xfer = omap3_spi_xfer,
+ .set_speed = omap3_spi_set_speed,
+ .set_mode = omap3_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+static struct omap2_mcspi_platform_config omap2_pdata = {
+ .regs_offset = 0,
+};
+
+static struct omap2_mcspi_platform_config omap4_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+};
+
+static int omap3_spi_of_to_plat(struct udevice *dev)
+{
+ struct omap2_mcspi_platform_config *data =
+ (struct omap2_mcspi_platform_config *)dev_get_driver_data(dev);
+ struct omap3_spi_plat *plat = dev_get_plat(dev);
+
+ plat->regs = (struct mcspi *)(dev_read_addr(dev) + data->regs_offset);
+
+ if (dev_read_bool(dev, "ti,pindir-d0-out-d1-in"))
+ plat->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+ else
+ plat->pin_dir = MCSPI_PINDIR_D0_IN_D1_OUT;
+
+ return 0;
+}
+
+static const struct udevice_id omap3_spi_ids[] = {
+ { .compatible = "ti,omap2-mcspi", .data = (ulong)&omap2_pdata },
+ { .compatible = "ti,omap4-mcspi", .data = (ulong)&omap4_pdata },
+ { }
+};
+#endif
+U_BOOT_DRIVER(omap3_spi) = {
+ .name = "omap3_spi",
+ .id = UCLASS_SPI,
+ .flags = DM_FLAG_PRE_RELOC,
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .of_match = omap3_spi_ids,
+ .of_to_plat = omap3_spi_of_to_plat,
+ .plat_auto = sizeof(struct omap3_spi_plat),
+#endif
+ .probe = omap3_spi_probe,
+ .ops = &omap3_spi_ops,
+ .priv_auto = sizeof(struct omap3_spi_priv),
+};
diff --git a/roms/u-boot/drivers/spi/pic32_spi.c b/roms/u-boot/drivers/spi/pic32_spi.c
new file mode 100644
index 000000000..45f07f083
--- /dev/null
+++ b/roms/u-boot/drivers/spi/pic32_spi.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Microchip PIC32 SPI controller driver.
+ *
+ * Copyright (c) 2015, Microchip Technology Inc.
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <asm/global_data.h>
+#include <linux/bitops.h>
+#include <linux/compat.h>
+#include <malloc.h>
+#include <spi.h>
+
+#include <asm/types.h>
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <dt-bindings/clock/microchip,clock.h>
+#include <mach/pic32.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* PIC32 SPI controller registers */
+struct pic32_reg_spi {
+ struct pic32_reg_atomic ctrl;
+ struct pic32_reg_atomic status;
+ struct pic32_reg_atomic buf;
+ struct pic32_reg_atomic baud;
+ struct pic32_reg_atomic ctrl2;
+};
+
+/* Bit fields in SPI Control Register */
+#define PIC32_SPI_CTRL_MSTEN BIT(5) /* Enable SPI Master */
+#define PIC32_SPI_CTRL_CKP BIT(6) /* active low */
+#define PIC32_SPI_CTRL_CKE BIT(8) /* Tx on falling edge */
+#define PIC32_SPI_CTRL_SMP BIT(9) /* Rx at middle or end of tx */
+#define PIC32_SPI_CTRL_BPW_MASK 0x03 /* Bits per word */
+#define PIC32_SPI_CTRL_BPW_8 0x0
+#define PIC32_SPI_CTRL_BPW_16 0x1
+#define PIC32_SPI_CTRL_BPW_32 0x2
+#define PIC32_SPI_CTRL_BPW_SHIFT 10
+#define PIC32_SPI_CTRL_ON BIT(15) /* Macro enable */
+#define PIC32_SPI_CTRL_ENHBUF BIT(16) /* Enable enhanced buffering */
+#define PIC32_SPI_CTRL_MCLKSEL BIT(23) /* Select SPI Clock src */
+#define PIC32_SPI_CTRL_MSSEN BIT(28) /* SPI macro will drive SS */
+#define PIC32_SPI_CTRL_FRMEN BIT(31) /* Enable framing mode */
+
+/* Bit fields in SPI Status Register */
+#define PIC32_SPI_STAT_RX_OV BIT(6) /* err, s/w needs to clear */
+#define PIC32_SPI_STAT_TF_LVL_MASK 0x1f
+#define PIC32_SPI_STAT_TF_LVL_SHIFT 16
+#define PIC32_SPI_STAT_RF_LVL_MASK 0x1f
+#define PIC32_SPI_STAT_RF_LVL_SHIFT 24
+
+/* Bit fields in SPI Baud Register */
+#define PIC32_SPI_BAUD_MASK 0x1ff
+
+struct pic32_spi_priv {
+ struct pic32_reg_spi *regs;
+ u32 fifo_depth; /* FIFO depth in bytes */
+ u32 fifo_n_word; /* FIFO depth in words */
+ struct gpio_desc cs_gpio;
+
+ /* Current SPI slave specific */
+ ulong clk_rate;
+ u32 speed_hz; /* spi-clk rate */
+ int mode;
+
+ /* Current message/transfer state */
+ const void *tx;
+ const void *tx_end;
+ const void *rx;
+ const void *rx_end;
+ u32 len;
+
+	/* SPI FIFO accessors */
+ void (*rx_fifo)(struct pic32_spi_priv *);
+ void (*tx_fifo)(struct pic32_spi_priv *);
+};
+
+static inline void pic32_spi_enable(struct pic32_spi_priv *priv)
+{
+ writel(PIC32_SPI_CTRL_ON, &priv->regs->ctrl.set);
+}
+
+static inline void pic32_spi_disable(struct pic32_spi_priv *priv)
+{
+ writel(PIC32_SPI_CTRL_ON, &priv->regs->ctrl.clr);
+}
+
+static inline u32 pic32_spi_rx_fifo_level(struct pic32_spi_priv *priv)
+{
+ u32 sr = readl(&priv->regs->status.raw);
+
+ return (sr >> PIC32_SPI_STAT_RF_LVL_SHIFT) & PIC32_SPI_STAT_RF_LVL_MASK;
+}
+
+static inline u32 pic32_spi_tx_fifo_level(struct pic32_spi_priv *priv)
+{
+ u32 sr = readl(&priv->regs->status.raw);
+
+ return (sr >> PIC32_SPI_STAT_TF_LVL_SHIFT) & PIC32_SPI_STAT_TF_LVL_MASK;
+}
+
+/* Return the max entries we can fill into tx fifo */
+static u32 pic32_tx_max(struct pic32_spi_priv *priv, int n_bytes)
+{
+ u32 tx_left, tx_room, rxtx_gap;
+
+ tx_left = (priv->tx_end - priv->tx) / n_bytes;
+ tx_room = priv->fifo_n_word - pic32_spi_tx_fifo_level(priv);
+
+ rxtx_gap = (priv->rx_end - priv->rx) - (priv->tx_end - priv->tx);
+ rxtx_gap /= n_bytes;
+ return min3(tx_left, tx_room, (u32)(priv->fifo_n_word - rxtx_gap));
+}
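+/*
+ * The rxtx_gap term above counts words already written to TX but not
+ * yet drained from RX; limiting the fill to fifo_n_word - rxtx_gap
+ * keeps the RX FIFO from overflowing. For example, with fifo_n_word =
+ * 16 (8-bit words) and 10 words still in flight, at most 6 new words
+ * are queued in this pass.
+ */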
+
+/* Return the max entries we should read out of rx fifo */
+static u32 pic32_rx_max(struct pic32_spi_priv *priv, int n_bytes)
+{
+ u32 rx_left = (priv->rx_end - priv->rx) / n_bytes;
+
+ return min_t(u32, rx_left, pic32_spi_rx_fifo_level(priv));
+}
+
+#define BUILD_SPI_FIFO_RW(__name, __type, __bwl) \
+static void pic32_spi_rx_##__name(struct pic32_spi_priv *priv) \
+{ \
+ __type val; \
+ u32 mx = pic32_rx_max(priv, sizeof(__type)); \
+ \
+ for (; mx; mx--) { \
+ val = read##__bwl(&priv->regs->buf.raw); \
+ if (priv->rx_end - priv->len) \
+ *(__type *)(priv->rx) = val; \
+ priv->rx += sizeof(__type); \
+ } \
+} \
+ \
+static void pic32_spi_tx_##__name(struct pic32_spi_priv *priv) \
+{ \
+ __type val; \
+ u32 mx = pic32_tx_max(priv, sizeof(__type)); \
+ \
+ for (; mx ; mx--) { \
+ val = (__type) ~0U; \
+ if (priv->tx_end - priv->len) \
+ val = *(__type *)(priv->tx); \
+ write##__bwl(val, &priv->regs->buf.raw); \
+ priv->tx += sizeof(__type); \
+ } \
+}
+BUILD_SPI_FIFO_RW(byte, u8, b);
+BUILD_SPI_FIFO_RW(word, u16, w);
+BUILD_SPI_FIFO_RW(dword, u32, l);
+
+static int pic32_spi_set_word_size(struct pic32_spi_priv *priv,
+ unsigned int wordlen)
+{
+ u32 bits_per_word;
+ u32 val;
+
+ switch (wordlen) {
+ case 8:
+ priv->rx_fifo = pic32_spi_rx_byte;
+ priv->tx_fifo = pic32_spi_tx_byte;
+ bits_per_word = PIC32_SPI_CTRL_BPW_8;
+ break;
+ case 16:
+ priv->rx_fifo = pic32_spi_rx_word;
+ priv->tx_fifo = pic32_spi_tx_word;
+ bits_per_word = PIC32_SPI_CTRL_BPW_16;
+ break;
+ case 32:
+ priv->rx_fifo = pic32_spi_rx_dword;
+ priv->tx_fifo = pic32_spi_tx_dword;
+ bits_per_word = PIC32_SPI_CTRL_BPW_32;
+ break;
+ default:
+ printf("pic32-spi: unsupported wordlen\n");
+ return -EINVAL;
+ }
+
+ /* set bits-per-word */
+ val = readl(&priv->regs->ctrl.raw);
+ val &= ~(PIC32_SPI_CTRL_BPW_MASK << PIC32_SPI_CTRL_BPW_SHIFT);
+ val |= bits_per_word << PIC32_SPI_CTRL_BPW_SHIFT;
+ writel(val, &priv->regs->ctrl.raw);
+
+ /* calculate maximum number of words fifo can hold */
+ priv->fifo_n_word = DIV_ROUND_UP(priv->fifo_depth, wordlen / 8);
+
+ return 0;
+}
+
+static int pic32_spi_claim_bus(struct udevice *slave)
+{
+ struct pic32_spi_priv *priv = dev_get_priv(slave->parent);
+
+ /* enable chip */
+ pic32_spi_enable(priv);
+
+ return 0;
+}
+
+static int pic32_spi_release_bus(struct udevice *slave)
+{
+ struct pic32_spi_priv *priv = dev_get_priv(slave->parent);
+
+ /* disable chip */
+ pic32_spi_disable(priv);
+
+ return 0;
+}
+
+static void spi_cs_activate(struct pic32_spi_priv *priv)
+{
+ if (!dm_gpio_is_valid(&priv->cs_gpio))
+ return;
+
+ dm_gpio_set_value(&priv->cs_gpio, 1);
+}
+
+static void spi_cs_deactivate(struct pic32_spi_priv *priv)
+{
+ if (!dm_gpio_is_valid(&priv->cs_gpio))
+ return;
+
+ dm_gpio_set_value(&priv->cs_gpio, 0);
+}
+
+static int pic32_spi_xfer(struct udevice *slave, unsigned int bitlen,
+ const void *tx_buf, void *rx_buf,
+ unsigned long flags)
+{
+ struct dm_spi_slave_plat *slave_plat;
+ struct udevice *bus = slave->parent;
+ struct pic32_spi_priv *priv;
+ int len = bitlen / 8;
+ int ret = 0;
+ ulong tbase;
+
+ priv = dev_get_priv(bus);
+ slave_plat = dev_get_parent_plat(slave);
+
+ debug("spi_xfer: bus:%i cs:%i flags:%lx\n",
+ dev_seq(bus), slave_plat->cs, flags);
+ debug("msg tx %p, rx %p submitted of %d byte(s)\n",
+ tx_buf, rx_buf, len);
+
+ /* assert cs */
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(priv);
+
+ /* set current transfer information */
+ priv->tx = tx_buf;
+ priv->rx = rx_buf;
+ priv->tx_end = priv->tx + len;
+ priv->rx_end = priv->rx + len;
+ priv->len = len;
+
+ /* transact by polling */
+ tbase = get_timer(0);
+ for (;;) {
+ priv->tx_fifo(priv);
+ priv->rx_fifo(priv);
+
+ /* received sufficient data */
+ if (priv->rx >= priv->rx_end) {
+ ret = 0;
+ break;
+ }
+
+ if (get_timer(tbase) > 5 * CONFIG_SYS_HZ) {
+ printf("pic32_spi: error, xfer timedout.\n");
+ flags |= SPI_XFER_END;
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ /* deassert cs */
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(priv);
+
+ return ret;
+}
+
+static int pic32_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct pic32_spi_priv *priv = dev_get_priv(bus);
+ u32 div;
+
+ debug("%s: %s, speed %u\n", __func__, bus->name, speed);
+
+ /* div = [clk_in / (2 * spi_clk)] - 1 */
+ div = (priv->clk_rate / 2 / speed) - 1;
+ div &= PIC32_SPI_BAUD_MASK;
+ writel(div, &priv->regs->baud.raw);
+
+ priv->speed_hz = speed;
+
+ return 0;
+}
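+/*
+ * Worked example of the baud formula above (clock value illustrative):
+ * with clk_rate = 50 MHz and a 1 MHz request, div = 50 / 2 / 1 - 1 = 24,
+ * and the resulting SCK is clk_rate / (2 * (div + 1)) = 1 MHz exactly.
+ */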
+
+static int pic32_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct pic32_spi_priv *priv = dev_get_priv(bus);
+ u32 val;
+
+ debug("%s: %s, mode %d\n", __func__, bus->name, mode);
+
+ /* set spi-clk mode */
+ val = readl(&priv->regs->ctrl.raw);
+ /* HIGH when idle */
+ if (mode & SPI_CPOL)
+ val |= PIC32_SPI_CTRL_CKP;
+ else
+ val &= ~PIC32_SPI_CTRL_CKP;
+
+ /* TX at idle-to-active clk transition */
+ if (mode & SPI_CPHA)
+ val &= ~PIC32_SPI_CTRL_CKE;
+ else
+ val |= PIC32_SPI_CTRL_CKE;
+
+ /* RX at end of tx */
+ val |= PIC32_SPI_CTRL_SMP;
+ writel(val, &priv->regs->ctrl.raw);
+
+ priv->mode = mode;
+
+ return 0;
+}
+
+static int pic32_spi_set_wordlen(struct udevice *slave, unsigned int wordlen)
+{
+ struct pic32_spi_priv *priv = dev_get_priv(slave->parent);
+
+ return pic32_spi_set_word_size(priv, wordlen);
+}
+
+static void pic32_spi_hw_init(struct pic32_spi_priv *priv)
+{
+ u32 val;
+
+ /* disable module */
+ pic32_spi_disable(priv);
+
+ val = readl(&priv->regs->ctrl);
+
+	/* enable the enhanced, 128-bit-deep FIFO */
+ val |= PIC32_SPI_CTRL_ENHBUF;
+ priv->fifo_depth = 16;
+
+ /* disable framing mode */
+ val &= ~PIC32_SPI_CTRL_FRMEN;
+
+ /* enable master mode */
+ val |= PIC32_SPI_CTRL_MSTEN;
+
+ /* select clk source */
+ val &= ~PIC32_SPI_CTRL_MCLKSEL;
+
+ /* set manual /CS mode */
+ val &= ~PIC32_SPI_CTRL_MSSEN;
+
+ writel(val, &priv->regs->ctrl);
+
+ /* clear rx overflow indicator */
+ writel(PIC32_SPI_STAT_RX_OV, &priv->regs->status.clr);
+}
+
+static int pic32_spi_probe(struct udevice *bus)
+{
+ struct pic32_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_bus *dm_spi = dev_get_uclass_priv(bus);
+ int node = dev_of_offset(bus);
+ struct udevice *clkdev;
+ fdt_addr_t addr;
+ fdt_size_t size;
+ int ret;
+
+ debug("%s: %d, bus: %i\n", __func__, __LINE__, dev_seq(bus));
+ addr = fdtdec_get_addr_size(gd->fdt_blob, node, "reg", &size);
+ if (addr == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ priv->regs = ioremap(addr, size);
+ if (!priv->regs)
+ return -EINVAL;
+
+ dm_spi->max_hz = fdtdec_get_int(gd->fdt_blob, node, "spi-max-frequency",
+ 250000000);
+ /* get clock rate */
+ ret = clk_get_by_index(bus, 0, &clkdev);
+ if (ret < 0) {
+ printf("pic32-spi: error, clk not found\n");
+ return ret;
+ }
+ priv->clk_rate = clk_get_periph_rate(clkdev, ret);
+
+ /* initialize HW */
+ pic32_spi_hw_init(priv);
+
+ /* set word len */
+ pic32_spi_set_word_size(priv, SPI_DEFAULT_WORDLEN);
+
+	/* The PIC32 SPI controller can drive /CS automatically based on the
+	 * TX FIFO fill level: /CS stays asserted while the TX FIFO is
+	 * non-empty and is deasserted once it drains, which the controller
+	 * treats as end of transfer. Since a momentarily empty TX FIFO would
+	 * deassert /CS in the middle of a message, drive /CS manually via
+	 * the cs-gpios pin instead.
+	 */
+ ret = gpio_request_by_name_nodev(offset_to_ofnode(node), "cs-gpios", 0,
+ &priv->cs_gpio, GPIOD_IS_OUT);
+ if (ret) {
+ printf("pic32-spi: error, cs-gpios not found\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dm_spi_ops pic32_spi_ops = {
+ .claim_bus = pic32_spi_claim_bus,
+ .release_bus = pic32_spi_release_bus,
+ .xfer = pic32_spi_xfer,
+ .set_speed = pic32_spi_set_speed,
+ .set_mode = pic32_spi_set_mode,
+ .set_wordlen = pic32_spi_set_wordlen,
+};
+
+static const struct udevice_id pic32_spi_ids[] = {
+ { .compatible = "microchip,pic32mzda-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(pic32_spi) = {
+ .name = "pic32_spi",
+ .id = UCLASS_SPI,
+ .of_match = pic32_spi_ids,
+ .ops = &pic32_spi_ops,
+ .priv_auto = sizeof(struct pic32_spi_priv),
+ .probe = pic32_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/pl022_spi.c b/roms/u-boot/drivers/spi/pl022_spi.c
new file mode 100644
index 000000000..9856a5669
--- /dev/null
+++ b/roms/u-boot/drivers/spi/pl022_spi.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2012
+ * Armando Visconti, ST Microelectronics, armando.visconti@st.com.
+ *
+ * (C) Copyright 2018
+ * Quentin Schulz, Bootlin, quentin.schulz@bootlin.com
+ *
+ * Driver for ARM PL022 SPI Controller.
+ */
+
+#include <clk.h>
+#include <common.h>
+#include <dm.h>
+#include <dm/platform_data/spi_pl022.h>
+#include <linux/io.h>
+#include <asm/global_data.h>
+#include <spi.h>
+
+#define SSP_CR0 0x000
+#define SSP_CR1 0x004
+#define SSP_DR 0x008
+#define SSP_SR 0x00C
+#define SSP_CPSR 0x010
+#define SSP_IMSC 0x014
+#define SSP_RIS 0x018
+#define SSP_MIS 0x01C
+#define SSP_ICR 0x020
+#define SSP_DMACR 0x024
+#define SSP_CSR 0x030 /* vendor extension */
+#define SSP_ITCR 0x080
+#define SSP_ITIP 0x084
+#define SSP_ITOP 0x088
+#define SSP_TDR 0x08C
+
+#define SSP_PID0 0xFE0
+#define SSP_PID1 0xFE4
+#define SSP_PID2 0xFE8
+#define SSP_PID3 0xFEC
+
+#define SSP_CID0 0xFF0
+#define SSP_CID1 0xFF4
+#define SSP_CID2 0xFF8
+#define SSP_CID3 0xFFC
+
+/* SSP Control Register 0 - SSP_CR0 */
+#define SSP_CR0_SPO (0x1 << 6)
+#define SSP_CR0_SPH (0x1 << 7)
+#define SSP_CR0_BIT_MODE(x) ((x) - 1)
+#define SSP_SCR_MIN (0x00)
+#define SSP_SCR_MAX (0xFF)
+#define SSP_SCR_SHFT 8
+#define DFLT_CLKRATE 2
+
+/* SSP Control Register 1 - SSP_CR1 */
+#define SSP_CR1_MASK_SSE (0x1 << 1)
+
+#define SSP_CPSR_MIN (0x02)
+#define SSP_CPSR_MAX (0xFE)
+#define DFLT_PRESCALE (0x40)
+
+/* SSP Status Register - SSP_SR */
+#define SSP_SR_MASK_TFE (0x1 << 0) /* Transmit FIFO empty */
+#define SSP_SR_MASK_TNF (0x1 << 1) /* Transmit FIFO not full */
+#define SSP_SR_MASK_RNE (0x1 << 2) /* Receive FIFO not empty */
+#define SSP_SR_MASK_RFF (0x1 << 3) /* Receive FIFO full */
+#define SSP_SR_MASK_BSY (0x1 << 4) /* Busy Flag */
+
+struct pl022_spi_slave {
+ void *base;
+ unsigned int freq;
+};
+
+/*
+ * The ARM PL022 exists in several 'flavors'.
+ * This driver currently supports only the standard variant (0x00041022),
+ * which has a 16-bit-wide, 8-entry-deep TX/RX FIFO.
+ */
+static int pl022_is_supported(struct pl022_spi_slave *ps)
+{
+ /* PL022 version is 0x00041022 */
+ if ((readw(ps->base + SSP_PID0) == 0x22) &&
+ (readw(ps->base + SSP_PID1) == 0x10) &&
+ ((readw(ps->base + SSP_PID2) & 0xf) == 0x04) &&
+ (readw(ps->base + SSP_PID3) == 0x00))
+ return 1;
+
+ return 0;
+}
+
+static int pl022_spi_probe(struct udevice *bus)
+{
+ struct pl022_spi_pdata *plat = dev_get_plat(bus);
+ struct pl022_spi_slave *ps = dev_get_priv(bus);
+
+ ps->base = ioremap(plat->addr, plat->size);
+ ps->freq = plat->freq;
+
+ /* Check the PL022 version */
+ if (!pl022_is_supported(ps))
+ return -ENOTSUPP;
+
+ /* 8 bits per word, high polarity and default clock rate */
+ writew(SSP_CR0_BIT_MODE(8), ps->base + SSP_CR0);
+ writew(DFLT_PRESCALE, ps->base + SSP_CPSR);
+
+ return 0;
+}
+
+static void flush(struct pl022_spi_slave *ps)
+{
+ do {
+ while (readw(ps->base + SSP_SR) & SSP_SR_MASK_RNE)
+ readw(ps->base + SSP_DR);
+ } while (readw(ps->base + SSP_SR) & SSP_SR_MASK_BSY);
+}
+
+static int pl022_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct pl022_spi_slave *ps = dev_get_priv(bus);
+ u16 reg;
+
+ /* Enable the SPI hardware */
+ reg = readw(ps->base + SSP_CR1);
+ reg |= SSP_CR1_MASK_SSE;
+ writew(reg, ps->base + SSP_CR1);
+
+ flush(ps);
+
+ return 0;
+}
+
+static int pl022_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct pl022_spi_slave *ps = dev_get_priv(bus);
+ u16 reg;
+
+ flush(ps);
+
+ /* Disable the SPI hardware */
+ reg = readw(ps->base + SSP_CR1);
+ reg &= ~SSP_CR1_MASK_SSE;
+ writew(reg, ps->base + SSP_CR1);
+
+ return 0;
+}
+
+static int pl022_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct pl022_spi_slave *ps = dev_get_priv(bus);
+ u32 len_tx = 0, len_rx = 0, len;
+ u32 ret = 0;
+ const u8 *txp = dout;
+ u8 *rxp = din, value;
+
+ if (bitlen == 0)
+ /* Finish any previously submitted transfers */
+ return 0;
+
+ /*
+ * TODO: The controller can do non-multiple-of-8 bit
+ * transfers, but this driver currently doesn't support it.
+ *
+ * It's also not clear how such transfers are supposed to be
+ * represented as a stream of bytes...this is a limitation of
+ * the current SPI interface.
+ */
+ if (bitlen % 8) {
+ /* Errors always terminate an ongoing transfer */
+ flags |= SPI_XFER_END;
+ return -1;
+ }
+
+ len = bitlen / 8;
+
+ while (len_tx < len) {
+ if (readw(ps->base + SSP_SR) & SSP_SR_MASK_TNF) {
+ value = txp ? *txp++ : 0;
+ writew(value, ps->base + SSP_DR);
+ len_tx++;
+ }
+
+ if (readw(ps->base + SSP_SR) & SSP_SR_MASK_RNE) {
+ value = readw(ps->base + SSP_DR);
+ if (rxp)
+ *rxp++ = value;
+ len_rx++;
+ }
+ }
+
+ while (len_rx < len_tx) {
+ if (readw(ps->base + SSP_SR) & SSP_SR_MASK_RNE) {
+ value = readw(ps->base + SSP_DR);
+ if (rxp)
+ *rxp++ = value;
+ len_rx++;
+ }
+ }
+
+ return ret;
+}
+
+static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
+{
+ return rate / (cpsdvsr * (1 + scr));
+}
+
+static int pl022_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct pl022_spi_slave *ps = dev_get_priv(bus);
+ u16 scr = SSP_SCR_MIN, cr0 = 0, cpsr = SSP_CPSR_MIN, best_scr = scr,
+ best_cpsr = cpsr;
+ u32 min, max, best_freq = 0, tmp;
+ u32 rate = ps->freq;
+ bool found = false;
+
+ max = spi_rate(rate, SSP_CPSR_MIN, SSP_SCR_MIN);
+ min = spi_rate(rate, SSP_CPSR_MAX, SSP_SCR_MAX);
+
+ if (speed > max || speed < min) {
+ pr_err("Tried to set speed to %dHz but min=%d and max=%d\n",
+ speed, min, max);
+ return -EINVAL;
+ }
+
+ while (cpsr <= SSP_CPSR_MAX && !found) {
+ while (scr <= SSP_SCR_MAX) {
+ tmp = spi_rate(rate, cpsr, scr);
+
+ if (abs(speed - tmp) < abs(speed - best_freq)) {
+ best_freq = tmp;
+ best_cpsr = cpsr;
+ best_scr = scr;
+
+ if (tmp == speed) {
+ found = true;
+ break;
+ }
+ }
+
+ scr++;
+ }
+ cpsr += 2;
+ scr = SSP_SCR_MIN;
+ }
+
+ writew(best_cpsr, ps->base + SSP_CPSR);
+ cr0 = readw(ps->base + SSP_CR0);
+ writew(cr0 | (best_scr << SSP_SCR_SHFT), ps->base + SSP_CR0);
+
+ return 0;
+}
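+/*
+ * Worked example of the search above (SSPCLK value illustrative): with
+ * a 24 MHz input, the reachable range is 24 MHz / (254 * 256) = ~369 Hz
+ * up to 24 MHz / 2 = 12 MHz, and a 1 MHz request is matched exactly by
+ * cpsdvsr = 2, scr = 11, since 24 MHz / (2 * (1 + 11)) = 1 MHz.
+ */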
+
+static int pl022_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct pl022_spi_slave *ps = dev_get_priv(bus);
+ u16 reg;
+
+ reg = readw(ps->base + SSP_CR0);
+ reg &= ~(SSP_CR0_SPH | SSP_CR0_SPO);
+ if (mode & SPI_CPHA)
+ reg |= SSP_CR0_SPH;
+ if (mode & SPI_CPOL)
+ reg |= SSP_CR0_SPO;
+ writew(reg, ps->base + SSP_CR0);
+
+ return 0;
+}
+
+static int pl022_cs_info(struct udevice *bus, uint cs,
+ struct spi_cs_info *info)
+{
+ return 0;
+}
+
+static const struct dm_spi_ops pl022_spi_ops = {
+ .claim_bus = pl022_spi_claim_bus,
+ .release_bus = pl022_spi_release_bus,
+ .xfer = pl022_spi_xfer,
+ .set_speed = pl022_spi_set_speed,
+ .set_mode = pl022_spi_set_mode,
+ .cs_info = pl022_cs_info,
+};
+
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+static int pl022_spi_of_to_plat(struct udevice *bus)
+{
+ struct pl022_spi_pdata *plat = dev_get_plat(bus);
+ const void *fdt = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+ struct clk clkdev;
+ int ret;
+
+ plat->addr = fdtdec_get_addr_size(fdt, node, "reg", &plat->size);
+
+ ret = clk_get_by_index(bus, 0, &clkdev);
+ if (ret)
+ return ret;
+
+ plat->freq = clk_get_rate(&clkdev);
+
+ return 0;
+}
+
+static const struct udevice_id pl022_spi_ids[] = {
+ { .compatible = "arm,pl022-spi" },
+ { }
+};
+#endif
+
+U_BOOT_DRIVER(pl022_spi) = {
+ .name = "pl022_spi",
+ .id = UCLASS_SPI,
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .of_match = pl022_spi_ids,
+ .of_to_plat = pl022_spi_of_to_plat,
+#endif
+ .ops = &pl022_spi_ops,
+ .plat_auto = sizeof(struct pl022_spi_pdata),
+ .priv_auto = sizeof(struct pl022_spi_slave),
+ .probe = pl022_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/renesas_rpc_spi.c b/roms/u-boot/drivers/spi/renesas_rpc_spi.c
new file mode 100644
index 000000000..26b6aa85c
--- /dev/null
+++ b/roms/u-boot/drivers/spi/renesas_rpc_spi.c
@@ -0,0 +1,472 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Renesas RCar Gen3 RPC QSPI driver
+ *
+ * Copyright (C) 2018 Marek Vasut <marek.vasut@gmail.com>
+ */
+
+#include <common.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <clk.h>
+#include <dm.h>
+#include <dm/of_access.h>
+#include <dt-structs.h>
+#include <errno.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <spi.h>
+#include <wait_bit.h>
+
+#define RPC_CMNCR 0x0000 /* R/W */
+#define RPC_CMNCR_MD BIT(31)
+#define RPC_CMNCR_SFDE BIT(24)
+#define RPC_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
+#define RPC_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
+#define RPC_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
+#define RPC_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
+#define RPC_CMNCR_MOIIO_HIZ (RPC_CMNCR_MOIIO0(3) | RPC_CMNCR_MOIIO1(3) | \
+ RPC_CMNCR_MOIIO2(3) | RPC_CMNCR_MOIIO3(3))
+#define RPC_CMNCR_IO3FV(val) (((val) & 0x3) << 14)
+#define RPC_CMNCR_IO2FV(val) (((val) & 0x3) << 12)
+#define RPC_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
+#define RPC_CMNCR_IOFV_HIZ (RPC_CMNCR_IO0FV(3) | RPC_CMNCR_IO2FV(3) | \
+ RPC_CMNCR_IO3FV(3))
+#define RPC_CMNCR_CPHAT BIT(6)
+#define RPC_CMNCR_CPHAR BIT(5)
+#define RPC_CMNCR_SSLP BIT(4)
+#define RPC_CMNCR_CPOL BIT(3)
+#define RPC_CMNCR_BSZ(val) (((val) & 0x3) << 0)
+
+#define RPC_SSLDR 0x0004 /* R/W */
+#define RPC_SSLDR_SPNDL(d) (((d) & 0x7) << 16)
+#define RPC_SSLDR_SLNDL(d) (((d) & 0x7) << 8)
+#define RPC_SSLDR_SCKDL(d) (((d) & 0x7) << 0)
+
+#define RPC_DRCR 0x000C /* R/W */
+#define RPC_DRCR_SSLN BIT(24)
+#define RPC_DRCR_RBURST(v) (((v) & 0x1F) << 16)
+#define RPC_DRCR_RCF BIT(9)
+#define RPC_DRCR_RBE BIT(8)
+#define RPC_DRCR_SSLE BIT(0)
+
+#define RPC_DRCMR 0x0010 /* R/W */
+#define RPC_DRCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPC_DRCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPC_DREAR 0x0014 /* R/W */
+#define RPC_DREAR_EAV(v) (((v) & 0xFF) << 16)
+#define RPC_DREAR_EAC(v) (((v) & 0x7) << 0)
+
+#define RPC_DROPR 0x0018 /* R/W */
+#define RPC_DROPR_OPD3(o) (((o) & 0xFF) << 24)
+#define RPC_DROPR_OPD2(o) (((o) & 0xFF) << 16)
+#define RPC_DROPR_OPD1(o) (((o) & 0xFF) << 8)
+#define RPC_DROPR_OPD0(o) (((o) & 0xFF) << 0)
+
+#define RPC_DRENR 0x001C /* R/W */
+#define RPC_DRENR_CDB(o) (u32)((((o) & 0x3) << 30))
+#define RPC_DRENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPC_DRENR_ADB(o) (((o) & 0x3) << 24)
+#define RPC_DRENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPC_DRENR_SPIDB(o) (((o) & 0x3) << 16)
+#define RPC_DRENR_DME BIT(15)
+#define RPC_DRENR_CDE BIT(14)
+#define RPC_DRENR_OCDE BIT(12)
+#define RPC_DRENR_ADE(v) (((v) & 0xF) << 8)
+#define RPC_DRENR_OPDE(v) (((v) & 0xF) << 4)
+
+#define RPC_SMCR 0x0020 /* R/W */
+#define RPC_SMCR_SSLKP BIT(8)
+#define RPC_SMCR_SPIRE BIT(2)
+#define RPC_SMCR_SPIWE BIT(1)
+#define RPC_SMCR_SPIE BIT(0)
+
+#define RPC_SMCMR 0x0024 /* R/W */
+#define RPC_SMCMR_CMD(c) (((c) & 0xFF) << 16)
+#define RPC_SMCMR_OCMD(c) (((c) & 0xFF) << 0)
+
+#define RPC_SMADR 0x0028 /* R/W */
+#define RPC_SMOPR 0x002C /* R/W */
+#define RPC_SMOPR_OPD0(o) (((o) & 0xFF) << 0)
+#define RPC_SMOPR_OPD1(o) (((o) & 0xFF) << 8)
+#define RPC_SMOPR_OPD2(o) (((o) & 0xFF) << 16)
+#define RPC_SMOPR_OPD3(o) (((o) & 0xFF) << 24)
+
+#define RPC_SMENR 0x0030 /* R/W */
+#define RPC_SMENR_CDB(o) (((o) & 0x3) << 30)
+#define RPC_SMENR_OCDB(o) (((o) & 0x3) << 28)
+#define RPC_SMENR_ADB(o) (((o) & 0x3) << 24)
+#define RPC_SMENR_OPDB(o) (((o) & 0x3) << 20)
+#define RPC_SMENR_SPIDB(o) (((o) & 0x3) << 16)
+#define RPC_SMENR_DME BIT(15)
+#define RPC_SMENR_CDE BIT(14)
+#define RPC_SMENR_OCDE BIT(12)
+#define RPC_SMENR_ADE(v) (((v) & 0xF) << 8)
+#define RPC_SMENR_OPDE(v) (((v) & 0xF) << 4)
+#define RPC_SMENR_SPIDE(v) (((v) & 0xF) << 0)
+
+#define RPC_SMRDR0 0x0038 /* R */
+#define RPC_SMRDR1 0x003C /* R */
+#define RPC_SMWDR0 0x0040 /* R/W */
+#define RPC_SMWDR1 0x0044 /* R/W */
+#define RPC_CMNSR 0x0048 /* R */
+#define RPC_CMNSR_SSLF BIT(1)
+#define RPC_CMNSR_TEND BIT(0)
+
+#define RPC_DRDMCR 0x0058 /* R/W */
+#define RPC_DRDMCR_DMCYC(v) (((v) & 0xF) << 0)
+
+#define RPC_DRDRENR 0x005C /* R/W */
+#define RPC_DRDRENR_HYPE (0x5 << 12)
+#define RPC_DRDRENR_ADDRE BIT(8)
+#define RPC_DRDRENR_OPDRE BIT(4)
+#define RPC_DRDRENR_DRDRE BIT(0)
+
+#define RPC_SMDMCR 0x0060 /* R/W */
+#define RPC_SMDMCR_DMCYC(v) (((v) & 0xF) << 0)
+
+#define RPC_SMDRENR 0x0064 /* R/W */
+#define RPC_SMDRENR_HYPE (0x5 << 12)
+#define RPC_SMDRENR_ADDRE BIT(8)
+#define RPC_SMDRENR_OPDRE BIT(4)
+#define RPC_SMDRENR_SPIDRE BIT(0)
+
+#define RPC_PHYCNT 0x007C /* R/W */
+#define RPC_PHYCNT_CAL BIT(31)
+#define PRC_PHYCNT_OCTA_AA BIT(22)
+#define PRC_PHYCNT_OCTA_SA BIT(23)
+#define PRC_PHYCNT_EXDS BIT(21)
+#define RPC_PHYCNT_OCT BIT(20)
+#define RPC_PHYCNT_STRTIM(v) (((v) & 0x7) << 15)
+#define RPC_PHYCNT_WBUF2 BIT(4)
+#define RPC_PHYCNT_WBUF BIT(2)
+#define RPC_PHYCNT_MEM(v) (((v) & 0x3) << 0)
+
+#define RPC_PHYINT 0x0088 /* R/W */
+#define RPC_PHYINT_RSTEN BIT(18)
+#define RPC_PHYINT_WPEN BIT(17)
+#define RPC_PHYINT_INTEN BIT(16)
+#define RPC_PHYINT_RST BIT(2)
+#define RPC_PHYINT_WP BIT(1)
+#define RPC_PHYINT_INT BIT(0)
+
+#define RPC_WBUF 0x8000 /* R/W size=4/8/16/32/64Bytes */
+#define RPC_WBUF_SIZE 0x100
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct rpc_spi_plat {
+ fdt_addr_t regs;
+ fdt_addr_t extr;
+ s32 freq; /* Default clock freq, -1 for none */
+};
+
+struct rpc_spi_priv {
+ fdt_addr_t regs;
+ fdt_addr_t extr;
+ struct clk clk;
+
+ u8 cmdcopy[8];
+ u32 cmdlen;
+ bool cmdstarted;
+};
+
+static int rpc_spi_wait_sslf(struct udevice *dev)
+{
+ struct rpc_spi_priv *priv = dev_get_priv(dev->parent);
+
+ return wait_for_bit_le32((void *)priv->regs + RPC_CMNSR, RPC_CMNSR_SSLF,
+ false, 1000, false);
+}
+
+static int rpc_spi_wait_tend(struct udevice *dev)
+{
+ struct rpc_spi_priv *priv = dev_get_priv(dev->parent);
+
+ return wait_for_bit_le32((void *)priv->regs + RPC_CMNSR, RPC_CMNSR_TEND,
+ true, 1000, false);
+}
+
+static void rpc_spi_flush_read_cache(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct rpc_spi_priv *priv = dev_get_priv(bus);
+
+ /* Flush read cache */
+ writel(RPC_DRCR_SSLN | RPC_DRCR_RBURST(0x1f) |
+ RPC_DRCR_RCF | RPC_DRCR_RBE | RPC_DRCR_SSLE,
+ priv->regs + RPC_DRCR);
+ readl(priv->regs + RPC_DRCR);
+}
+
+static int rpc_spi_claim_bus(struct udevice *dev, bool manual)
+{
+ struct udevice *bus = dev->parent;
+ struct rpc_spi_priv *priv = dev_get_priv(bus);
+
+ /*
+ * NOTE: The 0x260 are undocumented bits, but they must be set.
+ * NOTE: On H3 ES1.x (not supported in mainline U-Boot), the
+ * RPC_PHYCNT_STRTIM shall be 0, while on newer parts, the
+ * RPC_PHYCNT_STRTIM shall be 6.
+ */
+ writel(RPC_PHYCNT_CAL | RPC_PHYCNT_STRTIM(6) | 0x260,
+ priv->regs + RPC_PHYCNT);
+ writel((manual ? RPC_CMNCR_MD : 0) | RPC_CMNCR_SFDE |
+ RPC_CMNCR_MOIIO_HIZ | RPC_CMNCR_IOFV_HIZ | RPC_CMNCR_BSZ(0),
+ priv->regs + RPC_CMNCR);
+
+ writel(RPC_SSLDR_SPNDL(7) | RPC_SSLDR_SLNDL(7) |
+ RPC_SSLDR_SCKDL(7), priv->regs + RPC_SSLDR);
+
+ rpc_spi_flush_read_cache(dev);
+
+ return 0;
+}
+
+static int rpc_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct rpc_spi_priv *priv = dev_get_priv(bus);
+
+ /* NOTE: The 0x260 are undocumented bits, but they must be set. */
+ writel(RPC_PHYCNT_STRTIM(6) | 0x260, priv->regs + RPC_PHYCNT);
+
+ rpc_spi_flush_read_cache(dev);
+
+ return 0;
+}
+
+static int rpc_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct rpc_spi_priv *priv = dev_get_priv(bus);
+ u32 wlen = dout ? (bitlen / 8) : 0;
+ u32 rlen = din ? (bitlen / 8) : 0;
+ u32 wloop = DIV_ROUND_UP(wlen, 4);
+ u32 smenr, smcr, offset;
+ int ret = 0;
+
+ if (!priv->cmdstarted) {
+ if (!wlen || rlen)
+ BUG();
+
+ memcpy(priv->cmdcopy, dout, wlen);
+ priv->cmdlen = wlen;
+
+ /* Command transfer start */
+ priv->cmdstarted = true;
+ if (!(flags & SPI_XFER_END))
+ return 0;
+ }
+
+ offset = (priv->cmdcopy[1] << 16) | (priv->cmdcopy[2] << 8) |
+ (priv->cmdcopy[3] << 0);
+
+ smenr = 0;
+
+ if (wlen || (!rlen && !wlen) || flags == SPI_XFER_ONCE) {
+ if (wlen && flags == SPI_XFER_END)
+ smenr = RPC_SMENR_SPIDE(0xf);
+
+ rpc_spi_claim_bus(dev, true);
+
+ writel(0, priv->regs + RPC_SMCR);
+
+ if (priv->cmdlen >= 1) { /* Command(1) */
+ writel(RPC_SMCMR_CMD(priv->cmdcopy[0]),
+ priv->regs + RPC_SMCMR);
+ smenr |= RPC_SMENR_CDE;
+ } else {
+ writel(0, priv->regs + RPC_SMCMR);
+ }
+
+ if (priv->cmdlen >= 4) { /* Address(3) */
+ writel(offset, priv->regs + RPC_SMADR);
+ smenr |= RPC_SMENR_ADE(7);
+ } else {
+ writel(0, priv->regs + RPC_SMADR);
+ }
+
+ if (priv->cmdlen >= 5) { /* Dummy(n) */
+ writel(8 * (priv->cmdlen - 4) - 1,
+ priv->regs + RPC_SMDMCR);
+ smenr |= RPC_SMENR_DME;
+ } else {
+ writel(0, priv->regs + RPC_SMDMCR);
+ }
+
+ writel(0, priv->regs + RPC_SMOPR);
+
+ writel(0, priv->regs + RPC_SMDRENR);
+
+ if (wlen && flags == SPI_XFER_END) {
+ u32 *datout = (u32 *)dout;
+
+ while (wloop--) {
+ smcr = RPC_SMCR_SPIWE | RPC_SMCR_SPIE;
+ if (wloop >= 1)
+ smcr |= RPC_SMCR_SSLKP;
+ writel(smenr, priv->regs + RPC_SMENR);
+ writel(*datout, priv->regs + RPC_SMWDR0);
+ writel(smcr, priv->regs + RPC_SMCR);
+ ret = rpc_spi_wait_tend(dev);
+ if (ret)
+ goto err;
+ datout++;
+ smenr = RPC_SMENR_SPIDE(0xf);
+ }
+
+ ret = rpc_spi_wait_sslf(dev);
+
+ } else {
+ writel(smenr, priv->regs + RPC_SMENR);
+ writel(RPC_SMCR_SPIE, priv->regs + RPC_SMCR);
+ ret = rpc_spi_wait_tend(dev);
+ }
+ } else { /* Read data only, using DRx ext access */
+ rpc_spi_claim_bus(dev, false);
+
+ if (priv->cmdlen >= 1) { /* Command(1) */
+ writel(RPC_DRCMR_CMD(priv->cmdcopy[0]),
+ priv->regs + RPC_DRCMR);
+ smenr |= RPC_DRENR_CDE;
+ } else {
+ writel(0, priv->regs + RPC_DRCMR);
+ }
+
+ if (priv->cmdlen >= 4) /* Address(3) */
+ smenr |= RPC_DRENR_ADE(7);
+
+ if (priv->cmdlen >= 5) { /* Dummy(n) */
+ writel(8 * (priv->cmdlen - 4) - 1,
+ priv->regs + RPC_DRDMCR);
+ smenr |= RPC_DRENR_DME;
+ } else {
+ writel(0, priv->regs + RPC_DRDMCR);
+ }
+
+ writel(0, priv->regs + RPC_DROPR);
+
+ writel(smenr, priv->regs + RPC_DRENR);
+
+ if (rlen)
+ memcpy_fromio(din, (void *)(priv->extr + offset), rlen);
+ else
+ readl(priv->extr); /* Dummy read */
+ }
+
+err:
+ priv->cmdstarted = false;
+
+ rpc_spi_release_bus(dev);
+
+ return ret;
+}
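+/*
+ * Summary of the transfer flow above: command and address bytes are
+ * buffered in cmdcopy[] until SPI_XFER_END is seen. Writes (and ops
+ * with no data) then go out through the manual SMCMR/SMADR/SMWDR0 path
+ * 32 bits at a time, while pure reads are served from the memory-mapped
+ * external address space at priv->extr so the controller generates the
+ * read sequence itself.
+ */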
+
+static int rpc_spi_set_speed(struct udevice *bus, uint speed)
+{
+ /* This is a SPI NOR controller, do nothing. */
+ return 0;
+}
+
+static int rpc_spi_set_mode(struct udevice *bus, uint mode)
+{
+ /* This is a SPI NOR controller, do nothing. */
+ return 0;
+}
+
+static int rpc_spi_bind(struct udevice *parent)
+{
+ const void *fdt = gd->fdt_blob;
+ ofnode node;
+ int ret, off;
+
+ /*
+ * Check if there are any SPI NOR child nodes, if so, bind as
+ * this controller will be operated in SPI mode.
+ */
+ dev_for_each_subnode(node, parent) {
+ off = ofnode_to_offset(node);
+
+ ret = fdt_node_check_compatible(fdt, off, "spi-flash");
+ if (!ret)
+ return 0;
+
+ ret = fdt_node_check_compatible(fdt, off, "jedec,spi-nor");
+ if (!ret)
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int rpc_spi_probe(struct udevice *dev)
+{
+ struct rpc_spi_plat *plat = dev_get_plat(dev);
+ struct rpc_spi_priv *priv = dev_get_priv(dev);
+
+ priv->regs = plat->regs;
+ priv->extr = plat->extr;
+#if CONFIG_IS_ENABLED(CLK)
+ clk_enable(&priv->clk);
+#endif
+ return 0;
+}
+
+static int rpc_spi_of_to_plat(struct udevice *bus)
+{
+ struct rpc_spi_plat *plat = dev_get_plat(bus);
+
+ plat->regs = dev_read_addr_index(bus, 0);
+ plat->extr = dev_read_addr_index(bus, 1);
+
+#if CONFIG_IS_ENABLED(CLK)
+ struct rpc_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ ret = clk_get_by_index(bus, 0, &priv->clk);
+ if (ret < 0) {
+ printf("%s: Could not get clock for %s: %d\n",
+ __func__, bus->name, ret);
+ return ret;
+ }
+#endif
+
+ plat->freq = dev_read_u32_default(bus, "spi-max-freq", 50000000);
+
+ return 0;
+}
+
+static const struct dm_spi_ops rpc_spi_ops = {
+ .xfer = rpc_spi_xfer,
+ .set_speed = rpc_spi_set_speed,
+ .set_mode = rpc_spi_set_mode,
+};
+
+static const struct udevice_id rpc_spi_ids[] = {
+ { .compatible = "renesas,rpc-r7s72100" },
+ { .compatible = "renesas,rpc-r8a7795" },
+ { .compatible = "renesas,rpc-r8a7796" },
+ { .compatible = "renesas,rpc-r8a77965" },
+ { .compatible = "renesas,rpc-r8a77970" },
+ { .compatible = "renesas,rpc-r8a77995" },
+ { .compatible = "renesas,rcar-gen3-rpc" },
+ { }
+};
+
+U_BOOT_DRIVER(rpc_spi) = {
+ .name = "rpc_spi",
+ .id = UCLASS_SPI,
+ .of_match = rpc_spi_ids,
+ .ops = &rpc_spi_ops,
+ .of_to_plat = rpc_spi_of_to_plat,
+ .plat_auto = sizeof(struct rpc_spi_plat),
+ .priv_auto = sizeof(struct rpc_spi_priv),
+ .bind = rpc_spi_bind,
+ .probe = rpc_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/rk_spi.c b/roms/u-boot/drivers/spi/rk_spi.c
new file mode 100644
index 000000000..40bd8851b
--- /dev/null
+++ b/roms/u-boot/drivers/spi/rk_spi.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * spi driver for rockchip
+ *
+ * (C) 2019 Theobroma Systems Design und Consulting GmbH
+ *
+ * (C) Copyright 2015 Google, Inc
+ *
+ * (C) Copyright 2008-2013 Rockchip Electronics
+ * Peter, Software Engineering, <superpeter.cai@gmail.com>.
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <dt-structs.h>
+#include <errno.h>
+#include <log.h>
+#include <spi.h>
+#include <time.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <asm/io.h>
+#include <asm/arch-rockchip/clock.h>
+#include <asm/arch-rockchip/periph.h>
+#include <dm/pinctrl.h>
+#include "rk_spi.h"
+
+/* Change to 1 to output registers at the start of each transaction */
+#define DEBUG_RK_SPI 0
+
+/*
+ * ctrlr1 is 16-bits, so we should support lengths of 0xffff + 1. However,
+ * the controller seems to hang when given 0x10000, so stick with this for now.
+ */
+#define ROCKCHIP_SPI_MAX_TRANLEN 0xffff
+
+struct rockchip_spi_params {
+ /* RXFIFO overruns and TXFIFO underruns stop the master clock */
+ bool master_manages_fifo;
+};
+
+struct rockchip_spi_plat {
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct dtd_rockchip_rk3288_spi of_plat;
+#endif
+ s32 frequency; /* Default clock frequency, -1 for none */
+ fdt_addr_t base;
+ uint deactivate_delay_us; /* Delay to wait after deactivate */
+ uint activate_delay_us; /* Delay to wait after activate */
+};
+
+struct rockchip_spi_priv {
+ struct rockchip_spi *regs;
+ struct clk clk;
+ unsigned int max_freq;
+ unsigned int mode;
+ ulong last_transaction_us; /* Time of last transaction end */
+ unsigned int speed_hz;
+ unsigned int last_speed_hz;
+ uint input_rate;
+};
+
+#define SPI_FIFO_DEPTH 32
+
+static void rkspi_dump_regs(struct rockchip_spi *regs)
+{
+ debug("ctrl0: \t\t0x%08x\n", readl(&regs->ctrlr0));
+ debug("ctrl1: \t\t0x%08x\n", readl(&regs->ctrlr1));
+ debug("ssienr: \t\t0x%08x\n", readl(&regs->enr));
+ debug("ser: \t\t0x%08x\n", readl(&regs->ser));
+ debug("baudr: \t\t0x%08x\n", readl(&regs->baudr));
+ debug("txftlr: \t\t0x%08x\n", readl(&regs->txftlr));
+ debug("rxftlr: \t\t0x%08x\n", readl(&regs->rxftlr));
+ debug("txflr: \t\t0x%08x\n", readl(&regs->txflr));
+ debug("rxflr: \t\t0x%08x\n", readl(&regs->rxflr));
+ debug("sr: \t\t0x%08x\n", readl(&regs->sr));
+ debug("imr: \t\t0x%08x\n", readl(&regs->imr));
+ debug("isr: \t\t0x%08x\n", readl(&regs->isr));
+ debug("dmacr: \t\t0x%08x\n", readl(&regs->dmacr));
+ debug("dmatdlr: \t0x%08x\n", readl(&regs->dmatdlr));
+ debug("dmardlr: \t0x%08x\n", readl(&regs->dmardlr));
+}
+
+static void rkspi_enable_chip(struct rockchip_spi *regs, bool enable)
+{
+ writel(enable ? 1 : 0, &regs->enr);
+}
+
+static void rkspi_set_clk(struct rockchip_spi_priv *priv, uint speed)
+{
+ /*
+ * We should try not to exceed the speed requested by the caller:
+ * when selecting a divider, we need to make sure we round up.
+ */
+ uint clk_div = DIV_ROUND_UP(priv->input_rate, speed);
+
+ /* The baudrate register (BAUDR) is defined as a 32bit register where
+ * the upper 16bit are reserved and having 'Fsclk_out' in the lower
+ * 16bits with 'Fsclk_out' defined as follows:
+ *
+ * Fsclk_out = Fspi_clk/ SCKDV
+ * Where SCKDV is any even value between 2 and 65534.
+ */
+ if (clk_div > 0xfffe) {
+ clk_div = 0xfffe;
+ debug("%s: can't divide down to %d Hz (actual will be %d Hz)\n",
+ __func__, speed, priv->input_rate / clk_div);
+ }
+
+ /* Round up to the next even 16bit number */
+ clk_div = (clk_div + 1) & 0xfffe;
+
+ debug("spi speed %u, div %u\n", speed, clk_div);
+
+ clrsetbits_le32(&priv->regs->baudr, 0xffff, clk_div);
+ priv->last_speed_hz = speed;
+}
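+/*
+ * Worked example for the divider above (input rate illustrative): with
+ * input_rate = 99 MHz and a 10 MHz request, DIV_ROUND_UP gives 10,
+ * which is already even after the (clk_div + 1) & 0xfffe rounding, so
+ * SCLK runs at 99 MHz / 10 = 9.9 MHz, just under the requested speed.
+ */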
+
+static int rkspi_wait_till_not_busy(struct rockchip_spi *regs)
+{
+ unsigned long start;
+
+ start = get_timer(0);
+ while (readl(&regs->sr) & SR_BUSY) {
+ if (get_timer(start) > ROCKCHIP_SPI_TIMEOUT_MS) {
+ debug("RK SPI: Status keeps busy for 1000us after a read/write!\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+static void spi_cs_activate(struct udevice *dev, uint cs)
+{
+ struct udevice *bus = dev->parent;
+ struct rockchip_spi_plat *plat = dev_get_plat(bus);
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+ struct rockchip_spi *regs = priv->regs;
+
+ /* If it's too soon to do another transaction, wait */
+ if (plat->deactivate_delay_us && priv->last_transaction_us) {
+ ulong delay_us; /* The delay completed so far */
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < plat->deactivate_delay_us) {
+ ulong additional_delay_us =
+ plat->deactivate_delay_us - delay_us;
+ debug("%s: delaying by %ld us\n",
+ __func__, additional_delay_us);
+ udelay(additional_delay_us);
+ }
+ }
+
+ debug("activate cs%u\n", cs);
+ writel(1 << cs, &regs->ser);
+ if (plat->activate_delay_us)
+ udelay(plat->activate_delay_us);
+}
+
+static void spi_cs_deactivate(struct udevice *dev, uint cs)
+{
+ struct udevice *bus = dev->parent;
+ struct rockchip_spi_plat *plat = dev_get_plat(bus);
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+ struct rockchip_spi *regs = priv->regs;
+
+ debug("deactivate cs%u\n", cs);
+ writel(0, &regs->ser);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (plat->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+}
+
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+static int conv_of_plat(struct udevice *dev)
+{
+ struct rockchip_spi_plat *plat = dev_get_plat(dev);
+ struct dtd_rockchip_rk3288_spi *dtplat = &plat->of_plat;
+ struct rockchip_spi_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ plat->base = dtplat->reg[0];
+ plat->frequency = 20000000;
+ ret = clk_get_by_driver_info(dev, dtplat->clocks, &priv->clk);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#endif
+
+static int rockchip_spi_of_to_plat(struct udevice *bus)
+{
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct rockchip_spi_plat *plat = dev_get_plat(bus);
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ plat->base = dev_read_addr(bus);
+
+ ret = clk_get_by_index(bus, 0, &priv->clk);
+ if (ret < 0) {
+ debug("%s: Could not get clock for %s: %d\n", __func__,
+ bus->name, ret);
+ return ret;
+ }
+
+ plat->frequency =
+ dev_read_u32_default(bus, "spi-max-frequency", 50000000);
+ plat->deactivate_delay_us =
+ dev_read_u32_default(bus, "spi-deactivate-delay", 0);
+ plat->activate_delay_us =
+ dev_read_u32_default(bus, "spi-activate-delay", 0);
+
+ debug("%s: base=%x, max-frequency=%d, deactivate_delay=%d\n",
+ __func__, (uint)plat->base, plat->frequency,
+ plat->deactivate_delay_us);
+#endif
+
+ return 0;
+}
+
+static int rockchip_spi_calc_modclk(ulong max_freq)
+{
+ /*
+ * While this is not strictly correct for the RK3368, as the
+ * GPLL will be 576MHz, things will still work, as the
+ * clk_set_rate(...) implementation in our clock driver will
+ * choose the next closest rate not exceeding what we request
+ * based on the output of this function.
+ */
+
+ unsigned div;
+ const unsigned long gpll_hz = 594000000UL;
+
+ /*
+ * We need to find an input clock that provides at least twice
+ * the maximum frequency and can be generated from the assumed
+ * speed of GPLL (594MHz) using an integer divider.
+ *
+ * To give us more achievable bitrates at higher speeds (these
+ * are generated by dividing by an even 16-bit integer from
+ * this frequency), we try to have an input frequency of at
+ * least 4x our max_freq.
+ */
+
+ div = DIV_ROUND_UP(gpll_hz, max_freq * 4);
+ return gpll_hz / div;
+}
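A hedged worked example of the module-clock calculation above, with an assumed 30 MHz 'spi-max-frequency' (not taken from any particular device tree):

#include <stdio.h>

/* Sketch of rockchip_spi_calc_modclk() with example numbers. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned long gpll_hz = 594000000UL;
	unsigned long max_freq = 30000000UL;	/* hypothetical spi-max-frequency */
	unsigned long div = DIV_ROUND_UP(gpll_hz, max_freq * 4);

	/* div = 5 here, so the module clock requested from GPLL is 118.8 MHz */
	printf("div=%lu, module clock=%lu Hz\n", div, gpll_hz / div);
	return 0;
}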
+
+static int rockchip_spi_probe(struct udevice *bus)
+{
+ struct rockchip_spi_plat *plat = dev_get_plat(bus);
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ debug("%s: probe\n", __func__);
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ ret = conv_of_plat(bus);
+ if (ret)
+ return ret;
+#endif
+ priv->regs = (struct rockchip_spi *)plat->base;
+
+ priv->last_transaction_us = timer_get_us();
+ priv->max_freq = plat->frequency;
+
+ /* Clamp the value from the DTS against any hardware limits */
+ if (priv->max_freq > ROCKCHIP_SPI_MAX_RATE)
+ priv->max_freq = ROCKCHIP_SPI_MAX_RATE;
+
+ /* Find a module-input clock that fits with the max_freq setting */
+ ret = clk_set_rate(&priv->clk,
+ rockchip_spi_calc_modclk(priv->max_freq));
+ if (ret < 0) {
+ debug("%s: Failed to set clock: %d\n", __func__, ret);
+ return ret;
+ }
+ priv->input_rate = ret;
+ debug("%s: rate = %u\n", __func__, priv->input_rate);
+
+ return 0;
+}
+
+static int rockchip_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+ struct rockchip_spi *regs = priv->regs;
+ uint ctrlr0;
+
+ /* Disable the SPI hardware */
+ rkspi_enable_chip(regs, false);
+
+ if (priv->speed_hz != priv->last_speed_hz)
+ rkspi_set_clk(priv, priv->speed_hz);
+
+ /* Operation Mode */
+ ctrlr0 = OMOD_MASTER << OMOD_SHIFT;
+
+ /* Data Frame Size */
+ ctrlr0 |= DFS_8BIT << DFS_SHIFT;
+
+ /* set SPI mode 0..3 */
+ if (priv->mode & SPI_CPOL)
+ ctrlr0 |= SCOL_HIGH << SCOL_SHIFT;
+ if (priv->mode & SPI_CPHA)
+ ctrlr0 |= SCPH_TOGSTA << SCPH_SHIFT;
+
+ /* Chip Select Mode */
+ ctrlr0 |= CSM_KEEP << CSM_SHIFT;
+
+ /* SSN to Sclk_out delay */
+ ctrlr0 |= SSN_DELAY_ONE << SSN_DELAY_SHIFT;
+
+ /* Serial Endian Mode */
+ ctrlr0 |= SEM_LITTLE << SEM_SHIFT;
+
+ /* First Bit Mode */
+ ctrlr0 |= FBM_MSB << FBM_SHIFT;
+
+ /* Byte and Halfword Transform */
+ ctrlr0 |= HALF_WORD_OFF << HALF_WORD_TX_SHIFT;
+
+ /* Rxd Sample Delay */
+ ctrlr0 |= 0 << RXDSD_SHIFT;
+
+ /* Frame Format */
+ ctrlr0 |= FRF_SPI << FRF_SHIFT;
+
+ /* Tx and Rx mode */
+ ctrlr0 |= TMOD_TR << TMOD_SHIFT;
+
+ writel(ctrlr0, &regs->ctrlr0);
+
+ return 0;
+}
+
+static int rockchip_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+
+ rkspi_enable_chip(priv->regs, false);
+
+ return 0;
+}
+
+static inline int rockchip_spi_16bit_reader(struct udevice *dev,
+ u8 **din, int *len)
+{
+ struct udevice *bus = dev->parent;
+ const struct rockchip_spi_params * const data =
+ (void *)dev_get_driver_data(bus);
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+ struct rockchip_spi *regs = priv->regs;
+ const u32 saved_ctrlr0 = readl(&regs->ctrlr0);
+#if defined(DEBUG)
+ u32 statistics_rxlevels[33] = { };
+#endif
+ u32 frames = *len / 2;
+ u8 *in = (u8 *)(*din);
+ u32 max_chunk_size = SPI_FIFO_DEPTH;
+
+ if (!frames)
+ return 0;
+
+ /*
+ * If we know that the hardware will manage RXFIFO overruns
+ * (i.e. stop the SPI clock until there's space in the FIFO),
+ * we allow the largest possible chunk size that can be
+ * represented in CTRLR1.
+ */
+ if (data && data->master_manages_fifo)
+ max_chunk_size = ROCKCHIP_SPI_MAX_TRANLEN;
+
+ // rockchip_spi_configure(dev, mode, size)
+ rkspi_enable_chip(regs, false);
+ clrsetbits_le32(&regs->ctrlr0,
+ TMOD_MASK << TMOD_SHIFT,
+ TMOD_RO << TMOD_SHIFT);
+ /* 16bit data frame size */
+ clrsetbits_le32(&regs->ctrlr0, DFS_MASK, DFS_16BIT);
+
+ /* Update caller's context */
+ const u32 bytes_to_process = 2 * frames;
+ *din += bytes_to_process;
+ *len -= bytes_to_process;
+
+ /* Process our frames */
+ while (frames) {
+ u32 chunk_size = min(frames, max_chunk_size);
+
+ frames -= chunk_size;
+
+ writew(chunk_size - 1, &regs->ctrlr1);
+ rkspi_enable_chip(regs, true);
+
+ do {
+ u32 rx_level = readw(&regs->rxflr);
+#if defined(DEBUG)
+ statistics_rxlevels[rx_level]++;
+#endif
+ chunk_size -= rx_level;
+ while (rx_level--) {
+ u16 val = readw(regs->rxdr);
+ *in++ = val & 0xff;
+ *in++ = val >> 8;
+ }
+ } while (chunk_size);
+
+ rkspi_enable_chip(regs, false);
+ }
+
+#if defined(DEBUG)
+ debug("%s: observed rx_level during processing:\n", __func__);
+ for (int i = 0; i <= 32; ++i)
+ if (statistics_rxlevels[i])
+ debug("\t%2d: %d\n", i, statistics_rxlevels[i]);
+#endif
+ /* Restore the original transfer setup and return error-free. */
+ writel(saved_ctrlr0, &regs->ctrlr0);
+ return 0;
+}
+
+static int rockchip_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+ struct rockchip_spi *regs = priv->regs;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ int len = bitlen >> 3;
+ const u8 *out = dout;
+ u8 *in = din;
+ int toread, towrite;
+ int ret = 0;
+
+ debug("%s: dout=%p, din=%p, len=%x, flags=%lx\n", __func__, dout, din,
+ len, flags);
+ if (DEBUG_RK_SPI)
+ rkspi_dump_regs(regs);
+
+ /* Assert CS before transfer */
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev, slave_plat->cs);
+
+ /*
+ * To ensure fast loading of firmware images (e.g. full U-Boot
+ * stage, ATF, Linux kernel) from SPI flash, we optimise the
+ * case of read-only transfers by using the full 16bits of each
+ * FIFO element.
+ */
+ if (!out)
+ ret = rockchip_spi_16bit_reader(dev, &in, &len);
+
+ /* This is the original 8bit reader/writer code */
+ while (len > 0) {
+ int todo = min(len, ROCKCHIP_SPI_MAX_TRANLEN);
+
+ rkspi_enable_chip(regs, false);
+ writel(todo - 1, &regs->ctrlr1);
+ rkspi_enable_chip(regs, true);
+
+ toread = todo;
+ towrite = todo;
+ while (toread || towrite) {
+ u32 status = readl(&regs->sr);
+
+ if (towrite && !(status & SR_TF_FULL)) {
+ writel(out ? *out++ : 0, regs->txdr);
+ towrite--;
+ }
+ if (toread && !(status & SR_RF_EMPT)) {
+ u32 byte = readl(regs->rxdr);
+
+ if (in)
+ *in++ = byte;
+ toread--;
+ }
+ }
+
+ /*
+ * If there is a transmit component, we need to wait until the
+ * controller goes idle before we can disable the SPI control
+ * logic (as this implicitly flushes the FIFOs).
+ */
+ if (out) {
+ ret = rkspi_wait_till_not_busy(regs);
+ if (ret)
+ break;
+ }
+
+ len -= todo;
+ }
+
+ /* Deassert CS after transfer */
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev, slave_plat->cs);
+
+ rkspi_enable_chip(regs, false);
+
+ return ret;
+}
+
+static int rockchip_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+
+ /* Clamp to the maximum frequency specified in the DTS */
+ if (speed > priv->max_freq)
+ speed = priv->max_freq;
+
+ priv->speed_hz = speed;
+
+ return 0;
+}
+
+static int rockchip_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct rockchip_spi_priv *priv = dev_get_priv(bus);
+
+ priv->mode = mode;
+
+ return 0;
+}
+
+static const struct dm_spi_ops rockchip_spi_ops = {
+ .claim_bus = rockchip_spi_claim_bus,
+ .release_bus = rockchip_spi_release_bus,
+ .xfer = rockchip_spi_xfer,
+ .set_speed = rockchip_spi_set_speed,
+ .set_mode = rockchip_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+const struct rockchip_spi_params rk3399_spi_params = {
+ .master_manages_fifo = true,
+};
+
+static const struct udevice_id rockchip_spi_ids[] = {
+ { .compatible = "rockchip,rk3066-spi" },
+ { .compatible = "rockchip,rk3288-spi" },
+ { .compatible = "rockchip,rk3328-spi" },
+ { .compatible = "rockchip,rk3368-spi",
+ .data = (ulong)&rk3399_spi_params },
+ { .compatible = "rockchip,rk3399-spi",
+ .data = (ulong)&rk3399_spi_params },
+ { }
+};
+
+U_BOOT_DRIVER(rockchip_rk3288_spi) = {
+ .name = "rockchip_rk3288_spi",
+ .id = UCLASS_SPI,
+ .of_match = rockchip_spi_ids,
+ .ops = &rockchip_spi_ops,
+ .of_to_plat = rockchip_spi_of_to_plat,
+ .plat_auto = sizeof(struct rockchip_spi_plat),
+ .priv_auto = sizeof(struct rockchip_spi_priv),
+ .probe = rockchip_spi_probe,
+};
+
+DM_DRIVER_ALIAS(rockchip_rk3288_spi, rockchip_rk3368_spi)
diff --git a/roms/u-boot/drivers/spi/rk_spi.h b/roms/u-boot/drivers/spi/rk_spi.h
new file mode 100644
index 000000000..2e0d1eeb9
--- /dev/null
+++ b/roms/u-boot/drivers/spi/rk_spi.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * SPI driver for rockchip
+ *
+ * (C) Copyright 2015 Google, Inc
+ *
+ * (C) Copyright 2008-2013 Rockchip Electronics
+ * Peter, Software Engineering, <superpeter.cai@gmail.com>.
+ */
+
+#ifndef __RK_SPI_H
+#define __RK_SPI_H
+
+struct rockchip_spi {
+ u32 ctrlr0;
+ u32 ctrlr1;
+ u32 enr;
+ u32 ser;
+ u32 baudr;
+ u32 txftlr;
+ u32 rxftlr;
+ u32 txflr;
+ u32 rxflr;
+ u32 sr;
+ u32 ipr;
+ u32 imr;
+ u32 isr;
+ u32 risr;
+ u32 icr;
+ u32 dmacr;
+ u32 dmatdlr;
+ u32 dmardlr; /* 0x44 */
+ u32 reserved[0xee]; /* pad to 0x400 */
+ u32 txdr[0x100]; /* 0x400 */
+ u32 rxdr[0x100]; /* 0x800 */
+};
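The offset comments in the struct above can be checked at compile time; a minimal standalone sketch (standard C11, not part of the header), assuming the documented offsets 0x44, 0x400 and 0x800 are authoritative:

#include <stddef.h>
#include <stdint.h>

typedef uint32_t u32;

/* Same layout as struct rockchip_spi above, reduced to an offset check. */
struct rockchip_spi_layout {
	u32 ctrlr0, ctrlr1, enr, ser, baudr, txftlr, rxftlr, txflr, rxflr;
	u32 sr, ipr, imr, isr, risr, icr, dmacr, dmatdlr, dmardlr; /* 0x44 */
	u32 reserved[0xee];
	u32 txdr[0x100];	/* 0x400 */
	u32 rxdr[0x100];	/* 0x800 */
};

_Static_assert(offsetof(struct rockchip_spi_layout, dmardlr) == 0x44, "dmardlr");
_Static_assert(offsetof(struct rockchip_spi_layout, txdr) == 0x400, "txdr");
_Static_assert(offsetof(struct rockchip_spi_layout, rxdr) == 0x800, "rxdr");

Note that a reserved count of 0xee is what makes the TXDR/RXDR data-register offsets land at 0x400 and 0x800 as documented.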
+
+/* CTRLR0 */
+enum {
+ DFS_SHIFT = 0, /* Data Frame Size */
+ DFS_MASK = 3,
+ DFS_4BIT = 0,
+ DFS_8BIT,
+ DFS_16BIT,
+ DFS_RESV,
+
+ CFS_SHIFT = 2, /* Control Frame Size */
+ CFS_MASK = 0xf,
+
+ SCPH_SHIFT = 6, /* Serial Clock Phase */
+ SCPH_MASK = 1,
+ SCPH_TOGMID = 0, /* SCLK toggles in middle of first data bit */
+ SCPH_TOGSTA, /* SCLK toggles at start of first data bit */
+
+ SCOL_SHIFT = 7, /* Serial Clock Polarity */
+ SCOL_MASK = 1,
+ SCOL_LOW = 0, /* Inactive state of serial clock is low */
+ SCOL_HIGH, /* Inactive state of serial clock is high */
+
+ CSM_SHIFT = 8, /* Chip Select Mode */
+ CSM_MASK = 0x3,
+ CSM_KEEP = 0, /* ss_n stays low after each frame */
+ CSM_HALF, /* ss_n high for half sclk_out cycles */
+ CSM_ONE, /* ss_n high for one sclk_out cycle */
+ CSM_RESV,
+
+ SSN_DELAY_SHIFT = 10, /* SSN to Sclk_out delay */
+ SSN_DELAY_MASK = 1,
+ SSN_DELAY_HALF = 0, /* 1/2 sclk_out cycle */
+ SSN_DELAY_ONE = 1, /* 1 sclk_out cycle */
+
+ SEM_SHIFT = 11, /* Serial Endian Mode */
+ SEM_MASK = 1,
+ SEM_LITTLE = 0, /* little endian */
+ SEM_BIG, /* big endian */
+
+ FBM_SHIFT = 12, /* First Bit Mode */
+ FBM_MASK = 1,
+ FBM_MSB = 0, /* first bit is MSB */
+ FBM_LSB, /* first bit is LSB */
+
+ HALF_WORD_TX_SHIFT = 13, /* Byte and Halfword Transform */
+ HALF_WORD_MASK = 1,
+ HALF_WORD_ON = 0, /* apb 16bit write/read, spi 8bit write/read */
+ HALF_WORD_OFF, /* apb 8bit write/read, spi 8bit write/read */
+
+ RXDSD_SHIFT = 14, /* Rxd Sample Delay, in cycles */
+ RXDSD_MASK = 3,
+
+ FRF_SHIFT = 16, /* Frame Format */
+ FRF_MASK = 3,
+ FRF_SPI = 0, /* Motorola SPI */
+ FRF_SSP, /* Texas Instruments SSP */
+ FRF_MICROWIRE, /* National Semiconductors Microwire */
+ FRF_RESV,
+
+ TMOD_SHIFT = 18, /* Transfer Mode */
+ TMOD_MASK = 3,
+ TMOD_TR = 0, /* xmit & recv */
+ TMOD_TO, /* xmit only */
+ TMOD_RO, /* recv only */
+ TMOD_RESV,
+
+ OMOD_SHIFT = 20, /* Operation Mode */
+ OMOD_MASK = 1,
+ OMOD_MASTER = 0, /* Master Mode */
+ OMOD_SLAVE, /* Slave Mode */
+};
+
+/* SR */
+enum {
+ SR_MASK = 0x7f,
+ SR_BUSY = 1 << 0,
+ SR_TF_FULL = 1 << 1,
+ SR_TF_EMPT = 1 << 2,
+ SR_RF_EMPT = 1 << 3,
+ SR_RF_FULL = 1 << 4,
+};
+
+#define ROCKCHIP_SPI_TIMEOUT_MS 1000
+
+/*
+ * We limit the maximum bitrate to 50MBit/s (50MHz) due to an assumed
+ * hardware limitation... the Linux kernel source has the following
+ * comment:
+ * "sclk_out: spi master internal logic in rk3x can support 50Mhz"
+ */
+#define ROCKCHIP_SPI_MAX_RATE 50000000
+
+#endif /* __RK_SPI_H */
diff --git a/roms/u-boot/drivers/spi/sandbox_spi.c b/roms/u-boot/drivers/spi/sandbox_spi.c
new file mode 100644
index 000000000..0564d8b55
--- /dev/null
+++ b/roms/u-boot/drivers/spi/sandbox_spi.c
@@ -0,0 +1,179 @@
+/*
+ * Simulate a SPI port
+ *
+ * Copyright (c) 2011-2013 The Chromium OS Authors.
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#define LOG_CATEGORY UCLASS_SPI
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi_flash.h>
+#include <os.h>
+
+#include <linux/errno.h>
+#include <asm/spi.h>
+#include <asm/state.h>
+#include <dm/acpi.h>
+#include <dm/device-internal.h>
+
+#ifndef CONFIG_SPI_IDLE_VAL
+# define CONFIG_SPI_IDLE_VAL 0xFF
+#endif
+
+/**
+ * struct sandbox_spi_priv - Sandbox SPI private data
+ *
+ * Helper struct to keep track of the sandbox SPI bus internal state. It is
+ * used in unit tests to verify that dm spi functions update the bus
+ * speed/mode properly (for instance, when jumping back and forth between spi
+ * slaves claiming the bus, we need to make sure that the bus speed is updated
+ * accordingly for each slave).
+ *
+ * @speed: Current bus speed.
+ * @mode: Current bus mode.
+ */
+struct sandbox_spi_priv {
+ uint speed;
+ uint mode;
+};
+
+__weak int sandbox_spi_get_emul(struct sandbox_state *state,
+ struct udevice *bus, struct udevice *slave,
+ struct udevice **emulp)
+{
+ return -ENOENT;
+}
+
+uint sandbox_spi_get_speed(struct udevice *dev)
+{
+ struct sandbox_spi_priv *priv = dev_get_priv(dev);
+
+ return priv->speed;
+}
+
+uint sandbox_spi_get_mode(struct udevice *dev)
+{
+ struct sandbox_spi_priv *priv = dev_get_priv(dev);
+
+ return priv->mode;
+}
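The accessors above exist so tests can observe the sandbox bus state. A hedged sketch of how they might be used, where 'bus' and 'slave' are assumed to come from the usual sandbox driver-model test plumbing and the expected values are whatever the test configured beforehand:

#include <dm.h>
#include <spi.h>

/* Not a complete test case: just checks that claiming the bus propagated
 * the configured speed and mode into the sandbox SPI private data.
 */
static int check_sandbox_spi_state(struct udevice *bus, struct spi_slave *slave,
				   uint expect_speed, uint expect_mode)
{
	int ret = spi_claim_bus(slave);

	if (ret)
		return ret;
	ret = (sandbox_spi_get_speed(bus) == expect_speed &&
	       sandbox_spi_get_mode(bus) == expect_mode) ? 0 : -EINVAL;
	spi_release_bus(slave);

	return ret;
}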
+
+static int sandbox_spi_xfer(struct udevice *slave, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = slave->parent;
+ struct sandbox_state *state = state_get_current();
+ struct dm_spi_emul_ops *ops;
+ struct udevice *emul;
+ uint bytes = bitlen / 8, i;
+ int ret;
+ uint busnum, cs;
+
+ if (bitlen == 0)
+ return 0;
+
+ /* we can only do transfers that are a multiple of 8 bits */
+ if (bitlen % 8) {
+ printf("sandbox_spi: xfer: invalid bitlen %u; must be a multiple of 8\n",
+ bitlen);
+ return -EINVAL;
+ }
+
+ busnum = dev_seq(bus);
+ cs = spi_chip_select(slave);
+ if (busnum >= CONFIG_SANDBOX_SPI_MAX_BUS ||
+ cs >= CONFIG_SANDBOX_SPI_MAX_CS) {
+ printf("%s: busnum=%u, cs=%u: out of range\n", __func__,
+ busnum, cs);
+ return -ENOENT;
+ }
+ ret = sandbox_spi_get_emul(state, bus, slave, &emul);
+ if (ret) {
+ printf("%s: busnum=%u, cs=%u: no emulation available (err=%d)\n",
+ __func__, busnum, cs, ret);
+ return -ENOENT;
+ }
+ ret = device_probe(emul);
+ if (ret)
+ return ret;
+
+ ops = spi_emul_get_ops(emul);
+ ret = ops->xfer(emul, bitlen, dout, din, flags);
+
+ log_content("sandbox_spi: xfer: got back %i (that's %s)\n rx:",
+ ret, ret ? "bad" : "good");
+ if (din) {
+ for (i = 0; i < bytes; ++i)
+ log_content(" %u:%02x", i, ((u8 *)din)[i]);
+ }
+ log_content("\n");
+
+ return ret;
+}
+
+static int sandbox_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct sandbox_spi_priv *priv = dev_get_priv(bus);
+
+ priv->speed = speed;
+
+ return 0;
+}
+
+static int sandbox_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct sandbox_spi_priv *priv = dev_get_priv(bus);
+
+ priv->mode = mode;
+
+ return 0;
+}
+
+static int sandbox_cs_info(struct udevice *bus, uint cs,
+ struct spi_cs_info *info)
+{
+ /* Always allow activity on CS 0, CS 1 */
+ if (cs >= 2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sandbox_spi_get_mmap(struct udevice *dev, ulong *map_basep,
+ uint *map_sizep, uint *offsetp)
+{
+ *map_basep = 0x1000;
+ *map_sizep = 0x2000;
+ *offsetp = 0x100;
+
+ return 0;
+}
+
+static const struct dm_spi_ops sandbox_spi_ops = {
+ .xfer = sandbox_spi_xfer,
+ .set_speed = sandbox_spi_set_speed,
+ .set_mode = sandbox_spi_set_mode,
+ .cs_info = sandbox_cs_info,
+ .get_mmap = sandbox_spi_get_mmap,
+};
+
+static const struct udevice_id sandbox_spi_ids[] = {
+ { .compatible = "sandbox,spi" },
+ { }
+};
+
+U_BOOT_DRIVER(sandbox_spi) = {
+ .name = "sandbox_spi",
+ .id = UCLASS_SPI,
+ .of_match = sandbox_spi_ids,
+ .ops = &sandbox_spi_ops,
+ .priv_auto = sizeof(struct sandbox_spi_priv),
+};
diff --git a/roms/u-boot/drivers/spi/sh_qspi.c b/roms/u-boot/drivers/spi/sh_qspi.c
new file mode 100644
index 000000000..5ba8a8ea7
--- /dev/null
+++ b/roms/u-boot/drivers/spi/sh_qspi.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SH QSPI (Quad SPI) driver
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+ */
+
+#include <common.h>
+#include <console.h>
+#include <malloc.h>
+#include <spi.h>
+#include <wait_bit.h>
+#include <asm/arch/rmobile.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+
+/* SH QSPI register bit masks <REG>_<BIT> */
+#define SPCR_MSTR 0x08
+#define SPCR_SPE 0x40
+#define SPSR_SPRFF 0x80
+#define SPSR_SPTEF 0x20
+#define SPPCR_IO3FV 0x04
+#define SPPCR_IO2FV 0x02
+#define SPPCR_IO1FV 0x01
+#define SPBDCR_RXBC0 BIT(0)
+#define SPCMD_SCKDEN BIT(15)
+#define SPCMD_SLNDEN BIT(14)
+#define SPCMD_SPNDEN BIT(13)
+#define SPCMD_SSLKP BIT(7)
+#define SPCMD_BRDV0 BIT(2)
+#define SPCMD_INIT1 (SPCMD_SCKDEN | SPCMD_SLNDEN | \
+ SPCMD_SPNDEN | SPCMD_SSLKP | \
+ SPCMD_BRDV0)
+#define SPCMD_INIT2 (SPCMD_SPNDEN | SPCMD_SSLKP | \
+ SPCMD_BRDV0)
+#define SPBFCR_TXRST BIT(7)
+#define SPBFCR_RXRST BIT(6)
+#define SPBFCR_TXTRG 0x30
+#define SPBFCR_RXTRG 0x07
+
+/* SH QSPI register set */
+struct sh_qspi_regs {
+ u8 spcr;
+ u8 sslp;
+ u8 sppcr;
+ u8 spsr;
+ u32 spdr;
+ u8 spscr;
+ u8 spssr;
+ u8 spbr;
+ u8 spdcr;
+ u8 spckd;
+ u8 sslnd;
+ u8 spnd;
+ u8 dummy0;
+ u16 spcmd0;
+ u16 spcmd1;
+ u16 spcmd2;
+ u16 spcmd3;
+ u8 spbfcr;
+ u8 dummy1;
+ u16 spbdcr;
+ u32 spbmul0;
+ u32 spbmul1;
+ u32 spbmul2;
+ u32 spbmul3;
+};
+
+struct sh_qspi_slave {
+#if !CONFIG_IS_ENABLED(DM_SPI)
+ struct spi_slave slave;
+#endif
+ struct sh_qspi_regs *regs;
+};
+
+static void sh_qspi_init(struct sh_qspi_slave *ss)
+{
+ /* QSPI initialize */
+ /* Set master mode only */
+ writeb(SPCR_MSTR, &ss->regs->spcr);
+
+ /* Set SSL signal level */
+ writeb(0x00, &ss->regs->sslp);
+
+ /* Set MOSI signal value when transfer is in idle state */
+ writeb(SPPCR_IO3FV|SPPCR_IO2FV, &ss->regs->sppcr);
+
+ /* Set bit rate. See 58.3.8 Quad Serial Peripheral Interface */
+ writeb(0x01, &ss->regs->spbr);
+
+ /* Disable Dummy Data Transmission */
+ writeb(0x00, &ss->regs->spdcr);
+
+ /* Set clock delay value */
+ writeb(0x00, &ss->regs->spckd);
+
+ /* Set SSL negation delay value */
+ writeb(0x00, &ss->regs->sslnd);
+
+ /* Set next-access delay value */
+ writeb(0x00, &ss->regs->spnd);
+
+ /* Set sequence command */
+ writew(SPCMD_INIT2, &ss->regs->spcmd0);
+
+ /* Reset the transmit and receive buffers */
+ setbits_8(&ss->regs->spbfcr, SPBFCR_TXRST|SPBFCR_RXRST);
+
+ /* Clear the transmit and receive buffer reset bits */
+ clrbits_8(&ss->regs->spbfcr, SPBFCR_TXRST|SPBFCR_RXRST);
+
+ /* Set sequence control method. Use sequence 0 only */
+ writeb(0x00, &ss->regs->spscr);
+
+ /* Enable SPI function */
+ setbits_8(&ss->regs->spcr, SPCR_SPE);
+}
+
+static void sh_qspi_cs_activate(struct sh_qspi_slave *ss)
+{
+ /* Set master mode only */
+ writeb(SPCR_MSTR, &ss->regs->spcr);
+
+ /* Set command */
+ writew(SPCMD_INIT1, &ss->regs->spcmd0);
+
+ /* Reset the transmit and receive buffers */
+ setbits_8(&ss->regs->spbfcr, SPBFCR_TXRST|SPBFCR_RXRST);
+
+ /* Clear the transmit and receive buffer reset bits */
+ clrbits_8(&ss->regs->spbfcr, SPBFCR_TXRST|SPBFCR_RXRST);
+
+ /* Set sequence control method. Use sequence 0 only */
+ writeb(0x00, &ss->regs->spscr);
+
+ /* Enable SPI function */
+ setbits_8(&ss->regs->spcr, SPCR_SPE);
+}
+
+static void sh_qspi_cs_deactivate(struct sh_qspi_slave *ss)
+{
+ /* Disable SPI Function */
+ clrbits_8(&ss->regs->spcr, SPCR_SPE);
+}
+
+static int sh_qspi_xfer_common(struct sh_qspi_slave *ss, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ u32 nbyte, chunk;
+ int i, ret = 0;
+ u8 dtdata = 0, drdata;
+ u8 *tdata = &dtdata, *rdata = &drdata;
+ u32 *spbmul0 = &ss->regs->spbmul0;
+
+ if (dout == NULL && din == NULL) {
+ if (flags & SPI_XFER_END)
+ sh_qspi_cs_deactivate(ss);
+ return 0;
+ }
+
+ if (bitlen % 8) {
+ printf("%s: bitlen is not 8bit alined %d", __func__, bitlen);
+ return 1;
+ }
+
+ nbyte = bitlen / 8;
+
+ if (flags & SPI_XFER_BEGIN) {
+ sh_qspi_cs_activate(ss);
+
+ /* Set 1048576 bytes */
+ writel(0x100000, spbmul0);
+ }
+
+ if (flags & SPI_XFER_END)
+ writel(nbyte, spbmul0);
+
+ if (dout != NULL)
+ tdata = (u8 *)dout;
+
+ if (din != NULL)
+ rdata = din;
+
+ while (nbyte > 0) {
+ /*
+ * If there is a 32-byte chunk, transfer it in one burst;
+ * otherwise transfer on a byte-by-byte basis.
+ */
+ chunk = (nbyte >= 32) ? 32 : 1;
+
+ clrsetbits_8(&ss->regs->spbfcr, SPBFCR_TXTRG | SPBFCR_RXTRG,
+ chunk == 32 ? SPBFCR_TXTRG | SPBFCR_RXTRG : 0);
+
+ ret = wait_for_bit_8(&ss->regs->spsr, SPSR_SPTEF,
+ true, 1000, true);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chunk; i++) {
+ writeb(*tdata, &ss->regs->spdr);
+ if (dout != NULL)
+ tdata++;
+ }
+
+ ret = wait_for_bit_8(&ss->regs->spsr, SPSR_SPRFF,
+ true, 1000, true);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chunk; i++) {
+ *rdata = readb(&ss->regs->spdr);
+ if (din != NULL)
+ rdata++;
+ }
+
+ nbyte -= chunk;
+ }
+
+ if (flags & SPI_XFER_END)
+ sh_qspi_cs_deactivate(ss);
+
+ return ret;
+}
+
+#if !CONFIG_IS_ENABLED(DM_SPI)
+static inline struct sh_qspi_slave *to_sh_qspi(struct spi_slave *slave)
+{
+ return container_of(slave, struct sh_qspi_slave, slave);
+}
+
+int spi_cs_is_valid(unsigned int bus, unsigned int cs)
+{
+ return 1;
+}
+
+void spi_cs_activate(struct spi_slave *slave)
+{
+ struct sh_qspi_slave *ss = to_sh_qspi(slave);
+
+ sh_qspi_cs_activate(ss);
+}
+
+void spi_cs_deactivate(struct spi_slave *slave)
+{
+ struct sh_qspi_slave *ss = to_sh_qspi(slave);
+
+ sh_qspi_cs_deactivate(ss);
+}
+
+struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
+ unsigned int max_hz, unsigned int mode)
+{
+ struct sh_qspi_slave *ss;
+
+ if (!spi_cs_is_valid(bus, cs))
+ return NULL;
+
+ ss = spi_alloc_slave(struct sh_qspi_slave, bus, cs);
+ if (!ss) {
+ printf("SPI_error: Fail to allocate sh_qspi_slave\n");
+ return NULL;
+ }
+
+ ss->regs = (struct sh_qspi_regs *)SH_QSPI_BASE;
+
+ /* Init SH QSPI */
+ sh_qspi_init(ss);
+
+ return &ss->slave;
+}
+
+void spi_free_slave(struct spi_slave *slave)
+{
+ struct sh_qspi_slave *spi = to_sh_qspi(slave);
+
+ free(spi);
+}
+
+int spi_claim_bus(struct spi_slave *slave)
+{
+ return 0;
+}
+
+void spi_release_bus(struct spi_slave *slave)
+{
+}
+
+int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct sh_qspi_slave *ss = to_sh_qspi(slave);
+
+ return sh_qspi_xfer_common(ss, bitlen, dout, din, flags);
+}
+
+#else
+
+#include <dm.h>
+
+static int sh_qspi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct sh_qspi_slave *ss = dev_get_plat(bus);
+
+ return sh_qspi_xfer_common(ss, bitlen, dout, din, flags);
+}
+
+static int sh_qspi_set_speed(struct udevice *dev, uint speed)
+{
+ /* This is a SPI NOR controller, do nothing. */
+ return 0;
+}
+
+static int sh_qspi_set_mode(struct udevice *dev, uint mode)
+{
+ /* This is a SPI NOR controller, do nothing. */
+ return 0;
+}
+
+static int sh_qspi_probe(struct udevice *dev)
+{
+ struct sh_qspi_slave *ss = dev_get_plat(dev);
+
+ sh_qspi_init(ss);
+
+ return 0;
+}
+
+static int sh_qspi_of_to_plat(struct udevice *dev)
+{
+ struct sh_qspi_slave *plat = dev_get_plat(dev);
+
+ plat->regs = (struct sh_qspi_regs *)dev_read_addr(dev);
+
+ return 0;
+}
+
+static const struct dm_spi_ops sh_qspi_ops = {
+ .xfer = sh_qspi_xfer,
+ .set_speed = sh_qspi_set_speed,
+ .set_mode = sh_qspi_set_mode,
+};
+
+static const struct udevice_id sh_qspi_ids[] = {
+ { .compatible = "renesas,qspi" },
+ { }
+};
+
+U_BOOT_DRIVER(sh_qspi) = {
+ .name = "sh_qspi",
+ .id = UCLASS_SPI,
+ .of_match = sh_qspi_ids,
+ .ops = &sh_qspi_ops,
+ .of_to_plat = sh_qspi_of_to_plat,
+ .plat_auto = sizeof(struct sh_qspi_slave),
+ .probe = sh_qspi_probe,
+};
+#endif
diff --git a/roms/u-boot/drivers/spi/soft_spi.c b/roms/u-boot/drivers/spi/soft_spi.c
new file mode 100644
index 000000000..f3602a25b
--- /dev/null
+++ b/roms/u-boot/drivers/spi/soft_spi.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2014 Google, Inc
+ *
+ * (C) Copyright 2002
+ * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com.
+ *
+ * Influenced by code from:
+ * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <fdtdec.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/global_data.h>
+#include <asm/gpio.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct soft_spi_plat {
+ struct gpio_desc cs;
+ struct gpio_desc sclk;
+ struct gpio_desc mosi;
+ struct gpio_desc miso;
+ int spi_delay_us;
+ int flags;
+};
+
+#define SPI_MASTER_NO_RX BIT(0)
+#define SPI_MASTER_NO_TX BIT(1)
+
+struct soft_spi_priv {
+ unsigned int mode;
+};
+
+static int soft_spi_scl(struct udevice *dev, int bit)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct soft_spi_plat *plat = dev_get_plat(bus);
+
+ dm_gpio_set_value(&plat->sclk, bit);
+
+ return 0;
+}
+
+static int soft_spi_sda(struct udevice *dev, int bit)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct soft_spi_plat *plat = dev_get_plat(bus);
+
+ dm_gpio_set_value(&plat->mosi, bit);
+
+ return 0;
+}
+
+static int soft_spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct soft_spi_priv *priv = dev_get_priv(bus);
+ struct soft_spi_plat *plat = dev_get_plat(bus);
+ int cidle = !!(priv->mode & SPI_CPOL);
+
+ dm_gpio_set_value(&plat->cs, 0);
+ dm_gpio_set_value(&plat->sclk, cidle); /* to idle */
+ dm_gpio_set_value(&plat->cs, 1);
+
+ return 0;
+}
+
+static int soft_spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct soft_spi_plat *plat = dev_get_plat(bus);
+
+ dm_gpio_set_value(&plat->cs, 0);
+
+ return 0;
+}
+
+static int soft_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct soft_spi_priv *priv = dev_get_priv(bus);
+ int cidle = !!(priv->mode & SPI_CPOL);
+ /*
+ * Make sure the SPI clock is in idle state as defined for
+ * this slave.
+ */
+ return soft_spi_scl(dev, cidle);
+}
+
+static int soft_spi_release_bus(struct udevice *dev)
+{
+ /* Nothing to do */
+ return 0;
+}
+
+/*-----------------------------------------------------------------------
+ * SPI transfer
+ *
+ * This writes "bitlen" bits out the SPI MOSI port and simultaneously clocks
+ * "bitlen" bits in the SPI MISO port. That's just the way SPI works.
+ *
+ * The source of the outgoing bits is the "dout" parameter and the
+ * destination of the input bits is the "din" parameter. Note that "dout"
+ * and "din" can point to the same memory location, in which case the
+ * input data overwrites the output data (since both are buffered by
+ * temporary variables, this is OK).
+ */
+static int soft_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct soft_spi_priv *priv = dev_get_priv(bus);
+ struct soft_spi_plat *plat = dev_get_plat(bus);
+ uchar tmpdin = 0;
+ uchar tmpdout = 0;
+ const u8 *txd = dout;
+ u8 *rxd = din;
+ int cpha = !!(priv->mode & SPI_CPHA);
+ int cidle = !!(priv->mode & SPI_CPOL);
+ unsigned int j;
+
+ debug("spi_xfer: slave %s:%s dout %08X din %08X bitlen %u\n",
+ dev->parent->name, dev->name, *(uint *)txd, *(uint *)rxd,
+ bitlen);
+
+ if (flags & SPI_XFER_BEGIN)
+ soft_spi_cs_activate(dev);
+
+ for (j = 0; j < bitlen; j++) {
+ /*
+ * Check if it is time to work on a new byte.
+ */
+ if ((j % 8) == 0) {
+ if (txd)
+ tmpdout = *txd++;
+ else
+ tmpdout = 0;
+ if (j != 0) {
+ if (rxd)
+ *rxd++ = tmpdin;
+ }
+ tmpdin = 0;
+ }
+
+ /*
+ * CPOL 0: idle is low (0), active is high (1)
+ * CPOL 1: idle is high (1), active is low (0)
+ */
+
+ /*
+ * drive bit
+ * CPHA 1: CLK from idle to active
+ */
+ if (cpha)
+ soft_spi_scl(dev, !cidle);
+ if ((plat->flags & SPI_MASTER_NO_TX) == 0)
+ soft_spi_sda(dev, !!(tmpdout & 0x80));
+ udelay(plat->spi_delay_us);
+
+ /*
+ * sample bit
+ * CPHA 0: CLK from idle to active
+ * CPHA 1: CLK from active to idle
+ */
+ if (!cpha)
+ soft_spi_scl(dev, !cidle);
+ else
+ soft_spi_scl(dev, cidle);
+ tmpdin <<= 1;
+ if ((plat->flags & SPI_MASTER_NO_RX) == 0)
+ tmpdin |= dm_gpio_get_value(&plat->miso);
+ tmpdout <<= 1;
+ udelay(plat->spi_delay_us);
+
+ /*
+ * drive bit
+ * CPHA 0: CLK from active to idle
+ */
+ if (!cpha)
+ soft_spi_scl(dev, cidle);
+ }
+ /*
+ * If the number of bits isn't a multiple of 8, shift the last
+ * bits over to left-justify them. Then store the last byte
+ * read in.
+ */
+ if (rxd) {
+ if ((bitlen % 8) != 0)
+ tmpdin <<= 8 - (bitlen % 8);
+ *rxd++ = tmpdin;
+ }
+
+ if (flags & SPI_XFER_END)
+ soft_spi_cs_deactivate(dev);
+
+ return 0;
+}
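The drive/sample ordering in the loop above can be seen in a small host-side model. This is a hedged sketch of a single mode-0 (CPOL=0, CPHA=0) byte exchange, MSB first, with a loopback-style stand-in for the attached slave; it is an illustration, not driver code:

#include <stdio.h>
#include <stdint.h>

/* Drive MOSI while SCLK is idle, both sides sample on the rising edge. */
static uint8_t xfer_byte_mode0(uint8_t out, uint8_t peer_shift)
{
	uint8_t in = 0;
	int i;

	for (i = 0; i < 8; i++) {
		int mosi = !!(out & 0x80);		/* drive bit, clock idle */
		int miso = !!(peer_shift & 0x80);	/* what the peer drives */

		/* rising edge: both sides sample */
		in = (in << 1) | miso;
		peer_shift = (peer_shift << 1) | mosi;	/* loopback-style peer */
		out <<= 1;
		/* falling edge: back to idle, next bit is driven */
	}

	return in;
}

int main(void)
{
	/* With the modelled peer seeded with 0xA5, we read 0xA5 back. */
	printf("read back: 0x%02x\n", xfer_byte_mode0(0x3C, 0xA5));
	return 0;
}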
+
+static int soft_spi_set_speed(struct udevice *dev, unsigned int speed)
+{
+ /* Ignore any speed settings. Speed is implemented via "spi-delay-us" */
+ return 0;
+}
+
+static int soft_spi_set_mode(struct udevice *dev, unsigned int mode)
+{
+ struct soft_spi_priv *priv = dev_get_priv(dev);
+
+ priv->mode = mode;
+
+ return 0;
+}
+
+static const struct dm_spi_ops soft_spi_ops = {
+ .claim_bus = soft_spi_claim_bus,
+ .release_bus = soft_spi_release_bus,
+ .xfer = soft_spi_xfer,
+ .set_speed = soft_spi_set_speed,
+ .set_mode = soft_spi_set_mode,
+};
+
+static int soft_spi_of_to_plat(struct udevice *dev)
+{
+ struct soft_spi_plat *plat = dev_get_plat(dev);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(dev);
+
+ plat->spi_delay_us = fdtdec_get_int(blob, node, "spi-delay-us", 0);
+
+ return 0;
+}
+
+static int soft_spi_probe(struct udevice *dev)
+{
+ struct spi_slave *slave = dev_get_parent_priv(dev);
+ struct soft_spi_plat *plat = dev_get_plat(dev);
+ int cs_flags, clk_flags;
+ int ret;
+
+ cs_flags = (slave && slave->mode & SPI_CS_HIGH) ? 0 : GPIOD_ACTIVE_LOW;
+ clk_flags = (slave && slave->mode & SPI_CPOL) ? GPIOD_ACTIVE_LOW : 0;
+
+ if (gpio_request_by_name(dev, "cs-gpios", 0, &plat->cs,
+ GPIOD_IS_OUT | cs_flags) ||
+ gpio_request_by_name(dev, "gpio-sck", 0, &plat->sclk,
+ GPIOD_IS_OUT | clk_flags))
+ return -EINVAL;
+
+ ret = gpio_request_by_name(dev, "gpio-mosi", 0, &plat->mosi,
+ GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
+ if (ret)
+ plat->flags |= SPI_MASTER_NO_TX;
+
+ ret = gpio_request_by_name(dev, "gpio-miso", 0, &plat->miso,
+ GPIOD_IS_IN);
+ if (ret)
+ plat->flags |= SPI_MASTER_NO_RX;
+
+ if ((plat->flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_TX)) ==
+ (SPI_MASTER_NO_RX | SPI_MASTER_NO_TX))
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct udevice_id soft_spi_ids[] = {
+ { .compatible = "spi-gpio" },
+ { }
+};
+
+U_BOOT_DRIVER(soft_spi) = {
+ .name = "soft_spi",
+ .id = UCLASS_SPI,
+ .of_match = soft_spi_ids,
+ .ops = &soft_spi_ops,
+ .of_to_plat = soft_spi_of_to_plat,
+ .plat_auto = sizeof(struct soft_spi_plat),
+ .priv_auto = sizeof(struct soft_spi_priv),
+ .probe = soft_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/spi-emul-uclass.c b/roms/u-boot/drivers/spi/spi-emul-uclass.c
new file mode 100644
index 000000000..52f3f9a01
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi-emul-uclass.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2014 Google, Inc
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <spi.h>
+#include <spi_flash.h>
+
+UCLASS_DRIVER(spi_emul) = {
+ .id = UCLASS_SPI_EMUL,
+ .name = "spi_emul",
+};
diff --git a/roms/u-boot/drivers/spi/spi-mem-nodm.c b/roms/u-boot/drivers/spi/spi-mem-nodm.c
new file mode 100644
index 000000000..765f05fe5
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi-mem-nodm.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi-mem.h>
+
+int spi_mem_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ unsigned int pos = 0;
+ const u8 *tx_buf = NULL;
+ u8 *rx_buf = NULL;
+ u8 *op_buf;
+ int op_len;
+ u32 flag;
+ int ret;
+ int i;
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ rx_buf = op->data.buf.in;
+ else
+ tx_buf = op->data.buf.out;
+ }
+
+ op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+ op_buf = calloc(1, op_len);
+ if (!op_buf)
+ return -ENOMEM;
+
+ ret = spi_claim_bus(slave);
+ if (ret < 0)
+ return ret;
+
+ op_buf[pos++] = op->cmd.opcode;
+
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++)
+ op_buf[pos + i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ pos += op->addr.nbytes;
+ }
+
+ if (op->dummy.nbytes)
+ memset(op_buf + pos, 0xff, op->dummy.nbytes);
+
+ /* 1st transfer: opcode + address + dummy cycles */
+ flag = SPI_XFER_BEGIN;
+ /* Make sure to set END bit if no tx or rx data messages follow */
+ if (!tx_buf && !rx_buf)
+ flag |= SPI_XFER_END;
+
+ ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
+ if (ret)
+ return ret;
+
+ /* 2nd transfer: rx or tx data path */
+ if (tx_buf || rx_buf) {
+ ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
+ rx_buf, SPI_XFER_END);
+ if (ret)
+ return ret;
+ }
+
+ spi_release_bus(slave);
+
+ for (i = 0; i < pos; i++)
+ debug("%02x ", op_buf[i]);
+ debug("| [%dB %s] ",
+ tx_buf || rx_buf ? op->data.nbytes : 0,
+ tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
+ for (i = 0; i < op->data.nbytes; i++)
+ debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
+ debug("[ret %d]\n", ret);
+
+ free(op_buf);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int spi_mem_adjust_op_size(struct spi_slave *slave,
+ struct spi_mem_op *op)
+{
+ unsigned int len;
+
+ len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+ if (slave->max_write_size && len > slave->max_write_size)
+ return -EINVAL;
+
+ if (op->data.dir == SPI_MEM_DATA_IN && slave->max_read_size)
+ op->data.nbytes = min(op->data.nbytes,
+ slave->max_read_size);
+ else if (slave->max_write_size)
+ op->data.nbytes = min(op->data.nbytes,
+ slave->max_write_size - len);
+
+ if (!op->data.nbytes)
+ return -EINVAL;
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/spi/spi-mem.c b/roms/u-boot/drivers/spi/spi-mem.c
new file mode 100644
index 000000000..c095ae950
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi-mem.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Exceet Electronics GmbH
+ * Copyright (C) 2018 Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef __UBOOT__
+#include <log.h>
+#include <dm/devres.h>
+#include <linux/dmaengine.h>
+#include <linux/pm_runtime.h>
+#include "internals.h"
+#else
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <dm/device_compat.h>
+#endif
+
+#ifndef __UBOOT__
+/**
+ * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
+ * memory operation
+ * @ctlr: the SPI controller requesting this dma_map()
+ * @op: the memory operation containing the buffer to map
+ * @sgt: a pointer to a non-initialized sg_table that will be filled by this
+ * function
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares everything for you and provides a ready-to-use
+ * sg_table. This function is not intended to be called from spi drivers.
+ * Only SPI controller drivers should use it.
+ * Note that the caller must ensure the memory region pointed by
+ * op->data.buf.{in,out} is DMA-able before calling this function.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt)
+{
+ struct device *dmadev;
+
+ if (!op->data.nbytes)
+ return -EINVAL;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
+ dmadev = ctlr->dma_tx->device->dev;
+ else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
+ dmadev = ctlr->dma_rx->device->dev;
+ else
+ dmadev = ctlr->dev.parent;
+
+ if (!dmadev)
+ return -EINVAL;
+
+ return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
+
+/**
+ * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
+ * memory operation
+ * @ctlr: the SPI controller requesting this dma_unmap()
+ * @op: the memory operation containing the buffer to unmap
+ * @sgt: a pointer to an sg_table previously initialized by
+ * spi_controller_dma_map_mem_op_data()
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares things so that the CPU can access the
+ * op->data.buf.{in,out} buffer again.
+ *
+ * This function is not intended to be called from SPI drivers. Only SPI
+ * controller drivers should use it.
+ *
+ * This function should be called after the DMA operation has finished and is
+ * only valid if the previous spi_controller_dma_map_mem_op_data() call
+ * returned 0.
+ */
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+ const struct spi_mem_op *op,
+ struct sg_table *sgt)
+{
+ struct device *dmadev;
+
+ if (!op->data.nbytes)
+ return;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
+ dmadev = ctlr->dma_tx->device->dev;
+ else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
+ dmadev = ctlr->dma_rx->device->dev;
+ else
+ dmadev = ctlr->dev.parent;
+
+ spi_unmap_buf(ctlr, dmadev, sgt,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
+#endif /* __UBOOT__ */
+
+static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
+{
+ u32 mode = slave->mode;
+
+ switch (buswidth) {
+ case 1:
+ return 0;
+
+ case 2:
+ if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
+ (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
+ return 0;
+
+ break;
+
+ case 4:
+ if ((tx && (mode & SPI_TX_QUAD)) ||
+ (!tx && (mode & SPI_RX_QUAD)))
+ return 0;
+
+ break;
+ case 8:
+ if ((tx && (mode & SPI_TX_OCTAL)) ||
+ (!tx && (mode & SPI_RX_OCTAL)))
+ return 0;
+
+ break;
+
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
+bool spi_mem_default_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
+ return false;
+
+ if (op->addr.nbytes &&
+ spi_check_buswidth_req(slave, op->addr.buswidth, true))
+ return false;
+
+ if (op->dummy.nbytes &&
+ spi_check_buswidth_req(slave, op->dummy.buswidth, true))
+ return false;
+
+ if (op->data.dir != SPI_MEM_NO_DATA &&
+ spi_check_buswidth_req(slave, op->data.buswidth,
+ op->data.dir == SPI_MEM_DATA_OUT))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
+
+/**
+ * spi_mem_supports_op() - Check if a memory device and the controller it is
+ * connected to support a specific memory operation
+ * @slave: the SPI device
+ * @op: the memory operation to check
+ *
+ * Some controllers only support Single or Dual I/O, others might only
+ * support specific opcodes, and it can even happen that the controller and
+ * device both support Quad I/O but the board wiring prevents using it
+ * because only 2 I/O lines are connected.
+ *
+ * This function checks whether a specific operation is supported.
+ *
+ * Return: true if @op is supported, false otherwise.
+ */
+bool spi_mem_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct udevice *bus = slave->dev->parent;
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+
+ if (ops->mem_ops && ops->mem_ops->supports_op)
+ return ops->mem_ops->supports_op(slave, op);
+
+ return spi_mem_default_supports_op(slave, op);
+}
+EXPORT_SYMBOL_GPL(spi_mem_supports_op);
+
+/**
+ * spi_mem_exec_op() - Execute a memory operation
+ * @slave: the SPI device
+ * @op: the memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first checks that @op is supported and then tries to execute
+ * it.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
+{
+ struct udevice *bus = slave->dev->parent;
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+ unsigned int pos = 0;
+ const u8 *tx_buf = NULL;
+ u8 *rx_buf = NULL;
+ int op_len;
+ u32 flag;
+ int ret;
+ int i;
+
+ if (!spi_mem_supports_op(slave, op))
+ return -ENOTSUPP;
+
+ ret = spi_claim_bus(slave);
+ if (ret < 0)
+ return ret;
+
+ if (ops->mem_ops && ops->mem_ops->exec_op) {
+#ifndef __UBOOT__
+ /*
+ * Flush the message queue before executing our SPI memory
+ * operation to prevent preemption of regular SPI transfers.
+ */
+ spi_flush_queue(ctlr);
+
+ if (ctlr->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(ctlr->dev.parent);
+ if (ret < 0) {
+ dev_err(&ctlr->dev,
+ "Failed to power device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ mutex_lock(&ctlr->bus_lock_mutex);
+ mutex_lock(&ctlr->io_mutex);
+#endif
+ ret = ops->mem_ops->exec_op(slave, op);
+
+#ifndef __UBOOT__
+ mutex_unlock(&ctlr->io_mutex);
+ mutex_unlock(&ctlr->bus_lock_mutex);
+
+ if (ctlr->auto_runtime_pm)
+ pm_runtime_put(ctlr->dev.parent);
+#endif
+
+ /*
+ * Some controllers only optimize specific paths (typically the
+ * read path) and expect the core to use the regular SPI
+ * interface in other cases.
+ */
+ if (!ret || ret != -ENOTSUPP) {
+ spi_release_bus(slave);
+ return ret;
+ }
+ }
+
+#ifndef __UBOOT__
+ tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
+ op->dummy.nbytes;
+
+ /*
+ * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
+ * we're guaranteed that this buffer is DMA-able, as required by the
+ * SPI layer.
+ */
+ tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ spi_message_init(&msg);
+
+ tmpbuf[0] = op->cmd.opcode;
+ xfers[xferpos].tx_buf = tmpbuf;
+ xfers[xferpos].len = sizeof(op->cmd.opcode);
+ xfers[xferpos].tx_nbits = op->cmd.buswidth;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen++;
+
+ if (op->addr.nbytes) {
+ int i;
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ tmpbuf[i + 1] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ xfers[xferpos].tx_buf = tmpbuf + 1;
+ xfers[xferpos].len = op->addr.nbytes;
+ xfers[xferpos].tx_nbits = op->addr.buswidth;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen += op->addr.nbytes;
+ }
+
+ if (op->dummy.nbytes) {
+ memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
+ xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
+ xfers[xferpos].len = op->dummy.nbytes;
+ xfers[xferpos].tx_nbits = op->dummy.buswidth;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen += op->dummy.nbytes;
+ }
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ xfers[xferpos].rx_buf = op->data.buf.in;
+ xfers[xferpos].rx_nbits = op->data.buswidth;
+ } else {
+ xfers[xferpos].tx_buf = op->data.buf.out;
+ xfers[xferpos].tx_nbits = op->data.buswidth;
+ }
+
+ xfers[xferpos].len = op->data.nbytes;
+ spi_message_add_tail(&xfers[xferpos], &msg);
+ xferpos++;
+ totalxferlen += op->data.nbytes;
+ }
+
+ ret = spi_sync(slave, &msg);
+
+ kfree(tmpbuf);
+
+ if (ret)
+ return ret;
+
+ if (msg.actual_length != totalxferlen)
+ return -EIO;
+#else
+
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ rx_buf = op->data.buf.in;
+ else
+ tx_buf = op->data.buf.out;
+ }
+
+ op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+
+ /*
+ * Avoid using malloc() here so that we can use this code in SPL where
+ * simple malloc may be used. That implementation does not allow free()
+ * so repeated calls to this code can exhaust the space.
+ *
+ * The value of op_len is small, since it does not include the actual
+ * data being sent, only the op-code and address. In fact, it should be
+ * possible to just use a small fixed value here instead of op_len.
+ */
+ u8 op_buf[op_len];
+
+ op_buf[pos++] = op->cmd.opcode;
+
+ if (op->addr.nbytes) {
+ for (i = 0; i < op->addr.nbytes; i++)
+ op_buf[pos + i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ pos += op->addr.nbytes;
+ }
+
+ if (op->dummy.nbytes)
+ memset(op_buf + pos, 0xff, op->dummy.nbytes);
+
+ /* 1st transfer: opcode + address + dummy cycles */
+ flag = SPI_XFER_BEGIN;
+ /* Make sure to set END bit if no tx or rx data messages follow */
+ if (!tx_buf && !rx_buf)
+ flag |= SPI_XFER_END;
+
+ ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
+ if (ret)
+ return ret;
+
+ /* 2nd transfer: rx or tx data path */
+ if (tx_buf || rx_buf) {
+ ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
+ rx_buf, SPI_XFER_END);
+ if (ret)
+ return ret;
+ }
+
+ spi_release_bus(slave);
+
+ for (i = 0; i < pos; i++)
+ debug("%02x ", op_buf[i]);
+ debug("| [%dB %s] ",
+ tx_buf || rx_buf ? op->data.nbytes : 0,
+ tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
+ for (i = 0; i < op->data.nbytes; i++)
+ debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
+ debug("[ret %d]\n", ret);
+
+ if (ret < 0)
+ return ret;
+#endif /* __UBOOT__ */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_exec_op);
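A short hedged usage sketch of spi_mem_exec_op(): it builds a 3-byte JEDEC ID read (opcode 0x9f) with the SPI_MEM_OP_* constructors, which are assumed to be the standard helpers from spi-mem.h; 'slave' is assumed to be provided by the caller.

#include <spi.h>
#include <spi-mem.h>

static int read_jedec_id(struct spi_slave *slave, u8 id[3])
{
	/* command on a single line, no address, no dummy cycles, 3 bytes in */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	return spi_mem_exec_op(slave, &op);
}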
+
+/**
+ * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
+ * match controller limitations
+ * @slave: the SPI device
+ * @op: the operation to adjust
+ *
+ * Some controllers have FIFO limitations and must split a data transfer
+ * operation into multiple ones, others require a specific alignment for
+ * optimized accesses. This function allows SPI mem drivers to split a single
+ * operation into multiple sub-operations when required.
+ *
+ * Return: a negative error code if the controller can't properly adjust @op,
+ * 0 otherwise. Note that @op->data.nbytes will be updated if @op
+ * can't be handled in a single step.
+ */
+int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
+{
+ struct udevice *bus = slave->dev->parent;
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+
+ if (ops->mem_ops && ops->mem_ops->adjust_op_size)
+ return ops->mem_ops->adjust_op_size(slave, op);
+
+ if (!ops->mem_ops || !ops->mem_ops->exec_op) {
+ unsigned int len;
+
+ len = sizeof(op->cmd.opcode) + op->addr.nbytes +
+ op->dummy.nbytes;
+ if (slave->max_write_size && len > slave->max_write_size)
+ return -EINVAL;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ if (slave->max_read_size)
+ op->data.nbytes = min(op->data.nbytes,
+ slave->max_read_size);
+ } else if (slave->max_write_size) {
+ op->data.nbytes = min(op->data.nbytes,
+ slave->max_write_size - len);
+ }
+
+ if (!op->data.nbytes)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
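The usual caller pattern for spi_mem_adjust_op_size() is to let the controller shrink op.data.nbytes, execute the operation, then advance and repeat. A hedged sketch, assuming spi-mem.h is available and a plain 3-byte-address read opcode (0x03); the slave, buffer and length come from the caller:

static int read_in_chunks(struct spi_slave *slave, u32 addr, u8 *buf, size_t len)
{
	while (len) {
		struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
						  SPI_MEM_OP_ADDR(3, addr, 1),
						  SPI_MEM_OP_NO_DUMMY,
						  SPI_MEM_OP_DATA_IN(len, buf, 1));
		int ret = spi_mem_adjust_op_size(slave, &op);

		if (!ret)
			ret = spi_mem_exec_op(slave, &op);
		if (ret)
			return ret;

		/* op.data.nbytes now holds what was actually transferred */
		addr += op.data.nbytes;
		buf += op.data.nbytes;
		len -= op.data.nbytes;
	}

	return 0;
}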
+
+#ifndef __UBOOT__
+static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
+{
+ return container_of(drv, struct spi_mem_driver, spidrv.driver);
+}
+
+static int spi_mem_probe(struct spi_device *spi)
+{
+ struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_mem *mem;
+
+ mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ mem->spi = spi;
+ spi_set_drvdata(spi, mem);
+
+ return memdrv->probe(mem);
+}
+
+static int spi_mem_remove(struct spi_device *spi)
+{
+ struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_mem *mem = spi_get_drvdata(spi);
+
+ if (memdrv->remove)
+ return memdrv->remove(mem);
+
+ return 0;
+}
+
+static void spi_mem_shutdown(struct spi_device *spi)
+{
+ struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+ struct spi_mem *mem = spi_get_drvdata(spi);
+
+ if (memdrv->shutdown)
+ memdrv->shutdown(mem);
+}
+
+/**
+ * spi_mem_driver_register_with_owner() - Register a SPI memory driver
+ * @memdrv: the SPI memory driver to register
+ * @owner: the owner of this driver
+ *
+ * Registers a SPI memory driver.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+
+int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
+ struct module *owner)
+{
+ memdrv->spidrv.probe = spi_mem_probe;
+ memdrv->spidrv.remove = spi_mem_remove;
+ memdrv->spidrv.shutdown = spi_mem_shutdown;
+
+ return __spi_register_driver(owner, &memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
+
+/**
+ * spi_mem_driver_unregister() - Unregister a SPI memory driver
+ * @memdrv: the SPI memory driver to unregister
+ *
+ * Unregisters a SPI memory driver.
+ */
+void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
+{
+ spi_unregister_driver(&memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
+#endif /* __UBOOT__ */
diff --git a/roms/u-boot/drivers/spi/spi-qup.c b/roms/u-boot/drivers/spi/spi-qup.c
new file mode 100644
index 000000000..cdea5405f
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi-qup.c
@@ -0,0 +1,803 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Qualcomm QUP SPI controller
+ * FIFO and Block modes supported, no DMA
+ * mode support
+ *
+ * Copyright (c) 2020 Sartura Ltd.
+ *
+ * Author: Robert Marko <robert.marko@sartura.hr>
+ * Author: Luka Kovacic <luka.kovacic@sartura.hr>
+ *
+ * Based on stock U-boot and Linux drivers
+ */
+
+#include <asm/gpio.h>
+#include <asm/io.h>
+#include <clk.h>
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <linux/delay.h>
+#include <spi.h>
+
+#define QUP_CONFIG 0x0000
+#define QUP_STATE 0x0004
+#define QUP_IO_M_MODES 0x0008
+#define QUP_SW_RESET 0x000c
+#define QUP_OPERATIONAL 0x0018
+#define QUP_ERROR_FLAGS 0x001c
+#define QUP_ERROR_FLAGS_EN 0x0020
+#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_HW_VERSION 0x0030
+#define QUP_MX_OUTPUT_CNT 0x0100
+#define QUP_OUTPUT_FIFO 0x0110
+#define QUP_MX_WRITE_CNT 0x0150
+#define QUP_MX_INPUT_CNT 0x0200
+#define QUP_MX_READ_CNT 0x0208
+#define QUP_INPUT_FIFO 0x0218
+
+#define SPI_CONFIG 0x0300
+#define SPI_IO_CONTROL 0x0304
+#define SPI_ERROR_FLAGS 0x0308
+#define SPI_ERROR_FLAGS_EN 0x030c
+
+/* QUP_CONFIG fields */
+#define QUP_CONFIG_SPI_MODE BIT(8)
+#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
+#define QUP_CONFIG_NO_INPUT BIT(7)
+#define QUP_CONFIG_NO_OUTPUT BIT(6)
+#define QUP_CONFIG_N 0x001f
+
+/* QUP_STATE fields */
+#define QUP_STATE_VALID BIT(2)
+#define QUP_STATE_RESET 0
+#define QUP_STATE_RUN 1
+#define QUP_STATE_PAUSE 3
+#define QUP_STATE_MASK 3
+#define QUP_STATE_CLEAR 2
+
+/* QUP_IO_M_MODES fields */
+#define QUP_IO_M_PACK_EN BIT(15)
+#define QUP_IO_M_UNPACK_EN BIT(14)
+#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
+#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
+#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
+#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
+
+#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
+#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
+#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
+#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
+
+#define QUP_IO_M_MODE_FIFO 0
+#define QUP_IO_M_MODE_BLOCK 1
+#define QUP_IO_M_MODE_DMOV 2
+#define QUP_IO_M_MODE_BAM 3
+
+/* QUP_OPERATIONAL fields */
+#define QUP_OP_IN_BLOCK_READ_REQ BIT(13)
+#define QUP_OP_OUT_BLOCK_WRITE_REQ BIT(12)
+#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
+#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
+#define QUP_OP_IN_SERVICE_FLAG BIT(9)
+#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
+#define QUP_OP_IN_FIFO_FULL BIT(7)
+#define QUP_OP_OUT_FIFO_FULL BIT(6)
+#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
+#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
+
+/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
+#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
+#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
+#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
+#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
+
+/* SPI_CONFIG fields */
+#define SPI_CONFIG_HS_MODE BIT(10)
+#define SPI_CONFIG_INPUT_FIRST BIT(9)
+#define SPI_CONFIG_LOOPBACK BIT(8)
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS BIT(11)
+#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
+#define SPI_IO_C_MX_CS_MODE BIT(8)
+#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
+#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
+#define SPI_IO_C_CS_SELECT_MASK 0x000c
+#define SPI_IO_C_TRISTATE_CS BIT(1)
+#define SPI_IO_C_NO_TRI_STATE BIT(0)
+
+/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
+#define SPI_ERROR_CLK_OVER_RUN BIT(1)
+#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
+
+#define SPI_NUM_CHIPSELECTS 4
+
+#define SPI_DELAY_THRESHOLD 1
+#define SPI_DELAY_RETRY 10
+
+#define SPI_RESET_STATE 0
+#define SPI_RUN_STATE 1
+#define SPI_CORE_RESET 0
+#define SPI_CORE_RUNNING 1
+
+#define DUMMY_DATA_VAL 0
+#define TIMEOUT_CNT 100
+
+#define QUP_STATE_VALID_BIT 2
+#define QUP_CONFIG_MINI_CORE_MSK (0x0F << 8)
+#define QUP_CONFIG_MINI_CORE_SPI BIT(8)
+#define QUP_CONF_INPUT_MSK BIT(7)
+#define QUP_CONF_INPUT_ENA (0 << 7)
+#define QUP_CONF_NO_INPUT BIT(7)
+#define QUP_CONF_OUTPUT_MSK BIT(6)
+#define QUP_CONF_OUTPUT_ENA (0 << 6)
+#define QUP_CONF_NO_OUTPUT BIT(6)
+#define QUP_STATE_RUN_STATE 0x1
+#define QUP_STATE_RESET_STATE 0x0
+#define QUP_STATE_PAUSE_STATE 0x3
+#define SPI_BIT_WORD_MSK 0x1F
+#define SPI_8_BIT_WORD 0x07
+#define LOOP_BACK_MSK BIT(8)
+#define NO_LOOP_BACK (0 << 8)
+#define SLAVE_OPERATION_MSK BIT(5)
+#define SLAVE_OPERATION (0 << 5)
+#define CLK_ALWAYS_ON (0 << 9)
+#define MX_CS_MODE BIT(8)
+#define CS_POLARITY_MASK BIT(4)
+#define NO_TRI_STATE BIT(0)
+#define FORCE_CS_MSK BIT(11)
+#define FORCE_CS_EN BIT(11)
+#define FORCE_CS_DIS (0 << 11)
+#define OUTPUT_BIT_SHIFT_MSK BIT(16)
+#define OUTPUT_BIT_SHIFT_EN BIT(16)
+#define INPUT_BLOCK_MODE_MSK (0x03 << 12)
+#define INPUT_BLOCK_MODE (0x01 << 12)
+#define OUTPUT_BLOCK_MODE_MSK (0x03 << 10)
+#define OUTPUT_BLOCK_MODE (0x01 << 10)
+#define INPUT_BAM_MODE (0x3 << 12)
+#define OUTPUT_BAM_MODE (0x3 << 10)
+#define PACK_EN (0x1 << 15)
+#define UNPACK_EN (0x1 << 14)
+#define PACK_EN_MSK (0x1 << 15)
+#define UNPACK_EN_MSK (0x1 << 14)
+#define OUTPUT_SERVICE_MSK (0x1 << 8)
+#define INPUT_SERVICE_MSK (0x1 << 9)
+#define OUTPUT_SERVICE_DIS (0x1 << 8)
+#define INPUT_SERVICE_DIS (0x1 << 9)
+#define BLSP0_SPI_DEASSERT_WAIT_REG 0x0310
+#define QUP_DATA_AVAILABLE_FOR_READ BIT(5)
+#define SPI_INPUT_BLOCK_SIZE 4
+#define SPI_OUTPUT_BLOCK_SIZE 4
+#define SPI_BITLEN_MSK 0x07
+#define MAX_COUNT_SIZE 0xffff
+
+struct qup_spi_priv {
+ phys_addr_t base;
+ struct clk clk;
+ u32 num_cs;
+ struct gpio_desc cs_gpios[SPI_NUM_CHIPSELECTS];
+ bool cs_high;
+ u32 core_state;
+};
+
+static int qup_spi_set_cs(struct udevice *dev, unsigned int cs, bool enable)
+{
+ struct qup_spi_priv *priv = dev_get_priv(dev);
+
+ debug("%s: cs=%d enable=%d\n", __func__, cs, enable);
+
+ if (cs >= SPI_NUM_CHIPSELECTS)
+ return -ENODEV;
+
+ if (!dm_gpio_is_valid(&priv->cs_gpios[cs]))
+ return -EINVAL;
+
+ if (priv->cs_high)
+ enable = !enable;
+
+ return dm_gpio_set_value(&priv->cs_gpios[cs], enable ? 1 : 0);
+}
+
+/*
+ * Function to write data to OUTPUT FIFO
+ */
+static void qup_spi_write_byte(struct udevice *dev, unsigned char data)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ /* Wait for space in the FIFO */
+ while ((readl(priv->base + QUP_OPERATIONAL) & QUP_OP_OUT_FIFO_FULL))
+ udelay(1);
+
+ /* Write the byte of data */
+ writel(data, priv->base + QUP_OUTPUT_FIFO);
+}
+
+/*
+ * Function to read data from Input FIFO
+ */
+static unsigned char qup_spi_read_byte(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ /* Wait for Data in FIFO */
+ while (!(readl(priv->base + QUP_OPERATIONAL) & QUP_DATA_AVAILABLE_FOR_READ)) {
+ printf("Stuck at FIFO data wait\n");
+ udelay(1);
+ }
+
+ /* Read a byte of data */
+ return readl(priv->base + QUP_INPUT_FIFO) & 0xff;
+}
+
+/*
+ * Function to check whether the Input or Output FIFO
+ * has data to be serviced
+ */
+static int qup_spi_check_fifo_status(struct udevice *dev, u32 reg_addr)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ unsigned int count = TIMEOUT_CNT;
+ unsigned int status_flag;
+ unsigned int val;
+
+ do {
+ val = readl(priv->base + reg_addr);
+ count--;
+ if (count == 0)
+ return -ETIMEDOUT;
+
+ status_flag = ((val & QUP_OP_OUT_SERVICE_FLAG) | (val & QUP_OP_IN_SERVICE_FLAG));
+ } while (!status_flag);
+
+ return 0;
+}
+
+/*
+ * Function to configure Input and Output enable/disable
+ */
+static void qup_spi_enable_io_config(struct udevice *dev, u32 write_cnt, u32 read_cnt)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+
+ if (write_cnt) {
+ clrsetbits_le32(priv->base + QUP_CONFIG,
+ QUP_CONF_OUTPUT_MSK, QUP_CONF_OUTPUT_ENA);
+ } else {
+ clrsetbits_le32(priv->base + QUP_CONFIG,
+ QUP_CONF_OUTPUT_MSK, QUP_CONF_NO_OUTPUT);
+ }
+
+ if (read_cnt) {
+ clrsetbits_le32(priv->base + QUP_CONFIG,
+ QUP_CONF_INPUT_MSK, QUP_CONF_INPUT_ENA);
+ } else {
+ clrsetbits_le32(priv->base + QUP_CONFIG,
+ QUP_CONF_INPUT_MSK, QUP_CONF_NO_INPUT);
+ }
+}
+
+static int check_bit_state(struct udevice *dev, u32 reg_addr, int bit_num, int val,
+ int us_delay)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ unsigned int count = TIMEOUT_CNT;
+ unsigned int bit_val = ((readl(priv->base + reg_addr) >> bit_num) & 0x01);
+
+ while (bit_val != val) {
+ count--;
+ if (count == 0)
+ return -ETIMEDOUT;
+ udelay(us_delay);
+ bit_val = ((readl(priv->base + reg_addr) >> bit_num) & 0x01);
+ }
+
+ return 0;
+}
+
+/*
+ * Check whether QUPn State is valid
+ */
+static int check_qup_state_valid(struct udevice *dev)
+{
+ return check_bit_state(dev, QUP_STATE, QUP_STATE_VALID, 1, 1);
+}
+
+/*
+ * Configure QUPn Core state
+ */
+static int qup_spi_config_spi_state(struct udevice *dev, unsigned int state)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ u32 val;
+ int ret;
+
+ ret = check_qup_state_valid(dev);
+ if (ret != 0)
+ return ret;
+
+ switch (state) {
+ case SPI_RUN_STATE:
+ /* Set the state to RUN */
+ val = ((readl(priv->base + QUP_STATE) & ~QUP_STATE_MASK)
+ | QUP_STATE_RUN);
+ writel(val, priv->base + QUP_STATE);
+ ret = check_qup_state_valid(dev);
+ if (ret != 0)
+ return ret;
+ priv->core_state = SPI_CORE_RUNNING;
+ break;
+ case SPI_RESET_STATE:
+ /* Set the state to RESET */
+ val = ((readl(priv->base + QUP_STATE) & ~QUP_STATE_MASK)
+ | QUP_STATE_RESET);
+ writel(val, priv->base + QUP_STATE);
+ ret = check_qup_state_valid(dev);
+ if (ret != 0)
+ return ret;
+ priv->core_state = SPI_CORE_RESET;
+ break;
+ default:
+ printf("Unsupported QUP SPI state: %d\n", state);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
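+
+/*
+ * Note added for clarity (not part of the original source): the transfer
+ * helpers below follow the usual QUP sequence - put the core into the RESET
+ * state, program the MX_OUTPUT_CNT/MX_INPUT_CNT byte counts, switch to RUN
+ * for the actual FIFO traffic, then drop back to RESET to end the transfer.
+ */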
+
+/*
+ * Function to read the requested number of bytes from the Input FIFO
+ */
+static int __qup_spi_blsp_spi_read(struct udevice *dev, u8 *data_buffer, unsigned int bytes)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ u32 val;
+ unsigned int i;
+ unsigned int read_bytes = bytes;
+ unsigned int fifo_count;
+ int ret = 0;
+ int state_config;
+
+	/* Configure the number of bytes to read */
+ state_config = qup_spi_config_spi_state(dev, SPI_RESET_STATE);
+ if (state_config)
+ return state_config;
+
+ /* Configure input and output enable */
+ qup_spi_enable_io_config(dev, 0, read_bytes);
+
+ writel(bytes, priv->base + QUP_MX_INPUT_CNT);
+
+ state_config = qup_spi_config_spi_state(dev, SPI_RUN_STATE);
+ if (state_config)
+ return state_config;
+
+ while (read_bytes) {
+ ret = qup_spi_check_fifo_status(dev, QUP_OPERATIONAL);
+ if (ret != 0)
+ goto out;
+
+ val = readl(priv->base + QUP_OPERATIONAL);
+ if (val & QUP_OP_IN_SERVICE_FLAG) {
+ /*
+ * acknowledge to hw that software will
+ * read input data
+ */
+ val &= QUP_OP_IN_SERVICE_FLAG;
+ writel(val, priv->base + QUP_OPERATIONAL);
+
+ fifo_count = ((read_bytes > SPI_INPUT_BLOCK_SIZE) ?
+ SPI_INPUT_BLOCK_SIZE : read_bytes);
+
+ for (i = 0; i < fifo_count; i++) {
+ *data_buffer = qup_spi_read_byte(dev);
+ data_buffer++;
+ read_bytes--;
+ }
+ }
+ }
+
+out:
+ /*
+ * Put the SPI Core back in the Reset State
+ * to end the transfer
+ */
+ (void)qup_spi_config_spi_state(dev, SPI_RESET_STATE);
+
+ return ret;
+}
+
+static int qup_spi_blsp_spi_read(struct udevice *dev, u8 *data_buffer, unsigned int bytes)
+{
+ int length, ret;
+
+ while (bytes) {
+ length = (bytes < MAX_COUNT_SIZE) ? bytes : MAX_COUNT_SIZE;
+
+ ret = __qup_spi_blsp_spi_read(dev, data_buffer, length);
+ if (ret != 0)
+ return ret;
+
+ data_buffer += length;
+ bytes -= length;
+ }
+
+ return 0;
+}
+
+/*
+ * Function to write data to the Output FIFO
+ */
+static int __qup_blsp_spi_write(struct udevice *dev, const u8 *cmd_buffer, unsigned int bytes)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ u32 val;
+ unsigned int i;
+ unsigned int write_len = bytes;
+ unsigned int read_len = bytes;
+ unsigned int fifo_count;
+ int ret = 0;
+ int state_config;
+
+ state_config = qup_spi_config_spi_state(dev, SPI_RESET_STATE);
+ if (state_config)
+ return state_config;
+
+ writel(bytes, priv->base + QUP_MX_OUTPUT_CNT);
+ writel(bytes, priv->base + QUP_MX_INPUT_CNT);
+ state_config = qup_spi_config_spi_state(dev, SPI_RUN_STATE);
+ if (state_config)
+ return state_config;
+
+ /* Configure input and output enable */
+ qup_spi_enable_io_config(dev, write_len, read_len);
+
+ /*
+	 * read_len is tracked as well so that the dummy data clocked in for
+	 * every byte written is drained from the input FIFO. This keeps a
+	 * write-then-read transaction in sync and ensures that the subsequent
+	 * read cycle returns the actual data.
+ */
+ while (write_len || read_len) {
+ ret = qup_spi_check_fifo_status(dev, QUP_OPERATIONAL);
+ if (ret != 0)
+ goto out;
+
+ val = readl(priv->base + QUP_OPERATIONAL);
+ if (val & QUP_OP_OUT_SERVICE_FLAG) {
+ /*
+ * acknowledge to hw that software will write
+ * expected output data
+ */
+ val &= QUP_OP_OUT_SERVICE_FLAG;
+ writel(val, priv->base + QUP_OPERATIONAL);
+
+ if (write_len > SPI_OUTPUT_BLOCK_SIZE)
+ fifo_count = SPI_OUTPUT_BLOCK_SIZE;
+ else
+ fifo_count = write_len;
+
+ for (i = 0; i < fifo_count; i++) {
+ /* Write actual data to output FIFO */
+ qup_spi_write_byte(dev, *cmd_buffer);
+ cmd_buffer++;
+ write_len--;
+ }
+ }
+ if (val & QUP_OP_IN_SERVICE_FLAG) {
+ /*
+ * acknowledge to hw that software
+ * will read input data
+ */
+ val &= QUP_OP_IN_SERVICE_FLAG;
+ writel(val, priv->base + QUP_OPERATIONAL);
+
+ if (read_len > SPI_INPUT_BLOCK_SIZE)
+ fifo_count = SPI_INPUT_BLOCK_SIZE;
+ else
+ fifo_count = read_len;
+
+ for (i = 0; i < fifo_count; i++) {
+ /* Read dummy data for the data written */
+ (void)qup_spi_read_byte(dev);
+
+				/* Decrement the read count after draining the
+				 * dummy data from the device. This makes sure
+				 * the dummy bytes are consumed before more
+				 * data is written to the FIFO.
+				 */
+ read_len--;
+ }
+ }
+ }
+out:
+ /*
+ * Put the SPI Core back in the Reset State
+ * to end the transfer
+ */
+ (void)qup_spi_config_spi_state(dev, SPI_RESET_STATE);
+
+ return ret;
+}
+
+static int qup_spi_blsp_spi_write(struct udevice *dev, const u8 *cmd_buffer, unsigned int bytes)
+{
+ int length, ret;
+
+ while (bytes) {
+ length = (bytes < MAX_COUNT_SIZE) ? bytes : MAX_COUNT_SIZE;
+
+ ret = __qup_blsp_spi_write(dev, cmd_buffer, length);
+ if (ret != 0)
+ return ret;
+
+ cmd_buffer += length;
+ bytes -= length;
+ }
+
+ return 0;
+}
+
+static int qup_spi_set_speed(struct udevice *dev, uint speed)
+{
+ return 0;
+}
+
+static int qup_spi_set_mode(struct udevice *dev, uint mode)
+{
+ struct qup_spi_priv *priv = dev_get_priv(dev);
+ unsigned int clk_idle_state;
+ unsigned int input_first_mode;
+ u32 val;
+
+ switch (mode) {
+ case SPI_MODE_0:
+ clk_idle_state = 0;
+ input_first_mode = SPI_CONFIG_INPUT_FIRST;
+ break;
+ case SPI_MODE_1:
+ clk_idle_state = 0;
+ input_first_mode = 0;
+ break;
+ case SPI_MODE_2:
+ clk_idle_state = 1;
+ input_first_mode = SPI_CONFIG_INPUT_FIRST;
+ break;
+ case SPI_MODE_3:
+ clk_idle_state = 1;
+ input_first_mode = 0;
+ break;
+ default:
+ printf("Unsupported spi mode: %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (mode & SPI_CS_HIGH)
+ priv->cs_high = true;
+ else
+ priv->cs_high = false;
+
+ val = readl(priv->base + SPI_CONFIG);
+ val |= input_first_mode;
+ writel(val, priv->base + SPI_CONFIG);
+
+ val = readl(priv->base + SPI_IO_CONTROL);
+ if (clk_idle_state)
+ val |= SPI_IO_C_CLK_IDLE_HIGH;
+ else
+ val &= ~SPI_IO_C_CLK_IDLE_HIGH;
+
+ writel(val, priv->base + SPI_IO_CONTROL);
+
+ return 0;
+}
+
+static void qup_spi_reset(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+
+ /* Driver may not be probed yet */
+ if (!priv)
+ return;
+
+ writel(0x1, priv->base + QUP_SW_RESET);
+ udelay(5);
+}
+
+static int qup_spi_hw_init(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct qup_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ /* QUPn module configuration */
+ qup_spi_reset(dev);
+
+ /* Set the QUPn state */
+ ret = qup_spi_config_spi_state(dev, SPI_RESET_STATE);
+ if (ret)
+ return ret;
+
+	/*
+	 * Configure the mini core as an SPI core with input and output
+	 * enabled and an 8-bit word size
+	 */
+ clrsetbits_le32(priv->base + QUP_CONFIG, (QUP_CONFIG_MINI_CORE_MSK |
+ QUP_CONF_INPUT_MSK |
+ QUP_CONF_OUTPUT_MSK |
+ SPI_BIT_WORD_MSK),
+ (QUP_CONFIG_MINI_CORE_SPI |
+ QUP_CONF_INPUT_ENA |
+ QUP_CONF_OUTPUT_ENA |
+ SPI_8_BIT_WORD));
+
+	/*
+	 * Configure the SPI protocol register: master (not slave)
+	 * operation and no loopback
+	 */
+ clrsetbits_le32(priv->base + SPI_CONFIG, (LOOP_BACK_MSK |
+ SLAVE_OPERATION_MSK),
+ (NO_LOOP_BACK |
+ SLAVE_OPERATION));
+
+ /*
+ * Configure SPI IO Control Register
+ * CLK_ALWAYS_ON = 0
+ * MX_CS_MODE = 0
+ * NO_TRI_STATE = 1
+ */
+ writel((CLK_ALWAYS_ON | NO_TRI_STATE), priv->base + SPI_IO_CONTROL);
+
+ /*
+ * Configure SPI IO Modes.
+ * OUTPUT_BIT_SHIFT_EN = 1
+ * INPUT_MODE = Block Mode
+	 * OUTPUT_MODE = Block Mode
+ */
+
+ clrsetbits_le32(priv->base + QUP_IO_M_MODES, (OUTPUT_BIT_SHIFT_MSK |
+ INPUT_BLOCK_MODE_MSK |
+ OUTPUT_BLOCK_MODE_MSK),
+ (OUTPUT_BIT_SHIFT_EN |
+ INPUT_BLOCK_MODE |
+ OUTPUT_BLOCK_MODE));
+
+	/* Mask all error interrupts and clear the deassert wait time */
+ writel(0, priv->base + SPI_ERROR_FLAGS_EN);
+ writel(0, priv->base + QUP_ERROR_FLAGS_EN);
+ writel(0, priv->base + BLSP0_SPI_DEASSERT_WAIT_REG);
+
+ return ret;
+}
+
+static int qup_spi_claim_bus(struct udevice *dev)
+{
+ int ret;
+
+ ret = qup_spi_hw_init(dev);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int qup_spi_release_bus(struct udevice *dev)
+{
+ /* Reset the SPI hardware */
+ qup_spi_reset(dev);
+
+ return 0;
+}
+
+static int qup_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ unsigned int len;
+ const u8 *txp = dout;
+ u8 *rxp = din;
+ int ret = 0;
+
+ if (bitlen & SPI_BITLEN_MSK) {
+ printf("Invalid bit length\n");
+ return -EINVAL;
+ }
+
+ len = bitlen >> 3;
+
+ if (flags & SPI_XFER_BEGIN) {
+ ret = qup_spi_hw_init(dev);
+ if (ret != 0)
+ return ret;
+
+ ret = qup_spi_set_cs(bus, slave_plat->cs, false);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (dout != NULL) {
+ ret = qup_spi_blsp_spi_write(dev, txp, len);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (din != NULL) {
+ ret = qup_spi_blsp_spi_read(dev, rxp, len);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (flags & SPI_XFER_END) {
+ ret = qup_spi_set_cs(bus, slave_plat->cs, true);
+ if (ret != 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int qup_spi_probe(struct udevice *dev)
+{
+ struct qup_spi_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ priv->base = dev_read_addr(dev);
+ if (priv->base == FDT_ADDR_T_NONE)
+ return -EINVAL;
+
+ ret = clk_get_by_index(dev, 0, &priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(&priv->clk);
+ if (ret < 0)
+ return ret;
+
+ priv->num_cs = dev_read_u32_default(dev, "num-cs", 1);
+
+ ret = gpio_request_list_by_name(dev, "cs-gpios", priv->cs_gpios,
+ priv->num_cs, GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
+ if (ret < 0) {
+ printf("Can't get %s cs gpios: %d\n", dev->name, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct dm_spi_ops qup_spi_ops = {
+ .claim_bus = qup_spi_claim_bus,
+ .release_bus = qup_spi_release_bus,
+ .xfer = qup_spi_xfer,
+ .set_speed = qup_spi_set_speed,
+ .set_mode = qup_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id qup_spi_ids[] = {
+ { .compatible = "qcom,spi-qup-v1.1.1", },
+ { .compatible = "qcom,spi-qup-v2.1.1", },
+ { .compatible = "qcom,spi-qup-v2.2.1", },
+ { }
+};
+
+U_BOOT_DRIVER(spi_qup) = {
+ .name = "spi_qup",
+ .id = UCLASS_SPI,
+ .of_match = qup_spi_ids,
+ .ops = &qup_spi_ops,
+ .priv_auto = sizeof(struct qup_spi_priv),
+ .probe = qup_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/spi-sifive.c b/roms/u-boot/drivers/spi/spi-sifive.c
new file mode 100644
index 000000000..0a00df0ac
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi-sifive.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 SiFive, Inc.
+ * Copyright 2019 Bhargav Shah <bhargavshah1988@gmail.com>
+ *
+ * SiFive SPI controller driver (master mode only)
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <wait_bit.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <clk.h>
+
+#define SIFIVE_SPI_MAX_CS 32
+
+#define SIFIVE_SPI_DEFAULT_DEPTH 8
+#define SIFIVE_SPI_DEFAULT_BITS 8
+
+/* register offsets */
+#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */
+#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */
+#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */
+#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */
+#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */
+#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */
+#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */
+#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */
+#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */
+#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */
+#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */
+#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */
+#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */
+#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */
+#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */
+#define SIFIVE_SPI_REG_IP            0x74 /* Interrupt Pending Register */
+
+/* sckdiv bits */
+#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU
+
+/* sckmode bits */
+#define SIFIVE_SPI_SCKMODE_PHA BIT(0)
+#define SIFIVE_SPI_SCKMODE_POL BIT(1)
+#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \
+ SIFIVE_SPI_SCKMODE_POL)
+
+/* csmode bits */
+#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U
+#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U
+#define SIFIVE_SPI_CSMODE_MODE_OFF 3U
+
+/* delay0 bits */
+#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU
+#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16)
+
+/* delay1 bits */
+#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x))
+#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU
+#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16)
+
+/* fmt bits */
+#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U
+#define SIFIVE_SPI_FMT_PROTO_DUAL 1U
+#define SIFIVE_SPI_FMT_PROTO_QUAD 2U
+#define SIFIVE_SPI_FMT_PROTO_MASK 3U
+#define SIFIVE_SPI_FMT_ENDIAN BIT(2)
+#define SIFIVE_SPI_FMT_DIR BIT(3)
+#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16)
+#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16)
+
+/* txdata bits */
+#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_TXDATA_FULL BIT(31)
+
+/* rxdata bits */
+#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU
+#define SIFIVE_SPI_RXDATA_EMPTY BIT(31)
+
+/* ie and ip bits */
+#define SIFIVE_SPI_IP_TXWM BIT(0)
+#define SIFIVE_SPI_IP_RXWM BIT(1)
+
+/* format protocol */
+#define SIFIVE_SPI_PROTO_QUAD 4 /* 4 lines I/O protocol transfer */
+#define SIFIVE_SPI_PROTO_DUAL 2 /* 2 lines I/O protocol transfer */
+#define SIFIVE_SPI_PROTO_SINGLE 1 /* 1 line I/O protocol transfer */
+
+struct sifive_spi {
+ void *regs; /* base address of the registers */
+ u32 fifo_depth;
+ u32 bits_per_word;
+	u32 cs_inactive;	/* Level of the CS pins when inactive */
+ u32 freq;
+ u32 num_cs;
+ u8 fmt_proto;
+};
+
+static void sifive_spi_prep_device(struct sifive_spi *spi,
+ struct dm_spi_slave_plat *slave_plat)
+{
+ /* Update the chip select polarity */
+ if (slave_plat->mode & SPI_CS_HIGH)
+ spi->cs_inactive &= ~BIT(slave_plat->cs);
+ else
+ spi->cs_inactive |= BIT(slave_plat->cs);
+ writel(spi->cs_inactive, spi->regs + SIFIVE_SPI_REG_CSDEF);
+
+ /* Select the correct device */
+ writel(slave_plat->cs, spi->regs + SIFIVE_SPI_REG_CSID);
+}
+
+static int sifive_spi_set_cs(struct sifive_spi *spi,
+ struct dm_spi_slave_plat *slave_plat)
+{
+ u32 cs_mode = SIFIVE_SPI_CSMODE_MODE_HOLD;
+
+ if (slave_plat->mode & SPI_CS_HIGH)
+ cs_mode = SIFIVE_SPI_CSMODE_MODE_AUTO;
+
+ writel(cs_mode, spi->regs + SIFIVE_SPI_REG_CSMODE);
+
+ return 0;
+}
+
+static void sifive_spi_clear_cs(struct sifive_spi *spi)
+{
+ writel(SIFIVE_SPI_CSMODE_MODE_AUTO, spi->regs + SIFIVE_SPI_REG_CSMODE);
+}
+
+static void sifive_spi_prep_transfer(struct sifive_spi *spi,
+ struct dm_spi_slave_plat *slave_plat,
+ u8 *rx_ptr)
+{
+ u32 cr;
+
+ /* Modify the SPI protocol mode */
+ cr = readl(spi->regs + SIFIVE_SPI_REG_FMT);
+
+	/* Bits per word? */
+ cr &= ~SIFIVE_SPI_FMT_LEN_MASK;
+ cr |= SIFIVE_SPI_FMT_LEN(spi->bits_per_word);
+
+ /* LSB first? */
+ cr &= ~SIFIVE_SPI_FMT_ENDIAN;
+ if (slave_plat->mode & SPI_LSB_FIRST)
+ cr |= SIFIVE_SPI_FMT_ENDIAN;
+
+	/* Number of wires? */
+ cr &= ~SIFIVE_SPI_FMT_PROTO_MASK;
+ switch (spi->fmt_proto) {
+ case SIFIVE_SPI_PROTO_QUAD:
+ cr |= SIFIVE_SPI_FMT_PROTO_QUAD;
+ break;
+ case SIFIVE_SPI_PROTO_DUAL:
+ cr |= SIFIVE_SPI_FMT_PROTO_DUAL;
+ break;
+ default:
+ cr |= SIFIVE_SPI_FMT_PROTO_SINGLE;
+ break;
+ }
+
+	/* SPI direction in/out? */
+ cr &= ~SIFIVE_SPI_FMT_DIR;
+ if (!rx_ptr)
+ cr |= SIFIVE_SPI_FMT_DIR;
+
+ writel(cr, spi->regs + SIFIVE_SPI_REG_FMT);
+}
+
+static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
+{
+ u32 data;
+
+ do {
+ data = readl(spi->regs + SIFIVE_SPI_REG_RXDATA);
+ } while (data & SIFIVE_SPI_RXDATA_EMPTY);
+
+ if (rx_ptr)
+ *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK;
+}
+
+static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr)
+{
+ u32 data;
+ u8 tx_data = (tx_ptr) ? *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK :
+ SIFIVE_SPI_TXDATA_DATA_MASK;
+
+ do {
+ data = readl(spi->regs + SIFIVE_SPI_REG_TXDATA);
+ } while (data & SIFIVE_SPI_TXDATA_FULL);
+
+ writel(tx_data, spi->regs + SIFIVE_SPI_REG_TXDATA);
+}
+
+static int sifive_spi_wait(struct sifive_spi *spi, u32 bit)
+{
+ return wait_for_bit_le32(spi->regs + SIFIVE_SPI_REG_IP,
+ bit, true, 100, false);
+}
+
+static int sifive_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct sifive_spi *spi = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ const u8 *tx_ptr = dout;
+ u8 *rx_ptr = din;
+ u32 remaining_len;
+ int ret;
+
+ if (flags & SPI_XFER_BEGIN) {
+ sifive_spi_prep_device(spi, slave_plat);
+
+ ret = sifive_spi_set_cs(spi, slave_plat);
+ if (ret)
+ return ret;
+ }
+
+ sifive_spi_prep_transfer(spi, slave_plat, rx_ptr);
+
+ remaining_len = bitlen / 8;
+
+ while (remaining_len) {
+ unsigned int n_words = min(remaining_len, spi->fifo_depth);
+ unsigned int tx_words, rx_words;
+
+ /* Enqueue n_words for transmission */
+ for (tx_words = 0; tx_words < n_words; tx_words++) {
+ if (!tx_ptr)
+ sifive_spi_tx(spi, NULL);
+ else
+ sifive_spi_tx(spi, tx_ptr++);
+ }
+
+ if (rx_ptr) {
+ /* Wait for transmission + reception to complete */
+ writel(n_words - 1, spi->regs + SIFIVE_SPI_REG_RXMARK);
+ ret = sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM);
+ if (ret)
+ return ret;
+
+ /* Read out all the data from the RX FIFO */
+ for (rx_words = 0; rx_words < n_words; rx_words++)
+ sifive_spi_rx(spi, rx_ptr++);
+ } else {
+ /* Wait for transmission to complete */
+ ret = sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM);
+ if (ret)
+ return ret;
+ }
+
+ remaining_len -= n_words;
+ }
+
+ if (flags & SPI_XFER_END)
+ sifive_spi_clear_cs(spi);
+
+ return 0;
+}
+
+static int sifive_spi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct udevice *dev = slave->dev;
+ struct sifive_spi *spi = dev_get_priv(dev->parent);
+ unsigned long flags = SPI_XFER_BEGIN;
+ u8 opcode = op->cmd.opcode;
+ unsigned int pos = 0;
+ const void *tx_buf = NULL;
+ void *rx_buf = NULL;
+ int op_len, i;
+ int ret;
+
+ if (!op->addr.nbytes && !op->dummy.nbytes && !op->data.nbytes)
+ flags |= SPI_XFER_END;
+
+ spi->fmt_proto = op->cmd.buswidth;
+
+ /* send the opcode */
+ ret = sifive_spi_xfer(dev, 8, (void *)&opcode, NULL, flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to xfer opcode\n");
+ return ret;
+ }
+
+ op_len = op->addr.nbytes + op->dummy.nbytes;
+ u8 op_buf[op_len];
+
+ /* send the addr + dummy */
+ if (op->addr.nbytes) {
+ /* fill address */
+ for (i = 0; i < op->addr.nbytes; i++)
+ op_buf[pos + i] = op->addr.val >>
+ (8 * (op->addr.nbytes - i - 1));
+
+ pos += op->addr.nbytes;
+
+ /* fill dummy */
+ if (op->dummy.nbytes)
+ memset(op_buf + pos, 0xff, op->dummy.nbytes);
+
+ /* make sure to set end flag, if no data bytes */
+ if (!op->data.nbytes)
+ flags |= SPI_XFER_END;
+
+ spi->fmt_proto = op->addr.buswidth;
+
+ ret = sifive_spi_xfer(dev, op_len * 8, op_buf, NULL, flags);
+ if (ret < 0) {
+ dev_err(dev, "failed to xfer addr + dummy\n");
+ return ret;
+ }
+ }
+
+	/* send/receive the data */
+ if (op->data.nbytes) {
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ rx_buf = op->data.buf.in;
+ else
+ tx_buf = op->data.buf.out;
+
+ spi->fmt_proto = op->data.buswidth;
+
+ ret = sifive_spi_xfer(dev, op->data.nbytes * 8,
+ tx_buf, rx_buf, SPI_XFER_END);
+ if (ret) {
+ dev_err(dev, "failed to xfer data\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int sifive_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct sifive_spi *spi = dev_get_priv(bus);
+ u32 scale;
+
+ if (speed > spi->freq)
+ speed = spi->freq;
+
+	/* Configure max speed */
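+	/*
+	 * Illustrative example (not from the original source), assuming the
+	 * usual f_sck = f_in / (2 * (div + 1)) relation for this divider:
+	 * with f_in = 50 MHz and a requested speed of 10 MHz,
+	 * DIV_ROUND_UP(25000000, 10000000) - 1 = 2, so the resulting SCK is
+	 * 50 MHz / 6, roughly 8.33 MHz, the fastest rate not above the
+	 * request.
+	 */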
+ scale = (DIV_ROUND_UP(spi->freq >> 1, speed) - 1)
+ & SIFIVE_SPI_SCKDIV_DIV_MASK;
+ writel(scale, spi->regs + SIFIVE_SPI_REG_SCKDIV);
+
+ return 0;
+}
+
+static int sifive_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct sifive_spi *spi = dev_get_priv(bus);
+ u32 cr;
+
+ /* Switch clock mode bits */
+ cr = readl(spi->regs + SIFIVE_SPI_REG_SCKMODE) &
+ ~SIFIVE_SPI_SCKMODE_MODE_MASK;
+ if (mode & SPI_CPHA)
+ cr |= SIFIVE_SPI_SCKMODE_PHA;
+ if (mode & SPI_CPOL)
+ cr |= SIFIVE_SPI_SCKMODE_POL;
+
+ writel(cr, spi->regs + SIFIVE_SPI_REG_SCKMODE);
+
+ return 0;
+}
+
+static int sifive_spi_cs_info(struct udevice *bus, uint cs,
+ struct spi_cs_info *info)
+{
+ struct sifive_spi *spi = dev_get_priv(bus);
+
+ if (cs >= spi->num_cs)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sifive_spi_init_hw(struct sifive_spi *spi)
+{
+ u32 cs_bits;
+
+ /* probe the number of CS lines */
+ spi->cs_inactive = readl(spi->regs + SIFIVE_SPI_REG_CSDEF);
+ writel(0xffffffffU, spi->regs + SIFIVE_SPI_REG_CSDEF);
+ cs_bits = readl(spi->regs + SIFIVE_SPI_REG_CSDEF);
+ writel(spi->cs_inactive, spi->regs + SIFIVE_SPI_REG_CSDEF);
+ if (!cs_bits) {
+ printf("Could not auto probe CS lines\n");
+ return;
+ }
+
+ spi->num_cs = ilog2(cs_bits) + 1;
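+	/* e.g. cs_bits == 0xf (4 CS lines) gives num_cs = ilog2(0xf) + 1 = 4 */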
+ if (spi->num_cs > SIFIVE_SPI_MAX_CS) {
+ printf("Invalid number of spi slaves\n");
+ return;
+ }
+
+ /* Watermark interrupts are disabled by default */
+ writel(0, spi->regs + SIFIVE_SPI_REG_IE);
+
+ /* Default watermark FIFO threshold values */
+ writel(1, spi->regs + SIFIVE_SPI_REG_TXMARK);
+ writel(0, spi->regs + SIFIVE_SPI_REG_RXMARK);
+
+ /* Set CS/SCK Delays and Inactive Time to defaults */
+ writel(SIFIVE_SPI_DELAY0_CSSCK(1) | SIFIVE_SPI_DELAY0_SCKCS(1),
+ spi->regs + SIFIVE_SPI_REG_DELAY0);
+ writel(SIFIVE_SPI_DELAY1_INTERCS(1) | SIFIVE_SPI_DELAY1_INTERXFR(0),
+ spi->regs + SIFIVE_SPI_REG_DELAY1);
+
+ /* Exit specialized memory-mapped SPI flash mode */
+ writel(0, spi->regs + SIFIVE_SPI_REG_FCTRL);
+}
+
+static int sifive_spi_probe(struct udevice *bus)
+{
+ struct sifive_spi *spi = dev_get_priv(bus);
+ struct clk clkdev;
+ int ret;
+
+ spi->regs = (void *)(ulong)dev_remap_addr(bus);
+ if (!spi->regs)
+ return -ENODEV;
+
+ spi->fifo_depth = dev_read_u32_default(bus,
+ "sifive,fifo-depth",
+ SIFIVE_SPI_DEFAULT_DEPTH);
+
+ spi->bits_per_word = dev_read_u32_default(bus,
+ "sifive,max-bits-per-word",
+ SIFIVE_SPI_DEFAULT_BITS);
+
+ ret = clk_get_by_index(bus, 0, &clkdev);
+ if (ret)
+ return ret;
+ spi->freq = clk_get_rate(&clkdev);
+
+ /* init the sifive spi hw */
+ sifive_spi_init_hw(spi);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops sifive_spi_mem_ops = {
+ .exec_op = sifive_spi_exec_op,
+};
+
+static const struct dm_spi_ops sifive_spi_ops = {
+ .xfer = sifive_spi_xfer,
+ .set_speed = sifive_spi_set_speed,
+ .set_mode = sifive_spi_set_mode,
+ .cs_info = sifive_spi_cs_info,
+ .mem_ops = &sifive_spi_mem_ops,
+};
+
+static const struct udevice_id sifive_spi_ids[] = {
+ { .compatible = "sifive,spi0" },
+ { }
+};
+
+U_BOOT_DRIVER(sifive_spi) = {
+ .name = "sifive_spi",
+ .id = UCLASS_SPI,
+ .of_match = sifive_spi_ids,
+ .ops = &sifive_spi_ops,
+ .priv_auto = sizeof(struct sifive_spi),
+ .probe = sifive_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/spi-sunxi.c b/roms/u-boot/drivers/spi/spi-sunxi.c
new file mode 100644
index 000000000..4ca5d3a93
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi-sunxi.c
@@ -0,0 +1,638 @@
+/*
+ * (C) Copyright 2017 Whitebox Systems / Northend Systems B.V.
+ * S.J.R. van Schaik <stephan@whiteboxsystems.nl>
+ * M.B.W. Wajer <merlijn@whiteboxsystems.nl>
+ *
+ * (C) Copyright 2017 Olimex Ltd..
+ * Stefan Mavrodiev <stefan@olimex.com>
+ *
+ * Based on linux spi driver. Original copyright follows:
+ * linux/drivers/spi/spi-sun4i.c
+ *
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <spi.h>
+#include <errno.h>
+#include <fdt_support.h>
+#include <reset.h>
+#include <wait_bit.h>
+#include <asm/global_data.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+
+#include <asm/bitops.h>
+#include <asm/gpio.h>
+#include <asm/io.h>
+
+#include <linux/iopoll.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* sun4i spi registers */
+#define SUN4I_RXDATA_REG 0x00
+#define SUN4I_TXDATA_REG 0x04
+#define SUN4I_CTL_REG 0x08
+#define SUN4I_CLK_CTL_REG 0x1c
+#define SUN4I_BURST_CNT_REG 0x20
+#define SUN4I_XMIT_CNT_REG 0x24
+#define SUN4I_FIFO_STA_REG 0x28
+
+/* sun6i spi registers */
+#define SUN6I_GBL_CTL_REG 0x04
+#define SUN6I_TFR_CTL_REG 0x08
+#define SUN6I_FIFO_CTL_REG 0x18
+#define SUN6I_FIFO_STA_REG 0x1c
+#define SUN6I_CLK_CTL_REG 0x24
+#define SUN6I_BURST_CNT_REG 0x30
+#define SUN6I_XMIT_CNT_REG 0x34
+#define SUN6I_BURST_CTL_REG 0x38
+#define SUN6I_TXDATA_REG 0x200
+#define SUN6I_RXDATA_REG 0x300
+
+/* sun spi bits */
+#define SUN4I_CTL_ENABLE BIT(0)
+#define SUN4I_CTL_MASTER BIT(1)
+#define SUN4I_CLK_CTL_CDR2_MASK 0xff
+#define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK)
+#define SUN4I_CLK_CTL_CDR1_MASK 0xf
+#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN4I_CLK_CTL_DRS BIT(12)
+#define SUN4I_MAX_XFER_SIZE 0xffffff
+#define SUN4I_BURST_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & SUN4I_MAX_XFER_SIZE)
+#define SUN4I_FIFO_STA_RF_CNT_BITS 0
+
+#define SUN4I_SPI_MAX_RATE 24000000
+#define SUN4I_SPI_MIN_RATE 3000
+#define SUN4I_SPI_DEFAULT_RATE 1000000
+#define SUN4I_SPI_TIMEOUT_US 1000000
+
+#define SPI_REG(priv, reg) ((priv)->base + \
+ (priv)->variant->regs[reg])
+#define SPI_BIT(priv, bit) ((priv)->variant->bits[bit])
+#define SPI_CS(priv, cs) (((cs) << SPI_BIT(priv, SPI_TCR_CS_SEL)) & \
+ SPI_BIT(priv, SPI_TCR_CS_MASK))
+
+/* sun spi register set */
+enum sun4i_spi_regs {
+ SPI_GCR,
+ SPI_TCR,
+ SPI_FCR,
+ SPI_FSR,
+ SPI_CCR,
+ SPI_BC,
+ SPI_TC,
+ SPI_BCTL,
+ SPI_TXD,
+ SPI_RXD,
+};
+
+/* sun spi register bits */
+enum sun4i_spi_bits {
+ SPI_GCR_TP,
+ SPI_GCR_SRST,
+ SPI_TCR_CPHA,
+ SPI_TCR_CPOL,
+ SPI_TCR_CS_ACTIVE_LOW,
+ SPI_TCR_CS_SEL,
+ SPI_TCR_CS_MASK,
+ SPI_TCR_XCH,
+ SPI_TCR_CS_MANUAL,
+ SPI_TCR_CS_LEVEL,
+ SPI_FCR_TF_RST,
+ SPI_FCR_RF_RST,
+ SPI_FSR_RF_CNT_MASK,
+};
+
+struct sun4i_spi_variant {
+ const unsigned long *regs;
+ const u32 *bits;
+ u32 fifo_depth;
+ bool has_soft_reset;
+ bool has_burst_ctl;
+};
+
+struct sun4i_spi_plat {
+ struct sun4i_spi_variant *variant;
+ u32 base;
+ u32 max_hz;
+};
+
+struct sun4i_spi_priv {
+ struct sun4i_spi_variant *variant;
+ struct clk clk_ahb, clk_mod;
+ struct reset_ctl reset;
+ u32 base;
+ u32 freq;
+ u32 mode;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+};
+
+static inline void sun4i_spi_drain_fifo(struct sun4i_spi_priv *priv, int len)
+{
+ u8 byte;
+
+ while (len--) {
+ byte = readb(SPI_REG(priv, SPI_RXD));
+ if (priv->rx_buf)
+ *priv->rx_buf++ = byte;
+ }
+}
+
+static inline void sun4i_spi_fill_fifo(struct sun4i_spi_priv *priv, int len)
+{
+ u8 byte;
+
+ while (len--) {
+ byte = priv->tx_buf ? *priv->tx_buf++ : 0;
+ writeb(byte, SPI_REG(priv, SPI_TXD));
+ }
+}
+
+static void sun4i_spi_set_cs(struct udevice *bus, u8 cs, bool enable)
+{
+ struct sun4i_spi_priv *priv = dev_get_priv(bus);
+ u32 reg;
+
+ reg = readl(SPI_REG(priv, SPI_TCR));
+
+ reg &= ~SPI_BIT(priv, SPI_TCR_CS_MASK);
+ reg |= SPI_CS(priv, cs);
+
+ if (enable)
+ reg &= ~SPI_BIT(priv, SPI_TCR_CS_LEVEL);
+ else
+ reg |= SPI_BIT(priv, SPI_TCR_CS_LEVEL);
+
+ writel(reg, SPI_REG(priv, SPI_TCR));
+}
+
+static int sun4i_spi_parse_pins(struct udevice *dev)
+{
+ const void *fdt = gd->fdt_blob;
+ const char *pin_name;
+ const fdt32_t *list;
+ u32 phandle;
+ int drive, pull = 0, pin, i;
+ int offset;
+ int size;
+
+ list = fdt_getprop(fdt, dev_of_offset(dev), "pinctrl-0", &size);
+ if (!list) {
+ printf("WARNING: sun4i_spi: cannot find pinctrl-0 node\n");
+ return -EINVAL;
+ }
+
+ while (size) {
+ phandle = fdt32_to_cpu(*list++);
+ size -= sizeof(*list);
+
+ offset = fdt_node_offset_by_phandle(fdt, phandle);
+ if (offset < 0)
+ return offset;
+
+ drive = fdt_getprop_u32_default_node(fdt, offset, 0,
+ "drive-strength", 0);
+ if (drive) {
+ if (drive <= 10)
+ drive = 0;
+ else if (drive <= 20)
+ drive = 1;
+ else if (drive <= 30)
+ drive = 2;
+ else
+ drive = 3;
+ } else {
+ drive = fdt_getprop_u32_default_node(fdt, offset, 0,
+ "allwinner,drive",
+ 0);
+ drive = min(drive, 3);
+ }
+
+ if (fdt_get_property(fdt, offset, "bias-disable", NULL))
+ pull = 0;
+ else if (fdt_get_property(fdt, offset, "bias-pull-up", NULL))
+ pull = 1;
+ else if (fdt_get_property(fdt, offset, "bias-pull-down", NULL))
+ pull = 2;
+ else
+ pull = fdt_getprop_u32_default_node(fdt, offset, 0,
+ "allwinner,pull",
+ 0);
+ pull = min(pull, 2);
+
+ for (i = 0; ; i++) {
+ pin_name = fdt_stringlist_get(fdt, offset,
+ "pins", i, NULL);
+ if (!pin_name) {
+ pin_name = fdt_stringlist_get(fdt, offset,
+ "allwinner,pins",
+ i, NULL);
+ if (!pin_name)
+ break;
+ }
+
+ pin = name_to_gpio(pin_name);
+ if (pin < 0)
+ break;
+
+ if (IS_ENABLED(CONFIG_MACH_SUN50I))
+ sunxi_gpio_set_cfgpin(pin, SUN50I_GPC_SPI0);
+ else
+ sunxi_gpio_set_cfgpin(pin, SUNXI_GPC_SPI0);
+ sunxi_gpio_set_drv(pin, drive);
+ sunxi_gpio_set_pull(pin, pull);
+ }
+ }
+ return 0;
+}
+
+static inline int sun4i_spi_set_clock(struct udevice *dev, bool enable)
+{
+ struct sun4i_spi_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ if (!enable) {
+ clk_disable(&priv->clk_ahb);
+ clk_disable(&priv->clk_mod);
+ if (reset_valid(&priv->reset))
+ reset_assert(&priv->reset);
+ return 0;
+ }
+
+ ret = clk_enable(&priv->clk_ahb);
+ if (ret) {
+ dev_err(dev, "failed to enable ahb clock (ret=%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_enable(&priv->clk_mod);
+ if (ret) {
+ dev_err(dev, "failed to enable mod clock (ret=%d)\n", ret);
+ goto err_ahb;
+ }
+
+ if (reset_valid(&priv->reset)) {
+ ret = reset_deassert(&priv->reset);
+ if (ret) {
+ dev_err(dev, "failed to deassert reset\n");
+ goto err_mod;
+ }
+ }
+
+ return 0;
+
+err_mod:
+ clk_disable(&priv->clk_mod);
+err_ahb:
+ clk_disable(&priv->clk_ahb);
+ return ret;
+}
+
+static int sun4i_spi_claim_bus(struct udevice *dev)
+{
+ struct sun4i_spi_priv *priv = dev_get_priv(dev->parent);
+ int ret;
+
+ ret = sun4i_spi_set_clock(dev->parent, true);
+ if (ret)
+ return ret;
+
+ setbits_le32(SPI_REG(priv, SPI_GCR), SUN4I_CTL_ENABLE |
+ SUN4I_CTL_MASTER | SPI_BIT(priv, SPI_GCR_TP));
+
+ if (priv->variant->has_soft_reset)
+ setbits_le32(SPI_REG(priv, SPI_GCR),
+ SPI_BIT(priv, SPI_GCR_SRST));
+
+ setbits_le32(SPI_REG(priv, SPI_TCR), SPI_BIT(priv, SPI_TCR_CS_MANUAL) |
+ SPI_BIT(priv, SPI_TCR_CS_ACTIVE_LOW));
+
+ return 0;
+}
+
+static int sun4i_spi_release_bus(struct udevice *dev)
+{
+ struct sun4i_spi_priv *priv = dev_get_priv(dev->parent);
+
+ clrbits_le32(SPI_REG(priv, SPI_GCR), SUN4I_CTL_ENABLE);
+
+ sun4i_spi_set_clock(dev->parent, false);
+
+ return 0;
+}
+
+static int sun4i_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct sun4i_spi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ u32 len = bitlen / 8;
+ u32 rx_fifocnt;
+ u8 nbytes;
+ int ret;
+
+ priv->tx_buf = dout;
+ priv->rx_buf = din;
+
+ if (bitlen % 8) {
+ debug("%s: non byte-aligned SPI transfer.\n", __func__);
+ return -ENAVAIL;
+ }
+
+ if (flags & SPI_XFER_BEGIN)
+ sun4i_spi_set_cs(bus, slave_plat->cs, true);
+
+ /* Reset FIFOs */
+ setbits_le32(SPI_REG(priv, SPI_FCR), SPI_BIT(priv, SPI_FCR_RF_RST) |
+ SPI_BIT(priv, SPI_FCR_TF_RST));
+
+ while (len) {
+ /* Setup the transfer now... */
+ nbytes = min(len, (priv->variant->fifo_depth - 1));
+
+ /* Setup the counters */
+ writel(SUN4I_BURST_CNT(nbytes), SPI_REG(priv, SPI_BC));
+ writel(SUN4I_XMIT_CNT(nbytes), SPI_REG(priv, SPI_TC));
+
+ if (priv->variant->has_burst_ctl)
+ writel(SUN4I_BURST_CNT(nbytes),
+ SPI_REG(priv, SPI_BCTL));
+
+ /* Fill the TX FIFO */
+ sun4i_spi_fill_fifo(priv, nbytes);
+
+ /* Start the transfer */
+ setbits_le32(SPI_REG(priv, SPI_TCR),
+ SPI_BIT(priv, SPI_TCR_XCH));
+
+		/* Wait until the RX FIFO holds the whole burst */
+ ret = readl_poll_timeout(SPI_REG(priv, SPI_FSR),
+ rx_fifocnt,
+ (((rx_fifocnt &
+ SPI_BIT(priv, SPI_FSR_RF_CNT_MASK)) >>
+ SUN4I_FIFO_STA_RF_CNT_BITS) >= nbytes),
+ SUN4I_SPI_TIMEOUT_US);
+ if (ret < 0) {
+ printf("ERROR: sun4i_spi: Timeout transferring data\n");
+ sun4i_spi_set_cs(bus, slave_plat->cs, false);
+ return ret;
+ }
+
+ /* Drain the RX FIFO */
+ sun4i_spi_drain_fifo(priv, nbytes);
+
+ len -= nbytes;
+ }
+
+ if (flags & SPI_XFER_END)
+ sun4i_spi_set_cs(bus, slave_plat->cs, false);
+
+ return 0;
+}
+
+static int sun4i_spi_set_speed(struct udevice *dev, uint speed)
+{
+ struct sun4i_spi_plat *plat = dev_get_plat(dev);
+ struct sun4i_spi_priv *priv = dev_get_priv(dev);
+ unsigned int div;
+ u32 reg;
+
+ if (speed > plat->max_hz)
+ speed = plat->max_hz;
+
+ if (speed < SUN4I_SPI_MIN_RATE)
+ speed = SUN4I_SPI_MIN_RATE;
+ /*
+ * Setup clock divider.
+ *
+	 * We have two choices here. Either we can use the clock
+	 * divide ratio 1 (CDR1), which is calculated with this formula:
+ * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
+ * Or we can use CDR2, which is calculated with the formula:
+ * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+ * Whether we use the former or the latter is set through the
+ * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
+ */
+
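+	/*
+	 * Illustrative example (not part of the original comment), assuming a
+	 * 24 MHz module clock: for the default 1 MHz request,
+	 * div = 24 MHz / (2 * 1 MHz) = 12, which fits in CDR2, so cdr2 = 11
+	 * and SPI_CLK = 24 MHz / (2 * (11 + 1)) = 1 MHz exactly.
+	 */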
+ div = SUN4I_SPI_MAX_RATE / (2 * speed);
+ reg = readl(SPI_REG(priv, SPI_CCR));
+
+ if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg &= ~(SUN4I_CLK_CTL_CDR2_MASK | SUN4I_CLK_CTL_DRS);
+ reg |= SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
+ } else {
+ div = __ilog2(SUN4I_SPI_MAX_RATE) - __ilog2(speed);
+ reg &= ~((SUN4I_CLK_CTL_CDR1_MASK << 8) | SUN4I_CLK_CTL_DRS);
+ reg |= SUN4I_CLK_CTL_CDR1(div);
+ }
+
+ priv->freq = speed;
+ writel(reg, SPI_REG(priv, SPI_CCR));
+
+ return 0;
+}
+
+static int sun4i_spi_set_mode(struct udevice *dev, uint mode)
+{
+ struct sun4i_spi_priv *priv = dev_get_priv(dev);
+ u32 reg;
+
+ reg = readl(SPI_REG(priv, SPI_TCR));
+ reg &= ~(SPI_BIT(priv, SPI_TCR_CPOL) | SPI_BIT(priv, SPI_TCR_CPHA));
+
+ if (mode & SPI_CPOL)
+ reg |= SPI_BIT(priv, SPI_TCR_CPOL);
+
+ if (mode & SPI_CPHA)
+ reg |= SPI_BIT(priv, SPI_TCR_CPHA);
+
+ priv->mode = mode;
+ writel(reg, SPI_REG(priv, SPI_TCR));
+
+ return 0;
+}
+
+static const struct dm_spi_ops sun4i_spi_ops = {
+ .claim_bus = sun4i_spi_claim_bus,
+ .release_bus = sun4i_spi_release_bus,
+ .xfer = sun4i_spi_xfer,
+ .set_speed = sun4i_spi_set_speed,
+ .set_mode = sun4i_spi_set_mode,
+};
+
+static int sun4i_spi_probe(struct udevice *bus)
+{
+ struct sun4i_spi_plat *plat = dev_get_plat(bus);
+ struct sun4i_spi_priv *priv = dev_get_priv(bus);
+ int ret;
+
+ ret = clk_get_by_name(bus, "ahb", &priv->clk_ahb);
+ if (ret) {
+ dev_err(bus, "failed to get ahb clock\n");
+ return ret;
+ }
+
+ ret = clk_get_by_name(bus, "mod", &priv->clk_mod);
+ if (ret) {
+ dev_err(bus, "failed to get mod clock\n");
+ return ret;
+ }
+
+ ret = reset_get_by_index(bus, 0, &priv->reset);
+ if (ret && ret != -ENOENT) {
+ dev_err(bus, "failed to get reset\n");
+ return ret;
+ }
+
+ sun4i_spi_parse_pins(bus);
+
+ priv->variant = plat->variant;
+ priv->base = plat->base;
+ priv->freq = plat->max_hz;
+
+ return 0;
+}
+
+static int sun4i_spi_of_to_plat(struct udevice *bus)
+{
+ struct sun4i_spi_plat *plat = dev_get_plat(bus);
+ int node = dev_of_offset(bus);
+
+ plat->base = dev_read_addr(bus);
+ plat->variant = (struct sun4i_spi_variant *)dev_get_driver_data(bus);
+ plat->max_hz = fdtdec_get_int(gd->fdt_blob, node,
+ "spi-max-frequency",
+ SUN4I_SPI_DEFAULT_RATE);
+
+ if (plat->max_hz > SUN4I_SPI_MAX_RATE)
+ plat->max_hz = SUN4I_SPI_MAX_RATE;
+
+ return 0;
+}
+
+static const unsigned long sun4i_spi_regs[] = {
+ [SPI_GCR] = SUN4I_CTL_REG,
+ [SPI_TCR] = SUN4I_CTL_REG,
+ [SPI_FCR] = SUN4I_CTL_REG,
+ [SPI_FSR] = SUN4I_FIFO_STA_REG,
+ [SPI_CCR] = SUN4I_CLK_CTL_REG,
+ [SPI_BC] = SUN4I_BURST_CNT_REG,
+ [SPI_TC] = SUN4I_XMIT_CNT_REG,
+ [SPI_TXD] = SUN4I_TXDATA_REG,
+ [SPI_RXD] = SUN4I_RXDATA_REG,
+};
+
+static const u32 sun4i_spi_bits[] = {
+ [SPI_GCR_TP] = BIT(18),
+ [SPI_TCR_CPHA] = BIT(2),
+ [SPI_TCR_CPOL] = BIT(3),
+ [SPI_TCR_CS_ACTIVE_LOW] = BIT(4),
+ [SPI_TCR_XCH] = BIT(10),
+ [SPI_TCR_CS_SEL] = 12,
+ [SPI_TCR_CS_MASK] = 0x3000,
+ [SPI_TCR_CS_MANUAL] = BIT(16),
+ [SPI_TCR_CS_LEVEL] = BIT(17),
+ [SPI_FCR_TF_RST] = BIT(8),
+ [SPI_FCR_RF_RST] = BIT(9),
+ [SPI_FSR_RF_CNT_MASK] = GENMASK(6, 0),
+};
+
+static const unsigned long sun6i_spi_regs[] = {
+ [SPI_GCR] = SUN6I_GBL_CTL_REG,
+ [SPI_TCR] = SUN6I_TFR_CTL_REG,
+ [SPI_FCR] = SUN6I_FIFO_CTL_REG,
+ [SPI_FSR] = SUN6I_FIFO_STA_REG,
+ [SPI_CCR] = SUN6I_CLK_CTL_REG,
+ [SPI_BC] = SUN6I_BURST_CNT_REG,
+ [SPI_TC] = SUN6I_XMIT_CNT_REG,
+ [SPI_BCTL] = SUN6I_BURST_CTL_REG,
+ [SPI_TXD] = SUN6I_TXDATA_REG,
+ [SPI_RXD] = SUN6I_RXDATA_REG,
+};
+
+static const u32 sun6i_spi_bits[] = {
+ [SPI_GCR_TP] = BIT(7),
+ [SPI_GCR_SRST] = BIT(31),
+ [SPI_TCR_CPHA] = BIT(0),
+ [SPI_TCR_CPOL] = BIT(1),
+ [SPI_TCR_CS_ACTIVE_LOW] = BIT(2),
+ [SPI_TCR_CS_SEL] = 4,
+ [SPI_TCR_CS_MASK] = 0x30,
+ [SPI_TCR_CS_MANUAL] = BIT(6),
+ [SPI_TCR_CS_LEVEL] = BIT(7),
+ [SPI_TCR_XCH] = BIT(31),
+ [SPI_FCR_RF_RST] = BIT(15),
+ [SPI_FCR_TF_RST] = BIT(31),
+ [SPI_FSR_RF_CNT_MASK] = GENMASK(7, 0),
+};
+
+static const struct sun4i_spi_variant sun4i_a10_spi_variant = {
+ .regs = sun4i_spi_regs,
+ .bits = sun4i_spi_bits,
+ .fifo_depth = 64,
+};
+
+static const struct sun4i_spi_variant sun6i_a31_spi_variant = {
+ .regs = sun6i_spi_regs,
+ .bits = sun6i_spi_bits,
+ .fifo_depth = 128,
+ .has_soft_reset = true,
+ .has_burst_ctl = true,
+};
+
+static const struct sun4i_spi_variant sun8i_h3_spi_variant = {
+ .regs = sun6i_spi_regs,
+ .bits = sun6i_spi_bits,
+ .fifo_depth = 64,
+ .has_soft_reset = true,
+ .has_burst_ctl = true,
+};
+
+static const struct udevice_id sun4i_spi_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-spi",
+ .data = (ulong)&sun4i_a10_spi_variant,
+ },
+ {
+ .compatible = "allwinner,sun6i-a31-spi",
+ .data = (ulong)&sun6i_a31_spi_variant,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-spi",
+ .data = (ulong)&sun8i_h3_spi_variant,
+ },
+ { /* sentinel */ }
+};
+
+U_BOOT_DRIVER(sun4i_spi) = {
+ .name = "sun4i_spi",
+ .id = UCLASS_SPI,
+ .of_match = sun4i_spi_ids,
+ .ops = &sun4i_spi_ops,
+ .of_to_plat = sun4i_spi_of_to_plat,
+ .plat_auto = sizeof(struct sun4i_spi_plat),
+ .priv_auto = sizeof(struct sun4i_spi_priv),
+ .probe = sun4i_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/spi-uclass.c b/roms/u-boot/drivers/spi/spi-uclass.c
new file mode 100644
index 000000000..d867b2780
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi-uclass.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2014 Google, Inc
+ */
+
+#define LOG_CATEGORY UCLASS_SPI
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <dm/device_compat.h>
+#include <asm/global_data.h>
+#include <dm/device-internal.h>
+#include <dm/uclass-internal.h>
+#include <dm/lists.h>
+#include <dm/util.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define SPI_DEFAULT_SPEED_HZ 100000
+
+static int spi_set_speed_mode(struct udevice *bus, int speed, int mode)
+{
+ struct dm_spi_ops *ops;
+ int ret;
+
+ ops = spi_get_ops(bus);
+ if (ops->set_speed)
+ ret = ops->set_speed(bus, speed);
+ else
+ ret = -EINVAL;
+ if (ret) {
+ dev_err(bus, "Cannot set speed (err=%d)\n", ret);
+ return ret;
+ }
+
+ if (ops->set_mode)
+ ret = ops->set_mode(bus, mode);
+ else
+ ret = -EINVAL;
+ if (ret) {
+ dev_err(bus, "Cannot set mode (err=%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dm_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+ struct dm_spi_bus *spi = dev_get_uclass_priv(bus);
+ struct spi_slave *slave = dev_get_parent_priv(dev);
+ uint speed, mode;
+
+ speed = slave->max_hz;
+ mode = slave->mode;
+
+ if (spi->max_hz) {
+ if (speed)
+ speed = min(speed, spi->max_hz);
+ else
+ speed = spi->max_hz;
+ }
+ if (!speed)
+ speed = SPI_DEFAULT_SPEED_HZ;
+
+ if (speed != spi->speed || mode != spi->mode) {
+ int ret = spi_set_speed_mode(bus, speed, slave->mode);
+
+ if (ret)
+ return log_ret(ret);
+
+ spi->speed = speed;
+ spi->mode = mode;
+ }
+
+ return log_ret(ops->claim_bus ? ops->claim_bus(dev) : 0);
+}
+
+void dm_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+
+ if (ops->release_bus)
+ ops->release_bus(dev);
+}
+
+int dm_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+
+ if (bus->uclass->uc_drv->id != UCLASS_SPI)
+ return -EOPNOTSUPP;
+ if (!ops->xfer)
+ return -ENOSYS;
+
+ return ops->xfer(dev, bitlen, dout, din, flags);
+}
+
+int dm_spi_get_mmap(struct udevice *dev, ulong *map_basep, uint *map_sizep,
+ uint *offsetp)
+{
+ struct udevice *bus = dev->parent;
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+
+ if (bus->uclass->uc_drv->id != UCLASS_SPI)
+ return -EOPNOTSUPP;
+ if (!ops->get_mmap)
+ return -ENOSYS;
+
+ return ops->get_mmap(dev, map_basep, map_sizep, offsetp);
+}
+
+int spi_claim_bus(struct spi_slave *slave)
+{
+ return log_ret(dm_spi_claim_bus(slave->dev));
+}
+
+void spi_release_bus(struct spi_slave *slave)
+{
+ dm_spi_release_bus(slave->dev);
+}
+
+int spi_xfer(struct spi_slave *slave, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ return dm_spi_xfer(slave->dev, bitlen, dout, din, flags);
+}
+
+int spi_write_then_read(struct spi_slave *slave, const u8 *opcode,
+ size_t n_opcode, const u8 *txbuf, u8 *rxbuf,
+ size_t n_buf)
+{
+ unsigned long flags = SPI_XFER_BEGIN;
+ int ret;
+
+ if (n_buf == 0)
+ flags |= SPI_XFER_END;
+
+ ret = spi_xfer(slave, n_opcode * 8, opcode, NULL, flags);
+ if (ret) {
+ dev_dbg(slave->dev,
+ "spi: failed to send command (%zu bytes): %d\n",
+ n_opcode, ret);
+ } else if (n_buf != 0) {
+ ret = spi_xfer(slave, n_buf * 8, txbuf, rxbuf, SPI_XFER_END);
+ if (ret)
+ dev_dbg(slave->dev,
+ "spi: failed to transfer %zu bytes of data: %d\n",
+ n_buf, ret);
+ }
+
+ return ret;
+}
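+
+/*
+ * Illustrative usage (not part of the original source): a typical
+ * command-then-response sequence, such as reading a flash JEDEC ID,
+ * could look like this once the bus has been claimed:
+ *
+ *	u8 cmd = 0x9f, id[3];
+ *	int ret = spi_write_then_read(slave, &cmd, 1, NULL, id, sizeof(id));
+ */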
+
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+static int spi_child_post_bind(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+
+ if (!dev_has_ofnode(dev))
+ return 0;
+
+ return spi_slave_of_to_plat(dev, plat);
+}
+#endif
+
+static int spi_post_probe(struct udevice *bus)
+{
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+ struct dm_spi_bus *spi = dev_get_uclass_priv(bus);
+
+ spi->max_hz = dev_read_u32_default(bus, "spi-max-frequency", 0);
+#endif
+#if defined(CONFIG_NEEDS_MANUAL_RELOC)
+ struct dm_spi_ops *ops = spi_get_ops(bus);
+ static int reloc_done;
+
+ if (!reloc_done) {
+ if (ops->claim_bus)
+ ops->claim_bus += gd->reloc_off;
+ if (ops->release_bus)
+ ops->release_bus += gd->reloc_off;
+ if (ops->set_wordlen)
+ ops->set_wordlen += gd->reloc_off;
+ if (ops->xfer)
+ ops->xfer += gd->reloc_off;
+ if (ops->set_speed)
+ ops->set_speed += gd->reloc_off;
+ if (ops->set_mode)
+ ops->set_mode += gd->reloc_off;
+ if (ops->cs_info)
+ ops->cs_info += gd->reloc_off;
+ if (ops->mem_ops) {
+ struct spi_controller_mem_ops *mem_ops =
+ (struct spi_controller_mem_ops *)ops->mem_ops;
+ if (mem_ops->adjust_op_size)
+ mem_ops->adjust_op_size += gd->reloc_off;
+ if (mem_ops->supports_op)
+ mem_ops->supports_op += gd->reloc_off;
+ if (mem_ops->exec_op)
+ mem_ops->exec_op += gd->reloc_off;
+ }
+ reloc_done++;
+ }
+#endif
+
+ return 0;
+}
+
+static int spi_child_pre_probe(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+ struct spi_slave *slave = dev_get_parent_priv(dev);
+
+ /*
+ * This is needed because we pass struct spi_slave around the place
+ * instead slave->dev (a struct udevice). So we have to have some
+	 * instead of slave->dev (a struct udevice). So we have to have some
+ * change the SPI API to use udevice instead of spi_slave, we can
+ * drop this.
+ */
+ slave->dev = dev;
+
+ slave->max_hz = plat->max_hz;
+ slave->mode = plat->mode;
+ slave->wordlen = SPI_DEFAULT_WORDLEN;
+
+ return 0;
+}
+
+int spi_chip_select(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *plat = dev_get_parent_plat(dev);
+
+ return plat ? plat->cs : -ENOENT;
+}
+
+int spi_find_chip_select(struct udevice *bus, int cs, struct udevice **devp)
+{
+ struct dm_spi_ops *ops;
+ struct spi_cs_info info;
+ struct udevice *dev;
+ int ret;
+
+ /*
+ * Ask the driver. For the moment we don't have CS info.
+	 * When we do, we could provide the driver with a helper function
+ * to figure out what chip selects are valid, or just handle the
+ * request.
+ */
+ ops = spi_get_ops(bus);
+ if (ops->cs_info) {
+ ret = ops->cs_info(bus, cs, &info);
+ } else {
+ /*
+ * We could assume there is at least one valid chip select.
+ * The driver didn't care enough to tell us.
+ */
+ ret = 0;
+ }
+
+ if (ret) {
+ dev_err(bus, "Invalid cs %d (err=%d)\n", cs, ret);
+ return ret;
+ }
+
+ for (device_find_first_child(bus, &dev); dev;
+ device_find_next_child(&dev)) {
+ struct dm_spi_slave_plat *plat;
+
+ plat = dev_get_parent_plat(dev);
+ dev_dbg(bus, "%s: plat=%p, cs=%d\n", __func__, plat, plat->cs);
+ if (plat->cs == cs) {
+ *devp = dev;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+int spi_cs_is_valid(unsigned int busnum, unsigned int cs)
+{
+ struct spi_cs_info info;
+ struct udevice *bus;
+ int ret;
+
+ ret = uclass_find_device_by_seq(UCLASS_SPI, busnum, &bus);
+ if (ret) {
+ log_debug("%s: No bus %d\n", __func__, busnum);
+ return ret;
+ }
+
+ return spi_cs_info(bus, cs, &info);
+}
+
+int spi_cs_info(struct udevice *bus, uint cs, struct spi_cs_info *info)
+{
+ struct spi_cs_info local_info;
+ int ret;
+
+ if (!info)
+ info = &local_info;
+
+ /* If there is a device attached, return it */
+ info->dev = NULL;
+ ret = spi_find_chip_select(bus, cs, &info->dev);
+ return ret == -ENODEV ? 0 : ret;
+}
+
+int spi_find_bus_and_cs(int busnum, int cs, struct udevice **busp,
+ struct udevice **devp)
+{
+ struct udevice *bus, *dev;
+ int ret;
+
+ ret = uclass_find_device_by_seq(UCLASS_SPI, busnum, &bus);
+ if (ret) {
+ log_debug("%s: No bus %d\n", __func__, busnum);
+ return ret;
+ }
+ ret = spi_find_chip_select(bus, cs, &dev);
+ if (ret) {
+ dev_dbg(bus, "%s: No cs %d\n", __func__, cs);
+ return ret;
+ }
+ *busp = bus;
+ *devp = dev;
+
+ return ret;
+}
+
+int spi_get_bus_and_cs(int busnum, int cs, int speed, int mode,
+ const char *drv_name, const char *dev_name,
+ struct udevice **busp, struct spi_slave **devp)
+{
+ struct udevice *bus, *dev;
+ struct dm_spi_slave_plat *plat;
+ struct dm_spi_bus *bus_data;
+ struct spi_slave *slave;
+ bool created = false;
+ int ret;
+
+#if CONFIG_IS_ENABLED(OF_PLATDATA)
+ ret = uclass_first_device_err(UCLASS_SPI, &bus);
+#else
+ ret = uclass_get_device_by_seq(UCLASS_SPI, busnum, &bus);
+#endif
+ if (ret) {
+ log_err("Invalid bus %d (err=%d)\n", busnum, ret);
+ return ret;
+ }
+ ret = spi_find_chip_select(bus, cs, &dev);
+
+ /*
+ * If there is no such device, create one automatically. This means
+ * that we don't need a device tree node or platform data for the
+ * SPI flash chip - we will bind to the correct driver.
+ */
+ if (ret == -ENODEV && drv_name) {
+ dev_dbg(bus, "%s: Binding new device '%s', busnum=%d, cs=%d, driver=%s\n",
+ __func__, dev_name, busnum, cs, drv_name);
+ ret = device_bind_driver(bus, drv_name, dev_name, &dev);
+ if (ret) {
+ dev_dbg(bus, "%s: Unable to bind driver (ret=%d)\n",
+ __func__, ret);
+ return ret;
+ }
+ plat = dev_get_parent_plat(dev);
+ plat->cs = cs;
+ if (speed) {
+ plat->max_hz = speed;
+ } else {
+ dev_warn(bus,
+ "Warning: SPI speed fallback to %u kHz\n",
+ SPI_DEFAULT_SPEED_HZ / 1000);
+ plat->max_hz = SPI_DEFAULT_SPEED_HZ;
+ }
+ plat->mode = mode;
+ created = true;
+ } else if (ret) {
+ dev_err(bus, "Invalid chip select %d:%d (err=%d)\n", busnum, cs, ret);
+ return ret;
+ } else if (dev) {
+ plat = dev_get_parent_plat(dev);
+ }
+
+ if (!device_active(dev)) {
+ struct spi_slave *slave;
+
+ ret = device_probe(dev);
+ if (ret)
+ goto err;
+ slave = dev_get_parent_priv(dev);
+ slave->dev = dev;
+ }
+
+ slave = dev_get_parent_priv(dev);
+ bus_data = dev_get_uclass_priv(bus);
+
+ /*
+ * In case the operation speed is not yet established by
+ * dm_spi_claim_bus() ensure the bus is configured properly.
+ */
+ if (!bus_data->speed) {
+ ret = spi_claim_bus(slave);
+ if (ret)
+ goto err;
+ }
+
+ /* In case bus frequency or mode changed, update it. */
+ if ((speed && bus_data->speed && bus_data->speed != speed) ||
+ (plat && plat->mode != mode)) {
+ ret = spi_set_speed_mode(bus, speed, mode);
+ if (ret)
+ goto err_speed_mode;
+ }
+
+ *busp = bus;
+ *devp = slave;
+ log_debug("%s: bus=%p, slave=%p\n", __func__, bus, *devp);
+
+ return 0;
+
+err_speed_mode:
+ spi_release_bus(slave);
+err:
+ log_debug("%s: Error path, created=%d, device '%s'\n", __func__,
+ created, dev->name);
+ if (created) {
+ device_remove(dev, DM_REMOVE_NORMAL);
+ device_unbind(dev);
+ }
+
+ return ret;
+}
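+
+/*
+ * Illustrative usage (not part of the original source): callers that want a
+ * slave bound on the fly pass a driver name and a device name, e.g.
+ *
+ *	ret = spi_get_bus_and_cs(busnum, cs, 1000000, SPI_MODE_0,
+ *				 "spi_flash_std", "flash@0", &bus, &slave);
+ *
+ * "spi_flash_std" and "flash@0" are example names here, not a requirement
+ * of this function.
+ */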
+
+/* Compatibility function - to be removed */
+struct spi_slave *spi_setup_slave(unsigned int busnum, unsigned int cs,
+ unsigned int speed, unsigned int mode)
+{
+ struct spi_slave *slave;
+ struct udevice *dev;
+ int ret;
+
+ ret = spi_get_bus_and_cs(busnum, cs, speed, mode, NULL, 0, &dev,
+ &slave);
+ if (ret)
+ return NULL;
+
+ return slave;
+}
+
+void spi_free_slave(struct spi_slave *slave)
+{
+ device_remove(slave->dev, DM_REMOVE_NORMAL);
+}
+
+int spi_slave_of_to_plat(struct udevice *dev, struct dm_spi_slave_plat *plat)
+{
+ int mode = 0;
+ int value;
+
+ plat->cs = dev_read_u32_default(dev, "reg", -1);
+ plat->max_hz = dev_read_u32_default(dev, "spi-max-frequency",
+ SPI_DEFAULT_SPEED_HZ);
+ if (dev_read_bool(dev, "spi-cpol"))
+ mode |= SPI_CPOL;
+ if (dev_read_bool(dev, "spi-cpha"))
+ mode |= SPI_CPHA;
+ if (dev_read_bool(dev, "spi-cs-high"))
+ mode |= SPI_CS_HIGH;
+ if (dev_read_bool(dev, "spi-3wire"))
+ mode |= SPI_3WIRE;
+ if (dev_read_bool(dev, "spi-half-duplex"))
+ mode |= SPI_PREAMBLE;
+
+ /* Device DUAL/QUAD mode */
+ value = dev_read_u32_default(dev, "spi-tx-bus-width", 1);
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ mode |= SPI_TX_DUAL;
+ break;
+ case 4:
+ mode |= SPI_TX_QUAD;
+ break;
+ case 8:
+ mode |= SPI_TX_OCTAL;
+ break;
+ default:
+ warn_non_spl("spi-tx-bus-width %d not supported\n", value);
+ break;
+ }
+
+ value = dev_read_u32_default(dev, "spi-rx-bus-width", 1);
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ mode |= SPI_RX_QUAD;
+ break;
+ case 8:
+ mode |= SPI_RX_OCTAL;
+ break;
+ default:
+ warn_non_spl("spi-rx-bus-width %d not supported\n", value);
+ break;
+ }
+
+ plat->mode = mode;
+
+ return 0;
+}
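+
+/*
+ * Illustrative (hypothetical) child node showing the properties parsed
+ * above; the values are examples only:
+ *
+ *	flash@0 {
+ *		compatible = "jedec,spi-nor";
+ *		reg = <0>;
+ *		spi-max-frequency = <25000000>;
+ *		spi-cpol;
+ *		spi-cpha;
+ *		spi-rx-bus-width = <4>;
+ *	};
+ */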
+
+UCLASS_DRIVER(spi) = {
+ .id = UCLASS_SPI,
+ .name = "spi",
+ .flags = DM_UC_FLAG_SEQ_ALIAS,
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .post_bind = dm_scan_fdt_dev,
+#endif
+ .post_probe = spi_post_probe,
+ .child_pre_probe = spi_child_pre_probe,
+ .per_device_auto = sizeof(struct dm_spi_bus),
+ .per_child_auto = sizeof(struct spi_slave),
+ .per_child_plat_auto = sizeof(struct dm_spi_slave_plat),
+#if !CONFIG_IS_ENABLED(OF_PLATDATA)
+ .child_post_bind = spi_child_post_bind,
+#endif
+};
+
+UCLASS_DRIVER(spi_generic) = {
+ .id = UCLASS_SPI_GENERIC,
+ .name = "spi_generic",
+};
+
+U_BOOT_DRIVER(spi_generic_drv) = {
+ .name = "spi_generic_drv",
+ .id = UCLASS_SPI_GENERIC,
+};
diff --git a/roms/u-boot/drivers/spi/spi.c b/roms/u-boot/drivers/spi/spi.c
new file mode 100644
index 000000000..22910de0d
--- /dev/null
+++ b/roms/u-boot/drivers/spi/spi.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2011 The Chromium OS Authors.
+ */
+
+#include <common.h>
+#include <fdtdec.h>
+#include <malloc.h>
+#include <spi.h>
+
+int spi_set_wordlen(struct spi_slave *slave, unsigned int wordlen)
+{
+ if (wordlen == 0 || wordlen > 32) {
+ printf("spi: invalid wordlen %u\n", wordlen);
+ return -1;
+ }
+
+ slave->wordlen = wordlen;
+
+ return 0;
+}
+
+void *spi_do_alloc_slave(int offset, int size, unsigned int bus,
+ unsigned int cs)
+{
+ u8 *ptr;
+
+ ptr = malloc(size);
+ if (ptr) {
+ struct spi_slave *slave;
+
+ memset(ptr, '\0', size);
+ slave = (struct spi_slave *)(ptr + offset);
+ slave->bus = bus;
+ slave->cs = cs;
+ slave->wordlen = SPI_DEFAULT_WORDLEN;
+ }
+
+ return ptr;
+}
diff --git a/roms/u-boot/drivers/spi/stm32_qspi.c b/roms/u-boot/drivers/spi/stm32_qspi.c
new file mode 100644
index 000000000..4acc9047b
--- /dev/null
+++ b/roms/u-boot/drivers/spi/stm32_qspi.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2016
+ *
+ * Michael Kurz, <michi.kurz@gmail.com>
+ *
+ * STM32 QSPI driver
+ */
+
+#define LOG_CATEGORY UCLASS_SPI
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <log.h>
+#include <reset.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <watchdog.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/sizes.h>
+
+struct stm32_qspi_regs {
+ u32 cr; /* 0x00 */
+ u32 dcr; /* 0x04 */
+ u32 sr; /* 0x08 */
+ u32 fcr; /* 0x0C */
+ u32 dlr; /* 0x10 */
+ u32 ccr; /* 0x14 */
+ u32 ar; /* 0x18 */
+ u32 abr; /* 0x1C */
+ u32 dr; /* 0x20 */
+ u32 psmkr; /* 0x24 */
+ u32 psmar; /* 0x28 */
+ u32 pir; /* 0x2C */
+ u32 lptr; /* 0x30 */
+};
+
+/*
+ * QUADSPI control register
+ */
+#define STM32_QSPI_CR_EN BIT(0)
+#define STM32_QSPI_CR_ABORT BIT(1)
+#define STM32_QSPI_CR_DMAEN BIT(2)
+#define STM32_QSPI_CR_TCEN BIT(3)
+#define STM32_QSPI_CR_SSHIFT BIT(4)
+#define STM32_QSPI_CR_DFM BIT(6)
+#define STM32_QSPI_CR_FSEL BIT(7)
+#define STM32_QSPI_CR_FTHRES_SHIFT 8
+#define STM32_QSPI_CR_TEIE BIT(16)
+#define STM32_QSPI_CR_TCIE BIT(17)
+#define STM32_QSPI_CR_FTIE BIT(18)
+#define STM32_QSPI_CR_SMIE BIT(19)
+#define STM32_QSPI_CR_TOIE BIT(20)
+#define STM32_QSPI_CR_APMS BIT(22)
+#define STM32_QSPI_CR_PMM BIT(23)
+#define STM32_QSPI_CR_PRESCALER_MASK GENMASK(7, 0)
+#define STM32_QSPI_CR_PRESCALER_SHIFT 24
+
+/*
+ * QUADSPI device configuration register
+ */
+#define STM32_QSPI_DCR_CKMODE BIT(0)
+#define STM32_QSPI_DCR_CSHT_MASK GENMASK(2, 0)
+#define STM32_QSPI_DCR_CSHT_SHIFT 8
+#define STM32_QSPI_DCR_FSIZE_MASK GENMASK(4, 0)
+#define STM32_QSPI_DCR_FSIZE_SHIFT 16
+
+/*
+ * QUADSPI status register
+ */
+#define STM32_QSPI_SR_TEF BIT(0)
+#define STM32_QSPI_SR_TCF BIT(1)
+#define STM32_QSPI_SR_FTF BIT(2)
+#define STM32_QSPI_SR_SMF BIT(3)
+#define STM32_QSPI_SR_TOF BIT(4)
+#define STM32_QSPI_SR_BUSY BIT(5)
+
+/*
+ * QUADSPI flag clear register
+ */
+#define STM32_QSPI_FCR_CTEF BIT(0)
+#define STM32_QSPI_FCR_CTCF BIT(1)
+#define STM32_QSPI_FCR_CSMF BIT(3)
+#define STM32_QSPI_FCR_CTOF BIT(4)
+
+/*
+ * QUADSPI communication configuration register
+ */
+#define STM32_QSPI_CCR_DDRM BIT(31)
+#define STM32_QSPI_CCR_DHHC BIT(30)
+#define STM32_QSPI_CCR_SIOO BIT(28)
+#define STM32_QSPI_CCR_FMODE_SHIFT 26
+#define STM32_QSPI_CCR_DMODE_SHIFT 24
+#define STM32_QSPI_CCR_DCYC_SHIFT 18
+#define STM32_QSPI_CCR_ABSIZE_SHIFT 16
+#define STM32_QSPI_CCR_ABMODE_SHIFT 14
+#define STM32_QSPI_CCR_ADSIZE_SHIFT 12
+#define STM32_QSPI_CCR_ADMODE_SHIFT 10
+#define STM32_QSPI_CCR_IMODE_SHIFT 8
+
+#define STM32_QSPI_CCR_IND_WRITE 0
+#define STM32_QSPI_CCR_IND_READ 1
+#define STM32_QSPI_CCR_MEM_MAP 3
+
+#define STM32_QSPI_MAX_MMAP_SZ SZ_256M
+#define STM32_QSPI_MAX_CHIP 2
+
+#define STM32_QSPI_FIFO_TIMEOUT_US 30000
+#define STM32_QSPI_CMD_TIMEOUT_US 1000000
+#define STM32_BUSY_TIMEOUT_US 100000
+#define STM32_ABT_TIMEOUT_US 100000
+
+struct stm32_qspi_flash {
+ u32 cr;
+ u32 dcr;
+ bool initialized;
+};
+
+struct stm32_qspi_priv {
+ struct stm32_qspi_regs *regs;
+ struct stm32_qspi_flash flash[STM32_QSPI_MAX_CHIP];
+ void __iomem *mm_base;
+ resource_size_t mm_size;
+ ulong clock_rate;
+ int cs_used;
+};
+
+static int _stm32_qspi_wait_for_not_busy(struct stm32_qspi_priv *priv)
+{
+ u32 sr;
+ int ret;
+
+ ret = readl_poll_timeout(&priv->regs->sr, sr,
+ !(sr & STM32_QSPI_SR_BUSY),
+ STM32_BUSY_TIMEOUT_US);
+ if (ret)
+ log_err("busy timeout (stat:%#x)\n", sr);
+
+ return ret;
+}
+
+static int _stm32_qspi_wait_cmd(struct stm32_qspi_priv *priv,
+ const struct spi_mem_op *op)
+{
+ u32 sr;
+ int ret;
+
+ if (!op->data.nbytes)
+ return _stm32_qspi_wait_for_not_busy(priv);
+
+ ret = readl_poll_timeout(&priv->regs->sr, sr,
+ sr & STM32_QSPI_SR_TCF,
+ STM32_QSPI_CMD_TIMEOUT_US);
+ if (ret) {
+ log_err("cmd timeout (stat:%#x)\n", sr);
+ } else if (readl(&priv->regs->sr) & STM32_QSPI_SR_TEF) {
+ log_err("transfer error (stat:%#x)\n", sr);
+ ret = -EIO;
+ }
+
+ /* clear flags */
+ writel(STM32_QSPI_FCR_CTCF | STM32_QSPI_FCR_CTEF, &priv->regs->fcr);
+
+ return ret;
+}
+
+static void _stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
+{
+ *val = readb(addr);
+ WATCHDOG_RESET();
+}
+
+static void _stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
+{
+ writeb(*val, addr);
+}
+
+static int _stm32_qspi_poll(struct stm32_qspi_priv *priv,
+ const struct spi_mem_op *op)
+{
+ void (*fifo)(u8 *val, void __iomem *addr);
+ u32 len = op->data.nbytes, sr;
+ u8 *buf;
+ int ret;
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
+ fifo = _stm32_qspi_read_fifo;
+ buf = op->data.buf.in;
+
+ } else {
+ fifo = _stm32_qspi_write_fifo;
+ buf = (u8 *)op->data.buf.out;
+ }
+
+ while (len--) {
+ ret = readl_poll_timeout(&priv->regs->sr, sr,
+ sr & STM32_QSPI_SR_FTF,
+ STM32_QSPI_FIFO_TIMEOUT_US);
+ if (ret) {
+ log_err("fifo timeout (len:%d stat:%#x)\n", len, sr);
+ return ret;
+ }
+
+ fifo(buf++, &priv->regs->dr);
+ }
+
+ return 0;
+}
+
+static int stm32_qspi_mm(struct stm32_qspi_priv *priv,
+ const struct spi_mem_op *op)
+{
+ memcpy_fromio(op->data.buf.in, priv->mm_base + op->addr.val,
+ op->data.nbytes);
+
+ return 0;
+}
+
+static int _stm32_qspi_tx(struct stm32_qspi_priv *priv,
+ const struct spi_mem_op *op,
+ u8 mode)
+{
+ if (!op->data.nbytes)
+ return 0;
+
+ if (mode == STM32_QSPI_CCR_MEM_MAP)
+ return stm32_qspi_mm(priv, op);
+
+ return _stm32_qspi_poll(priv, op);
+}
+
+static int _stm32_qspi_get_mode(u8 buswidth)
+{
+ if (buswidth == 4)
+ return 3;
+
+ return buswidth;
+}
+
+static int stm32_qspi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct stm32_qspi_priv *priv = dev_get_priv(slave->dev->parent);
+ u32 cr, ccr, addr_max;
+ u8 mode = STM32_QSPI_CCR_IND_WRITE;
+ int timeout, ret;
+
+ dev_dbg(slave->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->dummy.buswidth, op->data.buswidth,
+ op->addr.val, op->data.nbytes);
+
+ ret = _stm32_qspi_wait_for_not_busy(priv);
+ if (ret)
+ return ret;
+
+ addr_max = op->addr.val + op->data.nbytes + 1;
+
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) {
+ if (addr_max < priv->mm_size && op->addr.buswidth)
+ mode = STM32_QSPI_CCR_MEM_MAP;
+ else
+ mode = STM32_QSPI_CCR_IND_READ;
+ }
+
+ if (op->data.nbytes)
+ writel(op->data.nbytes - 1, &priv->regs->dlr);
+
+ ccr = (mode << STM32_QSPI_CCR_FMODE_SHIFT);
+ ccr |= op->cmd.opcode;
+ ccr |= (_stm32_qspi_get_mode(op->cmd.buswidth)
+ << STM32_QSPI_CCR_IMODE_SHIFT);
+
+ if (op->addr.nbytes) {
+ ccr |= ((op->addr.nbytes - 1) << STM32_QSPI_CCR_ADSIZE_SHIFT);
+ ccr |= (_stm32_qspi_get_mode(op->addr.buswidth)
+ << STM32_QSPI_CCR_ADMODE_SHIFT);
+ }
+
+ if (op->dummy.buswidth && op->dummy.nbytes)
+ ccr |= (op->dummy.nbytes * 8 / op->dummy.buswidth
+ << STM32_QSPI_CCR_DCYC_SHIFT);
+
+ if (op->data.nbytes)
+ ccr |= (_stm32_qspi_get_mode(op->data.buswidth)
+ << STM32_QSPI_CCR_DMODE_SHIFT);
+
+ writel(ccr, &priv->regs->ccr);
+
+ if (op->addr.nbytes && mode != STM32_QSPI_CCR_MEM_MAP)
+ writel(op->addr.val, &priv->regs->ar);
+
+ ret = _stm32_qspi_tx(priv, op, mode);
+	/*
+	 * Abort in:
+	 * - the error case;
+	 * - memory-mapped read: prefetching must be stopped once the last
+	 *   byte of the device (device size - fifo size) has been read. As
+	 *   the device size is not known here, prefetching is always stopped.
+	 */
+ if (ret || mode == STM32_QSPI_CCR_MEM_MAP)
+ goto abort;
+
+	/* Wait for the end of the transfer in indirect mode */
+ ret = _stm32_qspi_wait_cmd(priv, op);
+ if (ret)
+ goto abort;
+
+ return 0;
+
+abort:
+ setbits_le32(&priv->regs->cr, STM32_QSPI_CR_ABORT);
+
+	/* Wait for the abort bit to be cleared by hardware */
+ timeout = readl_poll_timeout(&priv->regs->cr, cr,
+ !(cr & STM32_QSPI_CR_ABORT),
+ STM32_ABT_TIMEOUT_US);
+
+ writel(STM32_QSPI_FCR_CTCF, &priv->regs->fcr);
+
+ if (ret || timeout)
+ dev_err(slave->dev, "ret:%d abort timeout:%d\n", ret, timeout);
+
+ return ret;
+}
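For illustration only, not in the patch: stm32_qspi_exec_op() is reached through the generic spi-mem layer, which hands it a fully described struct spi_mem_op. A hedged sketch of such a caller; the 0x6b opcode, the single dummy byte and the example_quad_read() name are illustrative, while the SPI_MEM_OP* macros and spi_mem_exec_op() are the standard spi-mem API:

	#include <spi.h>
	#include <spi-mem.h>

	static int example_quad_read(struct spi_slave *slave, u32 addr,
				     void *buf, size_t len)
	{
		/* 1-1-4 read: 1-wire opcode and address, 4-wire data phase */
		struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
						  SPI_MEM_OP_ADDR(3, addr, 1),
						  SPI_MEM_OP_DUMMY(1, 1),
						  SPI_MEM_OP_DATA_IN(len, buf, 4));

		/* exec_op() maps each buswidth onto the CCR IMODE/ADMODE/DMODE fields */
		return spi_mem_exec_op(slave, &op);
	}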
+
+static int stm32_qspi_probe(struct udevice *bus)
+{
+ struct stm32_qspi_priv *priv = dev_get_priv(bus);
+ struct resource res;
+ struct clk clk;
+ struct reset_ctl reset_ctl;
+ int ret;
+
+ ret = dev_read_resource_byname(bus, "qspi", &res);
+ if (ret) {
+		dev_err(bus, "can't get regs base address (ret = %d)!\n", ret);
+ return ret;
+ }
+
+ priv->regs = (struct stm32_qspi_regs *)res.start;
+
+ ret = dev_read_resource_byname(bus, "qspi_mm", &res);
+ if (ret) {
+		dev_err(bus, "can't get mmap base address (ret = %d)!\n", ret);
+ return ret;
+ }
+
+ priv->mm_base = (void __iomem *)res.start;
+
+ priv->mm_size = resource_size(&res);
+ if (priv->mm_size > STM32_QSPI_MAX_MMAP_SZ)
+ return -EINVAL;
+
+ dev_dbg(bus, "regs=<0x%p> mapped=<0x%p> mapped_size=<0x%lx>\n",
+ priv->regs, priv->mm_base, priv->mm_size);
+
+ ret = clk_get_by_index(bus, 0, &clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(&clk);
+ if (ret) {
+ dev_err(bus, "failed to enable clock\n");
+ return ret;
+ }
+
+ priv->clock_rate = clk_get_rate(&clk);
+ if (!priv->clock_rate) {
+ clk_disable(&clk);
+ return -EINVAL;
+ }
+
+ ret = reset_get_by_index(bus, 0, &reset_ctl);
+ if (ret) {
+ if (ret != -ENOENT) {
+ dev_err(bus, "failed to get reset\n");
+ clk_disable(&clk);
+ return ret;
+ }
+ } else {
+ /* Reset QSPI controller */
+ reset_assert(&reset_ctl);
+ udelay(2);
+ reset_deassert(&reset_ctl);
+ }
+
+ priv->cs_used = -1;
+
+ setbits_le32(&priv->regs->cr, STM32_QSPI_CR_SSHIFT);
+
+ /* Set dcr fsize to max address */
+ setbits_le32(&priv->regs->dcr,
+ STM32_QSPI_DCR_FSIZE_MASK << STM32_QSPI_DCR_FSIZE_SHIFT);
+
+ return 0;
+}
+
+static int stm32_qspi_claim_bus(struct udevice *dev)
+{
+ struct stm32_qspi_priv *priv = dev_get_priv(dev->parent);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ int slave_cs = slave_plat->cs;
+
+ if (slave_cs >= STM32_QSPI_MAX_CHIP)
+ return -ENODEV;
+
+ if (priv->cs_used != slave_cs) {
+ struct stm32_qspi_flash *flash = &priv->flash[slave_cs];
+
+ priv->cs_used = slave_cs;
+
+ if (flash->initialized) {
+ /* Set the configuration: speed + cs */
+ writel(flash->cr, &priv->regs->cr);
+ writel(flash->dcr, &priv->regs->dcr);
+ } else {
+ /* Set chip select */
+ clrsetbits_le32(&priv->regs->cr, STM32_QSPI_CR_FSEL,
+ priv->cs_used ? STM32_QSPI_CR_FSEL : 0);
+
+ /* Save the configuration: speed + cs */
+ flash->cr = readl(&priv->regs->cr);
+ flash->dcr = readl(&priv->regs->dcr);
+
+ flash->initialized = true;
+ }
+ }
+
+ setbits_le32(&priv->regs->cr, STM32_QSPI_CR_EN);
+
+ return 0;
+}
+
+static int stm32_qspi_release_bus(struct udevice *dev)
+{
+ struct stm32_qspi_priv *priv = dev_get_priv(dev->parent);
+
+ clrbits_le32(&priv->regs->cr, STM32_QSPI_CR_EN);
+
+ return 0;
+}
+
+static int stm32_qspi_set_speed(struct udevice *bus, uint speed)
+{
+ struct stm32_qspi_priv *priv = dev_get_priv(bus);
+ u32 qspi_clk = priv->clock_rate;
+ u32 prescaler = 255;
+ u32 csht;
+ int ret;
+
+ if (speed > 0) {
+ prescaler = 0;
+ if (qspi_clk) {
+ prescaler = DIV_ROUND_UP(qspi_clk, speed) - 1;
+ if (prescaler > 255)
+ prescaler = 255;
+ }
+ }
+
+ csht = DIV_ROUND_UP((5 * qspi_clk) / (prescaler + 1), 100000000);
+ csht = (csht - 1) & STM32_QSPI_DCR_CSHT_MASK;
+
+ ret = _stm32_qspi_wait_for_not_busy(priv);
+ if (ret)
+ return ret;
+
+ clrsetbits_le32(&priv->regs->cr,
+ STM32_QSPI_CR_PRESCALER_MASK <<
+ STM32_QSPI_CR_PRESCALER_SHIFT,
+ prescaler << STM32_QSPI_CR_PRESCALER_SHIFT);
+
+ clrsetbits_le32(&priv->regs->dcr,
+ STM32_QSPI_DCR_CSHT_MASK << STM32_QSPI_DCR_CSHT_SHIFT,
+ csht << STM32_QSPI_DCR_CSHT_SHIFT);
+
+ dev_dbg(bus, "regs=%p, speed=%d\n", priv->regs,
+ (qspi_clk / (prescaler + 1)));
+
+ return 0;
+}
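As a worked example (figures illustrative, not from the patch): with qspi_clk = 200 MHz and a requested speed of 50 MHz, DIV_ROUND_UP(200 MHz, 50 MHz) - 1 = 3 is written to the PRESCALER field and SCK runs at 200 MHz / (3 + 1) = 50 MHz. A request of 60 MHz yields the same prescaler of 3, i.e. the bus is always clocked at or below the requested rate, never above it.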
+
+static int stm32_qspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct stm32_qspi_priv *priv = dev_get_priv(bus);
+ int ret;
+ const char *str_rx, *str_tx;
+
+ ret = _stm32_qspi_wait_for_not_busy(priv);
+ if (ret)
+ return ret;
+
+ if ((mode & SPI_CPHA) && (mode & SPI_CPOL))
+ setbits_le32(&priv->regs->dcr, STM32_QSPI_DCR_CKMODE);
+ else if (!(mode & SPI_CPHA) && !(mode & SPI_CPOL))
+ clrbits_le32(&priv->regs->dcr, STM32_QSPI_DCR_CKMODE);
+ else
+ return -ENODEV;
+
+ if (mode & SPI_CS_HIGH)
+ return -ENODEV;
+
+ if (mode & SPI_RX_QUAD)
+ str_rx = "quad";
+ else if (mode & SPI_RX_DUAL)
+ str_rx = "dual";
+ else
+ str_rx = "single";
+
+ if (mode & SPI_TX_QUAD)
+ str_tx = "quad";
+ else if (mode & SPI_TX_DUAL)
+ str_tx = "dual";
+ else
+ str_tx = "single";
+
+ dev_dbg(bus, "regs=%p, mode=%d rx: %s, tx: %s\n",
+ priv->regs, mode, str_rx, str_tx);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
+ .exec_op = stm32_qspi_exec_op,
+};
+
+static const struct dm_spi_ops stm32_qspi_ops = {
+ .claim_bus = stm32_qspi_claim_bus,
+ .release_bus = stm32_qspi_release_bus,
+ .set_speed = stm32_qspi_set_speed,
+ .set_mode = stm32_qspi_set_mode,
+ .mem_ops = &stm32_qspi_mem_ops,
+};
+
+static const struct udevice_id stm32_qspi_ids[] = {
+ { .compatible = "st,stm32f469-qspi" },
+ { }
+};
+
+U_BOOT_DRIVER(stm32_qspi) = {
+ .name = "stm32_qspi",
+ .id = UCLASS_SPI,
+ .of_match = stm32_qspi_ids,
+ .ops = &stm32_qspi_ops,
+ .priv_auto = sizeof(struct stm32_qspi_priv),
+ .probe = stm32_qspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/stm32_spi.c b/roms/u-boot/drivers/spi/stm32_spi.c
new file mode 100644
index 000000000..bd8514033
--- /dev/null
+++ b/roms/u-boot/drivers/spi/stm32_spi.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
+/*
+ * Copyright (C) 2019, STMicroelectronics - All Rights Reserved
+ *
+ * Driver for STMicroelectronics Serial peripheral interface (SPI)
+ */
+
+#define LOG_CATEGORY UCLASS_SPI
+
+#include <common.h>
+#include <clk.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <reset.h>
+#include <spi.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+
+/* STM32 SPI registers */
+#define STM32_SPI_CR1 0x00
+#define STM32_SPI_CR2 0x04
+#define STM32_SPI_CFG1 0x08
+#define STM32_SPI_CFG2 0x0C
+#define STM32_SPI_SR 0x14
+#define STM32_SPI_IFCR 0x18
+#define STM32_SPI_TXDR 0x20
+#define STM32_SPI_RXDR 0x30
+#define STM32_SPI_I2SCFGR 0x50
+
+/* STM32_SPI_CR1 bit fields */
+#define SPI_CR1_SPE BIT(0)
+#define SPI_CR1_MASRX BIT(8)
+#define SPI_CR1_CSTART BIT(9)
+#define SPI_CR1_CSUSP BIT(10)
+#define SPI_CR1_HDDIR BIT(11)
+#define SPI_CR1_SSI BIT(12)
+
+/* STM32_SPI_CR2 bit fields */
+#define SPI_CR2_TSIZE GENMASK(15, 0)
+
+/* STM32_SPI_CFG1 bit fields */
+#define SPI_CFG1_DSIZE GENMASK(4, 0)
+#define SPI_CFG1_DSIZE_MIN 3
+#define SPI_CFG1_FTHLV_SHIFT 5
+#define SPI_CFG1_FTHLV GENMASK(8, 5)
+#define SPI_CFG1_MBR_SHIFT 28
+#define SPI_CFG1_MBR GENMASK(30, 28)
+#define SPI_CFG1_MBR_MIN 0
+#define SPI_CFG1_MBR_MAX FIELD_GET(SPI_CFG1_MBR, SPI_CFG1_MBR)
+
+/* STM32_SPI_CFG2 bit fields */
+#define SPI_CFG2_COMM_SHIFT 17
+#define SPI_CFG2_COMM GENMASK(18, 17)
+#define SPI_CFG2_MASTER BIT(22)
+#define SPI_CFG2_LSBFRST BIT(23)
+#define SPI_CFG2_CPHA BIT(24)
+#define SPI_CFG2_CPOL BIT(25)
+#define SPI_CFG2_SSM BIT(26)
+#define SPI_CFG2_AFCNTR BIT(31)
+
+/* STM32_SPI_SR bit fields */
+#define SPI_SR_RXP BIT(0)
+#define SPI_SR_TXP BIT(1)
+#define SPI_SR_EOT BIT(3)
+#define SPI_SR_TXTF BIT(4)
+#define SPI_SR_OVR BIT(6)
+#define SPI_SR_SUSP BIT(11)
+#define SPI_SR_RXPLVL_SHIFT 13
+#define SPI_SR_RXPLVL GENMASK(14, 13)
+#define SPI_SR_RXWNE BIT(15)
+
+/* STM32_SPI_IFCR bit fields */
+#define SPI_IFCR_ALL GENMASK(11, 3)
+
+/* STM32_SPI_I2SCFGR bit fields */
+#define SPI_I2SCFGR_I2SMOD BIT(0)
+
+#define MAX_CS_COUNT 4
+
+/* SPI Master Baud Rate min/max divisor */
+#define STM32_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN)
+#define STM32_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX)
+
+#define STM32_SPI_TIMEOUT_US 100000
+
+/* SPI Communication mode */
+#define SPI_FULL_DUPLEX 0
+#define SPI_SIMPLEX_TX 1
+#define SPI_SIMPLEX_RX 2
+#define SPI_HALF_DUPLEX 3
+
+struct stm32_spi_priv {
+ void __iomem *base;
+ struct clk clk;
+ struct reset_ctl rst_ctl;
+ struct gpio_desc cs_gpios[MAX_CS_COUNT];
+ ulong bus_clk_rate;
+ unsigned int fifo_size;
+ unsigned int cur_bpw;
+ unsigned int cur_hz;
+ unsigned int cur_xferlen; /* current transfer length in bytes */
+ unsigned int tx_len; /* number of data to be written in bytes */
+ unsigned int rx_len; /* number of data to be read in bytes */
+ const void *tx_buf; /* data to be written, or NULL */
+ void *rx_buf; /* data to be read, or NULL */
+ u32 cur_mode;
+ bool cs_high;
+};
+
+static void stm32_spi_write_txfifo(struct stm32_spi_priv *priv)
+{
+ while ((priv->tx_len > 0) &&
+ (readl(priv->base + STM32_SPI_SR) & SPI_SR_TXP)) {
+ u32 offs = priv->cur_xferlen - priv->tx_len;
+
+ if (priv->tx_len >= sizeof(u32) &&
+ IS_ALIGNED((uintptr_t)(priv->tx_buf + offs), sizeof(u32))) {
+ const u32 *tx_buf32 = (const u32 *)(priv->tx_buf + offs);
+
+ writel(*tx_buf32, priv->base + STM32_SPI_TXDR);
+ priv->tx_len -= sizeof(u32);
+ } else if (priv->tx_len >= sizeof(u16) &&
+ IS_ALIGNED((uintptr_t)(priv->tx_buf + offs), sizeof(u16))) {
+ const u16 *tx_buf16 = (const u16 *)(priv->tx_buf + offs);
+
+ writew(*tx_buf16, priv->base + STM32_SPI_TXDR);
+ priv->tx_len -= sizeof(u16);
+ } else {
+ const u8 *tx_buf8 = (const u8 *)(priv->tx_buf + offs);
+
+ writeb(*tx_buf8, priv->base + STM32_SPI_TXDR);
+ priv->tx_len -= sizeof(u8);
+ }
+ }
+
+ log_debug("%d bytes left\n", priv->tx_len);
+}
+
+static void stm32_spi_read_rxfifo(struct stm32_spi_priv *priv)
+{
+ u32 sr = readl(priv->base + STM32_SPI_SR);
+ u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+
+ while ((priv->rx_len > 0) &&
+ ((sr & SPI_SR_RXP) ||
+ ((sr & SPI_SR_EOT) && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) {
+ u32 offs = priv->cur_xferlen - priv->rx_len;
+
+ if (IS_ALIGNED((uintptr_t)(priv->rx_buf + offs), sizeof(u32)) &&
+ (priv->rx_len >= sizeof(u32) || (sr & SPI_SR_RXWNE))) {
+ u32 *rx_buf32 = (u32 *)(priv->rx_buf + offs);
+
+ *rx_buf32 = readl(priv->base + STM32_SPI_RXDR);
+ priv->rx_len -= sizeof(u32);
+ } else if (IS_ALIGNED((uintptr_t)(priv->rx_buf + offs), sizeof(u16)) &&
+ (priv->rx_len >= sizeof(u16) ||
+ (!(sr & SPI_SR_RXWNE) &&
+ (rxplvl >= 2 || priv->cur_bpw > 8)))) {
+ u16 *rx_buf16 = (u16 *)(priv->rx_buf + offs);
+
+ *rx_buf16 = readw(priv->base + STM32_SPI_RXDR);
+ priv->rx_len -= sizeof(u16);
+ } else {
+ u8 *rx_buf8 = (u8 *)(priv->rx_buf + offs);
+
+ *rx_buf8 = readb(priv->base + STM32_SPI_RXDR);
+ priv->rx_len -= sizeof(u8);
+ }
+
+ sr = readl(priv->base + STM32_SPI_SR);
+ rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
+ }
+
+ log_debug("%d bytes left\n", priv->rx_len);
+}
+
+static int stm32_spi_enable(struct stm32_spi_priv *priv)
+{
+ log_debug("\n");
+
+ /* Enable the SPI hardware */
+ setbits_le32(priv->base + STM32_SPI_CR1, SPI_CR1_SPE);
+
+ return 0;
+}
+
+static int stm32_spi_disable(struct stm32_spi_priv *priv)
+{
+ log_debug("\n");
+
+ /* Disable the SPI hardware */
+ clrbits_le32(priv->base + STM32_SPI_CR1, SPI_CR1_SPE);
+
+ return 0;
+}
+
+static int stm32_spi_claim_bus(struct udevice *slave)
+{
+ struct udevice *bus = dev_get_parent(slave);
+ struct stm32_spi_priv *priv = dev_get_priv(bus);
+
+ dev_dbg(slave, "\n");
+
+ /* Enable the SPI hardware */
+ return stm32_spi_enable(priv);
+}
+
+static int stm32_spi_release_bus(struct udevice *slave)
+{
+ struct udevice *bus = dev_get_parent(slave);
+ struct stm32_spi_priv *priv = dev_get_priv(bus);
+
+ dev_dbg(slave, "\n");
+
+ /* Disable the SPI hardware */
+ return stm32_spi_disable(priv);
+}
+
+static void stm32_spi_stopxfer(struct udevice *dev)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(dev);
+ u32 cr1, sr;
+ int ret;
+
+ dev_dbg(dev, "\n");
+
+ cr1 = readl(priv->base + STM32_SPI_CR1);
+
+ if (!(cr1 & SPI_CR1_SPE))
+ return;
+
+ /* Wait on EOT or suspend the flow */
+ ret = readl_poll_timeout(priv->base + STM32_SPI_SR, sr,
+ !(sr & SPI_SR_EOT), 100000);
+ if (ret < 0) {
+ if (cr1 & SPI_CR1_CSTART) {
+ writel(cr1 | SPI_CR1_CSUSP, priv->base + STM32_SPI_CR1);
+ if (readl_poll_timeout(priv->base + STM32_SPI_SR,
+ sr, !(sr & SPI_SR_SUSP),
+ 100000) < 0)
+ dev_err(dev, "Suspend request timeout\n");
+ }
+ }
+
+ /* clear status flags */
+ setbits_le32(priv->base + STM32_SPI_IFCR, SPI_IFCR_ALL);
+}
+
+static int stm32_spi_set_cs(struct udevice *dev, unsigned int cs, bool enable)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(dev);
+
+ dev_dbg(dev, "cs=%d enable=%d\n", cs, enable);
+
+ if (cs >= MAX_CS_COUNT)
+ return -ENODEV;
+
+ if (!dm_gpio_is_valid(&priv->cs_gpios[cs]))
+ return -EINVAL;
+
+ if (priv->cs_high)
+ enable = !enable;
+
+ return dm_gpio_set_value(&priv->cs_gpios[cs], enable ? 1 : 0);
+}
+
+static int stm32_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(bus);
+ u32 cfg2_clrb = 0, cfg2_setb = 0;
+
+ dev_dbg(bus, "mode=%d\n", mode);
+
+ if (mode & SPI_CPOL)
+ cfg2_setb |= SPI_CFG2_CPOL;
+ else
+ cfg2_clrb |= SPI_CFG2_CPOL;
+
+ if (mode & SPI_CPHA)
+ cfg2_setb |= SPI_CFG2_CPHA;
+ else
+ cfg2_clrb |= SPI_CFG2_CPHA;
+
+ if (mode & SPI_LSB_FIRST)
+ cfg2_setb |= SPI_CFG2_LSBFRST;
+ else
+ cfg2_clrb |= SPI_CFG2_LSBFRST;
+
+ if (cfg2_clrb || cfg2_setb)
+ clrsetbits_le32(priv->base + STM32_SPI_CFG2,
+ cfg2_clrb, cfg2_setb);
+
+ if (mode & SPI_CS_HIGH)
+ priv->cs_high = true;
+ else
+ priv->cs_high = false;
+ return 0;
+}
+
+static int stm32_spi_set_fthlv(struct udevice *dev, u32 xfer_len)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(dev);
+ u32 fthlv, half_fifo;
+
+	/* the data packet should not exceed half of the FIFO space */
+	half_fifo = (priv->fifo_size / 2);
+
+	/* the data packet should not exceed the transfer length */
+ fthlv = (half_fifo > xfer_len) ? xfer_len : half_fifo;
+
+ /* align packet size with data registers access */
+ fthlv -= (fthlv % 4);
+
+ if (!fthlv)
+ fthlv = 1;
+ clrsetbits_le32(priv->base + STM32_SPI_CFG1, SPI_CFG1_FTHLV,
+ (fthlv - 1) << SPI_CFG1_FTHLV_SHIFT);
+
+ return 0;
+}
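As a worked example (figures illustrative, not from the patch): assuming a 16-byte FIFO and a 10-byte transfer, half_fifo = 8, which is smaller than the transfer length, so fthlv = 8; that is already a multiple of 4, and FTHLV is programmed with 8 - 1 = 7, i.e. an 8-data-frame packet is used as the FIFO threshold.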
+
+static int stm32_spi_set_speed(struct udevice *bus, uint hz)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(bus);
+ u32 mbrdiv;
+ long div;
+
+ dev_dbg(bus, "hz=%d\n", hz);
+
+ if (priv->cur_hz == hz)
+ return 0;
+
+ div = DIV_ROUND_UP(priv->bus_clk_rate, hz);
+
+ if (div < STM32_MBR_DIV_MIN ||
+ div > STM32_MBR_DIV_MAX)
+ return -EINVAL;
+
+ /* Determine the first power of 2 greater than or equal to div */
+ if (div & (div - 1))
+ mbrdiv = fls(div);
+ else
+ mbrdiv = fls(div) - 1;
+
+ if (!mbrdiv)
+ return -EINVAL;
+
+ clrsetbits_le32(priv->base + STM32_SPI_CFG1, SPI_CFG1_MBR,
+ (mbrdiv - 1) << SPI_CFG1_MBR_SHIFT);
+
+ priv->cur_hz = hz;
+
+ return 0;
+}
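As a worked example (figures illustrative, not from the patch): with bus_clk_rate = 100 MHz and hz = 12 MHz, div = DIV_ROUND_UP(100 MHz, 12 MHz) = 9. Nine is not a power of two, so mbrdiv = fls(9) = 4, the divider becomes 2^4 = 16 (MBR is programmed with mbrdiv - 1 = 3) and SCK ends up at 6.25 MHz, the closest rate that does not exceed the request. For an exact power of two such as div = 8, mbrdiv = fls(8) - 1 = 3 and SCK is exactly 12.5 MHz.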
+
+static int stm32_spi_xfer(struct udevice *slave, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(slave);
+ struct dm_spi_slave_plat *slave_plat;
+ struct stm32_spi_priv *priv = dev_get_priv(bus);
+ u32 sr;
+ u32 ifcr = 0;
+ u32 xferlen;
+ u32 mode;
+ int xfer_status = 0;
+
+ xferlen = bitlen / 8;
+
+ if (xferlen <= SPI_CR2_TSIZE)
+ writel(xferlen, priv->base + STM32_SPI_CR2);
+ else
+ return -EMSGSIZE;
+
+ priv->tx_buf = dout;
+ priv->rx_buf = din;
+ priv->tx_len = priv->tx_buf ? bitlen / 8 : 0;
+ priv->rx_len = priv->rx_buf ? bitlen / 8 : 0;
+
+ mode = SPI_FULL_DUPLEX;
+ if (!priv->tx_buf)
+ mode = SPI_SIMPLEX_RX;
+ else if (!priv->rx_buf)
+ mode = SPI_SIMPLEX_TX;
+
+ if (priv->cur_xferlen != xferlen || priv->cur_mode != mode) {
+ priv->cur_mode = mode;
+ priv->cur_xferlen = xferlen;
+
+ /* Disable the SPI hardware to unlock CFG1/CFG2 registers */
+ stm32_spi_disable(priv);
+
+ clrsetbits_le32(priv->base + STM32_SPI_CFG2, SPI_CFG2_COMM,
+ mode << SPI_CFG2_COMM_SHIFT);
+
+ stm32_spi_set_fthlv(bus, xferlen);
+
+ /* Enable the SPI hardware */
+ stm32_spi_enable(priv);
+ }
+
+ dev_dbg(bus, "priv->tx_len=%d priv->rx_len=%d\n",
+ priv->tx_len, priv->rx_len);
+
+ slave_plat = dev_get_parent_plat(slave);
+ if (flags & SPI_XFER_BEGIN)
+ stm32_spi_set_cs(bus, slave_plat->cs, false);
+
+	/* Make sure data is in the FIFO before starting the transfer */
+ if (priv->tx_buf)
+ stm32_spi_write_txfifo(priv);
+
+ setbits_le32(priv->base + STM32_SPI_CR1, SPI_CR1_CSTART);
+
+ while (1) {
+ sr = readl(priv->base + STM32_SPI_SR);
+
+ if (sr & SPI_SR_OVR) {
+ dev_err(bus, "Overrun: RX data lost\n");
+ xfer_status = -EIO;
+ break;
+ }
+
+ if (sr & SPI_SR_SUSP) {
+			dev_warn(bus, "System is too slow, limiting data throughput\n");
+
+ if (priv->rx_buf && priv->rx_len > 0)
+ stm32_spi_read_rxfifo(priv);
+
+ ifcr |= SPI_SR_SUSP;
+ }
+
+ if (sr & SPI_SR_TXTF)
+ ifcr |= SPI_SR_TXTF;
+
+ if (sr & SPI_SR_TXP)
+ if (priv->tx_buf && priv->tx_len > 0)
+ stm32_spi_write_txfifo(priv);
+
+ if (sr & SPI_SR_RXP)
+ if (priv->rx_buf && priv->rx_len > 0)
+ stm32_spi_read_rxfifo(priv);
+
+ if (sr & SPI_SR_EOT) {
+ if (priv->rx_buf && priv->rx_len > 0)
+ stm32_spi_read_rxfifo(priv);
+ break;
+ }
+
+ writel(ifcr, priv->base + STM32_SPI_IFCR);
+ }
+
+ /* clear status flags */
+ setbits_le32(priv->base + STM32_SPI_IFCR, SPI_IFCR_ALL);
+ stm32_spi_stopxfer(bus);
+
+ if (flags & SPI_XFER_END)
+ stm32_spi_set_cs(bus, slave_plat->cs, true);
+
+ return xfer_status;
+}
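For illustration only, not part of the patch: the xfer op above is normally driven through the generic spi_claim_bus()/spi_xfer()/spi_release_bus() API. A hedged sketch of a full-duplex 4-byte exchange; the 0x9f opcode and the example_read_id() name are illustrative:

	#include <common.h>
	#include <spi.h>

	static int example_read_id(struct spi_slave *slave)
	{
		u8 dout[4] = { 0x9f, 0x00, 0x00, 0x00 };	/* command plus clocks for 3 reply bytes */
		u8 din[4];
		int ret;

		ret = spi_claim_bus(slave);
		if (ret)
			return ret;

		/* one transaction: assert CS, shift 32 bits, deassert CS */
		ret = spi_xfer(slave, sizeof(dout) * 8, dout, din,
			       SPI_XFER_BEGIN | SPI_XFER_END);
		spi_release_bus(slave);

		return ret;
	}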
+
+static int stm32_spi_get_fifo_size(struct udevice *dev)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(dev);
+ u32 count = 0;
+
+ stm32_spi_enable(priv);
+
+ while (readl(priv->base + STM32_SPI_SR) & SPI_SR_TXP)
+ writeb(++count, priv->base + STM32_SPI_TXDR);
+
+ stm32_spi_disable(priv);
+
+ dev_dbg(dev, "%d x 8-bit fifo size\n", count);
+
+ return count;
+}
+
+static int stm32_spi_probe(struct udevice *dev)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(dev);
+ unsigned long clk_rate;
+ int ret;
+ unsigned int i;
+
+ priv->base = dev_remap_addr(dev);
+ if (!priv->base)
+ return -EINVAL;
+
+ /* enable clock */
+ ret = clk_get_by_index(dev, 0, &priv->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(&priv->clk);
+ if (ret < 0)
+ return ret;
+
+ clk_rate = clk_get_rate(&priv->clk);
+ if (!clk_rate) {
+ ret = -EINVAL;
+ goto clk_err;
+ }
+
+ priv->bus_clk_rate = clk_rate;
+
+ /* perform reset */
+ ret = reset_get_by_index(dev, 0, &priv->rst_ctl);
+ if (ret < 0)
+ goto clk_err;
+
+ reset_assert(&priv->rst_ctl);
+ udelay(2);
+ reset_deassert(&priv->rst_ctl);
+
+ ret = gpio_request_list_by_name(dev, "cs-gpios", priv->cs_gpios,
+ ARRAY_SIZE(priv->cs_gpios), 0);
+ if (ret < 0) {
+		dev_err(dev, "Can't get cs-gpios: %d\n", ret);
+ goto reset_err;
+ }
+
+ priv->fifo_size = stm32_spi_get_fifo_size(dev);
+
+ priv->cur_mode = SPI_FULL_DUPLEX;
+ priv->cur_xferlen = 0;
+ priv->cur_bpw = SPI_DEFAULT_WORDLEN;
+ clrsetbits_le32(priv->base + STM32_SPI_CFG1, SPI_CFG1_DSIZE,
+ priv->cur_bpw - 1);
+
+ for (i = 0; i < ARRAY_SIZE(priv->cs_gpios); i++) {
+ if (!dm_gpio_is_valid(&priv->cs_gpios[i]))
+ continue;
+
+ dm_gpio_set_dir_flags(&priv->cs_gpios[i],
+ GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
+ }
+
+ /* Ensure I2SMOD bit is kept cleared */
+ clrbits_le32(priv->base + STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD);
+
+ /*
+ * - SS input value high
+ * - transmitter half duplex direction
+	 * - automatic communication suspend when the RX FIFO is full
+ */
+ setbits_le32(priv->base + STM32_SPI_CR1,
+ SPI_CR1_SSI | SPI_CR1_HDDIR | SPI_CR1_MASRX);
+
+ /*
+ * - Set the master mode (default Motorola mode)
+	 * - Assume a 1 master / n slaves configuration; the
+	 *   SS input value is determined by the SSI bit
+ * - keep control of all associated GPIOs
+ */
+ setbits_le32(priv->base + STM32_SPI_CFG2,
+ SPI_CFG2_MASTER | SPI_CFG2_SSM | SPI_CFG2_AFCNTR);
+
+ return 0;
+
+reset_err:
+ reset_free(&priv->rst_ctl);
+
+clk_err:
+ clk_disable(&priv->clk);
+ clk_free(&priv->clk);
+
+ return ret;
+};
+
+static int stm32_spi_remove(struct udevice *dev)
+{
+ struct stm32_spi_priv *priv = dev_get_priv(dev);
+ int ret;
+
+ stm32_spi_stopxfer(dev);
+ stm32_spi_disable(priv);
+
+ ret = reset_assert(&priv->rst_ctl);
+ if (ret < 0)
+ return ret;
+
+ reset_free(&priv->rst_ctl);
+
+ ret = clk_disable(&priv->clk);
+ if (ret < 0)
+ return ret;
+
+ clk_free(&priv->clk);
+
+ return ret;
+};
+
+static const struct dm_spi_ops stm32_spi_ops = {
+ .claim_bus = stm32_spi_claim_bus,
+ .release_bus = stm32_spi_release_bus,
+ .set_mode = stm32_spi_set_mode,
+ .set_speed = stm32_spi_set_speed,
+ .xfer = stm32_spi_xfer,
+};
+
+static const struct udevice_id stm32_spi_ids[] = {
+ { .compatible = "st,stm32h7-spi", },
+ { }
+};
+
+U_BOOT_DRIVER(stm32_spi) = {
+ .name = "stm32_spi",
+ .id = UCLASS_SPI,
+ .of_match = stm32_spi_ids,
+ .ops = &stm32_spi_ops,
+ .priv_auto = sizeof(struct stm32_spi_priv),
+ .probe = stm32_spi_probe,
+ .remove = stm32_spi_remove,
+};
diff --git a/roms/u-boot/drivers/spi/tegra114_spi.c b/roms/u-boot/drivers/spi/tegra114_spi.c
new file mode 100644
index 000000000..f0256d8e6
--- /dev/null
+++ b/roms/u-boot/drivers/spi/tegra114_spi.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVIDIA Tegra SPI controller (T114 and later)
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <time.h>
+#include <asm/io.h>
+#include <asm/arch/clock.h>
+#include <asm/arch-tegra/clk_rst.h>
+#include <spi.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "tegra_spi.h"
+
+/* COMMAND1 */
+#define SPI_CMD1_GO BIT(31)
+#define SPI_CMD1_M_S BIT(30)
+#define SPI_CMD1_MODE_MASK GENMASK(1, 0)
+#define SPI_CMD1_MODE_SHIFT 28
+#define SPI_CMD1_CS_SEL_MASK GENMASK(1, 0)
+#define SPI_CMD1_CS_SEL_SHIFT 26
+#define SPI_CMD1_CS_POL_INACTIVE3 BIT(25)
+#define SPI_CMD1_CS_POL_INACTIVE2 BIT(24)
+#define SPI_CMD1_CS_POL_INACTIVE1 BIT(23)
+#define SPI_CMD1_CS_POL_INACTIVE0 BIT(22)
+#define SPI_CMD1_CS_SW_HW BIT(21)
+#define SPI_CMD1_CS_SW_VAL BIT(20)
+#define SPI_CMD1_IDLE_SDA_MASK GENMASK(1, 0)
+#define SPI_CMD1_IDLE_SDA_SHIFT 18
+#define SPI_CMD1_BIDIR BIT(17)
+#define SPI_CMD1_LSBI_FE BIT(16)
+#define SPI_CMD1_LSBY_FE BIT(15)
+#define SPI_CMD1_BOTH_EN_BIT BIT(14)
+#define SPI_CMD1_BOTH_EN_BYTE BIT(13)
+#define SPI_CMD1_RX_EN BIT(12)
+#define SPI_CMD1_TX_EN BIT(11)
+#define SPI_CMD1_PACKED BIT(5)
+#define SPI_CMD1_BIT_LEN_MASK GENMASK(4, 0)
+#define SPI_CMD1_BIT_LEN_SHIFT 0
+
+/* COMMAND2 */
+#define SPI_CMD2_TX_CLK_TAP_DELAY BIT(6)
+#define SPI_CMD2_TX_CLK_TAP_DELAY_MASK GENMASK(11, 6)
+#define SPI_CMD2_RX_CLK_TAP_DELAY BIT(0)
+#define SPI_CMD2_RX_CLK_TAP_DELAY_MASK GENMASK(5, 0)
+
+/* TRANSFER STATUS */
+#define SPI_XFER_STS_RDY BIT(30)
+
+/* FIFO STATUS */
+#define SPI_FIFO_STS_CS_INACTIVE BIT(31)
+#define SPI_FIFO_STS_FRAME_END BIT(30)
+#define SPI_FIFO_STS_RX_FIFO_FLUSH BIT(15)
+#define SPI_FIFO_STS_TX_FIFO_FLUSH BIT(14)
+#define SPI_FIFO_STS_ERR BIT(8)
+#define SPI_FIFO_STS_TX_FIFO_OVF BIT(7)
+#define SPI_FIFO_STS_TX_FIFO_UNR BIT(6)
+#define SPI_FIFO_STS_RX_FIFO_OVF BIT(5)
+#define SPI_FIFO_STS_RX_FIFO_UNR BIT(4)
+#define SPI_FIFO_STS_TX_FIFO_FULL BIT(3)
+#define SPI_FIFO_STS_TX_FIFO_EMPTY BIT(2)
+#define SPI_FIFO_STS_RX_FIFO_FULL BIT(1)
+#define SPI_FIFO_STS_RX_FIFO_EMPTY BIT(0)
+
+#define SPI_TIMEOUT 1000
+#define TEGRA_SPI_MAX_FREQ 52000000
+
+struct spi_regs {
+ u32 command1; /* 000:SPI_COMMAND1 register */
+ u32 command2; /* 004:SPI_COMMAND2 register */
+ u32 timing1; /* 008:SPI_CS_TIM1 register */
+ u32 timing2; /* 00c:SPI_CS_TIM2 register */
+ u32 xfer_status;/* 010:SPI_TRANS_STATUS register */
+ u32 fifo_status;/* 014:SPI_FIFO_STATUS register */
+ u32 tx_data; /* 018:SPI_TX_DATA register */
+ u32 rx_data; /* 01c:SPI_RX_DATA register */
+ u32 dma_ctl; /* 020:SPI_DMA_CTL register */
+ u32 dma_blk; /* 024:SPI_DMA_BLK register */
+ u32 rsvd[56]; /* 028-107 reserved */
+ u32 tx_fifo; /* 108:SPI_FIFO1 register */
+ u32 rsvd2[31]; /* 10c-187 reserved */
+ u32 rx_fifo; /* 188:SPI_FIFO2 register */
+ u32 spare_ctl; /* 18c:SPI_SPARE_CTRL register */
+};
+
+struct tegra114_spi_priv {
+ struct spi_regs *regs;
+ unsigned int freq;
+ unsigned int mode;
+ int periph_id;
+ int valid;
+ int last_transaction_us;
+};
+
+static int tegra114_spi_of_to_plat(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+
+ plat->base = dev_read_addr(bus);
+ plat->periph_id = clock_decode_periph_id(bus);
+
+ if (plat->periph_id == PERIPH_ID_NONE) {
+ debug("%s: could not decode periph id %d\n", __func__,
+ plat->periph_id);
+ return -FDT_ERR_NOTFOUND;
+ }
+
+	/* Use 500 kHz as a suitable default */
+ plat->frequency = dev_read_u32_default(bus, "spi-max-frequency",
+ 500000);
+ plat->deactivate_delay_us = dev_read_u32_default(bus,
+ "spi-deactivate-delay", 0);
+ debug("%s: base=%#08lx, periph_id=%d, max-frequency=%d, deactivate_delay=%d\n",
+ __func__, plat->base, plat->periph_id, plat->frequency,
+ plat->deactivate_delay_us);
+
+ return 0;
+}
+
+static int tegra114_spi_probe(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra114_spi_priv *priv = dev_get_priv(bus);
+ struct spi_regs *regs;
+ ulong rate;
+
+ priv->regs = (struct spi_regs *)plat->base;
+ regs = priv->regs;
+
+ priv->last_transaction_us = timer_get_us();
+ priv->freq = plat->frequency;
+ priv->periph_id = plat->periph_id;
+
+ /*
+ * Change SPI clock to correct frequency, PLLP_OUT0 source, falling
+ * back to the oscillator if that is too fast.
+ */
+ rate = clock_start_periph_pll(priv->periph_id, CLOCK_ID_PERIPH,
+ priv->freq);
+ if (rate > priv->freq + 100000) {
+ rate = clock_start_periph_pll(priv->periph_id, CLOCK_ID_OSC,
+ priv->freq);
+ if (rate != priv->freq) {
+ printf("Warning: SPI '%s' requested clock %u, actual clock %lu\n",
+ bus->name, priv->freq, rate);
+ }
+ }
+ udelay(plat->deactivate_delay_us);
+
+ /* Clear stale status here */
+ setbits_le32(&regs->fifo_status,
+ SPI_FIFO_STS_ERR |
+ SPI_FIFO_STS_TX_FIFO_OVF |
+ SPI_FIFO_STS_TX_FIFO_UNR |
+ SPI_FIFO_STS_RX_FIFO_OVF |
+ SPI_FIFO_STS_RX_FIFO_UNR |
+ SPI_FIFO_STS_TX_FIFO_FULL |
+ SPI_FIFO_STS_TX_FIFO_EMPTY |
+ SPI_FIFO_STS_RX_FIFO_FULL |
+ SPI_FIFO_STS_RX_FIFO_EMPTY);
+ debug("%s: FIFO STATUS = %08x\n", __func__, readl(&regs->fifo_status));
+
+ setbits_le32(&priv->regs->command1, SPI_CMD1_M_S | SPI_CMD1_CS_SW_HW |
+ (priv->mode << SPI_CMD1_MODE_SHIFT) | SPI_CMD1_CS_SW_VAL);
+ debug("%s: COMMAND1 = %08x\n", __func__, readl(&regs->command1));
+
+ return 0;
+}
+
+/**
+ * Activate the CS by driving it LOW
+ *
+ * @param dev	SPI slave udevice that the controller is to
+ *		communicate with
+ */
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra114_spi_priv *priv = dev_get_priv(bus);
+
+ /* If it's too soon to do another transaction, wait */
+ if (pdata->deactivate_delay_us &&
+ priv->last_transaction_us) {
+ ulong delay_us; /* The delay completed so far */
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < pdata->deactivate_delay_us)
+ udelay(pdata->deactivate_delay_us - delay_us);
+ }
+
+ clrbits_le32(&priv->regs->command1, SPI_CMD1_CS_SW_VAL);
+}
+
+/**
+ * Deactivate the CS by driving it HIGH
+ *
+ * @param dev	SPI slave udevice that the controller is to
+ *		communicate with
+ */
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra114_spi_priv *priv = dev_get_priv(bus);
+
+ setbits_le32(&priv->regs->command1, SPI_CMD1_CS_SW_VAL);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (pdata->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+
+ debug("Deactivate CS, bus '%s'\n", bus->name);
+}
+
+static int tegra114_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *data_out, void *data_in,
+ unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra114_spi_priv *priv = dev_get_priv(bus);
+ struct spi_regs *regs = priv->regs;
+ u32 reg, tmpdout, tmpdin = 0;
+ const u8 *dout = data_out;
+ u8 *din = data_in;
+ int num_bytes;
+ int ret;
+
+ debug("%s: slave %u:%u dout %p din %p bitlen %u\n",
+ __func__, dev_seq(bus), spi_chip_select(dev), dout, din, bitlen);
+ if (bitlen % 8)
+ return -1;
+ num_bytes = bitlen / 8;
+
+ ret = 0;
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ /* clear all error status bits */
+ reg = readl(&regs->fifo_status);
+ writel(reg, &regs->fifo_status);
+
+ clrsetbits_le32(&regs->command1, SPI_CMD1_CS_SW_VAL,
+ SPI_CMD1_RX_EN | SPI_CMD1_TX_EN | SPI_CMD1_LSBY_FE |
+ (spi_chip_select(dev) << SPI_CMD1_CS_SEL_SHIFT));
+
+ /* set xfer size to 1 block (32 bits) */
+ writel(0, &regs->dma_blk);
+
+ /* handle data in 32-bit chunks */
+ while (num_bytes > 0) {
+ int bytes;
+ int tm, i;
+
+ tmpdout = 0;
+ bytes = (num_bytes > 4) ? 4 : num_bytes;
+
+ if (dout != NULL) {
+ for (i = 0; i < bytes; ++i)
+ tmpdout = (tmpdout << 8) | dout[i];
+ dout += bytes;
+ }
+
+ num_bytes -= bytes;
+
+ /* clear ready bit */
+ setbits_le32(&regs->xfer_status, SPI_XFER_STS_RDY);
+
+ clrsetbits_le32(&regs->command1,
+ SPI_CMD1_BIT_LEN_MASK << SPI_CMD1_BIT_LEN_SHIFT,
+ (bytes * 8 - 1) << SPI_CMD1_BIT_LEN_SHIFT);
+ writel(tmpdout, &regs->tx_fifo);
+ setbits_le32(&regs->command1, SPI_CMD1_GO);
+
+ /*
+ * Wait for SPI transmit FIFO to empty, or to time out.
+ * The RX FIFO status will be read and cleared last
+ */
+ for (tm = 0; tm < SPI_TIMEOUT; ++tm) {
+ u32 fifo_status, xfer_status;
+
+ xfer_status = readl(&regs->xfer_status);
+ if (!(xfer_status & SPI_XFER_STS_RDY))
+ continue;
+
+ fifo_status = readl(&regs->fifo_status);
+ if (fifo_status & SPI_FIFO_STS_ERR) {
+ debug("%s: got a fifo error: ", __func__);
+ if (fifo_status & SPI_FIFO_STS_TX_FIFO_OVF)
+ debug("tx FIFO overflow ");
+ if (fifo_status & SPI_FIFO_STS_TX_FIFO_UNR)
+ debug("tx FIFO underrun ");
+ if (fifo_status & SPI_FIFO_STS_RX_FIFO_OVF)
+ debug("rx FIFO overflow ");
+ if (fifo_status & SPI_FIFO_STS_RX_FIFO_UNR)
+ debug("rx FIFO underrun ");
+ if (fifo_status & SPI_FIFO_STS_TX_FIFO_FULL)
+ debug("tx FIFO full ");
+ if (fifo_status & SPI_FIFO_STS_TX_FIFO_EMPTY)
+ debug("tx FIFO empty ");
+ if (fifo_status & SPI_FIFO_STS_RX_FIFO_FULL)
+ debug("rx FIFO full ");
+ if (fifo_status & SPI_FIFO_STS_RX_FIFO_EMPTY)
+ debug("rx FIFO empty ");
+ debug("\n");
+ break;
+ }
+
+ if (!(fifo_status & SPI_FIFO_STS_RX_FIFO_EMPTY)) {
+ tmpdin = readl(&regs->rx_fifo);
+
+ /* swap bytes read in */
+ if (din != NULL) {
+ for (i = bytes - 1; i >= 0; --i) {
+ din[i] = tmpdin & 0xff;
+ tmpdin >>= 8;
+ }
+ din += bytes;
+ }
+
+ /* We can exit when we've had both RX and TX */
+ break;
+ }
+ }
+
+ if (tm >= SPI_TIMEOUT)
+ ret = tm;
+
+ /* clear ACK RDY, etc. bits */
+ writel(readl(&regs->fifo_status), &regs->fifo_status);
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ debug("%s: transfer ended. Value=%08x, fifo_status = %08x\n",
+ __func__, tmpdin, readl(&regs->fifo_status));
+
+ if (ret) {
+ printf("%s: timeout during SPI transfer, tm %d\n",
+ __func__, ret);
+ return -1;
+ }
+
+ return ret;
+}
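As a worked example (bytes illustrative, not from the patch): for a 3-byte chunk {0xAA, 0xBB, 0xCC}, the transmit loop packs tmpdout = 0x00AABBCC and BIT_LEN is programmed with 3 * 8 - 1 = 23; on receive, the unpacking loop walks i from 2 down to 0, filling din[2], din[1], din[0] from the successive low-order bytes of tmpdin, which restores the original byte order on the wire.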
+
+static int tegra114_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra114_spi_priv *priv = dev_get_priv(bus);
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+ priv->freq = speed;
+ debug("%s: regs=%p, speed=%d\n", __func__, priv->regs, priv->freq);
+
+ return 0;
+}
+
+static int tegra114_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct tegra114_spi_priv *priv = dev_get_priv(bus);
+
+ priv->mode = mode;
+ debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops tegra114_spi_ops = {
+ .xfer = tegra114_spi_xfer,
+ .set_speed = tegra114_spi_set_speed,
+ .set_mode = tegra114_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id tegra114_spi_ids[] = {
+ { .compatible = "nvidia,tegra114-spi" },
+ { }
+};
+
+U_BOOT_DRIVER(tegra114_spi) = {
+ .name = "tegra114_spi",
+ .id = UCLASS_SPI,
+ .of_match = tegra114_spi_ids,
+ .ops = &tegra114_spi_ops,
+ .of_to_plat = tegra114_spi_of_to_plat,
+ .plat_auto = sizeof(struct tegra_spi_plat),
+ .priv_auto = sizeof(struct tegra114_spi_priv),
+ .probe = tegra114_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/tegra20_sflash.c b/roms/u-boot/drivers/spi/tegra20_sflash.c
new file mode 100644
index 000000000..10e38cf83
--- /dev/null
+++ b/roms/u-boot/drivers/spi/tegra20_sflash.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2010-2013 NVIDIA Corporation
+ * With help from the mpc8xxx SPI driver
+ * With more help from omap3_spi SPI driver
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <time.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/pinmux.h>
+#include <asm/arch-tegra/clk_rst.h>
+#include <spi.h>
+#include <fdtdec.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "tegra_spi.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define SPI_CMD_GO BIT(30)
+#define SPI_CMD_ACTIVE_SCLK_SHIFT 26
+#define SPI_CMD_ACTIVE_SCLK_MASK (3 << SPI_CMD_ACTIVE_SCLK_SHIFT)
+#define SPI_CMD_CK_SDA BIT(21)
+#define SPI_CMD_ACTIVE_SDA_SHIFT 18
+#define SPI_CMD_ACTIVE_SDA_MASK (3 << SPI_CMD_ACTIVE_SDA_SHIFT)
+#define SPI_CMD_CS_POL BIT(16)
+#define SPI_CMD_TXEN BIT(15)
+#define SPI_CMD_RXEN BIT(14)
+#define SPI_CMD_CS_VAL BIT(13)
+#define SPI_CMD_CS_SOFT BIT(12)
+#define SPI_CMD_CS_DELAY BIT(9)
+#define SPI_CMD_CS3_EN BIT(8)
+#define SPI_CMD_CS2_EN BIT(7)
+#define SPI_CMD_CS1_EN BIT(6)
+#define SPI_CMD_CS0_EN BIT(5)
+#define SPI_CMD_BIT_LENGTH BIT(4)
+#define SPI_CMD_BIT_LENGTH_MASK GENMASK(4, 0)
+
+#define SPI_STAT_BSY BIT(31)
+#define SPI_STAT_RDY BIT(30)
+#define SPI_STAT_RXF_FLUSH BIT(29)
+#define SPI_STAT_TXF_FLUSH BIT(28)
+#define SPI_STAT_RXF_UNR BIT(27)
+#define SPI_STAT_TXF_OVF BIT(26)
+#define SPI_STAT_RXF_EMPTY BIT(25)
+#define SPI_STAT_RXF_FULL BIT(24)
+#define SPI_STAT_TXF_EMPTY BIT(23)
+#define SPI_STAT_TXF_FULL BIT(22)
+#define SPI_STAT_SEL_TXRX_N BIT(16)
+#define SPI_STAT_CUR_BLKCNT BIT(15)
+
+#define SPI_TIMEOUT 1000
+#define TEGRA_SPI_MAX_FREQ 52000000
+
+struct spi_regs {
+ u32 command; /* SPI_COMMAND_0 register */
+ u32 status; /* SPI_STATUS_0 register */
+ u32 rx_cmp; /* SPI_RX_CMP_0 register */
+ u32 dma_ctl; /* SPI_DMA_CTL_0 register */
+ u32 tx_fifo; /* SPI_TX_FIFO_0 register */
+ u32 rsvd[3]; /* offsets 0x14 to 0x1F reserved */
+ u32 rx_fifo; /* SPI_RX_FIFO_0 register */
+};
+
+struct tegra20_sflash_priv {
+ struct spi_regs *regs;
+ unsigned int freq;
+ unsigned int mode;
+ int periph_id;
+ int valid;
+ int last_transaction_us;
+};
+
+int tegra20_sflash_cs_info(struct udevice *bus, unsigned int cs,
+ struct spi_cs_info *info)
+{
+ /* Tegra20 SPI-Flash - only 1 device ('bus/cs') */
+ if (cs != 0)
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static int tegra20_sflash_of_to_plat(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ plat->base = dev_read_addr(bus);
+ plat->periph_id = clock_decode_periph_id(bus);
+
+ if (plat->periph_id == PERIPH_ID_NONE) {
+ debug("%s: could not decode periph id %d\n", __func__,
+ plat->periph_id);
+ return -FDT_ERR_NOTFOUND;
+ }
+
+	/* Use 500 kHz as a suitable default */
+ plat->frequency = fdtdec_get_int(blob, node, "spi-max-frequency",
+ 500000);
+ plat->deactivate_delay_us = fdtdec_get_int(blob, node,
+ "spi-deactivate-delay", 0);
+ debug("%s: base=%#08lx, periph_id=%d, max-frequency=%d, deactivate_delay=%d\n",
+ __func__, plat->base, plat->periph_id, plat->frequency,
+ plat->deactivate_delay_us);
+
+ return 0;
+}
+
+static int tegra20_sflash_probe(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra20_sflash_priv *priv = dev_get_priv(bus);
+
+ priv->regs = (struct spi_regs *)plat->base;
+
+ priv->last_transaction_us = timer_get_us();
+ priv->freq = plat->frequency;
+ priv->periph_id = plat->periph_id;
+
+ /* Change SPI clock to correct frequency, PLLP_OUT0 source */
+ clock_start_periph_pll(priv->periph_id, CLOCK_ID_PERIPH,
+ priv->freq);
+
+ return 0;
+}
+
+static int tegra20_sflash_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra20_sflash_priv *priv = dev_get_priv(bus);
+ struct spi_regs *regs = priv->regs;
+ u32 reg;
+
+ /* Change SPI clock to correct frequency, PLLP_OUT0 source */
+ clock_start_periph_pll(priv->periph_id, CLOCK_ID_PERIPH,
+ priv->freq);
+
+ /* Clear stale status here */
+ reg = SPI_STAT_RDY | SPI_STAT_RXF_FLUSH | SPI_STAT_TXF_FLUSH | \
+ SPI_STAT_RXF_UNR | SPI_STAT_TXF_OVF;
+ writel(reg, &regs->status);
+ debug("%s: STATUS = %08x\n", __func__, readl(&regs->status));
+
+ /*
+ * Use sw-controlled CS, so we can clock in data after ReadID, etc.
+ */
+ reg = (priv->mode & 1) << SPI_CMD_ACTIVE_SDA_SHIFT;
+ if (priv->mode & 2)
+ reg |= 1 << SPI_CMD_ACTIVE_SCLK_SHIFT;
+ clrsetbits_le32(&regs->command, SPI_CMD_ACTIVE_SCLK_MASK |
+ SPI_CMD_ACTIVE_SDA_MASK, SPI_CMD_CS_SOFT | reg);
+ debug("%s: COMMAND = %08x\n", __func__, readl(&regs->command));
+
+ /*
+ * SPI pins on Tegra20 are muxed - change pinmux later due to UART
+	 * SPI pins on Tegra20 are muxed with other functions; change the
+	 * pinmux here at claim time, rather than at probe, to avoid
+	 * disturbing the UART.
+ pinmux_set_func(PMUX_PINGRP_GMD, PMUX_FUNC_SFLASH);
+ pinmux_tristate_disable(PMUX_PINGRP_LSPI);
+ pinmux_set_func(PMUX_PINGRP_GMC, PMUX_FUNC_SFLASH);
+
+ return 0;
+}
+
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra20_sflash_priv *priv = dev_get_priv(bus);
+
+ /* If it's too soon to do another transaction, wait */
+ if (pdata->deactivate_delay_us &&
+ priv->last_transaction_us) {
+ ulong delay_us; /* The delay completed so far */
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < pdata->deactivate_delay_us)
+ udelay(pdata->deactivate_delay_us - delay_us);
+ }
+
+ /* CS is negated on Tegra, so drive a 1 to get a 0 */
+ setbits_le32(&priv->regs->command, SPI_CMD_CS_VAL);
+}
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra20_sflash_priv *priv = dev_get_priv(bus);
+
+ /* CS is negated on Tegra, so drive a 0 to get a 1 */
+ clrbits_le32(&priv->regs->command, SPI_CMD_CS_VAL);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (pdata->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+}
+
+static int tegra20_sflash_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *data_out, void *data_in,
+ unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra20_sflash_priv *priv = dev_get_priv(bus);
+ struct spi_regs *regs = priv->regs;
+ u32 reg, tmpdout, tmpdin = 0;
+ const u8 *dout = data_out;
+ u8 *din = data_in;
+ int num_bytes;
+ int ret;
+
+ debug("%s: slave %u:%u dout %p din %p bitlen %u\n",
+ __func__, dev_seq(bus), spi_chip_select(dev), dout, din, bitlen);
+ if (bitlen % 8)
+ return -1;
+ num_bytes = bitlen / 8;
+
+ ret = 0;
+
+ reg = readl(&regs->status);
+ writel(reg, &regs->status); /* Clear all SPI events via R/W */
+ debug("spi_xfer entry: STATUS = %08x\n", reg);
+
+ reg = readl(&regs->command);
+ reg |= SPI_CMD_TXEN | SPI_CMD_RXEN;
+ writel(reg, &regs->command);
+ debug("spi_xfer: COMMAND = %08x\n", readl(&regs->command));
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ /* handle data in 32-bit chunks */
+ while (num_bytes > 0) {
+ int bytes;
+ int is_read = 0;
+ int tm, i;
+
+ tmpdout = 0;
+ bytes = (num_bytes > 4) ? 4 : num_bytes;
+
+ if (dout != NULL) {
+ for (i = 0; i < bytes; ++i)
+ tmpdout = (tmpdout << 8) | dout[i];
+ }
+
+ num_bytes -= bytes;
+ if (dout)
+ dout += bytes;
+
+ clrsetbits_le32(&regs->command, SPI_CMD_BIT_LENGTH_MASK,
+ bytes * 8 - 1);
+ writel(tmpdout, &regs->tx_fifo);
+ setbits_le32(&regs->command, SPI_CMD_GO);
+
+ /*
+ * Wait for SPI transmit FIFO to empty, or to time out.
+ * The RX FIFO status will be read and cleared last
+ */
+ for (tm = 0, is_read = 0; tm < SPI_TIMEOUT; ++tm) {
+ u32 status;
+
+ status = readl(&regs->status);
+
+ /* We can exit when we've had both RX and TX activity */
+ if (is_read && (status & SPI_STAT_TXF_EMPTY))
+ break;
+
+ if ((status & (SPI_STAT_BSY | SPI_STAT_RDY)) !=
+ SPI_STAT_RDY)
+ tm++;
+
+ else if (!(status & SPI_STAT_RXF_EMPTY)) {
+ tmpdin = readl(&regs->rx_fifo);
+ is_read = 1;
+
+ /* swap bytes read in */
+ if (din != NULL) {
+ for (i = bytes - 1; i >= 0; --i) {
+ din[i] = tmpdin & 0xff;
+ tmpdin >>= 8;
+ }
+ din += bytes;
+ }
+ }
+ }
+
+ if (tm >= SPI_TIMEOUT)
+ ret = tm;
+
+ /* clear ACK RDY, etc. bits */
+ writel(readl(&regs->status), &regs->status);
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ debug("spi_xfer: transfer ended. Value=%08x, status = %08x\n",
+ tmpdin, readl(&regs->status));
+
+ if (ret) {
+ printf("spi_xfer: timeout during SPI transfer, tm %d\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int tegra20_sflash_set_speed(struct udevice *bus, uint speed)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra20_sflash_priv *priv = dev_get_priv(bus);
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+ priv->freq = speed;
+ debug("%s: regs=%p, speed=%d\n", __func__, priv->regs, priv->freq);
+
+ return 0;
+}
+
+static int tegra20_sflash_set_mode(struct udevice *bus, uint mode)
+{
+ struct tegra20_sflash_priv *priv = dev_get_priv(bus);
+
+ priv->mode = mode;
+ debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops tegra20_sflash_ops = {
+ .claim_bus = tegra20_sflash_claim_bus,
+ .xfer = tegra20_sflash_xfer,
+ .set_speed = tegra20_sflash_set_speed,
+ .set_mode = tegra20_sflash_set_mode,
+ .cs_info = tegra20_sflash_cs_info,
+};
+
+static const struct udevice_id tegra20_sflash_ids[] = {
+ { .compatible = "nvidia,tegra20-sflash" },
+ { }
+};
+
+U_BOOT_DRIVER(tegra20_sflash) = {
+ .name = "tegra20_sflash",
+ .id = UCLASS_SPI,
+ .of_match = tegra20_sflash_ids,
+ .ops = &tegra20_sflash_ops,
+ .of_to_plat = tegra20_sflash_of_to_plat,
+ .plat_auto = sizeof(struct tegra_spi_plat),
+ .priv_auto = sizeof(struct tegra20_sflash_priv),
+ .probe = tegra20_sflash_probe,
+};
diff --git a/roms/u-boot/drivers/spi/tegra20_slink.c b/roms/u-boot/drivers/spi/tegra20_slink.c
new file mode 100644
index 000000000..209ba8b0c
--- /dev/null
+++ b/roms/u-boot/drivers/spi/tegra20_slink.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVIDIA Tegra SPI-SLINK controller
+ *
+ * Copyright (c) 2010-2013 NVIDIA Corporation
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <time.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/arch/clock.h>
+#include <asm/arch-tegra/clk_rst.h>
+#include <spi.h>
+#include <fdtdec.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "tegra_spi.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* COMMAND */
+#define SLINK_CMD_ENB BIT(31)
+#define SLINK_CMD_GO BIT(30)
+#define SLINK_CMD_M_S BIT(28)
+#define SLINK_CMD_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_CMD_IDLE_SCLK_DRIVE_HIGH BIT(24)
+#define SLINK_CMD_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_CMD_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_CMD_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_CMD_CK_SDA BIT(21)
+#define SLINK_CMD_CS_POL BIT(13)
+#define SLINK_CMD_CS_VAL BIT(12)
+#define SLINK_CMD_CS_SOFT BIT(11)
+#define SLINK_CMD_BIT_LENGTH BIT(4)
+#define SLINK_CMD_BIT_LENGTH_MASK GENMASK(4, 0)
+/* COMMAND2 */
+#define SLINK_CMD2_TXEN BIT(30)
+#define SLINK_CMD2_RXEN BIT(31)
+#define SLINK_CMD2_SS_EN BIT(18)
+#define SLINK_CMD2_SS_EN_SHIFT 18
+#define SLINK_CMD2_SS_EN_MASK GENMASK(19, 18)
+#define SLINK_CMD2_CS_ACTIVE_BETWEEN BIT(17)
+/* STATUS */
+#define SLINK_STAT_BSY BIT(31)
+#define SLINK_STAT_RDY BIT(30)
+#define SLINK_STAT_ERR BIT(29)
+#define SLINK_STAT_RXF_FLUSH BIT(27)
+#define SLINK_STAT_TXF_FLUSH BIT(26)
+#define SLINK_STAT_RXF_OVF BIT(25)
+#define SLINK_STAT_TXF_UNR BIT(24)
+#define SLINK_STAT_RXF_EMPTY BIT(23)
+#define SLINK_STAT_RXF_FULL BIT(22)
+#define SLINK_STAT_TXF_EMPTY BIT(21)
+#define SLINK_STAT_TXF_FULL BIT(20)
+#define SLINK_STAT_TXF_OVF BIT(19)
+#define SLINK_STAT_RXF_UNR BIT(18)
+#define SLINK_STAT_CUR_BLKCNT BIT(15)
+/* STATUS2 */
+#define SLINK_STAT2_RXF_FULL_CNT BIT(16)
+#define SLINK_STAT2_TXF_FULL_CNT BIT(0)
+
+#define SPI_TIMEOUT 1000
+#define TEGRA_SPI_MAX_FREQ 52000000
+
+struct spi_regs {
+ u32 command; /* SLINK_COMMAND_0 register */
+ u32 command2; /* SLINK_COMMAND2_0 reg */
+ u32 status; /* SLINK_STATUS_0 register */
+ u32 reserved; /* Reserved offset 0C */
+ u32 mas_data; /* SLINK_MAS_DATA_0 reg */
+ u32 slav_data; /* SLINK_SLAVE_DATA_0 reg */
+ u32 dma_ctl; /* SLINK_DMA_CTL_0 register */
+ u32 status2; /* SLINK_STATUS2_0 reg */
+ u32 rsvd[56]; /* 0x20 to 0xFF reserved */
+ u32 tx_fifo; /* SLINK_TX_FIFO_0 reg off 100h */
+ u32 rsvd2[31]; /* 0x104 to 0x17F reserved */
+ u32 rx_fifo; /* SLINK_RX_FIFO_0 reg off 180h */
+};
+
+struct tegra30_spi_priv {
+ struct spi_regs *regs;
+ unsigned int freq;
+ unsigned int mode;
+ int periph_id;
+ int valid;
+ int last_transaction_us;
+};
+
+struct tegra_spi_slave {
+ struct spi_slave slave;
+ struct tegra30_spi_priv *ctrl;
+};
+
+static int tegra30_spi_of_to_plat(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ plat->base = dev_read_addr(bus);
+ plat->periph_id = clock_decode_periph_id(bus);
+
+ if (plat->periph_id == PERIPH_ID_NONE) {
+ debug("%s: could not decode periph id %d\n", __func__,
+ plat->periph_id);
+ return -FDT_ERR_NOTFOUND;
+ }
+
+	/* Use 500 kHz as a suitable default */
+ plat->frequency = fdtdec_get_int(blob, node, "spi-max-frequency",
+ 500000);
+ plat->deactivate_delay_us = fdtdec_get_int(blob, node,
+ "spi-deactivate-delay", 0);
+ debug("%s: base=%#08lx, periph_id=%d, max-frequency=%d, deactivate_delay=%d\n",
+ __func__, plat->base, plat->periph_id, plat->frequency,
+ plat->deactivate_delay_us);
+
+ return 0;
+}
+
+static int tegra30_spi_probe(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra30_spi_priv *priv = dev_get_priv(bus);
+
+ priv->regs = (struct spi_regs *)plat->base;
+
+ priv->last_transaction_us = timer_get_us();
+ priv->freq = plat->frequency;
+ priv->periph_id = plat->periph_id;
+
+ /* Change SPI clock to correct frequency, PLLP_OUT0 source */
+ clock_start_periph_pll(priv->periph_id, CLOCK_ID_PERIPH,
+ priv->freq);
+
+ return 0;
+}
+
+static int tegra30_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra30_spi_priv *priv = dev_get_priv(bus);
+ struct spi_regs *regs = priv->regs;
+ u32 reg;
+
+ /* Change SPI clock to correct frequency, PLLP_OUT0 source */
+ clock_start_periph_pll(priv->periph_id, CLOCK_ID_PERIPH,
+ priv->freq);
+
+ /* Clear stale status here */
+ reg = SLINK_STAT_RDY | SLINK_STAT_RXF_FLUSH | SLINK_STAT_TXF_FLUSH | \
+ SLINK_STAT_RXF_UNR | SLINK_STAT_TXF_OVF;
+ writel(reg, &regs->status);
+ debug("%s: STATUS = %08x\n", __func__, readl(&regs->status));
+
+ /* Set master mode and sw controlled CS */
+ reg = readl(&regs->command);
+ reg |= SLINK_CMD_M_S | SLINK_CMD_CS_SOFT;
+ writel(reg, &regs->command);
+ debug("%s: COMMAND = %08x\n", __func__, readl(&regs->command));
+
+ return 0;
+}
+
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra30_spi_priv *priv = dev_get_priv(bus);
+
+ /* If it's too soon to do another transaction, wait */
+ if (pdata->deactivate_delay_us &&
+ priv->last_transaction_us) {
+ ulong delay_us; /* The delay completed so far */
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < pdata->deactivate_delay_us)
+ udelay(pdata->deactivate_delay_us - delay_us);
+ }
+
+ /* CS is negated on Tegra, so drive a 1 to get a 0 */
+ setbits_le32(&priv->regs->command, SLINK_CMD_CS_VAL);
+}
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra30_spi_priv *priv = dev_get_priv(bus);
+
+ /* CS is negated on Tegra, so drive a 0 to get a 1 */
+ clrbits_le32(&priv->regs->command, SLINK_CMD_CS_VAL);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (pdata->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+}
+
+static int tegra30_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *data_out, void *data_in,
+ unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra30_spi_priv *priv = dev_get_priv(bus);
+ struct spi_regs *regs = priv->regs;
+ u32 reg, tmpdout, tmpdin = 0;
+ const u8 *dout = data_out;
+ u8 *din = data_in;
+ int num_bytes;
+ int ret;
+
+ debug("%s: slave %u:%u dout %p din %p bitlen %u\n",
+ __func__, dev_seq(bus), spi_chip_select(dev), dout, din, bitlen);
+ if (bitlen % 8)
+ return -1;
+ num_bytes = bitlen / 8;
+
+ ret = 0;
+
+ reg = readl(&regs->status);
+ writel(reg, &regs->status); /* Clear all SPI events via R/W */
+ debug("%s entry: STATUS = %08x\n", __func__, reg);
+
+ reg = readl(&regs->status2);
+ writel(reg, &regs->status2); /* Clear all STATUS2 events via R/W */
+ debug("%s entry: STATUS2 = %08x\n", __func__, reg);
+
+ debug("%s entry: COMMAND = %08x\n", __func__, readl(&regs->command));
+
+ clrsetbits_le32(&regs->command2, SLINK_CMD2_SS_EN_MASK,
+ SLINK_CMD2_TXEN | SLINK_CMD2_RXEN |
+ (spi_chip_select(dev) << SLINK_CMD2_SS_EN_SHIFT));
+ debug("%s entry: COMMAND2 = %08x\n", __func__, readl(&regs->command2));
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ /* handle data in 32-bit chunks */
+ while (num_bytes > 0) {
+ int bytes;
+ int is_read = 0;
+ int tm, i;
+
+ tmpdout = 0;
+ bytes = (num_bytes > 4) ? 4 : num_bytes;
+
+ if (dout != NULL) {
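+			/* Pack the outgoing bytes MSB-first into the 32-bit shift word */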
+ for (i = 0; i < bytes; ++i)
+ tmpdout = (tmpdout << 8) | dout[i];
+ dout += bytes;
+ }
+
+ num_bytes -= bytes;
+
+ clrsetbits_le32(&regs->command, SLINK_CMD_BIT_LENGTH_MASK,
+ bytes * 8 - 1);
+ writel(tmpdout, &regs->tx_fifo);
+ setbits_le32(&regs->command, SLINK_CMD_GO);
+
+ /*
+ * Wait for SPI transmit FIFO to empty, or to time out.
+ * The RX FIFO status will be read and cleared last
+ */
+ for (tm = 0, is_read = 0; tm < SPI_TIMEOUT; ++tm) {
+ u32 status;
+
+ status = readl(&regs->status);
+
+ /* We can exit when we've had both RX and TX activity */
+ if (is_read && (status & SLINK_STAT_TXF_EMPTY))
+ break;
+
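+			/*
+			 * A busy/not-ready poll also increments tm below, so
+			 * such iterations count double towards the timeout.
+			 */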
+ if ((status & (SLINK_STAT_BSY | SLINK_STAT_RDY)) !=
+ SLINK_STAT_RDY)
+ tm++;
+
+ else if (!(status & SLINK_STAT_RXF_EMPTY)) {
+ tmpdin = readl(&regs->rx_fifo);
+ is_read = 1;
+
+ /* swap bytes read in */
+ if (din != NULL) {
+ for (i = bytes - 1; i >= 0; --i) {
+ din[i] = tmpdin & 0xff;
+ tmpdin >>= 8;
+ }
+ din += bytes;
+ }
+ }
+ }
+
+ if (tm >= SPI_TIMEOUT)
+ ret = tm;
+
+ /* clear ACK RDY, etc. bits */
+ writel(readl(&regs->status), &regs->status);
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ debug("%s: transfer ended. Value=%08x, status = %08x\n",
+ __func__, tmpdin, readl(&regs->status));
+
+ if (ret) {
+ printf("%s: timeout during SPI transfer, tm %d\n",
+ __func__, ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int tegra30_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra30_spi_priv *priv = dev_get_priv(bus);
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+ priv->freq = speed;
+ debug("%s: regs=%p, speed=%d\n", __func__, priv->regs, priv->freq);
+
+ return 0;
+}
+
+static int tegra30_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct tegra30_spi_priv *priv = dev_get_priv(bus);
+ struct spi_regs *regs = priv->regs;
+ u32 reg;
+
+ reg = readl(&regs->command);
+
+ /* Set CPOL and CPHA */
+ reg &= ~(SLINK_CMD_IDLE_SCLK_MASK | SLINK_CMD_CK_SDA);
+ if (mode & SPI_CPHA)
+ reg |= SLINK_CMD_CK_SDA;
+
+ if (mode & SPI_CPOL)
+ reg |= SLINK_CMD_IDLE_SCLK_DRIVE_HIGH;
+ else
+ reg |= SLINK_CMD_IDLE_SCLK_DRIVE_LOW;
+
+ writel(reg, &regs->command);
+
+ priv->mode = mode;
+ debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops tegra30_spi_ops = {
+ .claim_bus = tegra30_spi_claim_bus,
+ .xfer = tegra30_spi_xfer,
+ .set_speed = tegra30_spi_set_speed,
+ .set_mode = tegra30_spi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id tegra30_spi_ids[] = {
+ { .compatible = "nvidia,tegra20-slink" },
+ { }
+};
+
+U_BOOT_DRIVER(tegra30_spi) = {
+ .name = "tegra20_slink",
+ .id = UCLASS_SPI,
+ .of_match = tegra30_spi_ids,
+ .ops = &tegra30_spi_ops,
+ .of_to_plat = tegra30_spi_of_to_plat,
+ .plat_auto = sizeof(struct tegra_spi_plat),
+ .priv_auto = sizeof(struct tegra30_spi_priv),
+ .probe = tegra30_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/tegra210_qspi.c b/roms/u-boot/drivers/spi/tegra210_qspi.c
new file mode 100644
index 000000000..5c8c1859c
--- /dev/null
+++ b/roms/u-boot/drivers/spi/tegra210_qspi.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NVIDIA Tegra210 QSPI controller driver
+ *
+ * (C) Copyright 2015-2020 NVIDIA Corporation <www.nvidia.com>
+ *
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <time.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/arch/clock.h>
+#include <asm/arch-tegra/clk_rst.h>
+#include <spi.h>
+#include <fdtdec.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "tegra_spi.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* COMMAND1 */
+#define QSPI_CMD1_GO BIT(31)
+#define QSPI_CMD1_M_S BIT(30)
+#define QSPI_CMD1_MODE_MASK GENMASK(1,0)
+#define QSPI_CMD1_MODE_SHIFT 28
+#define QSPI_CMD1_CS_SEL_MASK GENMASK(1,0)
+#define QSPI_CMD1_CS_SEL_SHIFT 26
+#define QSPI_CMD1_CS_POL_INACTIVE0 BIT(22)
+#define QSPI_CMD1_CS_SW_HW BIT(21)
+#define QSPI_CMD1_CS_SW_VAL BIT(20)
+#define QSPI_CMD1_IDLE_SDA_MASK GENMASK(1,0)
+#define QSPI_CMD1_IDLE_SDA_SHIFT 18
+#define QSPI_CMD1_BIDIR BIT(17)
+#define QSPI_CMD1_LSBI_FE BIT(16)
+#define QSPI_CMD1_LSBY_FE BIT(15)
+#define QSPI_CMD1_BOTH_EN_BIT BIT(14)
+#define QSPI_CMD1_BOTH_EN_BYTE BIT(13)
+#define QSPI_CMD1_RX_EN BIT(12)
+#define QSPI_CMD1_TX_EN BIT(11)
+#define QSPI_CMD1_PACKED BIT(5)
+#define QSPI_CMD1_BITLEN_MASK GENMASK(4,0)
+#define QSPI_CMD1_BITLEN_SHIFT 0
+
+/* COMMAND2 */
+#define QSPI_CMD2_TX_CLK_TAP_DELAY_SHIFT 10
+#define QSPI_CMD2_TX_CLK_TAP_DELAY_MASK GENMASK(14,10)
+#define QSPI_CMD2_RX_CLK_TAP_DELAY_SHIFT 0
+#define QSPI_CMD2_RX_CLK_TAP_DELAY_MASK GENMASK(7,0)
+
+/* TRANSFER STATUS */
+#define QSPI_XFER_STS_RDY BIT(30)
+
+/* FIFO STATUS */
+#define QSPI_FIFO_STS_CS_INACTIVE BIT(31)
+#define QSPI_FIFO_STS_FRAME_END BIT(30)
+#define QSPI_FIFO_STS_RX_FIFO_FLUSH BIT(15)
+#define QSPI_FIFO_STS_TX_FIFO_FLUSH BIT(14)
+#define QSPI_FIFO_STS_ERR BIT(8)
+#define QSPI_FIFO_STS_TX_FIFO_OVF BIT(7)
+#define QSPI_FIFO_STS_TX_FIFO_UNR BIT(6)
+#define QSPI_FIFO_STS_RX_FIFO_OVF BIT(5)
+#define QSPI_FIFO_STS_RX_FIFO_UNR BIT(4)
+#define QSPI_FIFO_STS_TX_FIFO_FULL BIT(3)
+#define QSPI_FIFO_STS_TX_FIFO_EMPTY BIT(2)
+#define QSPI_FIFO_STS_RX_FIFO_FULL BIT(1)
+#define QSPI_FIFO_STS_RX_FIFO_EMPTY BIT(0)
+
+#define QSPI_TIMEOUT 1000
+
+struct qspi_regs {
+ u32 command1; /* 000:QSPI_COMMAND1 register */
+ u32 command2; /* 004:QSPI_COMMAND2 register */
+ u32 timing1; /* 008:QSPI_CS_TIM1 register */
+ u32 timing2; /* 00c:QSPI_CS_TIM2 register */
+ u32 xfer_status;/* 010:QSPI_TRANS_STATUS register */
+ u32 fifo_status;/* 014:QSPI_FIFO_STATUS register */
+ u32 tx_data; /* 018:QSPI_TX_DATA register */
+ u32 rx_data; /* 01c:QSPI_RX_DATA register */
+ u32 dma_ctl; /* 020:QSPI_DMA_CTL register */
+ u32 dma_blk; /* 024:QSPI_DMA_BLK register */
+ u32 rsvd[56]; /* 028-107 reserved */
+ u32 tx_fifo; /* 108:QSPI_FIFO1 register */
+ u32 rsvd2[31]; /* 10c-187 reserved */
+ u32 rx_fifo; /* 188:QSPI_FIFO2 register */
+ u32 spare_ctl; /* 18c:QSPI_SPARE_CTRL register */
+};
+
+struct tegra210_qspi_priv {
+ struct qspi_regs *regs;
+ unsigned int freq;
+ unsigned int mode;
+ int periph_id;
+ int valid;
+ int last_transaction_us;
+};
+
+static int tegra210_qspi_of_to_plat(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+
+ plat->base = dev_read_addr(bus);
+ plat->periph_id = clock_decode_periph_id(bus);
+
+ if (plat->periph_id == PERIPH_ID_NONE) {
+ debug("%s: could not decode periph id %d\n", __func__,
+ plat->periph_id);
+ return -FDT_ERR_NOTFOUND;
+ }
+
+ /* Use 500KHz as a suitable default */
+ plat->frequency = dev_read_u32_default(bus, "spi-max-frequency",
+ 500000);
+ plat->deactivate_delay_us = dev_read_u32_default(bus,
+ "spi-deactivate-delay",
+ 0);
+ debug("%s: base=%#08lx, periph_id=%d, max-frequency=%d, deactivate_delay=%d\n",
+ __func__, plat->base, plat->periph_id, plat->frequency,
+ plat->deactivate_delay_us);
+
+ return 0;
+}
+
+static int tegra210_qspi_probe(struct udevice *bus)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra210_qspi_priv *priv = dev_get_priv(bus);
+
+ priv->regs = (struct qspi_regs *)plat->base;
+ struct qspi_regs *regs = priv->regs;
+
+ priv->last_transaction_us = timer_get_us();
+ priv->freq = plat->frequency;
+ priv->periph_id = plat->periph_id;
+
+ debug("%s: Freq = %u, id = %d\n", __func__, priv->freq,
+ priv->periph_id);
+ /* Change SPI clock to correct frequency, PLLP_OUT0 source */
+ clock_start_periph_pll(priv->periph_id, CLOCK_ID_PERIPH, priv->freq);
+
+ /* Set tap delays here, clock change above resets QSPI controller */
+ u32 reg = (0x09 << QSPI_CMD2_TX_CLK_TAP_DELAY_SHIFT) |
+ (0x0C << QSPI_CMD2_RX_CLK_TAP_DELAY_SHIFT);
+ writel(reg, &regs->command2);
+ debug("%s: COMMAND2 = %08x\n", __func__, readl(&regs->command2));
+
+ return 0;
+}
+
+static int tegra210_qspi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra210_qspi_priv *priv = dev_get_priv(bus);
+ struct qspi_regs *regs = priv->regs;
+
+ debug("%s: FIFO STATUS = %08x\n", __func__, readl(&regs->fifo_status));
+
+ /* Set master mode and sw controlled CS */
+ setbits_le32(&regs->command1, QSPI_CMD1_M_S | QSPI_CMD1_CS_SW_HW |
+ (priv->mode << QSPI_CMD1_MODE_SHIFT));
+ debug("%s: COMMAND1 = %08x\n", __func__, readl(&regs->command1));
+
+ return 0;
+}
+
+/**
+ * Activate the CS by driving it LOW
+ *
+ * @param dev	Pointer to the SPI slave device whose chip select
+ *		is to be activated
+ */
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra210_qspi_priv *priv = dev_get_priv(bus);
+
+ /* If it's too soon to do another transaction, wait */
+ if (pdata->deactivate_delay_us &&
+ priv->last_transaction_us) {
+ ulong delay_us; /* The delay completed so far */
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < pdata->deactivate_delay_us)
+ udelay(pdata->deactivate_delay_us - delay_us);
+ }
+
+ clrbits_le32(&priv->regs->command1, QSPI_CMD1_CS_SW_VAL);
+}
+
+/**
+ * Deactivate the CS by driving it HIGH
+ *
+ * @param dev	Pointer to the SPI slave device whose chip select
+ *		is to be deactivated
+ */
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra_spi_plat *pdata = dev_get_plat(bus);
+ struct tegra210_qspi_priv *priv = dev_get_priv(bus);
+
+ setbits_le32(&priv->regs->command1, QSPI_CMD1_CS_SW_VAL);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (pdata->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+
+ debug("Deactivate CS, bus '%s'\n", bus->name);
+}
+
+static int tegra210_qspi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *data_out, void *data_in,
+ unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct tegra210_qspi_priv *priv = dev_get_priv(bus);
+ struct qspi_regs *regs = priv->regs;
+ u32 reg, tmpdout, tmpdin = 0;
+ const u8 *dout = data_out;
+ u8 *din = data_in;
+ int num_bytes, tm, ret;
+
+ debug("%s: slave %u:%u dout %p din %p bitlen %u\n",
+ __func__, dev_seq(bus), spi_chip_select(dev), dout, din, bitlen);
+ if (bitlen % 8)
+ return -1;
+ num_bytes = bitlen / 8;
+
+ ret = 0;
+
+ /* clear all error status bits */
+ reg = readl(&regs->fifo_status);
+ writel(reg, &regs->fifo_status);
+
+ /* flush RX/TX FIFOs */
+ setbits_le32(&regs->fifo_status,
+ (QSPI_FIFO_STS_RX_FIFO_FLUSH |
+ QSPI_FIFO_STS_TX_FIFO_FLUSH));
+
+ tm = QSPI_TIMEOUT;
+ while ((tm && readl(&regs->fifo_status) &
+ (QSPI_FIFO_STS_RX_FIFO_FLUSH |
+ QSPI_FIFO_STS_TX_FIFO_FLUSH))) {
+ tm--;
+ udelay(1);
+ }
+
+ if (!tm) {
+ printf("%s: timeout during QSPI FIFO flush!\n",
+ __func__);
+ return -1;
+ }
+
+ /*
+ * Notes:
+ * 1. don't set LSBY_FE, so no need to swap bytes from/to TX/RX FIFOs;
+ * 2. don't set RX_EN and TX_EN yet.
+ * (SW needs to make sure that while programming the blk_size,
+ * tx_en and rx_en bits must be zero)
+ * [TODO] I (Yen Lin) have problems when both RX/TX EN bits are set
+ * i.e., both dout and din are not NULL.
+ */
+ clrsetbits_le32(&regs->command1,
+ (QSPI_CMD1_LSBI_FE | QSPI_CMD1_LSBY_FE |
+ QSPI_CMD1_RX_EN | QSPI_CMD1_TX_EN),
+ (spi_chip_select(dev) << QSPI_CMD1_CS_SEL_SHIFT));
+
+ /* set xfer size to 1 block (32 bits) */
+ writel(0, &regs->dma_blk);
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ /* handle data in 32-bit chunks */
+ while (num_bytes > 0) {
+ int bytes;
+
+ tmpdout = 0;
+ bytes = (num_bytes > 4) ? 4 : num_bytes;
+
+ if (dout != NULL) {
+ memcpy((void *)&tmpdout, (void *)dout, bytes);
+ dout += bytes;
+ num_bytes -= bytes;
+ writel(tmpdout, &regs->tx_fifo);
+ setbits_le32(&regs->command1, QSPI_CMD1_TX_EN);
+ }
+
+ if (din != NULL)
+ setbits_le32(&regs->command1, QSPI_CMD1_RX_EN);
+
+ /* clear ready bit */
+ setbits_le32(&regs->xfer_status, QSPI_XFER_STS_RDY);
+
+ clrsetbits_le32(&regs->command1,
+ QSPI_CMD1_BITLEN_MASK << QSPI_CMD1_BITLEN_SHIFT,
+ (bytes * 8 - 1) << QSPI_CMD1_BITLEN_SHIFT);
+
+ /* Need to stabilize other reg bits before GO bit set.
+ * As per the TRM:
+ * "For successful operation at various freq combinations,
+ * a minimum of 4-5 spi_clk cycle delay might be required
+ * before enabling the PIO or DMA bits. The worst case delay
+ * calculation can be done considering slowest qspi_clk as
+ * 1MHz. Based on that 1us delay should be enough before
+ * enabling PIO or DMA." Padded another 1us for safety.
+ */
+ udelay(2);
+ setbits_le32(&regs->command1, QSPI_CMD1_GO);
+ udelay(1);
+
+ /*
+ * Wait for SPI transmit FIFO to empty, or to time out.
+ * The RX FIFO status will be read and cleared last
+ */
+ for (tm = 0; tm < QSPI_TIMEOUT; ++tm) {
+ u32 fifo_status, xfer_status;
+
+ xfer_status = readl(&regs->xfer_status);
+ if (!(xfer_status & QSPI_XFER_STS_RDY))
+ continue;
+
+ fifo_status = readl(&regs->fifo_status);
+ if (fifo_status & QSPI_FIFO_STS_ERR) {
+ debug("%s: got a fifo error: ", __func__);
+ if (fifo_status & QSPI_FIFO_STS_TX_FIFO_OVF)
+ debug("tx FIFO overflow ");
+ if (fifo_status & QSPI_FIFO_STS_TX_FIFO_UNR)
+ debug("tx FIFO underrun ");
+ if (fifo_status & QSPI_FIFO_STS_RX_FIFO_OVF)
+ debug("rx FIFO overflow ");
+ if (fifo_status & QSPI_FIFO_STS_RX_FIFO_UNR)
+ debug("rx FIFO underrun ");
+ if (fifo_status & QSPI_FIFO_STS_TX_FIFO_FULL)
+ debug("tx FIFO full ");
+ if (fifo_status & QSPI_FIFO_STS_TX_FIFO_EMPTY)
+ debug("tx FIFO empty ");
+ if (fifo_status & QSPI_FIFO_STS_RX_FIFO_FULL)
+ debug("rx FIFO full ");
+ if (fifo_status & QSPI_FIFO_STS_RX_FIFO_EMPTY)
+ debug("rx FIFO empty ");
+ debug("\n");
+ break;
+ }
+
+ if (!(fifo_status & QSPI_FIFO_STS_RX_FIFO_EMPTY)) {
+ tmpdin = readl(&regs->rx_fifo);
+ if (din != NULL) {
+ memcpy(din, &tmpdin, bytes);
+ din += bytes;
+ num_bytes -= bytes;
+ }
+ }
+ break;
+ }
+
+ if (tm >= QSPI_TIMEOUT)
+ ret = tm;
+
+ /* clear ACK RDY, etc. bits */
+ writel(readl(&regs->fifo_status), &regs->fifo_status);
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ debug("%s: transfer ended. Value=%08x, fifo_status = %08x\n",
+ __func__, tmpdin, readl(&regs->fifo_status));
+
+ if (ret) {
+ printf("%s: timeout during SPI transfer, tm %d\n",
+ __func__, ret);
+ return -1;
+ }
+
+ return ret;
+}
+
+static int tegra210_qspi_set_speed(struct udevice *bus, uint speed)
+{
+ struct tegra_spi_plat *plat = dev_get_plat(bus);
+ struct tegra210_qspi_priv *priv = dev_get_priv(bus);
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+ priv->freq = speed;
+ debug("%s: regs=%p, speed=%d\n", __func__, priv->regs, priv->freq);
+
+ return 0;
+}
+
+static int tegra210_qspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct tegra210_qspi_priv *priv = dev_get_priv(bus);
+
+ priv->mode = mode;
+ debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops tegra210_qspi_ops = {
+ .claim_bus = tegra210_qspi_claim_bus,
+ .xfer = tegra210_qspi_xfer,
+ .set_speed = tegra210_qspi_set_speed,
+ .set_mode = tegra210_qspi_set_mode,
+ /*
+ * cs_info is not needed, since we require all chip selects to be
+ * in the device tree explicitly
+ */
+};
+
+static const struct udevice_id tegra210_qspi_ids[] = {
+ { .compatible = "nvidia,tegra210-qspi" },
+ { }
+};
+
+U_BOOT_DRIVER(tegra210_qspi) = {
+ .name = "tegra210-qspi",
+ .id = UCLASS_SPI,
+ .of_match = tegra210_qspi_ids,
+ .ops = &tegra210_qspi_ops,
+ .of_to_plat = tegra210_qspi_of_to_plat,
+ .plat_auto = sizeof(struct tegra_spi_plat),
+ .priv_auto = sizeof(struct tegra210_qspi_priv),
+ .per_child_auto = sizeof(struct spi_slave),
+ .probe = tegra210_qspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/tegra_spi.h b/roms/u-boot/drivers/spi/tegra_spi.h
new file mode 100644
index 000000000..ab69ea42e
--- /dev/null
+++ b/roms/u-boot/drivers/spi/tegra_spi.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2014 Google, Inc
+ */
+
+struct tegra_spi_plat {
+ enum periph_id periph_id;
+ int frequency; /* Default clock frequency, -1 for none */
+ ulong base;
+ uint deactivate_delay_us; /* Delay to wait after deactivate */
+};
diff --git a/roms/u-boot/drivers/spi/ti_qspi.c b/roms/u-boot/drivers/spi/ti_qspi.c
new file mode 100644
index 000000000..c542f40c7
--- /dev/null
+++ b/roms/u-boot/drivers/spi/ti_qspi.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TI QSPI driver
+ *
+ * Copyright (C) 2013, Texas Instruments, Incorporated
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <asm/cache.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <asm/arch/omap.h>
+#include <malloc.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <dm.h>
+#include <asm/gpio.h>
+#include <asm/omap_gpio.h>
+#include <asm/omap_common.h>
+#include <asm/ti-common/ti-edma3.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <regmap.h>
+#include <syscon.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* ti qspi register bit masks */
+#define QSPI_TIMEOUT 2000000
+#define QSPI_FCLK 192000000
+#define QSPI_DRA7XX_FCLK 76800000
+#define QSPI_WLEN_MAX_BITS 128
+#define QSPI_WLEN_MAX_BYTES (QSPI_WLEN_MAX_BITS >> 3)
+#define QSPI_WLEN_MASK QSPI_WLEN(QSPI_WLEN_MAX_BITS)
+/* clock control */
+#define QSPI_CLK_EN BIT(31)
+#define QSPI_CLK_DIV_MAX 0xffff
+/* command */
+#define QSPI_EN_CS(n) (n << 28)
+#define QSPI_WLEN(n) ((n-1) << 19)
+#define QSPI_3_PIN BIT(18)
+#define QSPI_RD_SNGL BIT(16)
+#define QSPI_WR_SNGL (2 << 16)
+#define QSPI_INVAL (4 << 16)
+#define QSPI_RD_QUAD (7 << 16)
+/* device control */
+#define QSPI_CKPHA(n) (1 << (2 + n*8))
+#define QSPI_CSPOL(n) (1 << (1 + n*8))
+#define QSPI_CKPOL(n) (1 << (n*8))
+/* status */
+#define QSPI_WC BIT(1)
+#define QSPI_BUSY BIT(0)
+#define QSPI_WC_BUSY (QSPI_WC | QSPI_BUSY)
+#define QSPI_XFER_DONE QSPI_WC
+#define MM_SWITCH 0x01
+#define MEM_CS(cs) ((cs + 1) << 8)
+#define MEM_CS_UNSELECT 0xfffff8ff
+
+#define QSPI_SETUP0_READ_NORMAL (0x0 << 12)
+#define QSPI_SETUP0_READ_DUAL (0x1 << 12)
+#define QSPI_SETUP0_READ_QUAD (0x3 << 12)
+#define QSPI_SETUP0_ADDR_SHIFT (8)
+#define QSPI_SETUP0_DBITS_SHIFT (10)
+
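+/* setup0..setup3 are contiguous registers, so index the SETUP register by chip select */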
+#define TI_QSPI_SETUP_REG(priv, cs) (&(priv)->base->setup0 + (cs))
+
+/* ti qspi register set */
+struct ti_qspi_regs {
+ u32 pid;
+ u32 pad0[3];
+ u32 sysconfig;
+ u32 pad1[3];
+ u32 int_stat_raw;
+ u32 int_stat_en;
+ u32 int_en_set;
+ u32 int_en_ctlr;
+ u32 intc_eoi;
+ u32 pad2[3];
+ u32 clk_ctrl;
+ u32 dc;
+ u32 cmd;
+ u32 status;
+ u32 data;
+ u32 setup0;
+ u32 setup1;
+ u32 setup2;
+ u32 setup3;
+ u32 memswitch;
+ u32 data1;
+ u32 data2;
+ u32 data3;
+};
+
+/* ti qspi priv */
+struct ti_qspi_priv {
+ void *memory_map;
+ size_t mmap_size;
+ uint max_hz;
+ u32 num_cs;
+ struct ti_qspi_regs *base;
+ void *ctrl_mod_mmap;
+ ulong fclk;
+ unsigned int mode;
+ u32 cmd;
+ u32 dc;
+};
+
+static int ti_qspi_set_speed(struct udevice *bus, uint hz)
+{
+ struct ti_qspi_priv *priv = dev_get_priv(bus);
+ uint clk_div;
+
+ if (!hz)
+ clk_div = 0;
+ else
+ clk_div = DIV_ROUND_UP(priv->fclk, hz) - 1;
+
+ /* truncate clk_div value to QSPI_CLK_DIV_MAX */
+ if (clk_div > QSPI_CLK_DIV_MAX)
+ clk_div = QSPI_CLK_DIV_MAX;
+
+	debug("%s: hz: %d, clock divider %d\n", __func__, hz, clk_div);
+
+ /* disable SCLK */
+ writel(readl(&priv->base->clk_ctrl) & ~QSPI_CLK_EN,
+ &priv->base->clk_ctrl);
+ /* enable SCLK and program the clk divider */
+ writel(QSPI_CLK_EN | clk_div, &priv->base->clk_ctrl);
+
+ return 0;
+}
+
+static void ti_qspi_cs_deactivate(struct ti_qspi_priv *priv)
+{
+ writel(priv->cmd | QSPI_INVAL, &priv->base->cmd);
+ /* dummy readl to ensure bus sync */
+ readl(&priv->base->cmd);
+}
+
+static void ti_qspi_ctrl_mode_mmap(void *ctrl_mod_mmap, int cs, bool enable)
+{
+ u32 val;
+
+ val = readl(ctrl_mod_mmap);
+ if (enable)
+ val |= MEM_CS(cs);
+ else
+ val &= MEM_CS_UNSELECT;
+ writel(val, ctrl_mod_mmap);
+}
+
+static int ti_qspi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct dm_spi_slave_plat *slave = dev_get_parent_plat(dev);
+ struct ti_qspi_priv *priv;
+ struct udevice *bus;
+ uint words = bitlen >> 3; /* fixed 8-bit word length */
+ const uchar *txp = dout;
+ uchar *rxp = din;
+ uint status;
+ int timeout;
+ unsigned int cs = slave->cs;
+
+ bus = dev->parent;
+ priv = dev_get_priv(bus);
+
+ if (cs > priv->num_cs) {
+ debug("invalid qspi chip select\n");
+ return -EINVAL;
+ }
+
+ if (bitlen == 0)
+ return -1;
+
+ if (bitlen % 8) {
+ debug("spi_xfer: Non byte aligned SPI transfer\n");
+ return -1;
+ }
+
+ /* Setup command reg */
+ priv->cmd = 0;
+ priv->cmd |= QSPI_WLEN(8);
+ priv->cmd |= QSPI_EN_CS(cs);
+ if (priv->mode & SPI_3WIRE)
+ priv->cmd |= QSPI_3_PIN;
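+	/* The low 12 bits of the command word hold the frame length (FLEN); request the maximum */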
+ priv->cmd |= 0xfff;
+
+ while (words) {
+ u8 xfer_len = 0;
+
+ if (txp) {
+ u32 cmd = priv->cmd;
+
+ if (words >= QSPI_WLEN_MAX_BYTES) {
+ u32 *txbuf = (u32 *)txp;
+ u32 data;
+
+ data = cpu_to_be32(*txbuf++);
+ writel(data, &priv->base->data3);
+ data = cpu_to_be32(*txbuf++);
+ writel(data, &priv->base->data2);
+ data = cpu_to_be32(*txbuf++);
+ writel(data, &priv->base->data1);
+ data = cpu_to_be32(*txbuf++);
+ writel(data, &priv->base->data);
+ cmd &= ~QSPI_WLEN_MASK;
+ cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
+ xfer_len = QSPI_WLEN_MAX_BYTES;
+ } else {
+ writeb(*txp, &priv->base->data);
+ xfer_len = 1;
+ }
+ debug("tx cmd %08x dc %08x\n",
+ cmd | QSPI_WR_SNGL, priv->dc);
+ writel(cmd | QSPI_WR_SNGL, &priv->base->cmd);
+ status = readl(&priv->base->status);
+ timeout = QSPI_TIMEOUT;
+ while ((status & QSPI_WC_BUSY) != QSPI_XFER_DONE) {
+ if (--timeout < 0) {
+ printf("spi_xfer: TX timeout!\n");
+ return -1;
+ }
+ status = readl(&priv->base->status);
+ }
+ txp += xfer_len;
+ debug("tx done, status %08x\n", status);
+ }
+ if (rxp) {
+ debug("rx cmd %08x dc %08x\n",
+ ((u32)(priv->cmd | QSPI_RD_SNGL)), priv->dc);
+ writel(priv->cmd | QSPI_RD_SNGL, &priv->base->cmd);
+ status = readl(&priv->base->status);
+ timeout = QSPI_TIMEOUT;
+ while ((status & QSPI_WC_BUSY) != QSPI_XFER_DONE) {
+ if (--timeout < 0) {
+ printf("spi_xfer: RX timeout!\n");
+ return -1;
+ }
+ status = readl(&priv->base->status);
+ }
+ *rxp++ = readl(&priv->base->data);
+ xfer_len = 1;
+ debug("rx done, status %08x, read %02x\n",
+ status, *(rxp-1));
+ }
+ words -= xfer_len;
+ }
+
+ /* Terminate frame */
+ if (flags & SPI_XFER_END)
+ ti_qspi_cs_deactivate(priv);
+
+ return 0;
+}
+
+/* TODO: control from sf layer to here through dm-spi */
+static void ti_qspi_copy_mmap(void *data, void *offset, size_t len)
+{
+#if defined(CONFIG_TI_EDMA3) && !defined(CONFIG_DMA)
+ unsigned int addr = (unsigned int) (data);
+ unsigned int edma_slot_num = 1;
+
+ /* Invalidate the area, so no writeback into the RAM races with DMA */
+ invalidate_dcache_range(addr, addr + roundup(len, ARCH_DMA_MINALIGN));
+
+ /* enable edma3 clocks */
+ enable_edma3_clocks();
+
+ /* Call edma3 api to do actual DMA transfer */
+ edma3_transfer(EDMA3_BASE, edma_slot_num, data, offset, len);
+
+ /* disable edma3 clocks */
+ disable_edma3_clocks();
+#else
+ memcpy_fromio(data, offset, len);
+#endif
+
+ *((unsigned int *)offset) += len;
+}
+
+static void ti_qspi_setup_mmap_read(struct ti_qspi_priv *priv, int cs,
+ u8 opcode, u8 data_nbits, u8 addr_width,
+ u8 dummy_bytes)
+{
+ u32 memval = opcode;
+
+ switch (data_nbits) {
+ case 4:
+ memval |= QSPI_SETUP0_READ_QUAD;
+ break;
+ case 2:
+ memval |= QSPI_SETUP0_READ_DUAL;
+ break;
+ default:
+ memval |= QSPI_SETUP0_READ_NORMAL;
+ break;
+ }
+
+ memval |= ((addr_width - 1) << QSPI_SETUP0_ADDR_SHIFT |
+ dummy_bytes << QSPI_SETUP0_DBITS_SHIFT);
+
+ writel(memval, TI_QSPI_SETUP_REG(priv, cs));
+}
+
+static int ti_qspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct ti_qspi_priv *priv = dev_get_priv(bus);
+
+ priv->dc = 0;
+ if (mode & SPI_CPHA)
+ priv->dc |= QSPI_CKPHA(0);
+ if (mode & SPI_CPOL)
+ priv->dc |= QSPI_CKPOL(0);
+ if (mode & SPI_CS_HIGH)
+ priv->dc |= QSPI_CSPOL(0);
+
+ return 0;
+}
+
+static int ti_qspi_exec_mem_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct dm_spi_slave_plat *slave_plat;
+ struct ti_qspi_priv *priv;
+ struct udevice *bus;
+ u32 from = 0;
+ int ret = 0;
+
+ bus = slave->dev->parent;
+ priv = dev_get_priv(bus);
+ slave_plat = dev_get_parent_plat(slave->dev);
+
+ /* Only optimize read path. */
+ if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
+ !op->addr.nbytes || op->addr.nbytes > 4)
+ return -ENOTSUPP;
+
+ /* Address exceeds MMIO window size, fall back to regular mode. */
+ from = op->addr.val;
+ if (from + op->data.nbytes > priv->mmap_size)
+ return -ENOTSUPP;
+
+ ti_qspi_setup_mmap_read(priv, slave_plat->cs, op->cmd.opcode,
+ op->data.buswidth, op->addr.nbytes,
+ op->dummy.nbytes);
+
+ ti_qspi_copy_mmap((void *)op->data.buf.in,
+ (void *)priv->memory_map + from, op->data.nbytes);
+
+ return ret;
+}
+
+static int ti_qspi_claim_bus(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ struct ti_qspi_priv *priv;
+ struct udevice *bus;
+
+ bus = dev->parent;
+ priv = dev_get_priv(bus);
+
+ if (slave_plat->cs > priv->num_cs) {
+ debug("invalid qspi chip select\n");
+ return -EINVAL;
+ }
+
+ writel(MM_SWITCH, &priv->base->memswitch);
+ if (priv->ctrl_mod_mmap)
+ ti_qspi_ctrl_mode_mmap(priv->ctrl_mod_mmap,
+ slave_plat->cs, true);
+
+ writel(priv->dc, &priv->base->dc);
+ writel(0, &priv->base->cmd);
+ writel(0, &priv->base->data);
+
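+	/* Each chip select has its own 8-bit field in the device control register */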
+ priv->dc <<= slave_plat->cs * 8;
+ writel(priv->dc, &priv->base->dc);
+
+ return 0;
+}
+
+static int ti_qspi_release_bus(struct udevice *dev)
+{
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ struct ti_qspi_priv *priv;
+ struct udevice *bus;
+
+ bus = dev->parent;
+ priv = dev_get_priv(bus);
+
+ writel(~MM_SWITCH, &priv->base->memswitch);
+ if (priv->ctrl_mod_mmap)
+ ti_qspi_ctrl_mode_mmap(priv->ctrl_mod_mmap,
+ slave_plat->cs, false);
+
+ writel(0, &priv->base->dc);
+ writel(0, &priv->base->cmd);
+ writel(0, &priv->base->data);
+ writel(0, TI_QSPI_SETUP_REG(priv, slave_plat->cs));
+
+ return 0;
+}
+
+static int ti_qspi_probe(struct udevice *bus)
+{
+ struct ti_qspi_priv *priv = dev_get_priv(bus);
+
+ priv->fclk = dev_get_driver_data(bus);
+
+ return 0;
+}
+
+static void *map_syscon_chipselects(struct udevice *bus)
+{
+#if CONFIG_IS_ENABLED(SYSCON)
+ struct udevice *syscon;
+ struct regmap *regmap;
+ const fdt32_t *cell;
+ int len, err;
+
+ err = uclass_get_device_by_phandle(UCLASS_SYSCON, bus,
+ "syscon-chipselects", &syscon);
+ if (err) {
+ debug("%s: unable to find syscon device (%d)\n", __func__,
+ err);
+ return NULL;
+ }
+
+ regmap = syscon_get_regmap(syscon);
+ if (IS_ERR(regmap)) {
+ debug("%s: unable to find regmap (%ld)\n", __func__,
+ PTR_ERR(regmap));
+ return NULL;
+ }
+
+ cell = fdt_getprop(gd->fdt_blob, dev_of_offset(bus),
+ "syscon-chipselects", &len);
+ if (len < 2*sizeof(fdt32_t)) {
+ debug("%s: offset not available\n", __func__);
+ return NULL;
+ }
+
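+	/* The second cell of "syscon-chipselects" is the offset within the syscon range */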
+ return fdtdec_get_number(cell + 1, 1) + regmap_get_range(regmap, 0);
+#else
+ fdt_addr_t addr;
+ addr = devfdt_get_addr_index(bus, 2);
+ return (addr == FDT_ADDR_T_NONE) ? NULL :
+ map_physmem(addr, 0, MAP_NOCACHE);
+#endif
+}
+
+static int ti_qspi_of_to_plat(struct udevice *bus)
+{
+ struct ti_qspi_priv *priv = dev_get_priv(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+ fdt_addr_t mmap_addr;
+ fdt_addr_t mmap_size;
+
+ priv->ctrl_mod_mmap = map_syscon_chipselects(bus);
+ priv->base = map_physmem(dev_read_addr(bus),
+ sizeof(struct ti_qspi_regs), MAP_NOCACHE);
+ mmap_addr = devfdt_get_addr_size_index(bus, 1, &mmap_size);
+ priv->memory_map = map_physmem(mmap_addr, mmap_size, MAP_NOCACHE);
+ priv->mmap_size = mmap_size;
+
+ priv->max_hz = dev_read_u32_default(bus, "spi-max-frequency", 0);
+ if (!priv->max_hz) {
+ debug("Error: Max frequency missing\n");
+ return -ENODEV;
+ }
+ priv->num_cs = fdtdec_get_int(blob, node, "num-cs", 4);
+
+ debug("%s: regs=<0x%x>, max-frequency=%d\n", __func__,
+ (int)priv->base, priv->max_hz);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
+ .exec_op = ti_qspi_exec_mem_op,
+};
+
+static const struct dm_spi_ops ti_qspi_ops = {
+ .claim_bus = ti_qspi_claim_bus,
+ .release_bus = ti_qspi_release_bus,
+ .xfer = ti_qspi_xfer,
+ .set_speed = ti_qspi_set_speed,
+ .set_mode = ti_qspi_set_mode,
+ .mem_ops = &ti_qspi_mem_ops,
+};
+
+static const struct udevice_id ti_qspi_ids[] = {
+ { .compatible = "ti,dra7xxx-qspi", .data = QSPI_DRA7XX_FCLK},
+ { .compatible = "ti,am4372-qspi", .data = QSPI_FCLK},
+ { }
+};
+
+U_BOOT_DRIVER(ti_qspi) = {
+ .name = "ti_qspi",
+ .id = UCLASS_SPI,
+ .of_match = ti_qspi_ids,
+ .ops = &ti_qspi_ops,
+ .of_to_plat = ti_qspi_of_to_plat,
+ .priv_auto = sizeof(struct ti_qspi_priv),
+ .probe = ti_qspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/uniphier_spi.c b/roms/u-boot/drivers/spi/uniphier_spi.c
new file mode 100644
index 000000000..fcc1bfe64
--- /dev/null
+++ b/roms/u-boot/drivers/spi/uniphier_spi.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * uniphier_spi.c - Socionext UniPhier SPI driver
+ * Copyright 2019 Socionext, Inc.
+ */
+
+#include <clk.h>
+#include <common.h>
+#include <dm.h>
+#include <log.h>
+#include <time.h>
+#include <asm/global_data.h>
+#include <dm/device_compat.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <spi.h>
+#include <wait_bit.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define SSI_CTL 0x00
+#define SSI_CTL_EN BIT(0)
+
+#define SSI_CKS 0x04
+#define SSI_CKS_CKRAT_MASK GENMASK(7, 0)
+#define SSI_CKS_CKPHS BIT(14)
+#define SSI_CKS_CKINIT BIT(13)
+#define SSI_CKS_CKDLY BIT(12)
+
+#define SSI_TXWDS 0x08
+#define SSI_TXWDS_WDLEN_MASK GENMASK(13, 8)
+#define SSI_TXWDS_TDTF_MASK GENMASK(7, 6)
+#define SSI_TXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_RXWDS 0x0c
+#define SSI_RXWDS_RDTF_MASK GENMASK(7, 6)
+#define SSI_RXWDS_DTLEN_MASK GENMASK(5, 0)
+
+#define SSI_FPS 0x10
+#define SSI_FPS_FSPOL BIT(15)
+#define SSI_FPS_FSTRT BIT(14)
+
+#define SSI_SR 0x14
+#define SSI_SR_BUSY BIT(7)
+#define SSI_SR_TNF BIT(5)
+#define SSI_SR_RNE BIT(0)
+
+#define SSI_IE 0x18
+
+#define SSI_IC 0x1c
+#define SSI_IC_TCIC BIT(4)
+#define SSI_IC_RCIC BIT(3)
+#define SSI_IC_RORIC BIT(0)
+
+#define SSI_FC 0x20
+#define SSI_FC_TXFFL BIT(12)
+#define SSI_FC_TXFTH_MASK GENMASK(11, 8)
+#define SSI_FC_RXFFL BIT(4)
+#define SSI_FC_RXFTH_MASK GENMASK(3, 0)
+
+#define SSI_XDR 0x24 /* TXDR for write, RXDR for read */
+
+#define SSI_FIFO_DEPTH 8U
+
+#define SSI_REG_TIMEOUT (CONFIG_SYS_HZ / 100) /* 10 ms */
+#define SSI_XFER_TIMEOUT (CONFIG_SYS_HZ) /* 1 sec */
+
+#define SSI_CLK 50000000 /* internal I/O clock: 50MHz */
+
+struct uniphier_spi_plat {
+ void __iomem *base;
+ u32 frequency; /* input frequency */
+ u32 speed_hz;
+ uint deactivate_delay_us; /* Delay to wait after deactivate */
+ uint activate_delay_us; /* Delay to wait after activate */
+};
+
+struct uniphier_spi_priv {
+ void __iomem *base;
+ u8 mode;
+ u8 fifo_depth;
+ u8 bits_per_word;
+ ulong last_transaction_us; /* Time of last transaction end */
+};
+
+static void uniphier_spi_enable(struct uniphier_spi_priv *priv, int enable)
+{
+ u32 val;
+
+ val = readl(priv->base + SSI_CTL);
+ if (enable)
+ val |= SSI_CTL_EN;
+ else
+ val &= ~SSI_CTL_EN;
+ writel(val, priv->base + SSI_CTL);
+}
+
+static void uniphier_spi_regdump(struct uniphier_spi_priv *priv)
+{
+ pr_debug("CTL %08x\n", readl(priv->base + SSI_CTL));
+ pr_debug("CKS %08x\n", readl(priv->base + SSI_CKS));
+ pr_debug("TXWDS %08x\n", readl(priv->base + SSI_TXWDS));
+ pr_debug("RXWDS %08x\n", readl(priv->base + SSI_RXWDS));
+ pr_debug("FPS %08x\n", readl(priv->base + SSI_FPS));
+ pr_debug("SR %08x\n", readl(priv->base + SSI_SR));
+ pr_debug("IE %08x\n", readl(priv->base + SSI_IE));
+ pr_debug("IC %08x\n", readl(priv->base + SSI_IC));
+ pr_debug("FC %08x\n", readl(priv->base + SSI_FC));
+ pr_debug("XDR %08x\n", readl(priv->base + SSI_XDR));
+}
+
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_plat *plat = dev_get_plat(bus);
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ ulong delay_us; /* The delay completed so far */
+ u32 val;
+
+ /* If it's too soon to do another transaction, wait */
+ if (plat->deactivate_delay_us && priv->last_transaction_us) {
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < plat->deactivate_delay_us)
+ udelay(plat->deactivate_delay_us - delay_us);
+ }
+
+ val = readl(priv->base + SSI_FPS);
+ if (priv->mode & SPI_CS_HIGH)
+ val |= SSI_FPS_FSPOL;
+ else
+ val &= ~SSI_FPS_FSPOL;
+ writel(val, priv->base + SSI_FPS);
+
+ if (plat->activate_delay_us)
+ udelay(plat->activate_delay_us);
+}
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_plat *plat = dev_get_plat(bus);
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val;
+
+ val = readl(priv->base + SSI_FPS);
+ if (priv->mode & SPI_CS_HIGH)
+ val &= ~SSI_FPS_FSPOL;
+ else
+ val |= SSI_FPS_FSPOL;
+ writel(val, priv->base + SSI_FPS);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (plat->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+}
+
+static int uniphier_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val, size;
+
+ uniphier_spi_enable(priv, false);
+
+ /* disable interrupts */
+ writel(0, priv->base + SSI_IE);
+
+ /* bits_per_word */
+ size = priv->bits_per_word;
+ val = readl(priv->base + SSI_TXWDS);
+ val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
+ val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
+ val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_TXWDS);
+
+ val = readl(priv->base + SSI_RXWDS);
+ val &= ~SSI_RXWDS_DTLEN_MASK;
+ val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
+ writel(val, priv->base + SSI_RXWDS);
+
+ /* reset FIFOs */
+ val = SSI_FC_TXFFL | SSI_FC_RXFFL;
+ writel(val, priv->base + SSI_FC);
+
+	/* FIFO threshold */
+ val = readl(priv->base + SSI_FC);
+ val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
+ val |= FIELD_PREP(SSI_FC_TXFTH_MASK, priv->fifo_depth);
+ val |= FIELD_PREP(SSI_FC_RXFTH_MASK, priv->fifo_depth);
+ writel(val, priv->base + SSI_FC);
+
+ /* clear interrupts */
+ writel(SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC,
+ priv->base + SSI_IC);
+
+ uniphier_spi_enable(priv, true);
+
+ return 0;
+}
+
+static int uniphier_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+
+ uniphier_spi_enable(priv, false);
+
+ return 0;
+}
+
+static int uniphier_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ const u8 *tx_buf = dout;
+ u8 *rx_buf = din, buf;
+ u32 len = bitlen / 8;
+ u32 tx_len, rx_len;
+ u32 ts, status;
+ int ret = 0;
+
+ if (bitlen % 8) {
+ dev_err(dev, "Non byte aligned SPI transfer\n");
+ return -EINVAL;
+ }
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ uniphier_spi_enable(priv, true);
+
+ ts = get_timer(0);
+ tx_len = len;
+ rx_len = len;
+
+ uniphier_spi_regdump(priv);
+
+ while (tx_len || rx_len) {
+ ret = wait_for_bit_le32(priv->base + SSI_SR, SSI_SR_BUSY, false,
+ SSI_REG_TIMEOUT * 1000, false);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "access timeout\n");
+ break;
+ }
+
+ status = readl(priv->base + SSI_SR);
+ /* write the data into TX */
+ if (tx_len && (status & SSI_SR_TNF)) {
+ buf = tx_buf ? *tx_buf++ : 0;
+ writel(buf, priv->base + SSI_XDR);
+ tx_len--;
+ }
+
+ /* read the data from RX */
+ if (rx_len && (status & SSI_SR_RNE)) {
+ buf = readl(priv->base + SSI_XDR);
+ if (rx_buf)
+ *rx_buf++ = buf;
+ rx_len--;
+ }
+
+ if (get_timer(ts) >= SSI_XFER_TIMEOUT) {
+ dev_err(dev, "transfer timeout\n");
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ uniphier_spi_enable(priv, false);
+
+ return ret;
+}
+
+static int uniphier_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct uniphier_spi_plat *plat = dev_get_plat(bus);
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val, ckdiv;
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+
+ /* baudrate */
+ ckdiv = DIV_ROUND_UP(SSI_CLK, speed);
+ ckdiv = round_up(ckdiv, 2);
+
+ val = readl(priv->base + SSI_CKS);
+ val &= ~SSI_CKS_CKRAT_MASK;
+ val |= ckdiv & SSI_CKS_CKRAT_MASK;
+ writel(val, priv->base + SSI_CKS);
+
+ return 0;
+}
+
+static int uniphier_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+ u32 val1, val2;
+
+ /*
+ * clock setting
+ * CKPHS capture timing. 0:rising edge, 1:falling edge
+ * CKINIT clock initial level. 0:low, 1:high
+ * CKDLY clock delay. 0:no delay, 1:delay depending on FSTRT
+ * (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
+ *
+ * frame setting
+	 * FSPOL	frame signal polarity. 0: low, 1: high
+ * FSTRT start frame timing
+ * 0: rising edge of clock, 1: falling edge of clock
+ */
+ val1 = readl(priv->base + SSI_CKS);
+ val2 = readl(priv->base + SSI_FPS);
+
+ switch (mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ /* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
+ val1 |= SSI_CKS_CKPHS | SSI_CKS_CKDLY;
+ val1 &= ~SSI_CKS_CKINIT;
+ val2 &= ~SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_1:
+ /* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
+ val1 &= ~(SSI_CKS_CKPHS | SSI_CKS_CKINIT | SSI_CKS_CKDLY);
+ val2 |= SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_2:
+ /* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
+ val1 |= SSI_CKS_CKINIT | SSI_CKS_CKDLY;
+ val1 &= ~SSI_CKS_CKPHS;
+ val2 |= SSI_FPS_FSTRT;
+ break;
+ case SPI_MODE_3:
+ /* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
+ val1 |= SSI_CKS_CKPHS | SSI_CKS_CKINIT;
+ val1 &= ~SSI_CKS_CKDLY;
+ val2 &= ~SSI_FPS_FSTRT;
+ break;
+ }
+
+ writel(val1, priv->base + SSI_CKS);
+ writel(val2, priv->base + SSI_FPS);
+
+ /* format */
+ val1 = readl(priv->base + SSI_TXWDS);
+ val2 = readl(priv->base + SSI_RXWDS);
+ if (mode & SPI_LSB_FIRST) {
+ val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
+ val2 |= FIELD_PREP(SSI_RXWDS_RDTF_MASK, 1);
+ }
+ writel(val1, priv->base + SSI_TXWDS);
+ writel(val2, priv->base + SSI_RXWDS);
+
+ priv->mode = mode;
+
+ return 0;
+}
+
+static int uniphier_spi_of_to_plat(struct udevice *bus)
+{
+ struct uniphier_spi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ plat->base = dev_read_addr_ptr(bus);
+
+ plat->frequency =
+ fdtdec_get_int(blob, node, "spi-max-frequency", 12500000);
+ plat->deactivate_delay_us =
+ fdtdec_get_int(blob, node, "spi-deactivate-delay", 0);
+ plat->activate_delay_us =
+ fdtdec_get_int(blob, node, "spi-activate-delay", 0);
+ plat->speed_hz = plat->frequency / 2;
+
+ return 0;
+}
+
+static int uniphier_spi_probe(struct udevice *bus)
+{
+ struct uniphier_spi_plat *plat = dev_get_plat(bus);
+ struct uniphier_spi_priv *priv = dev_get_priv(bus);
+
+ priv->base = plat->base;
+ priv->fifo_depth = SSI_FIFO_DEPTH;
+ priv->bits_per_word = 8;
+
+ return 0;
+}
+
+static const struct dm_spi_ops uniphier_spi_ops = {
+ .claim_bus = uniphier_spi_claim_bus,
+ .release_bus = uniphier_spi_release_bus,
+ .xfer = uniphier_spi_xfer,
+ .set_speed = uniphier_spi_set_speed,
+ .set_mode = uniphier_spi_set_mode,
+};
+
+static const struct udevice_id uniphier_spi_ids[] = {
+ { .compatible = "socionext,uniphier-scssi" },
+ { /* Sentinel */ }
+};
+
+U_BOOT_DRIVER(uniphier_spi) = {
+ .name = "uniphier_spi",
+ .id = UCLASS_SPI,
+ .of_match = uniphier_spi_ids,
+ .ops = &uniphier_spi_ops,
+ .of_to_plat = uniphier_spi_of_to_plat,
+ .plat_auto = sizeof(struct uniphier_spi_plat),
+ .priv_auto = sizeof(struct uniphier_spi_priv),
+ .probe = uniphier_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/xilinx_spi.c b/roms/u-boot/drivers/spi/xilinx_spi.c
new file mode 100644
index 000000000..b892cdae9
--- /dev/null
+++ b/roms/u-boot/drivers/spi/xilinx_spi.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Xilinx SPI driver
+ *
+ * Supports 8 bit SPI transfers only, with or w/o FIFO
+ *
+ * Based on bfin_spi.c, by way of altera_spi.c
+ * Copyright (c) 2015 Jagan Teki <jteki@openedev.com>
+ * Copyright (c) 2012 Stephan Linz <linz@li-pro.net>
+ * Copyright (c) 2010 Graeme Smecher <graeme.smecher@mail.mcgill.ca>
+ * Copyright (c) 2010 Thomas Chou <thomas@wytron.com.tw>
+ * Copyright (c) 2005-2008 Analog Devices Inc.
+ */
+
+#include <config.h>
+#include <common.h>
+#include <dm.h>
+#include <errno.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/io.h>
+#include <wait_bit.h>
+#include <linux/bitops.h>
+
+/*
+ * [0]: http://www.xilinx.com/support/documentation
+ *
+ * Xilinx SPI Register Definitions
+ * [1]: [0]/ip_documentation/xps_spi.pdf
+ * page 8, Register Descriptions
+ * [2]: [0]/ip_documentation/axi_spi_ds742.pdf
+ * page 7, Register Overview Table
+ */
+
+/* SPI Control Register (spicr), [1] p9, [2] p8 */
+#define SPICR_LSB_FIRST BIT(9)
+#define SPICR_MASTER_INHIBIT BIT(8)
+#define SPICR_MANUAL_SS BIT(7)
+#define SPICR_RXFIFO_RESEST BIT(6)
+#define SPICR_TXFIFO_RESEST BIT(5)
+#define SPICR_CPHA BIT(4)
+#define SPICR_CPOL BIT(3)
+#define SPICR_MASTER_MODE BIT(2)
+#define SPICR_SPE BIT(1)
+#define SPICR_LOOP BIT(0)
+
+/* SPI Status Register (spisr), [1] p11, [2] p10 */
+#define SPISR_SLAVE_MODE_SELECT BIT(5)
+#define SPISR_MODF BIT(4)
+#define SPISR_TX_FULL BIT(3)
+#define SPISR_TX_EMPTY BIT(2)
+#define SPISR_RX_FULL BIT(1)
+#define SPISR_RX_EMPTY BIT(0)
+
+/* SPI Data Transmit Register (spidtr), [1] p12, [2] p12 */
+#define SPIDTR_8BIT_MASK GENMASK(7, 0)
+#define SPIDTR_16BIT_MASK GENMASK(15, 0)
+#define SPIDTR_32BIT_MASK GENMASK(31, 0)
+
+/* SPI Data Receive Register (spidrr), [1] p12, [2] p12 */
+#define SPIDRR_8BIT_MASK GENMASK(7, 0)
+#define SPIDRR_16BIT_MASK GENMASK(15, 0)
+#define SPIDRR_32BIT_MASK GENMASK(31, 0)
+
+/* SPI Slave Select Register (spissr), [1] p13, [2] p13 */
+#define SPISSR_MASK(cs) (1 << (cs))
+#define SPISSR_ACT(cs) ~SPISSR_MASK(cs)
+#define SPISSR_OFF ~0UL
+
+/* SPI Software Reset Register (srr) */
+#define SPISSR_RESET_VALUE 0x0a
+
+#define XILSPI_MAX_XFER_BITS 8
+#define XILSPI_SPICR_DFLT_ON (SPICR_MANUAL_SS | SPICR_MASTER_MODE | \
+ SPICR_SPE)
+#define XILSPI_SPICR_DFLT_OFF (SPICR_MASTER_INHIBIT | SPICR_MANUAL_SS)
+
+#define XILINX_SPI_IDLE_VAL GENMASK(7, 0)
+
+#define XILINX_SPISR_TIMEOUT 10000 /* in milliseconds */
+
+/* xilinx spi register set */
+struct xilinx_spi_regs {
+ u32 __space0__[7];
+ u32 dgier; /* Device Global Interrupt Enable Register (DGIER) */
+ u32 ipisr; /* IP Interrupt Status Register (IPISR) */
+ u32 __space1__;
+ u32 ipier; /* IP Interrupt Enable Register (IPIER) */
+ u32 __space2__[5];
+	u32 srr;	/* Software Reset Register (SRR) */
+ u32 __space3__[7];
+ u32 spicr; /* SPI Control Register (SPICR) */
+ u32 spisr; /* SPI Status Register (SPISR) */
+ u32 spidtr; /* SPI Data Transmit Register (SPIDTR) */
+ u32 spidrr; /* SPI Data Receive Register (SPIDRR) */
+ u32 spissr; /* SPI Slave Select Register (SPISSR) */
+ u32 spitfor; /* SPI Transmit FIFO Occupancy Register (SPITFOR) */
+ u32 spirfor; /* SPI Receive FIFO Occupancy Register (SPIRFOR) */
+};
+
+/* xilinx spi priv */
+struct xilinx_spi_priv {
+ struct xilinx_spi_regs *regs;
+ unsigned int freq;
+ unsigned int mode;
+ unsigned int fifo_depth;
+ u8 startup;
+};
+
+static int xilinx_spi_probe(struct udevice *bus)
+{
+	struct xilinx_spi_priv *priv = dev_get_priv(bus);
+	struct xilinx_spi_regs *regs;
+
+	priv->regs = (struct xilinx_spi_regs *)dev_read_addr(bus);
+	regs = priv->regs;
+
+ priv->fifo_depth = dev_read_u32_default(bus, "fifo-size", 0);
+
+ writel(SPISSR_RESET_VALUE, &regs->srr);
+
+ return 0;
+}
+
+static void spi_cs_activate(struct udevice *dev, uint cs)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+
+ writel(SPISSR_ACT(cs), &regs->spissr);
+}
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+
+ writel(SPISSR_OFF, &regs->spissr);
+}
+
+static int xilinx_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+
+ writel(SPISSR_OFF, &regs->spissr);
+ writel(XILSPI_SPICR_DFLT_ON, &regs->spicr);
+
+ return 0;
+}
+
+static int xilinx_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+
+ writel(SPISSR_OFF, &regs->spissr);
+ writel(XILSPI_SPICR_DFLT_OFF, &regs->spicr);
+
+ return 0;
+}
+
+static u32 xilinx_spi_fill_txfifo(struct udevice *bus, const u8 *txp,
+ u32 txbytes)
+{
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+ unsigned char d;
+ u32 i = 0;
+
+ while (txbytes && !(readl(&regs->spisr) & SPISR_TX_FULL) &&
+ i < priv->fifo_depth) {
+ d = txp ? *txp++ : XILINX_SPI_IDLE_VAL;
+ debug("spi_xfer: tx:%x ", d);
+ /* write out and wait for processing (receive data) */
+ writel(d & SPIDTR_8BIT_MASK, &regs->spidtr);
+ txbytes--;
+ i++;
+ }
+
+ return i;
+}
+
+static u32 xilinx_spi_read_rxfifo(struct udevice *bus, u8 *rxp, u32 rxbytes)
+{
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+ unsigned char d;
+ unsigned int i = 0;
+
+ while (rxbytes && !(readl(&regs->spisr) & SPISR_RX_EMPTY)) {
+ d = readl(&regs->spidrr) & SPIDRR_8BIT_MASK;
+ if (rxp)
+ *rxp++ = d;
+ debug("spi_xfer: rx:%x\n", d);
+ rxbytes--;
+ i++;
+ }
+ debug("Rx_done\n");
+
+ return i;
+}
+
+static void xilinx_spi_startup_block(struct udevice *dev, unsigned int bytes,
+ const void *dout, void *din)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ const unsigned char *txp = dout;
+ unsigned char *rxp = din;
+ u32 reg;
+ u32 txbytes = bytes;
+ u32 rxbytes = bytes;
+
+ /*
+ * This loop runs two times. First time to send the command.
+ * Second time to transfer data. After transferring data,
+ * it sets txp to the initial value for the normal operation.
+ */
+ for ( ; priv->startup < 2; priv->startup++) {
+ xilinx_spi_fill_txfifo(bus, txp, txbytes);
+ reg = readl(&regs->spicr) & ~SPICR_MASTER_INHIBIT;
+ writel(reg, &regs->spicr);
+ xilinx_spi_read_rxfifo(bus, rxp, rxbytes);
+ txp = din;
+
+ if (priv->startup) {
+ spi_cs_deactivate(dev);
+ spi_cs_activate(dev, slave_plat->cs);
+ txp = dout;
+ }
+ }
+}
+
+static int xilinx_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev_get_parent(dev);
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ /* assume spi core configured to do 8 bit transfers */
+ unsigned int bytes = bitlen / XILSPI_MAX_XFER_BITS;
+ const unsigned char *txp = dout;
+ unsigned char *rxp = din;
+ u32 txbytes = bytes;
+ u32 rxbytes = bytes;
+ u32 reg, count;
+ int ret;
+
+ debug("spi_xfer: bus:%i cs:%i bitlen:%i bytes:%i flags:%lx\n",
+ dev_seq(bus), slave_plat->cs, bitlen, bytes, flags);
+
+ if (bitlen == 0)
+ goto done;
+
+ if (bitlen % XILSPI_MAX_XFER_BITS) {
+ printf("XILSPI warning: Not a multiple of %d bits\n",
+ XILSPI_MAX_XFER_BITS);
+ flags |= SPI_XFER_END;
+ goto done;
+ }
+
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev, slave_plat->cs);
+
+	/*
+	 * This is a workaround for the STARTUP block issue in the
+	 * SPI controller. The SPI clock is routed to the flash
+	 * through the STARTUP block, which does not provide the
+	 * clock as soon as the controller issues a command, so the
+	 * first command fails.
+	 */
+ xilinx_spi_startup_block(dev, bytes, dout, din);
+
+ while (txbytes && rxbytes) {
+ count = xilinx_spi_fill_txfifo(bus, txp, txbytes);
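+		/* Clear the master transaction inhibit bit to start shifting out the FIFO */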
+ reg = readl(&regs->spicr) & ~SPICR_MASTER_INHIBIT;
+ writel(reg, &regs->spicr);
+ txbytes -= count;
+ if (txp)
+ txp += count;
+
+ ret = wait_for_bit_le32(&regs->spisr, SPISR_TX_EMPTY, true,
+ XILINX_SPISR_TIMEOUT, false);
+ if (ret < 0) {
+ printf("XILSPI error: Xfer timeout\n");
+ return ret;
+ }
+
+ debug("txbytes:0x%x,txp:0x%p\n", txbytes, txp);
+ count = xilinx_spi_read_rxfifo(bus, rxp, rxbytes);
+ rxbytes -= count;
+ if (rxp)
+ rxp += count;
+ debug("rxbytes:0x%x rxp:0x%p\n", rxbytes, rxp);
+ }
+
+ done:
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ return 0;
+}
+
+static int xilinx_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+
+ priv->freq = speed;
+
+ debug("%s: regs=%p, speed=%d\n", __func__, priv->regs, priv->freq);
+
+ return 0;
+}
+
+static int xilinx_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct xilinx_spi_priv *priv = dev_get_priv(bus);
+ struct xilinx_spi_regs *regs = priv->regs;
+ u32 spicr;
+
+ spicr = readl(&regs->spicr);
+ if (mode & SPI_LSB_FIRST)
+ spicr |= SPICR_LSB_FIRST;
+ if (mode & SPI_CPHA)
+ spicr |= SPICR_CPHA;
+ if (mode & SPI_CPOL)
+ spicr |= SPICR_CPOL;
+ if (mode & SPI_LOOP)
+ spicr |= SPICR_LOOP;
+
+ writel(spicr, &regs->spicr);
+ priv->mode = mode;
+
+ debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops xilinx_spi_ops = {
+ .claim_bus = xilinx_spi_claim_bus,
+ .release_bus = xilinx_spi_release_bus,
+ .xfer = xilinx_spi_xfer,
+ .set_speed = xilinx_spi_set_speed,
+ .set_mode = xilinx_spi_set_mode,
+};
+
+static const struct udevice_id xilinx_spi_ids[] = {
+ { .compatible = "xlnx,xps-spi-2.00.a" },
+ { .compatible = "xlnx,xps-spi-2.00.b" },
+ { }
+};
+
+U_BOOT_DRIVER(xilinx_spi) = {
+ .name = "xilinx_spi",
+ .id = UCLASS_SPI,
+ .of_match = xilinx_spi_ids,
+ .ops = &xilinx_spi_ops,
+ .priv_auto = sizeof(struct xilinx_spi_priv),
+ .probe = xilinx_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/zynq_qspi.c b/roms/u-boot/drivers/spi/zynq_qspi.c
new file mode 100644
index 000000000..cf6da5340
--- /dev/null
+++ b/roms/u-boot/drivers/spi/zynq_qspi.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2013 Xilinx, Inc.
+ * (C) Copyright 2015 Jagan Teki <jteki@openedev.com>
+ *
+ * Xilinx Zynq Quad-SPI(QSPI) controller driver (master mode only)
+ */
+
+#include <clk.h>
+#include <common.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* zynq qspi register bit masks ZYNQ_QSPI_<REG>_<BIT>_MASK */
+#define ZYNQ_QSPI_CR_IFMODE_MASK	BIT(31)	/* Flash interface mode */
+#define ZYNQ_QSPI_CR_MSA_MASK		BIT(15)	/* Manual start enable */
+#define ZYNQ_QSPI_CR_MCS_MASK BIT(14) /* Manual chip select */
+#define ZYNQ_QSPI_CR_PCS_MASK BIT(10) /* Peri chip select */
+#define ZYNQ_QSPI_CR_FW_MASK GENMASK(7, 6) /* FIFO width */
+#define ZYNQ_QSPI_CR_SS_MASK GENMASK(13, 10) /* Slave Select */
+#define ZYNQ_QSPI_CR_BAUD_MASK GENMASK(5, 3) /* Baud rate div */
+#define ZYNQ_QSPI_CR_CPHA_MASK BIT(2) /* Clock phase */
+#define ZYNQ_QSPI_CR_CPOL_MASK BIT(1) /* Clock polarity */
+#define ZYNQ_QSPI_CR_MSTREN_MASK BIT(0) /* Mode select */
+#define ZYNQ_QSPI_IXR_RXNEMPTY_MASK BIT(4) /* RX_FIFO_not_empty */
+#define ZYNQ_QSPI_IXR_TXOW_MASK BIT(2) /* TX_FIFO_not_full */
+#define ZYNQ_QSPI_IXR_ALL_MASK GENMASK(6, 0) /* All IXR bits */
+#define ZYNQ_QSPI_ENR_SPI_EN_MASK BIT(0) /* SPI Enable */
+#define ZYNQ_QSPI_LQSPICFG_LQMODE_MASK BIT(31) /* Linear QSPI Mode */
+
+/* zynq qspi Transmit Data Register */
+#define ZYNQ_QSPI_TXD_00_00_OFFSET 0x1C /* Transmit 4-byte inst */
+#define ZYNQ_QSPI_TXD_00_01_OFFSET 0x80 /* Transmit 1-byte inst */
+#define ZYNQ_QSPI_TXD_00_10_OFFSET 0x84 /* Transmit 2-byte inst */
+#define ZYNQ_QSPI_TXD_00_11_OFFSET 0x88 /* Transmit 3-byte inst */
+
+#define ZYNQ_QSPI_TXFIFO_THRESHOLD 1 /* Tx FIFO threshold level*/
+#define ZYNQ_QSPI_RXFIFO_THRESHOLD 32 /* Rx FIFO threshold level */
+
+#define ZYNQ_QSPI_CR_BAUD_MAX 8 /* Baud rate divisor max val */
+#define ZYNQ_QSPI_CR_BAUD_SHIFT 3 /* Baud rate divisor shift */
+#define ZYNQ_QSPI_CR_SS_SHIFT 10 /* Slave select shift */
+
+#define ZYNQ_QSPI_FIFO_DEPTH 63
+#define ZYNQ_QSPI_WAIT (CONFIG_SYS_HZ / 100) /* 10 ms */
+
+/* zynq qspi register set */
+struct zynq_qspi_regs {
+ u32 cr; /* 0x00 */
+ u32 isr; /* 0x04 */
+ u32 ier; /* 0x08 */
+ u32 idr; /* 0x0C */
+ u32 imr; /* 0x10 */
+ u32 enr; /* 0x14 */
+ u32 dr; /* 0x18 */
+ u32 txd0r; /* 0x1C */
+ u32 drxr; /* 0x20 */
+ u32 sicr; /* 0x24 */
+ u32 txftr; /* 0x28 */
+ u32 rxftr; /* 0x2C */
+ u32 gpior; /* 0x30 */
+ u32 reserved0[19];
+ u32 txd1r; /* 0x80 */
+ u32 txd2r; /* 0x84 */
+ u32 txd3r; /* 0x88 */
+ u32 reserved1[5];
+ u32 lqspicfg; /* 0xA0 */
+ u32 lqspists; /* 0xA4 */
+};
+
+/* zynq qspi platform data */
+struct zynq_qspi_plat {
+ struct zynq_qspi_regs *regs;
+ u32 frequency; /* input frequency */
+ u32 speed_hz;
+};
+
+/* zynq qspi priv */
+struct zynq_qspi_priv {
+ struct zynq_qspi_regs *regs;
+ u8 cs;
+ u8 mode;
+ u8 fifo_depth;
+ u32 freq; /* required frequency */
+ const void *tx_buf;
+ void *rx_buf;
+ unsigned len;
+ int bytes_to_transfer;
+ int bytes_to_receive;
+ unsigned int is_inst;
+ unsigned cs_change:1;
+};
+
+static int zynq_qspi_of_to_plat(struct udevice *bus)
+{
+ struct zynq_qspi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ plat->regs = (struct zynq_qspi_regs *)fdtdec_get_addr(blob,
+ node, "reg");
+
+ return 0;
+}
+
+/**
+ * zynq_qspi_init_hw - Initialize the hardware
+ * @priv: Pointer to the zynq_qspi_priv structure
+ *
+ * The default settings of the QSPI controller's configurable parameters on
+ * reset are
+ * - Master mode
+ * - Baud rate divisor is set to 2
+ * - Threshold value for TX FIFO not full interrupt is set to 1
+ * - Flash memory interface mode enabled
+ * - Size of the word to be transferred as 8 bit
+ * This function performs the following actions
+ * - Disable and clear all the interrupts
+ * - Enable manual slave select
+ * - Enable auto start
+ * - Deselect all the chip select lines
+ * - Set the size of the word to be transferred as 32 bit
+ * - Set the little endian mode of TX FIFO and
+ * - Enable the QSPI controller
+ */
+static void zynq_qspi_init_hw(struct zynq_qspi_priv *priv)
+{
+ struct zynq_qspi_regs *regs = priv->regs;
+ u32 confr;
+
+ /* Disable QSPI */
+ writel(~ZYNQ_QSPI_ENR_SPI_EN_MASK, &regs->enr);
+
+ /* Disable Interrupts */
+ writel(ZYNQ_QSPI_IXR_ALL_MASK, &regs->idr);
+
+ /* Clear the TX and RX threshold reg */
+ writel(ZYNQ_QSPI_TXFIFO_THRESHOLD, &regs->txftr);
+ writel(ZYNQ_QSPI_RXFIFO_THRESHOLD, &regs->rxftr);
+
+ /* Clear the RX FIFO */
+ while (readl(&regs->isr) & ZYNQ_QSPI_IXR_RXNEMPTY_MASK)
+ readl(&regs->drxr);
+
+ /* Clear Interrupts */
+ writel(ZYNQ_QSPI_IXR_ALL_MASK, &regs->isr);
+
+ /* Manual slave select and Auto start */
+ confr = readl(&regs->cr);
+ confr &= ~ZYNQ_QSPI_CR_MSA_MASK;
+ confr |= ZYNQ_QSPI_CR_IFMODE_MASK | ZYNQ_QSPI_CR_MCS_MASK |
+ ZYNQ_QSPI_CR_PCS_MASK | ZYNQ_QSPI_CR_FW_MASK |
+ ZYNQ_QSPI_CR_MSTREN_MASK;
+ writel(confr, &regs->cr);
+
+ /* Disable the LQSPI feature */
+ confr = readl(&regs->lqspicfg);
+ confr &= ~ZYNQ_QSPI_LQSPICFG_LQMODE_MASK;
+ writel(confr, &regs->lqspicfg);
+
+ /* Enable SPI */
+ writel(ZYNQ_QSPI_ENR_SPI_EN_MASK, &regs->enr);
+}
+
+static int zynq_qspi_probe(struct udevice *bus)
+{
+ struct zynq_qspi_plat *plat = dev_get_plat(bus);
+ struct zynq_qspi_priv *priv = dev_get_priv(bus);
+ struct clk clk;
+ unsigned long clock;
+ int ret;
+
+ priv->regs = plat->regs;
+ priv->fifo_depth = ZYNQ_QSPI_FIFO_DEPTH;
+
+ ret = clk_get_by_name(bus, "ref_clk", &clk);
+ if (ret < 0) {
+ dev_err(bus, "failed to get clock\n");
+ return ret;
+ }
+
+ clock = clk_get_rate(&clk);
+ if (IS_ERR_VALUE(clock)) {
+ dev_err(bus, "failed to get rate\n");
+ return clock;
+ }
+
+ ret = clk_enable(&clk);
+ if (ret) {
+ dev_err(bus, "failed to enable clock\n");
+ return ret;
+ }
+
+ /* init the zynq spi hw */
+ zynq_qspi_init_hw(priv);
+
+ plat->frequency = clock;
+ plat->speed_hz = plat->frequency / 2;
+
+ debug("%s: max-frequency=%d\n", __func__, plat->speed_hz);
+
+ return 0;
+}
+
+/**
+ * zynq_qspi_read_data - Copy data to RX buffer
+ * @priv: Pointer to the zynq_qspi_priv structure
+ * @data: The 32 bit variable where data is stored
+ * @size: Number of bytes to be copied from data to RX buffer
+ */
+static void zynq_qspi_read_data(struct zynq_qspi_priv *priv, u32 data, u8 size)
+{
+ u8 byte3;
+
+ debug("%s: data 0x%04x rx_buf addr: 0x%08x size %d\n", __func__ ,
+ data, (unsigned)(priv->rx_buf), size);
+
+ if (priv->rx_buf) {
+ switch (size) {
+ case 1:
+ *((u8 *)priv->rx_buf) = data;
+ priv->rx_buf += 1;
+ break;
+ case 2:
+ *((u16 *)priv->rx_buf) = data;
+ priv->rx_buf += 2;
+ break;
+ case 3:
+ *((u16 *)priv->rx_buf) = data;
+ priv->rx_buf += 2;
+ byte3 = (u8)(data >> 16);
+ *((u8 *)priv->rx_buf) = byte3;
+ priv->rx_buf += 1;
+ break;
+ case 4:
+ /* Can not assume word aligned buffer */
+ memcpy(priv->rx_buf, &data, size);
+ priv->rx_buf += 4;
+ break;
+ default:
+ /* This will never execute */
+ break;
+ }
+ }
+ priv->bytes_to_receive -= size;
+ if (priv->bytes_to_receive < 0)
+ priv->bytes_to_receive = 0;
+}
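+
+/*
+ * Worked example (illustrative only, not part of the driver logic): on the
+ * little-endian Zynq CPU a 3-byte tail sits in the low bytes of the RX word,
+ * so data = 0x00CCBBAA is stored to the buffer as AA BB CC:
+ *
+ *	rx_buf[0] = data & 0xff;		// 0xAA, low byte of the u16 store
+ *	rx_buf[1] = (data >> 8) & 0xff;		// 0xBB, high byte of the u16 store
+ *	rx_buf[2] = (data >> 16) & 0xff;	// 0xCC, the separate u8 store
+ */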
+
+/**
+ * zynq_qspi_write_data - Copy data from TX buffer
+ * @priv: Pointer to the zynq_qspi_priv structure
+ * @data: Pointer to the 32 bit variable where data is to be copied
+ * @size: Number of bytes to be copied from TX buffer to data
+ */
+static void zynq_qspi_write_data(struct zynq_qspi_priv *priv,
+ u32 *data, u8 size)
+{
+ if (priv->tx_buf) {
+ switch (size) {
+ case 1:
+ *data = *((u8 *)priv->tx_buf);
+ priv->tx_buf += 1;
+ *data |= 0xFFFFFF00;
+ break;
+ case 2:
+ *data = *((u16 *)priv->tx_buf);
+ priv->tx_buf += 2;
+ *data |= 0xFFFF0000;
+ break;
+ case 3:
+ *data = *((u16 *)priv->tx_buf);
+ priv->tx_buf += 2;
+ *data |= (*((u8 *)priv->tx_buf) << 16);
+ priv->tx_buf += 1;
+ *data |= 0xFF000000;
+ break;
+ case 4:
+ /* Can not assume word aligned buffer */
+ memcpy(data, priv->tx_buf, size);
+ priv->tx_buf += 4;
+ break;
+ default:
+ /* This will never execute */
+ break;
+ }
+ } else {
+ *data = 0;
+ }
+
+ debug("%s: data 0x%08x tx_buf addr: 0x%08x size %d\n", __func__,
+ *data, (u32)priv->tx_buf, size);
+
+ priv->bytes_to_transfer -= size;
+ if (priv->bytes_to_transfer < 0)
+ priv->bytes_to_transfer = 0;
+}
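+
+/*
+ * Worked example (illustrative only): a 2-byte tail of 0xAA, 0xBB read from a
+ * little-endian buffer is sent as the word 0xFFFFBBAA; the unused upper bytes
+ * are padded with ones so the flash sees an idle (all-1s) data line:
+ *
+ *	u32 word = *((u16 *)tx_buf);	// 0x0000BBAA
+ *	word |= 0xFFFF0000;		// 0xFFFFBBAA
+ */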
+
+/**
+ * zynq_qspi_chipselect - Select or deselect the chip select line
+ * @priv: Pointer to the zynq_qspi_priv structure
+ * @is_on: Select (1) or deselect (0) the chip select line
+ */
+static void zynq_qspi_chipselect(struct zynq_qspi_priv *priv, int is_on)
+{
+ u32 confr;
+ struct zynq_qspi_regs *regs = priv->regs;
+
+ confr = readl(&regs->cr);
+
+ if (is_on) {
+ /* Select the slave */
+ confr &= ~ZYNQ_QSPI_CR_SS_MASK;
+ confr |= (~(1 << priv->cs) << ZYNQ_QSPI_CR_SS_SHIFT) &
+ ZYNQ_QSPI_CR_SS_MASK;
+ } else {
+ /* Deselect the slave */
+ confr |= ZYNQ_QSPI_CR_SS_MASK;
+ }
+
+ writel(confr, &regs->cr);
+}
+
+/**
+ * zynq_qspi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * @priv: Pointer to the zynq_qspi_priv structure
+ * @size: Maximum number of 32-bit FIFO words to write
+ */
+static void zynq_qspi_fill_tx_fifo(struct zynq_qspi_priv *priv, u32 size)
+{
+ u32 data = 0;
+ u32 fifocount = 0;
+ unsigned len, offset;
+ struct zynq_qspi_regs *regs = priv->regs;
+ static const unsigned offsets[4] = {
+ ZYNQ_QSPI_TXD_00_00_OFFSET, ZYNQ_QSPI_TXD_00_01_OFFSET,
+ ZYNQ_QSPI_TXD_00_10_OFFSET, ZYNQ_QSPI_TXD_00_11_OFFSET };
+
+ while ((fifocount < size) &&
+ (priv->bytes_to_transfer > 0)) {
+ if (priv->bytes_to_transfer >= 4) {
+ if (priv->tx_buf) {
+ memcpy(&data, priv->tx_buf, 4);
+ priv->tx_buf += 4;
+ } else {
+ data = 0;
+ }
+ writel(data, &regs->txd0r);
+ priv->bytes_to_transfer -= 4;
+ fifocount++;
+ } else {
+ /* Write TXD1, TXD2, TXD3 only if TxFIFO is empty. */
+ if (!(readl(&regs->isr)
+ & ZYNQ_QSPI_IXR_TXOW_MASK) &&
+ !priv->rx_buf)
+ return;
+ len = priv->bytes_to_transfer;
+ zynq_qspi_write_data(priv, &data, len);
+ offset = (priv->rx_buf) ? offsets[0] : offsets[len];
+ writel(data, &regs->cr + (offset / 4));
+ }
+ }
+}
+
+/**
+ * zynq_qspi_irq_poll - Polled interrupt handler of the QSPI controller
+ * @priv: Pointer to the zynq_qspi_priv structure
+ *
+ * This function handles the TX empty and mode fault conditions only.
+ * On a TX empty event it reads the received data from the RX FIFO and
+ * refills the TX FIFO if any data remains to be transferred.
+ * On a mode fault it reports the transfer as complete; the SPI subsystem
+ * then detects the error because the count of remaining bytes is non-zero.
+ *
+ * returns: -ETIMEDOUT if no status bit was set within the poll timeout,
+ * 0 if the transfer is still in progress,
+ * 1 when the transfer operation has completed
+ */
+static int zynq_qspi_irq_poll(struct zynq_qspi_priv *priv)
+{
+ struct zynq_qspi_regs *regs = priv->regs;
+ u32 rxindex = 0;
+ u32 rxcount;
+ u32 status, timeout;
+
+ /* Poll until any of the interrupt status bits are set */
+ timeout = get_timer(0);
+ do {
+ status = readl(&regs->isr);
+ } while ((status == 0) &&
+ (get_timer(timeout) < ZYNQ_QSPI_WAIT));
+
+ if (status == 0) {
+ printf("zynq_qspi_irq_poll: Timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ writel(status, &regs->isr);
+
+ /* Disable all interrupts */
+ writel(ZYNQ_QSPI_IXR_ALL_MASK, &regs->idr);
+ if ((status & ZYNQ_QSPI_IXR_TXOW_MASK) ||
+ (status & ZYNQ_QSPI_IXR_RXNEMPTY_MASK)) {
+ /*
+ * This bit is set when Tx FIFO has < THRESHOLD entries. We have
+ * the THRESHOLD value set to 1, so this bit indicates Tx FIFO
+ * is empty
+ */
+ rxcount = priv->bytes_to_receive - priv->bytes_to_transfer;
+ rxcount = (rxcount % 4) ? ((rxcount/4)+1) : (rxcount/4);
+ while ((rxindex < rxcount) &&
+ (rxindex < ZYNQ_QSPI_RXFIFO_THRESHOLD)) {
+ /* Read out the data from the RX FIFO */
+ u32 data;
+ data = readl(&regs->drxr);
+
+ if (priv->bytes_to_receive >= 4) {
+ if (priv->rx_buf) {
+ memcpy(priv->rx_buf, &data, 4);
+ priv->rx_buf += 4;
+ }
+ priv->bytes_to_receive -= 4;
+ } else {
+ zynq_qspi_read_data(priv, data,
+ priv->bytes_to_receive);
+ }
+ rxindex++;
+ }
+
+ if (priv->bytes_to_transfer) {
+ /* There is more data to send */
+ zynq_qspi_fill_tx_fifo(priv,
+ ZYNQ_QSPI_RXFIFO_THRESHOLD);
+
+ writel(ZYNQ_QSPI_IXR_ALL_MASK, &regs->ier);
+ } else {
+ /*
+ * If transfer and receive is completed then only send
+ * complete signal
+ */
+ if (!priv->bytes_to_receive) {
+ /* return operation complete */
+ writel(ZYNQ_QSPI_IXR_ALL_MASK,
+ &regs->idr);
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * zynq_qspi_start_transfer - Initiates the QSPI transfer
+ * @priv: Pointer to the zynq_qspi_priv structure
+ *
+ * This function fills the TX FIFO, starts the QSPI transfer, and waits for the
+ * transfer to be completed.
+ *
+ * returns: Number of bytes transferred in the last transfer
+ */
+static int zynq_qspi_start_transfer(struct zynq_qspi_priv *priv)
+{
+ u32 data = 0;
+ struct zynq_qspi_regs *regs = priv->regs;
+
+ debug("%s: qspi: 0x%08x transfer: 0x%08x len: %d\n", __func__,
+ (u32)priv, (u32)priv, priv->len);
+
+ priv->bytes_to_transfer = priv->len;
+ priv->bytes_to_receive = priv->len;
+
+ if (priv->len < 4)
+ zynq_qspi_fill_tx_fifo(priv, priv->len);
+ else
+ zynq_qspi_fill_tx_fifo(priv, priv->fifo_depth);
+
+ writel(ZYNQ_QSPI_IXR_ALL_MASK, &regs->ier);
+
+ /* wait for completion */
+ do {
+ data = zynq_qspi_irq_poll(priv);
+ } while (data == 0);
+
+ return (priv->len) - (priv->bytes_to_transfer);
+}
+
+static int zynq_qspi_transfer(struct zynq_qspi_priv *priv)
+{
+ unsigned cs_change = 1;
+ int status = 0;
+
+ while (1) {
+ /* Select the chip if required */
+ if (cs_change)
+ zynq_qspi_chipselect(priv, 1);
+
+ cs_change = priv->cs_change;
+
+ if (!priv->tx_buf && !priv->rx_buf && priv->len) {
+ status = -1;
+ break;
+ }
+
+ /* Request the transfer */
+ if (priv->len) {
+ status = zynq_qspi_start_transfer(priv);
+ priv->is_inst = 0;
+ }
+
+ if (status != priv->len) {
+ if (status > 0)
+ status = -EMSGSIZE;
+ debug("zynq_qspi_transfer:%d len:%d\n",
+ status, priv->len);
+ break;
+ }
+ status = 0;
+
+ if (cs_change)
+ /* Deselect the chip */
+ zynq_qspi_chipselect(priv, 0);
+
+ break;
+ }
+
+ return status;
+}
+
+static int zynq_qspi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_qspi_priv *priv = dev_get_priv(bus);
+ struct zynq_qspi_regs *regs = priv->regs;
+
+ writel(ZYNQ_QSPI_ENR_SPI_EN_MASK, &regs->enr);
+
+ return 0;
+}
+
+static int zynq_qspi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_qspi_priv *priv = dev_get_priv(bus);
+ struct zynq_qspi_regs *regs = priv->regs;
+
+ writel(~ZYNQ_QSPI_ENR_SPI_EN_MASK, &regs->enr);
+
+ return 0;
+}
+
+static int zynq_qspi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_qspi_priv *priv = dev_get_priv(bus);
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+
+ priv->cs = slave_plat->cs;
+ priv->tx_buf = dout;
+ priv->rx_buf = din;
+ priv->len = bitlen / 8;
+
+ debug("zynq_qspi_xfer: bus:%i cs:%i bitlen:%i len:%i flags:%lx\n",
+ dev_seq(bus), slave_plat->cs, bitlen, priv->len, flags);
+
+ /*
+ * Assume that a transfer which begins with data to transmit
+ * (SPI_XFER_BEGIN) starts with a device command/opcode.
+ */
+ if (dout && flags & SPI_XFER_BEGIN)
+ priv->is_inst = 1;
+ else
+ priv->is_inst = 0;
+
+ if (flags & SPI_XFER_END)
+ priv->cs_change = 1;
+ else
+ priv->cs_change = 0;
+
+ zynq_qspi_transfer(priv);
+
+ return 0;
+}
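+
+/*
+ * Usage sketch (hypothetical values, for illustration only): the SPI layer
+ * typically splits a flash read into two calls to this op, one carrying the
+ * command bytes (SPI_XFER_BEGIN, so is_inst is set) and one carrying the
+ * data (SPI_XFER_END, so the chip select is released afterwards):
+ *
+ *	u8 cmd[4] = { 0x03, a2, a1, a0 };	// READ opcode + 24-bit address
+ *	zynq_qspi_xfer(dev, 8 * sizeof(cmd), cmd, NULL, SPI_XFER_BEGIN);
+ *	zynq_qspi_xfer(dev, 8 * len, NULL, rx_buf, SPI_XFER_END);
+ */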
+
+static int zynq_qspi_set_speed(struct udevice *bus, uint speed)
+{
+ struct zynq_qspi_plat *plat = dev_get_plat(bus);
+ struct zynq_qspi_priv *priv = dev_get_priv(bus);
+ struct zynq_qspi_regs *regs = priv->regs;
+ uint32_t confr;
+ u8 baud_rate_val = 0;
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+
+ /* Set the clock frequency */
+ confr = readl(&regs->cr);
+ if (speed == 0) {
+ /* If the requested speed is 0, default to a divide-by-8 SCLK */
+ baud_rate_val = 0x2;
+ } else if (plat->speed_hz != speed) {
+ while ((baud_rate_val < ZYNQ_QSPI_CR_BAUD_MAX) &&
+ ((plat->frequency /
+ (2 << baud_rate_val)) > speed))
+ baud_rate_val++;
+
+ plat->speed_hz = speed / (2 << baud_rate_val);
+ }
+ confr &= ~ZYNQ_QSPI_CR_BAUD_MASK;
+ confr |= (baud_rate_val << ZYNQ_QSPI_CR_BAUD_SHIFT);
+
+ writel(confr, &regs->cr);
+ priv->freq = speed;
+
+ debug("%s: regs=%p, speed=%d\n", __func__, priv->regs, priv->freq);
+
+ return 0;
+}
+
+static int zynq_qspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct zynq_qspi_priv *priv = dev_get_priv(bus);
+ struct zynq_qspi_regs *regs = priv->regs;
+ uint32_t confr;
+
+ /* Set the SPI Clock phase and polarities */
+ confr = readl(&regs->cr);
+ confr &= ~(ZYNQ_QSPI_CR_CPHA_MASK | ZYNQ_QSPI_CR_CPOL_MASK);
+
+ if (mode & SPI_CPHA)
+ confr |= ZYNQ_QSPI_CR_CPHA_MASK;
+ if (mode & SPI_CPOL)
+ confr |= ZYNQ_QSPI_CR_CPOL_MASK;
+
+ writel(confr, &regs->cr);
+ priv->mode = mode;
+
+ debug("%s: regs=%p, mode=%d\n", __func__, priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops zynq_qspi_ops = {
+ .claim_bus = zynq_qspi_claim_bus,
+ .release_bus = zynq_qspi_release_bus,
+ .xfer = zynq_qspi_xfer,
+ .set_speed = zynq_qspi_set_speed,
+ .set_mode = zynq_qspi_set_mode,
+};
+
+static const struct udevice_id zynq_qspi_ids[] = {
+ { .compatible = "xlnx,zynq-qspi-1.0" },
+ { }
+};
+
+U_BOOT_DRIVER(zynq_qspi) = {
+ .name = "zynq_qspi",
+ .id = UCLASS_SPI,
+ .of_match = zynq_qspi_ids,
+ .ops = &zynq_qspi_ops,
+ .of_to_plat = zynq_qspi_of_to_plat,
+ .plat_auto = sizeof(struct zynq_qspi_plat),
+ .priv_auto = sizeof(struct zynq_qspi_priv),
+ .probe = zynq_qspi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/zynq_spi.c b/roms/u-boot/drivers/spi/zynq_spi.c
new file mode 100644
index 000000000..b3e0858eb
--- /dev/null
+++ b/roms/u-boot/drivers/spi/zynq_spi.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2013 Xilinx, Inc.
+ * (C) Copyright 2015 Jagan Teki <jteki@openedev.com>
+ *
+ * Xilinx Zynq PS SPI controller driver (master mode only)
+ */
+
+#include <common.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <log.h>
+#include <malloc.h>
+#include <spi.h>
+#include <time.h>
+#include <clk.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* zynq spi register bit masks ZYNQ_SPI_<REG>_<BIT>_MASK */
+#define ZYNQ_SPI_CR_MSA_MASK BIT(15) /* Manual start enb */
+#define ZYNQ_SPI_CR_MCS_MASK BIT(14) /* Manual chip select */
+#define ZYNQ_SPI_CR_CS_MASK GENMASK(13, 10) /* Chip select */
+#define ZYNQ_SPI_CR_BAUD_MASK GENMASK(5, 3) /* Baud rate div */
+#define ZYNQ_SPI_CR_CPHA_MASK BIT(2) /* Clock phase */
+#define ZYNQ_SPI_CR_CPOL_MASK BIT(1) /* Clock polarity */
+#define ZYNQ_SPI_CR_MSTREN_MASK BIT(0) /* Mode select */
+#define ZYNQ_SPI_IXR_RXNEMPTY_MASK BIT(4) /* RX_FIFO_not_empty */
+#define ZYNQ_SPI_IXR_TXOW_MASK BIT(2) /* TX_FIFO_not_full */
+#define ZYNQ_SPI_IXR_ALL_MASK GENMASK(6, 0) /* All IXR bits */
+#define ZYNQ_SPI_ENR_SPI_EN_MASK BIT(0) /* SPI Enable */
+
+#define ZYNQ_SPI_CR_BAUD_MAX 8 /* Baud rate divisor max val */
+#define ZYNQ_SPI_CR_BAUD_SHIFT 3 /* Baud rate divisor shift */
+#define ZYNQ_SPI_CR_SS_SHIFT 10 /* Slave select shift */
+
+#define ZYNQ_SPI_FIFO_DEPTH 128
+#define ZYNQ_SPI_WAIT (CONFIG_SYS_HZ / 100) /* 10 ms */
+
+/* zynq spi register set */
+struct zynq_spi_regs {
+ u32 cr; /* 0x00 */
+ u32 isr; /* 0x04 */
+ u32 ier; /* 0x08 */
+ u32 idr; /* 0x0C */
+ u32 imr; /* 0x10 */
+ u32 enr; /* 0x14 */
+ u32 dr; /* 0x18 */
+ u32 txdr; /* 0x1C */
+ u32 rxdr; /* 0x20 */
+};
+
+/* zynq spi platform data */
+struct zynq_spi_plat {
+ struct zynq_spi_regs *regs;
+ u32 frequency; /* input frequency */
+ u32 speed_hz;
+ uint deactivate_delay_us; /* Delay to wait after deactivate */
+ uint activate_delay_us; /* Delay to wait after activate */
+};
+
+/* zynq spi priv */
+struct zynq_spi_priv {
+ struct zynq_spi_regs *regs;
+ u8 cs;
+ u8 mode;
+ ulong last_transaction_us; /* Time of last transaction end */
+ u8 fifo_depth;
+ u32 freq; /* required frequency */
+};
+
+static int zynq_spi_of_to_plat(struct udevice *bus)
+{
+ struct zynq_spi_plat *plat = dev_get_plat(bus);
+ const void *blob = gd->fdt_blob;
+ int node = dev_of_offset(bus);
+
+ plat->regs = dev_read_addr_ptr(bus);
+
+ plat->deactivate_delay_us = fdtdec_get_int(blob, node,
+ "spi-deactivate-delay", 0);
+ plat->activate_delay_us = fdtdec_get_int(blob, node,
+ "spi-activate-delay", 0);
+
+ return 0;
+}
+
+static void zynq_spi_init_hw(struct zynq_spi_priv *priv)
+{
+ struct zynq_spi_regs *regs = priv->regs;
+ u32 confr;
+
+ /* Disable SPI */
+ confr = ZYNQ_SPI_ENR_SPI_EN_MASK;
+ writel(~confr, &regs->enr);
+
+ /* Disable Interrupts */
+ writel(ZYNQ_SPI_IXR_ALL_MASK, &regs->idr);
+
+ /* Clear RX FIFO */
+ while (readl(&regs->isr) &
+ ZYNQ_SPI_IXR_RXNEMPTY_MASK)
+ readl(&regs->rxdr);
+
+ /* Clear Interrupts */
+ writel(ZYNQ_SPI_IXR_ALL_MASK, &regs->isr);
+
+ /* Manual slave select and Auto start */
+ confr = ZYNQ_SPI_CR_MCS_MASK | ZYNQ_SPI_CR_CS_MASK |
+ ZYNQ_SPI_CR_MSTREN_MASK;
+ confr &= ~ZYNQ_SPI_CR_MSA_MASK;
+ writel(confr, &regs->cr);
+
+ /* Enable SPI */
+ writel(ZYNQ_SPI_ENR_SPI_EN_MASK, &regs->enr);
+}
+
+static int zynq_spi_probe(struct udevice *bus)
+{
+ struct zynq_spi_plat *plat = dev_get_plat(bus);
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct clk clk;
+ unsigned long clock;
+ int ret;
+
+ priv->regs = plat->regs;
+ priv->fifo_depth = ZYNQ_SPI_FIFO_DEPTH;
+
+ ret = clk_get_by_name(bus, "ref_clk", &clk);
+ if (ret < 0) {
+ dev_err(bus, "failed to get clock\n");
+ return ret;
+ }
+
+ clock = clk_get_rate(&clk);
+ if (IS_ERR_VALUE(clock)) {
+ dev_err(bus, "failed to get rate\n");
+ return clock;
+ }
+
+ ret = clk_enable(&clk);
+ if (ret) {
+ dev_err(bus, "failed to enable clock\n");
+ return ret;
+ }
+
+ /* init the zynq spi hw */
+ zynq_spi_init_hw(priv);
+
+ plat->frequency = clock;
+ plat->speed_hz = plat->frequency / 2;
+
+ debug("%s: max-frequency=%d\n", __func__, plat->speed_hz);
+
+ return 0;
+}
+
+static void spi_cs_activate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_spi_plat *plat = dev_get_plat(bus);
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct zynq_spi_regs *regs = priv->regs;
+ u32 cr;
+
+ /* If it's too soon to do another transaction, wait */
+ if (plat->deactivate_delay_us && priv->last_transaction_us) {
+ ulong delay_us; /* The delay completed so far */
+ delay_us = timer_get_us() - priv->last_transaction_us;
+ if (delay_us < plat->deactivate_delay_us)
+ udelay(plat->deactivate_delay_us - delay_us);
+ }
+
+ clrbits_le32(&regs->cr, ZYNQ_SPI_CR_CS_MASK);
+ cr = readl(&regs->cr);
+ /*
+ * CS calculation logic, CR bits [13:10]:
+ * xxx0 - cs0
+ * xx01 - cs1
+ * x011 - cs2
+ */
+ cr |= (~(1 << priv->cs) << ZYNQ_SPI_CR_SS_SHIFT) & ZYNQ_SPI_CR_CS_MASK;
+ writel(cr, &regs->cr);
+
+ if (plat->activate_delay_us)
+ udelay(plat->activate_delay_us);
+}
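+
+/*
+ * Worked example (illustrative only) of the CS encoding above: for cs = 1,
+ * ~(1 << 1) ends in binary ...1101, so after the shift CR[13:10] holds 1101b
+ * and only the CS1 line is driven low, matching the "xx01 - cs1" row.
+ */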
+
+static void spi_cs_deactivate(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_spi_plat *plat = dev_get_plat(bus);
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct zynq_spi_regs *regs = priv->regs;
+
+ setbits_le32(&regs->cr, ZYNQ_SPI_CR_CS_MASK);
+
+ /* Remember time of this transaction so we can honour the bus delay */
+ if (plat->deactivate_delay_us)
+ priv->last_transaction_us = timer_get_us();
+}
+
+static int zynq_spi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct zynq_spi_regs *regs = priv->regs;
+
+ writel(ZYNQ_SPI_ENR_SPI_EN_MASK, &regs->enr);
+
+ return 0;
+}
+
+static int zynq_spi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct zynq_spi_regs *regs = priv->regs;
+ u32 confr;
+
+ confr = ZYNQ_SPI_ENR_SPI_EN_MASK;
+ writel(~confr, &regs->enr);
+
+ return 0;
+}
+
+static int zynq_spi_xfer(struct udevice *dev, unsigned int bitlen,
+ const void *dout, void *din, unsigned long flags)
+{
+ struct udevice *bus = dev->parent;
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct zynq_spi_regs *regs = priv->regs;
+ struct dm_spi_slave_plat *slave_plat = dev_get_parent_plat(dev);
+ u32 len = bitlen / 8;
+ u32 tx_len = len, rx_len = len, tx_tvl;
+ const u8 *tx_buf = dout;
+ u8 *rx_buf = din, buf;
+ u32 ts, status;
+
+ debug("spi_xfer: bus:%i cs:%i bitlen:%i len:%i flags:%lx\n",
+ dev_seq(bus), slave_plat->cs, bitlen, len, flags);
+
+ if (bitlen % 8) {
+ debug("spi_xfer: Non byte aligned SPI transfer\n");
+ return -1;
+ }
+
+ priv->cs = slave_plat->cs;
+ if (flags & SPI_XFER_BEGIN)
+ spi_cs_activate(dev);
+
+ while (rx_len > 0) {
+ /* Write the data into TX FIFO - tx threshold is fifo_depth */
+ tx_tvl = 0;
+ while ((tx_tvl < priv->fifo_depth) && tx_len) {
+ if (tx_buf)
+ buf = *tx_buf++;
+ else
+ buf = 0;
+ writel(buf, &regs->txdr);
+ tx_len--;
+ tx_tvl++;
+ }
+
+ /* Check TX FIFO completion */
+ ts = get_timer(0);
+ status = readl(&regs->isr);
+ while (!(status & ZYNQ_SPI_IXR_TXOW_MASK)) {
+ if (get_timer(ts) > ZYNQ_SPI_WAIT) {
+ printf("spi_xfer: Timeout! TX FIFO not full\n");
+ return -1;
+ }
+ status = readl(&regs->isr);
+ }
+
+ /* Read the data from RX FIFO */
+ status = readl(&regs->isr);
+ while ((status & ZYNQ_SPI_IXR_RXNEMPTY_MASK) && rx_len) {
+ buf = readl(&regs->rxdr);
+ if (rx_buf)
+ *rx_buf++ = buf;
+ status = readl(&regs->isr);
+ rx_len--;
+ }
+ }
+
+ if (flags & SPI_XFER_END)
+ spi_cs_deactivate(dev);
+
+ return 0;
+}
+
+static int zynq_spi_set_speed(struct udevice *bus, uint speed)
+{
+ struct zynq_spi_plat *plat = dev_get_plat(bus);
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct zynq_spi_regs *regs = priv->regs;
+ uint32_t confr;
+ u8 baud_rate_val = 0;
+
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+
+ /* Set the clock frequency */
+ confr = readl(&regs->cr);
+ if (speed == 0) {
+ /* If the requested speed is 0, default to a divide-by-8 SCLK */
+ baud_rate_val = 0x2;
+ } else if (plat->speed_hz != speed) {
+ while ((baud_rate_val < ZYNQ_SPI_CR_BAUD_MAX) &&
+ ((plat->frequency /
+ (2 << baud_rate_val)) > speed))
+ baud_rate_val++;
+ plat->speed_hz = speed / (2 << baud_rate_val);
+ }
+ confr &= ~ZYNQ_SPI_CR_BAUD_MASK;
+ confr |= (baud_rate_val << ZYNQ_SPI_CR_BAUD_SHIFT);
+
+ writel(confr, &regs->cr);
+ priv->freq = speed;
+
+ debug("zynq_spi_set_speed: regs=%p, speed=%d\n",
+ priv->regs, priv->freq);
+
+ return 0;
+}
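+
+/*
+ * Worked example (illustrative only) of the divisor search above: the SCLK
+ * divisor is 2 << baud_rate_val, i.e. 2, 4, 8, ... 256 for values 0..7.
+ * With, say, a 166 MHz reference clock and a requested speed of 25 MHz the
+ * loop stops at baud_rate_val = 2, the first divisor (8) whose output of
+ * about 20.8 MHz no longer exceeds the request.
+ */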
+
+static int zynq_spi_set_mode(struct udevice *bus, uint mode)
+{
+ struct zynq_spi_priv *priv = dev_get_priv(bus);
+ struct zynq_spi_regs *regs = priv->regs;
+ uint32_t confr;
+
+ /* Set the SPI Clock phase and polarities */
+ confr = readl(&regs->cr);
+ confr &= ~(ZYNQ_SPI_CR_CPHA_MASK | ZYNQ_SPI_CR_CPOL_MASK);
+
+ if (mode & SPI_CPHA)
+ confr |= ZYNQ_SPI_CR_CPHA_MASK;
+ if (mode & SPI_CPOL)
+ confr |= ZYNQ_SPI_CR_CPOL_MASK;
+
+ writel(confr, &regs->cr);
+ priv->mode = mode;
+
+ debug("zynq_spi_set_mode: regs=%p, mode=%d\n", priv->regs, priv->mode);
+
+ return 0;
+}
+
+static const struct dm_spi_ops zynq_spi_ops = {
+ .claim_bus = zynq_spi_claim_bus,
+ .release_bus = zynq_spi_release_bus,
+ .xfer = zynq_spi_xfer,
+ .set_speed = zynq_spi_set_speed,
+ .set_mode = zynq_spi_set_mode,
+};
+
+static const struct udevice_id zynq_spi_ids[] = {
+ { .compatible = "xlnx,zynq-spi-r1p6" },
+ { .compatible = "cdns,spi-r1p6" },
+ { }
+};
+
+U_BOOT_DRIVER(zynq_spi) = {
+ .name = "zynq_spi",
+ .id = UCLASS_SPI,
+ .of_match = zynq_spi_ids,
+ .ops = &zynq_spi_ops,
+ .of_to_plat = zynq_spi_of_to_plat,
+ .plat_auto = sizeof(struct zynq_spi_plat),
+ .priv_auto = sizeof(struct zynq_spi_priv),
+ .probe = zynq_spi_probe,
+};
diff --git a/roms/u-boot/drivers/spi/zynqmp_gqspi.c b/roms/u-boot/drivers/spi/zynqmp_gqspi.c
new file mode 100644
index 000000000..fc81b0734
--- /dev/null
+++ b/roms/u-boot/drivers/spi/zynqmp_gqspi.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2018 Xilinx
+ *
+ * Xilinx ZynqMP Generic Quad-SPI(QSPI) controller driver(master mode only)
+ */
+
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <clk.h>
+#include <dm.h>
+#include <malloc.h>
+#include <memalign.h>
+#include <spi.h>
+#include <spi-mem.h>
+#include <ubi_uboot.h>
+#include <wait_bit.h>
+#include <dm/device_compat.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+
+#define GQSPI_GFIFO_STRT_MODE_MASK BIT(29)
+#define GQSPI_CONFIG_MODE_EN_MASK (3 << 30)
+#define GQSPI_CONFIG_DMA_MODE (2 << 30)
+#define GQSPI_CONFIG_CPHA_MASK BIT(2)
+#define GQSPI_CONFIG_CPOL_MASK BIT(1)
+
+/*
+ * QSPI Interrupt Registers bit Masks
+ *
+ * All four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define GQSPI_IXR_TXNFULL_MASK 0x00000004 /* QSPI TX FIFO not full */
+#define GQSPI_IXR_TXFULL_MASK 0x00000008 /* QSPI TX FIFO is full */
+#define GQSPI_IXR_RXNEMTY_MASK 0x00000010 /* QSPI RX FIFO Not Empty */
+#define GQSPI_IXR_GFEMTY_MASK 0x00000080 /* QSPI Generic FIFO Empty */
+#define GQSPI_IXR_GFNFULL_MASK 0x00000200 /* QSPI GENFIFO not full */
+#define GQSPI_IXR_ALL_MASK (GQSPI_IXR_TXNFULL_MASK | \
+ GQSPI_IXR_RXNEMTY_MASK)
+
+/*
+ * QSPI Enable Register bit Masks
+ *
+ * This register is used to enable or disable the QSPI controller
+ */
+#define GQSPI_ENABLE_ENABLE_MASK 0x00000001 /* QSPI Enable Bit Mask */
+
+#define GQSPI_GFIFO_LOW_BUS BIT(14)
+#define GQSPI_GFIFO_CS_LOWER BIT(12)
+#define GQSPI_GFIFO_UP_BUS BIT(15)
+#define GQSPI_GFIFO_CS_UPPER BIT(13)
+#define GQSPI_SPI_MODE_QSPI (3 << 10)
+#define GQSPI_SPI_MODE_SPI BIT(10)
+#define GQSPI_SPI_MODE_DUAL_SPI (2 << 10)
+#define GQSPI_IMD_DATA_CS_ASSERT 5
+#define GQSPI_IMD_DATA_CS_DEASSERT 5
+#define GQSPI_GFIFO_TX BIT(16)
+#define GQSPI_GFIFO_RX BIT(17)
+#define GQSPI_GFIFO_STRIPE_MASK BIT(18)
+#define GQSPI_GFIFO_IMD_MASK 0xFF
+#define GQSPI_GFIFO_EXP_MASK BIT(9)
+#define GQSPI_GFIFO_DATA_XFR_MASK BIT(8)
+#define GQSPI_STRT_GEN_FIFO BIT(28)
+#define GQSPI_GEN_FIFO_STRT_MOD BIT(29)
+#define GQSPI_GFIFO_WP_HOLD BIT(19)
+#define GQSPI_BAUD_DIV_MASK (7 << 3)
+#define GQSPI_DFLT_BAUD_RATE_DIV BIT(3)
+#define GQSPI_GFIFO_ALL_INT_MASK 0xFBE
+#define GQSPI_DMA_DST_I_STS_DONE BIT(1)
+#define GQSPI_DMA_DST_I_STS_MASK 0xFE
+#define MODEBITS 0x6
+
+#define GQSPI_GFIFO_SELECT BIT(0)
+#define GQSPI_FIFO_THRESHOLD 1
+
+#define SPI_XFER_ON_BOTH 0
+#define SPI_XFER_ON_LOWER 1
+#define SPI_XFER_ON_UPPER 2
+
+#define GQSPI_DMA_ALIGN 0x4
+#define GQSPI_MAX_BAUD_RATE_VAL 7
+#define GQSPI_DFLT_BAUD_RATE_VAL 2
+
+#define GQSPI_TIMEOUT 100000000
+
+#define GQSPI_BAUD_DIV_SHIFT 2
+#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT 5
+#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x2
+#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 3
+#define GQSPI_LPBK_DLY_ADJ_DLY_0 0x3
+#define GQSPI_USE_DATA_DLY 0x1
+#define GQSPI_USE_DATA_DLY_SHIFT 31
+#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
+#define GQSPI_DATA_DLY_ADJ_SHIFT 28
+#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
+#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 2
+#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
+#define IOU_TAPDLY_BYPASS_OFST 0xFF180390
+#define GQSPI_LPBK_DLY_ADJ_LPBK_MASK 0x00000020
+#define GQSPI_FREQ_40MHZ 40000000
+#define GQSPI_FREQ_100MHZ 100000000
+#define GQSPI_FREQ_150MHZ 150000000
+#define IOU_TAPDLY_BYPASS_MASK 0x7
+
+#define GQSPI_REG_OFFSET 0x100
+#define GQSPI_DMA_REG_OFFSET 0x800
+
+/* QSPI register offsets */
+struct zynqmp_qspi_regs {
+ u32 confr; /* 0x00 */
+ u32 isr; /* 0x04 */
+ u32 ier; /* 0x08 */
+ u32 idisr; /* 0x0C */
+ u32 imaskr; /* 0x10 */
+ u32 enbr; /* 0x14 */
+ u32 dr; /* 0x18 */
+ u32 txd0r; /* 0x1C */
+ u32 drxr; /* 0x20 */
+ u32 sicr; /* 0x24 */
+ u32 txftr; /* 0x28 */
+ u32 rxftr; /* 0x2C */
+ u32 gpior; /* 0x30 */
+ u32 reserved0; /* 0x34 */
+ u32 lpbkdly; /* 0x38 */
+ u32 reserved1; /* 0x3C */
+ u32 genfifo; /* 0x40 */
+ u32 gqspisel; /* 0x44 */
+ u32 reserved2; /* 0x48 */
+ u32 gqfifoctrl; /* 0x4C */
+ u32 gqfthr; /* 0x50 */
+ u32 gqpollcfg; /* 0x54 */
+ u32 gqpollto; /* 0x58 */
+ u32 gqxfersts; /* 0x5C */
+ u32 gqfifosnap; /* 0x60 */
+ u32 gqrxcpy; /* 0x64 */
+ u32 reserved3[36]; /* 0x68 */
+ u32 gqspidlyadj; /* 0xF8 */
+};
+
+struct zynqmp_qspi_dma_regs {
+ u32 dmadst; /* 0x00 */
+ u32 dmasize; /* 0x04 */
+ u32 dmasts; /* 0x08 */
+ u32 dmactrl; /* 0x0C */
+ u32 reserved0; /* 0x10 */
+ u32 dmaisr; /* 0x14 */
+ u32 dmaier; /* 0x18 */
+ u32 dmaidr; /* 0x1C */
+ u32 dmaimr; /* 0x20 */
+ u32 dmactrl2; /* 0x24 */
+ u32 dmadstmsb; /* 0x28 */
+};
+
+struct zynqmp_qspi_plat {
+ struct zynqmp_qspi_regs *regs;
+ struct zynqmp_qspi_dma_regs *dma_regs;
+ u32 frequency;
+ u32 speed_hz;
+};
+
+struct zynqmp_qspi_priv {
+ struct zynqmp_qspi_regs *regs;
+ struct zynqmp_qspi_dma_regs *dma_regs;
+ const void *tx_buf;
+ void *rx_buf;
+ unsigned int len;
+ int bytes_to_transfer;
+ int bytes_to_receive;
+ const struct spi_mem_op *op;
+};
+
+static int zynqmp_qspi_of_to_plat(struct udevice *bus)
+{
+ struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
+
+ debug("%s\n", __func__);
+
+ plat->regs = (struct zynqmp_qspi_regs *)(dev_read_addr(bus) +
+ GQSPI_REG_OFFSET);
+ plat->dma_regs = (struct zynqmp_qspi_dma_regs *)
+ (dev_read_addr(bus) + GQSPI_DMA_REG_OFFSET);
+
+ return 0;
+}
+
+static void zynqmp_qspi_init_hw(struct zynqmp_qspi_priv *priv)
+{
+ u32 config_reg;
+ struct zynqmp_qspi_regs *regs = priv->regs;
+
+ writel(GQSPI_GFIFO_SELECT, &regs->gqspisel);
+ writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->idisr);
+ writel(GQSPI_FIFO_THRESHOLD, &regs->txftr);
+ writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
+ writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);
+
+ config_reg = readl(&regs->confr);
+ config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
+ GQSPI_CONFIG_MODE_EN_MASK);
+ config_reg |= GQSPI_CONFIG_DMA_MODE |
+ GQSPI_GFIFO_WP_HOLD |
+ GQSPI_DFLT_BAUD_RATE_DIV;
+ writel(config_reg, &regs->confr);
+
+ writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
+}
+
+static u32 zynqmp_qspi_bus_select(struct zynqmp_qspi_priv *priv)
+{
+ u32 gqspi_fifo_reg = 0;
+
+ gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS |
+ GQSPI_GFIFO_CS_LOWER;
+
+ return gqspi_fifo_reg;
+}
+
+static u32 zynqmp_qspi_genfifo_mode(u8 buswidth)
+{
+ switch (buswidth) {
+ case 1:
+ return GQSPI_SPI_MODE_SPI;
+ case 2:
+ return GQSPI_SPI_MODE_DUAL_SPI;
+ case 4:
+ return GQSPI_SPI_MODE_QSPI;
+ default:
+ debug("Unsupported bus width %u\n", buswidth);
+ return GQSPI_SPI_MODE_SPI;
+ }
+}
+
+static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
+ u32 gqspi_fifo_reg)
+{
+ struct zynqmp_qspi_regs *regs = priv->regs;
+ u32 config_reg, ier;
+ int ret = 0;
+
+ config_reg = readl(&regs->confr);
+ /* Manual start if needed */
+ config_reg |= GQSPI_STRT_GEN_FIFO;
+ writel(config_reg, &regs->confr);
+
+ /* Enable interrupts */
+ ier = readl(&regs->ier);
+ ier |= GQSPI_IXR_GFNFULL_MASK;
+ writel(ier, &regs->ier);
+
+ /* Wait until the fifo is not full to write the new command */
+ ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFNFULL_MASK, 1,
+ GQSPI_TIMEOUT, 1);
+ if (ret)
+ printf("%s Timeout\n", __func__);
+
+ writel(gqspi_fifo_reg, &regs->genfifo);
+}
+
+static void zynqmp_qspi_chipselect(struct zynqmp_qspi_priv *priv, int is_on)
+{
+ u32 gqspi_fifo_reg = 0;
+
+ if (is_on) {
+ gqspi_fifo_reg = zynqmp_qspi_bus_select(priv);
+ gqspi_fifo_reg |= GQSPI_SPI_MODE_SPI |
+ GQSPI_IMD_DATA_CS_ASSERT;
+ } else {
+ gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS;
+ gqspi_fifo_reg |= GQSPI_IMD_DATA_CS_DEASSERT;
+ }
+
+ debug("GFIFO_CMD_CS: 0x%x\n", gqspi_fifo_reg);
+
+ /* Dummy generic FIFO entry */
+ zynqmp_qspi_fill_gen_fifo(priv, 0);
+
+ zynqmp_qspi_fill_gen_fifo(priv, gqspi_fifo_reg);
+}
+
+void zynqmp_qspi_set_tapdelay(struct udevice *bus, u32 baudrateval)
+{
+ struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
+ struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
+ struct zynqmp_qspi_regs *regs = priv->regs;
+ u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
+ u32 reqhz = 0;
+
+ clk_rate = plat->frequency;
+ reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));
+
+ debug("%s, req_hz:%d, clk_rate:%d, baudrateval:%d\n",
+ __func__, reqhz, clk_rate, baudrateval);
+
+ if (reqhz < GQSPI_FREQ_40MHZ) {
+ zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ } else if (reqhz <= GQSPI_FREQ_100MHZ) {
+ zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
+ tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
+ TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
+ lpbkdlyadj = readl(&regs->lpbkdly);
+ lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_LPBK_MASK);
+ datadlyadj = readl(&regs->gqspidlyadj);
+ datadlyadj |= ((GQSPI_USE_DATA_DLY << GQSPI_USE_DATA_DLY_SHIFT)
+ | (GQSPI_DATA_DLY_ADJ_VALUE <<
+ GQSPI_DATA_DLY_ADJ_SHIFT));
+ } else if (reqhz <= GQSPI_FREQ_150MHZ) {
+ lpbkdlyadj = readl(&regs->lpbkdly);
+ lpbkdlyadj |= ((GQSPI_LPBK_DLY_ADJ_LPBK_MASK) |
+ GQSPI_LPBK_DLY_ADJ_DLY_0);
+ }
+
+ zynqmp_mmio_write(IOU_TAPDLY_BYPASS_OFST, IOU_TAPDLY_BYPASS_MASK,
+ tapdlybypass);
+ writel(lpbkdlyadj, &regs->lpbkdly);
+ writel(datadlyadj, &regs->gqspidlyadj);
+}
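+
+/*
+ * Summary of the tap-delay settings applied above, as derived from the code:
+ *	below 40 MHz:	bypass the LQSPI RX tap delay only
+ *	40..100 MHz:	RX tap bypass plus loopback and data delay adjust
+ *	100..150 MHz:	loopback delay adjust with the DLY0 value only
+ */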
+
+static int zynqmp_qspi_set_speed(struct udevice *bus, uint speed)
+{
+ struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
+ struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
+ struct zynqmp_qspi_regs *regs = priv->regs;
+ u32 confr;
+ u8 baud_rate_val = 0;
+
+ debug("%s\n", __func__);
+ if (speed > plat->frequency)
+ speed = plat->frequency;
+
+ if (plat->speed_hz != speed) {
+ /*
+ * Set the clock frequency; if no divisor matches (e.g. the
+ * requested speed is 0), fall back to the default divide-by-8
+ * setting below.
+ */
+ while ((baud_rate_val < 8) &&
+ ((plat->frequency /
+ (2 << baud_rate_val)) > speed))
+ baud_rate_val++;
+
+ if (baud_rate_val > GQSPI_MAX_BAUD_RATE_VAL)
+ baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;
+
+ plat->speed_hz = plat->frequency / (2 << baud_rate_val);
+
+ confr = readl(&regs->confr);
+ confr &= ~GQSPI_BAUD_DIV_MASK;
+ confr |= (baud_rate_val << 3);
+ writel(confr, &regs->confr);
+ zynqmp_qspi_set_tapdelay(bus, baud_rate_val);
+
+ debug("regs=%p, speed=%d\n", priv->regs, plat->speed_hz);
+ }
+
+ return 0;
+}
+
+static int zynqmp_qspi_probe(struct udevice *bus)
+{
+ struct zynqmp_qspi_plat *plat = dev_get_plat(bus);
+ struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
+ struct clk clk;
+ unsigned long clock;
+ int ret;
+
+ debug("%s: bus:%p, priv:%p\n", __func__, bus, priv);
+
+ priv->regs = plat->regs;
+ priv->dma_regs = plat->dma_regs;
+
+ ret = clk_get_by_index(bus, 0, &clk);
+ if (ret < 0) {
+ dev_err(bus, "failed to get clock\n");
+ return ret;
+ }
+
+ clock = clk_get_rate(&clk);
+ if (IS_ERR_VALUE(clock)) {
+ dev_err(bus, "failed to get rate\n");
+ return clock;
+ }
+ debug("%s: CLK %ld\n", __func__, clock);
+
+ ret = clk_enable(&clk);
+ if (ret) {
+ dev_err(bus, "failed to enable clock\n");
+ return ret;
+ }
+ plat->frequency = clock;
+ plat->speed_hz = plat->frequency / 2;
+
+ /* init the zynq spi hw */
+ zynqmp_qspi_init_hw(priv);
+
+ return 0;
+}
+
+static int zynqmp_qspi_set_mode(struct udevice *bus, uint mode)
+{
+ struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
+ struct zynqmp_qspi_regs *regs = priv->regs;
+ u32 confr;
+
+ debug("%s\n", __func__);
+ /* Set the SPI Clock phase and polarities */
+ confr = readl(&regs->confr);
+ confr &= ~(GQSPI_CONFIG_CPHA_MASK |
+ GQSPI_CONFIG_CPOL_MASK);
+
+ if (mode & SPI_CPHA)
+ confr |= GQSPI_CONFIG_CPHA_MASK;
+ if (mode & SPI_CPOL)
+ confr |= GQSPI_CONFIG_CPOL_MASK;
+
+ writel(confr, &regs->confr);
+
+ return 0;
+}
+
+static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
+{
+ u32 data;
+ int ret = 0;
+ struct zynqmp_qspi_regs *regs = priv->regs;
+ u32 *buf = (u32 *)priv->tx_buf;
+ u32 len = size;
+
+ debug("TxFIFO: 0x%x, size: 0x%x\n", readl(&regs->isr),
+ size);
+
+ while (size) {
+ ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK, 1,
+ GQSPI_TIMEOUT, 1);
+ if (ret) {
+ printf("%s: Timeout\n", __func__);
+ return ret;
+ }
+
+ if (size >= 4) {
+ writel(*buf, &regs->txd0r);
+ buf++;
+ size -= 4;
+ } else {
+ switch (size) {
+ case 1:
+ data = *((u8 *)buf);
+ buf += 1;
+ data |= GENMASK(31, 8);
+ break;
+ case 2:
+ data = *((u16 *)buf);
+ buf += 2;
+ data |= GENMASK(31, 16);
+ break;
+ case 3:
+ /* avoid a 32-bit over-read past the 3 remaining bytes */
+ data = 0;
+ memcpy(&data, buf, 3);
+ data |= GENMASK(31, 24);
+ break;
+ }
+ writel(data, &regs->txd0r);
+ size = 0;
+ }
+ }
+
+ priv->tx_buf += len;
+ return 0;
+}
+
+static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
+{
+ const struct spi_mem_op *op = priv->op;
+ u32 gen_fifo_cmd;
+ u8 i, dummy_cycles, addr;
+
+ /* Send opcode */
+ gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->cmd.buswidth);
+ gen_fifo_cmd |= GQSPI_GFIFO_TX;
+ gen_fifo_cmd |= op->cmd.opcode;
+ zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
+
+ /* Send address */
+ for (i = 0; i < op->addr.nbytes; i++) {
+ addr = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->addr.buswidth);
+ gen_fifo_cmd |= GQSPI_GFIFO_TX;
+ gen_fifo_cmd |= addr;
+
+ debug("GFIFO_CMD_Cmd = 0x%x\n", gen_fifo_cmd);
+
+ zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
+ }
+
+ /* Send dummy */
+ if (op->dummy.nbytes) {
+ dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
+
+ gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(op->dummy.buswidth);
+ gen_fifo_cmd &= ~(GQSPI_GFIFO_TX | GQSPI_GFIFO_RX);
+ gen_fifo_cmd |= GQSPI_GFIFO_DATA_XFR_MASK;
+ gen_fifo_cmd |= dummy_cycles;
+ zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
+ }
+}
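+
+/*
+ * Dummy-cycle example (illustrative only): spi-mem expresses dummies in bytes
+ * on the dummy-phase bus width, so a fast read with 8 dummy clocks on one
+ * line has op->dummy.nbytes = 1 and op->dummy.buswidth = 1, and the formula
+ * above yields 1 * 8 / 1 = 8 cycles in the GENFIFO entry.
+ */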
+
+static u32 zynqmp_qspi_calc_exp(struct zynqmp_qspi_priv *priv,
+ u32 *gen_fifo_cmd)
+{
+ u32 expval = 8;
+ u32 len;
+
+ while (1) {
+ if (priv->len > 255) {
+ if (priv->len & (1 << expval)) {
+ *gen_fifo_cmd &= ~GQSPI_GFIFO_IMD_MASK;
+ *gen_fifo_cmd |= GQSPI_GFIFO_EXP_MASK;
+ *gen_fifo_cmd |= expval;
+ priv->len -= (1 << expval);
+ return expval;
+ }
+ expval++;
+ } else {
+ *gen_fifo_cmd &= ~(GQSPI_GFIFO_IMD_MASK |
+ GQSPI_GFIFO_EXP_MASK);
+ *gen_fifo_cmd |= (u8)priv->len;
+ len = (u8)priv->len;
+ priv->len = 0;
+ return len;
+ }
+ }
+}
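+
+/*
+ * Worked example (illustrative only): for priv->len = 300 the first call
+ * returns expval = 8 and emits a GENFIFO entry with the EXP bit set and an
+ * immediate value of 8 (2^8 = 256 bytes); 44 bytes remain, so the next call
+ * clears EXP and places 44 directly in the immediate field.
+ */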
+
+static int zynqmp_qspi_genfifo_fill_tx(struct zynqmp_qspi_priv *priv)
+{
+ u32 gen_fifo_cmd;
+ u32 len;
+ int ret = 0;
+
+ gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
+ gen_fifo_cmd |= GQSPI_GFIFO_TX |
+ GQSPI_GFIFO_DATA_XFR_MASK;
+
+ while (priv->len) {
+ len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
+ zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
+
+ debug("GFIFO_CMD_TX:0x%x\n", gen_fifo_cmd);
+
+ if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
+ ret = zynqmp_qspi_fill_tx_fifo(priv,
+ 1 << len);
+ else
+ ret = zynqmp_qspi_fill_tx_fifo(priv,
+ len);
+
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
+ u32 gen_fifo_cmd, u32 *buf)
+{
+ u32 addr;
+ u32 size, len;
+ u32 actuallen = priv->len;
+ int ret = 0;
+ struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;
+
+ writel((unsigned long)buf, &dma_regs->dmadst);
+ writel(roundup(priv->len, ARCH_DMA_MINALIGN), &dma_regs->dmasize);
+ writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
+ addr = (unsigned long)buf;
+ size = roundup(priv->len, ARCH_DMA_MINALIGN);
+ flush_dcache_range(addr, addr + size);
+
+ while (priv->len) {
+ len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
+ if (!(gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK) &&
+ (len % ARCH_DMA_MINALIGN)) {
+ gen_fifo_cmd &= ~GENMASK(7, 0);
+ gen_fifo_cmd |= roundup(len, ARCH_DMA_MINALIGN);
+ }
+ zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
+
+ debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
+ }
+
+ ret = wait_for_bit_le32(&dma_regs->dmaisr, GQSPI_DMA_DST_I_STS_DONE,
+ 1, GQSPI_TIMEOUT, 1);
+ if (ret) {
+ printf("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
+ return -ETIMEDOUT;
+ }
+
+ writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);
+
+ debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
+ (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
+ actuallen);
+
+ if (buf != priv->rx_buf)
+ memcpy(priv->rx_buf, buf, actuallen);
+
+ return 0;
+}
+
+static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
+{
+ u32 gen_fifo_cmd;
+ u32 *buf;
+ u32 actuallen = priv->len;
+
+ gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
+ gen_fifo_cmd |= zynqmp_qspi_genfifo_mode(priv->op->data.buswidth);
+ gen_fifo_cmd |= GQSPI_GFIFO_RX |
+ GQSPI_GFIFO_DATA_XFR_MASK;
+
+ /*
+ * If the receive buffer is 4-byte aligned and the length is a
+ * multiple of 4 bytes, DMA straight into it; otherwise use a
+ * cache-aligned bounce buffer.
+ */
+ if (!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
+ !(actuallen % GQSPI_DMA_ALIGN)) {
+ buf = (u32 *)priv->rx_buf;
+ return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
+ }
+
+ ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len,
+ GQSPI_DMA_ALIGN));
+ buf = (u32 *)tmp;
+ return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
+}
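+
+/*
+ * Note (illustrative example): a 5-byte read into an unaligned buffer
+ * therefore goes through the bounce path: the DMA lands in the cache-aligned
+ * 'tmp' buffer, whose size is rounded up for alignment, and
+ * zynqmp_qspi_start_dma() copies only the 5 requested bytes back to
+ * priv->rx_buf.
+ */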
+
+static int zynqmp_qspi_claim_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
+ struct zynqmp_qspi_regs *regs = priv->regs;
+
+ writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
+
+ return 0;
+}
+
+static int zynqmp_qspi_release_bus(struct udevice *dev)
+{
+ struct udevice *bus = dev->parent;
+ struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
+ struct zynqmp_qspi_regs *regs = priv->regs;
+
+ writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
+
+ return 0;
+}
+
+static int zynqmp_qspi_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+{
+ struct zynqmp_qspi_priv *priv = dev_get_priv(slave->dev->parent);
+ int ret = 0;
+
+ priv->op = op;
+ priv->tx_buf = op->data.buf.out;
+ priv->rx_buf = op->data.buf.in;
+ priv->len = op->data.nbytes;
+
+ zynqmp_qspi_chipselect(priv, 1);
+
+ /* Send opcode, addr, dummy */
+ zynqmp_qspi_genfifo_cmd(priv);
+
+ /* Request the transfer */
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ ret = zynqmp_qspi_genfifo_fill_rx(priv);
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ ret = zynqmp_qspi_genfifo_fill_tx(priv);
+
+ zynqmp_qspi_chipselect(priv, 0);
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = {
+ .exec_op = zynqmp_qspi_exec_op,
+};
+
+static const struct dm_spi_ops zynqmp_qspi_ops = {
+ .claim_bus = zynqmp_qspi_claim_bus,
+ .release_bus = zynqmp_qspi_release_bus,
+ .set_speed = zynqmp_qspi_set_speed,
+ .set_mode = zynqmp_qspi_set_mode,
+ .mem_ops = &zynqmp_qspi_mem_ops,
+};
+
+static const struct udevice_id zynqmp_qspi_ids[] = {
+ { .compatible = "xlnx,zynqmp-qspi-1.0" },
+ { .compatible = "xlnx,versal-qspi-1.0" },
+ { }
+};
+
+U_BOOT_DRIVER(zynqmp_qspi) = {
+ .name = "zynqmp_qspi",
+ .id = UCLASS_SPI,
+ .of_match = zynqmp_qspi_ids,
+ .ops = &zynqmp_qspi_ops,
+ .of_to_plat = zynqmp_qspi_of_to_plat,
+ .plat_auto = sizeof(struct zynqmp_qspi_plat),
+ .priv_auto = sizeof(struct zynqmp_qspi_priv),
+ .probe = zynqmp_qspi_probe,
+};