Diffstat (limited to 'roms/u-boot/drivers/ddr')
110 files changed, 55337 insertions, 0 deletions
diff --git a/roms/u-boot/drivers/ddr/Kconfig b/roms/u-boot/drivers/ddr/Kconfig new file mode 100644 index 000000000..d4b393d25 --- /dev/null +++ b/roms/u-boot/drivers/ddr/Kconfig @@ -0,0 +1,2 @@ +source "drivers/ddr/altera/Kconfig" +source "drivers/ddr/imx/Kconfig" diff --git a/roms/u-boot/drivers/ddr/altera/Kconfig b/roms/u-boot/drivers/ddr/altera/Kconfig new file mode 100644 index 000000000..4660d20de --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/Kconfig @@ -0,0 +1,8 @@ +config SPL_ALTERA_SDRAM + bool "SoCFPGA DDR SDRAM driver in SPL" + depends on SPL + depends on TARGET_SOCFPGA_GEN5 || TARGET_SOCFPGA_ARRIA10 || TARGET_SOCFPGA_SOC64 + select RAM if TARGET_SOCFPGA_GEN5 || TARGET_SOCFPGA_SOC64 + select SPL_RAM if TARGET_SOCFPGA_GEN5 || TARGET_SOCFPGA_SOC64 + help + Enable DDR SDRAM controller for the SoCFPGA devices. diff --git a/roms/u-boot/drivers/ddr/altera/Makefile b/roms/u-boot/drivers/ddr/altera/Makefile new file mode 100644 index 000000000..39dfee5d5 --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# (C) Copyright 2000-2003 +# Wolfgang Denk, DENX Software Engineering, wd@denx.de. +# +# (C) Copyright 2010, Thomas Chou <thomas@wytron.com.tw> +# Copyright (C) 2014 Altera Corporation <www.altera.com> + +ifdef CONFIG_$(SPL_)ALTERA_SDRAM +obj-$(CONFIG_TARGET_SOCFPGA_GEN5) += sdram_gen5.o sequencer.o +obj-$(CONFIG_TARGET_SOCFPGA_ARRIA10) += sdram_arria10.o +obj-$(CONFIG_TARGET_SOCFPGA_STRATIX10) += sdram_soc64.o sdram_s10.o +obj-$(CONFIG_TARGET_SOCFPGA_AGILEX) += sdram_soc64.o sdram_agilex.o +endif diff --git a/roms/u-boot/drivers/ddr/altera/sdram_agilex.c b/roms/u-boot/drivers/ddr/altera/sdram_agilex.c new file mode 100644 index 000000000..65ecdd022 --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sdram_agilex.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Intel Corporation <www.intel.com> + * + */ + +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <div64.h> +#include <fdtdec.h> +#include <hang.h> +#include <log.h> +#include <ram.h> +#include <reset.h> +#include <asm/global_data.h> +#include "sdram_soc64.h" +#include <wait_bit.h> +#include <asm/arch/firewall.h> +#include <asm/arch/reset_manager.h> +#include <asm/arch/system_manager.h> +#include <asm/io.h> +#include <linux/sizes.h> + +DECLARE_GLOBAL_DATA_PTR; + +int sdram_mmr_init_full(struct udevice *dev) +{ + struct altera_sdram_plat *plat = dev_get_plat(dev); + struct altera_sdram_priv *priv = dev_get_priv(dev); + u32 i; + int ret; + phys_size_t hw_size; + struct bd_info bd = {0}; + + /* Ensure HMC clock is running */ + if (poll_hmc_clock_status()) { + debug("DDR: Error as HMC clock was not running\n"); + return -EPERM; + } + + /* Trying 3 times to do a calibration */ + for (i = 0; i < 3; i++) { + ret = wait_for_bit_le32((const void *)(plat->hmc + + DDRCALSTAT), + DDR_HMC_DDRCALSTAT_CAL_MSK, true, 1000, + false); + if (!ret) + break; + + emif_reset(plat); + } + + if (ret) { + puts("DDR: Error as SDRAM calibration failed\n"); + return -EPERM; + } + debug("DDR: Calibration success\n"); + + /* + * Configure the DDR IO size + * niosreserve0: Used to indicate DDR width & + * bit[7:0] = Number of data bits (bit[6:5] 0x01=32bit, 0x10=64bit) + * bit[8] = 1 if user-mode OCT is present + * bit[9] = 1 if warm reset compiled into EMIF Cal Code + * bit[10] = 1 if warm reset is on during generation in EMIF Cal + * niosreserve1: IP ADCDS version encoded as 16 bit value + * bit[2:0] = Variant (0=not special,1=FAE beta, 
2=Customer beta, + * 3=EAP, 4-6 are reserved) + * bit[5:3] = Service Pack # (e.g. 1) + * bit[9:6] = Minor Release # + * bit[14:10] = Major Release # + */ + /* Configure DDR IO size x16, x32 and x64 mode */ + u32 update_value; + + update_value = hmc_readl(plat, NIOSRESERVED0); + update_value = (update_value & 0xFF) >> 5; + + /* Configure DDR data rate 0-HAlf-rate 1-Quarter-rate */ + update_value |= (hmc_readl(plat, CTRLCFG3) & 0x4); + hmc_ecc_writel(plat, update_value, DDRIOCTRL); + + /* Copy values MMR IOHMC dramaddrw to HMC adp DRAMADDRWIDTH */ + hmc_ecc_writel(plat, hmc_readl(plat, DRAMADDRW), DRAMADDRWIDTH); + + /* assigning the SDRAM size */ + phys_size_t size = sdram_calculate_size(plat); + + if (size <= 0) + hw_size = PHYS_SDRAM_1_SIZE; + else + hw_size = size; + + /* Get bank configuration from devicetree */ + ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL, + (phys_size_t *)&gd->ram_size, &bd); + if (ret) { + puts("DDR: Failed to decode memory node\n"); + return -ENXIO; + } + + if (gd->ram_size != hw_size) { + printf("DDR: Warning: DRAM size from device tree (%lld MiB)\n", + gd->ram_size >> 20); + printf(" mismatch with hardware (%lld MiB).\n", + hw_size >> 20); + } + + if (gd->ram_size > hw_size) { + printf("DDR: Error: DRAM size from device tree is greater\n"); + printf(" than hardware size.\n"); + hang(); + } + + printf("DDR: %lld MiB\n", gd->ram_size >> 20); + + /* This enables nonsecure access to DDR */ + /* mpuregion0addr_limit */ + FW_MPU_DDR_SCR_WRITEL(gd->ram_size - 1, + FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT); + FW_MPU_DDR_SCR_WRITEL(0x1F, FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT); + + /* nonmpuregion0addr_limit */ + FW_MPU_DDR_SCR_WRITEL(gd->ram_size - 1, + FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT); + + /* Enable mpuregion0enable and nonmpuregion0enable */ + FW_MPU_DDR_SCR_WRITEL(MPUREGION0_ENABLE | NONMPUREGION0_ENABLE, + FW_MPU_DDR_SCR_EN_SET); + + u32 ctrlcfg1 = hmc_readl(plat, CTRLCFG1); + + /* Enable or disable the DDR ECC */ + if (CTRLCFG1_CFG_CTRL_EN_ECC(ctrlcfg1)) { + setbits_le32(plat->hmc + ECCCTRL1, + (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_ECC_EN_SET_MSK)); + clrbits_le32(plat->hmc + ECCCTRL1, + (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_CNT_RST_SET_MSK)); + setbits_le32(plat->hmc + ECCCTRL2, + (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK | + DDR_HMC_ECCCTL2_AWB_EN_SET_MSK)); + setbits_le32(plat->hmc + ERRINTEN, + DDR_HMC_ERRINTEN_DERRINTEN_EN_SET_MSK); + + if (!cpu_has_been_warmreset()) + sdram_init_ecc_bits(&bd); + } else { + clrbits_le32(plat->hmc + ECCCTRL1, + (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_ECC_EN_SET_MSK)); + clrbits_le32(plat->hmc + ECCCTRL2, + (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK | + DDR_HMC_ECCCTL2_AWB_EN_SET_MSK)); + } + + /* Enable non-secure reads/writes to HMC Adapter for SDRAM ECC */ + writel(FW_HMC_ADAPTOR_MPU_MASK, FW_HMC_ADAPTOR_REG_ADDR); + + sdram_size_check(&bd); + + priv->info.base = bd.bi_dram[0].start; + priv->info.size = gd->ram_size; + + debug("DDR: HMC init success\n"); + return 0; +} diff --git a/roms/u-boot/drivers/ddr/altera/sdram_arria10.c b/roms/u-boot/drivers/ddr/altera/sdram_arria10.c new file mode 100644 index 000000000..4a8f8dea1 --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sdram_arria10.c @@ -0,0 +1,714 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2017 Intel Corporation <www.intel.com> + */ + +#include <common.h> +#include <cpu_func.h> +#include <errno.h> +#include <fdtdec.h> +#include <init.h> 
+#include <log.h> +#include <malloc.h> +#include <wait_bit.h> +#include <watchdog.h> +#include <asm/cache.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <asm/arch/fpga_manager.h> +#include <asm/arch/misc.h> +#include <asm/arch/reset_manager.h> +#include <asm/arch/sdram.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/kernel.h> + +DECLARE_GLOBAL_DATA_PTR; + +static void sdram_mmr_init(void); +static u64 sdram_size_calc(void); + +/* FAWBANK - Number of Bank of a given device involved in the FAW period. */ +#define ARRIA10_SDR_ACTIVATE_FAWBANK (0x1) + +#define ARRIA_DDR_CONFIG(A, B, C, R) \ + (((A) << 24) | ((B) << 16) | ((C) << 8) | (R)) +#define DDR_CONFIG_ELEMENTS ARRAY_SIZE(ddr_config) +#define DDR_REG_SEQ2CORE 0xFFD0507C +#define DDR_REG_CORE2SEQ 0xFFD05078 +#define DDR_READ_LATENCY_DELAY 40 +#define DDR_SIZE_2GB_HEX 0x80000000 + +#define IO48_MMR_DRAMSTS 0xFFCFA0EC +#define IO48_MMR_NIOS2_RESERVE0 0xFFCFA110 +#define IO48_MMR_NIOS2_RESERVE1 0xFFCFA114 +#define IO48_MMR_NIOS2_RESERVE2 0xFFCFA118 + +#define SEQ2CORE_MASK 0xF +#define CORE2SEQ_INT_REQ 0xF +#define SEQ2CORE_INT_RESP_BIT 3 + +static const struct socfpga_ecc_hmc *socfpga_ecc_hmc_base = + (void *)SOCFPGA_SDR_ADDRESS; +static const struct socfpga_noc_ddr_scheduler *socfpga_noc_ddr_scheduler_base = + (void *)SOCFPGA_SDR_SCHEDULER_ADDRESS; +static const struct socfpga_noc_fw_ddr_mpu_fpga2sdram + *socfpga_noc_fw_ddr_mpu_fpga2sdram_base = + (void *)SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS; +static const struct socfpga_noc_fw_ddr_l3 *socfpga_noc_fw_ddr_l3_base = + (void *)SOCFPGA_SDR_FIREWALL_L3_ADDRESS; +static const struct socfpga_io48_mmr *socfpga_io48_mmr_base = + (void *)SOCFPGA_HMC_MMR_IO48_ADDRESS; + +/* The following are the supported configurations */ +static u32 ddr_config[] = { + /* Chip - Row - Bank - Column Style */ + /* All Types */ + ARRIA_DDR_CONFIG(0, 3, 10, 12), + ARRIA_DDR_CONFIG(0, 3, 10, 13), + ARRIA_DDR_CONFIG(0, 3, 10, 14), + ARRIA_DDR_CONFIG(0, 3, 10, 15), + ARRIA_DDR_CONFIG(0, 3, 10, 16), + ARRIA_DDR_CONFIG(0, 3, 10, 17), + /* LPDDR x16 */ + ARRIA_DDR_CONFIG(0, 3, 11, 14), + ARRIA_DDR_CONFIG(0, 3, 11, 15), + ARRIA_DDR_CONFIG(0, 3, 11, 16), + ARRIA_DDR_CONFIG(0, 3, 12, 15), + /* DDR4 Only */ + ARRIA_DDR_CONFIG(0, 4, 10, 14), + ARRIA_DDR_CONFIG(0, 4, 10, 15), + ARRIA_DDR_CONFIG(0, 4, 10, 16), + ARRIA_DDR_CONFIG(0, 4, 10, 17), /* 14 */ + /* Chip - Bank - Row - Column Style */ + ARRIA_DDR_CONFIG(1, 3, 10, 12), + ARRIA_DDR_CONFIG(1, 3, 10, 13), + ARRIA_DDR_CONFIG(1, 3, 10, 14), + ARRIA_DDR_CONFIG(1, 3, 10, 15), + ARRIA_DDR_CONFIG(1, 3, 10, 16), + ARRIA_DDR_CONFIG(1, 3, 10, 17), + ARRIA_DDR_CONFIG(1, 3, 11, 14), + ARRIA_DDR_CONFIG(1, 3, 11, 15), + ARRIA_DDR_CONFIG(1, 3, 11, 16), + ARRIA_DDR_CONFIG(1, 3, 12, 15), + /* DDR4 Only */ + ARRIA_DDR_CONFIG(1, 4, 10, 14), + ARRIA_DDR_CONFIG(1, 4, 10, 15), + ARRIA_DDR_CONFIG(1, 4, 10, 16), + ARRIA_DDR_CONFIG(1, 4, 10, 17), +}; + +static int match_ddr_conf(u32 ddr_conf) +{ + int i; + + for (i = 0; i < DDR_CONFIG_ELEMENTS; i++) { + if (ddr_conf == ddr_config[i]) + return i; + } + return 0; +} + +static int emif_clear(void) +{ + writel(0, DDR_REG_CORE2SEQ); + + return wait_for_bit_le32((u32 *)DDR_REG_SEQ2CORE, + SEQ2CORE_MASK, 0, 1000, 0); +} + +static int emif_reset(void) +{ + u32 c2s, s2c; + int ret; + + c2s = readl(DDR_REG_CORE2SEQ); + s2c = readl(DDR_REG_SEQ2CORE); + + debug("c2s=%08x s2c=%08x nr0=%08x nr1=%08x nr2=%08x dst=%08x\n", + c2s, s2c, readl(IO48_MMR_NIOS2_RESERVE0), + readl(IO48_MMR_NIOS2_RESERVE1), + 
readl(IO48_MMR_NIOS2_RESERVE2), + readl(IO48_MMR_DRAMSTS)); + + if (s2c & SEQ2CORE_MASK) { + ret = emif_clear(); + if (ret) { + debug("failed emif_clear()\n"); + return -EPERM; + } + } + + writel(CORE2SEQ_INT_REQ, DDR_REG_CORE2SEQ); + + ret = wait_for_bit_le32((u32 *)DDR_REG_SEQ2CORE, + SEQ2CORE_INT_RESP_BIT, false, 1000, false); + if (ret) { + debug("emif_reset failed to see interrupt acknowledge\n"); + emif_clear(); + return ret; + } + + mdelay(1); + + ret = emif_clear(); + if (ret) { + debug("emif_clear() failed\n"); + return -EPERM; + } + debug("emif_reset interrupt cleared\n"); + + debug("nr0=%08x nr1=%08x nr2=%08x\n", + readl(IO48_MMR_NIOS2_RESERVE0), + readl(IO48_MMR_NIOS2_RESERVE1), + readl(IO48_MMR_NIOS2_RESERVE2)); + + return 0; +} + +static int ddr_setup(void) +{ + int i, ret; + + /* Try 32 times to do a calibration */ + for (i = 0; i < 32; i++) { + mdelay(500); + ret = wait_for_bit_le32(&socfpga_ecc_hmc_base->ddrcalstat, + BIT(0), true, 500, false); + if (!ret) + return 0; + + ret = emif_reset(); + if (ret) + puts("Error: Failed to reset EMIF\n"); + } + + puts("Error: Could Not Calibrate SDRAM\n"); + return -EPERM; +} + +static int sdram_is_ecc_enabled(void) +{ + return !!(readl(&socfpga_ecc_hmc_base->eccctrl) & + ALT_ECC_HMC_OCP_ECCCTL_ECC_EN_SET_MSK); +} + +/* Initialize SDRAM ECC bits to avoid false DBE */ +static void sdram_init_ecc_bits(u32 size) +{ + icache_enable(); + + memset(0, 0, 0x8000); + gd->arch.tlb_addr = 0x4000; + gd->arch.tlb_size = PGTABLE_SIZE; + + dcache_enable(); + + printf("DDRCAL: Scrubbing ECC RAM (%i MiB).\n", size >> 20); + memset((void *)0x8000, 0, size - 0x8000); + flush_dcache_all(); + printf("DDRCAL: Scrubbing ECC RAM done.\n"); + dcache_disable(); +} + +/* Function to startup the SDRAM*/ +static int sdram_startup(void) +{ + /* Release NOC ddr scheduler from reset */ + socfpga_reset_deassert_noc_ddr_scheduler(); + + /* Bringup the DDR (calibration and configuration) */ + return ddr_setup(); +} + +static u64 sdram_size_calc(void) +{ + u32 dramaddrw = readl(&socfpga_io48_mmr_base->dramaddrw); + + u64 size = BIT(((dramaddrw & + IO48_MMR_DRAMADDRW_CFG_CS_ADDR_WIDTH_MASK) >> + IO48_MMR_DRAMADDRW_CFG_CS_ADDR_WIDTH_SHIFT) + + ((dramaddrw & + IO48_MMR_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH_MASK) >> + IO48_MMR_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH_SHIFT) + + ((dramaddrw & + IO48_MMR_DRAMADDRW_CFG_BANK_ADDR_WIDTH_MASK) >> + IO48_MMR_DRAMADDRW_CFG_BANK_ADDR_WIDTH_SHIFT) + + ((dramaddrw & + IO48_MMR_DRAMADDRW_CFG_ROW_ADDR_WIDTH_MASK) >> + IO48_MMR_DRAMADDRW_CFG_ROW_ADDR_WIDTH_SHIFT) + + (dramaddrw & IO48_MMR_DRAMADDRW_CFG_COL_ADDR_WIDTH_MASK)); + + size *= (2 << (readl(&socfpga_ecc_hmc_base->ddrioctrl) & + ALT_ECC_HMC_OCP_DDRIOCTRL_IO_SIZE_MSK)); + + debug("SDRAM size=%llu\n", size); + + return size; +} + +/* Function to initialize SDRAM MMR and NOC DDR scheduler*/ +static void sdram_mmr_init(void) +{ + u32 update_value, io48_value; + u32 ctrlcfg0 = readl(&socfpga_io48_mmr_base->ctrlcfg0); + u32 ctrlcfg1 = readl(&socfpga_io48_mmr_base->ctrlcfg1); + u32 dramaddrw = readl(&socfpga_io48_mmr_base->dramaddrw); + u32 caltim0 = readl(&socfpga_io48_mmr_base->caltiming0); + u32 caltim1 = readl(&socfpga_io48_mmr_base->caltiming1); + u32 caltim2 = readl(&socfpga_io48_mmr_base->caltiming2); + u32 caltim3 = readl(&socfpga_io48_mmr_base->caltiming3); + u32 caltim4 = readl(&socfpga_io48_mmr_base->caltiming4); + u32 caltim9 = readl(&socfpga_io48_mmr_base->caltiming9); + u32 ddrioctl; + + /* + * Configure the DDR IO size [0xFFCFB008] + * niosreserve0: Used to indicate DDR width & + 
* bit[7:0] = Number of data bits (0x20 for 32bit) + * bit[8] = 1 if user-mode OCT is present + * bit[9] = 1 if warm reset compiled into EMIF Cal Code + * bit[10] = 1 if warm reset is on during generation in EMIF Cal + * niosreserve1: IP ADCDS version encoded as 16 bit value + * bit[2:0] = Variant (0=not special,1=FAE beta, 2=Customer beta, + * 3=EAP, 4-6 are reserved) + * bit[5:3] = Service Pack # (e.g. 1) + * bit[9:6] = Minor Release # + * bit[14:10] = Major Release # + */ + if ((readl(&socfpga_io48_mmr_base->niosreserve1) >> 6) & 0x1FF) { + update_value = readl(&socfpga_io48_mmr_base->niosreserve0); + writel(((update_value & 0xFF) >> 5), + &socfpga_ecc_hmc_base->ddrioctrl); + } + + ddrioctl = readl(&socfpga_ecc_hmc_base->ddrioctrl); + + /* Set the DDR Configuration [0xFFD12400] */ + io48_value = ARRIA_DDR_CONFIG( + ((ctrlcfg1 & + IO48_MMR_CTRLCFG1_ADDR_ORDER_MASK) >> + IO48_MMR_CTRLCFG1_ADDR_ORDER_SHIFT), + ((dramaddrw & + IO48_MMR_DRAMADDRW_CFG_BANK_ADDR_WIDTH_MASK) >> + IO48_MMR_DRAMADDRW_CFG_BANK_ADDR_WIDTH_SHIFT) + + ((dramaddrw & + IO48_MMR_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH_MASK) >> + IO48_MMR_DRAMADDRW_CFG_BANK_GROUP_ADDR_WIDTH_SHIFT), + (dramaddrw & + IO48_MMR_DRAMADDRW_CFG_COL_ADDR_WIDTH_MASK), + ((dramaddrw & + IO48_MMR_DRAMADDRW_CFG_ROW_ADDR_WIDTH_MASK) >> + IO48_MMR_DRAMADDRW_CFG_ROW_ADDR_WIDTH_SHIFT)); + + update_value = match_ddr_conf(io48_value); + if (update_value) + writel(update_value, + &socfpga_noc_ddr_scheduler_base->ddr_t_main_scheduler_ddrconf); + + /* + * Configure DDR timing [0xFFD1240C] + * RDTOMISS = tRTP + tRP + tRCD - BL/2 + * WRTOMISS = WL + tWR + tRP + tRCD and + * WL = RL + BL/2 + 2 - rd-to-wr ; tWR = 15ns so... + * First part of equation is in memory clock units so divide by 2 + * for HMC clock units. 1066MHz is close to 1ns so use 15 directly. 
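 * As a hedged numeric check of the RDTOMISS term above (timing values
 * assumed for illustration, not real calibration output): with
 * cfg_rd_to_pch = 6, cfg_pch_to_valid = 7, cfg_act_to_rdwr = 7 and
 * BL = 8, the update_value computed below is
 * RDTOMISS = 6 + 7 + 7 - (8 >> 2) = 18 scheduler clocks.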
+ * WRTOMISS = ((RL + BL/2 + 2 + tWR) >> 1)- rd-to-wr + tRP + tRCD + */ + u32 ctrlcfg0_cfg_ctrl_burst_len = + (ctrlcfg0 & IO48_MMR_CTRLCFG0_CTRL_BURST_LENGTH_MASK) >> + IO48_MMR_CTRLCFG0_CTRL_BURST_LENGTH_SHIFT; + + u32 caltim0_cfg_act_to_rdwr = caltim0 & + IO48_MMR_CALTIMING0_CFG_ACT_TO_RDWR_MASK; + + u32 caltim0_cfg_act_to_act = + (caltim0 & IO48_MMR_CALTIMING0_CFG_ACT_TO_ACT_MASK) >> + IO48_MMR_CALTIMING0_CFG_ACT_TO_ACT_SHIFT; + + u32 caltim0_cfg_act_to_act_db = + (caltim0 & + IO48_MMR_CALTIMING0_CFG_ACT_TO_ACT_DIFF_BANK_MASK) >> + IO48_MMR_CALTIMING0_CFG_ACT_TO_ACT_DIFF_BANK_SHIFT; + + u32 caltim1_cfg_rd_to_wr = + (caltim1 & IO48_MMR_CALTIMING1_CFG_RD_TO_WR_MASK) >> + IO48_MMR_CALTIMING1_CFG_RD_TO_WR_SHIFT; + + u32 caltim1_cfg_rd_to_rd_dc = + (caltim1 & IO48_MMR_CALTIMING1_CFG_RD_TO_RD_DC_MASK) >> + IO48_MMR_CALTIMING1_CFG_RD_TO_RD_DC_SHIFT; + + u32 caltim1_cfg_rd_to_wr_dc = + (caltim1 & IO48_MMR_CALTIMING1_CFG_RD_TO_WR_DIFF_CHIP_MASK) >> + IO48_MMR_CALTIMING1_CFG_RD_TO_WR_DIFF_CHIP_SHIFT; + + u32 caltim2_cfg_rd_to_pch = + (caltim2 & IO48_MMR_CALTIMING2_CFG_RD_TO_PCH_MASK) >> + IO48_MMR_CALTIMING2_CFG_RD_TO_PCH_SHIFT; + + u32 caltim3_cfg_wr_to_rd = + (caltim3 & IO48_MMR_CALTIMING3_CFG_WR_TO_RD_MASK) >> + IO48_MMR_CALTIMING3_CFG_WR_TO_RD_SHIFT; + + u32 caltim3_cfg_wr_to_rd_dc = + (caltim3 & IO48_MMR_CALTIMING3_CFG_WR_TO_RD_DIFF_CHIP_MASK) >> + IO48_MMR_CALTIMING3_CFG_WR_TO_RD_DIFF_CHIP_SHIFT; + + u32 caltim4_cfg_pch_to_valid = + (caltim4 & IO48_MMR_CALTIMING4_CFG_PCH_TO_VALID_MASK) >> + IO48_MMR_CALTIMING4_CFG_PCH_TO_VALID_SHIFT; + + u32 caltim9_cfg_4_act_to_act = caltim9 & + IO48_MMR_CALTIMING9_CFG_WR_4_ACT_TO_ACT_MASK; + + update_value = (caltim2_cfg_rd_to_pch + caltim4_cfg_pch_to_valid + + caltim0_cfg_act_to_rdwr - + (ctrlcfg0_cfg_ctrl_burst_len >> 2)); + + io48_value = ((((readl(&socfpga_io48_mmr_base->dramtiming0) & + ALT_IO48_DRAMTIME_MEM_READ_LATENCY_MASK) + 2 + 15 + + (ctrlcfg0_cfg_ctrl_burst_len >> 1)) >> 1) - + /* Up to here was in memory cycles so divide by 2 */ + caltim1_cfg_rd_to_wr + caltim0_cfg_act_to_rdwr + + caltim4_cfg_pch_to_valid); + + writel(((caltim0_cfg_act_to_act << + ALT_NOC_MPU_DDR_T_SCHED_DDRTIMING_ACTTOACT_LSB) | + (update_value << + ALT_NOC_MPU_DDR_T_SCHED_DDRTIMING_RDTOMISS_LSB) | + (io48_value << + ALT_NOC_MPU_DDR_T_SCHED_DDRTIMING_WRTOMISS_LSB) | + ((ctrlcfg0_cfg_ctrl_burst_len >> 2) << + ALT_NOC_MPU_DDR_T_SCHED_DDRTIMING_BURSTLEN_LSB) | + (caltim1_cfg_rd_to_wr << + ALT_NOC_MPU_DDR_T_SCHED_DDRTIMING_RDTOWR_LSB) | + (caltim3_cfg_wr_to_rd << + ALT_NOC_MPU_DDR_T_SCHED_DDRTIMING_WRTORD_LSB) | + (((ddrioctl == 1) ? 1 : 0) << + ALT_NOC_MPU_DDR_T_SCHED_DDRTIMING_BWRATIO_LSB)), + &socfpga_noc_ddr_scheduler_base-> + ddr_t_main_scheduler_ddrtiming); + + /* Configure DDR mode [0xFFD12410] [precharge = 0] */ + writel(((ddrioctl ? 
0 : 1) << + ALT_NOC_MPU_DDR_T_SCHED_DDRMOD_BWRATIOEXTENDED_LSB), + &socfpga_noc_ddr_scheduler_base->ddr_t_main_scheduler_ddrmode); + + /* Configure the read latency [0xFFD12414] */ + writel(((readl(&socfpga_io48_mmr_base->dramtiming0) & + ALT_IO48_DRAMTIME_MEM_READ_LATENCY_MASK) >> 1) + + DDR_READ_LATENCY_DELAY, + &socfpga_noc_ddr_scheduler_base-> + ddr_t_main_scheduler_readlatency); + + /* + * Configuring timing values concerning activate commands + * [0xFFD12438] [FAWBANK alway 1 because always 4 bank DDR] + */ + writel(((caltim0_cfg_act_to_act_db << + ALT_NOC_MPU_DDR_T_SCHED_ACTIVATE_RRD_LSB) | + (caltim9_cfg_4_act_to_act << + ALT_NOC_MPU_DDR_T_SCHED_ACTIVATE_FAW_LSB) | + (ARRIA10_SDR_ACTIVATE_FAWBANK << + ALT_NOC_MPU_DDR_T_SCHED_ACTIVATE_FAWBANK_LSB)), + &socfpga_noc_ddr_scheduler_base->ddr_t_main_scheduler_activate); + + /* + * Configuring timing values concerning device to device data bus + * ownership change [0xFFD1243C] + */ + writel(((caltim1_cfg_rd_to_rd_dc << + ALT_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSRDTORD_LSB) | + (caltim1_cfg_rd_to_wr_dc << + ALT_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSRDTOWR_LSB) | + (caltim3_cfg_wr_to_rd_dc << + ALT_NOC_MPU_DDR_T_SCHED_DEVTODEV_BUSWRTORD_LSB)), + &socfpga_noc_ddr_scheduler_base->ddr_t_main_scheduler_devtodev); + + /* Enable or disable the SDRAM ECC */ + if (ctrlcfg1 & IO48_MMR_CTRLCFG1_CTRL_ENABLE_ECC) { + setbits_le32(&socfpga_ecc_hmc_base->eccctrl, + (ALT_ECC_HMC_OCP_ECCCTL_AWB_CNT_RST_SET_MSK | + ALT_ECC_HMC_OCP_ECCCTL_CNT_RST_SET_MSK | + ALT_ECC_HMC_OCP_ECCCTL_ECC_EN_SET_MSK)); + clrbits_le32(&socfpga_ecc_hmc_base->eccctrl, + (ALT_ECC_HMC_OCP_ECCCTL_AWB_CNT_RST_SET_MSK | + ALT_ECC_HMC_OCP_ECCCTL_CNT_RST_SET_MSK)); + setbits_le32(&socfpga_ecc_hmc_base->eccctrl2, + (ALT_ECC_HMC_OCP_ECCCTL2_RMW_EN_SET_MSK | + ALT_ECC_HMC_OCP_ECCCTL2_AWB_EN_SET_MSK)); + } else { + clrbits_le32(&socfpga_ecc_hmc_base->eccctrl, + (ALT_ECC_HMC_OCP_ECCCTL_AWB_CNT_RST_SET_MSK | + ALT_ECC_HMC_OCP_ECCCTL_CNT_RST_SET_MSK | + ALT_ECC_HMC_OCP_ECCCTL_ECC_EN_SET_MSK)); + clrbits_le32(&socfpga_ecc_hmc_base->eccctrl2, + (ALT_ECC_HMC_OCP_ECCCTL2_RMW_EN_SET_MSK | + ALT_ECC_HMC_OCP_ECCCTL2_AWB_EN_SET_MSK)); + } +} + +struct firewall_entry { + const char *prop_name; + const u32 cfg_addr; + const u32 en_addr; + const u32 en_bit; +}; +#define FW_MPU_FPGA_ADDRESS \ + ((const struct socfpga_noc_fw_ddr_mpu_fpga2sdram *)\ + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS) + +#define SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(ADDR) \ + (SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS + \ + offsetof(struct socfpga_noc_fw_ddr_mpu_fpga2sdram, ADDR)) + +#define SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(ADDR) \ + (SOCFPGA_SDR_FIREWALL_L3_ADDRESS + \ + offsetof(struct socfpga_noc_fw_ddr_l3, ADDR)) + +const struct firewall_entry firewall_table[] = { + { + "mpu0", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(mpuregion0addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_MPUREG0EN_SET_MSK + }, + { + "mpu1", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS + + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(mpuregion1addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_MPUREG1EN_SET_MSK + }, + { + "mpu2", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(mpuregion2addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_MPUREG2EN_SET_MSK + }, + { + "mpu3", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(mpuregion3addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_MPUREG3EN_SET_MSK + }, + { + "l3-0", + 
SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion0addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG0EN_SET_MSK + }, + { + "l3-1", + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion1addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG1EN_SET_MSK + }, + { + "l3-2", + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion2addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG2EN_SET_MSK + }, + { + "l3-3", + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion3addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG3EN_SET_MSK + }, + { + "l3-4", + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion4addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG4EN_SET_MSK + }, + { + "l3-5", + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion5addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG5EN_SET_MSK + }, + { + "l3-6", + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion6addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG6EN_SET_MSK + }, + { + "l3-7", + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(hpsregion7addr), + SOCFPGA_SDR_FIREWALL_L3_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_HPSREG7EN_SET_MSK + }, + { + "fpga2sdram0-0", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram0region0addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR0REG0EN_SET_MSK + }, + { + "fpga2sdram0-1", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram0region1addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR0REG1EN_SET_MSK + }, + { + "fpga2sdram0-2", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram0region2addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR0REG2EN_SET_MSK + }, + { + "fpga2sdram0-3", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram0region3addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR0REG3EN_SET_MSK + }, + { + "fpga2sdram1-0", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram1region0addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR1REG0EN_SET_MSK + }, + { + "fpga2sdram1-1", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram1region1addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR1REG1EN_SET_MSK + }, + { + "fpga2sdram1-2", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram1region2addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR1REG2EN_SET_MSK + }, + { + "fpga2sdram1-3", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram1region3addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR1REG3EN_SET_MSK + }, + { + "fpga2sdram2-0", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram2region0addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR2REG0EN_SET_MSK + }, + { + "fpga2sdram2-1", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram2region1addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR2REG1EN_SET_MSK + }, + { + "fpga2sdram2-2", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram2region2addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + 
ALT_NOC_FW_DDR_SCR_EN_F2SDR2REG2EN_SET_MSK + }, + { + "fpga2sdram2-3", + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET + (fpga2sdram2region3addr), + SOCFPGA_SDR_FIREWALL_MPU_FPGA_ADDRESS_OFFSET(enable), + ALT_NOC_FW_DDR_SCR_EN_F2SDR2REG3EN_SET_MSK + }, + +}; + +static int of_sdram_firewall_setup(const void *blob) +{ + int child, i, node, ret; + u32 start_end[2]; + char name[32]; + + node = fdtdec_next_compatible(blob, 0, COMPAT_ALTERA_SOCFPGA_NOC); + if (node < 0) + return -ENXIO; + + child = fdt_first_subnode(blob, node); + if (child < 0) + return -ENXIO; + + /* set to default state */ + writel(0, &socfpga_noc_fw_ddr_mpu_fpga2sdram_base->enable); + writel(0, &socfpga_noc_fw_ddr_l3_base->enable); + + + for (i = 0; i < ARRAY_SIZE(firewall_table); i++) { + sprintf(name, "%s", firewall_table[i].prop_name); + ret = fdtdec_get_int_array(blob, child, name, + start_end, 2); + if (ret) { + sprintf(name, "altr,%s", firewall_table[i].prop_name); + ret = fdtdec_get_int_array(blob, child, name, + start_end, 2); + if (ret) + continue; + } + + writel((start_end[0] & ALT_NOC_FW_DDR_ADDR_MASK) | + (start_end[1] << ALT_NOC_FW_DDR_END_ADDR_LSB), + firewall_table[i].cfg_addr); + setbits_le32(firewall_table[i].en_addr, + firewall_table[i].en_bit); + } + + return 0; +} + +int ddr_calibration_sequence(void) +{ + WATCHDOG_RESET(); + + /* Check to see if SDRAM cal was success */ + if (sdram_startup()) { + puts("DDRCAL: Failed\n"); + return -EPERM; + } + + puts("DDRCAL: Success\n"); + + WATCHDOG_RESET(); + + /* initialize the MMR register */ + sdram_mmr_init(); + + /* assigning the SDRAM size */ + u64 size = sdram_size_calc(); + + /* + * If size is less than zero, this is invalid/weird value from + * calculation, use default Config size. + * Up to 2GB is supported, 2GB would be used if more than that. + */ + if (size <= 0) + gd->ram_size = PHYS_SDRAM_1_SIZE; + else if (DDR_SIZE_2GB_HEX <= size) + gd->ram_size = DDR_SIZE_2GB_HEX; + else + gd->ram_size = (u32)size; + + /* setup the dram info within bd */ + dram_init_banksize(); + + if (of_sdram_firewall_setup(gd->fdt_blob)) + puts("FW: Error Configuring Firewall\n"); + + if (sdram_is_ecc_enabled()) + sdram_init_ecc_bits(gd->ram_size); + + return 0; +} diff --git a/roms/u-boot/drivers/ddr/altera/sdram_gen5.c b/roms/u-boot/drivers/ddr/altera/sdram_gen5.c new file mode 100644 index 000000000..8d3ce495d --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sdram_gen5.c @@ -0,0 +1,655 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright Altera Corporation (C) 2014-2015 + */ +#include <common.h> +#include <dm.h> +#include <errno.h> +#include <div64.h> +#include <init.h> +#include <log.h> +#include <ram.h> +#include <reset.h> +#include <watchdog.h> +#include <asm/arch/fpga_manager.h> +#include <asm/arch/reset_manager.h> +#include <asm/arch/sdram.h> +#include <asm/arch/system_manager.h> +#include <asm/bitops.h> +#include <asm/io.h> +#include <dm/device_compat.h> + +#include "sequencer.h" + +#ifdef CONFIG_SPL_BUILD + +struct altera_gen5_sdram_priv { + struct ram_info info; +}; + +struct altera_gen5_sdram_plat { + struct socfpga_sdr *sdr; +}; + +struct sdram_prot_rule { + u32 sdram_start; /* SDRAM start address */ + u32 sdram_end; /* SDRAM end address */ + u32 rule; /* SDRAM protection rule number: 0-19 */ + int valid; /* Rule valid or not? 
1 - valid, 0 not*/ + + u32 security; + u32 portmask; + u32 result; + u32 lo_prot_id; + u32 hi_prot_id; +}; + +static unsigned long sdram_calculate_size(struct socfpga_sdr_ctrl *sdr_ctrl); + +/** + * get_errata_rows() - Up the number of DRAM rows to cover entire address space + * @cfg: SDRAM controller configuration data + * + * SDRAM Failure happens when accessing non-existent memory. Artificially + * increase the number of rows so that the memory controller thinks it has + * 4GB of RAM. This function returns such amount of rows. + */ +static int get_errata_rows(const struct socfpga_sdram_config *cfg) +{ + /* Define constant for 4G memory - used for SDRAM errata workaround */ +#define MEMSIZE_4G (4ULL * 1024ULL * 1024ULL * 1024ULL) + const unsigned long long memsize = MEMSIZE_4G; + const unsigned int cs = + ((cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB) + 1; + const unsigned int rows = + (cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB; + const unsigned int banks = + (cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB; + const unsigned int cols = + (cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB; + const unsigned int width = 8; + + unsigned long long newrows; + int bits, inewrowslog2; + + debug("workaround rows - memsize %lld\n", memsize); + debug("workaround rows - cs %d\n", cs); + debug("workaround rows - width %d\n", width); + debug("workaround rows - rows %d\n", rows); + debug("workaround rows - banks %d\n", banks); + debug("workaround rows - cols %d\n", cols); + + newrows = lldiv(memsize, cs * (width / 8)); + debug("rows workaround - term1 %lld\n", newrows); + + newrows = lldiv(newrows, (1 << banks) * (1 << cols)); + debug("rows workaround - term2 %lld\n", newrows); + + /* + * Compute the hamming weight - same as number of bits set. + * Need to see if result is ordinal power of 2 before + * attempting log2 of result. + */ + bits = generic_hweight32(newrows); + + debug("rows workaround - bits %d\n", bits); + + if (bits != 1) { + printf("SDRAM workaround failed, bits set %d\n", bits); + return rows; + } + + if (newrows > UINT_MAX) { + printf("SDRAM workaround rangecheck failed, %lld\n", newrows); + return rows; + } + + inewrowslog2 = __ilog2(newrows); + + debug("rows workaround - ilog2 %d, %lld\n", inewrowslog2, newrows); + + if (inewrowslog2 == -1) { + printf("SDRAM workaround failed, newrows %lld\n", newrows); + return rows; + } + + return inewrowslog2; +} + +/* SDRAM protection rules vary from 0-19, a total of 20 rules. 
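 * As an illustrative example of the packing done by sdram_set_rule()
 * below (addresses assumed): a rule spanning 0x00000000-0x40000000 yields
 * lo_addr_bits = 0x000 and hi_addr_bits = (0x40000000 - 1) >> 20 = 0x3FF,
 * so prot_rule_addr is written with 0x000 | (0x3FF << 12) = 0x3FF000.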
*/ +static void sdram_set_rule(struct socfpga_sdr_ctrl *sdr_ctrl, + struct sdram_prot_rule *prule) +{ + u32 lo_addr_bits; + u32 hi_addr_bits; + int ruleno = prule->rule; + + /* Select the rule */ + writel(ruleno, &sdr_ctrl->prot_rule_rdwr); + + /* Obtain the address bits */ + lo_addr_bits = prule->sdram_start >> 20ULL; + hi_addr_bits = (prule->sdram_end - 1) >> 20ULL; + + debug("sdram set rule start %x, %d\n", lo_addr_bits, + prule->sdram_start); + debug("sdram set rule end %x, %d\n", hi_addr_bits, + prule->sdram_end); + + /* Set rule addresses */ + writel(lo_addr_bits | (hi_addr_bits << 12), &sdr_ctrl->prot_rule_addr); + + /* Set rule protection ids */ + writel(prule->lo_prot_id | (prule->hi_prot_id << 12), + &sdr_ctrl->prot_rule_id); + + /* Set the rule data */ + writel(prule->security | (prule->valid << 2) | + (prule->portmask << 3) | (prule->result << 13), + &sdr_ctrl->prot_rule_data); + + /* write the rule */ + writel(ruleno | (1 << 5), &sdr_ctrl->prot_rule_rdwr); + + /* Set rule number to 0 by default */ + writel(0, &sdr_ctrl->prot_rule_rdwr); +} + +static void sdram_get_rule(struct socfpga_sdr_ctrl *sdr_ctrl, + struct sdram_prot_rule *prule) +{ + u32 addr; + u32 id; + u32 data; + int ruleno = prule->rule; + + /* Read the rule */ + writel(ruleno, &sdr_ctrl->prot_rule_rdwr); + writel(ruleno | (1 << 6), &sdr_ctrl->prot_rule_rdwr); + + /* Get the addresses */ + addr = readl(&sdr_ctrl->prot_rule_addr); + prule->sdram_start = (addr & 0xFFF) << 20; + prule->sdram_end = ((addr >> 12) & 0xFFF) << 20; + + /* Get the configured protection IDs */ + id = readl(&sdr_ctrl->prot_rule_id); + prule->lo_prot_id = id & 0xFFF; + prule->hi_prot_id = (id >> 12) & 0xFFF; + + /* Get protection data */ + data = readl(&sdr_ctrl->prot_rule_data); + + prule->security = data & 0x3; + prule->valid = (data >> 2) & 0x1; + prule->portmask = (data >> 3) & 0x3FF; + prule->result = (data >> 13) & 0x1; +} + +static void +sdram_set_protection_config(struct socfpga_sdr_ctrl *sdr_ctrl, + const u32 sdram_start, const u32 sdram_end) +{ + struct sdram_prot_rule rule; + int rules; + + /* Start with accepting all SDRAM transaction */ + writel(0x0, &sdr_ctrl->protport_default); + + /* Clear all protection rules for warm boot case */ + memset(&rule, 0, sizeof(rule)); + + for (rules = 0; rules < 20; rules++) { + rule.rule = rules; + sdram_set_rule(sdr_ctrl, &rule); + } + + /* new rule: accept SDRAM */ + rule.sdram_start = sdram_start; + rule.sdram_end = sdram_end; + rule.lo_prot_id = 0x0; + rule.hi_prot_id = 0xFFF; + rule.portmask = 0x3FF; + rule.security = 0x3; + rule.result = 0; + rule.valid = 1; + rule.rule = 0; + + /* set new rule */ + sdram_set_rule(sdr_ctrl, &rule); + + /* default rule: reject everything */ + writel(0x3ff, &sdr_ctrl->protport_default); +} + +static void sdram_dump_protection_config(struct socfpga_sdr_ctrl *sdr_ctrl) +{ + struct sdram_prot_rule rule; + int rules; + + debug("SDRAM Prot rule, default %x\n", + readl(&sdr_ctrl->protport_default)); + + for (rules = 0; rules < 20; rules++) { + rule.rule = rules; + sdram_get_rule(sdr_ctrl, &rule); + debug("Rule %d, rules ...\n", rules); + debug(" sdram start %x\n", rule.sdram_start); + debug(" sdram end %x\n", rule.sdram_end); + debug(" low prot id %d, hi prot id %d\n", + rule.lo_prot_id, + rule.hi_prot_id); + debug(" portmask %x\n", rule.portmask); + debug(" security %d\n", rule.security); + debug(" result %d\n", rule.result); + debug(" valid %d\n", rule.valid); + } +} + +/** + * sdram_write_verify() - write to register and verify the write. 
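 * (In this driver the helper guards the fpgaport_rst write in
 * sdram_mmr_init_full(); a readback mismatch aborts SDRAM init early.)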
+ * @addr: Register address + * @val: Value to be written and verified + * + * This function writes to a register, reads back the value and compares + * the result with the written value to check if the data match. + */ +static unsigned sdram_write_verify(const u32 *addr, const u32 val) +{ + u32 rval; + + debug(" Write - Address 0x%p Data 0x%08x\n", addr, val); + writel(val, addr); + + debug(" Read and verify..."); + rval = readl(addr); + if (rval != val) { + debug("FAIL - Address 0x%p Expected 0x%08x Data 0x%08x\n", + addr, val, rval); + return -EINVAL; + } + + debug("correct!\n"); + return 0; +} + +/** + * sdr_get_ctrlcfg() - Get the value of DRAM CTRLCFG register + * @cfg: SDRAM controller configuration data + * + * Return the value of DRAM CTRLCFG register. + */ +static u32 sdr_get_ctrlcfg(const struct socfpga_sdram_config *cfg) +{ + const u32 csbits = + ((cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB) + 1; + u32 addrorder = + (cfg->ctrl_cfg & SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK) >> + SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB; + + u32 ctrl_cfg = cfg->ctrl_cfg; + + /* + * SDRAM Failure When Accessing Non-Existent Memory + * Set the addrorder field of the SDRAM control register + * based on the CSBITs setting. + */ + if (csbits == 1) { + if (addrorder != 0) + debug("INFO: Changing address order to 0 (chip, row, bank, column)\n"); + addrorder = 0; + } else if (csbits == 2) { + if (addrorder != 2) + debug("INFO: Changing address order to 2 (row, chip, bank, column)\n"); + addrorder = 2; + } + + ctrl_cfg &= ~SDR_CTRLGRP_CTRLCFG_ADDRORDER_MASK; + ctrl_cfg |= addrorder << SDR_CTRLGRP_CTRLCFG_ADDRORDER_LSB; + + return ctrl_cfg; +} + +/** + * sdr_get_addr_rw() - Get the value of DRAM ADDRW register + * @cfg: SDRAM controller configuration data + * + * Return the value of DRAM ADDRW register. + */ +static u32 sdr_get_addr_rw(const struct socfpga_sdram_config *cfg) +{ + /* + * SDRAM Failure When Accessing Non-Existent Memory + * Set SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB to + * log2(number of chip select bits). Since there's only + * 1 or 2 chip selects, log2(1) => 0, and log2(2) => 1, + * which is the same as "chip selects" - 1. + */ + const int rows = get_errata_rows(cfg); + u32 dram_addrw = cfg->dram_addrw & ~SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK; + + return dram_addrw | (rows << SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB); +} + +/** + * sdr_load_regs() - Load SDRAM controller registers + * @cfg: SDRAM controller configuration data + * + * This function loads the register values into the SDRAM controller block. 
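 * Every write below is preceded by a debug() naming the register, so a
 * DEBUG build's log shows which controller register was programmed last
 * if the SPL stops responding here.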
+ */ +static void sdr_load_regs(struct socfpga_sdr_ctrl *sdr_ctrl, + const struct socfpga_sdram_config *cfg) +{ + const u32 ctrl_cfg = sdr_get_ctrlcfg(cfg); + const u32 dram_addrw = sdr_get_addr_rw(cfg); + + debug("\nConfiguring CTRLCFG\n"); + writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg); + + debug("Configuring DRAMTIMING1\n"); + writel(cfg->dram_timing1, &sdr_ctrl->dram_timing1); + + debug("Configuring DRAMTIMING2\n"); + writel(cfg->dram_timing2, &sdr_ctrl->dram_timing2); + + debug("Configuring DRAMTIMING3\n"); + writel(cfg->dram_timing3, &sdr_ctrl->dram_timing3); + + debug("Configuring DRAMTIMING4\n"); + writel(cfg->dram_timing4, &sdr_ctrl->dram_timing4); + + debug("Configuring LOWPWRTIMING\n"); + writel(cfg->lowpwr_timing, &sdr_ctrl->lowpwr_timing); + + debug("Configuring DRAMADDRW\n"); + writel(dram_addrw, &sdr_ctrl->dram_addrw); + + debug("Configuring DRAMIFWIDTH\n"); + writel(cfg->dram_if_width, &sdr_ctrl->dram_if_width); + + debug("Configuring DRAMDEVWIDTH\n"); + writel(cfg->dram_dev_width, &sdr_ctrl->dram_dev_width); + + debug("Configuring LOWPWREQ\n"); + writel(cfg->lowpwr_eq, &sdr_ctrl->lowpwr_eq); + + debug("Configuring DRAMINTR\n"); + writel(cfg->dram_intr, &sdr_ctrl->dram_intr); + + debug("Configuring STATICCFG\n"); + writel(cfg->static_cfg, &sdr_ctrl->static_cfg); + + debug("Configuring CTRLWIDTH\n"); + writel(cfg->ctrl_width, &sdr_ctrl->ctrl_width); + + debug("Configuring PORTCFG\n"); + writel(cfg->port_cfg, &sdr_ctrl->port_cfg); + + debug("Configuring FIFOCFG\n"); + writel(cfg->fifo_cfg, &sdr_ctrl->fifo_cfg); + + debug("Configuring MPPRIORITY\n"); + writel(cfg->mp_priority, &sdr_ctrl->mp_priority); + + debug("Configuring MPWEIGHT_MPWEIGHT_0\n"); + writel(cfg->mp_weight0, &sdr_ctrl->mp_weight0); + writel(cfg->mp_weight1, &sdr_ctrl->mp_weight1); + writel(cfg->mp_weight2, &sdr_ctrl->mp_weight2); + writel(cfg->mp_weight3, &sdr_ctrl->mp_weight3); + + debug("Configuring MPPACING_MPPACING_0\n"); + writel(cfg->mp_pacing0, &sdr_ctrl->mp_pacing0); + writel(cfg->mp_pacing1, &sdr_ctrl->mp_pacing1); + writel(cfg->mp_pacing2, &sdr_ctrl->mp_pacing2); + writel(cfg->mp_pacing3, &sdr_ctrl->mp_pacing3); + + debug("Configuring MPTHRESHOLDRST_MPTHRESHOLDRST_0\n"); + writel(cfg->mp_threshold0, &sdr_ctrl->mp_threshold0); + writel(cfg->mp_threshold1, &sdr_ctrl->mp_threshold1); + writel(cfg->mp_threshold2, &sdr_ctrl->mp_threshold2); + + debug("Configuring PHYCTRL_PHYCTRL_0\n"); + writel(cfg->phy_ctrl0, &sdr_ctrl->phy_ctrl0); + + debug("Configuring CPORTWIDTH\n"); + writel(cfg->cport_width, &sdr_ctrl->cport_width); + + debug("Configuring CPORTWMAP\n"); + writel(cfg->cport_wmap, &sdr_ctrl->cport_wmap); + + debug("Configuring CPORTRMAP\n"); + writel(cfg->cport_rmap, &sdr_ctrl->cport_rmap); + + debug("Configuring RFIFOCMAP\n"); + writel(cfg->rfifo_cmap, &sdr_ctrl->rfifo_cmap); + + debug("Configuring WFIFOCMAP\n"); + writel(cfg->wfifo_cmap, &sdr_ctrl->wfifo_cmap); + + debug("Configuring CPORTRDWR\n"); + writel(cfg->cport_rdwr, &sdr_ctrl->cport_rdwr); + + debug("Configuring DRAMODT\n"); + writel(cfg->dram_odt, &sdr_ctrl->dram_odt); + + if (dram_is_ddr(3)) { + debug("Configuring EXTRATIME1\n"); + writel(cfg->extratime1, &sdr_ctrl->extratime1); + } +} + +/** + * sdram_mmr_init_full() - Function to initialize SDRAM MMR + * @sdr_phy_reg: Value of the PHY control register 0 + * + * Initialize the SDRAM MMR. 
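 * The sequence below: stash the Quartus row count in a sysmgr handoff
 * register for sdram_calculate_size(), program the controller block via
 * sdr_load_regs(), restore the saved PHY control value if valid, latch
 * the configuration through STATICCFG.applycfg, then install the SDRAM
 * protection rules.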
+ */ +int sdram_mmr_init_full(struct socfpga_sdr_ctrl *sdr_ctrl, + unsigned int sdr_phy_reg) +{ + const struct socfpga_sdram_config *cfg = socfpga_get_sdram_config(); + const unsigned int rows = + (cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB; + int ret; + + writel(rows, + socfpga_get_sysmgr_addr() + SYSMGR_ISWGRP_HANDOFF_OFFSET(4)); + + sdr_load_regs(sdr_ctrl, cfg); + + /* saving this value to SYSMGR.ISWGRP.HANDOFF.FPGA2SDR */ + writel(cfg->fpgaport_rst, + socfpga_get_sysmgr_addr() + SYSMGR_ISWGRP_HANDOFF_OFFSET(3)); + + /* only enable if the FPGA is programmed */ + if (fpgamgr_test_fpga_ready()) { + ret = sdram_write_verify(&sdr_ctrl->fpgaport_rst, + cfg->fpgaport_rst); + if (ret) + return ret; + } + + /* Restore the SDR PHY Register if valid */ + if (sdr_phy_reg != 0xffffffff) + writel(sdr_phy_reg, &sdr_ctrl->phy_ctrl0); + + /* Final step - apply configuration changes */ + debug("Configuring STATICCFG\n"); + clrsetbits_le32(&sdr_ctrl->static_cfg, + SDR_CTRLGRP_STATICCFG_APPLYCFG_MASK, + 1 << SDR_CTRLGRP_STATICCFG_APPLYCFG_LSB); + + sdram_set_protection_config(sdr_ctrl, 0, + sdram_calculate_size(sdr_ctrl) - 1); + + sdram_dump_protection_config(sdr_ctrl); + + return 0; +} + +/** + * sdram_calculate_size() - Calculate SDRAM size + * + * Calculate SDRAM device size based on SDRAM controller parameters. + * Size is specified in bytes. + */ +static unsigned long sdram_calculate_size(struct socfpga_sdr_ctrl *sdr_ctrl) +{ + unsigned long temp; + unsigned long row, bank, col, cs, width; + const struct socfpga_sdram_config *cfg = socfpga_get_sdram_config(); + const unsigned int csbits = + ((cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_CSBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_CSBITS_LSB) + 1; + const unsigned int rowbits = + (cfg->dram_addrw & SDR_CTRLGRP_DRAMADDRW_ROWBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_ROWBITS_LSB; + + temp = readl(&sdr_ctrl->dram_addrw); + col = (temp & SDR_CTRLGRP_DRAMADDRW_COLBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_COLBITS_LSB; + + /* + * SDRAM Failure When Accessing Non-Existent Memory + * Use ROWBITS from Quartus/QSys to calculate SDRAM size + * since the FB specifies we modify ROWBITs to work around SDRAM + * controller issue. + */ + row = readl(socfpga_get_sysmgr_addr() + + SYSMGR_ISWGRP_HANDOFF_OFFSET(4)); + if (row == 0) + row = rowbits; + /* + * If the stored handoff value for rows is greater than + * the field width in the sdr.dramaddrw register then + * something is very wrong. Revert to using the the #define + * value handed off by the SOCEDS tool chain instead of + * using a broken value. + */ + if (row > 31) + row = rowbits; + + bank = (temp & SDR_CTRLGRP_DRAMADDRW_BANKBITS_MASK) >> + SDR_CTRLGRP_DRAMADDRW_BANKBITS_LSB; + + /* + * SDRAM Failure When Accessing Non-Existent Memory + * Use CSBITs from Quartus/QSys to calculate SDRAM size + * since the FB specifies we modify CSBITs to work around SDRAM + * controller issue. 
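 * As an illustrative calculation (handoff values assumed): row = 15,
 * bank = 3, col = 10, cs = 1 and width = 32 give
 * (1 << (15 + 3 + 10)) * 1 * (32 / 8) bytes = 1 GiB.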
+ */ + cs = csbits; + + width = readl(&sdr_ctrl->dram_if_width); + + /* ECC would not be calculated as its not addressible */ + if (width == SDRAM_WIDTH_32BIT_WITH_ECC) + width = 32; + if (width == SDRAM_WIDTH_16BIT_WITH_ECC) + width = 16; + + /* calculate the SDRAM size base on this info */ + temp = 1 << (row + bank + col); + temp = temp * cs * (width / 8); + + debug("%s returns %ld\n", __func__, temp); + + return temp; +} + +static int altera_gen5_sdram_of_to_plat(struct udevice *dev) +{ + struct altera_gen5_sdram_plat *plat = dev_get_plat(dev); + + plat->sdr = (struct socfpga_sdr *)devfdt_get_addr_index(dev, 0); + if (!plat->sdr) + return -ENODEV; + + return 0; +} + +static int altera_gen5_sdram_probe(struct udevice *dev) +{ + int ret; + unsigned long sdram_size; + struct altera_gen5_sdram_plat *plat = dev_get_plat(dev); + struct altera_gen5_sdram_priv *priv = dev_get_priv(dev); + struct socfpga_sdr_ctrl *sdr_ctrl = &plat->sdr->sdr_ctrl; + struct reset_ctl_bulk resets; + + ret = reset_get_bulk(dev, &resets); + if (ret) { + dev_err(dev, "Can't get reset: %d\n", ret); + return -ENODEV; + } + reset_deassert_bulk(&resets); + + if (sdram_mmr_init_full(sdr_ctrl, 0xffffffff) != 0) { + puts("SDRAM init failed.\n"); + goto failed; + } + + debug("SDRAM: Calibrating PHY\n"); + /* SDRAM calibration */ + if (sdram_calibration_full(plat->sdr) == 0) { + puts("SDRAM calibration failed.\n"); + goto failed; + } + + sdram_size = sdram_calculate_size(sdr_ctrl); + debug("SDRAM: %ld MiB\n", sdram_size >> 20); + + /* Sanity check ensure correct SDRAM size specified */ + if (get_ram_size(0, sdram_size) != sdram_size) { + puts("SDRAM size check failed!\n"); + goto failed; + } + + priv->info.base = 0; + priv->info.size = sdram_size; + + return 0; + +failed: + reset_release_bulk(&resets); + return -ENODEV; +} + +static int altera_gen5_sdram_get_info(struct udevice *dev, + struct ram_info *info) +{ + struct altera_gen5_sdram_priv *priv = dev_get_priv(dev); + + info->base = priv->info.base; + info->size = priv->info.size; + + return 0; +} + +static const struct ram_ops altera_gen5_sdram_ops = { + .get_info = altera_gen5_sdram_get_info, +}; + +static const struct udevice_id altera_gen5_sdram_ids[] = { + { .compatible = "altr,sdr-ctl" }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(altera_gen5_sdram) = { + .name = "altr_sdr_ctl", + .id = UCLASS_RAM, + .of_match = altera_gen5_sdram_ids, + .ops = &altera_gen5_sdram_ops, + .of_to_plat = altera_gen5_sdram_of_to_plat, + .plat_auto = sizeof(struct altera_gen5_sdram_plat), + .probe = altera_gen5_sdram_probe, + .priv_auto = sizeof(struct altera_gen5_sdram_priv), +}; + +#endif /* CONFIG_SPL_BUILD */ diff --git a/roms/u-boot/drivers/ddr/altera/sdram_s10.c b/roms/u-boot/drivers/ddr/altera/sdram_s10.c new file mode 100644 index 000000000..3caa2e14f --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sdram_s10.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016-2018 Intel Corporation <www.intel.com> + * + */ + +#include <common.h> +#include <cpu_func.h> +#include <dm.h> +#include <errno.h> +#include <div64.h> +#include <fdtdec.h> +#include <hang.h> +#include <init.h> +#include <log.h> +#include <ram.h> +#include <reset.h> +#include <asm/global_data.h> +#include "sdram_s10.h" +#include <wait_bit.h> +#include <asm/arch/firewall.h> +#include <asm/arch/reset_manager.h> +#include <asm/io.h> +#include <linux/sizes.h> + +DECLARE_GLOBAL_DATA_PTR; + +#define DDR_CONFIG(A, B, C, R) (((A) << 24) | ((B) << 16) | ((C) << 8) | (R)) + +/* The followring are the 
supported configurations */ +u32 ddr_config[] = { + /* DDR_CONFIG(Address order,Bank,Column,Row) */ + /* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */ + DDR_CONFIG(0, 3, 10, 12), + DDR_CONFIG(0, 3, 9, 13), + DDR_CONFIG(0, 3, 10, 13), + DDR_CONFIG(0, 3, 9, 14), + DDR_CONFIG(0, 3, 10, 14), + DDR_CONFIG(0, 3, 10, 15), + DDR_CONFIG(0, 3, 11, 14), + DDR_CONFIG(0, 3, 11, 15), + DDR_CONFIG(0, 3, 10, 16), + DDR_CONFIG(0, 3, 11, 16), + DDR_CONFIG(0, 3, 12, 15), /* 0xa */ + /* List for DDR4 only (pinout order > chip, bank, row, column) */ + DDR_CONFIG(1, 3, 10, 14), + DDR_CONFIG(1, 4, 10, 14), + DDR_CONFIG(1, 3, 10, 15), + DDR_CONFIG(1, 4, 10, 15), + DDR_CONFIG(1, 3, 10, 16), + DDR_CONFIG(1, 4, 10, 16), + DDR_CONFIG(1, 3, 10, 17), + DDR_CONFIG(1, 4, 10, 17), +}; + +int match_ddr_conf(u32 ddr_conf) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ddr_config); i++) { + if (ddr_conf == ddr_config[i]) + return i; + } + return 0; +} + +/** + * sdram_mmr_init_full() - Function to initialize SDRAM MMR + * + * Initialize the SDRAM MMR. + */ +int sdram_mmr_init_full(struct udevice *dev) +{ + struct altera_sdram_plat *plat = dev_get_plat(dev); + struct altera_sdram_priv *priv = dev_get_priv(dev); + u32 update_value, io48_value, ddrioctl; + u32 i; + int ret; + phys_size_t hw_size; + struct bd_info bd = {0}; + + /* Enable access to DDR from CPU master */ + clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_DDRREG), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE0), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1A), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1B), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1C), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1D), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1E), + CCU_ADBASE_DI_MASK); + + /* Enable access to DDR from IO master */ + clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE0), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1A), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1B), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1C), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1D), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1E), + CCU_ADBASE_DI_MASK); + + /* Enable access to DDR from TCU */ + clrbits_le32(CCU_REG_ADDR(CCU_TCU_MPRT_ADBASE_MEMSPACE0), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_TCU_MPRT_ADBASE_MEMSPACE1A), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_TCU_MPRT_ADBASE_MEMSPACE1B), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_TCU_MPRT_ADBASE_MEMSPACE1C), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_TCU_MPRT_ADBASE_MEMSPACE1D), + CCU_ADBASE_DI_MASK); + clrbits_le32(CCU_REG_ADDR(CCU_TCU_MPRT_ADBASE_MEMSPACE1E), + CCU_ADBASE_DI_MASK); + + /* this enables nonsecure access to DDR */ + /* mpuregion0addr_limit */ + FW_MPU_DDR_SCR_WRITEL(0xFFFF0000, FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT); + FW_MPU_DDR_SCR_WRITEL(0x1F, FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT); + + /* nonmpuregion0addr_limit */ + FW_MPU_DDR_SCR_WRITEL(0xFFFF0000, + FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT); + FW_MPU_DDR_SCR_WRITEL(0x1F, FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT); + + /* Enable mpuregion0enable and nonmpuregion0enable */ + 
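	/* (The 0xFFFF0000 / 0x1F limit pair above opens the entire DDR
	 * window up front; the Agilex variant of sdram_mmr_init_full()
	 * earlier in this diff instead programs gd->ram_size - 1 once the
	 * actual size is known.) */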
FW_MPU_DDR_SCR_WRITEL(MPUREGION0_ENABLE | NONMPUREGION0_ENABLE, + FW_MPU_DDR_SCR_EN_SET); + + /* Ensure HMC clock is running */ + if (poll_hmc_clock_status()) { + puts("DDR: Error as HMC clock not running\n"); + return -1; + } + + /* Try 3 times to do a calibration */ + for (i = 0; i < 3; i++) { + ret = wait_for_bit_le32((const void *)(plat->hmc + + DDRCALSTAT), + DDR_HMC_DDRCALSTAT_CAL_MSK, true, 1000, + false); + if (!ret) + break; + + emif_reset(plat); + } + + if (ret) { + puts("DDR: Error as SDRAM calibration failed\n"); + return -1; + } + debug("DDR: Calibration success\n"); + + u32 ctrlcfg0 = hmc_readl(plat, CTRLCFG0); + u32 ctrlcfg1 = hmc_readl(plat, CTRLCFG1); + u32 dramaddrw = hmc_readl(plat, DRAMADDRW); + u32 dramtim0 = hmc_readl(plat, DRAMTIMING0); + u32 caltim0 = hmc_readl(plat, CALTIMING0); + u32 caltim1 = hmc_readl(plat, CALTIMING1); + u32 caltim2 = hmc_readl(plat, CALTIMING2); + u32 caltim3 = hmc_readl(plat, CALTIMING3); + u32 caltim4 = hmc_readl(plat, CALTIMING4); + u32 caltim9 = hmc_readl(plat, CALTIMING9); + + /* + * Configure the DDR IO size [0xFFCFB008] + * niosreserve0: Used to indicate DDR width & + * bit[7:0] = Number of data bits (bit[6:5] 0x01=32bit, 0x10=64bit) + * bit[8] = 1 if user-mode OCT is present + * bit[9] = 1 if warm reset compiled into EMIF Cal Code + * bit[10] = 1 if warm reset is on during generation in EMIF Cal + * niosreserve1: IP ADCDS version encoded as 16 bit value + * bit[2:0] = Variant (0=not special,1=FAE beta, 2=Customer beta, + * 3=EAP, 4-6 are reserved) + * bit[5:3] = Service Pack # (e.g. 1) + * bit[9:6] = Minor Release # + * bit[14:10] = Major Release # + */ + update_value = hmc_readl(plat, NIOSRESERVED0); + hmc_ecc_writel(plat, ((update_value & 0xFF) >> 5), DDRIOCTRL); + ddrioctl = hmc_ecc_readl(plat, DDRIOCTRL); + + /* enable HPS interface to HMC */ + hmc_ecc_writel(plat, DDR_HMC_HPSINTFCSEL_ENABLE_MASK, HPSINTFCSEL); + + /* Set the DDR Configuration */ + io48_value = DDR_CONFIG(CTRLCFG1_CFG_ADDR_ORDER(ctrlcfg1), + (DRAMADDRW_CFG_BANK_ADDR_WIDTH(dramaddrw) + + DRAMADDRW_CFG_BANK_GRP_ADDR_WIDTH(dramaddrw)), + DRAMADDRW_CFG_COL_ADDR_WIDTH(dramaddrw), + DRAMADDRW_CFG_ROW_ADDR_WIDTH(dramaddrw)); + + update_value = match_ddr_conf(io48_value); + if (update_value) + ddr_sch_writel(plat, update_value, DDR_SCH_DDRCONF); + + /* Configure HMC dramaddrw */ + hmc_ecc_writel(plat, hmc_readl(plat, DRAMADDRW), DRAMADDRWIDTH); + + /* + * Configure DDR timing + * RDTOMISS = tRTP + tRP + tRCD - BL/2 + * WRTOMISS = WL + tWR + tRP + tRCD and + * WL = RL + BL/2 + 2 - rd-to-wr ; tWR = 15ns so... + * First part of equation is in memory clock units so divide by 2 + * for HMC clock units. 1066MHz is close to 1ns so use 15 directly. 
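	 * Hedged example (register values assumed): with RL (tCL) = 10,
	 * rd-to-wr = 4, tRP = 7, tRCD = 7, BL = 8 and tWR = 15, the formula
	 * on the next line gives
	 * WRTOMISS = ((10 + 4 + 2 + 15) >> 1) - 4 + 7 + 7 = 25 clocks.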
+ * WRTOMISS = ((RL + BL/2 + 2 + tWR) >> 1)- rd-to-wr + tRP + tRCD + */ + u32 burst_len = CTRLCFG0_CFG_CTRL_BURST_LEN(ctrlcfg0); + + update_value = CALTIMING2_CFG_RD_TO_WR_PCH(caltim2) + + CALTIMING4_CFG_PCH_TO_VALID(caltim4) + + CALTIMING0_CFG_ACT_TO_RDWR(caltim0) - + (burst_len >> 2); + io48_value = (((DRAMTIMING0_CFG_TCL(dramtim0) + 2 + DDR_TWR + + (burst_len >> 1)) >> 1) - + /* Up to here was in memory cycles so divide by 2 */ + CALTIMING1_CFG_RD_TO_WR(caltim1) + + CALTIMING0_CFG_ACT_TO_RDWR(caltim0) + + CALTIMING4_CFG_PCH_TO_VALID(caltim4)); + + ddr_sch_writel(plat, ((CALTIMING0_CFG_ACT_TO_ACT(caltim0) << + DDR_SCH_DDRTIMING_ACTTOACT_OFF) | + (update_value << DDR_SCH_DDRTIMING_RDTOMISS_OFF) | + (io48_value << DDR_SCH_DDRTIMING_WRTOMISS_OFF) | + ((burst_len >> 2) << DDR_SCH_DDRTIMING_BURSTLEN_OFF) | + (CALTIMING1_CFG_RD_TO_WR(caltim1) << + DDR_SCH_DDRTIMING_RDTOWR_OFF) | + (CALTIMING3_CFG_WR_TO_RD(caltim3) << + DDR_SCH_DDRTIMING_WRTORD_OFF) | + (((ddrioctl == 1) ? 1 : 0) << + DDR_SCH_DDRTIMING_BWRATIO_OFF)), + DDR_SCH_DDRTIMING); + + /* Configure DDR mode [precharge = 0] */ + ddr_sch_writel(plat, ((ddrioctl ? 0 : 1) << + DDR_SCH_DDRMOD_BWRATIOEXTENDED_OFF), + DDR_SCH_DDRMODE); + + /* Configure the read latency */ + ddr_sch_writel(plat, (DRAMTIMING0_CFG_TCL(dramtim0) >> 1) + + DDR_READ_LATENCY_DELAY, + DDR_SCH_READ_LATENCY); + + /* + * Configuring timing values concerning activate commands + * [FAWBANK alway 1 because always 4 bank DDR] + */ + ddr_sch_writel(plat, ((CALTIMING0_CFG_ACT_TO_ACT_DB(caltim0) << + DDR_SCH_ACTIVATE_RRD_OFF) | + (CALTIMING9_CFG_4_ACT_TO_ACT(caltim9) << + DDR_SCH_ACTIVATE_FAW_OFF) | + (DDR_ACTIVATE_FAWBANK << + DDR_SCH_ACTIVATE_FAWBANK_OFF)), + DDR_SCH_ACTIVATE); + + /* + * Configuring timing values concerning device to device data bus + * ownership change + */ + ddr_sch_writel(plat, ((CALTIMING1_CFG_RD_TO_RD_DC(caltim1) << + DDR_SCH_DEVTODEV_BUSRDTORD_OFF) | + (CALTIMING1_CFG_RD_TO_WR_DC(caltim1) << + DDR_SCH_DEVTODEV_BUSRDTOWR_OFF) | + (CALTIMING3_CFG_WR_TO_RD_DC(caltim3) << + DDR_SCH_DEVTODEV_BUSWRTORD_OFF)), + DDR_SCH_DEVTODEV); + + /* assigning the SDRAM size */ + unsigned long long size = sdram_calculate_size(plat); + /* If the size is invalid, use default Config size */ + if (size <= 0) + hw_size = PHYS_SDRAM_1_SIZE; + else + hw_size = size; + + /* Get bank configuration from devicetree */ + ret = fdtdec_decode_ram_size(gd->fdt_blob, NULL, 0, NULL, + (phys_size_t *)&gd->ram_size, &bd); + if (ret) { + puts("DDR: Failed to decode memory node\n"); + return -1; + } + + if (gd->ram_size != hw_size) + printf("DDR: Warning: DRAM size from device tree mismatch with hardware.\n"); + + printf("DDR: %lld MiB\n", gd->ram_size >> 20); + + /* Enable or disable the SDRAM ECC */ + if (CTRLCFG1_CFG_CTRL_EN_ECC(ctrlcfg1)) { + setbits_le32(plat->hmc + ECCCTRL1, + (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_ECC_EN_SET_MSK)); + clrbits_le32(plat->hmc + ECCCTRL1, + (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_CNT_RST_SET_MSK)); + setbits_le32(plat->hmc + ECCCTRL2, + (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK | + DDR_HMC_ECCCTL2_AWB_EN_SET_MSK)); + hmc_ecc_writel(plat, DDR_HMC_ERRINTEN_INTMASK, ERRINTENS); + + /* Initialize memory content if not from warm reset */ + if (!cpu_has_been_warmreset()) + sdram_init_ecc_bits(&bd); + } else { + clrbits_le32(plat->hmc + ECCCTRL1, + (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_CNT_RST_SET_MSK | + DDR_HMC_ECCCTL_ECC_EN_SET_MSK)); + clrbits_le32(plat->hmc + ECCCTRL2, + 
(DDR_HMC_ECCCTL2_RMW_EN_SET_MSK | + DDR_HMC_ECCCTL2_AWB_EN_SET_MSK)); + } + + /* Enable non-secure reads/writes to HMC Adapter for SDRAM ECC */ + writel(FW_HMC_ADAPTOR_MPU_MASK, FW_HMC_ADAPTOR_REG_ADDR); + + sdram_size_check(&bd); + + priv->info.base = bd.bi_dram[0].start; + priv->info.size = gd->ram_size; + + debug("DDR: HMC init success\n"); + return 0; +} + diff --git a/roms/u-boot/drivers/ddr/altera/sdram_s10.h b/roms/u-boot/drivers/ddr/altera/sdram_s10.h new file mode 100644 index 000000000..cca4cb35e --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sdram_s10.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2017-2018 Intel Corporation <www.intel.com> + * + */ + +#ifndef _SDRAM_S10_H_ +#define _SDRAM_S10_H_ + +#define DDR_TWR 15 +#define DDR_READ_LATENCY_DELAY 40 +#define DDR_ACTIVATE_FAWBANK 0x1 + +/* NOC DDR scheduler */ +#define DDR_SCH_ID_COREID 0 +#define DDR_SCH_ID_REVID 0x4 +#define DDR_SCH_DDRCONF 0x8 +#define DDR_SCH_DDRTIMING 0xc +#define DDR_SCH_DDRMODE 0x10 +#define DDR_SCH_READ_LATENCY 0x14 +#define DDR_SCH_ACTIVATE 0x38 +#define DDR_SCH_DEVTODEV 0x3c +#define DDR_SCH_DDR4TIMING 0x40 + +#define DDR_SCH_DDRTIMING_ACTTOACT_OFF 0 +#define DDR_SCH_DDRTIMING_RDTOMISS_OFF 6 +#define DDR_SCH_DDRTIMING_WRTOMISS_OFF 12 +#define DDR_SCH_DDRTIMING_BURSTLEN_OFF 18 +#define DDR_SCH_DDRTIMING_RDTOWR_OFF 21 +#define DDR_SCH_DDRTIMING_WRTORD_OFF 26 +#define DDR_SCH_DDRTIMING_BWRATIO_OFF 31 +#define DDR_SCH_DDRMOD_BWRATIOEXTENDED_OFF 1 +#define DDR_SCH_ACTIVATE_RRD_OFF 0 +#define DDR_SCH_ACTIVATE_FAW_OFF 4 +#define DDR_SCH_ACTIVATE_FAWBANK_OFF 10 +#define DDR_SCH_DEVTODEV_BUSRDTORD_OFF 0 +#define DDR_SCH_DEVTODEV_BUSRDTOWR_OFF 2 +#define DDR_SCH_DEVTODEV_BUSWRTORD_OFF 4 + +#include "sdram_soc64.h" + +#endif /* _SDRAM_S10_H_ */ diff --git a/roms/u-boot/drivers/ddr/altera/sdram_soc64.c b/roms/u-boot/drivers/ddr/altera/sdram_soc64.c new file mode 100644 index 000000000..a08f0953e --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sdram_soc64.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016-2019 Intel Corporation <www.intel.com> + * + */ + +#include <common.h> +#include <cpu_func.h> +#include <dm.h> +#include <errno.h> +#include <div64.h> +#include <fdtdec.h> +#include <hang.h> +#include <init.h> +#include <log.h> +#include <ram.h> +#include <reset.h> +#include "sdram_soc64.h" +#include <wait_bit.h> +#include <asm/arch/firewall.h> +#include <asm/arch/system_manager.h> +#include <asm/arch/reset_manager.h> +#include <asm/cache.h> +#include <asm/global_data.h> +#include <asm/io.h> +#include <dm/device_compat.h> +#include <linux/sizes.h> + +#define PGTABLE_OFF 0x4000 + +u32 hmc_readl(struct altera_sdram_plat *plat, u32 reg) +{ + return readl(plat->iomhc + reg); +} + +u32 hmc_ecc_readl(struct altera_sdram_plat *plat, u32 reg) +{ + return readl(plat->hmc + reg); +} + +u32 hmc_ecc_writel(struct altera_sdram_plat *plat, + u32 data, u32 reg) +{ + return writel(data, plat->hmc + reg); +} + +u32 ddr_sch_writel(struct altera_sdram_plat *plat, u32 data, + u32 reg) +{ + return writel(data, plat->ddr_sch + reg); +} + +int emif_clear(struct altera_sdram_plat *plat) +{ + hmc_ecc_writel(plat, 0, RSTHANDSHAKECTRL); + + return wait_for_bit_le32((const void *)(plat->hmc + + RSTHANDSHAKESTAT), + DDR_HMC_RSTHANDSHAKE_MASK, + false, 1000, false); +} + +int emif_reset(struct altera_sdram_plat *plat) +{ + u32 c2s, s2c, ret; + + c2s = hmc_ecc_readl(plat, RSTHANDSHAKECTRL) & DDR_HMC_RSTHANDSHAKE_MASK; + s2c = hmc_ecc_readl(plat, RSTHANDSHAKESTAT) & 
DDR_HMC_RSTHANDSHAKE_MASK;
+
+ debug("DDR: c2s=%08x s2c=%08x nr0=%08x nr1=%08x nr2=%08x dst=%08x\n",
+ c2s, s2c, hmc_readl(plat, NIOSRESERVED0),
+ hmc_readl(plat, NIOSRESERVED1), hmc_readl(plat, NIOSRESERVED2),
+ hmc_readl(plat, DRAMSTS));
+
+ if (s2c && emif_clear(plat)) {
+ printf("DDR: emif_clear() failed\n");
+ return -1;
+ }
+
+ debug("DDR: Triggering emif reset\n");
+ hmc_ecc_writel(plat, DDR_HMC_CORE2SEQ_INT_REQ, RSTHANDSHAKECTRL);
+
+ /* if seq2core[3] = 0, we are good */
+ ret = wait_for_bit_le32((const void *)(plat->hmc +
+ RSTHANDSHAKESTAT),
+ DDR_HMC_SEQ2CORE_INT_RESP_MASK,
+ false, 1000, false);
+ if (ret) {
+ printf("DDR: failed to get ack from EMIF\n");
+ return ret;
+ }
+
+ ret = emif_clear(plat);
+ if (ret) {
+ printf("DDR: emif_clear() failed\n");
+ return ret;
+ }
+
+ debug("DDR: %s triggered successfully\n", __func__);
+ return 0;
+}
+
+int poll_hmc_clock_status(void)
+{
+ return wait_for_bit_le32((const void *)(socfpga_get_sysmgr_addr() +
+ SYSMGR_SOC64_HMC_CLK),
+ SYSMGR_HMC_CLK_STATUS_MSK, true, 1000, false);
+}
+
+void sdram_clear_mem(phys_addr_t addr, phys_size_t size)
+{
+ phys_size_t i;
+
+ if (addr % CONFIG_SYS_CACHELINE_SIZE) {
+ printf("DDR: address 0x%llx is not cacheline-size aligned.\n",
+ addr);
+ hang();
+ }
+
+ if (size % CONFIG_SYS_CACHELINE_SIZE) {
+ printf("DDR: size 0x%llx is not a multiple of cacheline size\n",
+ size);
+ hang();
+ }
+
+ /* Use the DC ZVA instruction to zero memory one cache line at a time */
+ for (i = 0; i < size; i = i + CONFIG_SYS_CACHELINE_SIZE) {
+ asm volatile("dc zva, %0"
+ :
+ : "r"(addr)
+ : "memory");
+ addr += CONFIG_SYS_CACHELINE_SIZE;
+ }
+}
+
+void sdram_init_ecc_bits(struct bd_info *bd)
+{
+ phys_size_t size, size_init;
+ phys_addr_t start_addr;
+ int bank = 0;
+ unsigned int start = get_timer(0);
+
+ icache_enable();
+
+ start_addr = bd->bi_dram[0].start;
+ size = bd->bi_dram[0].size;
+
+ /* Initialize small block for page table */
+ memset((void *)start_addr, 0, PGTABLE_SIZE + PGTABLE_OFF);
+ gd->arch.tlb_addr = start_addr + PGTABLE_OFF;
+ gd->arch.tlb_size = PGTABLE_SIZE;
+ start_addr += PGTABLE_SIZE + PGTABLE_OFF;
+ size -= (PGTABLE_OFF + PGTABLE_SIZE);
+ dcache_enable();
+
+ while (1) {
+ while (size) {
+ size_init = min((phys_addr_t)SZ_1G, (phys_addr_t)size);
+ sdram_clear_mem(start_addr, size_init);
+ size -= size_init;
+ start_addr += size_init;
+ WATCHDOG_RESET();
+ }
+
+ bank++;
+ if (bank >= CONFIG_NR_DRAM_BANKS)
+ break;
+
+ start_addr = bd->bi_dram[bank].start;
+ size = bd->bi_dram[bank].size;
+ }
+
+ dcache_disable();
+ icache_disable();
+
+ printf("SDRAM-ECC: Initialized successfully in %d ms\n",
+ (unsigned int)get_timer(start));
+}
+
+void sdram_size_check(struct bd_info *bd)
+{
+ phys_size_t total_ram_check = 0;
+ phys_size_t ram_check = 0;
+ phys_addr_t start = 0;
+ int bank;
+
+ /* Sanity check to ensure the correct SDRAM size was specified */
+ debug("DDR: Running SDRAM size sanity check\n");
+
+ for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
+ start = bd->bi_dram[bank].start;
+ while (ram_check < bd->bi_dram[bank].size) {
+ ram_check += get_ram_size((void *)(start + ram_check),
+ (phys_size_t)SZ_1G);
+ }
+ total_ram_check += ram_check;
+ ram_check = 0;
+ }
+
+ /* If the ram_size is 2GB smaller, we can assume the IO space is
+ * not mapped in. gd->ram_size is the actual size of the DRAM,
+ * not the accessible size.
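+ * (Hypothetical example: if the controller reports 8 GiB but only
+ * 6 GiB is reachable through the MPU window, the per-bank
+ * get_ram_size() probing above stops at the accessible limit, the
+ * totals disagree, and the check below hangs the board.)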
+ */ + if (total_ram_check != gd->ram_size) { + puts("DDR: SDRAM size check failed!\n"); + hang(); + } + + debug("DDR: SDRAM size check passed!\n"); +} + +/** + * sdram_calculate_size() - Calculate SDRAM size + * + * Calculate SDRAM device size based on SDRAM controller parameters. + * Size is specified in bytes. + */ +phys_size_t sdram_calculate_size(struct altera_sdram_plat *plat) +{ + u32 dramaddrw = hmc_readl(plat, DRAMADDRW); + + phys_size_t size = 1 << (DRAMADDRW_CFG_CS_ADDR_WIDTH(dramaddrw) + + DRAMADDRW_CFG_BANK_GRP_ADDR_WIDTH(dramaddrw) + + DRAMADDRW_CFG_BANK_ADDR_WIDTH(dramaddrw) + + DRAMADDRW_CFG_ROW_ADDR_WIDTH(dramaddrw) + + DRAMADDRW_CFG_COL_ADDR_WIDTH(dramaddrw)); + + size *= (2 << (hmc_ecc_readl(plat, DDRIOCTRL) & + DDR_HMC_DDRIOCTRL_IOSIZE_MSK)); + + return size; +} + +static int altera_sdram_of_to_plat(struct udevice *dev) +{ + struct altera_sdram_plat *plat = dev_get_plat(dev); + fdt_addr_t addr; + + addr = dev_read_addr_index(dev, 0); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + plat->ddr_sch = (void __iomem *)addr; + + addr = dev_read_addr_index(dev, 1); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + plat->iomhc = (void __iomem *)addr; + + addr = dev_read_addr_index(dev, 2); + if (addr == FDT_ADDR_T_NONE) + return -EINVAL; + plat->hmc = (void __iomem *)addr; + + return 0; +} + +static int altera_sdram_probe(struct udevice *dev) +{ + int ret; + struct altera_sdram_priv *priv = dev_get_priv(dev); + + ret = reset_get_bulk(dev, &priv->resets); + if (ret) { + dev_err(dev, "Can't get reset: %d\n", ret); + return -ENODEV; + } + reset_deassert_bulk(&priv->resets); + + if (sdram_mmr_init_full(dev) != 0) { + puts("SDRAM init failed.\n"); + goto failed; + } + + return 0; + +failed: + reset_release_bulk(&priv->resets); + return -ENODEV; +} + +static int altera_sdram_get_info(struct udevice *dev, + struct ram_info *info) +{ + struct altera_sdram_priv *priv = dev_get_priv(dev); + + info->base = priv->info.base; + info->size = priv->info.size; + + return 0; +} + +static struct ram_ops altera_sdram_ops = { + .get_info = altera_sdram_get_info, +}; + +static const struct udevice_id altera_sdram_ids[] = { + { .compatible = "altr,sdr-ctl-s10" }, + { .compatible = "intel,sdr-ctl-agilex" }, + { /* sentinel */ } +}; + +U_BOOT_DRIVER(altera_sdram) = { + .name = "altr_sdr_ctl", + .id = UCLASS_RAM, + .of_match = altera_sdram_ids, + .ops = &altera_sdram_ops, + .of_to_plat = altera_sdram_of_to_plat, + .plat_auto = sizeof(struct altera_sdram_plat), + .probe = altera_sdram_probe, + .priv_auto = sizeof(struct altera_sdram_priv), +}; diff --git a/roms/u-boot/drivers/ddr/altera/sdram_soc64.h b/roms/u-boot/drivers/ddr/altera/sdram_soc64.h new file mode 100644 index 000000000..8af0afc41 --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sdram_soc64.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2017-2019 Intel Corporation <www.intel.com> + */ + +#ifndef _SDRAM_SOC64_H_ +#define _SDRAM_SOC64_H_ + +#include <common.h> +#include <linux/sizes.h> + +struct altera_sdram_priv { + struct ram_info info; + struct reset_ctl_bulk resets; +}; + +struct altera_sdram_plat { + void __iomem *hmc; + void __iomem *ddr_sch; + void __iomem *iomhc; +}; + +/* ECC HMC registers */ +#define DDRIOCTRL 0x8 +#define DDRCALSTAT 0xc +#define DRAMADDRWIDTH 0xe0 +#define ECCCTRL1 0x100 +#define ECCCTRL2 0x104 +#define ERRINTEN 0x110 +#define ERRINTENS 0x114 +#define INTMODE 0x11c +#define INTSTAT 0x120 +#define AUTOWB_CORRADDR 0x138 +#define ECC_REG2WRECCDATABUS 0x144 +#define ECC_DIAGON 
0x150 +#define ECC_DECSTAT 0x154 +#define HPSINTFCSEL 0x210 +#define RSTHANDSHAKECTRL 0x214 +#define RSTHANDSHAKESTAT 0x218 + +#define DDR_HMC_DDRIOCTRL_IOSIZE_MSK 0x00000003 +#define DDR_HMC_DDRCALSTAT_CAL_MSK BIT(0) +#define DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK BIT(16) +#define DDR_HMC_ECCCTL_CNT_RST_SET_MSK BIT(8) +#define DDR_HMC_ECCCTL_ECC_EN_SET_MSK BIT(0) +#define DDR_HMC_ECCCTL2_RMW_EN_SET_MSK BIT(8) +#define DDR_HMC_ECCCTL2_AWB_EN_SET_MSK BIT(0) +#define DDR_HMC_ECC_DIAGON_ECCDIAGON_EN_SET_MSK BIT(16) +#define DDR_HMC_ECC_DIAGON_WRDIAGON_EN_SET_MSK BIT(0) +#define DDR_HMC_ERRINTEN_SERRINTEN_EN_SET_MSK BIT(0) +#define DDR_HMC_ERRINTEN_DERRINTEN_EN_SET_MSK BIT(1) +#define DDR_HMC_INTSTAT_SERRPENA_SET_MSK BIT(0) +#define DDR_HMC_INTSTAT_DERRPENA_SET_MSK BIT(1) +#define DDR_HMC_INTSTAT_ADDRMTCFLG_SET_MSK BIT(16) +#define DDR_HMC_INTMODE_INTMODE_SET_MSK BIT(0) +#define DDR_HMC_RSTHANDSHAKE_MASK 0x000000ff +#define DDR_HMC_CORE2SEQ_INT_REQ 0xF +#define DDR_HMC_SEQ2CORE_INT_RESP_MASK BIT(3) +#define DDR_HMC_HPSINTFCSEL_ENABLE_MASK 0x001f1f1f + +#define DDR_HMC_ERRINTEN_INTMASK \ + (DDR_HMC_ERRINTEN_SERRINTEN_EN_SET_MSK | \ + DDR_HMC_ERRINTEN_DERRINTEN_EN_SET_MSK) + +/* HMC MMR IO48 registers */ +#define CTRLCFG0 0x28 +#define CTRLCFG1 0x2c +#define CTRLCFG3 0x34 +#define DRAMTIMING0 0x50 +#define CALTIMING0 0x7c +#define CALTIMING1 0x80 +#define CALTIMING2 0x84 +#define CALTIMING3 0x88 +#define CALTIMING4 0x8c +#define CALTIMING9 0xa0 +#define DRAMADDRW 0xa8 +#define DRAMSTS 0xec +#define NIOSRESERVED0 0x110 +#define NIOSRESERVED1 0x114 +#define NIOSRESERVED2 0x118 + +#define DRAMADDRW_CFG_COL_ADDR_WIDTH(x) \ + (((x) >> 0) & 0x1F) +#define DRAMADDRW_CFG_ROW_ADDR_WIDTH(x) \ + (((x) >> 5) & 0x1F) +#define DRAMADDRW_CFG_BANK_ADDR_WIDTH(x) \ + (((x) >> 10) & 0xF) +#define DRAMADDRW_CFG_BANK_GRP_ADDR_WIDTH(x) \ + (((x) >> 14) & 0x3) +#define DRAMADDRW_CFG_CS_ADDR_WIDTH(x) \ + (((x) >> 16) & 0x7) + +#define CTRLCFG0_CFG_MEMTYPE(x) \ + (((x) >> 0) & 0xF) +#define CTRLCFG0_CFG_DIMM_TYPE(x) \ + (((x) >> 4) & 0x7) +#define CTRLCFG0_CFG_AC_POS(x) \ + (((x) >> 7) & 0x3) +#define CTRLCFG0_CFG_CTRL_BURST_LEN(x) \ + (((x) >> 9) & 0x1F) + +#define CTRLCFG1_CFG_DBC3_BURST_LEN(x) \ + (((x) >> 0) & 0x1F) +#define CTRLCFG1_CFG_ADDR_ORDER(x) \ + (((x) >> 5) & 0x3) +#define CTRLCFG1_CFG_CTRL_EN_ECC(x) \ + (((x) >> 7) & 0x1) + +#define DRAMTIMING0_CFG_TCL(x) \ + (((x) >> 0) & 0x7F) + +#define CALTIMING0_CFG_ACT_TO_RDWR(x) \ + (((x) >> 0) & 0x3F) +#define CALTIMING0_CFG_ACT_TO_PCH(x) \ + (((x) >> 6) & 0x3F) +#define CALTIMING0_CFG_ACT_TO_ACT(x) \ + (((x) >> 12) & 0x3F) +#define CALTIMING0_CFG_ACT_TO_ACT_DB(x) \ + (((x) >> 18) & 0x3F) + +#define CALTIMING1_CFG_RD_TO_RD(x) \ + (((x) >> 0) & 0x3F) +#define CALTIMING1_CFG_RD_TO_RD_DC(x) \ + (((x) >> 6) & 0x3F) +#define CALTIMING1_CFG_RD_TO_RD_DB(x) \ + (((x) >> 12) & 0x3F) +#define CALTIMING1_CFG_RD_TO_WR(x) \ + (((x) >> 18) & 0x3F) +#define CALTIMING1_CFG_RD_TO_WR_DC(x) \ + (((x) >> 24) & 0x3F) + +#define CALTIMING2_CFG_RD_TO_WR_DB(x) \ + (((x) >> 0) & 0x3F) +#define CALTIMING2_CFG_RD_TO_WR_PCH(x) \ + (((x) >> 6) & 0x3F) +#define CALTIMING2_CFG_RD_AP_TO_VALID(x) \ + (((x) >> 12) & 0x3F) +#define CALTIMING2_CFG_WR_TO_WR(x) \ + (((x) >> 18) & 0x3F) +#define CALTIMING2_CFG_WR_TO_WR_DC(x) \ + (((x) >> 24) & 0x3F) + +#define CALTIMING3_CFG_WR_TO_WR_DB(x) \ + (((x) >> 0) & 0x3F) +#define CALTIMING3_CFG_WR_TO_RD(x) \ + (((x) >> 6) & 0x3F) +#define CALTIMING3_CFG_WR_TO_RD_DC(x) \ + (((x) >> 12) & 0x3F) +#define CALTIMING3_CFG_WR_TO_RD_DB(x) \ + (((x) >> 18) & 0x3F) +#define 
CALTIMING3_CFG_WR_TO_PCH(x) \ + (((x) >> 24) & 0x3F) + +#define CALTIMING4_CFG_WR_AP_TO_VALID(x) \ + (((x) >> 0) & 0x3F) +#define CALTIMING4_CFG_PCH_TO_VALID(x) \ + (((x) >> 6) & 0x3F) +#define CALTIMING4_CFG_PCH_ALL_TO_VALID(x) \ + (((x) >> 12) & 0x3F) +#define CALTIMING4_CFG_ARF_TO_VALID(x) \ + (((x) >> 18) & 0xFF) +#define CALTIMING4_CFG_PDN_TO_VALID(x) \ + (((x) >> 26) & 0x3F) + +#define CALTIMING9_CFG_4_ACT_TO_ACT(x) \ + (((x) >> 0) & 0xFF) + +/* Firewall DDR scheduler MPFE */ +#define FW_HMC_ADAPTOR_REG_ADDR 0xf8020004 +#define FW_HMC_ADAPTOR_MPU_MASK BIT(0) + +u32 hmc_readl(struct altera_sdram_plat *plat, u32 reg); +u32 hmc_ecc_readl(struct altera_sdram_plat *plat, u32 reg); +u32 hmc_ecc_writel(struct altera_sdram_plat *plat, + u32 data, u32 reg); +u32 ddr_sch_writel(struct altera_sdram_plat *plat, u32 data, + u32 reg); +int emif_clear(struct altera_sdram_plat *plat); +int emif_reset(struct altera_sdram_plat *plat); +int poll_hmc_clock_status(void); +void sdram_clear_mem(phys_addr_t addr, phys_size_t size); +void sdram_init_ecc_bits(struct bd_info *bd); +void sdram_size_check(struct bd_info *bd); +phys_size_t sdram_calculate_size(struct altera_sdram_plat *plat); +int sdram_mmr_init_full(struct udevice *dev); + +#endif /* _SDRAM_SOC64_H_ */ diff --git a/roms/u-boot/drivers/ddr/altera/sequencer.c b/roms/u-boot/drivers/ddr/altera/sequencer.c new file mode 100644 index 000000000..6b9b2e909 --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sequencer.c @@ -0,0 +1,3991 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright Altera Corporation (C) 2012-2015 + */ + +#include <common.h> +#include <log.h> +#include <asm/io.h> +#include <asm/arch/sdram.h> +#include <errno.h> +#include <hang.h> +#include "sequencer.h" + +static const struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs = + (struct socfpga_sdr_rw_load_manager *) + (SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800); +static const struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs + = (struct socfpga_sdr_rw_load_jump_manager *) + (SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00); +static const struct socfpga_sdr_reg_file *sdr_reg_file = + (struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS; +static const struct socfpga_sdr_scc_mgr *sdr_scc_mgr = + (struct socfpga_sdr_scc_mgr *) + (SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00); +static const struct socfpga_phy_mgr_cmd *phy_mgr_cmd = + (struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS; +static const struct socfpga_phy_mgr_cfg *phy_mgr_cfg = + (struct socfpga_phy_mgr_cfg *) + (SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40); +static const struct socfpga_data_mgr *data_mgr = + (struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS; +static const struct socfpga_sdr_ctrl *sdr_ctrl = + (struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS; + +#define DELTA_D 1 + +/* + * In order to reduce ROM size, most of the selectable calibration steps are + * decided at compile time based on the user's calibration mode selection, + * as captured by the STATIC_CALIB_STEPS selection below. + * + * However, to support simulation-time selection of fast simulation mode, where + * we skip everything except the bare minimum, we need a few of the steps to + * be dynamic. 
In those cases, we either use the DYNAMIC_CALIB_STEPS for the
+ * check, which is based on the rtl-supplied value, or we dynamically
+ * compute the value to use based on the dynamically-chosen calibration
+ * mode.
+ */
+
+#define DLEVEL 0
+#define STATIC_IN_RTL_SIM 0
+#define STATIC_SKIP_DELAY_LOOPS 0
+
+#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
+ STATIC_SKIP_DELAY_LOOPS)
+
+#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
+ ((non_skip_value) & seq->skip_delay_mask)
+
+bool dram_is_ddr(const u8 ddr)
+{
+ const struct socfpga_sdram_config *cfg = socfpga_get_sdram_config();
+ const u8 type = (cfg->ctrl_cfg >> SDR_CTRLGRP_CTRLCFG_MEMTYPE_LSB) &
+ SDR_CTRLGRP_CTRLCFG_MEMTYPE_MASK;
+
+ if (ddr == 2 && type == 1) /* DDR2 */
+ return true;
+
+ if (ddr == 3 && type == 2) /* DDR3 */
+ return true;
+
+ return false;
+}
+
+static void set_failing_group_stage(struct socfpga_sdrseq *seq,
+ u32 group, u32 stage, u32 substage)
+{
+ /*
+ * Only set the global stage if there has not been any other
+ * failing group
+ */
+ if (seq->gbl.error_stage == CAL_STAGE_NIL) {
+ seq->gbl.error_substage = substage;
+ seq->gbl.error_stage = stage;
+ seq->gbl.error_group = group;
+ }
+}
+
+static void reg_file_set_group(u16 set_group)
+{
+ clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
+}
+
+static void reg_file_set_stage(u8 set_stage)
+{
+ clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
+}
+
+static void reg_file_set_sub_stage(u8 set_sub_stage)
+{
+ set_sub_stage &= 0xff;
+ clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
+}
+
+/**
+ * phy_mgr_initialize() - Initialize PHY Manager
+ *
+ * Initialize PHY Manager.
+ */
+static void phy_mgr_initialize(struct socfpga_sdrseq *seq)
+{
+ u32 ratio;
+
+ debug("%s:%d\n", __func__, __LINE__);
+ /* Calibration has control over path to memory */
+ /*
+ * In Hard PHY this is a 2-bit control:
+ * 0: AFI Mux Select
+ * 1: DDIO Mux Select
+ */
+ writel(0x3, &phy_mgr_cfg->mux_sel);
+
+ /* USER memory clock is not stable as we begin initialization */
+ writel(0, &phy_mgr_cfg->reset_mem_stbl);
+
+ /* USER calibration status all set to zero */
+ writel(0, &phy_mgr_cfg->cal_status);
+
+ writel(0, &phy_mgr_cfg->cal_debug_info);
+
+ /* Init params only if we do NOT skip calibration. */
+ if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
+ return;
+
+ ratio = seq->rwcfg->mem_dq_per_read_dqs /
+ seq->rwcfg->mem_virtual_groups_per_read_dqs;
+ seq->param.read_correct_mask_vg = (1 << ratio) - 1;
+ seq->param.write_correct_mask_vg = (1 << ratio) - 1;
+ seq->param.read_correct_mask = (1 << seq->rwcfg->mem_dq_per_read_dqs)
+ - 1;
+ seq->param.write_correct_mask = (1 << seq->rwcfg->mem_dq_per_write_dqs)
+ - 1;
+}
+
+/**
+ * set_rank_and_odt_mask() - Set Rank and ODT mask
+ * @rank: Rank number
+ * @odt_mode: ODT mode, OFF or READ_WRITE
+ *
+ * Set Rank and ODT mask (On-Die Termination).
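+ *
+ * Illustrative example (the values depend on the configuration):
+ * selecting rank 1 of a single-slot, dual-rank setup gives
+ * odt_mask_0 = 0x0 (read) and odt_mask_1 = 0x2 (write), so the
+ * register value written at the end of this function is
+ * (0xff & ~BIT(1)) | (0x0 << 8) | (0x2 << 16) = 0x0200fd, with the
+ * selected rank's bit cleared in the low CS byte.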
+ */ +static void set_rank_and_odt_mask(struct socfpga_sdrseq *seq, + const u32 rank, const u32 odt_mode) +{ + u32 odt_mask_0 = 0; + u32 odt_mask_1 = 0; + u32 cs_and_odt_mask; + + if (odt_mode == RW_MGR_ODT_MODE_OFF) { + odt_mask_0 = 0x0; + odt_mask_1 = 0x0; + } else { /* RW_MGR_ODT_MODE_READ_WRITE */ + switch (seq->rwcfg->mem_number_of_ranks) { + case 1: /* 1 Rank */ + /* Read: ODT = 0 ; Write: ODT = 1 */ + odt_mask_0 = 0x0; + odt_mask_1 = 0x1; + break; + case 2: /* 2 Ranks */ + if (seq->rwcfg->mem_number_of_cs_per_dimm == 1) { + /* + * - Dual-Slot , Single-Rank (1 CS per DIMM) + * OR + * - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM) + * + * Since MEM_NUMBER_OF_RANKS is 2, they + * are both single rank with 2 CS each + * (special for RDIMM). + * + * Read: Turn on ODT on the opposite rank + * Write: Turn on ODT on all ranks + */ + odt_mask_0 = 0x3 & ~(1 << rank); + odt_mask_1 = 0x3; + if (dram_is_ddr(2)) + odt_mask_1 &= ~(1 << rank); + } else { + /* + * - Single-Slot , Dual-Rank (2 CS per DIMM) + * + * Read: Turn on ODT off on all ranks + * Write: Turn on ODT on active rank + */ + odt_mask_0 = 0x0; + odt_mask_1 = 0x3 & (1 << rank); + } + break; + case 4: /* 4 Ranks */ + /* + * DDR3 Read, DDR2 Read/Write: + * ----------+-----------------------+ + * | ODT | + * +-----------------------+ + * Rank | 3 | 2 | 1 | 0 | + * ----------+-----+-----+-----+-----+ + * 0 | 0 | 1 | 0 | 0 | + * 1 | 1 | 0 | 0 | 0 | + * 2 | 0 | 0 | 0 | 1 | + * 3 | 0 | 0 | 1 | 0 | + * ----------+-----+-----+-----+-----+ + * + * DDR3 Write: + * ----------+-----------------------+ + * | ODT | + * Write To +-----------------------+ + * Rank | 3 | 2 | 1 | 0 | + * ----------+-----+-----+-----+-----+ + * 0 | 0 | 1 | 0 | 1 | + * 1 | 1 | 0 | 1 | 0 | + * 2 | 0 | 1 | 0 | 1 | + * 3 | 1 | 0 | 1 | 0 | + * ----------+-----+-----+-----+-----+ + */ + switch (rank) { + case 0: + odt_mask_0 = 0x4; + if (dram_is_ddr(2)) + odt_mask_1 = 0x4; + else if (dram_is_ddr(3)) + odt_mask_1 = 0x5; + break; + case 1: + odt_mask_0 = 0x8; + if (dram_is_ddr(2)) + odt_mask_1 = 0x8; + else if (dram_is_ddr(3)) + odt_mask_1 = 0xA; + break; + case 2: + odt_mask_0 = 0x1; + if (dram_is_ddr(2)) + odt_mask_1 = 0x1; + else if (dram_is_ddr(3)) + odt_mask_1 = 0x5; + break; + case 3: + odt_mask_0 = 0x2; + if (dram_is_ddr(2)) + odt_mask_1 = 0x2; + else if (dram_is_ddr(3)) + odt_mask_1 = 0xA; + break; + } + break; + } + } + + cs_and_odt_mask = (0xFF & ~(1 << rank)) | + ((0xFF & odt_mask_0) << 8) | + ((0xFF & odt_mask_1) << 16); + writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_SET_CS_AND_ODT_MASK_OFFSET); +} + +/** + * scc_mgr_set() - Set SCC Manager register + * @off: Base offset in SCC Manager space + * @grp: Read/Write group + * @val: Value to be set + * + * This function sets the SCC Manager (Scan Chain Control Manager) register. + */ +static void scc_mgr_set(u32 off, u32 grp, u32 val) +{ + writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2)); +} + +/** + * scc_mgr_initialize() - Initialize SCC Manager registers + * + * Initialize SCC Manager registers. + */ +static void scc_mgr_initialize(void) +{ + /* + * Clear register file for HPS. 
16 (2^4) is the size of the + * full register file in the scc mgr: + * RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS + + * MEM_IF_READ_DQS_WIDTH - 1); + */ + int i; + + for (i = 0; i < 16; i++) { + debug_cond(DLEVEL >= 1, "%s:%d: Clearing SCC RFILE index %u\n", + __func__, __LINE__, i); + scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0); + } +} + +static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase) +{ + scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase); +} + +static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay) +{ + scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay); +} + +static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase) +{ + scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase); +} + +static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay) +{ + scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay); +} + +static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay) +{ + scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay); +} + +static void scc_mgr_set_dqs_io_in_delay(struct socfpga_sdrseq *seq, + u32 delay) +{ + scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, + seq->rwcfg->mem_dq_per_write_dqs, delay); +} + +static void scc_mgr_set_dm_in_delay(struct socfpga_sdrseq *seq, u32 dm, + u32 delay) +{ + scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, + seq->rwcfg->mem_dq_per_write_dqs + 1 + dm, + delay); +} + +static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay) +{ + scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay); +} + +static void scc_mgr_set_dqs_out1_delay(struct socfpga_sdrseq *seq, + u32 delay) +{ + scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, + seq->rwcfg->mem_dq_per_write_dqs, delay); +} + +static void scc_mgr_set_dm_out1_delay(struct socfpga_sdrseq *seq, u32 dm, + u32 delay) +{ + scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, + seq->rwcfg->mem_dq_per_write_dqs + 1 + dm, + delay); +} + +/* load up dqs config settings */ +static void scc_mgr_load_dqs(u32 dqs) +{ + writel(dqs, &sdr_scc_mgr->dqs_ena); +} + +/* load up dqs io config settings */ +static void scc_mgr_load_dqs_io(void) +{ + writel(0, &sdr_scc_mgr->dqs_io_ena); +} + +/* load up dq config settings */ +static void scc_mgr_load_dq(u32 dq_in_group) +{ + writel(dq_in_group, &sdr_scc_mgr->dq_ena); +} + +/* load up dm config settings */ +static void scc_mgr_load_dm(u32 dm) +{ + writel(dm, &sdr_scc_mgr->dm_ena); +} + +/** + * scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks + * @off: Base offset in SCC Manager space + * @grp: Read/Write group + * @val: Value to be set + * @update: If non-zero, trigger SCC Manager update for all ranks + * + * This function sets the SCC Manager (Scan Chain Control Manager) register + * and optionally triggers the SCC update for all ranks. + */ +static void scc_mgr_set_all_ranks(struct socfpga_sdrseq *seq, + const u32 off, const u32 grp, const u32 val, + const int update) +{ + u32 r; + + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; + r += NUM_RANKS_PER_SHADOW_REG) { + scc_mgr_set(off, grp, val); + + if (update || (r == 0)) { + writel(grp, &sdr_scc_mgr->dqs_ena); + writel(0, &sdr_scc_mgr->update); + } + } +} + +static void scc_mgr_set_dqs_en_phase_all_ranks(struct socfpga_sdrseq *seq, + u32 read_group, u32 phase) +{ + /* + * USER although the h/w doesn't support different phases per + * shadow register, for simplicity our scc manager modeling + * keeps different phase settings per shadow reg, and it's + * important for us to keep them in sync to match h/w. 
+ * for efficiency, the scan chain update should occur only + * once to sr0. + */ + scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_PHASE_OFFSET, + read_group, phase, 0); +} + +static void scc_mgr_set_dqdqs_output_phase_all_ranks(struct socfpga_sdrseq *seq, + u32 write_group, u32 phase) +{ + /* + * USER although the h/w doesn't support different phases per + * shadow register, for simplicity our scc manager modeling + * keeps different phase settings per shadow reg, and it's + * important for us to keep them in sync to match h/w. + * for efficiency, the scan chain update should occur only + * once to sr0. + */ + scc_mgr_set_all_ranks(seq, SCC_MGR_DQDQS_OUT_PHASE_OFFSET, + write_group, phase, 0); +} + +static void scc_mgr_set_dqs_en_delay_all_ranks(struct socfpga_sdrseq *seq, + u32 read_group, u32 delay) +{ + /* + * In shadow register mode, the T11 settings are stored in + * registers in the core, which are updated by the DQS_ENA + * signals. Not issuing the SCC_MGR_UPD command allows us to + * save lots of rank switching overhead, by calling + * select_shadow_regs_for_update with update_scan_chains + * set to 0. + */ + scc_mgr_set_all_ranks(seq, SCC_MGR_DQS_EN_DELAY_OFFSET, + read_group, delay, 1); +} + +/** + * scc_mgr_set_oct_out1_delay() - Set OCT output delay + * @write_group: Write group + * @delay: Delay value + * + * This function sets the OCT output delay in SCC manager. + */ +static void scc_mgr_set_oct_out1_delay(struct socfpga_sdrseq *seq, + const u32 write_group, const u32 delay) +{ + const int ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; + const int base = write_group * ratio; + int i; + /* + * Load the setting in the SCC manager + * Although OCT affects only write data, the OCT delay is controlled + * by the DQS logic block which is instantiated once per read group. + * For protocols where a write group consists of multiple read groups, + * the setting must be set multiple times. + */ + for (i = 0; i < ratio; i++) + scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay); +} + +/** + * scc_mgr_set_hhp_extras() - Set HHP extras. + * + * Load the fixed setting in the SCC manager HHP extras. + */ +static void scc_mgr_set_hhp_extras(void) +{ + /* + * Load the fixed setting in the SCC manager + * bits: 0:0 = 1'b1 - DQS bypass + * bits: 1:1 = 1'b1 - DQ bypass + * bits: 4:2 = 3'b001 - rfifo_mode + * bits: 6:5 = 2'b01 - rfifo clock_select + * bits: 7:7 = 1'b0 - separate gating from ungating setting + * bits: 8:8 = 1'b0 - separate OE from Output delay setting + */ + const u32 value = (0 << 8) | (0 << 7) | (1 << 5) | + (1 << 2) | (1 << 1) | (1 << 0); + const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | + SCC_MGR_HHP_GLOBALS_OFFSET | + SCC_MGR_HHP_EXTRAS_OFFSET; + + debug_cond(DLEVEL >= 1, "%s:%d Setting HHP Extras\n", + __func__, __LINE__); + writel(value, addr); + debug_cond(DLEVEL >= 1, "%s:%d Done Setting HHP Extras\n", + __func__, __LINE__); +} + +/** + * scc_mgr_zero_all() - Zero all DQS config + * + * Zero all DQS config. + */ +static void scc_mgr_zero_all(struct socfpga_sdrseq *seq) +{ + int i, r; + + /* + * USER Zero all DQS config settings, across all groups and all + * shadow registers + */ + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; + r += NUM_RANKS_PER_SHADOW_REG) { + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { + /* + * The phases actually don't exist on a per-rank basis, + * but there's no harm updating them several times, so + * let's keep the code simple. 
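+ * Note the "zeroed" input delay set below is actually
+ * seq->iocfg->dqs_in_reserve, the reserve default from the
+ * generated IO configuration, rather than a literal zero.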
+ */ + scc_mgr_set_dqs_bus_in_delay(i, + seq->iocfg->dqs_in_reserve + ); + scc_mgr_set_dqs_en_phase(i, 0); + scc_mgr_set_dqs_en_delay(i, 0); + } + + for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) { + scc_mgr_set_dqdqs_output_phase(i, 0); + /* Arria V/Cyclone V don't have out2. */ + scc_mgr_set_oct_out1_delay(seq, i, + seq->iocfg->dqs_out_reserve); + } + } + + /* Multicast to all DQS group enables. */ + writel(0xff, &sdr_scc_mgr->dqs_ena); + writel(0, &sdr_scc_mgr->update); +} + +/** + * scc_set_bypass_mode() - Set bypass mode and trigger SCC update + * @write_group: Write group + * + * Set bypass mode and trigger SCC update. + */ +static void scc_set_bypass_mode(const u32 write_group) +{ + /* Multicast to all DQ enables. */ + writel(0xff, &sdr_scc_mgr->dq_ena); + writel(0xff, &sdr_scc_mgr->dm_ena); + + /* Update current DQS IO enable. */ + writel(0, &sdr_scc_mgr->dqs_io_ena); + + /* Update the DQS logic. */ + writel(write_group, &sdr_scc_mgr->dqs_ena); + + /* Hit update. */ + writel(0, &sdr_scc_mgr->update); +} + +/** + * scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group + * @write_group: Write group + * + * Load DQS settings for Write Group, do not trigger SCC update. + */ +static void scc_mgr_load_dqs_for_write_group(struct socfpga_sdrseq *seq, + const u32 write_group) +{ + const int ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; + const int base = write_group * ratio; + int i; + /* + * Load the setting in the SCC manager + * Although OCT affects only write data, the OCT delay is controlled + * by the DQS logic block which is instantiated once per read group. + * For protocols where a write group consists of multiple read groups, + * the setting must be set multiple times. + */ + for (i = 0; i < ratio; i++) + writel(base + i, &sdr_scc_mgr->dqs_ena); +} + +/** + * scc_mgr_zero_group() - Zero all configs for a group + * + * Zero DQ, DM, DQS and OCT configs for a group. + */ +static void scc_mgr_zero_group(struct socfpga_sdrseq *seq, + const u32 write_group, const int out_only) +{ + int i, r; + + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; + r += NUM_RANKS_PER_SHADOW_REG) { + /* Zero all DQ config settings. */ + for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) { + scc_mgr_set_dq_out1_delay(i, 0); + if (!out_only) + scc_mgr_set_dq_in_delay(i, 0); + } + + /* Multicast to all DQ enables. */ + writel(0xff, &sdr_scc_mgr->dq_ena); + + /* Zero all DM config settings. */ + for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { + if (!out_only) + scc_mgr_set_dm_in_delay(seq, i, 0); + scc_mgr_set_dm_out1_delay(seq, i, 0); + } + + /* Multicast to all DM enables. */ + writel(0xff, &sdr_scc_mgr->dm_ena); + + /* Zero all DQS IO settings. */ + if (!out_only) + scc_mgr_set_dqs_io_in_delay(seq, 0); + + /* Arria V/Cyclone V don't have out2. */ + scc_mgr_set_dqs_out1_delay(seq, seq->iocfg->dqs_out_reserve); + scc_mgr_set_oct_out1_delay(seq, write_group, + seq->iocfg->dqs_out_reserve); + scc_mgr_load_dqs_for_write_group(seq, write_group); + + /* Multicast to all DQS IO enables (only 1 in total). */ + writel(0, &sdr_scc_mgr->dqs_io_ena); + + /* Hit update to zero everything. 
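+ * Every "hit update" site in this file writes 0; the write
+ * itself is the trigger and the value appears to be unused.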
*/ + writel(0, &sdr_scc_mgr->update); + } +} + +/* + * apply and load a particular input delay for the DQ pins in a group + * group_bgn is the index of the first dq pin (in the write group) + */ +static void scc_mgr_apply_group_dq_in_delay(struct socfpga_sdrseq *seq, + u32 group_bgn, u32 delay) +{ + u32 i, p; + + for (i = 0, p = group_bgn; i < seq->rwcfg->mem_dq_per_read_dqs; + i++, p++) { + scc_mgr_set_dq_in_delay(p, delay); + scc_mgr_load_dq(p); + } +} + +/** + * scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the + * DQ pins in a group + * @delay: Delay value + * + * Apply and load a particular output delay for the DQ pins in a group. + */ +static void scc_mgr_apply_group_dq_out1_delay(struct socfpga_sdrseq *seq, + const u32 delay) +{ + int i; + + for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) { + scc_mgr_set_dq_out1_delay(i, delay); + scc_mgr_load_dq(i); + } +} + +/* apply and load a particular output delay for the DM pins in a group */ +static void scc_mgr_apply_group_dm_out1_delay(struct socfpga_sdrseq *seq, + u32 delay1) +{ + u32 i; + + for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) { + scc_mgr_set_dm_out1_delay(seq, i, delay1); + scc_mgr_load_dm(i); + } +} + + +/* apply and load delay on both DQS and OCT out1 */ +static void scc_mgr_apply_group_dqs_io_and_oct_out1(struct socfpga_sdrseq *seq, + u32 write_group, u32 delay) +{ + scc_mgr_set_dqs_out1_delay(seq, delay); + scc_mgr_load_dqs_io(); + + scc_mgr_set_oct_out1_delay(seq, write_group, delay); + scc_mgr_load_dqs_for_write_group(seq, write_group); +} + +/** + * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output + * side: DQ, DM, DQS, OCT + * @write_group: Write group + * @delay: Delay value + * + * Apply a delay to the entire output side: DQ, DM, DQS, OCT. + */ +static void scc_mgr_apply_group_all_out_delay_add(struct socfpga_sdrseq *seq, + const u32 write_group, + const u32 delay) +{ + u32 i, new_delay; + + /* DQ shift */ + for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) + scc_mgr_load_dq(i); + + /* DM shift */ + for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) + scc_mgr_load_dm(i); + + /* DQS shift */ + new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay; + if (new_delay > seq->iocfg->io_out2_delay_max) { + debug_cond(DLEVEL >= 1, + "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n", + __func__, __LINE__, write_group, delay, new_delay, + seq->iocfg->io_out2_delay_max, + new_delay - seq->iocfg->io_out2_delay_max); + new_delay -= seq->iocfg->io_out2_delay_max; + scc_mgr_set_dqs_out1_delay(seq, new_delay); + } + + scc_mgr_load_dqs_io(); + + /* OCT shift */ + new_delay = READ_SCC_OCT_OUT2_DELAY + delay; + if (new_delay > seq->iocfg->io_out2_delay_max) { + debug_cond(DLEVEL >= 1, + "%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n", + __func__, __LINE__, write_group, delay, + new_delay, seq->iocfg->io_out2_delay_max, + new_delay - seq->iocfg->io_out2_delay_max); + new_delay -= seq->iocfg->io_out2_delay_max; + scc_mgr_set_oct_out1_delay(seq, write_group, new_delay); + } + + scc_mgr_load_dqs_for_write_group(seq, write_group); +} + +/** + * scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output + * side to all ranks + * @write_group: Write group + * @delay: Delay value + * + * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks. 
+ */ +static void +scc_mgr_apply_group_all_out_delay_add_all_ranks(struct socfpga_sdrseq *seq, + const u32 write_group, + const u32 delay) +{ + int r; + + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; + r += NUM_RANKS_PER_SHADOW_REG) { + scc_mgr_apply_group_all_out_delay_add(seq, write_group, delay); + writel(0, &sdr_scc_mgr->update); + } +} + +/** + * set_jump_as_return() - Return instruction optimization + * + * Optimization used to recover some slots in ddr3 inst_rom could be + * applied to other protocols if we wanted to + */ +static void set_jump_as_return(struct socfpga_sdrseq *seq) +{ + /* + * To save space, we replace return with jump to special shared + * RETURN instruction so we set the counter to large value so that + * we always jump. + */ + writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0); + writel(seq->rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0); +} + +/** + * delay_for_n_mem_clocks() - Delay for N memory clocks + * @clocks: Length of the delay + * + * Delay for N memory clocks. + */ +static void delay_for_n_mem_clocks(struct socfpga_sdrseq *seq, + const u32 clocks) +{ + u32 afi_clocks; + u16 c_loop; + u8 inner; + u8 outer; + + debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks); + + /* Scale (rounding up) to get afi clocks. */ + afi_clocks = DIV_ROUND_UP(clocks, seq->misccfg->afi_rate_ratio); + if (afi_clocks) /* Temporary underflow protection */ + afi_clocks--; + + /* + * Note, we don't bother accounting for being off a little + * bit because of a few extra instructions in outer loops. + * Note, the loops have a test at the end, and do the test + * before the decrement, and so always perform the loop + * 1 time more than the counter value + */ + c_loop = afi_clocks >> 16; + outer = c_loop ? 0xff : (afi_clocks >> 8); + inner = outer ? 0xff : afi_clocks; + + /* + * rom instructions are structured as follows: + * + * IDLE_LOOP2: jnz cntr0, TARGET_A + * IDLE_LOOP1: jnz cntr1, TARGET_B + * return + * + * so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and + * TARGET_B is set to IDLE_LOOP2 as well + * + * if we have no outer loop, though, then we can use IDLE_LOOP1 only, + * and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely + * + * a little confusing, but it helps save precious space in the inst_rom + * and sequencer rom and keeps the delays more accurate and reduces + * overhead + */ + if (afi_clocks < 0x100) { + writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), + &sdr_rw_load_mgr_regs->load_cntr1); + + writel(seq->rwcfg->idle_loop1, + &sdr_rw_load_jump_mgr_regs->load_jump_add1); + + writel(seq->rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET); + } else { + writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner), + &sdr_rw_load_mgr_regs->load_cntr0); + + writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer), + &sdr_rw_load_mgr_regs->load_cntr1); + + writel(seq->rwcfg->idle_loop2, + &sdr_rw_load_jump_mgr_regs->load_jump_add0); + + writel(seq->rwcfg->idle_loop2, + &sdr_rw_load_jump_mgr_regs->load_jump_add1); + + do { + writel(seq->rwcfg->idle_loop2, + SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET); + } while (c_loop-- != 0); + } + debug("%s:%d clocks=%u ... 
end\n", __func__, __LINE__, clocks); +} + +static void delay_for_n_ns(struct socfpga_sdrseq *seq, const u32 ns) +{ + delay_for_n_mem_clocks(seq, (ns * seq->misccfg->afi_clk_freq * + seq->misccfg->afi_rate_ratio) / 1000); +} + +/** + * rw_mgr_mem_init_load_regs() - Load instruction registers + * @cntr0: Counter 0 value + * @cntr1: Counter 1 value + * @cntr2: Counter 2 value + * @jump: Jump instruction value + * + * Load instruction registers. + */ +static void rw_mgr_mem_init_load_regs(struct socfpga_sdrseq *seq, + u32 cntr0, u32 cntr1, u32 cntr2, u32 jump) +{ + u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET; + + /* Load counters */ + writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0), + &sdr_rw_load_mgr_regs->load_cntr0); + writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1), + &sdr_rw_load_mgr_regs->load_cntr1); + writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2), + &sdr_rw_load_mgr_regs->load_cntr2); + + /* Load jump address */ + writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0); + writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1); + writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2); + + /* Execute count instruction */ + writel(jump, grpaddr); +} + +/** + * rw_mgr_mem_load_user_ddr2() - Load user calibration values for DDR2 + * @handoff: Indicate whether this is initialization or handoff phase + * + * Load user calibration values and optionally precharge the banks. + */ +static void rw_mgr_mem_load_user_ddr2(struct socfpga_sdrseq *seq, + const int handoff) +{ + u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET; + u32 r; + + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) { + /* set rank */ + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF); + + /* precharge all banks ... */ + writel(seq->rwcfg->precharge_all, grpaddr); + + writel(seq->rwcfg->emr2, grpaddr); + writel(seq->rwcfg->emr3, grpaddr); + writel(seq->rwcfg->emr, grpaddr); + + if (handoff) { + writel(seq->rwcfg->mr_user, grpaddr); + continue; + } + + writel(seq->rwcfg->mr_dll_reset, grpaddr); + + writel(seq->rwcfg->precharge_all, grpaddr); + + writel(seq->rwcfg->refresh, grpaddr); + delay_for_n_ns(seq, 200); + writel(seq->rwcfg->refresh, grpaddr); + delay_for_n_ns(seq, 200); + + writel(seq->rwcfg->mr_calib, grpaddr); + writel(/*seq->rwcfg->*/0x0b, grpaddr); // EMR_OCD_ENABLE + writel(seq->rwcfg->emr, grpaddr); + delay_for_n_mem_clocks(seq, 200); + } +} + +/** + * rw_mgr_mem_load_user_ddr3() - Load user calibration values + * @fin1: Final instruction 1 + * @fin2: Final instruction 2 + * @precharge: If 1, precharge the banks at the end + * + * Load user calibration values and optionally precharge the banks. + */ +static void rw_mgr_mem_load_user_ddr3(struct socfpga_sdrseq *seq, + const u32 fin1, const u32 fin2, + const int precharge) +{ + u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET; + u32 r; + + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) { + /* set rank */ + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF); + + /* precharge all banks ... 
*/ + if (precharge) + writel(seq->rwcfg->precharge_all, grpaddr); + + /* + * USER Use Mirror-ed commands for odd ranks if address + * mirrorring is on + */ + if ((seq->rwcfg->mem_address_mirroring >> r) & 0x1) { + set_jump_as_return(seq); + writel(seq->rwcfg->mrs2_mirr, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs3_mirr, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs1_mirr, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(fin1, grpaddr); + } else { + set_jump_as_return(seq); + writel(seq->rwcfg->mrs2, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs3, grpaddr); + delay_for_n_mem_clocks(seq, 4); + set_jump_as_return(seq); + writel(seq->rwcfg->mrs1, grpaddr); + set_jump_as_return(seq); + writel(fin2, grpaddr); + } + + if (precharge) + continue; + + set_jump_as_return(seq); + writel(seq->rwcfg->zqcl, grpaddr); + + /* tZQinit = tDLLK = 512 ck cycles */ + delay_for_n_mem_clocks(seq, 512); + } +} + +/** + * rw_mgr_mem_load_user() - Load user calibration values + * @fin1: Final instruction 1 + * @fin2: Final instruction 2 + * @precharge: If 1, precharge the banks at the end + * + * Load user calibration values and optionally precharge the banks. + */ +static void rw_mgr_mem_load_user(struct socfpga_sdrseq *seq, + const u32 fin1, const u32 fin2, + const int precharge) +{ + if (dram_is_ddr(2)) + rw_mgr_mem_load_user_ddr2(seq, precharge); + else if (dram_is_ddr(3)) + rw_mgr_mem_load_user_ddr3(seq, fin1, fin2, precharge); + else + hang(); +} +/** + * rw_mgr_mem_initialize() - Initialize RW Manager + * + * Initialize RW Manager. + */ +static void rw_mgr_mem_initialize(struct socfpga_sdrseq *seq) +{ + debug("%s:%d\n", __func__, __LINE__); + + /* The reset / cke part of initialization is broadcasted to all ranks */ + if (dram_is_ddr(3)) { + writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_SET_CS_AND_ODT_MASK_OFFSET); + } + + /* + * Here's how you load register for a loop + * Counters are located @ 0x800 + * Jump address are located @ 0xC00 + * For both, registers 0 to 3 are selected using bits 3 and 2, like + * in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C + * I know this ain't pretty, but Avalon bus throws away the 2 least + * significant bits + */ + + /* Start with memory RESET activated */ + + /* tINIT = 200us */ + + /* + * 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles + * If a and b are the number of iteration in 2 nested loops + * it takes the following number of cycles to complete the operation: + * number_of_cycles = ((2 + n) * a + 2) * b + * where n is the number of instruction in the inner loop + * One possible solution is n = 0 , a = 256 , b = 106 => a = FF, + * b = 6A + */ + rw_mgr_mem_init_load_regs(seq, seq->misccfg->tinit_cntr0_val, + seq->misccfg->tinit_cntr1_val, + seq->misccfg->tinit_cntr2_val, + seq->rwcfg->init_reset_0_cke_0); + + /* Indicate that memory is stable. */ + writel(1, &phy_mgr_cfg->reset_mem_stbl); + + if (dram_is_ddr(2)) { + writel(seq->rwcfg->nop, SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET); + + /* Bring up clock enable. 
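+ * For DDR2 the NOP command issued above is what raises CKE;
+ * the wait that follows then covers tXRP before the next
+ * command.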
*/ + + /* tXRP < 400 ck cycles */ + delay_for_n_ns(seq, 400); + } else if (dram_is_ddr(3)) { + /* + * transition the RESET to high + * Wait for 500us + */ + + /* + * 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles + * If a and b are the number of iteration in 2 nested loops + * it takes the following number of cycles to complete the + * operation number_of_cycles = ((2 + n) * a + 2) * b + * where n is the number of instruction in the inner loop + * One possible solution is + * n = 2 , a = 131 , b = 256 => a = 83, b = FF + */ + rw_mgr_mem_init_load_regs(seq, seq->misccfg->treset_cntr0_val, + seq->misccfg->treset_cntr1_val, + seq->misccfg->treset_cntr2_val, + seq->rwcfg->init_reset_1_cke_0); + /* Bring up clock enable. */ + + /* tXRP < 250 ck cycles */ + delay_for_n_mem_clocks(seq, 250); + } + + rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_dll_reset_mirr, + seq->rwcfg->mrs0_dll_reset, 0); +} + +/** + * rw_mgr_mem_handoff() - Hand off the memory to user + * + * At the end of calibration we have to program the user settings in + * and hand off the memory to the user. + */ +static void rw_mgr_mem_handoff(struct socfpga_sdrseq *seq) +{ + rw_mgr_mem_load_user(seq, seq->rwcfg->mrs0_user_mirr, + seq->rwcfg->mrs0_user, 1); + /* + * Need to wait tMOD (12CK or 15ns) time before issuing other + * commands, but we will have plenty of NIOS cycles before actual + * handoff so its okay. + */ +} + +/** + * rw_mgr_mem_calibrate_write_test_issue() - Issue write test command + * @group: Write Group + * @use_dm: Use DM + * + * Issue write test command. Two variants are provided, one that just tests + * a write pattern and another that tests datamask functionality. + */ +static void rw_mgr_mem_calibrate_write_test_issue(struct socfpga_sdrseq *seq, + u32 group, u32 test_dm) +{ + const u32 quick_write_mode = + (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) && + seq->misccfg->enable_super_quick_calibration; + u32 mcc_instruction; + u32 rw_wl_nop_cycles; + + /* + * Set counter and jump addresses for the right + * number of NOP cycles. + * The number of supported NOP cycles can range from -1 to infinity + * Three different cases are handled: + * + * 1. For a number of NOP cycles greater than 0, the RW Mgr looping + * mechanism will be used to insert the right number of NOPs + * + * 2. For a number of NOP cycles equals to 0, the micro-instruction + * issuing the write command will jump straight to the + * micro-instruction that turns on DQS (for DDRx), or outputs write + * data (for RLD), skipping + * the NOP micro-instruction all together + * + * 3. A number of NOP cycles equal to -1 indicates that DQS must be + * turned on in the same micro-instruction that issues the write + * command. Then we need + * to directly jump to the micro-instruction that sends out the data + * + * NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters + * (2 and 3). One jump-counter (0) is used to perform multiple + * write-read operations. + * one counter left to issue this command in "multiple-group" mode + */ + + rw_wl_nop_cycles = seq->gbl.rw_wl_nop_cycles; + + if (rw_wl_nop_cycles == -1) { + /* + * CNTR 2 - We want to execute the special write operation that + * turns on DQS right away and then skip directly to the + * instruction that sends out the data. We set the counter to a + * large number so that the jump is always taken. 
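+ * (0xFF follows the same convention as set_jump_as_return()
+ * and the idle-loop counters: the largest 8-bit count, so the
+ * branch is effectively always taken.)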
+ */ + writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); + + /* CNTR 3 - Not used */ + if (test_dm) { + mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0_wl_1; + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_data, + &sdr_rw_load_jump_mgr_regs->load_jump_add2); + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop, + &sdr_rw_load_jump_mgr_regs->load_jump_add3); + } else { + mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0_wl_1; + writel(seq->rwcfg->lfsr_wr_rd_bank_0_data, + &sdr_rw_load_jump_mgr_regs->load_jump_add2); + writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop, + &sdr_rw_load_jump_mgr_regs->load_jump_add3); + } + } else if (rw_wl_nop_cycles == 0) { + /* + * CNTR 2 - We want to skip the NOP operation and go straight + * to the DQS enable instruction. We set the counter to a large + * number so that the jump is always taken. + */ + writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2); + + /* CNTR 3 - Not used */ + if (test_dm) { + mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_dqs, + &sdr_rw_load_jump_mgr_regs->load_jump_add2); + } else { + mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_bank_0_dqs, + &sdr_rw_load_jump_mgr_regs->load_jump_add2); + } + } else { + /* + * CNTR 2 - In this case we want to execute the next instruction + * and NOT take the jump. So we set the counter to 0. The jump + * address doesn't count. + */ + writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2); + writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2); + + /* + * CNTR 3 - Set the nop counter to the number of cycles we + * need to loop for, minus 1. + */ + writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3); + if (test_dm) { + mcc_instruction = seq->rwcfg->lfsr_wr_rd_dm_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_nop, + &sdr_rw_load_jump_mgr_regs->load_jump_add3); + } else { + mcc_instruction = seq->rwcfg->lfsr_wr_rd_bank_0; + writel(seq->rwcfg->lfsr_wr_rd_bank_0_nop, + &sdr_rw_load_jump_mgr_regs->load_jump_add3); + } + } + + writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RESET_READ_DATAPATH_OFFSET); + + if (quick_write_mode) + writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0); + else + writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0); + + writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0); + + /* + * CNTR 1 - This is used to ensure enough time elapses + * for read data to come back. + */ + writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1); + + if (test_dm) { + writel(seq->rwcfg->lfsr_wr_rd_dm_bank_0_wait, + &sdr_rw_load_jump_mgr_regs->load_jump_add1); + } else { + writel(seq->rwcfg->lfsr_wr_rd_bank_0_wait, + &sdr_rw_load_jump_mgr_regs->load_jump_add1); + } + + writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET) + + (group << 2)); +} + +/** + * rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple + * pass + * @rank_bgn: Rank number + * @write_group: Write Group + * @use_dm: Use DM + * @all_correct: All bits must be correct in the mask + * @bit_chk: Resulting bit mask after the test + * @all_ranks: Test all ranks + * + * Test writes, can check for a single bit pass or multiple bit pass. + */ +static int +rw_mgr_mem_calibrate_write_test(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 write_group, + const u32 use_dm, const u32 all_correct, + u32 *bit_chk, const u32 all_ranks) +{ + const u32 rank_end = all_ranks ? 
+ seq->rwcfg->mem_number_of_ranks : + (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + const u32 shift_ratio = seq->rwcfg->mem_dq_per_write_dqs / + seq->rwcfg->mem_virtual_groups_per_write_dqs; + const u32 correct_mask_vg = seq->param.write_correct_mask_vg; + + u32 tmp_bit_chk, base_rw_mgr, group; + int vg, r; + + *bit_chk = seq->param.write_correct_mask; + + for (r = rank_bgn; r < rank_end; r++) { + /* Set rank */ + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); + + tmp_bit_chk = 0; + for (vg = seq->rwcfg->mem_virtual_groups_per_write_dqs - 1; + vg >= 0; vg--) { + /* Reset the FIFOs to get pointers to known state. */ + writel(0, &phy_mgr_cmd->fifo_reset); + + group = write_group * + seq->rwcfg->mem_virtual_groups_per_write_dqs + + vg; + rw_mgr_mem_calibrate_write_test_issue(seq, group, + use_dm); + + base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); + tmp_bit_chk <<= shift_ratio; + tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr)); + } + + *bit_chk &= tmp_bit_chk; + } + + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); + if (all_correct) { + debug_cond(DLEVEL >= 2, + "write_test(%u,%u,ALL) : %u == %u => %i\n", + write_group, use_dm, *bit_chk, + seq->param.write_correct_mask, + *bit_chk == seq->param.write_correct_mask); + return *bit_chk == seq->param.write_correct_mask; + } else { + debug_cond(DLEVEL >= 2, + "write_test(%u,%u,ONE) : %u != %i => %i\n", + write_group, use_dm, *bit_chk, 0, *bit_chk != 0); + return *bit_chk != 0x00; + } +} + +/** + * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns + * @rank_bgn: Rank number + * @group: Read/Write Group + * @all_ranks: Test all ranks + * + * Performs a guaranteed read on the patterns we are going to use during a + * read test to ensure memory works. + */ +static int +rw_mgr_mem_calibrate_read_test_patterns(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 group, + const u32 all_ranks) +{ + const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET; + const u32 addr_offset = + (group * seq->rwcfg->mem_virtual_groups_per_read_dqs) + << 2; + const u32 rank_end = all_ranks ? + seq->rwcfg->mem_number_of_ranks : + (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + const u32 shift_ratio = seq->rwcfg->mem_dq_per_read_dqs / + seq->rwcfg->mem_virtual_groups_per_read_dqs; + const u32 correct_mask_vg = seq->param.read_correct_mask_vg; + + u32 tmp_bit_chk, base_rw_mgr, bit_chk; + int vg, r; + int ret = 0; + + bit_chk = seq->param.read_correct_mask; + + for (r = rank_bgn; r < rank_end; r++) { + /* Set rank */ + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); + + /* Load up a constant bursts of read commands */ + writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); + writel(seq->rwcfg->guaranteed_read, + &sdr_rw_load_jump_mgr_regs->load_jump_add0); + + writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); + writel(seq->rwcfg->guaranteed_read_cont, + &sdr_rw_load_jump_mgr_regs->load_jump_add1); + + tmp_bit_chk = 0; + for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1; + vg >= 0; vg--) { + /* Reset the FIFOs to get pointers to known state. 
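+ * Illustrative mask arithmetic for this loop: with 8 DQ per
+ * read DQS and 2 virtual groups, shift_ratio is 4 and
+ * correct_mask_vg is 0xf, so each VG contributes a 4-bit
+ * pass mask to tmp_bit_chk.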
*/ + writel(0, &phy_mgr_cmd->fifo_reset); + writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RESET_READ_DATAPATH_OFFSET); + writel(seq->rwcfg->guaranteed_read, + addr + addr_offset + (vg << 2)); + + base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); + tmp_bit_chk <<= shift_ratio; + tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr; + } + + bit_chk &= tmp_bit_chk; + } + + writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2)); + + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); + + if (bit_chk != seq->param.read_correct_mask) + ret = -EIO; + + debug_cond(DLEVEL >= 1, + "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n", + __func__, __LINE__, group, bit_chk, + seq->param.read_correct_mask, ret); + + return ret; +} + +/** + * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read + * test + * @rank_bgn: Rank number + * @all_ranks: Test all ranks + * + * Load up the patterns we are going to use during a read test. + */ +static void rw_mgr_mem_calibrate_read_load_patterns(struct socfpga_sdrseq *seq, + const u32 rank_bgn, + const int all_ranks) +{ + const u32 rank_end = all_ranks ? + seq->rwcfg->mem_number_of_ranks : + (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + u32 r; + + debug("%s:%d\n", __func__, __LINE__); + + for (r = rank_bgn; r < rank_end; r++) { + /* set rank */ + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); + + /* Load up a constant bursts */ + writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0); + + writel(seq->rwcfg->guaranteed_write_wait0, + &sdr_rw_load_jump_mgr_regs->load_jump_add0); + + writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1); + + writel(seq->rwcfg->guaranteed_write_wait1, + &sdr_rw_load_jump_mgr_regs->load_jump_add1); + + writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2); + + writel(seq->rwcfg->guaranteed_write_wait2, + &sdr_rw_load_jump_mgr_regs->load_jump_add2); + + writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3); + + writel(seq->rwcfg->guaranteed_write_wait3, + &sdr_rw_load_jump_mgr_regs->load_jump_add3); + + writel(seq->rwcfg->guaranteed_write, + SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET); + } + + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); +} + +/** + * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank + * @rank_bgn: Rank number + * @group: Read/Write group + * @num_tries: Number of retries of the test + * @all_correct: All bits must be correct in the mask + * @bit_chk: Resulting bit mask after the test + * @all_groups: Test all R/W groups + * @all_ranks: Test all ranks + * + * Try a read and see if it returns correct data back. Test has dummy reads + * inserted into the mix used to align DQS enable. Test has more thorough + * checks than the regular read test. + */ +static int +rw_mgr_mem_calibrate_read_test(struct socfpga_sdrseq *seq, + const u32 rank_bgn, const u32 group, + const u32 num_tries, const u32 all_correct, + u32 *bit_chk, + const u32 all_groups, const u32 all_ranks) +{ + const u32 rank_end = all_ranks ? 
seq->rwcfg->mem_number_of_ranks : + (rank_bgn + NUM_RANKS_PER_SHADOW_REG); + const u32 quick_read_mode = + ((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) && + seq->misccfg->enable_super_quick_calibration); + u32 correct_mask_vg = seq->param.read_correct_mask_vg; + u32 tmp_bit_chk; + u32 base_rw_mgr; + u32 addr; + + int r, vg, ret; + + *bit_chk = seq->param.read_correct_mask; + + for (r = rank_bgn; r < rank_end; r++) { + /* set rank */ + set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_READ_WRITE); + + writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1); + + writel(seq->rwcfg->read_b2b_wait1, + &sdr_rw_load_jump_mgr_regs->load_jump_add1); + + writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2); + writel(seq->rwcfg->read_b2b_wait2, + &sdr_rw_load_jump_mgr_regs->load_jump_add2); + + if (quick_read_mode) + writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0); + /* need at least two (1+1) reads to capture failures */ + else if (all_groups) + writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0); + else + writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0); + + writel(seq->rwcfg->read_b2b, + &sdr_rw_load_jump_mgr_regs->load_jump_add0); + if (all_groups) + writel(seq->rwcfg->mem_if_read_dqs_width * + seq->rwcfg->mem_virtual_groups_per_read_dqs - 1, + &sdr_rw_load_mgr_regs->load_cntr3); + else + writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3); + + writel(seq->rwcfg->read_b2b, + &sdr_rw_load_jump_mgr_regs->load_jump_add3); + + tmp_bit_chk = 0; + for (vg = seq->rwcfg->mem_virtual_groups_per_read_dqs - 1; + vg >= 0; vg--) { + /* Reset the FIFOs to get pointers to known state. */ + writel(0, &phy_mgr_cmd->fifo_reset); + writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RESET_READ_DATAPATH_OFFSET); + + if (all_groups) { + addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_ALL_GROUPS_OFFSET; + } else { + addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | + RW_MGR_RUN_SINGLE_GROUP_OFFSET; + } + + writel(seq->rwcfg->read_b2b, addr + + ((group * + seq->rwcfg->mem_virtual_groups_per_read_dqs + + vg) << 2)); + + base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS); + tmp_bit_chk <<= + seq->rwcfg->mem_dq_per_read_dqs / + seq->rwcfg->mem_virtual_groups_per_read_dqs; + tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr); + } + + *bit_chk &= tmp_bit_chk; + } + + addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET; + writel(seq->rwcfg->clear_dqs_enable, addr + (group << 2)); + + set_rank_and_odt_mask(seq, 0, RW_MGR_ODT_MODE_OFF); + + if (all_correct) { + ret = (*bit_chk == seq->param.read_correct_mask); + debug_cond(DLEVEL >= 2, + "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n", + __func__, __LINE__, group, all_groups, *bit_chk, + seq->param.read_correct_mask, ret); + } else { + ret = (*bit_chk != 0x00); + debug_cond(DLEVEL >= 2, + "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n", + __func__, __LINE__, group, all_groups, *bit_chk, + 0, ret); + } + + return ret; +} + +/** + * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks + * @grp: Read/Write group + * @num_tries: Number of retries of the test + * @all_correct: All bits must be correct in the mask + * @all_groups: Test all R/W groups + * + * Perform a READ test across all memory ranks. 
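+ *
+ * Illustrative note: this is a thin convenience wrapper. It passes
+ * all_ranks = 1 so rank_end covers mem_number_of_ranks, and the
+ * resulting mask is collected into a local bit_chk and discarded,
+ * since callers of the all-ranks variant only use the pass/fail
+ * return value.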
+ */ +static int +rw_mgr_mem_calibrate_read_test_all_ranks(struct socfpga_sdrseq *seq, + const u32 grp, const u32 num_tries, + const u32 all_correct, + const u32 all_groups) +{ + u32 bit_chk; + return rw_mgr_mem_calibrate_read_test(seq, 0, grp, num_tries, + all_correct, &bit_chk, all_groups, + 1); +} + +/** + * rw_mgr_incr_vfifo() - Increase VFIFO value + * @grp: Read/Write group + * + * Increase VFIFO value. + */ +static void rw_mgr_incr_vfifo(const u32 grp) +{ + writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy); +} + +/** + * rw_mgr_decr_vfifo() - Decrease VFIFO value + * @grp: Read/Write group + * + * Decrease VFIFO value. + */ +static void rw_mgr_decr_vfifo(struct socfpga_sdrseq *seq, const u32 grp) +{ + u32 i; + + for (i = 0; i < seq->misccfg->read_valid_fifo_size - 1; i++) + rw_mgr_incr_vfifo(grp); +} + +/** + * find_vfifo_failing_read() - Push VFIFO to get a failing read + * @grp: Read/Write group + * + * Push VFIFO until a failing read happens. + */ +static int find_vfifo_failing_read(struct socfpga_sdrseq *seq, + const u32 grp) +{ + u32 v, ret, fail_cnt = 0; + + for (v = 0; v < seq->misccfg->read_valid_fifo_size; v++) { + debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n", + __func__, __LINE__, v); + ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1, + PASS_ONE_BIT, 0); + if (!ret) { + fail_cnt++; + + if (fail_cnt == 2) + return v; + } + + /* Fiddle with FIFO. */ + rw_mgr_incr_vfifo(grp); + } + + /* No failing read found! Something must have gone wrong. */ + debug_cond(DLEVEL >= 2, "%s:%d: vfifo failed\n", __func__, __LINE__); + return 0; +} + +/** + * sdr_find_phase_delay() - Find DQS enable phase or delay + * @working: If 1, look for working phase/delay, if 0, look for non-working + * @delay: If 1, look for delay, if 0, look for phase + * @grp: Read/Write group + * @work: Working window position + * @work_inc: Working window increment + * @pd: DQS Phase/Delay Iterator + * + * Find working or non-working DQS enable phase setting. + */ +static int sdr_find_phase_delay(struct socfpga_sdrseq *seq, int working, + int delay, const u32 grp, u32 *work, + const u32 work_inc, u32 *pd) +{ + const u32 max = delay ? seq->iocfg->dqs_en_delay_max : + seq->iocfg->dqs_en_phase_max; + u32 ret; + + for (; *pd <= max; (*pd)++) { + if (delay) + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *pd); + else + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *pd); + + ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1, + PASS_ONE_BIT, 0); + if (!working) + ret = !ret; + + if (ret) + return 0; + + if (work) + *work += work_inc; + } + + return -EINVAL; +} +/** + * sdr_find_phase() - Find DQS enable phase + * @working: If 1, look for working phase, if 0, look for non-working phase + * @grp: Read/Write group + * @work: Working window position + * @i: Iterator + * @p: DQS Phase Iterator + * + * Find working or non-working DQS enable phase setting. + */ +static int sdr_find_phase(struct socfpga_sdrseq *seq, int working, + const u32 grp, u32 *work, u32 *i, u32 *p) +{ + const u32 end = seq->misccfg->read_valid_fifo_size + (working ? 0 : 1); + int ret; + + for (; *i < end; (*i)++) { + if (working) + *p = 0; + + ret = sdr_find_phase_delay(seq, working, 0, grp, work, + seq->iocfg->delay_per_opa_tap, p); + if (!ret) + return 0; + + if (*p > seq->iocfg->dqs_en_phase_max) { + /* Fiddle with FIFO. 
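+		 *
+		 * Illustrative note (hypothetical tap values): once *p
+		 * exceeds dqs_en_phase_max the sweep has walked one full
+		 * VFIFO cycle worth of phase taps, so the VFIFO is bumped
+		 * and the phase restarts from 0. E.g. assuming
+		 * dqs_en_phase_max = 7 and delay_per_opa_tap = 416 ps,
+		 * one VFIFO step spans roughly 8 * 416 ps = 3328 ps of
+		 * the search range.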
*/ + rw_mgr_incr_vfifo(grp); + if (!working) + *p = 0; + } + } + + return -EINVAL; +} + +/** + * sdr_working_phase() - Find working DQS enable phase + * @grp: Read/Write group + * @work_bgn: Working window start position + * @d: dtaps output value + * @p: DQS Phase Iterator + * @i: Iterator + * + * Find working DQS enable phase setting. + */ +static int sdr_working_phase(struct socfpga_sdrseq *seq, const u32 grp, + u32 *work_bgn, u32 *d, u32 *p, u32 *i) +{ + const u32 dtaps_per_ptap = seq->iocfg->delay_per_opa_tap / + seq->iocfg->delay_per_dqs_en_dchain_tap; + int ret; + + *work_bgn = 0; + + for (*d = 0; *d <= dtaps_per_ptap; (*d)++) { + *i = 0; + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, *d); + ret = sdr_find_phase(seq, 1, grp, work_bgn, i, p); + if (!ret) + return 0; + *work_bgn += seq->iocfg->delay_per_dqs_en_dchain_tap; + } + + /* Cannot find working solution */ + debug_cond(DLEVEL >= 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n", + __func__, __LINE__); + return -EINVAL; +} + +/** + * sdr_backup_phase() - Find DQS enable backup phase + * @grp: Read/Write group + * @work_bgn: Working window start position + * @p: DQS Phase Iterator + * + * Find DQS enable backup phase setting. + */ +static void sdr_backup_phase(struct socfpga_sdrseq *seq, const u32 grp, + u32 *work_bgn, u32 *p) +{ + u32 tmp_delay, d; + int ret; + + /* Special case code for backing up a phase */ + if (*p == 0) { + *p = seq->iocfg->dqs_en_phase_max; + rw_mgr_decr_vfifo(seq, grp); + } else { + (*p)--; + } + tmp_delay = *work_bgn - seq->iocfg->delay_per_opa_tap; + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, *p); + + for (d = 0; d <= seq->iocfg->dqs_en_delay_max && tmp_delay < *work_bgn; + d++) { + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d); + + ret = rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1, + PASS_ONE_BIT, 0); + if (ret) { + *work_bgn = tmp_delay; + break; + } + + tmp_delay += seq->iocfg->delay_per_dqs_en_dchain_tap; + } + + /* Restore VFIFO to old state before we decremented it (if needed). */ + (*p)++; + if (*p > seq->iocfg->dqs_en_phase_max) { + *p = 0; + rw_mgr_incr_vfifo(grp); + } + + scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0); +} + +/** + * sdr_nonworking_phase() - Find non-working DQS enable phase + * @grp: Read/Write group + * @work_end: Working window end position + * @p: DQS Phase Iterator + * @i: Iterator + * + * Find non-working DQS enable phase setting. + */ +static int sdr_nonworking_phase(struct socfpga_sdrseq *seq, + const u32 grp, u32 *work_end, u32 *p, u32 *i) +{ + int ret; + + (*p)++; + *work_end += seq->iocfg->delay_per_opa_tap; + if (*p > seq->iocfg->dqs_en_phase_max) { + /* Fiddle with FIFO. */ + *p = 0; + rw_mgr_incr_vfifo(grp); + } + + ret = sdr_find_phase(seq, 0, grp, work_end, i, p); + if (ret) { + /* Cannot see edge of failing read. */ + debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n", + __func__, __LINE__); + } + + return ret; +} + +/** + * sdr_find_window_center() - Find center of the working DQS window. + * @grp: Read/Write group + * @work_bgn: First working settings + * @work_end: Last working settings + * + * Find center of the working DQS enable window. 
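+ *
+ * Worked example (hypothetical tap values, for illustration only): with
+ * work_bgn = 1000 and work_end = 3000, work_mid = 2000. Assuming
+ * delay_per_opa_tap = 416, dqs_en_phase_max = 7 and
+ * delay_per_dqs_en_dchain_tap = 42, the VFIFO period is 8 * 416 = 3328,
+ * so work_mid stays 2000; the code below then picks
+ * p = rounddown(2000, 416) / 416 = 4 and d = DIV_ROUND_UP(336, 42) = 8.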
+ */
+static int sdr_find_window_center(struct socfpga_sdrseq *seq,
+				  const u32 grp, const u32 work_bgn,
+				  const u32 work_end)
+{
+	u32 work_mid;
+	int tmp_delay = 0;
+	int i, p, d;
+
+	work_mid = (work_bgn + work_end) / 2;
+
+	debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n",
+		   work_bgn, work_end, work_mid);
+	/* Get the middle delay to be less than a VFIFO delay */
+	tmp_delay = (seq->iocfg->dqs_en_phase_max + 1)
+		* seq->iocfg->delay_per_opa_tap;
+
+	debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay);
+	work_mid %= tmp_delay;
+	debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid);
+
+	tmp_delay = rounddown(work_mid, seq->iocfg->delay_per_opa_tap);
+	if (tmp_delay > seq->iocfg->dqs_en_phase_max
+		* seq->iocfg->delay_per_opa_tap) {
+		tmp_delay = seq->iocfg->dqs_en_phase_max
+			* seq->iocfg->delay_per_opa_tap;
+	}
+	p = tmp_delay / seq->iocfg->delay_per_opa_tap;
+
+	debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
+
+	d = DIV_ROUND_UP(work_mid - tmp_delay,
+			 seq->iocfg->delay_per_dqs_en_dchain_tap);
+	if (d > seq->iocfg->dqs_en_delay_max)
+		d = seq->iocfg->dqs_en_delay_max;
+	tmp_delay += d * seq->iocfg->delay_per_dqs_en_dchain_tap;
+
+	debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
+
+	scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p);
+	scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, d);
+
+	/*
+	 * push vfifo until we can successfully calibrate. We can do this
+	 * because the largest possible margin is 1 VFIFO cycle.
+	 */
+	for (i = 0; i < seq->misccfg->read_valid_fifo_size; i++) {
+		debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n");
+		if (rw_mgr_mem_calibrate_read_test_all_ranks(seq, grp, 1,
+							     PASS_ONE_BIT,
+							     0)) {
+			debug_cond(DLEVEL >= 2,
+				   "%s:%d center: found: ptap=%u dtap=%u\n",
+				   __func__, __LINE__, p, d);
+			return 0;
+		}
+
+		/* Fiddle with FIFO. */
+		rw_mgr_incr_vfifo(grp);
+	}
+
+	debug_cond(DLEVEL >= 2, "%s:%d center: failed.\n",
+		   __func__, __LINE__);
+	return -EINVAL;
+}
+
+/**
+ * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to
+ * use
+ * @grp: Read/Write Group
+ *
+ * Find a good DQS enable to use.
+ */
+static int
+rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(struct socfpga_sdrseq *seq,
+					     const u32 grp)
+{
+	u32 d, p, i;
+	u32 dtaps_per_ptap;
+	u32 work_bgn, work_end;
+	u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
+	int ret;
+
+	debug("%s:%d %u\n", __func__, __LINE__, grp);
+
+	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
+
+	scc_mgr_set_dqs_en_delay_all_ranks(seq, grp, 0);
+	scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, 0);
+
+	/* Step 0: Determine number of delay taps for each phase tap. */
+	dtaps_per_ptap = seq->iocfg->delay_per_opa_tap /
+			 seq->iocfg->delay_per_dqs_en_dchain_tap;
+
+	/* Step 1: First push vfifo until we get a failing read. */
+	find_vfifo_failing_read(seq, grp);
+
+	/* Step 2: Find first working phase, increment in ptaps. */
+	work_bgn = 0;
+	ret = sdr_working_phase(seq, grp, &work_bgn, &d, &p, &i);
+	if (ret)
+		return ret;
+
+	work_end = work_bgn;
+
+	/*
+	 * If d is 0 then the working window covers a phase tap and we can
+	 * follow the old procedure. Otherwise, we've found the beginning
+	 * and we need to increment the dtaps until we find the end.
+	 */
+	if (d == 0) {
+		/*
+		 * Step 3a: If we have room, back off by one and
+		 * increment in dtaps.
+		 */
+		sdr_backup_phase(seq, grp, &work_bgn, &p);
+
+		/*
+		 * Step 4a: go forward from working phase to non-working
+		 * phase, increment in ptaps.
+ */ + ret = sdr_nonworking_phase(seq, grp, &work_end, &p, &i); + if (ret) + return ret; + + /* Step 5a: Back off one from last, increment in dtaps. */ + + /* Special case code for backing up a phase */ + if (p == 0) { + p = seq->iocfg->dqs_en_phase_max; + rw_mgr_decr_vfifo(seq, grp); + } else { + p = p - 1; + } + + work_end -= seq->iocfg->delay_per_opa_tap; + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p); + + d = 0; + + debug_cond(DLEVEL >= 2, "%s:%d p: ptap=%u\n", + __func__, __LINE__, p); + } + + /* The dtap increment to find the failing edge is done here. */ + sdr_find_phase_delay(seq, 0, 1, grp, &work_end, + seq->iocfg->delay_per_dqs_en_dchain_tap, &d); + + /* Go back to working dtap */ + if (d != 0) + work_end -= seq->iocfg->delay_per_dqs_en_dchain_tap; + + debug_cond(DLEVEL >= 2, + "%s:%d p/d: ptap=%u dtap=%u end=%u\n", + __func__, __LINE__, p, d - 1, work_end); + + if (work_end < work_bgn) { + /* nil range */ + debug_cond(DLEVEL >= 2, "%s:%d end-2: failed\n", + __func__, __LINE__); + return -EINVAL; + } + + debug_cond(DLEVEL >= 2, "%s:%d found range [%u,%u]\n", + __func__, __LINE__, work_bgn, work_end); + + /* + * We need to calculate the number of dtaps that equal a ptap. + * To do that we'll back up a ptap and re-find the edge of the + * window using dtaps + */ + debug_cond(DLEVEL >= 2, "%s:%d calculate dtaps_per_ptap for tracking\n", + __func__, __LINE__); + + /* Special case code for backing up a phase */ + if (p == 0) { + p = seq->iocfg->dqs_en_phase_max; + rw_mgr_decr_vfifo(seq, grp); + debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n", + __func__, __LINE__, p); + } else { + p = p - 1; + debug_cond(DLEVEL >= 2, "%s:%d backedup phase only: p=%u", + __func__, __LINE__, p); + } + + scc_mgr_set_dqs_en_phase_all_ranks(seq, grp, p); + + /* + * Increase dtap until we first see a passing read (in case the + * window is smaller than a ptap), and then a failing read to + * mark the edge of the window again. + */ + + /* Find a passing read. */ + debug_cond(DLEVEL >= 2, "%s:%d find passing read\n", + __func__, __LINE__); + + initial_failing_dtap = d; + + found_passing_read = !sdr_find_phase_delay(seq, 1, 1, grp, NULL, 0, &d); + if (found_passing_read) { + /* Find a failing read. */ + debug_cond(DLEVEL >= 2, "%s:%d find failing read\n", + __func__, __LINE__); + d++; + found_failing_read = !sdr_find_phase_delay(seq, 0, 1, grp, NULL, + 0, &d); + } else { + debug_cond(DLEVEL >= 1, + "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n", + __func__, __LINE__); + } + + /* + * The dynamically calculated dtaps_per_ptap is only valid if we + * found a passing/failing read. If we didn't, it means d hit the max + * (seq->iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains its + * statically calculated value. + */ + if (found_passing_read && found_failing_read) + dtaps_per_ptap = d - initial_failing_dtap; + + writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap); + debug_cond(DLEVEL >= 2, "%s:%d dtaps_per_ptap=%u - %u = %u", + __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap); + + /* Step 6: Find the centre of the window. 
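+	 *
+	 * (Illustrative recap of the tracking value computed above, with
+	 * made-up numbers: if the failing edge was first seen at dtap 6
+	 * and, after backing up one ptap, a passing and then a failing
+	 * read are found again at dtap 14, then
+	 * dtaps_per_ptap = 14 - 6 = 8. That ratio is what was just
+	 * written to the register file for the runtime tracking manager.)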
*/ + ret = sdr_find_window_center(seq, grp, work_bgn, work_end); + + return ret; +} + +/** + * search_stop_check() - Check if the detected edge is valid + * @write: Perform read (Stage 2) or write (Stage 3) calibration + * @d: DQS delay + * @rank_bgn: Rank number + * @write_group: Write Group + * @read_group: Read Group + * @bit_chk: Resulting bit mask after the test + * @sticky_bit_chk: Resulting sticky bit mask after the test + * @use_read_test: Perform read test + * + * Test if the found edge is valid. + */ +static u32 search_stop_check(struct socfpga_sdrseq *seq, const int write, + const int d, const int rank_bgn, + const u32 write_group, const u32 read_group, + u32 *bit_chk, u32 *sticky_bit_chk, + const u32 use_read_test) +{ + const u32 ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; + const u32 correct_mask = write ? seq->param.write_correct_mask : + seq->param.read_correct_mask; + const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; + u32 ret; + /* + * Stop searching when the read test doesn't pass AND when + * we've seen a passing read on every bit. + */ + if (write) { /* WRITE-ONLY */ + ret = !rw_mgr_mem_calibrate_write_test(seq, rank_bgn, + write_group, 0, + PASS_ONE_BIT, bit_chk, + 0); + } else if (use_read_test) { /* READ-ONLY */ + ret = !rw_mgr_mem_calibrate_read_test(seq, rank_bgn, read_group, + NUM_READ_PB_TESTS, + PASS_ONE_BIT, bit_chk, + 0, 0); + } else { /* READ-ONLY */ + rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group, 0, + PASS_ONE_BIT, bit_chk, 0); + *bit_chk = *bit_chk >> (per_dqs * + (read_group - (write_group * ratio))); + ret = (*bit_chk == 0); + } + *sticky_bit_chk = *sticky_bit_chk | *bit_chk; + ret = ret && (*sticky_bit_chk == correct_mask); + debug_cond(DLEVEL >= 2, + "%s:%d center(left): dtap=%u => %u == %u && %u", + __func__, __LINE__, d, + *sticky_bit_chk, correct_mask, ret); + return ret; +} + +/** + * search_left_edge() - Find left edge of DQ/DQS working phase + * @write: Perform read (Stage 2) or write (Stage 3) calibration + * @rank_bgn: Rank number + * @write_group: Write Group + * @read_group: Read Group + * @test_bgn: Rank number to begin the test + * @sticky_bit_chk: Resulting sticky bit mask after the test + * @left_edge: Left edge of the DQ/DQS phase + * @right_edge: Right edge of the DQ/DQS phase + * @use_read_test: Perform read test + * + * Find left edge of DQ/DQS working phase. + */ +static void search_left_edge(struct socfpga_sdrseq *seq, const int write, + const int rank_bgn, const u32 write_group, + const u32 read_group, const u32 test_bgn, + u32 *sticky_bit_chk, int *left_edge, + int *right_edge, const u32 use_read_test) +{ + const u32 delay_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->io_in_delay_max; + const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->dqs_in_delay_max; + const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; + u32 stop, bit_chk; + int i, d; + + for (d = 0; d <= dqs_max; d++) { + if (write) + scc_mgr_apply_group_dq_out1_delay(seq, d); + else + scc_mgr_apply_group_dq_in_delay(seq, test_bgn, d); + + writel(0, &sdr_scc_mgr->update); + + stop = search_stop_check(seq, write, d, rank_bgn, write_group, + read_group, &bit_chk, sticky_bit_chk, + use_read_test); + if (stop == 1) + break; + + /* stop != 1 */ + for (i = 0; i < per_dqs; i++) { + if (bit_chk & 1) { + /* + * Remember a passing test as + * the left_edge. 
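+				 *
+				 * Illustration (hypothetical delays):
+				 * with delay_max = 31 the illegal
+				 * marker is 32; a bit that first
+				 * passes at d = 5 records
+				 * left_edge[i] = 5, while a bit that
+				 * fails at d = 5 with no left edge
+				 * seen yet records
+				 * right_edge[i] = -(5 + 1) = -6 as a
+				 * provisional marker.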
+				 */
+				left_edge[i] = d;
+			} else {
+				/*
+				 * If a left edge has not been seen
+				 * yet, then a future passing test
+				 * will mark this edge as the right
+				 * edge.
+				 */
+				if (left_edge[i] == delay_max + 1)
+					right_edge[i] = -(d + 1);
+			}
+			bit_chk >>= 1;
+		}
+	}
+
+	/* Reset DQ delay chains to 0 */
+	if (write)
+		scc_mgr_apply_group_dq_out1_delay(seq, 0);
+	else
+		scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
+
+	*sticky_bit_chk = 0;
+	for (i = per_dqs - 1; i >= 0; i--) {
+		debug_cond(DLEVEL >= 2,
+			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
+			   __func__, __LINE__, i, left_edge[i],
+			   i, right_edge[i]);
+
+		/*
+		 * Check for cases where we haven't found the left edge,
+		 * which makes our assignment of the right edge invalid.
+		 * Reset it to the illegal value.
+		 */
+		if ((left_edge[i] == delay_max + 1) &&
+		    (right_edge[i] != delay_max + 1)) {
+			right_edge[i] = delay_max + 1;
+			debug_cond(DLEVEL >= 2,
+				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
+				   __func__, __LINE__, i, right_edge[i]);
+		}
+
+		/*
+		 * Reset sticky bit
+		 * READ: except for bits where we have seen both
+		 * the left and right edge.
+		 * WRITE: except for bits where we have seen the
+		 * left edge.
+		 */
+		*sticky_bit_chk <<= 1;
+		if (write) {
+			if (left_edge[i] != delay_max + 1)
+				*sticky_bit_chk |= 1;
+		} else {
+			if ((left_edge[i] != delay_max + 1) &&
+			    (right_edge[i] != delay_max + 1))
+				*sticky_bit_chk |= 1;
+		}
+	}
+}
+
+/**
+ * search_right_edge() - Find right edge of DQ/DQS working phase
+ * @write: Perform read (Stage 2) or write (Stage 3) calibration
+ * @rank_bgn: Rank number
+ * @write_group: Write Group
+ * @read_group: Read Group
+ * @start_dqs: DQS start phase
+ * @start_dqs_en: DQS enable start phase
+ * @sticky_bit_chk: Resulting sticky bit mask after the test
+ * @left_edge: Left edge of the DQ/DQS phase
+ * @right_edge: Right edge of the DQ/DQS phase
+ * @use_read_test: Perform read test
+ *
+ * Find right edge of DQ/DQS working phase.
+ */
+static int search_right_edge(struct socfpga_sdrseq *seq, const int write,
+			     const int rank_bgn, const u32 write_group,
+			     const u32 read_group, const int start_dqs,
+			     const int start_dqs_en, u32 *sticky_bit_chk,
+			     int *left_edge, int *right_edge,
+			     const u32 use_read_test)
+{
+	const u32 delay_max = write ? seq->iocfg->io_out1_delay_max :
+				      seq->iocfg->io_in_delay_max;
+	const u32 dqs_max = write ? seq->iocfg->io_out1_delay_max :
+				    seq->iocfg->dqs_in_delay_max;
+	const u32 per_dqs = write ?
seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; + u32 stop, bit_chk; + int i, d; + + for (d = 0; d <= dqs_max - start_dqs; d++) { + if (write) { /* WRITE-ONLY */ + scc_mgr_apply_group_dqs_io_and_oct_out1(seq, + write_group, + d + start_dqs); + } else { /* READ-ONLY */ + scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs); + if (seq->iocfg->shift_dqs_en_when_shift_dqs) { + u32 delay = d + start_dqs_en; + if (delay > seq->iocfg->dqs_en_delay_max) + delay = seq->iocfg->dqs_en_delay_max; + scc_mgr_set_dqs_en_delay(read_group, delay); + } + scc_mgr_load_dqs(read_group); + } + + writel(0, &sdr_scc_mgr->update); + + stop = search_stop_check(seq, write, d, rank_bgn, write_group, + read_group, &bit_chk, sticky_bit_chk, + use_read_test); + if (stop == 1) { + if (write && (d == 0)) { /* WRITE-ONLY */ + for (i = 0; + i < seq->rwcfg->mem_dq_per_write_dqs; + i++) { + /* + * d = 0 failed, but it passed when + * testing the left edge, so it must be + * marginal, set it to -1 + */ + if (right_edge[i] == delay_max + 1 && + left_edge[i] != delay_max + 1) + right_edge[i] = -1; + } + } + break; + } + + /* stop != 1 */ + for (i = 0; i < per_dqs; i++) { + if (bit_chk & 1) { + /* + * Remember a passing test as + * the right_edge. + */ + right_edge[i] = d; + } else { + if (d != 0) { + /* + * If a right edge has not + * been seen yet, then a future + * passing test will mark this + * edge as the left edge. + */ + if (right_edge[i] == delay_max + 1) + left_edge[i] = -(d + 1); + } else { + /* + * d = 0 failed, but it passed + * when testing the left edge, + * so it must be marginal, set + * it to -1 + */ + if (right_edge[i] == delay_max + 1 && + left_edge[i] != delay_max + 1) + right_edge[i] = -1; + /* + * If a right edge has not been + * seen yet, then a future + * passing test will mark this + * edge as the left edge. + */ + else if (right_edge[i] == delay_max + 1) + left_edge[i] = -(d + 1); + } + } + + debug_cond(DLEVEL >= 2, "%s:%d center[r,d=%u]: ", + __func__, __LINE__, d); + debug_cond(DLEVEL >= 2, + "bit_chk_test=%i left_edge[%u]: %d ", + bit_chk & 1, i, left_edge[i]); + debug_cond(DLEVEL >= 2, "right_edge[%u]: %d\n", i, + right_edge[i]); + bit_chk >>= 1; + } + } + + /* Check that all bits have a window */ + for (i = 0; i < per_dqs; i++) { + debug_cond(DLEVEL >= 2, + "%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d", + __func__, __LINE__, i, left_edge[i], + i, right_edge[i]); + if ((left_edge[i] == dqs_max + 1) || + (right_edge[i] == dqs_max + 1)) + return i + 1; /* FIXME: If we fail, retval > 0 */ + } + + return 0; +} + +/** + * get_window_mid_index() - Find the best middle setting of DQ/DQS phase + * @write: Perform read (Stage 2) or write (Stage 3) calibration + * @left_edge: Left edge of the DQ/DQS phase + * @right_edge: Right edge of the DQ/DQS phase + * @mid_min: Best DQ/DQS phase middle setting + * + * Find index and value of the middle of the DQ/DQS working phase. + */ +static int get_window_mid_index(struct socfpga_sdrseq *seq, + const int write, int *left_edge, + int *right_edge, int *mid_min) +{ + const u32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; + int i, mid, min_index; + + /* Find middle of window for each DQ bit */ + *mid_min = left_edge[0] - right_edge[0]; + min_index = 0; + for (i = 1; i < per_dqs; i++) { + mid = left_edge[i] - right_edge[i]; + if (mid < *mid_min) { + *mid_min = mid; + min_index = i; + } + } + + /* + * -mid_min/2 represents the amount that we need to move DQS. 
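+	 * (Worked example with made-up edges, for illustration: if the
+	 * narrowest window has left_edge = 10 and right_edge = 4,
+	 * mid_min starts at 6 and the adjustment below yields
+	 * (6 + 1) / 2 = 3 taps of DQS movement.)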
+ * If mid_min is odd and positive we'll need to add one to make + * sure the rounding in further calculations is correct (always + * bias to the right), so just add 1 for all positive values. + */ + if (*mid_min > 0) + (*mid_min)++; + *mid_min = *mid_min / 2; + + debug_cond(DLEVEL >= 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n", + __func__, __LINE__, *mid_min, min_index); + return min_index; +} + +/** + * center_dq_windows() - Center the DQ/DQS windows + * @write: Perform read (Stage 2) or write (Stage 3) calibration + * @left_edge: Left edge of the DQ/DQS phase + * @right_edge: Right edge of the DQ/DQS phase + * @mid_min: Adjusted DQ/DQS phase middle setting + * @orig_mid_min: Original DQ/DQS phase middle setting + * @min_index: DQ/DQS phase middle setting index + * @test_bgn: Rank number to begin the test + * @dq_margin: Amount of shift for the DQ + * @dqs_margin: Amount of shift for the DQS + * + * Align the DQ/DQS windows in each group. + */ +static void center_dq_windows(struct socfpga_sdrseq *seq, + const int write, int *left_edge, int *right_edge, + const int mid_min, const int orig_mid_min, + const int min_index, const int test_bgn, + int *dq_margin, int *dqs_margin) +{ + const s32 delay_max = write ? seq->iocfg->io_out1_delay_max : + seq->iocfg->io_in_delay_max; + const s32 per_dqs = write ? seq->rwcfg->mem_dq_per_write_dqs : + seq->rwcfg->mem_dq_per_read_dqs; + const s32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET : + SCC_MGR_IO_IN_DELAY_OFFSET; + const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off; + + s32 temp_dq_io_delay1; + int shift_dq, i, p; + + /* Initialize data for export structures */ + *dqs_margin = delay_max + 1; + *dq_margin = delay_max + 1; + + /* add delay to bring centre of all DQ windows to the same "level" */ + for (i = 0, p = test_bgn; i < per_dqs; i++, p++) { + /* Use values before divide by 2 to reduce round off error */ + shift_dq = (left_edge[i] - right_edge[i] - + (left_edge[min_index] - right_edge[min_index]))/2 + + (orig_mid_min - mid_min); + + debug_cond(DLEVEL >= 2, + "vfifo_center: before: shift_dq[%u]=%d\n", + i, shift_dq); + + temp_dq_io_delay1 = readl(addr + (i << 2)); + + if (shift_dq + temp_dq_io_delay1 > delay_max) + shift_dq = delay_max - temp_dq_io_delay1; + else if (shift_dq + temp_dq_io_delay1 < 0) + shift_dq = -temp_dq_io_delay1; + + debug_cond(DLEVEL >= 2, + "vfifo_center: after: shift_dq[%u]=%d\n", + i, shift_dq); + + if (write) + scc_mgr_set_dq_out1_delay(i, + temp_dq_io_delay1 + shift_dq); + else + scc_mgr_set_dq_in_delay(p, + temp_dq_io_delay1 + shift_dq); + + scc_mgr_load_dq(p); + + debug_cond(DLEVEL >= 2, + "vfifo_center: margin[%u]=[%d,%d]\n", i, + left_edge[i] - shift_dq + (-mid_min), + right_edge[i] + shift_dq - (-mid_min)); + + /* To determine values for export structures */ + if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin) + *dq_margin = left_edge[i] - shift_dq + (-mid_min); + + if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin) + *dqs_margin = right_edge[i] + shift_dq - (-mid_min); + } +} + +/** + * rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering + * @rank_bgn: Rank number + * @rw_group: Read/Write Group + * @test_bgn: Rank at which the test begins + * @use_read_test: Perform a read test + * @update_fom: Update FOM + * + * Per-bit deskew DQ and centering. 
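+ *
+ * Rough flow, as implemented below: read the current DQS input delay,
+ * search the left and right edge of the window for every DQ bit, derive
+ * mid_min from the narrowest window, then clamp the new DQS delay to
+ * [0, dqs_in_delay_max] before centering the individual DQ delays
+ * around it.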
+ */ +static int rw_mgr_mem_calibrate_vfifo_center(struct socfpga_sdrseq *seq, + const u32 rank_bgn, + const u32 rw_group, + const u32 test_bgn, + const int use_read_test, + const int update_fom) +{ + const u32 addr = + SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET + + (rw_group << 2); + /* + * Store these as signed since there are comparisons with + * signed numbers. + */ + u32 sticky_bit_chk; + s32 left_edge[seq->rwcfg->mem_dq_per_read_dqs]; + s32 right_edge[seq->rwcfg->mem_dq_per_read_dqs]; + s32 orig_mid_min, mid_min; + s32 new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en; + s32 dq_margin, dqs_margin; + int i, min_index; + int ret; + + debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn); + + start_dqs = readl(addr); + if (seq->iocfg->shift_dqs_en_when_shift_dqs) + start_dqs_en = readl(addr - seq->iocfg->dqs_en_delay_offset); + + /* set the left and right edge of each bit to an illegal value */ + /* use (seq->iocfg->io_in_delay_max + 1) as an illegal value */ + sticky_bit_chk = 0; + for (i = 0; i < seq->rwcfg->mem_dq_per_read_dqs; i++) { + left_edge[i] = seq->iocfg->io_in_delay_max + 1; + right_edge[i] = seq->iocfg->io_in_delay_max + 1; + } + + /* Search for the left edge of the window for each bit */ + search_left_edge(seq, 0, rank_bgn, rw_group, rw_group, test_bgn, + &sticky_bit_chk, + left_edge, right_edge, use_read_test); + + + /* Search for the right edge of the window for each bit */ + ret = search_right_edge(seq, 0, rank_bgn, rw_group, rw_group, + start_dqs, start_dqs_en, + &sticky_bit_chk, + left_edge, right_edge, use_read_test); + if (ret) { + /* + * Restore delay chain settings before letting the loop + * in rw_mgr_mem_calibrate_vfifo to retry different + * dqs/ck relationships. + */ + scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs); + if (seq->iocfg->shift_dqs_en_when_shift_dqs) + scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en); + + scc_mgr_load_dqs(rw_group); + writel(0, &sdr_scc_mgr->update); + + debug_cond(DLEVEL >= 1, + "%s:%d vfifo_center: failed to find edge [%u]: %d %d", + __func__, __LINE__, i, left_edge[i], right_edge[i]); + if (use_read_test) { + set_failing_group_stage(seq, rw_group * + seq->rwcfg->mem_dq_per_read_dqs + i, + CAL_STAGE_VFIFO, + CAL_SUBSTAGE_VFIFO_CENTER); + } else { + set_failing_group_stage(seq, rw_group * + seq->rwcfg->mem_dq_per_read_dqs + i, + CAL_STAGE_VFIFO_AFTER_WRITES, + CAL_SUBSTAGE_VFIFO_CENTER); + } + return -EIO; + } + + min_index = get_window_mid_index(seq, 0, left_edge, right_edge, + &mid_min); + + /* Determine the amount we can change DQS (which is -mid_min) */ + orig_mid_min = mid_min; + new_dqs = start_dqs - mid_min; + if (new_dqs > seq->iocfg->dqs_in_delay_max) + new_dqs = seq->iocfg->dqs_in_delay_max; + else if (new_dqs < 0) + new_dqs = 0; + + mid_min = start_dqs - new_dqs; + debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n", + mid_min, new_dqs); + + if (seq->iocfg->shift_dqs_en_when_shift_dqs) { + if (start_dqs_en - mid_min > seq->iocfg->dqs_en_delay_max) + mid_min += start_dqs_en - mid_min - + seq->iocfg->dqs_en_delay_max; + else if (start_dqs_en - mid_min < 0) + mid_min += start_dqs_en - mid_min; + } + new_dqs = start_dqs - mid_min; + + debug_cond(DLEVEL >= 1, + "vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n", + start_dqs, + seq->iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1, + new_dqs, mid_min); + + /* Add delay to bring centre of all DQ windows to the same "level". 
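+	 *
+	 * Worked example (made-up edges): center_dq_windows() computes,
+	 * per bit,
+	 *
+	 *	shift_dq = (left[i] - right[i] -
+	 *		    (left[min] - right[min])) / 2 +
+	 *		   (orig_mid_min - mid_min);
+	 *
+	 * so, with orig_mid_min == mid_min, a bit whose window is 4 taps
+	 * wider than the narrowest one is shifted by 2 taps, then clamped
+	 * so the resulting delay stays within [0, delay_max].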
+	 */
+	center_dq_windows(seq, 0, left_edge, right_edge, mid_min, orig_mid_min,
+			  min_index, test_bgn, &dq_margin, &dqs_margin);
+
+	/* Move DQS-en */
+	if (seq->iocfg->shift_dqs_en_when_shift_dqs) {
+		final_dqs_en = start_dqs_en - mid_min;
+		scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
+		scc_mgr_load_dqs(rw_group);
+	}
+
+	/* Move DQS */
+	scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
+	scc_mgr_load_dqs(rw_group);
+	debug_cond(DLEVEL >= 2,
+		   "%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
+		   __func__, __LINE__, dq_margin, dqs_margin);
+
+	/*
+	 * Do not remove this line as it makes sure all of our decisions
+	 * have been applied. Apply the update bit.
+	 */
+	writel(0, &sdr_scc_mgr->update);
+
+	if ((dq_margin < 0) || (dqs_margin < 0))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the
+ * device
+ * @rw_group: Read/Write Group
+ * @phase: DQ/DQS phase
+ *
+ * Because initially no communication can be reliably performed with the
+ * memory device, the sequencer uses a guaranteed write mechanism to write
+ * data into the memory device.
+ */
+static int rw_mgr_mem_calibrate_guaranteed_write(struct socfpga_sdrseq *seq,
+						 const u32 rw_group,
+						 const u32 phase)
+{
+	int ret;
+
+	/* Set a particular DQ/DQS phase. */
+	scc_mgr_set_dqdqs_output_phase_all_ranks(seq, rw_group, phase);
+
+	debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n",
+		   __func__, __LINE__, rw_group, phase);
+
+	/*
+	 * Altera EMI_RM 2015.05.04 :: Figure 1-25
+	 * Load up the patterns used by read calibration using the
+	 * current DQDQS phase.
+	 */
+	rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
+
+	if (seq->gbl.phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
+		return 0;
+
+	/*
+	 * Altera EMI_RM 2015.05.04 :: Figure 1-26
+	 * Back-to-Back reads of the patterns used for calibration.
+	 */
+	ret = rw_mgr_mem_calibrate_read_test_patterns(seq, 0, rw_group, 1);
+	if (ret)
+		debug_cond(DLEVEL >= 1,
+			   "%s:%d Guaranteed read test failed: g=%u p=%u\n",
+			   __func__, __LINE__, rw_group, phase);
+	return ret;
+}
+
+/**
+ * rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ *
+ * DQS enable calibration ensures reliable capture of the DQ signal without
+ * glitches on the DQS line.
+ */
+static int
+rw_mgr_mem_calibrate_dqs_enable_calibration(struct socfpga_sdrseq *seq,
+					    const u32 rw_group,
+					    const u32 test_bgn)
+{
+	/*
+	 * Altera EMI_RM 2015.05.04 :: Figure 1-27
+	 * DQS and DQS Enable Signal Relationships.
+	 */
+
+	/* We start at zero, so have one less dq to divide among */
+	const u32 delay_step = seq->iocfg->io_in_delay_max /
+			       (seq->rwcfg->mem_dq_per_read_dqs - 1);
+	int ret;
+	u32 i, p, d, r;
+
+	debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
+
+	/* Try different dq_in_delays since the DQ path is shorter than DQS.
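+	 *
+	 * Illustration (hypothetical limits): with io_in_delay_max = 31
+	 * and 8 DQ per DQS, delay_step = 31 / 7 = 4 (integer division),
+	 * so the pins in a group get staggered input delays of
+	 * 0, 4, 8, ... 28 taps before the DQS enable sweep runs.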
+	 */
+	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
+	     r += NUM_RANKS_PER_SHADOW_REG) {
+		for (i = 0, p = test_bgn, d = 0;
+		     i < seq->rwcfg->mem_dq_per_read_dqs;
+		     i++, p++, d += delay_step) {
+			debug_cond(DLEVEL >= 1,
+				   "%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
+				   __func__, __LINE__, rw_group, r, i, p, d);
+
+			scc_mgr_set_dq_in_delay(p, d);
+			scc_mgr_load_dq(p);
+		}
+
+		writel(0, &sdr_scc_mgr->update);
+	}
+
+	/*
+	 * Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
+	 * dq_in_delay values
+	 */
+	ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(seq, rw_group);
+
+	debug_cond(DLEVEL >= 1,
+		   "%s:%d: g=%u found=%u; Resetting delay chain to zero\n",
+		   __func__, __LINE__, rw_group, !ret);
+
+	for (r = 0; r < seq->rwcfg->mem_number_of_ranks;
+	     r += NUM_RANKS_PER_SHADOW_REG) {
+		scc_mgr_apply_group_dq_in_delay(seq, test_bgn, 0);
+		writel(0, &sdr_scc_mgr->update);
+	}
+
+	return ret;
+}
+
+/**
+ * rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ * @use_read_test: Perform a read test
+ * @update_fom: Update FOM
+ *
+ * The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
+ * within a group.
+ */
+static int
+rw_mgr_mem_calibrate_dq_dqs_centering(struct socfpga_sdrseq *seq,
+				      const u32 rw_group, const u32 test_bgn,
+				      const int use_read_test,
+				      const int update_fom)
+{
+	int ret, grp_calibrated;
+	u32 rank_bgn, sr;
+
+	/*
+	 * Altera EMI_RM 2015.05.04 :: Figure 1-28
+	 * Read per-bit deskew can be done on a per shadow register basis.
+	 */
+	grp_calibrated = 1;
+	for (rank_bgn = 0, sr = 0;
+	     rank_bgn < seq->rwcfg->mem_number_of_ranks;
+	     rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
+		ret = rw_mgr_mem_calibrate_vfifo_center(seq, rank_bgn, rw_group,
+							test_bgn,
+							use_read_test,
+							update_fom);
+		if (!ret)
+			continue;
+
+		grp_calibrated = 0;
+	}
+
+	if (!grp_calibrated)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ *
+ * Stage 1: Calibrate the read valid prediction FIFO.
+ *
+ * This function implements UniPHY calibration Stage 1, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ *
+ * - read valid prediction will consist of finding:
+ *   - DQS enable phase and DQS enable delay (DQS Enable Calibration)
+ *   - DQS input phase and DQS input delay (DQ/DQS Centering)
+ * - we also do a per-bit deskew on the DQ lines.
+ */
+static int rw_mgr_mem_calibrate_vfifo(struct socfpga_sdrseq *seq,
+				      const u32 rw_group, const u32 test_bgn)
+{
+	u32 p, d;
+	u32 dtaps_per_ptap;
+	u32 failed_substage;
+
+	int ret;
+
+	debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
+
+	/* Update info for sims */
+	reg_file_set_group(rw_group);
+	reg_file_set_stage(CAL_STAGE_VFIFO);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
+
+	failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
+
+	/* USER Determine number of delay taps for each phase tap. */
+	dtaps_per_ptap = DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap,
+				      seq->iocfg->delay_per_dqs_en_dchain_tap)
+				      - 1;
+
+	for (d = 0; d <= dtaps_per_ptap; d += 2) {
+		/*
+		 * In RLDRAMX we may be messing with the delay of pins in
+		 * the same write rw_group but outside of the current read
+		 * rw_group, but that's ok because we haven't calibrated the
+		 * output side yet.
+		 */
+		if (d > 0) {
+			scc_mgr_apply_group_all_out_delay_add_all_ranks(seq,
+									rw_group,
+									d);
+		}
+
+		for (p = 0; p <= seq->iocfg->dqdqs_out_phase_max; p++) {
+			/* 1) Guaranteed Write */
+			ret = rw_mgr_mem_calibrate_guaranteed_write(seq,
+								    rw_group,
+								    p);
+			if (ret)
+				break;
+
+			/* 2) DQS Enable Calibration */
+			ret = rw_mgr_mem_calibrate_dqs_enable_calibration(seq,
+									  rw_group,
+									  test_bgn);
+			if (ret) {
+				failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
+				continue;
+			}
+
+			/* 3) Centering DQ/DQS */
+			/*
+			 * If doing read after write calibration, do not update
+			 * FOM now. Do it then.
+			 */
+			ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq,
+								    rw_group,
+								    test_bgn,
+								    1, 0);
+			if (ret) {
+				failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
+				continue;
+			}
+
+			/* All done. */
+			goto cal_done_ok;
+		}
+	}
+
+	/* Calibration Stage 1 failed. */
+	set_failing_group_stage(seq, rw_group, CAL_STAGE_VFIFO,
+				failed_substage);
+	return 0;
+
+	/* Calibration Stage 1 completed OK. */
+cal_done_ok:
+	/*
+	 * Reset the delay chains back to zero if they have moved > 1
+	 * (check for > 1 because the loop increases d even when the test
+	 * passes on the first try).
+	 */
+	if (d > 2)
+		scc_mgr_zero_group(seq, rw_group, 1);
+
+	return 1;
+}
+
+/**
+ * rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
+ * @rw_group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ *
+ * Stage 3: DQ/DQS Centering.
+ *
+ * This function implements UniPHY calibration Stage 3, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ */
+static int rw_mgr_mem_calibrate_vfifo_end(struct socfpga_sdrseq *seq,
+					  const u32 rw_group,
+					  const u32 test_bgn)
+{
+	int ret;
+
+	debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
+
+	/* Update info for sims. */
+	reg_file_set_group(rw_group);
+	reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
+
+	ret = rw_mgr_mem_calibrate_dq_dqs_centering(seq, rw_group, test_bgn, 0,
+						    1);
+	if (ret)
+		set_failing_group_stage(seq, rw_group,
+					CAL_STAGE_VFIFO_AFTER_WRITES,
+					CAL_SUBSTAGE_VFIFO_CENTER);
+	return ret;
+}
+
+/**
+ * rw_mgr_mem_calibrate_lfifo() - Minimize latency
+ *
+ * Stage 4: Minimize latency.
+ *
+ * This function implements UniPHY calibration Stage 4, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ * Calibrate LFIFO to find smallest read latency.
+ */
+static u32 rw_mgr_mem_calibrate_lfifo(struct socfpga_sdrseq *seq)
+{
+	int found_one = 0;
+
+	debug("%s:%d\n", __func__, __LINE__);
+
+	/* Update info for sims. */
+	reg_file_set_stage(CAL_STAGE_LFIFO);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
+
+	/* Load up the patterns used by read calibration for all ranks */
+	rw_mgr_mem_calibrate_read_load_patterns(seq, 0, 1);
+
+	do {
+		writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
+		debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u",
+			   __func__, __LINE__, seq->gbl.curr_read_lat);
+
+		if (!rw_mgr_mem_calibrate_read_test_all_ranks(seq, 0,
+							      NUM_READ_TESTS,
+							      PASS_ALL_BITS, 1))
+			break;
+
+		found_one = 1;
+		/*
+		 * Reduce read latency and see if things are
+		 * working correctly.
+		 */
+		seq->gbl.curr_read_lat--;
+	} while (seq->gbl.curr_read_lat > 0);
+
+	/* Reset the FIFOs to get pointers to known state.
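+	 *
+	 * (Illustration with made-up numbers: if reads still pass at
+	 * read_lat = 6 and first fail at 5, the loop above exits with
+	 * curr_read_lat = 5 and the +2 fudge factor below settles on a
+	 * final latency of 7, one cycle above the last passing value.)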
+	 */
+	writel(0, &phy_mgr_cmd->fifo_reset);
+
+	if (found_one) {
+		/* Add a fudge factor to the read latency that was determined */
+		seq->gbl.curr_read_lat += 2;
+		writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat);
+		debug_cond(DLEVEL >= 2,
+			   "%s:%d lfifo: success: using read_lat=%u\n",
+			   __func__, __LINE__, seq->gbl.curr_read_lat);
+	} else {
+		set_failing_group_stage(seq, 0xff, CAL_STAGE_LFIFO,
+					CAL_SUBSTAGE_READ_LATENCY);
+
+		debug_cond(DLEVEL >= 2,
+			   "%s:%d lfifo: failed at initial read_lat=%u\n",
+			   __func__, __LINE__, seq->gbl.curr_read_lat);
+	}
+
+	return found_one;
+}
+
+/**
+ * search_window() - Search for the/part of the window with DM/DQS shift
+ * @search_dm: If 1, search for the DM shift, if 0, search for DQS shift
+ * @rank_bgn: Rank number
+ * @write_group: Write Group
+ * @bgn_curr: Current window begin
+ * @end_curr: Current window end
+ * @bgn_best: Current best window begin
+ * @end_best: Current best window end
+ * @win_best: Size of the best window
+ * @new_dqs: New DQS value (only applicable if search_dm = 0).
+ *
+ * Search for the/part of the window with DM/DQS shift.
+ */
+static void search_window(struct socfpga_sdrseq *seq,
+			  const int search_dm, const u32 rank_bgn,
+			  const u32 write_group, int *bgn_curr, int *end_curr,
+			  int *bgn_best, int *end_best, int *win_best,
+			  int new_dqs)
+{
+	u32 bit_chk;
+	const int max = seq->iocfg->io_out1_delay_max - new_dqs;
+	int d, di;
+
+	/* Search for the/part of the window with DM/DQS shift. */
+	for (di = max; di >= 0; di -= DELTA_D) {
+		if (search_dm) {
+			d = di;
+			scc_mgr_apply_group_dm_out1_delay(seq, d);
+		} else {
+			/* For DQS, we go from 0...max */
+			d = max - di;
+			/*
+			 * Note: This only shifts DQS, so we may be limiting
+			 * ourselves to the width of DQ unnecessarily.
+			 */
+			scc_mgr_apply_group_dqs_io_and_oct_out1(seq,
+								write_group,
+								d + new_dqs);
+		}
+
+		writel(0, &sdr_scc_mgr->update);
+
+		if (rw_mgr_mem_calibrate_write_test(seq, rank_bgn, write_group,
+						    1, PASS_ALL_BITS, &bit_chk,
+						    0)) {
+			/* Set current end of the window. */
+			*end_curr = search_dm ? -d : d;
+
+			/*
+			 * If a starting edge of our window has not been seen
+			 * this is our current start of the DM window.
+			 */
+			if (*bgn_curr == seq->iocfg->io_out1_delay_max + 1)
+				*bgn_curr = search_dm ? -d : d;
+
+			/*
+			 * If the current window is bigger than the best seen,
+			 * set best seen to the current window.
+			 */
+			if ((*end_curr - *bgn_curr + 1) > *win_best) {
+				*win_best = *end_curr - *bgn_curr + 1;
+				*bgn_best = *bgn_curr;
+				*end_best = *end_curr;
+			}
+		} else {
+			/* We just saw a failing test. Reset temp edge. */
+			*bgn_curr = seq->iocfg->io_out1_delay_max + 1;
+			*end_curr = seq->iocfg->io_out1_delay_max + 1;
+
+			/* Early exit is only applicable to DQS. */
+			if (search_dm)
+				continue;
+
+			/*
+			 * Early exit optimization: if the remaining delay
+			 * chain space is less than already seen largest
+			 * window we can exit.
+			 */
+			if (*win_best - 1 > seq->iocfg->io_out1_delay_max
+				- new_dqs - d)
+				break;
+		}
+	}
+}
+
+/**
+ * rw_mgr_mem_calibrate_writes_center() - Center all windows
+ * @rank_bgn: Rank number
+ * @write_group: Write group
+ * @test_bgn: Rank at which the test begins
+ *
+ * Center all windows. Do per-bit-deskew to possibly increase size of
+ * certain windows.
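+ *
+ * Note on the DM handling below: the window is searched twice, first by
+ * shifting DM alone and then by shifting DQS, and the two partial windows
+ * are merged through the shared bgn_best/end_best/win_best bookkeeping
+ * in search_window().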
+ */
+static int
+rw_mgr_mem_calibrate_writes_center(struct socfpga_sdrseq *seq,
+				   const u32 rank_bgn, const u32 write_group,
+				   const u32 test_bgn)
+{
+	int i;
+	u32 sticky_bit_chk;
+	u32 min_index;
+	int left_edge[seq->rwcfg->mem_dq_per_write_dqs];
+	int right_edge[seq->rwcfg->mem_dq_per_write_dqs];
+	int mid;
+	int mid_min, orig_mid_min;
+	int new_dqs, start_dqs;
+	int dq_margin, dqs_margin, dm_margin;
+	int bgn_curr = seq->iocfg->io_out1_delay_max + 1;
+	int end_curr = seq->iocfg->io_out1_delay_max + 1;
+	int bgn_best = seq->iocfg->io_out1_delay_max + 1;
+	int end_best = seq->iocfg->io_out1_delay_max + 1;
+	int win_best = 0;
+
+	int ret;
+
+	debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
+
+	dm_margin = 0;
+
+	start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
+			   SCC_MGR_IO_OUT1_DELAY_OFFSET) +
+			  (seq->rwcfg->mem_dq_per_write_dqs << 2));
+
+	/* Per-bit deskew. */
+
+	/*
+	 * Set the left and right edge of each bit to an illegal value.
+	 * Use (seq->iocfg->io_out1_delay_max + 1) as an illegal value.
+	 */
+	sticky_bit_chk = 0;
+	for (i = 0; i < seq->rwcfg->mem_dq_per_write_dqs; i++) {
+		left_edge[i] = seq->iocfg->io_out1_delay_max + 1;
+		right_edge[i] = seq->iocfg->io_out1_delay_max + 1;
+	}
+
+	/* Search for the left edge of the window for each bit. */
+	search_left_edge(seq, 1, rank_bgn, write_group, 0, test_bgn,
+			 &sticky_bit_chk,
+			 left_edge, right_edge, 0);
+
+	/* Search for the right edge of the window for each bit. */
+	ret = search_right_edge(seq, 1, rank_bgn, write_group, 0,
+				start_dqs, 0,
+				&sticky_bit_chk,
+				left_edge, right_edge, 0);
+	if (ret) {
+		set_failing_group_stage(seq, test_bgn + ret - 1,
+					CAL_STAGE_WRITES,
+					CAL_SUBSTAGE_WRITES_CENTER);
+		return -EINVAL;
+	}
+
+	min_index = get_window_mid_index(seq, 1, left_edge, right_edge,
+					 &mid_min);
+
+	/* Determine the amount we can change DQS (which is -mid_min). */
+	orig_mid_min = mid_min;
+	new_dqs = start_dqs;
+	mid_min = 0;
+	debug_cond(DLEVEL >= 1,
+		   "%s:%d write_center: start_dqs=%d new_dqs=%d mid_min=%d\n",
+		   __func__, __LINE__, start_dqs, new_dqs, mid_min);
+
+	/* Add delay to bring centre of all DQ windows to the same "level". */
+	center_dq_windows(seq, 1, left_edge, right_edge, mid_min, orig_mid_min,
+			  min_index, 0, &dq_margin, &dqs_margin);
+
+	/* Move DQS */
+	scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
+	writel(0, &sdr_scc_mgr->update);
+
+	/* Centre DM */
+	debug_cond(DLEVEL >= 2, "%s:%d write_center: DM\n", __func__, __LINE__);
+
+	/* Search for the/part of the window with DM shift. */
+	search_window(seq, 1, rank_bgn, write_group, &bgn_curr, &end_curr,
+		      &bgn_best, &end_best, &win_best, 0);
+
+	/* Reset DM delay chains to 0. */
+	scc_mgr_apply_group_dm_out1_delay(seq, 0);
+
+	/*
+	 * Check to see if the current window nudges up against 0 delay.
+	 * If so, we need to continue the search by shifting DQS; otherwise,
+	 * the DQS search begins as a new search.
+	 */
+	if (end_curr != 0) {
+		bgn_curr = seq->iocfg->io_out1_delay_max + 1;
+		end_curr = seq->iocfg->io_out1_delay_max + 1;
+	}
+
+	/* Search for the/part of the window with DQS shifts. */
+	search_window(seq, 0, rank_bgn, write_group, &bgn_curr, &end_curr,
+		      &bgn_best, &end_best, &win_best, new_dqs);
+
+	/* Assign left and right edge for cal and reporting. */
+	left_edge[0] = -1 * bgn_best;
+	right_edge[0] = end_best;
+
+	debug_cond(DLEVEL >= 2, "%s:%d dm_calib: left=%d right=%d\n",
+		   __func__, __LINE__, left_edge[0], right_edge[0]);
+
+	/* Move DQS (back to orig).
+	 */
+	scc_mgr_apply_group_dqs_io_and_oct_out1(seq, write_group, new_dqs);
+
+	/* Move DM */
+
+	/* Find middle of window for the DM bit. */
+	mid = (left_edge[0] - right_edge[0]) / 2;
+
+	/* Only move right, since we are not moving DQS/DQ. */
+	if (mid < 0)
+		mid = 0;
+
+	/* dm_margin should fail if we never find a window. */
+	if (win_best == 0)
+		dm_margin = -1;
+	else
+		dm_margin = left_edge[0] - mid;
+
+	scc_mgr_apply_group_dm_out1_delay(seq, mid);
+	writel(0, &sdr_scc_mgr->update);
+
+	debug_cond(DLEVEL >= 2,
+		   "%s:%d dm_calib: left=%d right=%d mid=%d dm_margin=%d\n",
+		   __func__, __LINE__, left_edge[0], right_edge[0],
+		   mid, dm_margin);
+	/* Export values. */
+	seq->gbl.fom_out += dq_margin + dqs_margin;
+
+	debug_cond(DLEVEL >= 2,
+		   "%s:%d write_center: dq_margin=%d dqs_margin=%d dm_margin=%d\n",
+		   __func__, __LINE__, dq_margin, dqs_margin, dm_margin);
+
+	/*
+	 * Do not remove this line as it makes sure all of our
+	 * decisions have been applied.
+	 */
+	writel(0, &sdr_scc_mgr->update);
+
+	if ((dq_margin < 0) || (dqs_margin < 0) || (dm_margin < 0))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * rw_mgr_mem_calibrate_writes() - Write Calibration Part One
+ * @rank_bgn: Rank number
+ * @group: Read/Write Group
+ * @test_bgn: Rank at which the test begins
+ *
+ * Stage 2: Write Calibration Part One.
+ *
+ * This function implements UniPHY calibration Stage 2, as explained in
+ * detail in Altera EMI_RM 2015.05.04, "UniPHY Calibration Stages".
+ */
+static int rw_mgr_mem_calibrate_writes(struct socfpga_sdrseq *seq,
+				       const u32 rank_bgn, const u32 group,
+				       const u32 test_bgn)
+{
+	int ret;
+
+	/* Update info for sims */
+	debug("%s:%d %u %u\n", __func__, __LINE__, group, test_bgn);
+
+	reg_file_set_group(group);
+	reg_file_set_stage(CAL_STAGE_WRITES);
+	reg_file_set_sub_stage(CAL_SUBSTAGE_WRITES_CENTER);
+
+	ret = rw_mgr_mem_calibrate_writes_center(seq, rank_bgn, group,
+						 test_bgn);
+	if (ret)
+		set_failing_group_stage(seq, group, CAL_STAGE_WRITES,
+					CAL_SUBSTAGE_WRITES_CENTER);
+
+	return ret;
+}
+
+/**
+ * mem_precharge_and_activate() - Precharge all banks and activate
+ *
+ * Precharge all banks and activate row 0 in bank "000..." and bank "111...".
+ */
+static void mem_precharge_and_activate(struct socfpga_sdrseq *seq)
+{
+	int r;
+
+	for (r = 0; r < seq->rwcfg->mem_number_of_ranks; r++) {
+		/* Set rank. */
+		set_rank_and_odt_mask(seq, r, RW_MGR_ODT_MODE_OFF);
+
+		/* Precharge all banks. */
+		writel(seq->rwcfg->precharge_all, SDR_PHYGRP_RWMGRGRP_ADDRESS |
+						  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
+
+		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr0);
+		writel(seq->rwcfg->activate_0_and_1_wait1,
+		       &sdr_rw_load_jump_mgr_regs->load_jump_add0);
+
+		writel(0x0F, &sdr_rw_load_mgr_regs->load_cntr1);
+		writel(seq->rwcfg->activate_0_and_1_wait2,
+		       &sdr_rw_load_jump_mgr_regs->load_jump_add1);
+
+		/* Activate rows. */
+		writel(seq->rwcfg->activate_0_and_1,
+		       SDR_PHYGRP_RWMGRGRP_ADDRESS |
+		       RW_MGR_RUN_SINGLE_GROUP_OFFSET);
+	}
+}
+
+/**
+ * mem_init_latency() - Configure memory RLAT and WLAT settings
+ *
+ * Configure memory RLAT and WLAT parameters.
+ */
+static void mem_init_latency(struct socfpga_sdrseq *seq)
+{
+	/*
+	 * For AV/CV, LFIFO is hardened and always runs at full rate
+	 * so max latency in AFI clocks, used here, is correspondingly
+	 * smaller.
+	 */
+	const u32 max_latency = (1 << seq->misccfg->max_latency_count_width)
+				- 1;
+	u32 rlat, wlat;
+
+	debug("%s:%d\n", __func__, __LINE__);
+
+	/*
+	 * Read in write latency.
+	 * WL for Hard PHY does not include additive latency.
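+	 *
+	 * (Illustration with made-up register values: t_wl_add = 5 and
+	 * mem_t_add = 2 give wlat = 7, so rw_wl_nop_cycles = 6; with
+	 * t_rl_add = 9 the initial read latency becomes 9 + 16 = 25,
+	 * subject to the max_latency clamp above.)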
+ */ + wlat = readl(&data_mgr->t_wl_add); + wlat += readl(&data_mgr->mem_t_add); + + seq->gbl.rw_wl_nop_cycles = wlat - 1; + + /* Read in readl latency. */ + rlat = readl(&data_mgr->t_rl_add); + + /* Set a pretty high read latency initially. */ + seq->gbl.curr_read_lat = rlat + 16; + if (seq->gbl.curr_read_lat > max_latency) + seq->gbl.curr_read_lat = max_latency; + + writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat); + + /* Advertise write latency. */ + writel(wlat, &phy_mgr_cfg->afi_wlat); +} + +/** + * @mem_skip_calibrate() - Set VFIFO and LFIFO to instant-on settings + * + * Set VFIFO and LFIFO to instant-on settings in skip calibration mode. + */ +static void mem_skip_calibrate(struct socfpga_sdrseq *seq) +{ + u32 vfifo_offset; + u32 i, j, r; + + debug("%s:%d\n", __func__, __LINE__); + /* Need to update every shadow register set used by the interface */ + for (r = 0; r < seq->rwcfg->mem_number_of_ranks; + r += NUM_RANKS_PER_SHADOW_REG) { + /* + * Set output phase alignment settings appropriate for + * skip calibration. + */ + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { + scc_mgr_set_dqs_en_phase(i, 0); + if (seq->iocfg->dll_chain_length == 6) + scc_mgr_set_dqdqs_output_phase(i, 6); + else + scc_mgr_set_dqdqs_output_phase(i, 7); + /* + * Case:33398 + * + * Write data arrives to the I/O two cycles before write + * latency is reached (720 deg). + * -> due to bit-slip in a/c bus + * -> to allow board skew where dqs is longer than ck + * -> how often can this happen!? + * -> can claim back some ptaps for high freq + * support if we can relax this, but i digress... + * + * The write_clk leads mem_ck by 90 deg + * The minimum ptap of the OPA is 180 deg + * Each ptap has (360 / IO_DLL_CHAIN_LENGH) deg of delay + * The write_clk is always delayed by 2 ptaps + * + * Hence, to make DQS aligned to CK, we need to delay + * DQS by: + * (720 - 90 - 180 - 2) * + * (360 / seq->iocfg->dll_chain_length) + * + * Dividing the above by + (360 / seq->iocfg->dll_chain_length) + * gives us the number of ptaps, which simplies to: + * + * (1.25 * seq->iocfg->dll_chain_length - 2) + */ + scc_mgr_set_dqdqs_output_phase(i, + ((125 * seq->iocfg->dll_chain_length) + / 100) - 2); + } + writel(0xff, &sdr_scc_mgr->dqs_ena); + writel(0xff, &sdr_scc_mgr->dqs_io_ena); + + for (i = 0; i < seq->rwcfg->mem_if_write_dqs_width; i++) { + writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | + SCC_MGR_GROUP_COUNTER_OFFSET); + } + writel(0xff, &sdr_scc_mgr->dq_ena); + writel(0xff, &sdr_scc_mgr->dm_ena); + writel(0, &sdr_scc_mgr->update); + } + + /* Compensate for simulation model behaviour */ + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { + scc_mgr_set_dqs_bus_in_delay(i, 10); + scc_mgr_load_dqs(i); + } + writel(0, &sdr_scc_mgr->update); + + /* + * ArriaV has hard FIFOs that can only be initialized by incrementing + * in sequencer. + */ + vfifo_offset = seq->misccfg->calib_vfifo_offset; + for (j = 0; j < vfifo_offset; j++) + writel(0xff, &phy_mgr_cmd->inc_vfifo_hard_phy); + writel(0, &phy_mgr_cmd->fifo_reset); + + /* + * For Arria V and Cyclone V with hard LFIFO, we get the skip-cal + * setting from generation-time constant. + */ + seq->gbl.curr_read_lat = seq->misccfg->calib_lfifo_offset; + writel(seq->gbl.curr_read_lat, &phy_mgr_cfg->phy_rlat); +} + +/** + * mem_calibrate() - Memory calibration entry point. + * + * Perform memory calibration. 
+ */ +static u32 mem_calibrate(struct socfpga_sdrseq *seq) +{ + u32 i; + u32 rank_bgn, sr; + u32 write_group, write_test_bgn; + u32 read_group, read_test_bgn; + u32 run_groups, current_run; + u32 failing_groups = 0; + u32 group_failed = 0; + + const u32 rwdqs_ratio = seq->rwcfg->mem_if_read_dqs_width / + seq->rwcfg->mem_if_write_dqs_width; + + debug("%s:%d\n", __func__, __LINE__); + + /* Initialize the data settings */ + seq->gbl.error_substage = CAL_SUBSTAGE_NIL; + seq->gbl.error_stage = CAL_STAGE_NIL; + seq->gbl.error_group = 0xff; + seq->gbl.fom_in = 0; + seq->gbl.fom_out = 0; + + /* Initialize WLAT and RLAT. */ + mem_init_latency(seq); + + /* Initialize bit slips. */ + mem_precharge_and_activate(seq); + + for (i = 0; i < seq->rwcfg->mem_if_read_dqs_width; i++) { + writel(i, SDR_PHYGRP_SCCGRP_ADDRESS | + SCC_MGR_GROUP_COUNTER_OFFSET); + /* Only needed once to set all groups, pins, DQ, DQS, DM. */ + if (i == 0) + scc_mgr_set_hhp_extras(); + + scc_set_bypass_mode(i); + } + + /* Calibration is skipped. */ + if ((seq->dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL) { + /* + * Set VFIFO and LFIFO to instant-on settings in skip + * calibration mode. + */ + mem_skip_calibrate(seq); + + /* + * Do not remove this line as it makes sure all of our + * decisions have been applied. + */ + writel(0, &sdr_scc_mgr->update); + return 1; + } + + /* Calibration is not skipped. */ + for (i = 0; i < NUM_CALIB_REPEAT; i++) { + /* + * Zero all delay chain/phase settings for all + * groups and all shadow register sets. + */ + scc_mgr_zero_all(seq); + + run_groups = ~0; + + for (write_group = 0, write_test_bgn = 0; write_group + < seq->rwcfg->mem_if_write_dqs_width; write_group++, + write_test_bgn += seq->rwcfg->mem_dq_per_write_dqs) { + /* Initialize the group failure */ + group_failed = 0; + + current_run = run_groups & ((1 << + RW_MGR_NUM_DQS_PER_WRITE_GROUP) - 1); + run_groups = run_groups >> + RW_MGR_NUM_DQS_PER_WRITE_GROUP; + + if (current_run == 0) + continue; + + writel(write_group, SDR_PHYGRP_SCCGRP_ADDRESS | + SCC_MGR_GROUP_COUNTER_OFFSET); + scc_mgr_zero_group(seq, write_group, 0); + + for (read_group = write_group * rwdqs_ratio, + read_test_bgn = 0; + read_group < (write_group + 1) * rwdqs_ratio; + read_group++, + read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) { + if (STATIC_CALIB_STEPS & CALIB_SKIP_VFIFO) + continue; + + /* Calibrate the VFIFO */ + if (rw_mgr_mem_calibrate_vfifo(seq, read_group, + read_test_bgn)) + continue; + + if (!(seq->gbl.phy_debug_mode_flags & + PHY_DEBUG_SWEEP_ALL_GROUPS)) + return 0; + + /* The group failed, we're done. */ + goto grp_failed; + } + + /* Calibrate the output side */ + for (rank_bgn = 0, sr = 0; + rank_bgn < seq->rwcfg->mem_number_of_ranks; + rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) { + if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) + continue; + + /* Not needed in quick mode! */ + if (STATIC_CALIB_STEPS & + CALIB_SKIP_DELAY_SWEEPS) + continue; + + /* Calibrate WRITEs */ + if (!rw_mgr_mem_calibrate_writes(seq, rank_bgn, + write_group, + write_test_bgn)) + continue; + + group_failed = 1; + if (!(seq->gbl.phy_debug_mode_flags & + PHY_DEBUG_SWEEP_ALL_GROUPS)) + return 0; + } + + /* Some group failed, we're done. 
*/ + if (group_failed) + goto grp_failed; + + for (read_group = write_group * rwdqs_ratio, + read_test_bgn = 0; + read_group < (write_group + 1) * rwdqs_ratio; + read_group++, + read_test_bgn += seq->rwcfg->mem_dq_per_read_dqs) { + if (STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) + continue; + + if (!rw_mgr_mem_calibrate_vfifo_end(seq, + read_group, + read_test_bgn)) + continue; + + if (!(seq->gbl.phy_debug_mode_flags & + PHY_DEBUG_SWEEP_ALL_GROUPS)) + return 0; + + /* The group failed, we're done. */ + goto grp_failed; + } + + /* No group failed, continue as usual. */ + continue; + +grp_failed: /* A group failed, increment the counter. */ + failing_groups++; + } + + /* + * USER If there are any failing groups then report + * the failure. + */ + if (failing_groups != 0) + return 0; + + if (STATIC_CALIB_STEPS & CALIB_SKIP_LFIFO) + continue; + + /* Calibrate the LFIFO */ + if (!rw_mgr_mem_calibrate_lfifo(seq)) + return 0; + } + + /* + * Do not remove this line as it makes sure all of our decisions + * have been applied. + */ + writel(0, &sdr_scc_mgr->update); + return 1; +} + +/** + * run_mem_calibrate() - Perform memory calibration + * + * This function triggers the entire memory calibration procedure. + */ +static int run_mem_calibrate(struct socfpga_sdrseq *seq) +{ + int pass; + u32 ctrl_cfg; + + debug("%s:%d\n", __func__, __LINE__); + + /* Reset pass/fail status shown on afi_cal_success/fail */ + writel(PHY_MGR_CAL_RESET, &phy_mgr_cfg->cal_status); + + /* Stop tracking manager. */ + ctrl_cfg = readl(&sdr_ctrl->ctrl_cfg); + writel(ctrl_cfg & ~SDR_CTRLGRP_CTRLCFG_DQSTRKEN_MASK, + &sdr_ctrl->ctrl_cfg); + + phy_mgr_initialize(seq); + rw_mgr_mem_initialize(seq); + + /* Perform the actual memory calibration. */ + pass = mem_calibrate(seq); + + mem_precharge_and_activate(seq); + writel(0, &phy_mgr_cmd->fifo_reset); + + /* Handoff. */ + rw_mgr_mem_handoff(seq); + /* + * In Hard PHY this is a 2-bit control: + * 0: AFI Mux Select + * 1: DDIO Mux Select + */ + writel(0x2, &phy_mgr_cfg->mux_sel); + + /* Start tracking manager. */ + writel(ctrl_cfg, &sdr_ctrl->ctrl_cfg); + + return pass; +} + +/** + * debug_mem_calibrate() - Report result of memory calibration + * @pass: Value indicating whether calibration passed or failed + * + * This function reports the results of the memory calibration + * and writes debug information into the register file. 
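+ *
+ * On failure, the failing stage, substage and group are packed into a
+ * single word and stored in the register file for post-mortem inspection.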
+ */
+static void debug_mem_calibrate(struct socfpga_sdrseq *seq, int pass)
+{
+ u32 debug_info;
+
+ if (pass) {
+ debug("%s: CALIBRATION PASSED\n", __FILE__);
+
+ seq->gbl.fom_in /= 2;
+ seq->gbl.fom_out /= 2;
+
+ if (seq->gbl.fom_in > 0xff)
+ seq->gbl.fom_in = 0xff;
+
+ if (seq->gbl.fom_out > 0xff)
+ seq->gbl.fom_out = 0xff;
+
+ /* Update the FOM in the register file */
+ debug_info = seq->gbl.fom_in;
+ debug_info |= seq->gbl.fom_out << 8;
+ writel(debug_info, &sdr_reg_file->fom);
+
+ writel(debug_info, &phy_mgr_cfg->cal_debug_info);
+ writel(PHY_MGR_CAL_SUCCESS, &phy_mgr_cfg->cal_status);
+ } else {
+ debug("%s: CALIBRATION FAILED\n", __FILE__);
+
+ /* Update the failing group/stage in the register file */
+ debug_info = seq->gbl.error_stage;
+ debug_info |= seq->gbl.error_substage << 8;
+ debug_info |= seq->gbl.error_group << 16;
+
+ writel(debug_info, &sdr_reg_file->failing_stage);
+ writel(debug_info, &phy_mgr_cfg->cal_debug_info);
+ writel(PHY_MGR_CAL_FAIL, &phy_mgr_cfg->cal_status);
+ }
+
+ debug("%s: Calibration complete\n", __FILE__);
+}
+
+/**
+ * hc_initialize_rom_data() - Initialize ROM data
+ *
+ * Initialize ROM data.
+ */
+static void hc_initialize_rom_data(void)
+{
+ unsigned int nelem = 0;
+ const u32 *rom_init;
+ u32 i, addr;
+
+ socfpga_get_seq_inst_init(&rom_init, &nelem);
+ addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_INST_ROM_WRITE_OFFSET;
+ for (i = 0; i < nelem; i++)
+ writel(rom_init[i], addr + (i << 2));
+
+ socfpga_get_seq_ac_init(&rom_init, &nelem);
+ addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_AC_ROM_WRITE_OFFSET;
+ for (i = 0; i < nelem; i++)
+ writel(rom_init[i], addr + (i << 2));
+}
+
+/**
+ * initialize_reg_file() - Initialize SDR register file
+ *
+ * Initialize SDR register file.
+ */
+static void initialize_reg_file(struct socfpga_sdrseq *seq)
+{
+ /* Initialize the register file with the correct data */
+ writel(seq->misccfg->reg_file_init_seq_signature,
+ &sdr_reg_file->signature);
+ writel(0, &sdr_reg_file->debug_data_addr);
+ writel(0, &sdr_reg_file->cur_stage);
+ writel(0, &sdr_reg_file->fom);
+ writel(0, &sdr_reg_file->failing_stage);
+ writel(0, &sdr_reg_file->debug1);
+ writel(0, &sdr_reg_file->debug2);
+}
+
+/**
+ * initialize_hps_phy() - Initialize HPS PHY
+ *
+ * Initialize HPS PHY.
+ */
+static void initialize_hps_phy(void)
+{
+ u32 reg;
+ /*
+ * Tracking also gets configured here because it's in the
+ * same register.
+ */
+ u32 trk_sample_count = 7500;
+ /*
+ * Format is number of outer loops in the 16 MSB, sample
+ * count in 16 LSB.
+ */
+ u32 trk_long_idle_sample_count = (10 << 16) | 100;
+
+ reg = 0;
+ reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ACDELAYEN_SET(2);
+ reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQDELAYEN_SET(1);
+ reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSDELAYEN_SET(1);
+ reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_DQSLOGICDELAYEN_SET(1);
+ reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_RESETDELAYEN_SET(0);
+ reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_LPDDRDIS_SET(1);
+ /*
+ * This field selects the intrinsic latency to RDATA_EN/FULL path.
+ * 00-bypass, 01- add 5 cycles, 10- add 10 cycles, 11- add 15 cycles.
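+ * We select the bypass setting (0) below.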
+ */ + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_ADDLATSEL_SET(0); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_SET( + trk_sample_count); + writel(reg, &sdr_ctrl->phy_ctrl0); + + reg = 0; + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_SAMPLECOUNT_31_20_SET( + trk_sample_count >> + SDR_CTRLGRP_PHYCTRL_PHYCTRL_0_SAMPLECOUNT_19_0_WIDTH); + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_SET( + trk_long_idle_sample_count); + writel(reg, &sdr_ctrl->phy_ctrl1); + + reg = 0; + reg |= SDR_CTRLGRP_PHYCTRL_PHYCTRL_2_LONGIDLESAMPLECOUNT_31_20_SET( + trk_long_idle_sample_count >> + SDR_CTRLGRP_PHYCTRL_PHYCTRL_1_LONGIDLESAMPLECOUNT_19_0_WIDTH); + writel(reg, &sdr_ctrl->phy_ctrl2); +} + +/** + * initialize_tracking() - Initialize tracking + * + * Initialize the register file with usable initial data. + */ +static void initialize_tracking(struct socfpga_sdrseq *seq) +{ + /* + * Initialize the register file with the correct data. + * Compute usable version of value in case we skip full + * computation later. + */ + writel(DIV_ROUND_UP(seq->iocfg->delay_per_opa_tap, + seq->iocfg->delay_per_dchain_tap) - 1, + &sdr_reg_file->dtaps_per_ptap); + + /* trk_sample_count */ + writel(7500, &sdr_reg_file->trk_sample_count); + + /* longidle outer loop [15:0] */ + writel((10 << 16) | (100 << 0), &sdr_reg_file->trk_longidle); + + /* + * longidle sample count [31:24] + * trfc, worst case of 933Mhz 4Gb [23:16] + * trcd, worst case [15:8] + * vfifo wait [7:0] + */ + writel((243 << 24) | (14 << 16) | (10 << 8) | (4 << 0), + &sdr_reg_file->delays); + + /* mux delay */ + if (dram_is_ddr(2)) { + writel(0, &sdr_reg_file->trk_rw_mgr_addr); + } else if (dram_is_ddr(3)) { + writel((seq->rwcfg->idle << 24) | + (seq->rwcfg->activate_1 << 16) | + (seq->rwcfg->sgle_read << 8) | + (seq->rwcfg->precharge_all << 0), + &sdr_reg_file->trk_rw_mgr_addr); + } + + writel(seq->rwcfg->mem_if_read_dqs_width, + &sdr_reg_file->trk_read_dqs_width); + + /* trefi [7:0] */ + if (dram_is_ddr(2)) { + writel(1000 << 0, &sdr_reg_file->trk_rfsh); + } else if (dram_is_ddr(3)) { + writel((seq->rwcfg->refresh_all << 24) | (1000 << 0), + &sdr_reg_file->trk_rfsh); + } +} + +int sdram_calibration_full(struct socfpga_sdr *sdr) +{ + u32 pass; + struct socfpga_sdrseq seq; + + /* + * For size reasons, this file uses hard coded addresses. + * Check if we are called with the correct address. + */ + if (sdr != (struct socfpga_sdr *)SOCFPGA_SDR_ADDRESS) + return -ENODEV; + + memset(&seq, 0, sizeof(seq)); + + seq.rwcfg = socfpga_get_sdram_rwmgr_config(); + seq.iocfg = socfpga_get_sdram_io_config(); + seq.misccfg = socfpga_get_sdram_misc_config(); + + /* Set the calibration enabled by default */ + seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_ENABLE_CAL_RPT; + /* + * Only sweep all groups (regardless of fail state) by default + * Set enabled read test by default. 
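+ * The guaranteed read test can be compiled out with
+ * DISABLE_GUARANTEED_READ below.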
+ */ +#if DISABLE_GUARANTEED_READ + seq.gbl.phy_debug_mode_flags |= PHY_DEBUG_DISABLE_GUARANTEED_READ; +#endif + /* Initialize the register file */ + initialize_reg_file(&seq); + + /* Initialize any PHY CSR */ + initialize_hps_phy(); + + scc_mgr_initialize(); + + initialize_tracking(&seq); + + debug("%s: Preparing to start memory calibration\n", __FILE__); + + debug("%s:%d\n", __func__, __LINE__); + debug_cond(DLEVEL >= 1, + "DDR3 FULL_RATE ranks=%u cs/dimm=%u dq/dqs=%u,%u vg/dqs=%u,%u ", + seq.rwcfg->mem_number_of_ranks, + seq.rwcfg->mem_number_of_cs_per_dimm, + seq.rwcfg->mem_dq_per_read_dqs, + seq.rwcfg->mem_dq_per_write_dqs, + seq.rwcfg->mem_virtual_groups_per_read_dqs, + seq.rwcfg->mem_virtual_groups_per_write_dqs); + debug_cond(DLEVEL >= 1, + "dqs=%u,%u dq=%u dm=%u ptap_delay=%u dtap_delay=%u ", + seq.rwcfg->mem_if_read_dqs_width, + seq.rwcfg->mem_if_write_dqs_width, + seq.rwcfg->mem_data_width, seq.rwcfg->mem_data_mask_width, + seq.iocfg->delay_per_opa_tap, + seq.iocfg->delay_per_dchain_tap); + debug_cond(DLEVEL >= 1, "dtap_dqsen_delay=%u, dll=%u", + seq.iocfg->delay_per_dqs_en_dchain_tap, + seq.iocfg->dll_chain_length); + debug_cond(DLEVEL >= 1, + "max values: en_p=%u dqdqs_p=%u en_d=%u dqs_in_d=%u ", + seq.iocfg->dqs_en_phase_max, seq.iocfg->dqdqs_out_phase_max, + seq.iocfg->dqs_en_delay_max, seq.iocfg->dqs_in_delay_max); + debug_cond(DLEVEL >= 1, "io_in_d=%u io_out1_d=%u io_out2_d=%u ", + seq.iocfg->io_in_delay_max, seq.iocfg->io_out1_delay_max, + seq.iocfg->io_out2_delay_max); + debug_cond(DLEVEL >= 1, "dqs_in_reserve=%u dqs_out_reserve=%u\n", + seq.iocfg->dqs_in_reserve, seq.iocfg->dqs_out_reserve); + + hc_initialize_rom_data(); + + /* update info for sims */ + reg_file_set_stage(CAL_STAGE_NIL); + reg_file_set_group(0); + + /* + * Load global needed for those actions that require + * some dynamic calibration support. + */ + seq.dyn_calib_steps = STATIC_CALIB_STEPS; + /* + * Load global to allow dynamic selection of delay loop settings + * based on calibration mode. 
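+ * (skip_delay_mask is all-ones when the delay loops run and zero when
+ * they are skipped.)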
+ */ + if (!(seq.dyn_calib_steps & CALIB_SKIP_DELAY_LOOPS)) + seq.skip_delay_mask = 0xff; + else + seq.skip_delay_mask = 0x0; + + pass = run_mem_calibrate(&seq); + debug_mem_calibrate(&seq, pass); + return pass; +} diff --git a/roms/u-boot/drivers/ddr/altera/sequencer.h b/roms/u-boot/drivers/ddr/altera/sequencer.h new file mode 100644 index 000000000..c72a683ff --- /dev/null +++ b/roms/u-boot/drivers/ddr/altera/sequencer.h @@ -0,0 +1,284 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Copyright Altera Corporation (C) 2012-2015 + */ + +#ifndef _SEQUENCER_H_ +#define _SEQUENCER_H_ + +#define RW_MGR_NUM_DM_PER_WRITE_GROUP (seq->rwcfg->mem_data_mask_width \ + / seq->rwcfg->mem_if_write_dqs_width) +#define RW_MGR_NUM_TRUE_DM_PER_WRITE_GROUP ( \ + seq->rwcfg->true_mem_data_mask_width \ + / seq->rwcfg->mem_if_write_dqs_width) + +#define RW_MGR_NUM_DQS_PER_WRITE_GROUP (seq->rwcfg->mem_if_read_dqs_width \ + / seq->rwcfg->mem_if_write_dqs_width) +#define NUM_RANKS_PER_SHADOW_REG (seq->rwcfg->mem_number_of_ranks \ + / NUM_SHADOW_REGS) + +#define RW_MGR_RUN_SINGLE_GROUP_OFFSET 0x0 +#define RW_MGR_RUN_ALL_GROUPS_OFFSET 0x0400 +#define RW_MGR_RESET_READ_DATAPATH_OFFSET 0x1000 +#define RW_MGR_SET_CS_AND_ODT_MASK_OFFSET 0x1400 +#define RW_MGR_INST_ROM_WRITE_OFFSET 0x1800 +#define RW_MGR_AC_ROM_WRITE_OFFSET 0x1C00 + +#define NUM_SHADOW_REGS 1 + +#define RW_MGR_RANK_NONE 0xFF +#define RW_MGR_RANK_ALL 0x00 + +#define RW_MGR_ODT_MODE_OFF 0 +#define RW_MGR_ODT_MODE_READ_WRITE 1 + +#define NUM_CALIB_REPEAT 1 + +#define NUM_READ_TESTS 7 +#define NUM_READ_PB_TESTS 7 +#define NUM_WRITE_TESTS 15 +#define NUM_WRITE_PB_TESTS 31 + +#define PASS_ALL_BITS 1 +#define PASS_ONE_BIT 0 + +/* calibration stages */ +#define CAL_STAGE_NIL 0 +#define CAL_STAGE_VFIFO 1 +#define CAL_STAGE_WLEVEL 2 +#define CAL_STAGE_LFIFO 3 +#define CAL_STAGE_WRITES 4 +#define CAL_STAGE_FULLTEST 5 +#define CAL_STAGE_REFRESH 6 +#define CAL_STAGE_CAL_SKIPPED 7 +#define CAL_STAGE_CAL_ABORTED 8 +#define CAL_STAGE_VFIFO_AFTER_WRITES 9 + +/* calibration substages */ +#define CAL_SUBSTAGE_NIL 0 +#define CAL_SUBSTAGE_GUARANTEED_READ 1 +#define CAL_SUBSTAGE_DQS_EN_PHASE 2 +#define CAL_SUBSTAGE_VFIFO_CENTER 3 +#define CAL_SUBSTAGE_WORKING_DELAY 1 +#define CAL_SUBSTAGE_LAST_WORKING_DELAY 2 +#define CAL_SUBSTAGE_WLEVEL_COPY 3 +#define CAL_SUBSTAGE_WRITES_CENTER 1 +#define CAL_SUBSTAGE_READ_LATENCY 1 +#define CAL_SUBSTAGE_REFRESH 1 + +#define SCC_MGR_GROUP_COUNTER_OFFSET 0x0000 +#define SCC_MGR_DQS_IN_DELAY_OFFSET 0x0100 +#define SCC_MGR_DQS_EN_PHASE_OFFSET 0x0200 +#define SCC_MGR_DQS_EN_DELAY_OFFSET 0x0300 +#define SCC_MGR_DQDQS_OUT_PHASE_OFFSET 0x0400 +#define SCC_MGR_OCT_OUT1_DELAY_OFFSET 0x0500 +#define SCC_MGR_IO_OUT1_DELAY_OFFSET 0x0700 +#define SCC_MGR_IO_IN_DELAY_OFFSET 0x0900 + +/* HHP-HPS-specific versions of some commands */ +#define SCC_MGR_DQS_EN_DELAY_GATE_OFFSET 0x0600 +#define SCC_MGR_IO_OE_DELAY_OFFSET 0x0800 +#define SCC_MGR_HHP_GLOBALS_OFFSET 0x0A00 +#define SCC_MGR_HHP_RFILE_OFFSET 0x0B00 +#define SCC_MGR_AFI_CAL_INIT_OFFSET 0x0D00 + +#define SDR_PHYGRP_SCCGRP_ADDRESS (SOCFPGA_SDR_ADDRESS | 0x0) +#define SDR_PHYGRP_PHYMGRGRP_ADDRESS (SOCFPGA_SDR_ADDRESS | 0x1000) +#define SDR_PHYGRP_RWMGRGRP_ADDRESS (SOCFPGA_SDR_ADDRESS | 0x2000) +#define SDR_PHYGRP_DATAMGRGRP_ADDRESS (SOCFPGA_SDR_ADDRESS | 0x4000) +#define SDR_PHYGRP_REGFILEGRP_ADDRESS (SOCFPGA_SDR_ADDRESS | 0x4800) + +#define PHY_MGR_CAL_RESET (0) +#define PHY_MGR_CAL_SUCCESS (1) +#define PHY_MGR_CAL_FAIL (2) + +#define CALIB_SKIP_DELAY_LOOPS (1 << 0) +#define 
CALIB_SKIP_ALL_BITS_CHK (1 << 1) +#define CALIB_SKIP_DELAY_SWEEPS (1 << 2) +#define CALIB_SKIP_VFIFO (1 << 3) +#define CALIB_SKIP_LFIFO (1 << 4) +#define CALIB_SKIP_WLEVEL (1 << 5) +#define CALIB_SKIP_WRITES (1 << 6) +#define CALIB_SKIP_FULL_TEST (1 << 7) +#define CALIB_SKIP_ALL (CALIB_SKIP_VFIFO | \ + CALIB_SKIP_LFIFO | CALIB_SKIP_WLEVEL | \ + CALIB_SKIP_WRITES | CALIB_SKIP_FULL_TEST) +#define CALIB_IN_RTL_SIM (1 << 8) + +/* Scan chain manager command addresses */ +#define READ_SCC_OCT_OUT2_DELAY 0 +#define READ_SCC_DQ_OUT2_DELAY 0 +#define READ_SCC_DQS_IO_OUT2_DELAY 0 +#define READ_SCC_DM_IO_OUT2_DELAY 0 + +/* HHP-HPS-specific values */ +#define SCC_MGR_HHP_EXTRAS_OFFSET 0 +#define SCC_MGR_HHP_DQSE_MAP_OFFSET 1 + +/* PHY Debug mode flag constants */ +#define PHY_DEBUG_IN_DEBUG_MODE 0x00000001 +#define PHY_DEBUG_ENABLE_CAL_RPT 0x00000002 +#define PHY_DEBUG_ENABLE_MARGIN_RPT 0x00000004 +#define PHY_DEBUG_SWEEP_ALL_GROUPS 0x00000008 +#define PHY_DEBUG_DISABLE_GUARANTEED_READ 0x00000010 +#define PHY_DEBUG_ENABLE_NON_DESTRUCTIVE_CALIBRATION 0x00000020 + +struct socfpga_sdr_rw_load_manager { + u32 load_cntr0; + u32 load_cntr1; + u32 load_cntr2; + u32 load_cntr3; +}; + +struct socfpga_sdr_rw_load_jump_manager { + u32 load_jump_add0; + u32 load_jump_add1; + u32 load_jump_add2; + u32 load_jump_add3; +}; + +struct socfpga_sdr_reg_file { + u32 signature; + u32 debug_data_addr; + u32 cur_stage; + u32 fom; + u32 failing_stage; + u32 debug1; + u32 debug2; + u32 dtaps_per_ptap; + u32 trk_sample_count; + u32 trk_longidle; + u32 delays; + u32 trk_rw_mgr_addr; + u32 trk_read_dqs_width; + u32 trk_rfsh; +}; + +/* parameter variable holder */ +struct param_type { + u32 read_correct_mask; + u32 read_correct_mask_vg; + u32 write_correct_mask; + u32 write_correct_mask_vg; +}; + + +/* global variable holder */ +struct gbl_type { + uint32_t phy_debug_mode_flags; + + /* current read latency */ + + uint32_t curr_read_lat; + + /* error code */ + + uint32_t error_substage; + uint32_t error_stage; + uint32_t error_group; + + /* figure-of-merit in, figure-of-merit out */ + + uint32_t fom_in; + uint32_t fom_out; + + /*USER Number of RW Mgr NOP cycles between + write command and write data */ + uint32_t rw_wl_nop_cycles; +}; + +struct socfpga_sdr_scc_mgr { + u32 dqs_ena; + u32 dqs_io_ena; + u32 dq_ena; + u32 dm_ena; + u32 __padding1[4]; + u32 update; + u32 __padding2[7]; + u32 active_rank; +}; + +/* PHY manager configuration registers. */ +struct socfpga_phy_mgr_cfg { + u32 phy_rlat; + u32 reset_mem_stbl; + u32 mux_sel; + u32 cal_status; + u32 cal_debug_info; + u32 vfifo_rd_en_ovrd; + u32 afi_wlat; + u32 afi_rlat; +}; + +/* PHY manager command addresses. 
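+ * A command is issued by writing to the corresponding register.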
*/ +struct socfpga_phy_mgr_cmd { + u32 inc_vfifo_fr; + u32 inc_vfifo_hard_phy; + u32 fifo_reset; + u32 inc_vfifo_fr_hr; + u32 inc_vfifo_qr; +}; + +struct socfpga_data_mgr { + u32 __padding1; + u32 t_wl_add; + u32 mem_t_add; + u32 t_rl_add; +}; + +/* This struct describes the controller @ SOCFPGA_SDR_ADDRESS */ +struct socfpga_sdr { + /* SDR_PHYGRP_SCCGRP_ADDRESS */ + u8 _align1[0xe00]; + /* SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00 */ + struct socfpga_sdr_scc_mgr sdr_scc_mgr; + u8 _align2[0x1bc]; + /* SDR_PHYGRP_PHYMGRGRP_ADDRESS */ + struct socfpga_phy_mgr_cmd phy_mgr_cmd; + u8 _align3[0x2c]; + /* SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40 */ + struct socfpga_phy_mgr_cfg phy_mgr_cfg; + u8 _align4[0xfa0]; + /* SDR_PHYGRP_RWMGRGRP_ADDRESS */ + u8 rwmgr_grp[0x800]; + /* SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800 */ + struct socfpga_sdr_rw_load_manager sdr_rw_load_mgr_regs; + u8 _align5[0x3f0]; + /* SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00 */ + struct socfpga_sdr_rw_load_jump_manager sdr_rw_load_jump_mgr_regs; + u8 _align6[0x13f0]; + /* SDR_PHYGRP_DATAMGRGRP_ADDRESS */ + struct socfpga_data_mgr data_mgr; + u8 _align7[0x7f0]; + /* SDR_PHYGRP_REGFILEGRP_ADDRESS */ + struct socfpga_sdr_reg_file sdr_reg_file; + u8 _align8[0x7c8]; + /* SDR_CTRLGRP_ADDRESS */ + struct socfpga_sdr_ctrl sdr_ctrl; + u8 _align9[0xea4]; +}; + +struct socfpga_sdrseq { + const struct socfpga_sdram_rw_mgr_config *rwcfg; + const struct socfpga_sdram_io_config *iocfg; + const struct socfpga_sdram_misc_config *misccfg; + /* calibration steps requested by the rtl */ + u16 dyn_calib_steps; + /* + * To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option + * instead of static, we use boolean logic to select between + * non-skip and skip values + * + * The mask is set to include all bits when not-skipping, but is + * zero when skipping + */ + + u16 skip_delay_mask; /* mask off bits when skipping/not-skipping */ + struct gbl_type gbl; + struct param_type param; +}; + +int sdram_calibration_full(struct socfpga_sdr *sdr); +bool dram_is_ddr(const u8 ddr); + +#endif /* _SEQUENCER_H_ */ diff --git a/roms/u-boot/drivers/ddr/fsl/Kconfig b/roms/u-boot/drivers/ddr/fsl/Kconfig new file mode 100644 index 000000000..8b480dfd6 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/Kconfig @@ -0,0 +1,188 @@ +config SYS_FSL_DDR + bool + help + Select Freescale General DDR driver, shared between most Freescale + PowerPC- based SoCs (such as mpc83xx, mpc85xx, mpc86xx) and ARM- + based Layerscape SoCs (such as ls2080a). + +config SYS_FSL_MMDC + bool + help + Select Freescale Multi Mode DDR controller (MMDC). 
+ +config SYS_FSL_DDR_BE + bool + help + Access DDR registers in big-endian + +config SYS_FSL_DDR_LE + bool + help + Access DDR registers in little-endian + +config FSL_DDR_BIST + bool + +config FSL_DDR_INTERACTIVE + bool + +config FSL_DDR_SYNC_REFRESH + bool + +config FSL_DDR_FIRST_SLOT_QUAD_CAPABLE + bool + +menu "Freescale DDR controllers" + depends on SYS_FSL_DDR + +config SYS_NUM_DDR_CTLRS + int "Maximum DDR controllers" + default 3 if ARCH_LS2080A || \ + ARCH_T4240 + default 2 if ARCH_B4860 || \ + ARCH_BSC9132 || \ + ARCH_MPC8572 || \ + ARCH_MPC8641 || \ + ARCH_P4080 || \ + ARCH_P5040 || \ + ARCH_LX2160A || \ + ARCH_LX2162A || \ + ARCH_T4160 + default 1 + +config SYS_FSL_DDR_VER + int + default 50 if SYS_FSL_DDR_VER_50 + default 47 if SYS_FSL_DDR_VER_47 + default 46 if SYS_FSL_DDR_VER_46 + default 44 if SYS_FSL_DDR_VER_44 + +config SYS_FSL_DDR_VER_50 + bool + +config SYS_FSL_DDR_VER_47 + bool + +config SYS_FSL_DDR_VER_46 + bool + +config SYS_FSL_DDR_VER_44 + bool + +config SYS_FSL_DDRC_GEN1 + bool + help + Enable Freescale DDR controller. + +config SYS_FSL_DDRC_GEN2 + bool + depends on !MPC86xx + help + Enable Freescale DDR2 controller. + +config SYS_FSL_DDRC_86XX_GEN2 + bool + depends on MPC86xx + help + Enable Freescale DDR2 controller for MPC86xx SoCs. + +config SYS_FSL_DDRC_GEN3 + bool + depends on PPC + help + Enable Freescale DDR3 controller for PowerPC SoCs. + +config SYS_FSL_DDRC_ARM_GEN3 + bool + depends on ARM + help + Enable Freescale DDR3 controller for ARM SoCs. + +config SYS_FSL_DDRC_GEN4 + bool + help + Enable Freescale DDR4 controller. + +config SYS_FSL_HAS_DDR4 + bool + +config SYS_FSL_HAS_DDR3 + bool + +config SYS_FSL_HAS_DDR2 + bool + +config SYS_FSL_HAS_DDR1 + bool + +choice + prompt "DDR technology" + default SYS_FSL_DDR4 if SYS_FSL_HAS_DDR4 + default SYS_FSL_DDR3 if SYS_FSL_HAS_DDR3 + default SYS_FSL_DDR2 if SYS_FSL_HAS_DDR2 + default SYS_FSL_DDR1 if SYS_FSL_HAS_DDR1 + +config SYS_FSL_DDR4 + bool "Freescale DDR4 controller" + depends on SYS_FSL_HAS_DDR4 + select SYS_FSL_DDRC_GEN4 + +config SYS_FSL_DDR3 + bool "Freescale DDR3 controller" + depends on SYS_FSL_HAS_DDR3 + select SYS_FSL_DDRC_GEN3 if PPC + select SYS_FSL_DDRC_ARM_GEN3 if ARM + +config SYS_FSL_DDR2 + bool "Freescale DDR2 controller" + depends on SYS_FSL_HAS_DDR2 + select SYS_FSL_DDRC_GEN2 if (!MPC86xx && !SYS_FSL_DDRC_GEN3) + select SYS_FSL_DDRC_86XX_GEN2 if MPC86xx + +config SYS_FSL_DDR1 + bool "Freescale DDR1 controller" + depends on SYS_FSL_HAS_DDR1 + select SYS_FSL_DDRC_GEN1 + +endchoice + +endmenu + +config SYS_FSL_ERRATUM_A008378 + bool + +config SYS_FSL_ERRATUM_A008109 + bool + +config SYS_FSL_ERRATUM_A008511 + bool + +config SYS_FSL_ERRATUM_A009663 + bool + +config SYS_FSL_ERRATUM_A009801 + bool + +config SYS_FSL_ERRATUM_A009803 + bool + +config SYS_FSL_ERRATUM_A009942 + bool + +config SYS_FSL_ERRATUM_A010165 + bool + +config SYS_FSL_ERRATUM_NMG_DDR120 + bool + +config SYS_FSL_ERRATUM_DDR_115 + bool + +config SYS_FSL_ERRATUM_DDR111_DDR134 + bool + +config SYS_FSL_ERRATUM_DDR_A003 + bool + +config SYS_FSL_ERRATUM_DDR_A003474 + bool diff --git a/roms/u-boot/drivers/ddr/fsl/Makefile b/roms/u-boot/drivers/ddr/fsl/Makefile new file mode 100644 index 000000000..c675f44ab --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/Makefile @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright 2008-2014 Freescale Semiconductor, Inc. 
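+#
+# main.o, util.o, ctrl_regs.o, options.o and lc_common_dimm_params.o form
+# the technology-independent core; the controller-specific objects below
+# are selected by the DDRC generation Kconfig symbols.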
+
+obj-$(CONFIG_SYS_FSL_DDR1) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+obj-$(CONFIG_SYS_FSL_DDR2) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+obj-$(CONFIG_SYS_FSL_DDR3) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+obj-$(CONFIG_SYS_FSL_DDR4) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+
+ifdef CONFIG_DDR_SPD
+SPD := y
+endif
+ifdef CONFIG_SPD_EEPROM
+SPD := y
+endif
+ifdef SPD
+obj-$(CONFIG_SYS_FSL_DDR1) += ddr1_dimm_params.o
+obj-$(CONFIG_SYS_FSL_DDR2) += ddr2_dimm_params.o
+obj-$(CONFIG_SYS_FSL_DDR3) += ddr3_dimm_params.o
+obj-$(CONFIG_SYS_FSL_DDR4) += ddr4_dimm_params.o
+endif
+
+obj-$(CONFIG_FSL_DDR_INTERACTIVE) += interactive.o
+obj-$(CONFIG_SYS_FSL_DDRC_GEN1) += mpc85xx_ddr_gen1.o
+obj-$(CONFIG_SYS_FSL_DDRC_GEN2) += mpc85xx_ddr_gen2.o
+obj-$(CONFIG_SYS_FSL_DDRC_GEN3) += mpc85xx_ddr_gen3.o
+obj-$(CONFIG_SYS_FSL_DDRC_86XX_GEN2) += mpc86xx_ddr.o
+obj-$(CONFIG_SYS_FSL_DDRC_ARM_GEN3) += arm_ddr_gen3.o
+obj-$(CONFIG_SYS_FSL_DDRC_GEN4) += fsl_ddr_gen4.o
+obj-$(CONFIG_SYS_FSL_MMDC) += fsl_mmdc.o
diff --git a/roms/u-boot/drivers/ddr/fsl/arm_ddr_gen3.c b/roms/u-boot/drivers/ddr/fsl/arm_ddr_gen3.c
new file mode 100644
index 000000000..629ba6784
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/fsl/arm_ddr_gen3.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Derived from mpc85xx_ddr_gen3.c, removed all workarounds
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/io.h>
+#include <fsl_ddr_sdram.h>
+#include <asm/processor.h>
+#include <fsl_immap.h>
+#include <fsl_ddr.h>
+#include <asm/arch/clock.h>
+#include <linux/delay.h>
+
+#if (CONFIG_CHIP_SELECTS_PER_CTRL > 4)
+#error Invalid setting for CONFIG_CHIP_SELECTS_PER_CTRL
+#endif
+
+/*
+ * regs has the to-be-set values for DDR controller registers
+ * ctrl_num is the DDR controller number
+ * step: 0 goes through the initialization in one pass
+ * 1 sets registers and returns before enabling controller
+ * 2 resumes from step 1 and continues to initialize
+ * The initialization is split into two steps so that the DDR reset signal
+ * can be deasserted in compliance with the JEDEC specs for RDIMMs.
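+ * (Callers that do not need the RDIMM two-step reset sequence simply
+ * pass step 0.)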
+ */ +void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs, + unsigned int ctrl_num, int step) +{ + unsigned int i, bus_width; + struct ccsr_ddr __iomem *ddr; + u32 temp_sdram_cfg; + u32 total_gb_size_per_controller; + int timeout; + + switch (ctrl_num) { + case 0: + ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR; + break; +#if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 1) + case 1: + ddr = (void *)CONFIG_SYS_FSL_DDR2_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 2) + case 2: + ddr = (void *)CONFIG_SYS_FSL_DDR3_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 3) + case 3: + ddr = (void *)CONFIG_SYS_FSL_DDR4_ADDR; + break; +#endif + default: + printf("%s unexpected ctrl_num = %u\n", __func__, ctrl_num); + return; + } + + if (step == 2) + goto step2; + + if (regs->ddr_eor) + ddr_out32(&ddr->eor, regs->ddr_eor); + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (i == 0) { + ddr_out32(&ddr->cs0_bnds, regs->cs[i].bnds); + ddr_out32(&ddr->cs0_config, regs->cs[i].config); + ddr_out32(&ddr->cs0_config_2, regs->cs[i].config_2); + + } else if (i == 1) { + ddr_out32(&ddr->cs1_bnds, regs->cs[i].bnds); + ddr_out32(&ddr->cs1_config, regs->cs[i].config); + ddr_out32(&ddr->cs1_config_2, regs->cs[i].config_2); + + } else if (i == 2) { + ddr_out32(&ddr->cs2_bnds, regs->cs[i].bnds); + ddr_out32(&ddr->cs2_config, regs->cs[i].config); + ddr_out32(&ddr->cs2_config_2, regs->cs[i].config_2); + + } else if (i == 3) { + ddr_out32(&ddr->cs3_bnds, regs->cs[i].bnds); + ddr_out32(&ddr->cs3_config, regs->cs[i].config); + ddr_out32(&ddr->cs3_config_2, regs->cs[i].config_2); + } + } + + ddr_out32(&ddr->timing_cfg_3, regs->timing_cfg_3); + ddr_out32(&ddr->timing_cfg_0, regs->timing_cfg_0); + ddr_out32(&ddr->timing_cfg_1, regs->timing_cfg_1); + ddr_out32(&ddr->timing_cfg_2, regs->timing_cfg_2); + ddr_out32(&ddr->sdram_mode, regs->ddr_sdram_mode); + ddr_out32(&ddr->sdram_mode_2, regs->ddr_sdram_mode_2); + ddr_out32(&ddr->sdram_mode_3, regs->ddr_sdram_mode_3); + ddr_out32(&ddr->sdram_mode_4, regs->ddr_sdram_mode_4); + ddr_out32(&ddr->sdram_mode_5, regs->ddr_sdram_mode_5); + ddr_out32(&ddr->sdram_mode_6, regs->ddr_sdram_mode_6); + ddr_out32(&ddr->sdram_mode_7, regs->ddr_sdram_mode_7); + ddr_out32(&ddr->sdram_mode_8, regs->ddr_sdram_mode_8); + ddr_out32(&ddr->sdram_md_cntl, regs->ddr_sdram_md_cntl); + ddr_out32(&ddr->sdram_interval, regs->ddr_sdram_interval); + ddr_out32(&ddr->sdram_data_init, regs->ddr_data_init); + ddr_out32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl); + ddr_out32(&ddr->timing_cfg_4, regs->timing_cfg_4); + ddr_out32(&ddr->timing_cfg_5, regs->timing_cfg_5); + ddr_out32(&ddr->ddr_zq_cntl, regs->ddr_zq_cntl); + ddr_out32(&ddr->ddr_wrlvl_cntl, regs->ddr_wrlvl_cntl); +#ifndef CONFIG_SYS_FSL_DDR_EMU + /* + * Skip these two registers if running on emulator + * because emulator doesn't have skew between bytes. 
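+ * (Write leveling exists precisely to compensate that per-byte skew.)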
+ */
+
+ if (regs->ddr_wrlvl_cntl_2)
+ ddr_out32(&ddr->ddr_wrlvl_cntl_2, regs->ddr_wrlvl_cntl_2);
+ if (regs->ddr_wrlvl_cntl_3)
+ ddr_out32(&ddr->ddr_wrlvl_cntl_3, regs->ddr_wrlvl_cntl_3);
+#endif
+
+ ddr_out32(&ddr->ddr_sr_cntr, regs->ddr_sr_cntr);
+ ddr_out32(&ddr->ddr_sdram_rcw_1, regs->ddr_sdram_rcw_1);
+ ddr_out32(&ddr->ddr_sdram_rcw_2, regs->ddr_sdram_rcw_2);
+ ddr_out32(&ddr->ddr_cdr1, regs->ddr_cdr1);
+#ifdef CONFIG_DEEP_SLEEP
+ if (is_warm_boot()) {
+ ddr_out32(&ddr->sdram_cfg_2,
+ regs->ddr_sdram_cfg_2 & ~SDRAM_CFG2_D_INIT);
+ ddr_out32(&ddr->init_addr, CONFIG_SYS_SDRAM_BASE);
+ ddr_out32(&ddr->init_ext_addr, DDR_INIT_ADDR_EXT_UIA);
+
+ /* DRAM VRef will not be trained */
+ ddr_out32(&ddr->ddr_cdr2,
+ regs->ddr_cdr2 & ~DDR_CDR2_VREF_TRAIN_EN);
+ } else
+#endif
+ {
+ ddr_out32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2);
+ ddr_out32(&ddr->init_addr, regs->ddr_init_addr);
+ ddr_out32(&ddr->init_ext_addr, regs->ddr_init_ext_addr);
+ ddr_out32(&ddr->ddr_cdr2, regs->ddr_cdr2);
+ }
+ ddr_out32(&ddr->err_disable, regs->err_disable);
+ ddr_out32(&ddr->err_int_en, regs->err_int_en);
+ for (i = 0; i < 32; i++) {
+ if (regs->debug[i]) {
+ debug("Write to debug_%d as %08x\n", i + 1,
+ regs->debug[i]);
+ ddr_out32(&ddr->debug[i], regs->debug[i]);
+ }
+ }
+
+ /*
+ * For RDIMMs, JEDEC spec requires clocks to be stable before reset is
+ * deasserted. Clocks start when any chip select is enabled and clock
+ * control register is set. Because all DDR components are connected to
+ * one reset signal, this needs to be done in two steps. Step 1 is to
+ * get the clocks started. Step 2 resumes after reset signal is
+ * deasserted.
+ */
+ if (step == 1) {
+ udelay(200);
+ return;
+ }
+
+step2:
+ /* Set, but do not enable the memory */
+ temp_sdram_cfg = regs->ddr_sdram_cfg;
+ temp_sdram_cfg &= ~(SDRAM_CFG_MEM_EN);
+ ddr_out32(&ddr->sdram_cfg, temp_sdram_cfg);
+
+ /*
+ * 500 painful microseconds must elapse between
+ * the DDR clock setup and the DDR config enable.
+ * The spec requires 200 us for DDR2 and 500 us for DDR3,
+ * so we use the larger value, 500 us, in all cases.
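+ * (A dsb/isb barrier follows the delay so that all prior register
+ * writes have completed before the controller is enabled.)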
+ */ + udelay(500); + asm volatile("dsb sy;isb"); + +#ifdef CONFIG_DEEP_SLEEP + if (is_warm_boot()) { + /* enter self-refresh */ + temp_sdram_cfg = ddr_in32(&ddr->sdram_cfg_2); + temp_sdram_cfg |= SDRAM_CFG2_FRC_SR; + ddr_out32(&ddr->sdram_cfg_2, temp_sdram_cfg); + /* do board specific memory setup */ + board_mem_sleep_setup(); + + temp_sdram_cfg = (ddr_in32(&ddr->sdram_cfg) | SDRAM_CFG_BI); + } else +#endif + temp_sdram_cfg = ddr_in32(&ddr->sdram_cfg) & ~SDRAM_CFG_BI; + /* Let the controller go */ + ddr_out32(&ddr->sdram_cfg, temp_sdram_cfg | SDRAM_CFG_MEM_EN); + asm volatile("dsb sy;isb"); + + total_gb_size_per_controller = 0; + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (!(regs->cs[i].config & 0x80000000)) + continue; + total_gb_size_per_controller += 1 << ( + ((regs->cs[i].config >> 14) & 0x3) + 2 + + ((regs->cs[i].config >> 8) & 0x7) + 12 + + ((regs->cs[i].config >> 0) & 0x7) + 8 + + 3 - ((regs->ddr_sdram_cfg >> 19) & 0x3) - + 26); /* minus 26 (count of 64M) */ + } + if (regs->cs[0].config & 0x20000000) { + /* 2-way interleaving */ + total_gb_size_per_controller <<= 1; + } + /* + * total memory / bus width = transactions needed + * transactions needed / data rate = seconds + * to add plenty of buffer, double the time + * For example, 2GB on 666MT/s 64-bit bus takes about 402ms + * Let's wait for 800ms + */ + bus_width = 3 - ((ddr_in32(&ddr->sdram_cfg) & SDRAM_CFG_DBW_MASK) + >> SDRAM_CFG_DBW_SHIFT); + timeout = ((total_gb_size_per_controller << (6 - bus_width)) * 100 / + (get_ddr_freq(ctrl_num) >> 20)) << 1; + total_gb_size_per_controller >>= 4; /* shift down to gb size */ + debug("total %d GB\n", total_gb_size_per_controller); + debug("Need to wait up to %d * 10ms\n", timeout); + + /* Poll DDR_SDRAM_CFG_2[D_INIT] bit until auto-data init is done. */ + while ((ddr_in32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT) && + (timeout >= 0)) { + udelay(10000); /* throttle polling rate */ + timeout--; + } + + if (timeout <= 0) + printf("Waiting for D_INIT timeout. Memory may not work.\n"); +#ifdef CONFIG_DEEP_SLEEP + if (is_warm_boot()) { + /* exit self-refresh */ + temp_sdram_cfg = ddr_in32(&ddr->sdram_cfg_2); + temp_sdram_cfg &= ~SDRAM_CFG2_FRC_SR; + ddr_out32(&ddr->sdram_cfg_2, temp_sdram_cfg); + } +#endif +} diff --git a/roms/u-boot/drivers/ddr/fsl/ctrl_regs.c b/roms/u-boot/drivers/ddr/fsl/ctrl_regs.c new file mode 100644 index 000000000..c849ef3a4 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/ctrl_regs.c @@ -0,0 +1,2663 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2008-2016 Freescale Semiconductor, Inc. + * Copyright 2017-2020 NXP Semiconductor + */ + +/* + * Generic driver for Freescale DDR/DDR2/DDR3/DDR4 memory controller. + * Based on code from spd_sdram.c + * Author: James Yang [at freescale.com] + */ + +#include <common.h> +#include <fsl_ddr_sdram.h> +#include <fsl_errata.h> +#include <fsl_ddr.h> +#include <fsl_immap.h> +#include <log.h> +#include <asm/bitops.h> +#include <asm/io.h> +#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \ + defined(CONFIG_ARM) +#include <asm/arch/clock.h> +#endif + +/* + * Determine Rtt value. + * + * This should likely be either board or controller specific. 
+ *
+ * Rtt(nominal) - DDR2:
+ * 0 = Rtt disabled
+ * 1 = 75 ohm
+ * 2 = 150 ohm
+ * 3 = 50 ohm
+ * Rtt(nominal) - DDR3:
+ * 0 = Rtt disabled
+ * 1 = 60 ohm
+ * 2 = 120 ohm
+ * 3 = 40 ohm
+ * 4 = 20 ohm
+ * 5 = 30 ohm
+ *
+ * FIXME: Apparently 8641 needs a value of 2
+ * FIXME: Old code says if 667 MHz or higher, use 3 on 8572
+ *
+ * FIXME: There was some effort down this line earlier:
+ *
+ * unsigned int i;
+ * for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL/2; i++) {
+ * if (popts->dimmslot[i].num_valid_cs
+ * && (popts->cs_local_opts[2*i].odt_rd_cfg
+ * || popts->cs_local_opts[2*i].odt_wr_cfg)) {
+ * rtt = 2;
+ * break;
+ * }
+ * }
+ */
+static inline int fsl_ddr_get_rtt(void)
+{
+ int rtt;
+
+#if defined(CONFIG_SYS_FSL_DDR1)
+ rtt = 0;
+#elif defined(CONFIG_SYS_FSL_DDR2)
+ rtt = 3;
+#else
+ rtt = 0;
+#endif
+
+ return rtt;
+}
+
+#ifdef CONFIG_SYS_FSL_DDR4
+/*
+ * compute CAS write latency according to DDR4 spec
+ * CWL = 9 for <= 1600MT/s
+ * 10 for <= 1866MT/s
+ * 11 for <= 2133MT/s
+ * 12 for <= 2400MT/s
+ * 14 for <= 2667MT/s
+ * 16 for <= 2933MT/s
+ * 18 for higher
+ */
+static inline unsigned int compute_cas_write_latency(
+ const unsigned int ctrl_num)
+{
+ unsigned int cwl;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num);
+
+ if (mclk_ps >= 1250)
+ cwl = 9;
+ else if (mclk_ps >= 1070)
+ cwl = 10;
+ else if (mclk_ps >= 935)
+ cwl = 11;
+ else if (mclk_ps >= 833)
+ cwl = 12;
+ else if (mclk_ps >= 750)
+ cwl = 14;
+ else if (mclk_ps >= 681)
+ cwl = 16;
+ else
+ cwl = 18;
+
+ return cwl;
+}
+#else
+/*
+ * compute the CAS write latency according to DDR3 spec
+ * CWL = 5 if tCK >= 2.5ns
+ * 6 if 2.5ns > tCK >= 1.875ns
+ * 7 if 1.875ns > tCK >= 1.5ns
+ * 8 if 1.5ns > tCK >= 1.25ns
+ * 9 if 1.25ns > tCK >= 1.07ns
+ * 10 if 1.07ns > tCK >= 0.935ns
+ * 11 if 0.935ns > tCK >= 0.833ns
+ * 12 if 0.833ns > tCK >= 0.75ns
+ */
+static inline unsigned int compute_cas_write_latency(
+ const unsigned int ctrl_num)
+{
+ unsigned int cwl;
+ const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num);
+
+ if (mclk_ps >= 2500)
+ cwl = 5;
+ else if (mclk_ps >= 1875)
+ cwl = 6;
+ else if (mclk_ps >= 1500)
+ cwl = 7;
+ else if (mclk_ps >= 1250)
+ cwl = 8;
+ else if (mclk_ps >= 1070)
+ cwl = 9;
+ else if (mclk_ps >= 935)
+ cwl = 10;
+ else if (mclk_ps >= 833)
+ cwl = 11;
+ else if (mclk_ps >= 750)
+ cwl = 12;
+ else {
+ cwl = 12;
+ printf("Warning: CWL is out of range\n");
+ }
+ return cwl;
+}
+#endif
+
+/* Chip Select Configuration (CSn_CONFIG) */
+static void set_csn_config(int dimm_number, int i, fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const dimm_params_t *dimm_params)
+{
+ unsigned int cs_n_en = 0; /* Chip Select enable */
+ unsigned int intlv_en = 0; /* Memory controller interleave enable */
+ unsigned int intlv_ctl = 0; /* Interleaving control */
+ unsigned int ap_n_en = 0; /* Chip select n auto-precharge enable */
+ unsigned int odt_rd_cfg = 0; /* ODT for reads configuration */
+ unsigned int odt_wr_cfg = 0; /* ODT for writes configuration */
+ unsigned int ba_bits_cs_n = 0; /* Num of bank bits for SDRAM on CSn */
+ unsigned int row_bits_cs_n = 0; /* Num of row bits for SDRAM on CSn */
+ unsigned int col_bits_cs_n = 0; /* Num of col bits for SDRAM on CSn */
+ int go_config = 0;
+#ifdef CONFIG_SYS_FSL_DDR4
+ unsigned int bg_bits_cs_n = 0; /* Num of bank group bits */
+#else
+ unsigned int n_banks_per_sdram_device;
+#endif
+
+ /* Compute CS_CONFIG only for existing ranks of each DIMM.
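+ * The chip-select to rank mapping depends on how many ranks each
+ * DIMM slot carries; see the per-CS cases below.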
*/ + switch (i) { + case 0: + if (dimm_params[dimm_number].n_ranks > 0) { + go_config = 1; + /* These fields only available in CS0_CONFIG */ + if (!popts->memctl_interleaving) + break; + switch (popts->memctl_interleaving_mode) { + case FSL_DDR_256B_INTERLEAVING: + case FSL_DDR_CACHE_LINE_INTERLEAVING: + case FSL_DDR_PAGE_INTERLEAVING: + case FSL_DDR_BANK_INTERLEAVING: + case FSL_DDR_SUPERBANK_INTERLEAVING: + intlv_en = popts->memctl_interleaving; + intlv_ctl = popts->memctl_interleaving_mode; + break; + default: + break; + } + } + break; + case 1: + if ((dimm_number == 0 && dimm_params[0].n_ranks > 1) || \ + (dimm_number == 1 && dimm_params[1].n_ranks > 0)) + go_config = 1; + break; + case 2: + if ((dimm_number == 0 && dimm_params[0].n_ranks > 2) || \ + (dimm_number >= 1 && dimm_params[dimm_number].n_ranks > 0)) + go_config = 1; + break; + case 3: + if ((dimm_number == 0 && dimm_params[0].n_ranks > 3) || \ + (dimm_number == 1 && dimm_params[1].n_ranks > 1) || \ + (dimm_number == 3 && dimm_params[3].n_ranks > 0)) + go_config = 1; + break; + default: + break; + } + if (go_config) { + cs_n_en = 1; + ap_n_en = popts->cs_local_opts[i].auto_precharge; + odt_rd_cfg = popts->cs_local_opts[i].odt_rd_cfg; + odt_wr_cfg = popts->cs_local_opts[i].odt_wr_cfg; +#ifdef CONFIG_SYS_FSL_DDR4 + ba_bits_cs_n = dimm_params[dimm_number].bank_addr_bits; + bg_bits_cs_n = dimm_params[dimm_number].bank_group_bits; +#else + n_banks_per_sdram_device + = dimm_params[dimm_number].n_banks_per_sdram_device; + ba_bits_cs_n = __ilog2(n_banks_per_sdram_device) - 2; +#endif + row_bits_cs_n = dimm_params[dimm_number].n_row_addr - 12; + col_bits_cs_n = dimm_params[dimm_number].n_col_addr - 8; + } + ddr->cs[i].config = (0 + | ((cs_n_en & 0x1) << 31) + | ((intlv_en & 0x3) << 29) + | ((intlv_ctl & 0xf) << 24) + | ((ap_n_en & 0x1) << 23) + + /* XXX: some implementation only have 1 bit starting at left */ + | ((odt_rd_cfg & 0x7) << 20) + + /* XXX: Some implementation only have 1 bit starting at left */ + | ((odt_wr_cfg & 0x7) << 16) + + | ((ba_bits_cs_n & 0x3) << 14) + | ((row_bits_cs_n & 0x7) << 8) +#ifdef CONFIG_SYS_FSL_DDR4 + | ((bg_bits_cs_n & 0x3) << 4) +#endif + | ((col_bits_cs_n & 0x7) << 0) + ); + debug("FSLDDR: cs[%d]_config = 0x%08x\n", i,ddr->cs[i].config); +} + +/* Chip Select Configuration 2 (CSn_CONFIG_2) */ +/* FIXME: 8572 */ +static void set_csn_config_2(int i, fsl_ddr_cfg_regs_t *ddr) +{ + unsigned int pasr_cfg = 0; /* Partial array self refresh config */ + + ddr->cs[i].config_2 = ((pasr_cfg & 7) << 24); + debug("FSLDDR: cs[%d]_config_2 = 0x%08x\n", i, ddr->cs[i].config_2); +} + +/* -3E = 667 CL5, -25 = CL6 800, -25E = CL5 800 */ + +#if !defined(CONFIG_SYS_FSL_DDR1) +/* + * Check DIMM configuration, return 2 if quad-rank or two dual-rank + * Return 1 if other two slots configuration. Return 0 if single slot. + */ +static inline int avoid_odt_overlap(const dimm_params_t *dimm_params) +{ +#if CONFIG_DIMM_SLOTS_PER_CTLR == 1 + if (dimm_params[0].n_ranks == 4) + return 2; +#endif + +#if CONFIG_DIMM_SLOTS_PER_CTLR == 2 + if ((dimm_params[0].n_ranks == 2) && + (dimm_params[1].n_ranks == 2)) + return 2; + +#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE + if (dimm_params[0].n_ranks == 4) + return 2; +#endif + + if ((dimm_params[0].n_ranks != 0) && + (dimm_params[2].n_ranks != 0)) + return 1; +#endif + return 0; +} + +/* + * DDR SDRAM Timing Configuration 0 (TIMING_CFG_0) + * + * Avoid writing for DDR I. The new PQ38 DDR controller + * dreams up non-zero default values to be backwards compatible. 
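+ * (This function is compiled out entirely for DDR1; see the
+ * surrounding #if !defined(CONFIG_SYS_FSL_DDR1).)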
+ */
+static void set_timing_cfg_0(const unsigned int ctrl_num,
+ fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const dimm_params_t *dimm_params)
+{
+ unsigned char trwt_mclk = 0; /* Read-to-write turnaround */
+ unsigned char twrt_mclk = 0; /* Write-to-read turnaround */
+ /* 7.5 ns on -3E; 0 means WL - CL + BL/2 + 1 */
+ unsigned char trrt_mclk = 0; /* Read-to-read turnaround */
+ unsigned char twwt_mclk = 0; /* Write-to-write turnaround */
+
+ /* Active powerdown exit timing (tXARD and tXARDS). */
+ unsigned char act_pd_exit_mclk;
+ /* Precharge powerdown exit timing (tXP). */
+ unsigned char pre_pd_exit_mclk;
+ /* ODT powerdown exit timing (tAXPD). */
+ unsigned char taxpd_mclk = 0;
+ /* Mode register set cycle time (tMRD). */
+ unsigned char tmrd_mclk;
+#if defined(CONFIG_SYS_FSL_DDR4) || defined(CONFIG_SYS_FSL_DDR3)
+ const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num);
+#endif
+
+#ifdef CONFIG_SYS_FSL_DDR4
+ /* tXP=max(4nCK, 6ns) */
+ int txp = max((int)mclk_ps * 4, 6000); /* unit=ps */
+ unsigned int data_rate = get_ddr_freq(ctrl_num);
+
+ /* for faster clock, need more time for data setup */
+ trwt_mclk = (data_rate/1000000 > 1900) ? 3 : 2;
+
+ /*
+ * for single quad-rank DIMM and two-slot DIMMs
+ * to avoid ODT overlap
+ */
+ switch (avoid_odt_overlap(dimm_params)) {
+ case 2:
+ twrt_mclk = 2;
+ twwt_mclk = 2;
+ trrt_mclk = 2;
+ break;
+ default:
+ twrt_mclk = 1;
+ twwt_mclk = 1;
+ trrt_mclk = 0;
+ break;
+ }
+
+ act_pd_exit_mclk = picos_to_mclk(ctrl_num, txp);
+ pre_pd_exit_mclk = act_pd_exit_mclk;
+ /*
+ * MRS_CYC = max(tMRD, tMOD)
+ * tMRD = 8nCK, tMOD = max(24nCK, 15ns)
+ */
+ tmrd_mclk = max(24U, picos_to_mclk(ctrl_num, 15000));
+#elif defined(CONFIG_SYS_FSL_DDR3)
+ unsigned int data_rate = get_ddr_freq(ctrl_num);
+ int txp;
+ unsigned int ip_rev;
+ int odt_overlap;
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * The DDR3 spec does not define tXARD, so we use tXP instead:
+ * tXP=max(3nCK, 7.5ns) for DDR3-800, 1066
+ * max(3nCK, 6ns) for DDR3-1333, 1600, 1866, 2133
+ * The spec does not define tAXPD either; we use
+ * tAXPD=1, which still needs design confirmation.
+ */
+ txp = max((int)mclk_ps * 3, (mclk_ps > 1540 ? 7500 : 6000));
+
+ ip_rev = fsl_ddr_get_version(ctrl_num);
+ if (ip_rev >= 0x40700) {
+ /*
+ * MRS_CYC = max(tMRD, tMOD)
+ * tMRD = 4nCK (8nCK for RDIMM)
+ * tMOD = max(12nCK, 15ns)
+ */
+ tmrd_mclk = max((unsigned int)12,
+ picos_to_mclk(ctrl_num, 15000));
+ } else {
+ /*
+ * MRS_CYC = tMRD
+ * tMRD = 4nCK (8nCK for RDIMM)
+ */
+ if (popts->registered_dimm_en)
+ tmrd_mclk = 8;
+ else
+ tmrd_mclk = 4;
+ }
+
+ /* set the turnaround time */
+
+ /*
+ * for single quad-rank DIMM and two-slot DIMMs
+ * to avoid ODT overlap
+ */
+ odt_overlap = avoid_odt_overlap(dimm_params);
+ switch (odt_overlap) {
+ case 2:
+ twwt_mclk = 2;
+ trrt_mclk = 1;
+ break;
+ case 1:
+ twwt_mclk = 1;
+ trrt_mclk = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* for faster clock, need more time for data setup */
+ trwt_mclk = (data_rate/1000000 > 1800) ? 2 : 1;
+
+ if ((data_rate/1000000 > 1150) || (popts->memctl_interleaving))
+ twrt_mclk = 1;
+
+ if (popts->dynamic_power == 0) { /* powerdown is not used */
+ act_pd_exit_mclk = 1;
+ pre_pd_exit_mclk = 1;
+ taxpd_mclk = 1;
+ } else {
+ /* act_pd_exit_mclk = tXARD, see above */
+ act_pd_exit_mclk = picos_to_mclk(ctrl_num, txp);
+ /* Mode register MR0[A12] is '1' - fast exit */
+ pre_pd_exit_mclk = act_pd_exit_mclk;
+ taxpd_mclk = 1;
+ }
+#else /* CONFIG_SYS_FSL_DDR2 */
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * tXARD = 2 for DDR2 + * tXP=2 + * tAXPD=8 + */ + act_pd_exit_mclk = 2; + pre_pd_exit_mclk = 2; + taxpd_mclk = 8; + tmrd_mclk = 2; +#endif + + if (popts->trwt_override) + trwt_mclk = popts->trwt; + + ddr->timing_cfg_0 = (0 + | ((trwt_mclk & 0x3) << 30) /* RWT */ + | ((twrt_mclk & 0x3) << 28) /* WRT */ + | ((trrt_mclk & 0x3) << 26) /* RRT */ + | ((twwt_mclk & 0x3) << 24) /* WWT */ + | ((act_pd_exit_mclk & 0xf) << 20) /* ACT_PD_EXIT */ + | ((pre_pd_exit_mclk & 0xF) << 16) /* PRE_PD_EXIT */ + | ((taxpd_mclk & 0xf) << 8) /* ODT_PD_EXIT */ + | ((tmrd_mclk & 0x1f) << 0) /* MRS_CYC */ + ); + debug("FSLDDR: timing_cfg_0 = 0x%08x\n", ddr->timing_cfg_0); +} +#endif /* !defined(CONFIG_SYS_FSL_DDR1) */ + +/* DDR SDRAM Timing Configuration 3 (TIMING_CFG_3) */ +static void set_timing_cfg_3(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + unsigned int cas_latency, + unsigned int additive_latency) +{ + /* Extended precharge to activate interval (tRP) */ + unsigned int ext_pretoact = 0; + /* Extended Activate to precharge interval (tRAS) */ + unsigned int ext_acttopre = 0; + /* Extended activate to read/write interval (tRCD) */ + unsigned int ext_acttorw = 0; + /* Extended refresh recovery time (tRFC) */ + unsigned int ext_refrec; + /* Extended MCAS latency from READ cmd */ + unsigned int ext_caslat = 0; + /* Extended additive latency */ + unsigned int ext_add_lat = 0; + /* Extended last data to precharge interval (tWR) */ + unsigned int ext_wrrec = 0; + /* Control Adjust */ + unsigned int cntl_adj = 0; + + ext_pretoact = picos_to_mclk(ctrl_num, common_dimm->trp_ps) >> 4; + ext_acttopre = picos_to_mclk(ctrl_num, common_dimm->tras_ps) >> 4; + ext_acttorw = picos_to_mclk(ctrl_num, common_dimm->trcd_ps) >> 4; + ext_caslat = (2 * cas_latency - 1) >> 4; + ext_add_lat = additive_latency >> 4; +#ifdef CONFIG_SYS_FSL_DDR4 + ext_refrec = (picos_to_mclk(ctrl_num, common_dimm->trfc1_ps) - 8) >> 4; +#else + ext_refrec = (picos_to_mclk(ctrl_num, common_dimm->trfc_ps) - 8) >> 4; + /* ext_wrrec only deals with 16 clock and above, or 14 with OTF */ +#endif + ext_wrrec = (picos_to_mclk(ctrl_num, common_dimm->twr_ps) + + (popts->otf_burst_chop_en ? 
2 : 0)) >> 4; + + ddr->timing_cfg_3 = (0 + | ((ext_pretoact & 0x1) << 28) + | ((ext_acttopre & 0x3) << 24) + | ((ext_acttorw & 0x1) << 22) + | ((ext_refrec & 0x3F) << 16) + | ((ext_caslat & 0x3) << 12) + | ((ext_add_lat & 0x1) << 10) + | ((ext_wrrec & 0x1) << 8) + | ((cntl_adj & 0x7) << 0) + ); + debug("FSLDDR: timing_cfg_3 = 0x%08x\n", ddr->timing_cfg_3); +} + +/* DDR SDRAM Timing Configuration 1 (TIMING_CFG_1) */ +static void set_timing_cfg_1(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + unsigned int cas_latency) +{ + /* Precharge-to-activate interval (tRP) */ + unsigned char pretoact_mclk; + /* Activate to precharge interval (tRAS) */ + unsigned char acttopre_mclk; + /* Activate to read/write interval (tRCD) */ + unsigned char acttorw_mclk; + /* CASLAT */ + unsigned char caslat_ctrl; + /* Refresh recovery time (tRFC) ; trfc_low */ + unsigned char refrec_ctrl; + /* Last data to precharge minimum interval (tWR) */ + unsigned char wrrec_mclk; + /* Activate-to-activate interval (tRRD) */ + unsigned char acttoact_mclk; + /* Last write data pair to read command issue interval (tWTR) */ + unsigned char wrtord_mclk; +#ifdef CONFIG_SYS_FSL_DDR4 + /* DDR4 supports 10, 12, 14, 16, 18, 20, 24 */ + static const u8 wrrec_table[] = { + 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + 12, 12, 14, 14, 16, + 16, 18, 18, 20, 20, + 24, 24, 24, 24}; +#else + /* DDR_SDRAM_MODE doesn't support 9,11,13,15 */ + static const u8 wrrec_table[] = { + 1, 2, 3, 4, 5, 6, 7, 8, 10, 10, 12, 12, 14, 14, 0, 0}; +#endif + + pretoact_mclk = picos_to_mclk(ctrl_num, common_dimm->trp_ps); + acttopre_mclk = picos_to_mclk(ctrl_num, common_dimm->tras_ps); + acttorw_mclk = picos_to_mclk(ctrl_num, common_dimm->trcd_ps); + + /* + * Translate CAS Latency to a DDR controller field value: + * + * CAS Lat DDR I DDR II Ctrl + * Clocks SPD Bit SPD Bit Value + * ------- ------- ------- ----- + * 1.0 0 0001 + * 1.5 1 0010 + * 2.0 2 2 0011 + * 2.5 3 0100 + * 3.0 4 3 0101 + * 3.5 5 0110 + * 4.0 4 0111 + * 4.5 1000 + * 5.0 5 1001 + */ +#if defined(CONFIG_SYS_FSL_DDR1) + caslat_ctrl = (cas_latency + 1) & 0x07; +#elif defined(CONFIG_SYS_FSL_DDR2) + caslat_ctrl = 2 * cas_latency - 1; +#else + /* + * if the CAS latency more than 8 cycle, + * we need set extend bit for it at + * TIMING_CFG_3[EXT_CASLAT] + */ + if (fsl_ddr_get_version(ctrl_num) <= 0x40400) + caslat_ctrl = 2 * cas_latency - 1; + else + caslat_ctrl = (cas_latency - 1) << 1; +#endif + +#ifdef CONFIG_SYS_FSL_DDR4 + refrec_ctrl = picos_to_mclk(ctrl_num, common_dimm->trfc1_ps) - 8; + wrrec_mclk = picos_to_mclk(ctrl_num, common_dimm->twr_ps); + acttoact_mclk = max(picos_to_mclk(ctrl_num, common_dimm->trrds_ps), 4U); + wrtord_mclk = max(2U, picos_to_mclk(ctrl_num, 2500)); + if ((wrrec_mclk < 1) || (wrrec_mclk > 24)) + printf("Error: WRREC doesn't support %d clocks\n", wrrec_mclk); + else + wrrec_mclk = wrrec_table[wrrec_mclk - 1]; +#else + refrec_ctrl = picos_to_mclk(ctrl_num, common_dimm->trfc_ps) - 8; + wrrec_mclk = picos_to_mclk(ctrl_num, common_dimm->twr_ps); + acttoact_mclk = picos_to_mclk(ctrl_num, common_dimm->trrd_ps); + wrtord_mclk = picos_to_mclk(ctrl_num, common_dimm->twtr_ps); + if ((wrrec_mclk < 1) || (wrrec_mclk > 16)) + printf("Error: WRREC doesn't support %d clocks\n", wrrec_mclk); + else + wrrec_mclk = wrrec_table[wrrec_mclk - 1]; +#endif + if (popts->otf_burst_chop_en) + wrrec_mclk += 2; + + /* + * JEDEC has min requirement for tRRD + */ +#if defined(CONFIG_SYS_FSL_DDR3) + if 
(acttoact_mclk < 4) + acttoact_mclk = 4; +#endif + /* + * JEDEC has some min requirements for tWTR + */ +#if defined(CONFIG_SYS_FSL_DDR2) + if (wrtord_mclk < 2) + wrtord_mclk = 2; +#elif defined(CONFIG_SYS_FSL_DDR3) + if (wrtord_mclk < 4) + wrtord_mclk = 4; +#endif + if (popts->otf_burst_chop_en) + wrtord_mclk += 2; + + ddr->timing_cfg_1 = (0 + | ((pretoact_mclk & 0x0F) << 28) + | ((acttopre_mclk & 0x0F) << 24) + | ((acttorw_mclk & 0xF) << 20) + | ((caslat_ctrl & 0xF) << 16) + | ((refrec_ctrl & 0xF) << 12) + | ((wrrec_mclk & 0x0F) << 8) + | ((acttoact_mclk & 0x0F) << 4) + | ((wrtord_mclk & 0x0F) << 0) + ); + debug("FSLDDR: timing_cfg_1 = 0x%08x\n", ddr->timing_cfg_1); +} + +/* DDR SDRAM Timing Configuration 2 (TIMING_CFG_2) */ +static void set_timing_cfg_2(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + unsigned int cas_latency, + unsigned int additive_latency) +{ + /* Additive latency */ + unsigned char add_lat_mclk; + /* CAS-to-preamble override */ + unsigned short cpo; + /* Write latency */ + unsigned char wr_lat; + /* Read to precharge (tRTP) */ + unsigned char rd_to_pre; + /* Write command to write data strobe timing adjustment */ + unsigned char wr_data_delay; + /* Minimum CKE pulse width (tCKE) */ + unsigned char cke_pls; + /* Window for four activates (tFAW) */ + unsigned short four_act; +#ifdef CONFIG_SYS_FSL_DDR3 + const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num); +#endif + + /* FIXME add check that this must be less than acttorw_mclk */ + add_lat_mclk = additive_latency; + cpo = popts->cpo_override; + +#if defined(CONFIG_SYS_FSL_DDR1) + /* + * This is a lie. It should really be 1, but if it is + * set to 1, bits overlap into the old controller's + * otherwise unused ACSM field. If we leave it 0, then + * the HW will magically treat it as 1 for DDR 1. Oh Yea. + */ + wr_lat = 0; +#elif defined(CONFIG_SYS_FSL_DDR2) + wr_lat = cas_latency - 1; +#else + wr_lat = compute_cas_write_latency(ctrl_num); +#endif + +#ifdef CONFIG_SYS_FSL_DDR4 + rd_to_pre = picos_to_mclk(ctrl_num, 7500); +#else + rd_to_pre = picos_to_mclk(ctrl_num, common_dimm->trtp_ps); +#endif + /* + * JEDEC has some min requirements for tRTP + */ +#if defined(CONFIG_SYS_FSL_DDR2) + if (rd_to_pre < 2) + rd_to_pre = 2; +#elif defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + if (rd_to_pre < 4) + rd_to_pre = 4; +#endif + if (popts->otf_burst_chop_en) + rd_to_pre += 2; /* according to UM */ + + wr_data_delay = popts->write_data_delay; +#ifdef CONFIG_SYS_FSL_DDR4 + cpo = 0; + cke_pls = max(3U, picos_to_mclk(ctrl_num, 5000)); +#elif defined(CONFIG_SYS_FSL_DDR3) + /* + * cke pulse = max(3nCK, 7.5ns) for DDR3-800 + * max(3nCK, 5.625ns) for DDR3-1066, 1333 + * max(3nCK, 5ns) for DDR3-1600, 1866, 2133 + */ + cke_pls = max(3U, picos_to_mclk(ctrl_num, mclk_ps > 1870 ? 7500 : + (mclk_ps > 1245 ? 
5625 : 5000))); +#else + cke_pls = FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR; +#endif + four_act = picos_to_mclk(ctrl_num, + popts->tfaw_window_four_activates_ps); + + ddr->timing_cfg_2 = (0 + | ((add_lat_mclk & 0xf) << 28) + | ((cpo & 0x1f) << 23) + | ((wr_lat & 0xf) << 19) + | (((wr_lat & 0x10) >> 4) << 18) + | ((rd_to_pre & RD_TO_PRE_MASK) << RD_TO_PRE_SHIFT) + | ((wr_data_delay & WR_DATA_DELAY_MASK) << WR_DATA_DELAY_SHIFT) + | ((cke_pls & 0x7) << 6) + | ((four_act & 0x3f) << 0) + ); + debug("FSLDDR: timing_cfg_2 = 0x%08x\n", ddr->timing_cfg_2); +} + +/* DDR SDRAM Register Control Word */ +static void set_ddr_sdram_rcw(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm) +{ + unsigned int ddr_freq = get_ddr_freq(ctrl_num) / 1000000; + unsigned int rc0a, rc0f; + + if (common_dimm->all_dimms_registered && + !common_dimm->all_dimms_unbuffered) { + if (popts->rcw_override) { + ddr->ddr_sdram_rcw_1 = popts->rcw_1; + ddr->ddr_sdram_rcw_2 = popts->rcw_2; + ddr->ddr_sdram_rcw_3 = popts->rcw_3; + } else { + rc0a = ddr_freq > 3200 ? 0x7 : + (ddr_freq > 2933 ? 0x6 : + (ddr_freq > 2666 ? 0x5 : + (ddr_freq > 2400 ? 0x4 : + (ddr_freq > 2133 ? 0x3 : + (ddr_freq > 1866 ? 0x2 : + (ddr_freq > 1600 ? 1 : 0)))))); + rc0f = ddr_freq > 3200 ? 0x3 : + (ddr_freq > 2400 ? 0x2 : + (ddr_freq > 2133 ? 0x1 : 0)); + ddr->ddr_sdram_rcw_1 = + common_dimm->rcw[0] << 28 | \ + common_dimm->rcw[1] << 24 | \ + common_dimm->rcw[2] << 20 | \ + common_dimm->rcw[3] << 16 | \ + common_dimm->rcw[4] << 12 | \ + common_dimm->rcw[5] << 8 | \ + common_dimm->rcw[6] << 4 | \ + common_dimm->rcw[7]; + ddr->ddr_sdram_rcw_2 = + common_dimm->rcw[8] << 28 | \ + common_dimm->rcw[9] << 24 | \ + rc0a << 20 | \ + common_dimm->rcw[11] << 16 | \ + common_dimm->rcw[12] << 12 | \ + common_dimm->rcw[13] << 8 | \ + common_dimm->rcw[14] << 4 | \ + rc0f; + ddr->ddr_sdram_rcw_3 = + ((ddr_freq - 1260 + 19) / 20) << 8; + } + debug("FSLDDR: ddr_sdram_rcw_1 = 0x%08x\n", + ddr->ddr_sdram_rcw_1); + debug("FSLDDR: ddr_sdram_rcw_2 = 0x%08x\n", + ddr->ddr_sdram_rcw_2); + debug("FSLDDR: ddr_sdram_rcw_3 = 0x%08x\n", + ddr->ddr_sdram_rcw_3); + } +} + +/* DDR SDRAM control configuration (DDR_SDRAM_CFG) */ +static void set_ddr_sdram_cfg(fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm) +{ + unsigned int mem_en; /* DDR SDRAM interface logic enable */ + unsigned int sren; /* Self refresh enable (during sleep) */ + unsigned int ecc_en; /* ECC enable. */ + unsigned int rd_en; /* Registered DIMM enable */ + unsigned int sdram_type; /* Type of SDRAM */ + unsigned int dyn_pwr; /* Dynamic power management mode */ + unsigned int dbw; /* DRAM dta bus width */ + unsigned int eight_be = 0; /* 8-beat burst enable, DDR2 is zero */ + unsigned int ncap = 0; /* Non-concurrent auto-precharge */ + unsigned int threet_en; /* Enable 3T timing */ + unsigned int twot_en; /* Enable 2T timing */ + unsigned int ba_intlv_ctl; /* Bank (CS) interleaving control */ + unsigned int x32_en = 0; /* x32 enable */ + unsigned int pchb8 = 0; /* precharge bit 8 enable */ + unsigned int hse; /* Global half strength override */ + unsigned int acc_ecc_en = 0; /* Accumulated ECC enable */ + unsigned int mem_halt = 0; /* memory controller halt */ + unsigned int bi = 0; /* Bypass initialization */ + + mem_en = 1; + sren = popts->self_refresh_in_sleep; + if (common_dimm->all_dimms_ecc_capable) { + /* Allow setting of ECC only if all DIMMs are ECC. 
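+ * A single non-ECC DIMM forces ECC off for the whole controller.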
*/ + ecc_en = popts->ecc_mode; + } else { + ecc_en = 0; + } + + if (common_dimm->all_dimms_registered && + !common_dimm->all_dimms_unbuffered) { + rd_en = 1; + twot_en = 0; + } else { + rd_en = 0; + twot_en = popts->twot_en; + } + + sdram_type = CONFIG_FSL_SDRAM_TYPE; + + dyn_pwr = popts->dynamic_power; + dbw = popts->data_bus_width; + /* 8-beat burst enable DDR-III case + * we must clear it when use the on-the-fly mode, + * must set it when use the 32-bits bus mode. + */ + if ((sdram_type == SDRAM_TYPE_DDR3) || + (sdram_type == SDRAM_TYPE_DDR4)) { + if (popts->burst_length == DDR_BL8) + eight_be = 1; + if (popts->burst_length == DDR_OTF) + eight_be = 0; + if (dbw == 0x1) + eight_be = 1; + } + + threet_en = popts->threet_en; + ba_intlv_ctl = popts->ba_intlv_ctl; + hse = popts->half_strength_driver_enable; + + /* set when ddr bus width < 64 */ + acc_ecc_en = (dbw != 0 && ecc_en == 1) ? 1 : 0; + + ddr->ddr_sdram_cfg = (0 + | ((mem_en & 0x1) << 31) + | ((sren & 0x1) << 30) + | ((ecc_en & 0x1) << 29) + | ((rd_en & 0x1) << 28) + | ((sdram_type & 0x7) << 24) + | ((dyn_pwr & 0x1) << 21) + | ((dbw & 0x3) << 19) + | ((eight_be & 0x1) << 18) + | ((ncap & 0x1) << 17) + | ((threet_en & 0x1) << 16) + | ((twot_en & 0x1) << 15) + | ((ba_intlv_ctl & 0x7F) << 8) + | ((x32_en & 0x1) << 5) + | ((pchb8 & 0x1) << 4) + | ((hse & 0x1) << 3) + | ((acc_ecc_en & 0x1) << 2) + | ((mem_halt & 0x1) << 1) + | ((bi & 0x1) << 0) + ); + debug("FSLDDR: ddr_sdram_cfg = 0x%08x\n", ddr->ddr_sdram_cfg); +} + +/* DDR SDRAM control configuration 2 (DDR_SDRAM_CFG_2) */ +static void set_ddr_sdram_cfg_2(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const unsigned int unq_mrs_en) +{ + unsigned int frc_sr = 0; /* Force self refresh */ + unsigned int sr_ie = 0; /* Self-refresh interrupt enable */ + unsigned int odt_cfg = 0; /* ODT configuration */ + unsigned int num_pr; /* Number of posted refreshes */ + unsigned int slow = 0; /* DDR will be run less than 1250 */ + unsigned int x4_en = 0; /* x4 DRAM enable */ + unsigned int obc_cfg; /* On-The-Fly Burst Chop Cfg */ + unsigned int ap_en; /* Address Parity Enable */ + unsigned int d_init; /* DRAM data initialization */ + unsigned int rcw_en = 0; /* Register Control Word Enable */ + unsigned int md_en = 0; /* Mirrored DIMM Enable */ + unsigned int qd_en = 0; /* quad-rank DIMM Enable */ + int i; +#ifndef CONFIG_SYS_FSL_DDR4 + unsigned int dll_rst_dis = 1; /* DLL reset disable */ + unsigned int dqs_cfg; /* DQS configuration */ + + dqs_cfg = popts->dqs_config; +#endif + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (popts->cs_local_opts[i].odt_rd_cfg + || popts->cs_local_opts[i].odt_wr_cfg) { + odt_cfg = SDRAM_CFG2_ODT_ONLY_READ; + break; + } + } + sr_ie = popts->self_refresh_interrupt_en; + num_pr = popts->package_3ds + 1; + + /* + * 8572 manual says + * {TIMING_CFG_1[PRETOACT] + * + [DDR_SDRAM_CFG_2[NUM_PR] + * * ({EXT_REFREC || REFREC} + 8 + 2)]} + * << DDR_SDRAM_INTERVAL[REFINT] + */ +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + obc_cfg = popts->otf_burst_chop_en; +#else + obc_cfg = 0; +#endif + +#if (CONFIG_SYS_FSL_DDR_VER >= FSL_DDR_VER_4_7) + slow = get_ddr_freq(ctrl_num) < 1249000000; +#endif + + if (popts->registered_dimm_en) + rcw_en = 1; + + /* DDR4 can have address parity for UDIMM and discrete */ + if ((CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4) && + (!popts->registered_dimm_en)) { + ap_en = 0; + } else { + ap_en = popts->ap_en; + } + + x4_en = popts->x4_en ? 
1 : 0; + +#if defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER) + /* Use the DDR controller to auto initialize memory. */ + d_init = popts->ecc_init_using_memctl; + ddr->ddr_data_init = CONFIG_MEM_INIT_VALUE; + debug("DDR: ddr_data_init = 0x%08x\n", ddr->ddr_data_init); +#else + /* Memory will be initialized via DMA, or not at all. */ + d_init = 0; +#endif + +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + md_en = popts->mirrored_dimm; +#endif + qd_en = popts->quad_rank_present ? 1 : 0; + ddr->ddr_sdram_cfg_2 = (0 + | ((frc_sr & 0x1) << 31) + | ((sr_ie & 0x1) << 30) +#ifndef CONFIG_SYS_FSL_DDR4 + | ((dll_rst_dis & 0x1) << 29) + | ((dqs_cfg & 0x3) << 26) +#endif + | ((odt_cfg & 0x3) << 21) + | ((num_pr & 0xf) << 12) + | ((slow & 1) << 11) + | (x4_en << 10) + | (qd_en << 9) + | (unq_mrs_en << 8) + | ((obc_cfg & 0x1) << 6) + | ((ap_en & 0x1) << 5) + | ((d_init & 0x1) << 4) + | ((rcw_en & 0x1) << 2) + | ((md_en & 0x1) << 0) + ); + debug("FSLDDR: ddr_sdram_cfg_2 = 0x%08x\n", ddr->ddr_sdram_cfg_2); +} + +#ifdef CONFIG_SYS_FSL_DDR4 +/* DDR SDRAM Mode configuration 2 (DDR_SDRAM_MODE_2) */ +static void set_ddr_sdram_mode_2(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + const unsigned int unq_mrs_en) +{ + unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */ + unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */ + int i; + unsigned int wr_crc = 0; /* Disable */ + unsigned int rtt_wr = 0; /* Rtt_WR - dynamic ODT off */ + unsigned int srt = 0; /* self-refresh temerature, normal range */ + unsigned int cwl = compute_cas_write_latency(ctrl_num) - 9; + unsigned int mpr = 0; /* serial */ + unsigned int wc_lat; + const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num); + + if (popts->rtt_override) + rtt_wr = popts->rtt_wr_override_value; + else + rtt_wr = popts->cs_local_opts[0].odt_rtt_wr; + + if (common_dimm->extended_op_srt) + srt = common_dimm->extended_op_srt; + + esdmode2 = (0 + | ((wr_crc & 0x1) << 12) + | ((rtt_wr & 0x3) << 9) + | ((srt & 0x3) << 6) + | ((cwl & 0x7) << 3)); + + if (mclk_ps >= 1250) + wc_lat = 0; + else if (mclk_ps >= 833) + wc_lat = 1; + else + wc_lat = 2; + + esdmode3 = (0 + | ((mpr & 0x3) << 11) + | ((wc_lat & 0x3) << 9)); + + ddr->ddr_sdram_mode_2 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + debug("FSLDDR: ddr_sdram_mode_2 = 0x%08x\n", ddr->ddr_sdram_mode_2); + + if (unq_mrs_en) { /* unique mode registers are supported */ + for (i = 1; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (popts->rtt_override) + rtt_wr = popts->rtt_wr_override_value; + else + rtt_wr = popts->cs_local_opts[i].odt_rtt_wr; + + esdmode2 &= 0xF9FF; /* clear bit 10, 9 */ + esdmode2 |= (rtt_wr & 0x3) << 9; + switch (i) { + case 1: + ddr->ddr_sdram_mode_4 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + break; + case 2: + ddr->ddr_sdram_mode_6 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + break; + case 3: + ddr->ddr_sdram_mode_8 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + break; + } + } + debug("FSLDDR: ddr_sdram_mode_4 = 0x%08x\n", + ddr->ddr_sdram_mode_4); + debug("FSLDDR: ddr_sdram_mode_6 = 0x%08x\n", + ddr->ddr_sdram_mode_6); + debug("FSLDDR: ddr_sdram_mode_8 = 0x%08x\n", + ddr->ddr_sdram_mode_8); + } +} +#elif defined(CONFIG_SYS_FSL_DDR3) +/* DDR SDRAM Mode configuration 2 (DDR_SDRAM_MODE_2) */ +static void set_ddr_sdram_mode_2(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t 
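In the DDR4 branch above, the write-command-latency code placed in the extended mode register scales with the memory clock period. The threshold logic in isolation (the helper name is invented):

#include <stdio.h>

static unsigned int wc_lat_from_mclk_ps(unsigned int mclk_ps)
{
	if (mclk_ps >= 1250)	/* 800 MHz memory clock or slower */
		return 0;
	if (mclk_ps >= 833)	/* up to a 1200 MHz memory clock */
		return 1;
	return 2;		/* faster than 1200 MHz */
}

int main(void)
{
	/* 833 ps is a 1200 MHz memory clock (2400 MT/s): code 1 */
	printf("wc_lat = %u\n", wc_lat_from_mclk_ps(833));
	return 0;
}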
*ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + const unsigned int unq_mrs_en) +{ + unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */ + unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */ + int i; + unsigned int rtt_wr = 0; /* Rtt_WR - dynamic ODT off */ + unsigned int srt = 0; /* self-refresh temerature, normal range */ + unsigned int asr = 0; /* auto self-refresh disable */ + unsigned int cwl = compute_cas_write_latency(ctrl_num) - 5; + unsigned int pasr = 0; /* partial array self refresh disable */ + + if (popts->rtt_override) + rtt_wr = popts->rtt_wr_override_value; + else + rtt_wr = popts->cs_local_opts[0].odt_rtt_wr; + + if (common_dimm->extended_op_srt) + srt = common_dimm->extended_op_srt; + + esdmode2 = (0 + | ((rtt_wr & 0x3) << 9) + | ((srt & 0x1) << 7) + | ((asr & 0x1) << 6) + | ((cwl & 0x7) << 3) + | ((pasr & 0x7) << 0)); + ddr->ddr_sdram_mode_2 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + debug("FSLDDR: ddr_sdram_mode_2 = 0x%08x\n", ddr->ddr_sdram_mode_2); + + if (unq_mrs_en) { /* unique mode registers are supported */ + for (i = 1; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (popts->rtt_override) + rtt_wr = popts->rtt_wr_override_value; + else + rtt_wr = popts->cs_local_opts[i].odt_rtt_wr; + + esdmode2 &= 0xF9FF; /* clear bit 10, 9 */ + esdmode2 |= (rtt_wr & 0x3) << 9; + switch (i) { + case 1: + ddr->ddr_sdram_mode_4 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + break; + case 2: + ddr->ddr_sdram_mode_6 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + break; + case 3: + ddr->ddr_sdram_mode_8 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + break; + } + } + debug("FSLDDR: ddr_sdram_mode_4 = 0x%08x\n", + ddr->ddr_sdram_mode_4); + debug("FSLDDR: ddr_sdram_mode_6 = 0x%08x\n", + ddr->ddr_sdram_mode_6); + debug("FSLDDR: ddr_sdram_mode_8 = 0x%08x\n", + ddr->ddr_sdram_mode_8); + } +} + +#else /* for DDR2 and DDR1 */ +/* DDR SDRAM Mode configuration 2 (DDR_SDRAM_MODE_2) */ +static void set_ddr_sdram_mode_2(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + const unsigned int unq_mrs_en) +{ + unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */ + unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */ + + ddr->ddr_sdram_mode_2 = (0 + | ((esdmode2 & 0xFFFF) << 16) + | ((esdmode3 & 0xFFFF) << 0) + ); + debug("FSLDDR: ddr_sdram_mode_2 = 0x%08x\n", ddr->ddr_sdram_mode_2); +} +#endif + +#ifdef CONFIG_SYS_FSL_DDR4 +/* DDR SDRAM Mode configuration 9 (DDR_SDRAM_MODE_9) */ +static void set_ddr_sdram_mode_9(fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + const unsigned int unq_mrs_en) +{ + int i; + unsigned short esdmode4 = 0; /* Extended SDRAM mode 4 */ + unsigned short esdmode5; /* Extended SDRAM mode 5 */ + int rtt_park = 0; + bool four_cs = false; + const unsigned int mclk_ps = get_memory_clk_period_ps(0); + +#if CONFIG_CHIP_SELECTS_PER_CTRL == 4 + if ((ddr->cs[0].config & SDRAM_CS_CONFIG_EN) && + (ddr->cs[1].config & SDRAM_CS_CONFIG_EN) && + (ddr->cs[2].config & SDRAM_CS_CONFIG_EN) && + (ddr->cs[3].config & SDRAM_CS_CONFIG_EN)) + four_cs = true; +#endif + if (ddr->cs[0].config & SDRAM_CS_CONFIG_EN) { + esdmode5 = 0x00000500; /* Data mask enable, RTT_PARK CS0 */ + rtt_park = four_cs ? 
0 : 1; + } else { + esdmode5 = 0x00000400; /* Data mask enabled */ + } + + /* + * For DDR3, set C/A latency if address parity is enabled. + * For DDR4, set C/A latency for UDIMM only. For RDIMM the delay is + * handled by register chip and RCW settings. + */ + if ((ddr->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) && + ((CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4) || + !popts->registered_dimm_en)) { + if (mclk_ps >= 935) { + /* for DDR4-1600/1866/2133 */ + esdmode5 |= DDR_MR5_CA_PARITY_LAT_4_CLK; + } else if (mclk_ps >= 833) { + /* for DDR4-2400 */ + esdmode5 |= DDR_MR5_CA_PARITY_LAT_5_CLK; + } else { + printf("parity: mclk_ps = %d not supported\n", mclk_ps); + } + } + + ddr->ddr_sdram_mode_9 = (0 + | ((esdmode4 & 0xffff) << 16) + | ((esdmode5 & 0xffff) << 0) + ); + + /* Normally only the first enabled CS use 0x500, others use 0x400 + * But when four chip-selects are all enabled, all mode registers + * need 0x500 to park. + */ + + debug("FSLDDR: ddr_sdram_mode_9 = 0x%08x\n", ddr->ddr_sdram_mode_9); + if (unq_mrs_en) { /* unique mode registers are supported */ + for (i = 1; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (!rtt_park && + (ddr->cs[i].config & SDRAM_CS_CONFIG_EN)) { + esdmode5 |= 0x00000500; /* RTT_PARK */ + rtt_park = four_cs ? 0 : 1; + } else { + esdmode5 = 0x00000400; + } + + if ((ddr->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) && + ((CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4) || + !popts->registered_dimm_en)) { + if (mclk_ps >= 935) { + /* for DDR4-1600/1866/2133 */ + esdmode5 |= DDR_MR5_CA_PARITY_LAT_4_CLK; + } else if (mclk_ps >= 833) { + /* for DDR4-2400 */ + esdmode5 |= DDR_MR5_CA_PARITY_LAT_5_CLK; + } else { + printf("parity: mclk_ps = %d not supported\n", + mclk_ps); + } + } + + switch (i) { + case 1: + ddr->ddr_sdram_mode_11 = (0 + | ((esdmode4 & 0xFFFF) << 16) + | ((esdmode5 & 0xFFFF) << 0) + ); + break; + case 2: + ddr->ddr_sdram_mode_13 = (0 + | ((esdmode4 & 0xFFFF) << 16) + | ((esdmode5 & 0xFFFF) << 0) + ); + break; + case 3: + ddr->ddr_sdram_mode_15 = (0 + | ((esdmode4 & 0xFFFF) << 16) + | ((esdmode5 & 0xFFFF) << 0) + ); + break; + } + } + debug("FSLDDR: ddr_sdram_mode_11 = 0x%08x\n", + ddr->ddr_sdram_mode_11); + debug("FSLDDR: ddr_sdram_mode_13 = 0x%08x\n", + ddr->ddr_sdram_mode_13); + debug("FSLDDR: ddr_sdram_mode_15 = 0x%08x\n", + ddr->ddr_sdram_mode_15); + } +} + +/* DDR SDRAM Mode configuration 10 (DDR_SDRAM_MODE_10) */ +static void set_ddr_sdram_mode_10(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + const unsigned int unq_mrs_en) +{ + int i; + unsigned short esdmode6 = 0; /* Extended SDRAM mode 6 */ + unsigned short esdmode7 = 0; /* Extended SDRAM mode 7 */ + unsigned int tccdl_min = picos_to_mclk(ctrl_num, common_dimm->tccdl_ps); + + esdmode6 = ((tccdl_min - 4) & 0x7) << 10; + + if (popts->ddr_cdr2 & DDR_CDR2_VREF_RANGE_2) + esdmode6 |= 1 << 6; /* Range 2 */ + + ddr->ddr_sdram_mode_10 = (0 + | ((esdmode6 & 0xffff) << 16) + | ((esdmode7 & 0xffff) << 0) + ); + debug("FSLDDR: ddr_sdram_mode_10 = 0x%08x\n", ddr->ddr_sdram_mode_10); + if (unq_mrs_en) { /* unique mode registers are supported */ + for (i = 1; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + switch (i) { + case 1: + ddr->ddr_sdram_mode_12 = (0 + | ((esdmode6 & 0xFFFF) << 16) + | ((esdmode7 & 0xFFFF) << 0) + ); + break; + case 2: + ddr->ddr_sdram_mode_14 = (0 + | ((esdmode6 & 0xFFFF) << 16) + | ((esdmode7 & 0xFFFF) << 0) + ); + break; + case 3: + ddr->ddr_sdram_mode_16 = (0 + | ((esdmode6 & 0xFFFF) << 16) + | ((esdmode7 & 0xFFFF) << 0) 
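set_ddr_sdram_mode_10() above encodes tCCD_L into MR6 as an offset from the DDR4 minimum of four clocks, plus an optional VrefDQ range bit. A sketch of just that encoding, assuming tccdl is already in clocks:

#include <stdio.h>

static unsigned short mr6_encode(unsigned int tccdl_clocks, int vref_range2)
{
	unsigned short esdmode6 = ((tccdl_clocks - 4) & 0x7) << 10;

	if (vref_range2)
		esdmode6 |= 1 << 6;	/* VrefDQ training range 2 */
	return esdmode6;
}

int main(void)
{
	/* tCCD_L of 6 clocks, range 1: bits 12:10 hold 2, so 0x0800 */
	printf("esdmode6 = 0x%04x\n", mr6_encode(6, 0));
	return 0;
}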
+ ); + break; + } + } + debug("FSLDDR: ddr_sdram_mode_12 = 0x%08x\n", + ddr->ddr_sdram_mode_12); + debug("FSLDDR: ddr_sdram_mode_14 = 0x%08x\n", + ddr->ddr_sdram_mode_14); + debug("FSLDDR: ddr_sdram_mode_16 = 0x%08x\n", + ddr->ddr_sdram_mode_16); + } +} + +#endif + +/* DDR SDRAM Interval Configuration (DDR_SDRAM_INTERVAL) */ +static void set_ddr_sdram_interval(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm) +{ + unsigned int refint; /* Refresh interval */ + unsigned int bstopre; /* Precharge interval */ + + refint = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps); + + bstopre = popts->bstopre; + + /* refint field used 0x3FFF in earlier controllers */ + ddr->ddr_sdram_interval = (0 + | ((refint & 0xFFFF) << 16) + | ((bstopre & 0x3FFF) << 0) + ); + debug("FSLDDR: ddr_sdram_interval = 0x%08x\n", ddr->ddr_sdram_interval); +} + +#ifdef CONFIG_SYS_FSL_DDR4 +/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */ +static void set_ddr_sdram_mode(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + unsigned int cas_latency, + unsigned int additive_latency, + const unsigned int unq_mrs_en) +{ + int i; + unsigned short esdmode; /* Extended SDRAM mode */ + unsigned short sdmode; /* SDRAM mode */ + + /* Mode Register - MR1 */ + unsigned int qoff = 0; /* Output buffer enable 0=yes, 1=no */ + unsigned int tdqs_en = 0; /* TDQS Enable: 0=no, 1=yes */ + unsigned int rtt; + unsigned int wrlvl_en = 0; /* Write level enable: 0=no, 1=yes */ + unsigned int al = 0; /* Posted CAS# additive latency (AL) */ + unsigned int dic = 0; /* Output driver impedance, 40ohm */ + unsigned int dll_en = 1; /* DLL Enable 1=Enable (Normal), + 0=Disable (Test/Debug) */ + + /* Mode Register - MR0 */ + unsigned int wr = 0; /* Write Recovery */ + unsigned int dll_rst; /* DLL Reset */ + unsigned int mode; /* Normal=0 or Test=1 */ + unsigned int caslat = 4;/* CAS# latency, default set as 6 cycles */ + /* BT: Burst Type (0=Nibble Sequential, 1=Interleaved) */ + unsigned int bt; + unsigned int bl; /* BL: Burst Length */ + + unsigned int wr_mclk; + /* DDR4 support WR 10, 12, 14, 16, 18, 20, 24 */ + static const u8 wr_table[] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6}; + /* DDR4 support CAS 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24 */ + static const u8 cas_latency_table[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, + 9, 9, 10, 10, 11, 11}; + + if (popts->rtt_override) + rtt = popts->rtt_override_value; + else + rtt = popts->cs_local_opts[0].odt_rtt_norm; + + if (additive_latency == (cas_latency - 1)) + al = 1; + if (additive_latency == (cas_latency - 2)) + al = 2; + + if (popts->quad_rank_present) + dic = 1; /* output driver impedance 240/7 ohm */ + + /* + * The esdmode value will also be used for writing + * MR1 during write leveling for DDR3, although the + * bits specifically related to the write leveling + * scheme will be handled automatically by the DDR + * controller. so we set the wrlvl_en = 0 here. 
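set_ddr_sdram_interval() above turns the DIMM refresh period into memory clocks before packing it. A worked example for a 7.8 us refresh at a 1250 ps clock; the round-up in picos_to_mclk() is an assumption here, not quoted from the driver:

#include <stdio.h>

static unsigned int picos_to_mclk_sketch(unsigned int ps, unsigned int mclk_ps)
{
	return (ps + mclk_ps - 1) / mclk_ps;	/* assumed round-up */
}

int main(void)
{
	unsigned int refint = picos_to_mclk_sketch(7800000, 1250);	/* 6240 */
	unsigned int bstopre = 0x100;	/* arbitrary precharge interval */
	unsigned int reg = ((refint & 0xFFFF) << 16) | (bstopre & 0x3FFF);

	printf("DDR_SDRAM_INTERVAL = 0x%08x (refint = %u)\n", reg, refint);
	return 0;
}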
+ */ + esdmode = (0 + | ((qoff & 0x1) << 12) + | ((tdqs_en & 0x1) << 11) + | ((rtt & 0x7) << 8) + | ((wrlvl_en & 0x1) << 7) + | ((al & 0x3) << 3) + | ((dic & 0x3) << 1) /* DIC field is split */ + | ((dll_en & 0x1) << 0) + ); + + /* + * DLL control for precharge PD + * 0=slow exit DLL off (tXPDLL) + * 1=fast exit DLL on (tXP) + */ + + wr_mclk = picos_to_mclk(ctrl_num, common_dimm->twr_ps); + if (wr_mclk <= 24) { + wr = wr_table[wr_mclk - 10]; + } else { + printf("Error: unsupported write recovery for mode register wr_mclk = %d\n", + wr_mclk); + } + + dll_rst = 0; /* dll no reset */ + mode = 0; /* normal mode */ + + /* look up table to get the cas latency bits */ + if (cas_latency >= 9 && cas_latency <= 24) + caslat = cas_latency_table[cas_latency - 9]; + else + printf("Error: unsupported cas latency for mode register\n"); + + bt = 0; /* Nibble sequential */ + + switch (popts->burst_length) { + case DDR_BL8: + bl = 0; + break; + case DDR_OTF: + bl = 1; + break; + case DDR_BC4: + bl = 2; + break; + default: + printf("Error: invalid burst length of %u specified. ", + popts->burst_length); + puts("Defaulting to on-the-fly BC4 or BL8 beats.\n"); + bl = 1; + break; + } + + sdmode = (0 + | ((wr & 0x7) << 9) + | ((dll_rst & 0x1) << 8) + | ((mode & 0x1) << 7) + | (((caslat >> 1) & 0x7) << 4) + | ((bt & 0x1) << 3) + | ((caslat & 1) << 2) + | ((bl & 0x3) << 0) + ); + + ddr->ddr_sdram_mode = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + + debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode); + + if (unq_mrs_en) { /* unique mode registers are supported */ + for (i = 1; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (popts->rtt_override) + rtt = popts->rtt_override_value; + else + rtt = popts->cs_local_opts[i].odt_rtt_norm; + + esdmode &= 0xF8FF; /* clear bit 10,9,8 for rtt */ + esdmode |= (rtt & 0x7) << 8; + switch (i) { + case 1: + ddr->ddr_sdram_mode_3 = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + break; + case 2: + ddr->ddr_sdram_mode_5 = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + break; + case 3: + ddr->ddr_sdram_mode_7 = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + break; + } + } + debug("FSLDDR: ddr_sdram_mode_3 = 0x%08x\n", + ddr->ddr_sdram_mode_3); + debug("FSLDDR: ddr_sdram_mode_5 = 0x%08x\n", + ddr->ddr_sdram_mode_5); + debug("FSLDDR: ddr_sdram_mode_5 = 0x%08x\n", + ddr->ddr_sdram_mode_5); + } +} + +#elif defined(CONFIG_SYS_FSL_DDR3) +/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */ +static void set_ddr_sdram_mode(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + unsigned int cas_latency, + unsigned int additive_latency, + const unsigned int unq_mrs_en) +{ + int i; + unsigned short esdmode; /* Extended SDRAM mode */ + unsigned short sdmode; /* SDRAM mode */ + + /* Mode Register - MR1 */ + unsigned int qoff = 0; /* Output buffer enable 0=yes, 1=no */ + unsigned int tdqs_en = 0; /* TDQS Enable: 0=no, 1=yes */ + unsigned int rtt; + unsigned int wrlvl_en = 0; /* Write level enable: 0=no, 1=yes */ + unsigned int al = 0; /* Posted CAS# additive latency (AL) */ + unsigned int dic = 0; /* Output driver impedance, 40ohm */ + unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal), + 1=Disable (Test/Debug) */ + + /* Mode Register - MR0 */ + unsigned int dll_on; /* DLL control for precharge PD, 0=off, 1=on */ + unsigned int wr = 0; /* Write Recovery */ + unsigned int dll_rst; /* DLL Reset */ + unsigned 
int mode; /* Normal=0 or Test=1 */ + unsigned int caslat = 4;/* CAS# latency, default set as 6 cycles */ + /* BT: Burst Type (0=Nibble Sequential, 1=Interleaved) */ + unsigned int bt; + unsigned int bl; /* BL: Burst Length */ + + unsigned int wr_mclk; + /* + * DDR_SDRAM_MODE doesn't support 9,11,13,15 + * Please refer JEDEC Standard No. 79-3E for Mode Register MR0 + * for this table + */ + static const u8 wr_table[] = {1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0}; + + if (popts->rtt_override) + rtt = popts->rtt_override_value; + else + rtt = popts->cs_local_opts[0].odt_rtt_norm; + + if (additive_latency == (cas_latency - 1)) + al = 1; + if (additive_latency == (cas_latency - 2)) + al = 2; + + if (popts->quad_rank_present) + dic = 1; /* output driver impedance 240/7 ohm */ + + /* + * The esdmode value will also be used for writing + * MR1 during write leveling for DDR3, although the + * bits specifically related to the write leveling + * scheme will be handled automatically by the DDR + * controller. so we set the wrlvl_en = 0 here. + */ + esdmode = (0 + | ((qoff & 0x1) << 12) + | ((tdqs_en & 0x1) << 11) + | ((rtt & 0x4) << 7) /* rtt field is split */ + | ((wrlvl_en & 0x1) << 7) + | ((rtt & 0x2) << 5) /* rtt field is split */ + | ((dic & 0x2) << 4) /* DIC field is split */ + | ((al & 0x3) << 3) + | ((rtt & 0x1) << 2) /* rtt field is split */ + | ((dic & 0x1) << 1) /* DIC field is split */ + | ((dll_en & 0x1) << 0) + ); + + /* + * DLL control for precharge PD + * 0=slow exit DLL off (tXPDLL) + * 1=fast exit DLL on (tXP) + */ + dll_on = 1; + + wr_mclk = picos_to_mclk(ctrl_num, common_dimm->twr_ps); + if (wr_mclk <= 16) { + wr = wr_table[wr_mclk - 5]; + } else { + printf("Error: unsupported write recovery for mode register " + "wr_mclk = %d\n", wr_mclk); + } + + dll_rst = 0; /* dll no reset */ + mode = 0; /* normal mode */ + + /* look up table to get the cas latency bits */ + if (cas_latency >= 5 && cas_latency <= 16) { + unsigned char cas_latency_table[] = { + 0x2, /* 5 clocks */ + 0x4, /* 6 clocks */ + 0x6, /* 7 clocks */ + 0x8, /* 8 clocks */ + 0xa, /* 9 clocks */ + 0xc, /* 10 clocks */ + 0xe, /* 11 clocks */ + 0x1, /* 12 clocks */ + 0x3, /* 13 clocks */ + 0x5, /* 14 clocks */ + 0x7, /* 15 clocks */ + 0x9, /* 16 clocks */ + }; + caslat = cas_latency_table[cas_latency - 5]; + } else { + printf("Error: unsupported cas latency for mode register\n"); + } + + bt = 0; /* Nibble sequential */ + + switch (popts->burst_length) { + case DDR_BL8: + bl = 0; + break; + case DDR_OTF: + bl = 1; + break; + case DDR_BC4: + bl = 2; + break; + default: + printf("Error: invalid burst length of %u specified. 
" + " Defaulting to on-the-fly BC4 or BL8 beats.\n", + popts->burst_length); + bl = 1; + break; + } + + sdmode = (0 + | ((dll_on & 0x1) << 12) + | ((wr & 0x7) << 9) + | ((dll_rst & 0x1) << 8) + | ((mode & 0x1) << 7) + | (((caslat >> 1) & 0x7) << 4) + | ((bt & 0x1) << 3) + | ((caslat & 1) << 2) + | ((bl & 0x3) << 0) + ); + + ddr->ddr_sdram_mode = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + + debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode); + + if (unq_mrs_en) { /* unique mode registers are supported */ + for (i = 1; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (popts->rtt_override) + rtt = popts->rtt_override_value; + else + rtt = popts->cs_local_opts[i].odt_rtt_norm; + + esdmode &= 0xFDBB; /* clear bit 9,6,2 */ + esdmode |= (0 + | ((rtt & 0x4) << 7) /* rtt field is split */ + | ((rtt & 0x2) << 5) /* rtt field is split */ + | ((rtt & 0x1) << 2) /* rtt field is split */ + ); + switch (i) { + case 1: + ddr->ddr_sdram_mode_3 = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + break; + case 2: + ddr->ddr_sdram_mode_5 = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + break; + case 3: + ddr->ddr_sdram_mode_7 = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + break; + } + } + debug("FSLDDR: ddr_sdram_mode_3 = 0x%08x\n", + ddr->ddr_sdram_mode_3); + debug("FSLDDR: ddr_sdram_mode_5 = 0x%08x\n", + ddr->ddr_sdram_mode_5); + debug("FSLDDR: ddr_sdram_mode_5 = 0x%08x\n", + ddr->ddr_sdram_mode_5); + } +} + +#else /* !CONFIG_SYS_FSL_DDR3 */ + +/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */ +static void set_ddr_sdram_mode(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + unsigned int cas_latency, + unsigned int additive_latency, + const unsigned int unq_mrs_en) +{ + unsigned short esdmode; /* Extended SDRAM mode */ + unsigned short sdmode; /* SDRAM mode */ + + /* + * FIXME: This ought to be pre-calculated in a + * technology-specific routine, + * e.g. compute_DDR2_mode_register(), and then the + * sdmode and esdmode passed in as part of common_dimm. 
+ */ + + /* Extended Mode Register */ + unsigned int mrs = 0; /* Mode Register Set */ + unsigned int outputs = 0; /* 0=Enabled, 1=Disabled */ + unsigned int rdqs_en = 0; /* RDQS Enable: 0=no, 1=yes */ + unsigned int dqs_en = 0; /* DQS# Enable: 0=enable, 1=disable */ + unsigned int ocd = 0; /* 0x0=OCD not supported, + 0x7=OCD default state */ + unsigned int rtt; + unsigned int al; /* Posted CAS# additive latency (AL) */ + unsigned int ods = 0; /* Output Drive Strength: + 0 = Full strength (18ohm) + 1 = Reduced strength (4ohm) */ + unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal), + 1=Disable (Test/Debug) */ + + /* Mode Register (MR) */ + unsigned int mr; /* Mode Register Definition */ + unsigned int pd; /* Power-Down Mode */ + unsigned int wr; /* Write Recovery */ + unsigned int dll_res; /* DLL Reset */ + unsigned int mode; /* Normal=0 or Test=1 */ + unsigned int caslat = 0;/* CAS# latency */ + /* BT: Burst Type (0=Sequential, 1=Interleaved) */ + unsigned int bt; + unsigned int bl; /* BL: Burst Length */ + + dqs_en = !popts->dqs_config; + rtt = fsl_ddr_get_rtt(); + + al = additive_latency; + + esdmode = (0 + | ((mrs & 0x3) << 14) + | ((outputs & 0x1) << 12) + | ((rdqs_en & 0x1) << 11) + | ((dqs_en & 0x1) << 10) + | ((ocd & 0x7) << 7) + | ((rtt & 0x2) << 5) /* rtt field is split */ + | ((al & 0x7) << 3) + | ((rtt & 0x1) << 2) /* rtt field is split */ + | ((ods & 0x1) << 1) + | ((dll_en & 0x1) << 0) + ); + + mr = 0; /* FIXME: CHECKME */ + + /* + * 0 = Fast Exit (Normal) + * 1 = Slow Exit (Low Power) + */ + pd = 0; + +#if defined(CONFIG_SYS_FSL_DDR1) + wr = 0; /* Historical */ +#elif defined(CONFIG_SYS_FSL_DDR2) + wr = picos_to_mclk(ctrl_num, common_dimm->twr_ps); +#endif + dll_res = 0; + mode = 0; + +#if defined(CONFIG_SYS_FSL_DDR1) + if (1 <= cas_latency && cas_latency <= 4) { + unsigned char mode_caslat_table[4] = { + 0x5, /* 1.5 clocks */ + 0x2, /* 2.0 clocks */ + 0x6, /* 2.5 clocks */ + 0x3 /* 3.0 clocks */ + }; + caslat = mode_caslat_table[cas_latency - 1]; + } else { + printf("Warning: unknown cas_latency %d\n", cas_latency); + } +#elif defined(CONFIG_SYS_FSL_DDR2) + caslat = cas_latency; +#endif + bt = 0; + + switch (popts->burst_length) { + case DDR_BL4: + bl = 2; + break; + case DDR_BL8: + bl = 3; + break; + default: + printf("Error: invalid burst length of %u specified. " + " Defaulting to 4 beats.\n", + popts->burst_length); + bl = 2; + break; + } + + sdmode = (0 + | ((mr & 0x3) << 14) + | ((pd & 0x1) << 12) + | ((wr & 0x7) << 9) + | ((dll_res & 0x1) << 8) + | ((mode & 0x1) << 7) + | ((caslat & 0x7) << 4) + | ((bt & 0x1) << 3) + | ((bl & 0x7) << 0) + ); + + ddr->ddr_sdram_mode = (0 + | ((esdmode & 0xFFFF) << 16) + | ((sdmode & 0xFFFF) << 0) + ); + debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode); +} +#endif + +/* DDR SDRAM Data Initialization (DDR_DATA_INIT) */ +static void set_ddr_data_init(fsl_ddr_cfg_regs_t *ddr) +{ + unsigned int init_value; /* Initialization value */ + +#ifdef CONFIG_MEM_INIT_VALUE + init_value = CONFIG_MEM_INIT_VALUE; +#else + init_value = 0xDEADBEEF; +#endif + ddr->ddr_data_init = init_value; +} + +/* + * DDR SDRAM Clock Control (DDR_SDRAM_CLK_CNTL) + * The old controller on the 8540/60 doesn't have this register. + * Hope it's OK to set it (to 0) anyway. 
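The clock-control function that follows widens the clk_adjust field from four to five bits depending on the controller IP revision (versions compare as plain hex numbers, so 0x40701 reads as rev 4.7.1). Restated standalone:

#include <stdio.h>

static unsigned int clk_adjust_bits(unsigned int ip_rev, unsigned int adj)
{
	if (ip_rev >= 0x40701)		/* T-series and LS-series parts */
		return (adj & 0x1F) << 22;	/* 5-bit field */
	return (adj & 0xF) << 23;	/* 4-bit field on older parts */
}

int main(void)
{
	/* the same option value lands at a different offset per revision */
	printf("new: 0x%08x old: 0x%08x\n",
	       clk_adjust_bits(0x40702, 0x8), clk_adjust_bits(0x40400, 0x8));
	return 0;
}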
+ */ +static void set_ddr_sdram_clk_cntl(fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts) +{ + unsigned int clk_adjust; /* Clock adjust */ + unsigned int ss_en = 0; /* Source synchronous enable */ + +#if defined(CONFIG_ARCH_MPC8541) || defined(CONFIG_ARCH_MPC8555) + /* Per FSL Application Note: AN2805 */ + ss_en = 1; +#endif + if (fsl_ddr_get_version(0) >= 0x40701) { + /* clk_adjust in 5-bits on T-series and LS-series */ + clk_adjust = (popts->clk_adjust & 0x1F) << 22; + } else { + /* clk_adjust in 4-bits on earlier MPC85xx and P-series */ + clk_adjust = (popts->clk_adjust & 0xF) << 23; + } + + ddr->ddr_sdram_clk_cntl = (0 + | ((ss_en & 0x1) << 31) + | clk_adjust + ); + debug("FSLDDR: clk_cntl = 0x%08x\n", ddr->ddr_sdram_clk_cntl); +} + +/* DDR Initialization Address (DDR_INIT_ADDR) */ +static void set_ddr_init_addr(fsl_ddr_cfg_regs_t *ddr) +{ + unsigned int init_addr = 0; /* Initialization address */ + + ddr->ddr_init_addr = init_addr; +} + +/* DDR Initialization Address (DDR_INIT_EXT_ADDR) */ +static void set_ddr_init_ext_addr(fsl_ddr_cfg_regs_t *ddr) +{ + unsigned int uia = 0; /* Use initialization address */ + unsigned int init_ext_addr = 0; /* Initialization address */ + + ddr->ddr_init_ext_addr = (0 + | ((uia & 0x1) << 31) + | (init_ext_addr & 0xF) + ); +} + +/* DDR SDRAM Timing Configuration 4 (TIMING_CFG_4) */ +static void set_timing_cfg_4(fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts) +{ + unsigned int rwt = 0; /* Read-to-write turnaround for same CS */ + unsigned int wrt = 0; /* Write-to-read turnaround for same CS */ + unsigned int rrt = 0; /* Read-to-read turnaround for same CS */ + unsigned int wwt = 0; /* Write-to-write turnaround for same CS */ + unsigned int trwt_mclk = 0; /* ext_rwt */ + unsigned int dll_lock = 0; /* DDR SDRAM DLL Lock Time */ + +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + if (popts->burst_length == DDR_BL8) { + /* We set BL/2 for fixed BL8 */ + rrt = 0; /* BL/2 clocks */ + wwt = 0; /* BL/2 clocks */ + } else { + /* We need to set BL/2 + 2 to BC4 and OTF */ + rrt = 2; /* BL/2 + 2 clocks */ + wwt = 2; /* BL/2 + 2 clocks */ + } +#endif +#ifdef CONFIG_SYS_FSL_DDR4 + dll_lock = 2; /* tDLLK = 1024 clocks */ +#elif defined(CONFIG_SYS_FSL_DDR3) + dll_lock = 1; /* tDLLK = 512 clocks from spec */ +#endif + + if (popts->trwt_override) + trwt_mclk = popts->trwt; + + ddr->timing_cfg_4 = (0 + | ((rwt & 0xf) << 28) + | ((wrt & 0xf) << 24) + | ((rrt & 0xf) << 20) + | ((wwt & 0xf) << 16) + | ((trwt_mclk & 0xc) << 12) + | (dll_lock & 0x3) + ); + debug("FSLDDR: timing_cfg_4 = 0x%08x\n", ddr->timing_cfg_4); +} + +/* DDR SDRAM Timing Configuration 5 (TIMING_CFG_5) */ +static void set_timing_cfg_5(fsl_ddr_cfg_regs_t *ddr, unsigned int cas_latency) +{ + unsigned int rodt_on = 0; /* Read to ODT on */ + unsigned int rodt_off = 0; /* Read to ODT off */ + unsigned int wodt_on = 0; /* Write to ODT on */ + unsigned int wodt_off = 0; /* Write to ODT off */ + +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + unsigned int wr_lat = ((ddr->timing_cfg_2 & 0x00780000) >> 19) + + ((ddr->timing_cfg_2 & 0x00040000) >> 14); + /* rodt_on = timing_cfg_1[caslat] - timing_cfg_2[wrlat] + 1 */ + if (cas_latency >= wr_lat) + rodt_on = cas_latency - wr_lat + 1; + rodt_off = 4; /* 4 clocks */ + wodt_on = 1; /* 1 clocks */ + wodt_off = 4; /* 4 clocks */ +#endif + + ddr->timing_cfg_5 = (0 + | ((rodt_on & 0x1f) << 24) + | ((rodt_off & 0x7) << 20) + | ((wodt_on & 0x1f) << 12) + | ((wodt_off & 0x7) << 8) + ); + debug("FSLDDR: timing_cfg_5 = 
0x%08x\n", ddr->timing_cfg_5); +} + +#ifdef CONFIG_SYS_FSL_DDR4 +static void set_timing_cfg_6(fsl_ddr_cfg_regs_t *ddr) +{ + unsigned int hs_caslat = 0; + unsigned int hs_wrlat = 0; + unsigned int hs_wrrec = 0; + unsigned int hs_clkadj = 0; + unsigned int hs_wrlvl_start = 0; + + ddr->timing_cfg_6 = (0 + | ((hs_caslat & 0x1f) << 24) + | ((hs_wrlat & 0x1f) << 19) + | ((hs_wrrec & 0x1f) << 12) + | ((hs_clkadj & 0x1f) << 6) + | ((hs_wrlvl_start & 0x1f) << 0) + ); + debug("FSLDDR: timing_cfg_6 = 0x%08x\n", ddr->timing_cfg_6); +} + +static void set_timing_cfg_7(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm) +{ + unsigned int txpr, tcksre, tcksrx; + unsigned int cke_rst, cksre, cksrx, par_lat = 0, cs_to_cmd; + const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num); + + txpr = max(5U, picos_to_mclk(ctrl_num, common_dimm->trfc1_ps + 10000)); + tcksre = max(5U, picos_to_mclk(ctrl_num, 10000)); + tcksrx = max(5U, picos_to_mclk(ctrl_num, 10000)); + + if (ddr->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN && + CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4) { + /* for DDR4 only */ + par_lat = (ddr->ddr_sdram_rcw_2 & 0xf) + 1; + debug("PAR_LAT = %u for mclk_ps = %d\n", par_lat, mclk_ps); + } + + cs_to_cmd = 0; + + if (txpr <= 200) + cke_rst = 0; + else if (txpr <= 256) + cke_rst = 1; + else if (txpr <= 512) + cke_rst = 2; + else + cke_rst = 3; + + if (tcksre <= 19) + cksre = tcksre - 5; + else + cksre = 15; + + if (tcksrx <= 19) + cksrx = tcksrx - 5; + else + cksrx = 15; + + ddr->timing_cfg_7 = (0 + | ((cke_rst & 0x3) << 28) + | ((cksre & 0xf) << 24) + | ((cksrx & 0xf) << 20) + | ((par_lat & 0xf) << 16) + | ((cs_to_cmd & 0xf) << 4) + ); + debug("FSLDDR: timing_cfg_7 = 0x%08x\n", ddr->timing_cfg_7); +} + +static void set_timing_cfg_8(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm, + unsigned int cas_latency) +{ + int rwt_bg, wrt_bg, rrt_bg, wwt_bg; + unsigned int acttoact_bg, wrtord_bg, pre_all_rec; + int tccdl = picos_to_mclk(ctrl_num, common_dimm->tccdl_ps); + int wr_lat = ((ddr->timing_cfg_2 & 0x00780000) >> 19) + + ((ddr->timing_cfg_2 & 0x00040000) >> 14); + + rwt_bg = cas_latency + 2 + 4 - wr_lat; + if (rwt_bg < tccdl) + rwt_bg = tccdl - rwt_bg; + else + rwt_bg = 0; + + wrt_bg = wr_lat + 4 + 1 - cas_latency; + if (wrt_bg < tccdl) + wrt_bg = tccdl - wrt_bg; + else + wrt_bg = 0; + + if (popts->burst_length == DDR_BL8) { + rrt_bg = tccdl - 4; + wwt_bg = tccdl - 4; + } else { + rrt_bg = tccdl - 2; + wwt_bg = tccdl - 2; + } + + acttoact_bg = picos_to_mclk(ctrl_num, common_dimm->trrdl_ps); + wrtord_bg = max(4U, picos_to_mclk(ctrl_num, 7500)); + if (popts->otf_burst_chop_en) + wrtord_bg += 2; + + pre_all_rec = 0; + + ddr->timing_cfg_8 = (0 + | ((rwt_bg & 0xf) << 28) + | ((wrt_bg & 0xf) << 24) + | ((rrt_bg & 0xf) << 20) + | ((wwt_bg & 0xf) << 16) + | ((acttoact_bg & 0xf) << 12) + | ((wrtord_bg & 0xf) << 8) + | ((pre_all_rec & 0x1f) << 0) + ); + + debug("FSLDDR: timing_cfg_8 = 0x%08x\n", ddr->timing_cfg_8); +} + +static void set_timing_cfg_9(const unsigned int ctrl_num, + fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts, + const common_timing_params_t *common_dimm) +{ + unsigned int refrec_cid_mclk = 0; + unsigned int acttoact_cid_mclk = 0; + + if (popts->package_3ds) { + refrec_cid_mclk = + picos_to_mclk(ctrl_num, common_dimm->trfc_slr_ps); + acttoact_cid_mclk = 4U; /* tRRDS_slr */ + } + + ddr->timing_cfg_9 = (refrec_cid_mclk & 0x3ff) << 
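Both set_timing_cfg_5() and set_timing_cfg_8() recover the write latency from TIMING_CFG_2, where it is stored as a split field: four low bits at [22:19] and the fifth bit at [18]. A round-trip check of that encoding (sketch, helper names invented):

#include <assert.h>
#include <stdint.h>

static uint32_t wr_lat_pack(unsigned int wr_lat)
{
	return ((wr_lat & 0xf) << 19) | (((wr_lat & 0x10) >> 4) << 18);
}

static unsigned int wr_lat_unpack(uint32_t timing_cfg_2)
{
	return ((timing_cfg_2 & 0x00780000) >> 19) +
	       ((timing_cfg_2 & 0x00040000) >> 14);
}

int main(void)
{
	/* 18 needs the fifth bit, so it exercises both halves */
	assert(wr_lat_unpack(wr_lat_pack(18)) == 18);
	return 0;
}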
16 | + (acttoact_cid_mclk & 0xf) << 8; + + debug("FSLDDR: timing_cfg_9 = 0x%08x\n", ddr->timing_cfg_9); +} + +/* Must be called after set_ddr_sdram_cfg(), which sets the ACC_ECC_EN bit read back here */ +static void set_ddr_dq_mapping(fsl_ddr_cfg_regs_t *ddr, + const dimm_params_t *dimm_params) +{ + unsigned int acc_ecc_en = (ddr->ddr_sdram_cfg >> 2) & 0x1; + int i; + + for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) { + if (dimm_params[i].n_ranks) + break; + } + if (i >= CONFIG_DIMM_SLOTS_PER_CTLR) { + puts("DDR error: no DIMM found!\n"); + return; + } + + ddr->dq_map_0 = ((dimm_params[i].dq_mapping[0] & 0x3F) << 26) | + ((dimm_params[i].dq_mapping[1] & 0x3F) << 20) | + ((dimm_params[i].dq_mapping[2] & 0x3F) << 14) | + ((dimm_params[i].dq_mapping[3] & 0x3F) << 8) | + ((dimm_params[i].dq_mapping[4] & 0x3F) << 2); + + ddr->dq_map_1 = ((dimm_params[i].dq_mapping[5] & 0x3F) << 26) | + ((dimm_params[i].dq_mapping[6] & 0x3F) << 20) | + ((dimm_params[i].dq_mapping[7] & 0x3F) << 14) | + ((dimm_params[i].dq_mapping[10] & 0x3F) << 8) | + ((dimm_params[i].dq_mapping[11] & 0x3F) << 2); + + ddr->dq_map_2 = ((dimm_params[i].dq_mapping[12] & 0x3F) << 26) | + ((dimm_params[i].dq_mapping[13] & 0x3F) << 20) | + ((dimm_params[i].dq_mapping[14] & 0x3F) << 14) | + ((dimm_params[i].dq_mapping[15] & 0x3F) << 8) | + ((dimm_params[i].dq_mapping[16] & 0x3F) << 2); + + /* dq_map for ECC[4:7] is set to 0 if accumulated ECC is enabled */ + ddr->dq_map_3 = ((dimm_params[i].dq_mapping[17] & 0x3F) << 26) | + ((dimm_params[i].dq_mapping[8] & 0x3F) << 20) | + (acc_ecc_en ? 0 : + (dimm_params[i].dq_mapping[9] & 0x3F) << 14) | + dimm_params[i].dq_mapping_ors; + + debug("FSLDDR: dq_map_0 = 0x%08x\n", ddr->dq_map_0); + debug("FSLDDR: dq_map_1 = 0x%08x\n", ddr->dq_map_1); + debug("FSLDDR: dq_map_2 = 0x%08x\n", ddr->dq_map_2); + debug("FSLDDR: dq_map_3 = 0x%08x\n", ddr->dq_map_3); +} + +static void set_ddr_sdram_cfg_3(fsl_ddr_cfg_regs_t *ddr, + const memctl_options_t *popts) +{ + int rd_pre; + + rd_pre = popts->quad_rank_present ? 1 : 0; + + ddr->ddr_sdram_cfg_3 = (rd_pre & 0x1) << 16; + /* Disable MRS on parity error for RDIMMs */ + ddr->ddr_sdram_cfg_3 |= popts->registered_dimm_en ?
1 : 0; + + if (popts->package_3ds) { /* only 2,4,8 are supported */ + if ((popts->package_3ds + 1) & 0x1) { + printf("Error: Unsupported 3DS DIMM with %d die\n", + popts->package_3ds + 1); + } else { + ddr->ddr_sdram_cfg_3 |= ((popts->package_3ds + 1) >> 1) + << 4; + } + } + + debug("FSLDDR: ddr_sdram_cfg_3 = 0x%08x\n", ddr->ddr_sdram_cfg_3); +} +#endif /* CONFIG_SYS_FSL_DDR4 */ + +/* DDR ZQ Calibration Control (DDR_ZQ_CNTL) */ +static void set_ddr_zq_cntl(fsl_ddr_cfg_regs_t *ddr, unsigned int zq_en) +{ + unsigned int zqinit = 0;/* POR ZQ Calibration Time (tZQinit) */ + /* Normal Operation Full Calibration Time (tZQoper) */ + unsigned int zqoper = 0; + /* Normal Operation Short Calibration Time (tZQCS) */ + unsigned int zqcs = 0; +#ifdef CONFIG_SYS_FSL_DDR4 + unsigned int zqcs_init; +#endif + + if (zq_en) { +#ifdef CONFIG_SYS_FSL_DDR4 + zqinit = 10; /* 1024 clocks */ + zqoper = 9; /* 512 clocks */ + zqcs = 7; /* 128 clocks */ + zqcs_init = 5; /* 1024 refresh sequences */ +#else + zqinit = 9; /* 512 clocks */ + zqoper = 8; /* 256 clocks */ + zqcs = 6; /* 64 clocks */ +#endif + } + + ddr->ddr_zq_cntl = (0 + | ((zq_en & 0x1) << 31) + | ((zqinit & 0xF) << 24) + | ((zqoper & 0xF) << 16) + | ((zqcs & 0xF) << 8) +#ifdef CONFIG_SYS_FSL_DDR4 + | ((zqcs_init & 0xF) << 0) +#endif + ); + debug("FSLDDR: zq_cntl = 0x%08x\n", ddr->ddr_zq_cntl); +} + +/* DDR Write Leveling Control (DDR_WRLVL_CNTL) */ +static void set_ddr_wrlvl_cntl(fsl_ddr_cfg_regs_t *ddr, unsigned int wrlvl_en, + const memctl_options_t *popts) +{ + /* + * First DQS pulse rising edge after margining mode + * is programmed (tWL_MRD) + */ + unsigned int wrlvl_mrd = 0; + /* ODT delay after margining mode is programmed (tWL_ODTEN) */ + unsigned int wrlvl_odten = 0; + /* DQS/DQS_ delay after margining mode is programmed (tWL_DQSEN) */ + unsigned int wrlvl_dqsen = 0; + /* WRLVL_SMPL: Write leveling sample time */ + unsigned int wrlvl_smpl = 0; + /* WRLVL_WLR: Write leveling repeition time */ + unsigned int wrlvl_wlr = 0; + /* WRLVL_START: Write leveling start time */ + unsigned int wrlvl_start = 0; + + /* suggest enable write leveling for DDR3 due to fly-by topology */ + if (wrlvl_en) { + /* tWL_MRD min = 40 nCK, we set it 64 */ + wrlvl_mrd = 0x6; + /* tWL_ODTEN 128 */ + wrlvl_odten = 0x7; + /* tWL_DQSEN min = 25 nCK, we set it 32 */ + wrlvl_dqsen = 0x5; + /* + * Write leveling sample time at least need 6 clocks + * higher than tWLO to allow enough time for progagation + * delay and sampling the prime data bits. + */ + wrlvl_smpl = 0xf; + /* + * Write leveling repetition time + * at least tWLO + 6 clocks clocks + * we set it 64 + */ + wrlvl_wlr = 0x6; + /* + * Write leveling start time + * The value use for the DQS_ADJUST for the first sample + * when write leveling is enabled. It probably needs to be + * overridden per platform. 
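The ZQ timer codes in set_ddr_zq_cntl() above are exponents: a field value of n programs 2^n memory clocks, which is where the 1024/512/128-clock comments come from. Checked numerically:

#include <assert.h>

int main(void)
{
	/* DDR4 values from set_ddr_zq_cntl() */
	assert((1u << 10) == 1024);	/* zqinit = 10 -> tZQinit */
	assert((1u << 9) == 512);	/* zqoper = 9 -> tZQoper */
	assert((1u << 7) == 128);	/* zqcs = 7 -> tZQCS */
	return 0;
}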
+ */ + wrlvl_start = 0x8; + /* + * Override the write leveling sample and start time + * according to specific board + */ + if (popts->wrlvl_override) { + wrlvl_smpl = popts->wrlvl_sample; + wrlvl_start = popts->wrlvl_start; + } + } + + ddr->ddr_wrlvl_cntl = (0 + | ((wrlvl_en & 0x1) << 31) + | ((wrlvl_mrd & 0x7) << 24) + | ((wrlvl_odten & 0x7) << 20) + | ((wrlvl_dqsen & 0x7) << 16) + | ((wrlvl_smpl & 0xf) << 12) + | ((wrlvl_wlr & 0x7) << 8) + | ((wrlvl_start & 0x1F) << 0) + ); + debug("FSLDDR: wrlvl_cntl = 0x%08x\n", ddr->ddr_wrlvl_cntl); + ddr->ddr_wrlvl_cntl_2 = popts->wrlvl_ctl_2; + debug("FSLDDR: wrlvl_cntl_2 = 0x%08x\n", ddr->ddr_wrlvl_cntl_2); + ddr->ddr_wrlvl_cntl_3 = popts->wrlvl_ctl_3; + debug("FSLDDR: wrlvl_cntl_3 = 0x%08x\n", ddr->ddr_wrlvl_cntl_3); + +} + +/* DDR Self Refresh Counter (DDR_SR_CNTR) */ +static void set_ddr_sr_cntr(fsl_ddr_cfg_regs_t *ddr, unsigned int sr_it) +{ + /* Self Refresh Idle Threshold */ + ddr->ddr_sr_cntr = (sr_it & 0xF) << 16; +} + +static void set_ddr_eor(fsl_ddr_cfg_regs_t *ddr, const memctl_options_t *popts) +{ + if (popts->addr_hash) { + ddr->ddr_eor = 0x40000000; /* address hash enable */ + puts("Address hashing enabled.\n"); + } +} + +static void set_ddr_cdr1(fsl_ddr_cfg_regs_t *ddr, const memctl_options_t *popts) +{ + ddr->ddr_cdr1 = popts->ddr_cdr1; + debug("FSLDDR: ddr_cdr1 = 0x%08x\n", ddr->ddr_cdr1); +} + +static void set_ddr_cdr2(fsl_ddr_cfg_regs_t *ddr, const memctl_options_t *popts) +{ + ddr->ddr_cdr2 = popts->ddr_cdr2; + debug("FSLDDR: ddr_cdr2 = 0x%08x\n", ddr->ddr_cdr2); +} + +unsigned int +check_fsl_memctl_config_regs(const fsl_ddr_cfg_regs_t *ddr) +{ + unsigned int res = 0; + + /* + * Check that DDR_SDRAM_CFG[RD_EN] and DDR_SDRAM_CFG[2T_EN] are + * not set at the same time. + */ + if (ddr->ddr_sdram_cfg & 0x10000000 + && ddr->ddr_sdram_cfg & 0x00008000) { + printf("Error: DDR_SDRAM_CFG[RD_EN] and DDR_SDRAM_CFG[2T_EN] " + " should not be set at the same time.\n"); + res++; + } + + return res; +} + +unsigned int +compute_fsl_memctl_config_regs(const unsigned int ctrl_num, + const memctl_options_t *popts, + fsl_ddr_cfg_regs_t *ddr, + const common_timing_params_t *common_dimm, + const dimm_params_t *dimm_params, + unsigned int dbw_cap_adj, + unsigned int size_only) +{ + unsigned int i; + unsigned int cas_latency; + unsigned int additive_latency; + unsigned int sr_it; + unsigned int zq_en; + unsigned int wrlvl_en; + unsigned int ip_rev = 0; + unsigned int unq_mrs_en = 0; + int cs_en = 1; + + memset(ddr, 0, sizeof(fsl_ddr_cfg_regs_t)); + + if (common_dimm == NULL) { + printf("Error: subset DIMM params struct null pointer\n"); + return 1; + } + + /* + * Process overrides first. + * + * FIXME: somehow add dereated caslat to this + */ + cas_latency = (popts->cas_latency_override) + ? popts->cas_latency_override_value + : common_dimm->lowest_common_spd_caslat; + + additive_latency = (popts->additive_latency_override) + ? popts->additive_latency_override_value + : common_dimm->additive_latency; + + sr_it = (popts->auto_self_refresh_en) + ? popts->sr_it + : 0; + /* ZQ calibration */ + zq_en = (popts->zq_en) ? 1 : 0; + /* write leveling */ + wrlvl_en = (popts->wrlvl_en) ? 
1 : 0; + + /* Chip Select Memory Bounds (CSn_BNDS) */ + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + unsigned long long ea, sa; + unsigned int cs_per_dimm + = CONFIG_CHIP_SELECTS_PER_CTRL / CONFIG_DIMM_SLOTS_PER_CTLR; + unsigned int dimm_number + = i / cs_per_dimm; + unsigned long long rank_density + = dimm_params[dimm_number].rank_density >> dbw_cap_adj; + + if (dimm_params[dimm_number].n_ranks == 0) { + debug("Skipping setup of CS%u " + "because n_ranks on DIMM %u is 0\n", i, dimm_number); + continue; + } + if (popts->memctl_interleaving) { + switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) { + case FSL_DDR_CS0_CS1_CS2_CS3: + break; + case FSL_DDR_CS0_CS1: + case FSL_DDR_CS0_CS1_AND_CS2_CS3: + if (i > 1) + cs_en = 0; + break; + case FSL_DDR_CS2_CS3: + default: + if (i > 0) + cs_en = 0; + break; + } + sa = common_dimm->base_address; + ea = sa + common_dimm->total_mem - 1; + } else if (!popts->memctl_interleaving) { + /* + * If memory interleaving between controllers is NOT + * enabled, the starting address for each memory + * controller is distinct. However, because rank + * interleaving is enabled, the starting and ending + * addresses of the total memory on that memory + * controller needs to be programmed into its + * respective CS0_BNDS. + */ + switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) { + case FSL_DDR_CS0_CS1_CS2_CS3: + sa = common_dimm->base_address; + ea = sa + common_dimm->total_mem - 1; + break; + case FSL_DDR_CS0_CS1_AND_CS2_CS3: + if ((i >= 2) && (dimm_number == 0)) { + sa = dimm_params[dimm_number].base_address + + 2 * rank_density; + ea = sa + 2 * rank_density - 1; + } else { + sa = dimm_params[dimm_number].base_address; + ea = sa + 2 * rank_density - 1; + } + break; + case FSL_DDR_CS0_CS1: + if (dimm_params[dimm_number].n_ranks > (i % cs_per_dimm)) { + sa = dimm_params[dimm_number].base_address; + ea = sa + rank_density - 1; + if (i != 1) + sa += (i % cs_per_dimm) * rank_density; + ea += (i % cs_per_dimm) * rank_density; + } else { + sa = 0; + ea = 0; + } + if (i == 0) + ea += rank_density; + break; + case FSL_DDR_CS2_CS3: + if (dimm_params[dimm_number].n_ranks > (i % cs_per_dimm)) { + sa = dimm_params[dimm_number].base_address; + ea = sa + rank_density - 1; + if (i != 3) + sa += (i % cs_per_dimm) * rank_density; + ea += (i % cs_per_dimm) * rank_density; + } else { + sa = 0; + ea = 0; + } + if (i == 2) + ea += (rank_density >> dbw_cap_adj); + break; + default: /* No bank(chip-select) interleaving */ + sa = dimm_params[dimm_number].base_address; + ea = sa + rank_density - 1; + if (dimm_params[dimm_number].n_ranks > (i % cs_per_dimm)) { + sa += (i % cs_per_dimm) * rank_density; + ea += (i % cs_per_dimm) * rank_density; + } else { + sa = 0; + ea = 0; + } + break; + } + } + + sa >>= 24; + ea >>= 24; + + if (cs_en) { + ddr->cs[i].bnds = (0 + | ((sa & 0xffff) << 16) /* starting address */ + | ((ea & 0xffff) << 0) /* ending address */ + ); + } else { + /* setting bnds to 0xffffffff for inactive CS */ + ddr->cs[i].bnds = 0xffffffff; + } + + debug("FSLDDR: cs[%d]_bnds = 0x%08x\n", i, ddr->cs[i].bnds); + set_csn_config(dimm_number, i, ddr, popts, dimm_params); + set_csn_config_2(i, ddr); + } + + /* + * In the case we only need to compute the ddr sdram size, we only need + * to set csn registers, so return from here. 
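The bounds loop above stores start and end addresses in 16 MiB units (the >> 24) packed as two 16-bit halves of CSn_BNDS. A worked example for a single 8 GiB region based at zero (illustrative sketch):

#include <stdio.h>

int main(void)
{
	unsigned long long sa = 0x0ULL;
	unsigned long long ea = 0x1FFFFFFFFULL;	/* 8 GiB - 1 */
	unsigned int bnds;

	sa >>= 24;	/* 16 MiB granularity, as in the loop above */
	ea >>= 24;
	bnds = ((unsigned int)(sa & 0xffff) << 16) |
	       (unsigned int)(ea & 0xffff);

	printf("cs0_bnds = 0x%08x\n", bnds);	/* 0x000001ff */
	return 0;
}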
+ */ + if (size_only) + return 0; + + set_ddr_eor(ddr, popts); + +#if !defined(CONFIG_SYS_FSL_DDR1) + set_timing_cfg_0(ctrl_num, ddr, popts, dimm_params); +#endif + + set_timing_cfg_3(ctrl_num, ddr, popts, common_dimm, cas_latency, + additive_latency); + set_timing_cfg_1(ctrl_num, ddr, popts, common_dimm, cas_latency); + set_timing_cfg_2(ctrl_num, ddr, popts, common_dimm, + cas_latency, additive_latency); + + set_ddr_cdr1(ddr, popts); + set_ddr_cdr2(ddr, popts); + set_ddr_sdram_cfg(ddr, popts, common_dimm); + ip_rev = fsl_ddr_get_version(ctrl_num); + if (ip_rev > 0x40400) + unq_mrs_en = 1; + + if ((ip_rev > 0x40700) && (popts->cswl_override != 0)) + ddr->debug[18] = popts->cswl_override; + + set_ddr_sdram_cfg_2(ctrl_num, ddr, popts, unq_mrs_en); + set_ddr_sdram_mode(ctrl_num, ddr, popts, common_dimm, + cas_latency, additive_latency, unq_mrs_en); + set_ddr_sdram_mode_2(ctrl_num, ddr, popts, common_dimm, unq_mrs_en); +#ifdef CONFIG_SYS_FSL_DDR4 + set_ddr_sdram_mode_9(ddr, popts, common_dimm, unq_mrs_en); + set_ddr_sdram_mode_10(ctrl_num, ddr, popts, common_dimm, unq_mrs_en); +#endif + set_ddr_sdram_rcw(ctrl_num, ddr, popts, common_dimm); + + set_ddr_sdram_interval(ctrl_num, ddr, popts, common_dimm); + set_ddr_data_init(ddr); + set_ddr_sdram_clk_cntl(ddr, popts); + set_ddr_init_addr(ddr); + set_ddr_init_ext_addr(ddr); + set_timing_cfg_4(ddr, popts); + set_timing_cfg_5(ddr, cas_latency); +#ifdef CONFIG_SYS_FSL_DDR4 + set_ddr_sdram_cfg_3(ddr, popts); + set_timing_cfg_6(ddr); + set_timing_cfg_7(ctrl_num, ddr, popts, common_dimm); + set_timing_cfg_8(ctrl_num, ddr, popts, common_dimm, cas_latency); + set_timing_cfg_9(ctrl_num, ddr, popts, common_dimm); + set_ddr_dq_mapping(ddr, dimm_params); +#endif + + set_ddr_zq_cntl(ddr, zq_en); + set_ddr_wrlvl_cntl(ddr, wrlvl_en, popts); + + set_ddr_sr_cntr(ddr, sr_it); + +#ifdef CONFIG_SYS_FSL_DDR_EMU + /* disble DDR training for emulator */ + ddr->debug[2] = 0x00000400; + ddr->debug[4] = 0xff800800; + ddr->debug[5] = 0x08000800; + ddr->debug[6] = 0x08000800; + ddr->debug[7] = 0x08000800; + ddr->debug[8] = 0x08000800; +#endif +#ifdef CONFIG_SYS_FSL_ERRATUM_A004508 + if ((ip_rev >= 0x40000) && (ip_rev < 0x40400)) + ddr->debug[2] |= 0x00000200; /* set bit 22 */ +#endif + +#ifdef CONFIG_SYS_FSL_ERRATUM_A009942 + if (popts->cpo_sample) + ddr->debug[28] = (ddr->debug[28] & 0xffffff00) | + popts->cpo_sample; +#endif + + return check_fsl_memctl_config_regs(ddr); +} + +#ifdef CONFIG_SYS_FSL_ERRATUM_A009942 +/* + * This additional workaround of A009942 checks the condition to determine if + * the CPO value set by the existing A009942 workaround needs to be updated. + * If need, print a warning to prompt user reconfigure DDR debug_29[24:31] with + * expected optimal value, the optimal value is highly board dependent. 
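The check that follows computes the suggested CPO as the midpoint of the observed per-lane minimum and maximum plus a fixed offset. The arithmetic in isolation, with made-up lane readings:

#include <stdio.h>

int main(void)
{
	unsigned int cpo_min = 0x11, cpo_max = 0x35;	/* example readings */
	unsigned int cpo_optimal = ((cpo_max + cpo_min) >> 1) + 0x27;

	printf("suggest popts->cpo_sample = 0x%x\n", cpo_optimal);
	return 0;
}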
+ */ +void erratum_a009942_check_cpo(void) +{ + struct ccsr_ddr __iomem *ddr = + (struct ccsr_ddr __iomem *)(CONFIG_SYS_FSL_DDR_ADDR); + u32 cpo, cpo_e, cpo_o, cpo_target, cpo_optimal; + u32 cpo_min = ddr_in32(&ddr->debug[9]) >> 24; + u32 cpo_max = cpo_min; + u32 sdram_cfg, i, tmp, lanes, ddr_type; + bool update_cpo = false, has_ecc = false; + + sdram_cfg = ddr_in32(&ddr->sdram_cfg); + if (sdram_cfg & SDRAM_CFG_32_BE) + lanes = 4; + else if (sdram_cfg & SDRAM_CFG_16_BE) + lanes = 2; + else + lanes = 8; + + if (sdram_cfg & SDRAM_CFG_ECC_EN) + has_ecc = true; + + /* determine the maximum and minimum CPO values */ + for (i = 9; i < 9 + lanes / 2; i++) { + cpo = ddr_in32(&ddr->debug[i]); + cpo_e = cpo >> 24; + cpo_o = (cpo >> 8) & 0xff; + tmp = min(cpo_e, cpo_o); + if (tmp < cpo_min) + cpo_min = tmp; + tmp = max(cpo_e, cpo_o); + if (tmp > cpo_max) + cpo_max = tmp; + } + + if (has_ecc) { + cpo = ddr_in32(&ddr->debug[13]); + cpo = cpo >> 24; + if (cpo < cpo_min) + cpo_min = cpo; + if (cpo > cpo_max) + cpo_max = cpo; + } + + cpo_target = ddr_in32(&ddr->debug[28]) & 0xff; + cpo_optimal = ((cpo_max + cpo_min) >> 1) + 0x27; + debug("cpo_optimal = 0x%x, cpo_target = 0x%x\n", cpo_optimal, + cpo_target); + debug("cpo_max = 0x%x, cpo_min = 0x%x\n", cpo_max, cpo_min); + + ddr_type = (sdram_cfg & SDRAM_CFG_SDRAM_TYPE_MASK) >> + SDRAM_CFG_SDRAM_TYPE_SHIFT; + if (ddr_type == SDRAM_TYPE_DDR4) + update_cpo = (cpo_min + 0x3b) < cpo_target ? true : false; + else if (ddr_type == SDRAM_TYPE_DDR3) + update_cpo = (cpo_min + 0x3f) < cpo_target ? true : false; + + if (update_cpo) { + printf("WARN: pls set popts->cpo_sample = 0x%x ", cpo_optimal); + printf("in <board>/ddr.c to optimize cpo\n"); + } +} +#endif diff --git a/roms/u-boot/drivers/ddr/fsl/ddr1_dimm_params.c b/roms/u-boot/drivers/ddr/fsl/ddr1_dimm_params.c new file mode 100644 index 000000000..e5481eaa0 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/ddr1_dimm_params.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008 Freescale Semiconductor, Inc. + */ + +#include <common.h> +#include <fsl_ddr_sdram.h> +#include <log.h> +#include <asm/bitops.h> + +#include <fsl_ddr.h> + +/* + * Calculate the Density of each Physical Rank. + * Returned size is in bytes. + * + * Study these table from Byte 31 of JEDEC SPD Spec. + * + * DDR I DDR II + * Bit Size Size + * --- ----- ------ + * 7 high 512MB 512MB + * 6 256MB 256MB + * 5 128MB 128MB + * 4 64MB 16GB + * 3 32MB 8GB + * 2 16MB 4GB + * 1 2GB 2GB + * 0 low 1GB 1GB + * + * Reorder Table to be linear by stripping the bottom + * 2 or 5 bits off and shifting them up to the top. + */ + +static unsigned long long +compute_ranksize(unsigned int mem_type, unsigned char row_dens) +{ + unsigned long long bsize; + + /* Bottom 2 bits up to the top. */ + bsize = ((row_dens >> 2) | ((row_dens & 3) << 6)); + bsize <<= 24ULL; + debug("DDR: DDR I rank density = 0x%16llx\n", bsize); + + return bsize; +} + +/* + * Convert a two-nibble BCD value into a cycle time. + * While the spec calls for nano-seconds, picos are returned. + * + * This implements the tables for bytes 9, 23 and 25 for both + * DDR I and II. No allowance for distinguishing the invalid + * fields absent for DDR I yet present in DDR II is made. + * (That is, cycle times of .25, .33, .66 and .75 ns are + * allowed for both DDR II and I.) + */ +static unsigned int +convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val) +{ + /* Table look up the lower nibble, allow DDR I & II. 
*/ + unsigned int tenths_ps[16] = { + 0, + 100, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + 250, /* This and the next 3 entries valid ... */ + 330, /* ... only for tCK calculations. */ + 660, + 750, + 0, /* undefined */ + 0 /* undefined */ + }; + + unsigned int whole_ns = (spd_val & 0xF0) >> 4; + unsigned int tenth_ns = spd_val & 0x0F; + unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns]; + + return ps; +} + +static unsigned int +convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val) +{ + unsigned int tenth_ns = (spd_val & 0xF0) >> 4; + unsigned int hundredth_ns = spd_val & 0x0F; + unsigned int ps = tenth_ns * 100 + hundredth_ns * 10; + + return ps; +} + +static unsigned int byte40_table_ps[8] = { + 0, + 250, + 330, + 500, + 660, + 750, + 0, /* supposed to be RFC, but not sure what that means */ + 0 /* Undefined */ +}; + +static unsigned int +compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc) +{ + return ((trctrfc_ext & 0x1) * 256 + trfc) * 1000 + + byte40_table_ps[(trctrfc_ext >> 1) & 0x7]; +} + +static unsigned int +compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc) +{ + return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7]; +} + +/* + * tCKmax from DDR I SPD Byte 43 + * + * Bits 7:2 == whole ns + * Bits 1:0 == quarter ns + * 00 == 0.00 ns + * 01 == 0.25 ns + * 10 == 0.50 ns + * 11 == 0.75 ns + * + * Returns picoseconds. + */ +static unsigned int +compute_tckmax_from_spd_ps(unsigned int byte43) +{ + return (byte43 >> 2) * 1000 + (byte43 & 0x3) * 250; +} + +/* + * Determine Refresh Rate. Ignore self refresh bit on DDR I. + * Table from SPD Spec, Byte 12, converted to picoseconds and + * filled in with "default" normal values. + */ +static unsigned int +determine_refresh_rate_ps(const unsigned int spd_refresh) +{ + unsigned int refresh_time_ps[8] = { + 15625000, /* 0 Normal 1.00x */ + 3900000, /* 1 Reduced .25x */ + 7800000, /* 2 Extended .50x */ + 31300000, /* 3 Extended 2.00x */ + 62500000, /* 4 Extended 4.00x */ + 125000000, /* 5 Extended 8.00x */ + 15625000, /* 6 Normal 1.00x filler */ + 15625000, /* 7 Normal 1.00x filler */ + }; + + return refresh_time_ps[spd_refresh & 0x7]; +} + +/* + * The purpose of this function is to compute a suitable + * CAS latency given the DRAM clock period. The SPD only + * defines at most 3 CAS latencies. Typically the slower in + * frequency the DIMM runs at, the shorter its CAS latency can be. + * If the DIMM is operating at a sufficiently low frequency, + * it may be able to run at a CAS latency shorter than the + * shortest SPD-defined CAS latency. + * + * If a CAS latency is not found, 0 is returned. + * + * Do this by finding in the standard speed bin table the longest + * tCKmin that doesn't exceed the value of mclk_ps (tCK). + * + * An assumption made is that the SDRAM device allows the + * CL to be programmed for a value that is lower than those + * advertised by the SPD. This is not always the case, + * as those modes not defined in the SPD are optional. + * + * CAS latency de-rating based upon values JEDEC Standard No. 79-E + * Table 11. 
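compute_tckmax_from_spd_ps() above splits SPD byte 43 into whole nanoseconds (bits 7:2) and quarter-nanosecond steps (bits 1:0). One worked decode:

#include <assert.h>

static unsigned int tckmax_ps(unsigned int byte43)
{
	return (byte43 >> 2) * 1000 + (byte43 & 0x3) * 250;
}

int main(void)
{
	/* 0x32: whole = 0x32 >> 2 = 12 ns, fraction code 2 = 0.50 ns */
	assert(tckmax_ps(0x32) == 12500);
	return 0;
}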
+ * + * ordinal 2, ddr1_speed_bins[1] contains tCK for CL=2 + */ + /* CL2.0 CL2.5 CL3.0 */ +unsigned short ddr1_speed_bins[] = {0, 7500, 6000, 5000 }; + +unsigned int +compute_derated_DDR1_CAS_latency(unsigned int mclk_ps) +{ + const unsigned int num_speed_bins = ARRAY_SIZE(ddr1_speed_bins); + unsigned int lowest_tCKmin_found = 0; + unsigned int lowest_tCKmin_CL = 0; + unsigned int i; + + debug("mclk_ps = %u\n", mclk_ps); + + for (i = 0; i < num_speed_bins; i++) { + unsigned int x = ddr1_speed_bins[i]; + debug("i=%u, x = %u, lowest_tCKmin_found = %u\n", + i, x, lowest_tCKmin_found); + if (x && lowest_tCKmin_found <= x && x <= mclk_ps) { + lowest_tCKmin_found = x; + lowest_tCKmin_CL = i + 1; + } + } + + debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL); + + return lowest_tCKmin_CL; +} + +/* + * ddr_compute_dimm_parameters for DDR1 SPD + * + * Compute DIMM parameters based upon the SPD information in spd. + * Writes the results to the dimm_params_t structure pointed by pdimm. + * + * FIXME: use #define for the retvals + */ +unsigned int ddr_compute_dimm_parameters(const unsigned int ctrl_num, + const ddr1_spd_eeprom_t *spd, + dimm_params_t *pdimm, + unsigned int dimm_number) +{ + unsigned int retval; + + if (spd->mem_type) { + if (spd->mem_type != SPD_MEMTYPE_DDR) { + printf("DIMM %u: is not a DDR1 SPD.\n", dimm_number); + return 1; + } + } else { + memset(pdimm, 0, sizeof(dimm_params_t)); + return 1; + } + + retval = ddr1_spd_check(spd); + if (retval) { + printf("DIMM %u: failed checksum\n", dimm_number); + return 2; + } + + /* + * The part name in ASCII in the SPD EEPROM is not null terminated. + * Guarantee null termination here by presetting all bytes to 0 + * and copying the part name in ASCII from the SPD onto it + */ + memset(pdimm->mpart, 0, sizeof(pdimm->mpart)); + memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1); + + /* DIMM organization parameters */ + pdimm->n_ranks = spd->nrows; + pdimm->rank_density = compute_ranksize(spd->mem_type, spd->bank_dens); + pdimm->capacity = pdimm->n_ranks * pdimm->rank_density; + pdimm->data_width = spd->dataw_lsb; + pdimm->primary_sdram_width = spd->primw; + pdimm->ec_sdram_width = spd->ecw; + + /* + * FIXME: Need to determine registered_dimm status. + * 1 == register buffered + * 0 == unbuffered + */ + pdimm->registered_dimm = 0; /* unbuffered */ + + /* SDRAM device parameters */ + pdimm->n_row_addr = spd->nrow_addr; + pdimm->n_col_addr = spd->ncol_addr; + pdimm->n_banks_per_sdram_device = spd->nbanks; + pdimm->edc_config = spd->config; + pdimm->burst_lengths_bitmask = spd->burstl; + + /* + * Calculate the Maximum Data Rate based on the Minimum Cycle time. + * The SPD clk_cycle field (tCKmin) is measured in tenths of + * nanoseconds and represented as BCD. + */ + pdimm->tckmin_x_ps + = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle); + pdimm->tckmin_x_minus_1_ps + = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2); + pdimm->tckmin_x_minus_2_ps + = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3); + + pdimm->tckmax_ps = compute_tckmax_from_spd_ps(spd->tckmax); + + /* + * Compute CAS latencies defined by SPD + * The SPD caslat_x should have at least 1 and at most 3 bits set. + * + * If cas_lat after masking is 0, the __ilog2 function returns + * 255 into the variable. This behavior is abused once. 
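The three SPD CAS latencies are peeled off the bitmask highest-first by repeatedly clearing the top set bit, as the code below does with __ilog2(). A standalone version with a local ilog2 (note the real __ilog2() returns 255 for 0, the behavior the comment calls abused; this sketch returns -1 instead):

#include <assert.h>

static int ilog2_sketch(unsigned int v)	/* stand-in for __ilog2() */
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int cas_lat = 0x1C;	/* bits 4, 3 and 2 set */
	int x = ilog2_sketch(cas_lat);
	int x1 = ilog2_sketch(cas_lat & ~(1u << x));
	int x2 = ilog2_sketch(cas_lat & ~(1u << x) & ~(1u << x1));

	assert(x == 4 && x1 == 3 && x2 == 2);
	return 0;
}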
+ */ + pdimm->caslat_x = __ilog2(spd->cas_lat); + pdimm->caslat_x_minus_1 = __ilog2(spd->cas_lat + & ~(1 << pdimm->caslat_x)); + pdimm->caslat_x_minus_2 = __ilog2(spd->cas_lat + & ~(1 << pdimm->caslat_x) + & ~(1 << pdimm->caslat_x_minus_1)); + + /* Compute CAS latencies below that defined by SPD */ + pdimm->caslat_lowest_derated = compute_derated_DDR1_CAS_latency( + get_memory_clk_period_ps(ctrl_num)); + + /* Compute timing parameters */ + pdimm->trcd_ps = spd->trcd * 250; + pdimm->trp_ps = spd->trp * 250; + pdimm->tras_ps = spd->tras * 1000; + + pdimm->twr_ps = mclk_to_picos(ctrl_num, 3); + pdimm->twtr_ps = mclk_to_picos(ctrl_num, 1); + pdimm->trfc_ps = compute_trfc_ps_from_spd(0, spd->trfc); + + pdimm->trrd_ps = spd->trrd * 250; + pdimm->trc_ps = compute_trc_ps_from_spd(0, spd->trc); + + pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh); + + pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup); + pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold); + pdimm->tds_ps + = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup); + pdimm->tdh_ps + = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold); + + pdimm->trtp_ps = mclk_to_picos(ctrl_num, 2); /* By the book. */ + pdimm->tdqsq_max_ps = spd->tdqsq * 10; + pdimm->tqhs_ps = spd->tqhs * 10; + + return 0; +} diff --git a/roms/u-boot/drivers/ddr/fsl/ddr2_dimm_params.c b/roms/u-boot/drivers/ddr/fsl/ddr2_dimm_params.c new file mode 100644 index 000000000..3b78118a9 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/ddr2_dimm_params.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008 Freescale Semiconductor, Inc. + */ + +#include <common.h> +#include <fsl_ddr_sdram.h> +#include <log.h> +#include <asm/bitops.h> + +#include <fsl_ddr.h> +/* + * Calculate the Density of each Physical Rank. + * Returned size is in bytes. + * + * Study these table from Byte 31 of JEDEC SPD Spec. + * + * DDR I DDR II + * Bit Size Size + * --- ----- ------ + * 7 high 512MB 512MB + * 6 256MB 256MB + * 5 128MB 128MB + * 4 64MB 16GB + * 3 32MB 8GB + * 2 16MB 4GB + * 1 2GB 2GB + * 0 low 1GB 1GB + * + * Reorder Table to be linear by stripping the bottom + * 2 or 5 bits off and shifting them up to the top. + * + */ +static unsigned long long +compute_ranksize(unsigned int mem_type, unsigned char row_dens) +{ + unsigned long long bsize; + + /* Bottom 5 bits up to the top. */ + bsize = ((row_dens >> 5) | ((row_dens & 31) << 3)); + bsize <<= 27ULL; + debug("DDR: DDR II rank density = 0x%16llx\n", bsize); + + return bsize; +} + +/* + * Convert a two-nibble BCD value into a cycle time. + * While the spec calls for nano-seconds, picos are returned. + * + * This implements the tables for bytes 9, 23 and 25 for both + * DDR I and II. No allowance for distinguishing the invalid + * fields absent for DDR I yet present in DDR II is made. + * (That is, cycle times of .25, .33, .66 and .75 ns are + * allowed for both DDR II and I.) + */ +static unsigned int +convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val) +{ + /* Table look up the lower nibble, allow DDR I & II. */ + unsigned int tenths_ps[16] = { + 0, + 100, + 200, + 300, + 400, + 500, + 600, + 700, + 800, + 900, + 250, /* This and the next 3 entries valid ... */ + 330, /* ... only for tCK calculations. 
 */
+	660,
+	750,
+	0,	/* undefined */
+	0	/* undefined */
+	};
+
+	unsigned int whole_ns = (spd_val & 0xF0) >> 4;
+	unsigned int tenth_ns = spd_val & 0x0F;
+	unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
+
+	return ps;
+}
+
+static unsigned int
+convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
+{
+	unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
+	unsigned int hundredth_ns = spd_val & 0x0F;
+	unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
+
+	return ps;
+}
+
+static unsigned int byte40_table_ps[8] = {
+	0,
+	250,
+	330,
+	500,
+	660,
+	750,
+	0,	/* supposed to be RFC, but not sure what that means */
+	0	/* Undefined */
+};
+
+static unsigned int
+compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
+{
+	return (((trctrfc_ext & 0x1) * 256) + trfc) * 1000 +
+		byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
+}
+
+static unsigned int
+compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
+{
+	return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
+}
+
+/*
+ * Determine Refresh Rate. Ignore self refresh bit on DDR I.
+ * Table from SPD Spec, Byte 12, converted to picoseconds and
+ * filled in with "default" normal values.
+ */
+static unsigned int
+determine_refresh_rate_ps(const unsigned int spd_refresh)
+{
+	unsigned int refresh_time_ps[8] = {
+		15625000,	/* 0 Normal    1.00x */
+		3900000,	/* 1 Reduced    .25x */
+		7800000,	/* 2 Extended   .50x */
+		31300000,	/* 3 Extended  2.00x */
+		62500000,	/* 4 Extended  4.00x */
+		125000000,	/* 5 Extended  8.00x */
+		15625000,	/* 6 Normal    1.00x  filler */
+		15625000,	/* 7 Normal    1.00x  filler */
+	};
+
+	return refresh_time_ps[spd_refresh & 0x7];
+}
+
+/*
+ * The purpose of this function is to compute a suitable
+ * CAS latency given the DRAM clock period. The SPD only
+ * defines at most 3 CAS latencies. Typically the slower in
+ * frequency the DIMM runs at, the shorter its CAS latency can
+ * be. If the DIMM is operating at a sufficiently low frequency,
+ * it may be able to run at a CAS latency shorter than the
+ * shortest SPD-defined CAS latency.
+ *
+ * If a CAS latency is not found, 0 is returned.
+ *
+ * Do this by finding in the standard speed bin table the longest
+ * tCKmin that doesn't exceed the value of mclk_ps (tCK).
+ *
+ * An assumption made is that the SDRAM device allows the
+ * CL to be programmed for a value that is lower than those
+ * advertised by the SPD. This is not always the case,
+ * as those modes not defined in the SPD are optional.
+ *
+ * CAS latency de-rating based upon values from JEDEC Standard No. 
79-2C
+ * Table 40, "DDR2 SDRAM standard speed bins and tCK, tRCD, tRP, tRAS,
+ * and tRC for corresponding bin"
+ *
+ * ordinal 2, ddr2_speed_bins[1] contains tCK for CL=3
+ * Not certain if any good value exists for CL=2
+ */
+			/* CL2   CL3   CL4   CL5   CL6   CL7 */
+unsigned short ddr2_speed_bins[] = { 0, 5000, 3750, 3000, 2500, 1875 };
+
+unsigned int
+compute_derated_DDR2_CAS_latency(unsigned int mclk_ps)
+{
+	const unsigned int num_speed_bins = ARRAY_SIZE(ddr2_speed_bins);
+	unsigned int lowest_tCKmin_found = 0;
+	unsigned int lowest_tCKmin_CL = 0;
+	unsigned int i;
+
+	debug("mclk_ps = %u\n", mclk_ps);
+
+	for (i = 0; i < num_speed_bins; i++) {
+		unsigned int x = ddr2_speed_bins[i];
+		debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
+		      i, x, lowest_tCKmin_found);
+		if (x && x <= mclk_ps && x >= lowest_tCKmin_found) {
+			lowest_tCKmin_found = x;
+			lowest_tCKmin_CL = i + 2;
+		}
+	}
+
+	debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
+
+	return lowest_tCKmin_CL;
+}
+
+/*
+ * ddr_compute_dimm_parameters for DDR2 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the dimm_params_t structure pointed to by pdimm.
+ *
+ * FIXME: use #define for the retvals
+ */
+unsigned int ddr_compute_dimm_parameters(const unsigned int ctrl_num,
+					 const ddr2_spd_eeprom_t *spd,
+					 dimm_params_t *pdimm,
+					 unsigned int dimm_number)
+{
+	unsigned int retval;
+
+	if (spd->mem_type) {
+		if (spd->mem_type != SPD_MEMTYPE_DDR2) {
+			printf("DIMM %u: is not a DDR2 SPD.\n", dimm_number);
+			return 1;
+		}
+	} else {
+		memset(pdimm, 0, sizeof(dimm_params_t));
+		return 1;
+	}
+
+	retval = ddr2_spd_check(spd);
+	if (retval) {
+		printf("DIMM %u: failed checksum\n", dimm_number);
+		return 2;
+	}
+
+	/*
+	 * The part name in ASCII in the SPD EEPROM is not null terminated.
+	 * Guarantee null termination here by presetting all bytes to 0
+	 * and copying the part name in ASCII from the SPD onto it
+	 */
+	memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+	memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+	/* DIMM organization parameters */
+	pdimm->n_ranks = (spd->mod_ranks & 0x7) + 1;
+	pdimm->rank_density = compute_ranksize(spd->mem_type, spd->rank_dens);
+	pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+	pdimm->data_width = spd->dataw;
+	pdimm->primary_sdram_width = spd->primw;
+	pdimm->ec_sdram_width = spd->ecw;
+
+	/* These are all the types defined by the JEDEC DDR2 SPD 1.3 spec */
+	switch (spd->dimm_type) {
+	case DDR2_SPD_DIMMTYPE_RDIMM:
+	case DDR2_SPD_DIMMTYPE_72B_SO_RDIMM:
+	case DDR2_SPD_DIMMTYPE_MINI_RDIMM:
+		/* Registered/buffered DIMMs */
+		pdimm->registered_dimm = 1;
+		break;
+
+	case DDR2_SPD_DIMMTYPE_UDIMM:
+	case DDR2_SPD_DIMMTYPE_SO_DIMM:
+	case DDR2_SPD_DIMMTYPE_MICRO_DIMM:
+	case DDR2_SPD_DIMMTYPE_MINI_UDIMM:
+		/* Unbuffered DIMMs */
+		pdimm->registered_dimm = 0;
+		break;
+
+	case DDR2_SPD_DIMMTYPE_72B_SO_CDIMM:
+	default:
+		printf("unknown dimm_type 0x%02X\n", spd->dimm_type);
+		return 1;
+	}
+
+	/* SDRAM device parameters */
+	pdimm->n_row_addr = spd->nrow_addr;
+	pdimm->n_col_addr = spd->ncol_addr;
+	pdimm->n_banks_per_sdram_device = spd->nbanks;
+	pdimm->edc_config = spd->config;
+	pdimm->burst_lengths_bitmask = spd->burstl;
+
+	/*
+	 * Calculate the Maximum Data Rate based on the Minimum Cycle time.
+	 * The SPD clk_cycle field (tCKmin) is measured in tenths of
+	 * nanoseconds and represented as BCD. 
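+	 * For example, clk_cycle = 0x37 decodes as 3.7 ns = 3700 ps,
+	 * and the special low-nibble codes give e.g. 0x3A = 3000 + 250
+	 * = 3250 ps.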
+ */ + pdimm->tckmin_x_ps + = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle); + pdimm->tckmin_x_minus_1_ps + = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2); + pdimm->tckmin_x_minus_2_ps + = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3); + + pdimm->tckmax_ps = convert_bcd_tenths_to_cycle_time_ps(spd->tckmax); + + /* + * Compute CAS latencies defined by SPD + * The SPD caslat_x should have at least 1 and at most 3 bits set. + * + * If cas_lat after masking is 0, the __ilog2 function returns + * 255 into the variable. This behavior is abused once. + */ + pdimm->caslat_x = __ilog2(spd->cas_lat); + pdimm->caslat_x_minus_1 = __ilog2(spd->cas_lat + & ~(1 << pdimm->caslat_x)); + pdimm->caslat_x_minus_2 = __ilog2(spd->cas_lat + & ~(1 << pdimm->caslat_x) + & ~(1 << pdimm->caslat_x_minus_1)); + + /* Compute CAS latencies below that defined by SPD */ + pdimm->caslat_lowest_derated = compute_derated_DDR2_CAS_latency( + get_memory_clk_period_ps(ctrl_num)); + + /* Compute timing parameters */ + pdimm->trcd_ps = spd->trcd * 250; + pdimm->trp_ps = spd->trp * 250; + pdimm->tras_ps = spd->tras * 1000; + + pdimm->twr_ps = spd->twr * 250; + pdimm->twtr_ps = spd->twtr * 250; + pdimm->trfc_ps = compute_trfc_ps_from_spd(spd->trctrfc_ext, spd->trfc); + + pdimm->trrd_ps = spd->trrd * 250; + pdimm->trc_ps = compute_trc_ps_from_spd(spd->trctrfc_ext, spd->trc); + + pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh); + + pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup); + pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold); + pdimm->tds_ps + = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup); + pdimm->tdh_ps + = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold); + + pdimm->trtp_ps = spd->trtp * 250; + pdimm->tdqsq_max_ps = spd->tdqsq * 10; + pdimm->tqhs_ps = spd->tqhs * 10; + + return 0; +} diff --git a/roms/u-boot/drivers/ddr/fsl/ddr3_dimm_params.c b/roms/u-boot/drivers/ddr/fsl/ddr3_dimm_params.c new file mode 100644 index 000000000..8464438c5 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/ddr3_dimm_params.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008-2012 Freescale Semiconductor, Inc. + * Dave Liu <daveliu@freescale.com> + * + * calculate the organization and timing parameter + * from ddr3 spd, please refer to the spec + * JEDEC standard No.21-C 4_01_02_11R18.pdf + */ + +#include <common.h> +#include <fsl_ddr_sdram.h> +#include <log.h> + +#include <fsl_ddr.h> + +/* + * Calculate the Density of each Physical Rank. + * Returned size is in bytes. 
+ * + * each rank size = + * sdram capacity(bit) / 8 * primary bus width / sdram width + * + * where: sdram capacity = spd byte4[3:0] + * primary bus width = spd byte8[2:0] + * sdram width = spd byte7[2:0] + * + * SPD byte4 - sdram density and banks + * bit[3:0] size(bit) size(byte) + * 0000 256Mb 32MB + * 0001 512Mb 64MB + * 0010 1Gb 128MB + * 0011 2Gb 256MB + * 0100 4Gb 512MB + * 0101 8Gb 1GB + * 0110 16Gb 2GB + * + * SPD byte8 - module memory bus width + * bit[2:0] primary bus width + * 000 8bits + * 001 16bits + * 010 32bits + * 011 64bits + * + * SPD byte7 - module organiztion + * bit[2:0] sdram device width + * 000 4bits + * 001 8bits + * 010 16bits + * 011 32bits + * + */ +static unsigned long long +compute_ranksize(const ddr3_spd_eeprom_t *spd) +{ + unsigned long long bsize; + + int nbit_sdram_cap_bsize = 0; + int nbit_primary_bus_width = 0; + int nbit_sdram_width = 0; + + if ((spd->density_banks & 0xf) < 7) + nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28; + if ((spd->bus_width & 0x7) < 4) + nbit_primary_bus_width = (spd->bus_width & 0x7) + 3; + if ((spd->organization & 0x7) < 4) + nbit_sdram_width = (spd->organization & 0x7) + 2; + + bsize = 1ULL << (nbit_sdram_cap_bsize - 3 + + nbit_primary_bus_width - nbit_sdram_width); + + debug("DDR: DDR III rank density = 0x%16llx\n", bsize); + + return bsize; +} + +/* + * ddr_compute_dimm_parameters for DDR3 SPD + * + * Compute DIMM parameters based upon the SPD information in spd. + * Writes the results to the dimm_params_t structure pointed by pdimm. + * + */ +unsigned int ddr_compute_dimm_parameters(const unsigned int ctrl_num, + const ddr3_spd_eeprom_t *spd, + dimm_params_t *pdimm, + unsigned int dimm_number) +{ + unsigned int retval; + unsigned int mtb_ps; + int ftb_10th_ps; + int i; + + if (spd->mem_type) { + if (spd->mem_type != SPD_MEMTYPE_DDR3) { + printf("DIMM %u: is not a DDR3 SPD.\n", dimm_number); + return 1; + } + } else { + memset(pdimm, 0, sizeof(dimm_params_t)); + return 1; + } + + retval = ddr3_spd_check(spd); + if (retval) { + printf("DIMM %u: failed checksum\n", dimm_number); + return 2; + } + + /* + * The part name in ASCII in the SPD EEPROM is not null terminated. 
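+	 * (For DDR3 SPDs this is the module part number field, bytes
+	 * 128-145.)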
+	 * Guarantee null termination here by presetting all bytes to 0
+	 * and copying the part name in ASCII from the SPD onto it
+	 */
+	memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+	if ((spd->info_size_crc & 0xF) > 1)
+		memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+	/* DIMM organization parameters */
+	pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+	pdimm->rank_density = compute_ranksize(spd);
+	pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+	pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+	if ((spd->bus_width >> 3) & 0x3)
+		pdimm->ec_sdram_width = 8;
+	else
+		pdimm->ec_sdram_width = 0;
+	pdimm->data_width = pdimm->primary_sdram_width
+			  + pdimm->ec_sdram_width;
+	pdimm->device_width = 1 << ((spd->organization & 0x7) + 2);
+
+	/* These are the types defined by the JEDEC DDR3 SPD spec */
+	pdimm->mirrored_dimm = 0;
+	pdimm->registered_dimm = 0;
+	switch (spd->module_type & DDR3_SPD_MODULETYPE_MASK) {
+	case DDR3_SPD_MODULETYPE_RDIMM:
+	case DDR3_SPD_MODULETYPE_MINI_RDIMM:
+	case DDR3_SPD_MODULETYPE_72B_SO_RDIMM:
+		/* Registered/buffered DIMMs */
+		pdimm->registered_dimm = 1;
+		for (i = 0; i < 16; i += 2) {
+			u8 rcw = spd->mod_section.registered.rcw[i/2];
+			pdimm->rcw[i] = (rcw >> 0) & 0x0F;
+			pdimm->rcw[i+1] = (rcw >> 4) & 0x0F;
+		}
+		break;
+
+	case DDR3_SPD_MODULETYPE_UDIMM:
+	case DDR3_SPD_MODULETYPE_SO_DIMM:
+	case DDR3_SPD_MODULETYPE_MICRO_DIMM:
+	case DDR3_SPD_MODULETYPE_MINI_UDIMM:
+	case DDR3_SPD_MODULETYPE_MINI_CDIMM:
+	case DDR3_SPD_MODULETYPE_72B_SO_UDIMM:
+	case DDR3_SPD_MODULETYPE_72B_SO_CDIMM:
+	case DDR3_SPD_MODULETYPE_LRDIMM:
+	case DDR3_SPD_MODULETYPE_16B_SO_DIMM:
+	case DDR3_SPD_MODULETYPE_32B_SO_DIMM:
+		/* Unbuffered DIMMs */
+		if (spd->mod_section.unbuffered.addr_mapping & 0x1)
+			pdimm->mirrored_dimm = 1;
+		break;
+
+	default:
+		printf("unknown module_type 0x%02X\n", spd->module_type);
+		return 1;
+	}
+
+	/* SDRAM device parameters */
+	pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+	pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+	pdimm->n_banks_per_sdram_device = 8 << ((spd->density_banks >> 4) & 0x7);
+
+	/*
+	 * The SPD spec has no ECC bit, so treat the DIMM as ECC-capable
+	 * whenever the extension (check) bus is present.
+	 */
+	if (pdimm->ec_sdram_width)
+		pdimm->edc_config = 0x02;
+	else
+		pdimm->edc_config = 0x00;
+
+	/*
+	 * The SPD spec has no burst length byte, but the DDR3 spec
+	 * defines native BL8 and BC4: BL8 is bit 3, BC4 is bit 2.
+	 */
+	pdimm->burst_lengths_bitmask = 0x0c;
+
+	/* MTB - medium timebase
+	 * The unit in the SPD spec is ns;
+	 * we convert it to ps. 
+ * eg: MTB = 0.125ns (125ps) + */ + mtb_ps = (spd->mtb_dividend * 1000) /spd->mtb_divisor; + pdimm->mtb_ps = mtb_ps; + + /* + * FTB - fine timebase + * use 1/10th of ps as our unit to avoid floating point + * eg, 10 for 1ps, 25 for 2.5ps, 50 for 5ps + */ + ftb_10th_ps = + ((spd->ftb_div & 0xf0) >> 4) * 10 / (spd->ftb_div & 0x0f); + pdimm->ftb_10th_ps = ftb_10th_ps; + /* + * sdram minimum cycle time + * we assume the MTB is 0.125ns + * eg: + * tck_min=15 MTB (1.875ns) ->DDR3-1066 + * =12 MTB (1.5ns) ->DDR3-1333 + * =10 MTB (1.25ns) ->DDR3-1600 + */ + pdimm->tckmin_x_ps = spd->tck_min * mtb_ps + + (spd->fine_tck_min * ftb_10th_ps) / 10; + + /* + * CAS latency supported + * bit4 - CL4 + * bit5 - CL5 + * bit18 - CL18 + */ + pdimm->caslat_x = ((spd->caslat_msb << 8) | spd->caslat_lsb) << 4; + + /* + * min CAS latency time + * eg: taa_min = + * DDR3-800D 100 MTB (12.5ns) + * DDR3-1066F 105 MTB (13.125ns) + * DDR3-1333H 108 MTB (13.5ns) + * DDR3-1600H 90 MTB (11.25ns) + */ + pdimm->taa_ps = spd->taa_min * mtb_ps + + (spd->fine_taa_min * ftb_10th_ps) / 10; + + /* + * min write recovery time + * eg: + * twr_min = 120 MTB (15ns) -> all speed grades. + */ + pdimm->twr_ps = spd->twr_min * mtb_ps; + + /* + * min RAS to CAS delay time + * eg: trcd_min = + * DDR3-800 100 MTB (12.5ns) + * DDR3-1066F 105 MTB (13.125ns) + * DDR3-1333H 108 MTB (13.5ns) + * DDR3-1600H 90 MTB (11.25) + */ + pdimm->trcd_ps = spd->trcd_min * mtb_ps + + (spd->fine_trcd_min * ftb_10th_ps) / 10; + + /* + * min row active to row active delay time + * eg: trrd_min = + * DDR3-800(1KB page) 80 MTB (10ns) + * DDR3-1333(1KB page) 48 MTB (6ns) + */ + pdimm->trrd_ps = spd->trrd_min * mtb_ps; + + /* + * min row precharge delay time + * eg: trp_min = + * DDR3-800D 100 MTB (12.5ns) + * DDR3-1066F 105 MTB (13.125ns) + * DDR3-1333H 108 MTB (13.5ns) + * DDR3-1600H 90 MTB (11.25ns) + */ + pdimm->trp_ps = spd->trp_min * mtb_ps + + (spd->fine_trp_min * ftb_10th_ps) / 10; + + /* min active to precharge delay time + * eg: tRAS_min = + * DDR3-800D 300 MTB (37.5ns) + * DDR3-1066F 300 MTB (37.5ns) + * DDR3-1333H 288 MTB (36ns) + * DDR3-1600H 280 MTB (35ns) + */ + pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) | spd->tras_min_lsb) + * mtb_ps; + /* + * min active to actice/refresh delay time + * eg: tRC_min = + * DDR3-800D 400 MTB (50ns) + * DDR3-1066F 405 MTB (50.625ns) + * DDR3-1333H 396 MTB (49.5ns) + * DDR3-1600H 370 MTB (46.25ns) + */ + pdimm->trc_ps = (((spd->tras_trc_ext & 0xf0) << 4) | spd->trc_min_lsb) + * mtb_ps + (spd->fine_trc_min * ftb_10th_ps) / 10; + /* + * min refresh recovery delay time + * eg: tRFC_min = + * 512Mb 720 MTB (90ns) + * 1Gb 880 MTB (110ns) + * 2Gb 1280 MTB (160ns) + */ + pdimm->trfc_ps = ((spd->trfc_min_msb << 8) | spd->trfc_min_lsb) + * mtb_ps; + /* + * min internal write to read command delay time + * eg: twtr_min = 40 MTB (7.5ns) - all speed bins. + * tWRT is at least 4 mclk independent of operating freq. + */ + pdimm->twtr_ps = spd->twtr_min * mtb_ps; + + /* + * min internal read to precharge command delay time + * eg: trtp_min = 40 MTB (7.5ns) - all speed bins. + * tRTP is at least 4 mclk independent of operating freq. 
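+	 * (For example, at tCK = 1500 ps the 4-clock floor is only 6 ns,
+	 * so the 7.5 ns SPD value remains the binding constraint.)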
+ */
+	pdimm->trtp_ps = spd->trtp_min * mtb_ps;
+
+	/*
+	 * Average periodic refresh interval
+	 * tREFI = 7.8 us at normal temperature range
+	 *       = 3.9 us at ext temperature range
+	 */
+	pdimm->refresh_rate_ps = 7800000;
+	if ((spd->therm_ref_opt & 0x1) && !(spd->therm_ref_opt & 0x2)) {
+		pdimm->refresh_rate_ps = 3900000;
+		pdimm->extended_op_srt = 1;
+	}
+
+	/*
+	 * min four active window delay time
+	 * eg: tfaw_min =
+	 * DDR3-800(1KB page)	320 MTB (40ns)
+	 * DDR3-1066(1KB page)	300 MTB (37.5ns)
+	 * DDR3-1333(1KB page)	240 MTB (30ns)
+	 * DDR3-1600(1KB page)	240 MTB (30ns)
+	 */
+	pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min)
+			* mtb_ps;
+
+	return 0;
+}
diff --git a/roms/u-boot/drivers/ddr/fsl/ddr4_dimm_params.c b/roms/u-boot/drivers/ddr/fsl/ddr4_dimm_params.c
new file mode 100644
index 000000000..e2bdc12ef
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/fsl/ddr4_dimm_params.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ *
+ * calculate the organization and timing parameters
+ * from the DDR4 SPD; please refer to the spec
+ * JEDEC standard No.21-C 4_01_02_12R23A.pdf
+ *
+ */
+
+#include <common.h>
+#include <fsl_ddr_sdram.h>
+#include <log.h>
+#include <linux/bug.h>
+
+#include <fsl_ddr.h>
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Total DIMM size =
+ * sdram capacity(bit) / 8 * primary bus width / sdram width
+ *                     * Logical Ranks per DIMM
+ *
+ * where: sdram capacity  = spd byte4[3:0]
+ *        primary bus width = spd byte13[2:0]
+ *        sdram width = spd byte12[2:0]
+ *        Logical Ranks per DIMM = spd byte12[5:3] for SDP, DDP, QDP
+ *                                 spd byte12[5:3] * spd byte6[6:4] for 3DS
+ *
+ * To simplify, each rank size = total DIMM size / Number of Package Ranks
+ * where Number of Package Ranks = spd byte12[5:3]
+ *
+ * SPD byte4 - sdram density and banks
+ *	bit[3:0]	size(bit)	size(byte)
+ *	0000		256Mb		32MB
+ *	0001		512Mb		64MB
+ *	0010		1Gb		128MB
+ *	0011		2Gb		256MB
+ *	0100		4Gb		512MB
+ *	0101		8Gb		1GB
+ *	0110		16Gb		2GB
+ *	0111		32Gb		4GB
+ *
+ * SPD byte13 - module memory bus width
+ *	bit[2:0]	primary bus width
+ *	000		8bits
+ *	001		16bits
+ *	010		32bits
+ *	011		64bits
+ *
+ * SPD byte12 - module organization
+ *	bit[2:0]	sdram device width
+ *	000		4bits
+ *	001		8bits
+ *	010		16bits
+ *	011		32bits
+ *
+ * SPD byte12 - module organization
+ *	bit[5:3]	number of package ranks per DIMM
+ *	000		1
+ *	001		2
+ *	010		3
+ *	011		4
+ *
+ * SPD byte6 - SDRAM package type
+ *	bit[6:4]	Die count
+ *	000		1
+ *	001		2
+ *	010		3
+ *	011		4
+ *	100		5
+ *	101		6
+ *	110		7
+ *	111		8
+ *
+ * SPD byte6 - SDRAM package type
+ *	bit[1:0]	Signal loading
+ *	00		Not specified
+ *	01		Multi load stack
+ *	10		Single load stack (3DS)
+ *	11		Reserved
+ */
+static unsigned long long
+compute_ranksize(const struct ddr4_spd_eeprom_s *spd)
+{
+	unsigned long long bsize;
+
+	int nbit_sdram_cap_bsize = 0;
+	int nbit_primary_bus_width = 0;
+	int nbit_sdram_width = 0;
+	int die_count = 0;
+	bool package_3ds;
+
+	if ((spd->density_banks & 0xf) <= 7)
+		nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+	if ((spd->bus_width & 0x7) < 4)
+		nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+	if ((spd->organization & 0x7) < 4)
+		nbit_sdram_width = (spd->organization & 0x7) + 2;
+	package_3ds = (spd->package_type & 0x3) == 0x2;
+	if ((spd->package_type & 0x80) && !package_3ds) { /* other than 3DS */
+		printf("Warning: unsupported SDRAM package type\n");
+		return 0;
+	}
+	if 
(package_3ds) + die_count = (spd->package_type >> 4) & 0x7; + + bsize = 1ULL << (nbit_sdram_cap_bsize - 3 + + nbit_primary_bus_width - nbit_sdram_width + + die_count); + + debug("DDR: DDR rank density = 0x%16llx\n", bsize); + + return bsize; +} + +#define spd_to_ps(mtb, ftb) \ + (mtb * pdimm->mtb_ps + (ftb * pdimm->ftb_10th_ps) / 10) +/* + * ddr_compute_dimm_parameters for DDR4 SPD + * + * Compute DIMM parameters based upon the SPD information in spd. + * Writes the results to the dimm_params_t structure pointed by pdimm. + * + */ +unsigned int ddr_compute_dimm_parameters(const unsigned int ctrl_num, + const generic_spd_eeprom_t *spd, + dimm_params_t *pdimm, + unsigned int dimm_number) +{ + unsigned int retval; + int i; + const u8 udimm_rc_e_dq[18] = { + 0x0c, 0x2c, 0x15, 0x35, 0x15, 0x35, 0x0b, 0x2c, 0x15, + 0x35, 0x0b, 0x35, 0x0b, 0x2c, 0x0b, 0x35, 0x15, 0x36 + }; + int spd_error = 0; + u8 *ptr; + u8 val; + + if (spd->mem_type) { + if (spd->mem_type != SPD_MEMTYPE_DDR4) { + printf("Ctrl %u DIMM %u: is not a DDR4 SPD.\n", + ctrl_num, dimm_number); + return 1; + } + } else { + memset(pdimm, 0, sizeof(dimm_params_t)); + return 1; + } + + retval = ddr4_spd_check(spd); + if (retval) { + printf("DIMM %u: failed checksum\n", dimm_number); + return 2; + } + + /* + * The part name in ASCII in the SPD EEPROM is not null terminated. + * Guarantee null termination here by presetting all bytes to 0 + * and copying the part name in ASCII from the SPD onto it + */ + memset(pdimm->mpart, 0, sizeof(pdimm->mpart)); + if ((spd->info_size_crc & 0xF) > 2) + memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1); + + /* DIMM organization parameters */ + pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1; + pdimm->rank_density = compute_ranksize(spd); + pdimm->capacity = pdimm->n_ranks * pdimm->rank_density; + pdimm->die_density = spd->density_banks & 0xf; + pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7)); + if ((spd->bus_width >> 3) & 0x3) + pdimm->ec_sdram_width = 8; + else + pdimm->ec_sdram_width = 0; + pdimm->data_width = pdimm->primary_sdram_width + + pdimm->ec_sdram_width; + pdimm->device_width = 1 << ((spd->organization & 0x7) + 2); + pdimm->package_3ds = (spd->package_type & 0x3) == 0x2 ? + (spd->package_type >> 4) & 0x7 : 0; + + /* These are the types defined by the JEDEC SPD spec */ + pdimm->mirrored_dimm = 0; + pdimm->registered_dimm = 0; + switch (spd->module_type & DDR4_SPD_MODULETYPE_MASK) { + case DDR4_SPD_MODULETYPE_RDIMM: + /* Registered/buffered DIMMs */ + pdimm->registered_dimm = 1; + if (spd->mod_section.registered.reg_map & 0x1) + pdimm->mirrored_dimm = 1; + val = spd->mod_section.registered.ca_stren; + pdimm->rcw[3] = val >> 4; + pdimm->rcw[4] = ((val & 0x3) << 2) | ((val & 0xc) >> 2); + val = spd->mod_section.registered.clk_stren; + pdimm->rcw[5] = ((val & 0x3) << 2) | ((val & 0xc) >> 2); + /* Not all in SPD. For convience only. Boards may overwrite. */ + pdimm->rcw[6] = 0xf; + /* + * A17 only used for 16Gb and above devices. + * C[2:0] only used for 3DS. + */ + pdimm->rcw[8] = pdimm->die_density >= 0x6 ? 0x0 : 0x8 | + (pdimm->package_3ds > 0x3 ? 0x0 : + (pdimm->package_3ds > 0x1 ? 0x1 : + (pdimm->package_3ds > 0 ? 
0x2 : 0x3)));
+		if (pdimm->package_3ds || pdimm->n_ranks != 4)
+			pdimm->rcw[13] = 0xc;
+		else
+			pdimm->rcw[13] = 0xd;	/* Fix encoded by board */
+
+		break;
+
+	case DDR4_SPD_MODULETYPE_UDIMM:
+	case DDR4_SPD_MODULETYPE_SO_DIMM:
+		/* Unbuffered DIMMs */
+		if (spd->mod_section.unbuffered.addr_mapping & 0x1)
+			pdimm->mirrored_dimm = 1;
+		if ((spd->mod_section.unbuffered.mod_height & 0xe0) == 0 &&
+		    (spd->mod_section.unbuffered.ref_raw_card == 0x04)) {
+			/* Fix SPD error found on DIMMs with raw card E0 */
+			for (i = 0; i < 18; i++) {
+				if (spd->mapping[i] == udimm_rc_e_dq[i])
+					continue;
+				spd_error = 1;
+				debug("SPD byte %d: 0x%x, should be 0x%x\n",
+				      60 + i, spd->mapping[i],
+				      udimm_rc_e_dq[i]);
+				ptr = (u8 *)&spd->mapping[i];
+				*ptr = udimm_rc_e_dq[i];
+			}
+			if (spd_error)
+				puts("SPD DQ mapping error fixed\n");
+		}
+		break;
+
+	default:
+		printf("unknown module_type 0x%02X\n", spd->module_type);
+		return 1;
+	}
+
+	/* SDRAM device parameters */
+	pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+	pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+	pdimm->bank_addr_bits = (spd->density_banks >> 4) & 0x3;
+	pdimm->bank_group_bits = (spd->density_banks >> 6) & 0x3;
+
+	/*
+	 * The SPD spec has no ECC bit, so treat the DIMM as ECC-capable
+	 * whenever the extension (check) bus is present.
+	 */
+	if (pdimm->ec_sdram_width)
+		pdimm->edc_config = 0x02;
+	else
+		pdimm->edc_config = 0x00;
+
+	/*
+	 * The SPD spec has no burst length byte, but the DDR4 spec
+	 * defines native BL8 and BC4: BL8 is bit 3, BC4 is bit 2.
+	 */
+	pdimm->burst_lengths_bitmask = 0x0c;
+
+	/* MTB - medium timebase
+	 * The MTB in the SPD spec is 125ps.
+	 *
+	 * FTB - fine timebase
+	 * use 1/10th of ps as our unit to avoid floating point
+	 * eg, 10 for 1ps, 25 for 2.5ps, 50 for 5ps
+	 */
+	if ((spd->timebases & 0xf) == 0x0) {
+		pdimm->mtb_ps = 125;
+		pdimm->ftb_10th_ps = 10;
+	} else {
+		printf("Unknown Timebases\n");
+	}
+
+	/* sdram minimum cycle time */
+	pdimm->tckmin_x_ps = spd_to_ps(spd->tck_min, spd->fine_tck_min);
+
+	/* sdram max cycle time */
+	pdimm->tckmax_ps = spd_to_ps(spd->tck_max, spd->fine_tck_max);
+
+	/*
+	 * CAS latency supported
+	 * bit0 - CL7
+	 * bit4 - CL11
+	 * bit8 - CL15
+	 * bit12 - CL19
+	 * bit16 - CL23
+	 */
+	pdimm->caslat_x  = (spd->caslat_b1 << 7)	|
+			   (spd->caslat_b2 << 15)	|
+			   (spd->caslat_b3 << 23);
+
+	BUG_ON(spd->caslat_b4 != 0);
+
+	/*
+	 * min CAS latency time
+	 */
+	pdimm->taa_ps = spd_to_ps(spd->taa_min, spd->fine_taa_min);
+
+	/*
+	 * min RAS to CAS delay time
+	 */
+	pdimm->trcd_ps = spd_to_ps(spd->trcd_min, spd->fine_trcd_min);
+
+	/*
+	 * Min Row Precharge Delay Time
+	 */
+	pdimm->trp_ps = spd_to_ps(spd->trp_min, spd->fine_trp_min);
+
+	/* min active to precharge delay time */
+	pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) +
+			  spd->tras_min_lsb) * pdimm->mtb_ps;
+
+	/* min active to active/refresh delay time */
+	pdimm->trc_ps = spd_to_ps((((spd->tras_trc_ext & 0xf0) << 4) +
+				   spd->trc_min_lsb), spd->fine_trc_min);
+	/* Min Refresh Recovery Delay Time */
+	pdimm->trfc1_ps = ((spd->trfc1_min_msb << 8) | (spd->trfc1_min_lsb)) *
+			  pdimm->mtb_ps;
+	pdimm->trfc2_ps = ((spd->trfc2_min_msb << 8) | (spd->trfc2_min_lsb)) *
+			  pdimm->mtb_ps;
+	pdimm->trfc4_ps = ((spd->trfc4_min_msb << 8) | (spd->trfc4_min_lsb)) *
+			  pdimm->mtb_ps;
+	/* min four active window delay time */
+	pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min) *
+			 pdimm->mtb_ps;
+
+	/* min row active to row active delay time, different bank group */
+	pdimm->trrds_ps = spd_to_ps(spd->trrds_min, spd->fine_trrds_min);
+	/* min row 
active to row active delay time, same bank group */ + pdimm->trrdl_ps = spd_to_ps(spd->trrdl_min, spd->fine_trrdl_min); + /* min CAS to CAS Delay Time (tCCD_Lmin), same bank group */ + pdimm->tccdl_ps = spd_to_ps(spd->tccdl_min, spd->fine_tccdl_min); + + if (pdimm->package_3ds) { + if (pdimm->die_density <= 0x4) { + pdimm->trfc_slr_ps = 260000; + } else if (pdimm->die_density <= 0x5) { + pdimm->trfc_slr_ps = 350000; + } else { + printf("WARN: Unsupported logical rank density 0x%x\n", + pdimm->die_density); + } + } + + /* + * Average periodic refresh interval + * tREFI = 7.8 us at normal temperature range + */ + pdimm->refresh_rate_ps = 7800000; + + for (i = 0; i < 18; i++) + pdimm->dq_mapping[i] = spd->mapping[i]; + + pdimm->dq_mapping_ors = ((spd->mapping[0] >> 6) & 0x3) == 0 ? 1 : 0; + + return 0; +} diff --git a/roms/u-boot/drivers/ddr/fsl/fsl_ddr_gen4.c b/roms/u-boot/drivers/ddr/fsl/fsl_ddr_gen4.c new file mode 100644 index 000000000..e43c68015 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/fsl_ddr_gen4.c @@ -0,0 +1,622 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2014-2020 Freescale Semiconductor, Inc. + */ + +#include <common.h> +#include <env.h> +#include <log.h> +#include <asm/io.h> +#include <fsl_ddr_sdram.h> +#include <asm/processor.h> +#include <fsl_immap.h> +#include <fsl_ddr.h> +#include <fsl_errata.h> +#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \ + defined(CONFIG_ARM) +#include <asm/arch/clock.h> +#endif +#include <linux/delay.h> + +#define CTLR_INTLV_MASK 0x20000000 + +#if defined(CONFIG_SYS_FSL_ERRATUM_A008511) | \ + defined(CONFIG_SYS_FSL_ERRATUM_A009803) +static void set_wait_for_bits_clear(void *ptr, u32 value, u32 bits) +{ + int timeout = 1000; + + ddr_out32(ptr, value); + + while (ddr_in32(ptr) & bits) { + udelay(100); + timeout--; + } + if (timeout <= 0) + puts("Error: wait for clear timeout.\n"); +} +#endif + +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 4) +#error Invalid setting for CONFIG_CHIP_SELECTS_PER_CTRL +#endif + +/* + * regs has the to-be-set values for DDR controller registers + * ctrl_num is the DDR controller number + * step: 0 goes through the initialization in one pass + * 1 sets registers and returns before enabling controller + * 2 resumes from step 1 and continues to initialize + * Dividing the initialization to two steps to deassert DDR reset signal + * to comply with JEDEC specs for RDIMMs. 
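+ *
+ * A typical RDIMM bring-up therefore looks like this (illustrative;
+ * the reset toggling itself is board code):
+ *
+ *	fsl_ddr_set_memctl_regs(regs, 0, 1);	start the DRAM clocks
+ *	... board deasserts the DDR reset signal ...
+ *	fsl_ddr_set_memctl_regs(regs, 0, 2);	finish initialization
+ *
+ * Targets without this requirement pass step = 0 and initialize in
+ * one pass.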
+ */ +void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs, + unsigned int ctrl_num, int step) +{ + unsigned int i, bus_width; + struct ccsr_ddr __iomem *ddr; + u32 temp32; + u32 total_gb_size_per_controller; + int timeout; + int mod_bnds = 0; + +#ifdef CONFIG_SYS_FSL_ERRATUM_A008511 + u32 mr6; + u32 vref_seq1[3] = {0x80, 0x96, 0x16}; /* for range 1 */ + u32 vref_seq2[3] = {0xc0, 0xf0, 0x70}; /* for range 2 */ + u32 *vref_seq = vref_seq1; +#endif +#ifdef CONFIG_FSL_DDR_BIST + u32 mtcr, err_detect, err_sbe; + u32 cs0_bnds, cs1_bnds, cs2_bnds, cs3_bnds, cs0_config; +#endif +#ifdef CONFIG_FSL_DDR_BIST + char buffer[CONFIG_SYS_CBSIZE]; +#endif +#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) || \ + (defined(CONFIG_SYS_FSL_ERRATUM_A008378) && \ + defined(CONFIG_SYS_FSL_DDRC_GEN4)) || \ + defined(CONFIG_SYS_FSL_ERRATUM_A008109) + u32 val32; +#endif +#ifdef CONFIG_SYS_FSL_ERRATUM_A009942 + unsigned int ddr_freq; +#endif + switch (ctrl_num) { + case 0: + ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR; + break; +#if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 1) + case 1: + ddr = (void *)CONFIG_SYS_FSL_DDR2_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 2) + case 2: + ddr = (void *)CONFIG_SYS_FSL_DDR3_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 3) + case 3: + ddr = (void *)CONFIG_SYS_FSL_DDR4_ADDR; + break; +#endif + default: + printf("%s unexpected ctrl_num = %u\n", __func__, ctrl_num); + return; + } + mod_bnds = regs->cs[0].config & CTLR_INTLV_MASK; + + if (step == 2) + goto step2; + + /* Set cdr1 first in case 0.9v VDD is enabled for some SoCs*/ + ddr_out32(&ddr->ddr_cdr1, regs->ddr_cdr1); + + if (regs->ddr_eor) + ddr_out32(&ddr->eor, regs->ddr_eor); + + ddr_out32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl); + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (i == 0) { + if (mod_bnds) { + debug("modified bnds\n"); + ddr_out32(&ddr->cs0_bnds, + (regs->cs[i].bnds & 0xfffefffe) >> 1); + ddr_out32(&ddr->cs0_config, + (regs->cs[i].config & + ~CTLR_INTLV_MASK)); + } else { + ddr_out32(&ddr->cs0_bnds, regs->cs[i].bnds); + ddr_out32(&ddr->cs0_config, regs->cs[i].config); + } + ddr_out32(&ddr->cs0_config_2, regs->cs[i].config_2); + + } else if (i == 1) { + if (mod_bnds) { + ddr_out32(&ddr->cs1_bnds, + (regs->cs[i].bnds & 0xfffefffe) >> 1); + } else { + ddr_out32(&ddr->cs1_bnds, regs->cs[i].bnds); + } + ddr_out32(&ddr->cs1_config, regs->cs[i].config); + ddr_out32(&ddr->cs1_config_2, regs->cs[i].config_2); + + } else if (i == 2) { + if (mod_bnds) { + ddr_out32(&ddr->cs2_bnds, + (regs->cs[i].bnds & 0xfffefffe) >> 1); + } else { + ddr_out32(&ddr->cs2_bnds, regs->cs[i].bnds); + } + ddr_out32(&ddr->cs2_config, regs->cs[i].config); + ddr_out32(&ddr->cs2_config_2, regs->cs[i].config_2); + + } else if (i == 3) { + if (mod_bnds) { + ddr_out32(&ddr->cs3_bnds, + (regs->cs[i].bnds & 0xfffefffe) >> 1); + } else { + ddr_out32(&ddr->cs3_bnds, regs->cs[i].bnds); + } + ddr_out32(&ddr->cs3_config, regs->cs[i].config); + ddr_out32(&ddr->cs3_config_2, regs->cs[i].config_2); + } + } + + ddr_out32(&ddr->timing_cfg_3, regs->timing_cfg_3); + ddr_out32(&ddr->timing_cfg_0, regs->timing_cfg_0); + ddr_out32(&ddr->timing_cfg_1, regs->timing_cfg_1); + ddr_out32(&ddr->timing_cfg_2, regs->timing_cfg_2); + ddr_out32(&ddr->timing_cfg_4, regs->timing_cfg_4); + ddr_out32(&ddr->timing_cfg_5, regs->timing_cfg_5); + ddr_out32(&ddr->timing_cfg_6, regs->timing_cfg_6); + ddr_out32(&ddr->timing_cfg_7, regs->timing_cfg_7); + 
ddr_out32(&ddr->timing_cfg_8, regs->timing_cfg_8); + ddr_out32(&ddr->timing_cfg_9, regs->timing_cfg_9); + ddr_out32(&ddr->ddr_zq_cntl, regs->ddr_zq_cntl); + ddr_out32(&ddr->dq_map_0, regs->dq_map_0); + ddr_out32(&ddr->dq_map_1, regs->dq_map_1); + ddr_out32(&ddr->dq_map_2, regs->dq_map_2); + ddr_out32(&ddr->dq_map_3, regs->dq_map_3); + ddr_out32(&ddr->sdram_cfg_3, regs->ddr_sdram_cfg_3); + ddr_out32(&ddr->sdram_mode, regs->ddr_sdram_mode); + ddr_out32(&ddr->sdram_mode_2, regs->ddr_sdram_mode_2); + ddr_out32(&ddr->sdram_mode_3, regs->ddr_sdram_mode_3); + ddr_out32(&ddr->sdram_mode_4, regs->ddr_sdram_mode_4); + ddr_out32(&ddr->sdram_mode_5, regs->ddr_sdram_mode_5); + ddr_out32(&ddr->sdram_mode_6, regs->ddr_sdram_mode_6); + ddr_out32(&ddr->sdram_mode_7, regs->ddr_sdram_mode_7); + ddr_out32(&ddr->sdram_mode_8, regs->ddr_sdram_mode_8); + ddr_out32(&ddr->sdram_mode_9, regs->ddr_sdram_mode_9); + ddr_out32(&ddr->sdram_mode_10, regs->ddr_sdram_mode_10); + ddr_out32(&ddr->sdram_mode_11, regs->ddr_sdram_mode_11); + ddr_out32(&ddr->sdram_mode_12, regs->ddr_sdram_mode_12); + ddr_out32(&ddr->sdram_mode_13, regs->ddr_sdram_mode_13); + ddr_out32(&ddr->sdram_mode_14, regs->ddr_sdram_mode_14); + ddr_out32(&ddr->sdram_mode_15, regs->ddr_sdram_mode_15); + ddr_out32(&ddr->sdram_mode_16, regs->ddr_sdram_mode_16); + ddr_out32(&ddr->sdram_md_cntl, regs->ddr_sdram_md_cntl); +#ifdef CONFIG_SYS_FSL_ERRATUM_A009663 + ddr_out32(&ddr->sdram_interval, + regs->ddr_sdram_interval & ~SDRAM_INTERVAL_BSTOPRE); +#else + ddr_out32(&ddr->sdram_interval, regs->ddr_sdram_interval); +#endif + ddr_out32(&ddr->sdram_data_init, regs->ddr_data_init); + ddr_out32(&ddr->ddr_wrlvl_cntl, regs->ddr_wrlvl_cntl); +#ifndef CONFIG_SYS_FSL_DDR_EMU + /* + * Skip these two registers if running on emulator + * because emulator doesn't have skew between bytes. 
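+	 * (These registers hold the per-byte-lane write-leveling start
+	 * delays, which are meaningless when no skew exists.)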
+ */ + + if (regs->ddr_wrlvl_cntl_2) + ddr_out32(&ddr->ddr_wrlvl_cntl_2, regs->ddr_wrlvl_cntl_2); + if (regs->ddr_wrlvl_cntl_3) + ddr_out32(&ddr->ddr_wrlvl_cntl_3, regs->ddr_wrlvl_cntl_3); +#endif + + ddr_out32(&ddr->ddr_sr_cntr, regs->ddr_sr_cntr); + ddr_out32(&ddr->ddr_sdram_rcw_1, regs->ddr_sdram_rcw_1); + ddr_out32(&ddr->ddr_sdram_rcw_2, regs->ddr_sdram_rcw_2); + ddr_out32(&ddr->ddr_sdram_rcw_3, regs->ddr_sdram_rcw_3); + ddr_out32(&ddr->ddr_sdram_rcw_4, regs->ddr_sdram_rcw_4); + ddr_out32(&ddr->ddr_sdram_rcw_5, regs->ddr_sdram_rcw_5); + ddr_out32(&ddr->ddr_sdram_rcw_6, regs->ddr_sdram_rcw_6); +#ifdef CONFIG_DEEP_SLEEP + if (is_warm_boot()) { + ddr_out32(&ddr->sdram_cfg_2, + regs->ddr_sdram_cfg_2 & ~SDRAM_CFG2_D_INIT); + ddr_out32(&ddr->init_addr, CONFIG_SYS_SDRAM_BASE); + ddr_out32(&ddr->init_ext_addr, DDR_INIT_ADDR_EXT_UIA); + + /* DRAM VRef will not be trained */ + ddr_out32(&ddr->ddr_cdr2, + regs->ddr_cdr2 & ~DDR_CDR2_VREF_TRAIN_EN); + } else +#endif + { + ddr_out32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2); + ddr_out32(&ddr->init_addr, regs->ddr_init_addr); + ddr_out32(&ddr->init_ext_addr, regs->ddr_init_ext_addr); + ddr_out32(&ddr->ddr_cdr2, regs->ddr_cdr2); + } + +#ifdef CONFIG_SYS_FSL_ERRATUM_A009803 + /* part 1 of 2 */ + if (regs->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) { + if (regs->ddr_sdram_cfg & SDRAM_CFG_RD_EN) { /* for RDIMM */ + ddr_out32(&ddr->ddr_sdram_rcw_2, + regs->ddr_sdram_rcw_2 & ~0xf0); + } + ddr_out32(&ddr->err_disable, regs->err_disable | + DDR_ERR_DISABLE_APED); + } +#else + ddr_out32(&ddr->err_disable, regs->err_disable); +#endif + ddr_out32(&ddr->err_int_en, regs->err_int_en); + for (i = 0; i < 64; i++) { + if (regs->debug[i]) { + debug("Write to debug_%d as %08x\n", + i+1, regs->debug[i]); + ddr_out32(&ddr->debug[i], regs->debug[i]); + } + } + +#ifdef CONFIG_SYS_FSL_ERRATUM_A008511 + /* Part 1 of 2 */ + if (fsl_ddr_get_version(ctrl_num) == 0x50200) { + /* Disable DRAM VRef training */ + ddr_out32(&ddr->ddr_cdr2, + regs->ddr_cdr2 & ~DDR_CDR2_VREF_TRAIN_EN); + /* disable transmit bit deskew */ + temp32 = ddr_in32(&ddr->debug[28]); + temp32 |= DDR_TX_BD_DIS; + ddr_out32(&ddr->debug[28], temp32); + ddr_out32(&ddr->debug[25], 0x9000); + } else if (fsl_ddr_get_version(ctrl_num) == 0x50201) { + /* Output enable forced off */ + ddr_out32(&ddr->debug[37], 1 << 31); + /* Enable Vref training */ + ddr_out32(&ddr->ddr_cdr2, + regs->ddr_cdr2 | DDR_CDR2_VREF_TRAIN_EN); + } else { + debug("Erratum A008511 doesn't apply.\n"); + } +#endif + +#if defined(CONFIG_SYS_FSL_ERRATUM_A009803) || \ + defined(CONFIG_SYS_FSL_ERRATUM_A008511) + /* Disable D_INIT */ + ddr_out32(&ddr->sdram_cfg_2, + regs->ddr_sdram_cfg_2 & ~SDRAM_CFG2_D_INIT); +#endif + +#ifdef CONFIG_SYS_FSL_ERRATUM_A009801 + temp32 = ddr_in32(&ddr->debug[25]); + temp32 &= ~DDR_CAS_TO_PRE_SUB_MASK; + temp32 |= 9 << DDR_CAS_TO_PRE_SUB_SHIFT; + ddr_out32(&ddr->debug[25], temp32); +#endif + +#ifdef CONFIG_SYS_FSL_ERRATUM_A010165 + temp32 = get_ddr_freq(ctrl_num) / 1000000; + if ((temp32 > 1900) && (temp32 < 2300)) { + temp32 = ddr_in32(&ddr->debug[28]); + ddr_out32(&ddr->debug[28], temp32 | 0x000a0000); + } +#endif + /* + * For RDIMMs, JEDEC spec requires clocks to be stable before reset is + * deasserted. Clocks start when any chip select is enabled and clock + * control register is set. Because all DDR components are connected to + * one reset signal, this needs to be done in two steps. Step 1 is to + * get the clocks started. Step 2 resumes after reset signal is + * deasserted. 
+ */ + if (step == 1) { + udelay(200); + return; + } + +step2: + /* Set, but do not enable the memory */ + temp32 = regs->ddr_sdram_cfg; + temp32 &= ~(SDRAM_CFG_MEM_EN); + ddr_out32(&ddr->sdram_cfg, temp32); + + /* + * 500 painful micro-seconds must elapse between + * the DDR clock setup and the DDR config enable. + * DDR2 need 200 us, and DDR3 need 500 us from spec, + * we choose the max, that is 500 us for all of case. + */ + udelay(500); + mb(); + isb(); + +#ifdef CONFIG_DEEP_SLEEP + if (is_warm_boot()) { + /* enter self-refresh */ + temp32 = ddr_in32(&ddr->sdram_cfg_2); + temp32 |= SDRAM_CFG2_FRC_SR; + ddr_out32(&ddr->sdram_cfg_2, temp32); + /* do board specific memory setup */ + board_mem_sleep_setup(); + + temp32 = (ddr_in32(&ddr->sdram_cfg) | SDRAM_CFG_BI); + } else +#endif + temp32 = ddr_in32(&ddr->sdram_cfg) & ~SDRAM_CFG_BI; + /* Let the controller go */ + ddr_out32(&ddr->sdram_cfg, temp32 | SDRAM_CFG_MEM_EN); + mb(); + isb(); + +#if defined(CONFIG_SYS_FSL_ERRATUM_A008511) || \ + defined(CONFIG_SYS_FSL_ERRATUM_A009803) + /* Part 2 of 2 */ + timeout = 40; + /* Wait for idle. D_INIT needs to be cleared earlier, or timeout */ + while (!(ddr_in32(&ddr->debug[1]) & 0x2) && + (timeout > 0)) { + udelay(1000); + timeout--; + } + if (timeout <= 0) { + printf("Controler %d timeout, debug_2 = %x\n", + ctrl_num, ddr_in32(&ddr->debug[1])); + } + +#ifdef CONFIG_SYS_FSL_ERRATUM_A008511 + /* This erraum only applies to verion 5.2.0 */ + if (fsl_ddr_get_version(ctrl_num) == 0x50200) { + /* The vref setting sequence is different for range 2 */ + if (regs->ddr_cdr2 & DDR_CDR2_VREF_RANGE_2) + vref_seq = vref_seq2; + + /* Set VREF */ + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (!(regs->cs[i].config & SDRAM_CS_CONFIG_EN)) + continue; + + mr6 = (regs->ddr_sdram_mode_10 >> 16) | + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL(i) | + MD_CNTL_MD_SEL(6) | + 0x00200000; + temp32 = mr6 | vref_seq[0]; + set_wait_for_bits_clear(&ddr->sdram_md_cntl, + temp32, MD_CNTL_MD_EN); + udelay(1); + debug("MR6 = 0x%08x\n", temp32); + temp32 = mr6 | vref_seq[1]; + set_wait_for_bits_clear(&ddr->sdram_md_cntl, + temp32, MD_CNTL_MD_EN); + udelay(1); + debug("MR6 = 0x%08x\n", temp32); + temp32 = mr6 | vref_seq[2]; + set_wait_for_bits_clear(&ddr->sdram_md_cntl, + temp32, MD_CNTL_MD_EN); + udelay(1); + debug("MR6 = 0x%08x\n", temp32); + } + ddr_out32(&ddr->sdram_md_cntl, 0); + temp32 = ddr_in32(&ddr->debug[28]); + temp32 &= ~DDR_TX_BD_DIS; /* Enable deskew */ + ddr_out32(&ddr->debug[28], temp32); + ddr_out32(&ddr->debug[1], 0x400); /* restart deskew */ + /* wait for idle */ + timeout = 40; + while (!(ddr_in32(&ddr->debug[1]) & 0x2) && + (timeout > 0)) { + udelay(1000); + timeout--; + } + if (timeout <= 0) { + printf("Controler %d timeout, debug_2 = %x\n", + ctrl_num, ddr_in32(&ddr->debug[1])); + } + } +#endif /* CONFIG_SYS_FSL_ERRATUM_A008511 */ + +#ifdef CONFIG_SYS_FSL_ERRATUM_A009803 + if (regs->ddr_sdram_cfg_2 & SDRAM_CFG2_AP_EN) { + /* if it's RDIMM */ + if (regs->ddr_sdram_cfg & SDRAM_CFG_RD_EN) { + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (!(regs->cs[i].config & SDRAM_CS_CONFIG_EN)) + continue; + set_wait_for_bits_clear(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL(i) | + 0x070000ed, + MD_CNTL_MD_EN); + udelay(1); + } + } + + ddr_out32(&ddr->err_disable, + regs->err_disable & ~DDR_ERR_DISABLE_APED); + } +#endif + /* Restore D_INIT */ + ddr_out32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2); +#endif + +#if defined(CONFIG_SYS_FSL_ERRATUM_A008378) && defined(CONFIG_SYS_FSL_DDRC_GEN4) + /* Erratum 
applies when accumulated ECC is used, or DBI is enabled */ +#define IS_ACC_ECC_EN(v) ((v) & 0x4) +#define IS_DBI(v) ((((v) >> 12) & 0x3) == 0x2) + if (has_erratum_a008378()) { + if (IS_ACC_ECC_EN(regs->ddr_sdram_cfg) || + IS_DBI(regs->ddr_sdram_cfg_3)) { + val32 = ddr_in32(&ddr->debug[28]); + val32 |= (0x9 << 20); + ddr_out32(&ddr->debug[28], val32); + } + debug("Applied errata CONFIG_SYS_FSL_ERRATUM_A008378\n"); + } +#endif + +#if defined(CONFIG_SYS_FSL_ERRATUM_A008109) + val32 = ddr_in32(&ddr->sdram_cfg_2) | 0x800; /* DDR_SLOW */ + ddr_out32(&ddr->sdram_cfg_2, val32); + + val32 = ddr_in32(&ddr->debug[18]) | 0x2; + ddr_out32(&ddr->debug[18], val32); + + ddr_out32(&ddr->debug[28], 0x30000000); + debug("Applied errta CONFIG_SYS_FSL_ERRATUM_A008109\n"); +#endif + +#ifdef CONFIG_SYS_FSL_ERRATUM_A009942 + ddr_freq = get_ddr_freq(ctrl_num) / 1000000; + val32 = ddr_in32(&ddr->debug[28]); + val32 &= 0xff0fff00; + if (ddr_freq <= 1333) + val32 |= 0x0080006a; + else if (ddr_freq <= 1600) + val32 |= 0x0070006f; + else if (ddr_freq <= 1867) + val32 |= 0x00700076; + else if (ddr_freq <= 2133) + val32 |= 0x0060007b; + + ddr_out32(&ddr->debug[28], val32); + debug("Applied errata CONFIG_SYS_FSL_ERRATUM_A009942\n"); +#endif + + total_gb_size_per_controller = 0; + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (!(regs->cs[i].config & 0x80000000)) + continue; + total_gb_size_per_controller += 1 << ( + ((regs->cs[i].config >> 14) & 0x3) + 2 + + ((regs->cs[i].config >> 8) & 0x7) + 12 + + ((regs->cs[i].config >> 4) & 0x3) + 0 + + ((regs->cs[i].config >> 0) & 0x7) + 8 + + ((regs->ddr_sdram_cfg_3 >> 4) & 0x3) + + 3 - ((regs->ddr_sdram_cfg >> 19) & 0x3) - + 26); /* minus 26 (count of 64M) */ + } + /* + * total memory / bus width = transactions needed + * transactions needed / data rate = seconds + * to add plenty of buffer, double the time + * For example, 2GB on 666MT/s 64-bit bus takes about 402ms + * Let's wait for 800ms + */ + bus_width = 3 - ((ddr_in32(&ddr->sdram_cfg) & SDRAM_CFG_DBW_MASK) + >> SDRAM_CFG_DBW_SHIFT); + timeout = ((total_gb_size_per_controller << (6 - bus_width)) * 100 / + (get_ddr_freq(ctrl_num) >> 20)) << 2; + total_gb_size_per_controller >>= 4; /* shift down to gb size */ + debug("total %d GB\n", total_gb_size_per_controller); + debug("Need to wait up to %d * 10ms\n", timeout); + + /* Poll DDR_SDRAM_CFG_2[D_INIT] bit until auto-data init is done. */ + while ((ddr_in32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT) && + (timeout >= 0)) { + udelay(10000); /* throttle polling rate */ + timeout--; + } + + if (timeout <= 0) + printf("Waiting for D_INIT timeout. 
Memory may not work.\n"); + + if (mod_bnds) { + debug("Reset to original bnds\n"); + ddr_out32(&ddr->cs0_bnds, regs->cs[0].bnds); +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 1) + ddr_out32(&ddr->cs1_bnds, regs->cs[1].bnds); +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 2) + ddr_out32(&ddr->cs2_bnds, regs->cs[2].bnds); +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 3) + ddr_out32(&ddr->cs3_bnds, regs->cs[3].bnds); +#endif +#endif +#endif + ddr_out32(&ddr->cs0_config, regs->cs[0].config); + } + +#ifdef CONFIG_SYS_FSL_ERRATUM_A009663 + ddr_out32(&ddr->sdram_interval, regs->ddr_sdram_interval); +#endif + +#ifdef CONFIG_DEEP_SLEEP + if (is_warm_boot()) { + /* exit self-refresh */ + temp32 = ddr_in32(&ddr->sdram_cfg_2); + temp32 &= ~SDRAM_CFG2_FRC_SR; + ddr_out32(&ddr->sdram_cfg_2, temp32); + } +#endif + +#ifdef CONFIG_FSL_DDR_BIST +#define BIST_PATTERN1 0xFFFFFFFF +#define BIST_PATTERN2 0x0 +#define BIST_CR 0x80010000 +#define BIST_CR_EN 0x80000000 +#define BIST_CR_STAT 0x00000001 + /* Perform build-in test on memory. Three-way interleaving is not yet + * supported by this code. */ + if (env_get_f("ddr_bist", buffer, CONFIG_SYS_CBSIZE) >= 0) { + puts("Running BIST test. This will take a while..."); + cs0_config = ddr_in32(&ddr->cs0_config); + cs0_bnds = ddr_in32(&ddr->cs0_bnds); + cs1_bnds = ddr_in32(&ddr->cs1_bnds); + cs2_bnds = ddr_in32(&ddr->cs2_bnds); + cs3_bnds = ddr_in32(&ddr->cs3_bnds); + if (cs0_config & CTLR_INTLV_MASK) { + /* set bnds to non-interleaving */ + ddr_out32(&ddr->cs0_bnds, (cs0_bnds & 0xfffefffe) >> 1); + ddr_out32(&ddr->cs1_bnds, (cs1_bnds & 0xfffefffe) >> 1); + ddr_out32(&ddr->cs2_bnds, (cs2_bnds & 0xfffefffe) >> 1); + ddr_out32(&ddr->cs3_bnds, (cs3_bnds & 0xfffefffe) >> 1); + } + ddr_out32(&ddr->mtp1, BIST_PATTERN1); + ddr_out32(&ddr->mtp2, BIST_PATTERN1); + ddr_out32(&ddr->mtp3, BIST_PATTERN2); + ddr_out32(&ddr->mtp4, BIST_PATTERN2); + ddr_out32(&ddr->mtp5, BIST_PATTERN1); + ddr_out32(&ddr->mtp6, BIST_PATTERN1); + ddr_out32(&ddr->mtp7, BIST_PATTERN2); + ddr_out32(&ddr->mtp8, BIST_PATTERN2); + ddr_out32(&ddr->mtp9, BIST_PATTERN1); + ddr_out32(&ddr->mtp10, BIST_PATTERN2); + mtcr = BIST_CR; + ddr_out32(&ddr->mtcr, mtcr); + timeout = 100; + while (timeout > 0 && (mtcr & BIST_CR_EN)) { + mdelay(1000); + timeout--; + mtcr = ddr_in32(&ddr->mtcr); + } + if (timeout <= 0) + puts("Timeout\n"); + else + puts("Done\n"); + err_detect = ddr_in32(&ddr->err_detect); + err_sbe = ddr_in32(&ddr->err_sbe); + if (mtcr & BIST_CR_STAT) { + printf("BIST test failed on controller %d.\n", + ctrl_num); + } + if (err_detect || (err_sbe & 0xffff)) { + printf("ECC error detected on controller %d.\n", + ctrl_num); + } + + if (cs0_config & CTLR_INTLV_MASK) { + /* restore bnds registers */ + ddr_out32(&ddr->cs0_bnds, cs0_bnds); + ddr_out32(&ddr->cs1_bnds, cs1_bnds); + ddr_out32(&ddr->cs2_bnds, cs2_bnds); + ddr_out32(&ddr->cs3_bnds, cs3_bnds); + } + } +#endif +} diff --git a/roms/u-boot/drivers/ddr/fsl/fsl_mmdc.c b/roms/u-boot/drivers/ddr/fsl/fsl_mmdc.c new file mode 100644 index 000000000..cbd625b7e --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/fsl_mmdc.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2016 Freescale Semiconductor, Inc. + */ + +/* + * Generic driver for Freescale MMDC(Multi Mode DDR Controller). 
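+ *
+ * mmdc_init() below brings the controller up as one numbered
+ * sequence: configuration request, timing and DDR-type setup,
+ * chip-select enable, mode-register writes and ZQ calibration,
+ * write-leveling and read calibrations, then power-down/refresh
+ * setup before the configuration request is released.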
+ */ + +#include <common.h> +#include <fsl_mmdc.h> +#include <asm/io.h> +#include <linux/delay.h> + +static void set_wait_for_bits_clear(void *ptr, u32 value, u32 bits) +{ + int timeout = 1000; + + out_be32(ptr, value); + + while (in_be32(ptr) & bits) { + udelay(100); + timeout--; + } + if (timeout <= 0) + printf("Error: %p wait for clear timeout.\n", ptr); +} + +void mmdc_init(const struct fsl_mmdc_info *priv) +{ + struct mmdc_regs *mmdc = (struct mmdc_regs *)CONFIG_SYS_FSL_DDR_ADDR; + unsigned int tmp; + + /* 1. set configuration request */ + out_be32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ); + + /* 2. configure the desired timing parameters */ + out_be32(&mmdc->mdotc, priv->mdotc); + out_be32(&mmdc->mdcfg0, priv->mdcfg0); + out_be32(&mmdc->mdcfg1, priv->mdcfg1); + out_be32(&mmdc->mdcfg2, priv->mdcfg2); + + /* 3. configure DDR type and other miscellaneous parameters */ + out_be32(&mmdc->mdmisc, priv->mdmisc); + out_be32(&mmdc->mpmur0, MMDC_MPMUR0_FRC_MSR); + out_be32(&mmdc->mdrwd, priv->mdrwd); + out_be32(&mmdc->mpodtctrl, priv->mpodtctrl); + + /* 4. configure the required delay while leaving reset */ + out_be32(&mmdc->mdor, priv->mdor); + + /* 5. configure DDR physical parameters */ + /* set row/column address width, burst length, data bus width */ + tmp = priv->mdctl & ~(MDCTL_SDE0 | MDCTL_SDE1); + out_be32(&mmdc->mdctl, tmp); + /* configure address space partition */ + out_be32(&mmdc->mdasp, priv->mdasp); + + /* 6. perform a ZQ calibration - not needed here, doing in #8b */ + + /* 7. enable MMDC with the desired chip select */ +#if (CONFIG_CHIP_SELECTS_PER_CTRL == 1) + out_be32(&mmdc->mdctl, tmp | MDCTL_SDE0); +#elif (CONFIG_CHIP_SELECTS_PER_CTRL == 2) + out_be32(&mmdc->mdctl, tmp | MDCTL_SDE0 | MDCTL_SDE1); +#endif + + /* 8a. dram init sequence: update MRs for ZQ, ODT, PRE, etc */ + out_be32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(8) | MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_2); + + out_be32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(0) | MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_3); + + out_be32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) | MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_1); + + out_be32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(0x19) | + CMD_ADDR_LSB_MR_ADDR(0x30) | + MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_0); + + /* 8b. ZQ calibration */ + out_be32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(0x4) | MDSCR_ENABLE_CON_REQ | + CMD_ZQ_CALIBRATION | CMD_BANK_ADDR_0); + + set_wait_for_bits_clear(&mmdc->mpzqhwctrl, priv->mpzqhwctrl, + MPZQHWCTRL_ZQ_HW_FORCE); + + /* 9a. calibrations now, wr lvl */ + out_be32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(0x84) | + MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_1); + + out_be32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ | MDSCR_WL_EN | + CMD_NORMAL); + + set_wait_for_bits_clear(&mmdc->mpwlgcr, MPWLGCR_HW_WL_EN, + MPWLGCR_HW_WL_EN); + + mdelay(1); + + out_be32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) | MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_1); + out_be32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ); + + mdelay(1); + + /* 9b. 
read DQS gating calibration */ + out_be32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(4) | MDSCR_ENABLE_CON_REQ | + CMD_PRECHARGE_BANK_OPEN | CMD_BANK_ADDR_0); + + out_be32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) | MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_3); + + out_be32(&mmdc->mppdcmpr2, MPPDCMPR2_MPR_COMPARE_EN); + + /* set absolute read delay offset */ + if (priv->mprddlctl) + out_be32(&mmdc->mprddlctl, priv->mprddlctl); + else + out_be32(&mmdc->mprddlctl, MMDC_MPRDDLCTL_DEFAULT_DELAY); + + set_wait_for_bits_clear(&mmdc->mpdgctrl0, + AUTO_RD_DQS_GATING_CALIBRATION_EN, + AUTO_RD_DQS_GATING_CALIBRATION_EN); + + out_be32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ | CMD_LOAD_MODE_REG | + CMD_BANK_ADDR_3); + + /* 9c. read calibration */ + out_be32(&mmdc->mdscr, CMD_ADDR_MSB_MR_OP(4) | MDSCR_ENABLE_CON_REQ | + CMD_PRECHARGE_BANK_OPEN | CMD_BANK_ADDR_0); + out_be32(&mmdc->mdscr, CMD_ADDR_LSB_MR_ADDR(4) | MDSCR_ENABLE_CON_REQ | + CMD_LOAD_MODE_REG | CMD_BANK_ADDR_3); + out_be32(&mmdc->mppdcmpr2, MPPDCMPR2_MPR_COMPARE_EN); + set_wait_for_bits_clear(&mmdc->mprddlhwctl, + MPRDDLHWCTL_AUTO_RD_CALIBRATION_EN, + MPRDDLHWCTL_AUTO_RD_CALIBRATION_EN); + + out_be32(&mmdc->mdscr, MDSCR_ENABLE_CON_REQ | CMD_LOAD_MODE_REG | + CMD_BANK_ADDR_3); + + /* 10. configure power-down, self-refresh entry, exit parameters */ + out_be32(&mmdc->mdpdc, priv->mdpdc); + out_be32(&mmdc->mapsr, MMDC_MAPSR_PWR_SAV_CTRL_STAT); + + /* 11. ZQ config again? do nothing here */ + + /* 12. refresh scheme */ + set_wait_for_bits_clear(&mmdc->mdref, priv->mdref, + MDREF_START_REFRESH); + + /* 13. disable CON_REQ */ + out_be32(&mmdc->mdscr, MDSCR_DISABLE_CFG_REQ); +} diff --git a/roms/u-boot/drivers/ddr/fsl/interactive.c b/roms/u-boot/drivers/ddr/fsl/interactive.c new file mode 100644 index 000000000..2f76beb2d --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/interactive.c @@ -0,0 +1,2314 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2010-2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP Semiconductor + */ + +/* + * Generic driver for Freescale DDR/DDR2/DDR3 memory controller. 
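+ * This implements the interactive DDR debugging shell: SPD contents,
+ * computed DIMM parameters, options and controller registers can be
+ * printed and edited from the console before the controller is
+ * programmed.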
+ * Based on code from spd_sdram.c + * Author: James Yang [at freescale.com] + * York Sun [at freescale.com] + */ + +#include <common.h> +#include <cli.h> +#include <command.h> +#include <env.h> +#include <log.h> +#include <asm/bitops.h> +#include <linux/ctype.h> +#include <asm/types.h> +#include <asm/io.h> + +#include <fsl_ddr_sdram.h> +#include <fsl_ddr.h> + +/* Option parameter Structures */ +struct options_string { + const char *option_name; + size_t offset; + unsigned int size; + const char printhex; +}; + +static unsigned int picos_to_mhz(unsigned int picos) +{ + return 1000000 / picos; +} + +static void print_option_table(const struct options_string *table, + int table_size, + const void *base) +{ + unsigned int i; + unsigned int *ptr; + unsigned long long *ptr_l; + + for (i = 0; i < table_size; i++) { + switch (table[i].size) { + case 4: + ptr = (unsigned int *) (base + table[i].offset); + if (table[i].printhex) { + printf("%s = 0x%08X\n", + table[i].option_name, *ptr); + } else { + printf("%s = %u\n", + table[i].option_name, *ptr); + } + break; + case 8: + ptr_l = (unsigned long long *) (base + table[i].offset); + printf("%s = %llu\n", + table[i].option_name, *ptr_l); + break; + default: + printf("Unrecognized size!\n"); + break; + } + } +} + +static int handle_option_table(const struct options_string *table, + int table_size, + void *base, + const char *opt, + const char *val) +{ + unsigned int i; + unsigned int value, *ptr; + unsigned long long value_l, *ptr_l; + + for (i = 0; i < table_size; i++) { + if (strcmp(table[i].option_name, opt) != 0) + continue; + switch (table[i].size) { + case 4: + value = simple_strtoul(val, NULL, 0); + ptr = base + table[i].offset; + *ptr = value; + break; + case 8: + value_l = simple_strtoull(val, NULL, 0); + ptr_l = base + table[i].offset; + *ptr_l = value_l; + break; + default: + printf("Unrecognized size!\n"); + break; + } + return 1; + } + + return 0; +} + +static void fsl_ddr_generic_edit(void *pdata, + void *pend, + unsigned int element_size, + unsigned int element_num, + unsigned int value) +{ + char *pcdata = (char *)pdata; /* BIG ENDIAN ONLY */ + + pcdata += element_num * element_size; + if ((pcdata + element_size) > (char *) pend) { + printf("trying to write past end of data\n"); + return; + } + + switch (element_size) { + case 1: + __raw_writeb(value, pcdata); + break; + case 2: + __raw_writew(value, pcdata); + break; + case 4: + __raw_writel(value, pcdata); + break; + default: + printf("unexpected element size %u\n", element_size); + break; + } +} + +static void fsl_ddr_spd_edit(fsl_ddr_info_t *pinfo, + unsigned int ctrl_num, + unsigned int dimm_num, + unsigned int element_num, + unsigned int value) +{ + generic_spd_eeprom_t *pspd; + + pspd = &(pinfo->spd_installed_dimms[ctrl_num][dimm_num]); + fsl_ddr_generic_edit(pspd, pspd + 1, 1, element_num, value); +} + +#define COMMON_TIMING(x) {#x, offsetof(common_timing_params_t, x), \ + sizeof((common_timing_params_t *)0)->x, 0} + +static void lowest_common_dimm_parameters_edit(fsl_ddr_info_t *pinfo, + unsigned int ctrl_num, + const char *optname_str, + const char *value_str) +{ + common_timing_params_t *p = &pinfo->common_timing_params[ctrl_num]; + + static const struct options_string options[] = { + COMMON_TIMING(tckmin_x_ps), + COMMON_TIMING(tckmax_ps), +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + COMMON_TIMING(taamin_ps), +#endif + COMMON_TIMING(trcd_ps), + COMMON_TIMING(trp_ps), + COMMON_TIMING(tras_ps), + +#ifdef CONFIG_SYS_FSL_DDR4 + COMMON_TIMING(trfc1_ps), + 
COMMON_TIMING(trfc2_ps), + COMMON_TIMING(trfc4_ps), + COMMON_TIMING(trrds_ps), + COMMON_TIMING(trrdl_ps), + COMMON_TIMING(tccdl_ps), + COMMON_TIMING(trfc_slr_ps), +#else + COMMON_TIMING(twtr_ps), + COMMON_TIMING(trfc_ps), + COMMON_TIMING(trrd_ps), + COMMON_TIMING(trtp_ps), +#endif + COMMON_TIMING(twr_ps), + COMMON_TIMING(trc_ps), + COMMON_TIMING(refresh_rate_ps), + COMMON_TIMING(extended_op_srt), +#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2) + COMMON_TIMING(tis_ps), + COMMON_TIMING(tih_ps), + COMMON_TIMING(tds_ps), + COMMON_TIMING(tdh_ps), + COMMON_TIMING(tdqsq_max_ps), + COMMON_TIMING(tqhs_ps), +#endif + COMMON_TIMING(ndimms_present), + COMMON_TIMING(lowest_common_spd_caslat), + COMMON_TIMING(highest_common_derated_caslat), + COMMON_TIMING(additive_latency), + COMMON_TIMING(all_dimms_burst_lengths_bitmask), + COMMON_TIMING(all_dimms_registered), + COMMON_TIMING(all_dimms_unbuffered), + COMMON_TIMING(all_dimms_ecc_capable), + COMMON_TIMING(total_mem), + COMMON_TIMING(base_address), + }; + static const unsigned int n_opts = ARRAY_SIZE(options); + + if (handle_option_table(options, n_opts, p, optname_str, value_str)) + return; + + printf("Error: couldn't find option string %s\n", optname_str); +} + +#define DIMM_PARM(x) {#x, offsetof(dimm_params_t, x), \ + sizeof((dimm_params_t *)0)->x, 0} +#define DIMM_PARM_HEX(x) {#x, offsetof(dimm_params_t, x), \ + sizeof((dimm_params_t *)0)->x, 1} + +static void fsl_ddr_dimm_parameters_edit(fsl_ddr_info_t *pinfo, + unsigned int ctrl_num, + unsigned int dimm_num, + const char *optname_str, + const char *value_str) +{ + dimm_params_t *p = &(pinfo->dimm_params[ctrl_num][dimm_num]); + + static const struct options_string options[] = { + DIMM_PARM(n_ranks), + DIMM_PARM(data_width), + DIMM_PARM(primary_sdram_width), + DIMM_PARM(ec_sdram_width), + DIMM_PARM(package_3ds), + DIMM_PARM(registered_dimm), + DIMM_PARM(mirrored_dimm), + DIMM_PARM(device_width), + + DIMM_PARM(n_row_addr), + DIMM_PARM(n_col_addr), + DIMM_PARM(edc_config), +#ifdef CONFIG_SYS_FSL_DDR4 + DIMM_PARM(bank_addr_bits), + DIMM_PARM(bank_group_bits), + DIMM_PARM_HEX(die_density), +#else + DIMM_PARM(n_banks_per_sdram_device), +#endif + DIMM_PARM(burst_lengths_bitmask), + + DIMM_PARM(tckmin_x_ps), + DIMM_PARM(tckmin_x_minus_1_ps), + DIMM_PARM(tckmin_x_minus_2_ps), + DIMM_PARM(tckmax_ps), + + DIMM_PARM(caslat_x), + DIMM_PARM(caslat_x_minus_1), + DIMM_PARM(caslat_x_minus_2), + + DIMM_PARM(caslat_lowest_derated), + + DIMM_PARM(trcd_ps), + DIMM_PARM(trp_ps), + DIMM_PARM(tras_ps), +#ifdef CONFIG_SYS_FSL_DDR4 + DIMM_PARM(trfc1_ps), + DIMM_PARM(trfc2_ps), + DIMM_PARM(trfc4_ps), + DIMM_PARM(trrds_ps), + DIMM_PARM(trrdl_ps), + DIMM_PARM(tccdl_ps), + DIMM_PARM(trfc_slr_ps), +#else + DIMM_PARM(twr_ps), + DIMM_PARM(twtr_ps), + DIMM_PARM(trfc_ps), + DIMM_PARM(trrd_ps), + DIMM_PARM(trtp_ps), +#endif + DIMM_PARM(trc_ps), + DIMM_PARM(refresh_rate_ps), + DIMM_PARM(extended_op_srt), + +#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2) + DIMM_PARM(tis_ps), + DIMM_PARM(tih_ps), + DIMM_PARM(tds_ps), + DIMM_PARM(tdh_ps), + DIMM_PARM(tdqsq_max_ps), + DIMM_PARM(tqhs_ps), +#endif +#ifdef CONFIG_SYS_FSL_DDR4 + DIMM_PARM_HEX(dq_mapping[0]), + DIMM_PARM_HEX(dq_mapping[1]), + DIMM_PARM_HEX(dq_mapping[2]), + DIMM_PARM_HEX(dq_mapping[3]), + DIMM_PARM_HEX(dq_mapping[4]), + DIMM_PARM_HEX(dq_mapping[5]), + DIMM_PARM_HEX(dq_mapping[6]), + DIMM_PARM_HEX(dq_mapping[7]), + DIMM_PARM_HEX(dq_mapping[8]), + DIMM_PARM_HEX(dq_mapping[9]), + DIMM_PARM_HEX(dq_mapping[10]), + DIMM_PARM_HEX(dq_mapping[11]), + 
DIMM_PARM_HEX(dq_mapping[12]), + DIMM_PARM_HEX(dq_mapping[13]), + DIMM_PARM_HEX(dq_mapping[14]), + DIMM_PARM_HEX(dq_mapping[15]), + DIMM_PARM_HEX(dq_mapping[16]), + DIMM_PARM_HEX(dq_mapping[17]), + DIMM_PARM(dq_mapping_ors), +#endif + DIMM_PARM(rank_density), + DIMM_PARM(capacity), + DIMM_PARM(base_address), + }; + + static const unsigned int n_opts = ARRAY_SIZE(options); + + if (handle_option_table(options, n_opts, p, optname_str, value_str)) + return; + + printf("Error: couldn't find option string %s\n", optname_str); +} + +static void print_dimm_parameters(const dimm_params_t *pdimm) +{ + static const struct options_string options[] = { + DIMM_PARM(n_ranks), + DIMM_PARM(data_width), + DIMM_PARM(primary_sdram_width), + DIMM_PARM(ec_sdram_width), + DIMM_PARM(package_3ds), + DIMM_PARM(registered_dimm), + DIMM_PARM(mirrored_dimm), + DIMM_PARM(device_width), + + DIMM_PARM(n_row_addr), + DIMM_PARM(n_col_addr), + DIMM_PARM(edc_config), +#ifdef CONFIG_SYS_FSL_DDR4 + DIMM_PARM(bank_addr_bits), + DIMM_PARM(bank_group_bits), + DIMM_PARM_HEX(die_density), +#else + DIMM_PARM(n_banks_per_sdram_device), +#endif + + DIMM_PARM(tckmin_x_ps), + DIMM_PARM(tckmin_x_minus_1_ps), + DIMM_PARM(tckmin_x_minus_2_ps), + DIMM_PARM(tckmax_ps), + + DIMM_PARM(caslat_x), + DIMM_PARM_HEX(caslat_x), + DIMM_PARM(taa_ps), + DIMM_PARM(caslat_x_minus_1), + DIMM_PARM(caslat_x_minus_2), + DIMM_PARM(caslat_lowest_derated), + + DIMM_PARM(trcd_ps), + DIMM_PARM(trp_ps), + DIMM_PARM(tras_ps), +#if defined(CONFIG_SYS_FSL_DDR4) || defined(CONFIG_SYS_FSL_DDR3) + DIMM_PARM(tfaw_ps), +#endif +#ifdef CONFIG_SYS_FSL_DDR4 + DIMM_PARM(trfc1_ps), + DIMM_PARM(trfc2_ps), + DIMM_PARM(trfc4_ps), + DIMM_PARM(trrds_ps), + DIMM_PARM(trrdl_ps), + DIMM_PARM(tccdl_ps), + DIMM_PARM(trfc_slr_ps), +#else + DIMM_PARM(twr_ps), + DIMM_PARM(twtr_ps), + DIMM_PARM(trfc_ps), + DIMM_PARM(trrd_ps), + DIMM_PARM(trtp_ps), +#endif + DIMM_PARM(trc_ps), + DIMM_PARM(refresh_rate_ps), + +#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2) + DIMM_PARM(tis_ps), + DIMM_PARM(tih_ps), + DIMM_PARM(tds_ps), + DIMM_PARM(tdh_ps), + DIMM_PARM(tdqsq_max_ps), + DIMM_PARM(tqhs_ps), +#endif +#ifdef CONFIG_SYS_FSL_DDR4 + DIMM_PARM_HEX(dq_mapping[0]), + DIMM_PARM_HEX(dq_mapping[1]), + DIMM_PARM_HEX(dq_mapping[2]), + DIMM_PARM_HEX(dq_mapping[3]), + DIMM_PARM_HEX(dq_mapping[4]), + DIMM_PARM_HEX(dq_mapping[5]), + DIMM_PARM_HEX(dq_mapping[6]), + DIMM_PARM_HEX(dq_mapping[7]), + DIMM_PARM_HEX(dq_mapping[8]), + DIMM_PARM_HEX(dq_mapping[9]), + DIMM_PARM_HEX(dq_mapping[10]), + DIMM_PARM_HEX(dq_mapping[11]), + DIMM_PARM_HEX(dq_mapping[12]), + DIMM_PARM_HEX(dq_mapping[13]), + DIMM_PARM_HEX(dq_mapping[14]), + DIMM_PARM_HEX(dq_mapping[15]), + DIMM_PARM_HEX(dq_mapping[16]), + DIMM_PARM_HEX(dq_mapping[17]), + DIMM_PARM(dq_mapping_ors), +#endif + }; + static const unsigned int n_opts = ARRAY_SIZE(options); + + if (pdimm->n_ranks == 0) { + printf("DIMM not present\n"); + return; + } + printf("DIMM organization parameters:\n"); + printf("module part name = %s\n", pdimm->mpart); + printf("rank_density = %llu bytes (%llu megabytes)\n", + pdimm->rank_density, pdimm->rank_density / 0x100000); + printf("capacity = %llu bytes (%llu megabytes)\n", + pdimm->capacity, pdimm->capacity / 0x100000); + printf("burst_lengths_bitmask = %02X\n", + pdimm->burst_lengths_bitmask); + printf("base_address = %llu (%08llX %08llX)\n", + pdimm->base_address, + (pdimm->base_address >> 32), + pdimm->base_address & 0xFFFFFFFF); + print_option_table(options, n_opts, pdimm); +} + +static void 
print_lowest_common_dimm_parameters( + const common_timing_params_t *plcd_dimm_params) +{ + static const struct options_string options[] = { +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + COMMON_TIMING(taamin_ps), +#endif + COMMON_TIMING(trcd_ps), + COMMON_TIMING(trp_ps), + COMMON_TIMING(tras_ps), +#ifdef CONFIG_SYS_FSL_DDR4 + COMMON_TIMING(trfc1_ps), + COMMON_TIMING(trfc2_ps), + COMMON_TIMING(trfc4_ps), + COMMON_TIMING(trrds_ps), + COMMON_TIMING(trrdl_ps), + COMMON_TIMING(tccdl_ps), + COMMON_TIMING(trfc_slr_ps), +#else + COMMON_TIMING(twtr_ps), + COMMON_TIMING(trfc_ps), + COMMON_TIMING(trrd_ps), + COMMON_TIMING(trtp_ps), +#endif + COMMON_TIMING(twr_ps), + COMMON_TIMING(trc_ps), + COMMON_TIMING(refresh_rate_ps), + COMMON_TIMING(extended_op_srt), +#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2) + COMMON_TIMING(tis_ps), + COMMON_TIMING(tih_ps), + COMMON_TIMING(tds_ps), + COMMON_TIMING(tdh_ps), + COMMON_TIMING(tdqsq_max_ps), + COMMON_TIMING(tqhs_ps), +#endif + COMMON_TIMING(lowest_common_spd_caslat), + COMMON_TIMING(highest_common_derated_caslat), + COMMON_TIMING(additive_latency), + COMMON_TIMING(ndimms_present), + COMMON_TIMING(all_dimms_registered), + COMMON_TIMING(all_dimms_unbuffered), + COMMON_TIMING(all_dimms_ecc_capable), + }; + static const unsigned int n_opts = ARRAY_SIZE(options); + + /* Clock frequencies */ + printf("tckmin_x_ps = %u (%u MHz)\n", + plcd_dimm_params->tckmin_x_ps, + picos_to_mhz(plcd_dimm_params->tckmin_x_ps)); + printf("tckmax_ps = %u (%u MHz)\n", + plcd_dimm_params->tckmax_ps, + picos_to_mhz(plcd_dimm_params->tckmax_ps)); + printf("all_dimms_burst_lengths_bitmask = %02X\n", + plcd_dimm_params->all_dimms_burst_lengths_bitmask); + + print_option_table(options, n_opts, plcd_dimm_params); + + printf("total_mem = %llu (%llu megabytes)\n", + plcd_dimm_params->total_mem, + plcd_dimm_params->total_mem / 0x100000); + printf("base_address = %llu (%llu megabytes)\n", + plcd_dimm_params->base_address, + plcd_dimm_params->base_address / 0x100000); +} + +#define CTRL_OPTIONS(x) {#x, offsetof(memctl_options_t, x), \ + sizeof((memctl_options_t *)0)->x, 0} +#define CTRL_OPTIONS_CS(x, y) {"cs" #x "_" #y, \ + offsetof(memctl_options_t, cs_local_opts[x].y), \ + sizeof((memctl_options_t *)0)->cs_local_opts[x].y, 0} + +static void fsl_ddr_options_edit(fsl_ddr_info_t *pinfo, + unsigned int ctl_num, + const char *optname_str, + const char *value_str) +{ + memctl_options_t *p = &(pinfo->memctl_opts[ctl_num]); + /* + * Note: this table is static const, so it is built once at + * compile time rather than recomputed on the stack each time + * this function is run. 
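+	 * Example of a hypothetical edit session using names from this
+	 * table (values appear to be parsed with automatic base
+	 * detection, as the debug_N register edits below do):
+	 *
+	 *	FSL DDR> edit c0 opts cs0_odt_rd_cfg 4
+	 *	FSL DDR> edit c0 opts data_bus_width 1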
+ */ + static const struct options_string options[] = { + CTRL_OPTIONS_CS(0, odt_rd_cfg), + CTRL_OPTIONS_CS(0, odt_wr_cfg), +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 1) + CTRL_OPTIONS_CS(1, odt_rd_cfg), + CTRL_OPTIONS_CS(1, odt_wr_cfg), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 2) + CTRL_OPTIONS_CS(2, odt_rd_cfg), + CTRL_OPTIONS_CS(2, odt_wr_cfg), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 3) + CTRL_OPTIONS_CS(3, odt_rd_cfg), + CTRL_OPTIONS_CS(3, odt_wr_cfg), +#endif +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + CTRL_OPTIONS_CS(0, odt_rtt_norm), + CTRL_OPTIONS_CS(0, odt_rtt_wr), +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 1) + CTRL_OPTIONS_CS(1, odt_rtt_norm), + CTRL_OPTIONS_CS(1, odt_rtt_wr), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 2) + CTRL_OPTIONS_CS(2, odt_rtt_norm), + CTRL_OPTIONS_CS(2, odt_rtt_wr), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 3) + CTRL_OPTIONS_CS(3, odt_rtt_norm), + CTRL_OPTIONS_CS(3, odt_rtt_wr), +#endif +#endif + CTRL_OPTIONS(memctl_interleaving), + CTRL_OPTIONS(memctl_interleaving_mode), + CTRL_OPTIONS(ba_intlv_ctl), + CTRL_OPTIONS(ecc_mode), + CTRL_OPTIONS(ecc_init_using_memctl), + CTRL_OPTIONS(dqs_config), + CTRL_OPTIONS(self_refresh_in_sleep), + CTRL_OPTIONS(dynamic_power), + CTRL_OPTIONS(data_bus_width), + CTRL_OPTIONS(burst_length), + CTRL_OPTIONS(cas_latency_override), + CTRL_OPTIONS(cas_latency_override_value), + CTRL_OPTIONS(use_derated_caslat), + CTRL_OPTIONS(additive_latency_override), + CTRL_OPTIONS(additive_latency_override_value), + CTRL_OPTIONS(clk_adjust), + CTRL_OPTIONS(cpo_override), + CTRL_OPTIONS(write_data_delay), + CTRL_OPTIONS(half_strength_driver_enable), + + /* + * These can probably be changed to 2T_EN and 3T_EN + * (using a leading numerical character) without problem + */ + CTRL_OPTIONS(twot_en), + CTRL_OPTIONS(threet_en), + CTRL_OPTIONS(mirrored_dimm), + CTRL_OPTIONS(ap_en), + CTRL_OPTIONS(x4_en), + CTRL_OPTIONS(package_3ds), + CTRL_OPTIONS(bstopre), + CTRL_OPTIONS(wrlvl_override), + CTRL_OPTIONS(wrlvl_sample), + CTRL_OPTIONS(wrlvl_start), + CTRL_OPTIONS(cswl_override), + CTRL_OPTIONS(rcw_override), + CTRL_OPTIONS(rcw_1), + CTRL_OPTIONS(rcw_2), + CTRL_OPTIONS(rcw_3), + CTRL_OPTIONS(ddr_cdr1), + CTRL_OPTIONS(ddr_cdr2), + CTRL_OPTIONS(tfaw_window_four_activates_ps), + CTRL_OPTIONS(trwt_override), + CTRL_OPTIONS(trwt), + CTRL_OPTIONS(rtt_override), + CTRL_OPTIONS(rtt_override_value), + CTRL_OPTIONS(rtt_wr_override_value), + }; + + static const unsigned int n_opts = ARRAY_SIZE(options); + + if (handle_option_table(options, n_opts, p, + optname_str, value_str)) + return; + + printf("Error: couldn't find option string %s\n", optname_str); +} + +#define CFG_REGS(x) {#x, offsetof(fsl_ddr_cfg_regs_t, x), \ + sizeof((fsl_ddr_cfg_regs_t *)0)->x, 1} +#define CFG_REGS_CS(x, y) {"cs" #x "_" #y, \ + offsetof(fsl_ddr_cfg_regs_t, cs[x].y), \ + sizeof((fsl_ddr_cfg_regs_t *)0)->cs[x].y, 1} + +static void print_fsl_memctl_config_regs(const fsl_ddr_cfg_regs_t *ddr) +{ + unsigned int i; + static const struct options_string options[] = { + CFG_REGS_CS(0, bnds), + CFG_REGS_CS(0, config), + CFG_REGS_CS(0, config_2), +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 1) + CFG_REGS_CS(1, bnds), + CFG_REGS_CS(1, config), + CFG_REGS_CS(1, config_2), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 2) + CFG_REGS_CS(2, bnds), + CFG_REGS_CS(2, config), + CFG_REGS_CS(2, config_2), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 3) + CFG_REGS_CS(3, bnds), + CFG_REGS_CS(3, config), + CFG_REGS_CS(3, config_2), +#endif + CFG_REGS(timing_cfg_3), + CFG_REGS(timing_cfg_0), + 
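+	/*
+	 * Editor's note: for reference, CFG_REGS_CS(0, bnds) above
+	 * expands to
+	 *
+	 *	{"cs0_bnds", offsetof(fsl_ddr_cfg_regs_t, cs[0].bnds),
+	 *	 sizeof((fsl_ddr_cfg_regs_t *)0)->cs[0].bnds, 1},
+	 *
+	 * i.e. the string is the key a user types, and the trailing 1
+	 * asks for the value to be printed in hex.
+	 */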
CFG_REGS(timing_cfg_1), + CFG_REGS(timing_cfg_2), + CFG_REGS(ddr_sdram_cfg), + CFG_REGS(ddr_sdram_cfg_2), + CFG_REGS(ddr_sdram_cfg_3), + CFG_REGS(ddr_sdram_mode), + CFG_REGS(ddr_sdram_mode_2), + CFG_REGS(ddr_sdram_mode_3), + CFG_REGS(ddr_sdram_mode_4), + CFG_REGS(ddr_sdram_mode_5), + CFG_REGS(ddr_sdram_mode_6), + CFG_REGS(ddr_sdram_mode_7), + CFG_REGS(ddr_sdram_mode_8), +#ifdef CONFIG_SYS_FSL_DDR4 + CFG_REGS(ddr_sdram_mode_9), + CFG_REGS(ddr_sdram_mode_10), + CFG_REGS(ddr_sdram_mode_11), + CFG_REGS(ddr_sdram_mode_12), + CFG_REGS(ddr_sdram_mode_13), + CFG_REGS(ddr_sdram_mode_14), + CFG_REGS(ddr_sdram_mode_15), + CFG_REGS(ddr_sdram_mode_16), +#endif + CFG_REGS(ddr_sdram_interval), + CFG_REGS(ddr_data_init), + CFG_REGS(ddr_sdram_clk_cntl), + CFG_REGS(ddr_init_addr), + CFG_REGS(ddr_init_ext_addr), + CFG_REGS(timing_cfg_4), + CFG_REGS(timing_cfg_5), +#ifdef CONFIG_SYS_FSL_DDR4 + CFG_REGS(timing_cfg_6), + CFG_REGS(timing_cfg_7), + CFG_REGS(timing_cfg_8), + CFG_REGS(timing_cfg_9), +#endif + CFG_REGS(ddr_zq_cntl), + CFG_REGS(ddr_wrlvl_cntl), + CFG_REGS(ddr_wrlvl_cntl_2), + CFG_REGS(ddr_wrlvl_cntl_3), + CFG_REGS(ddr_sr_cntr), + CFG_REGS(ddr_sdram_rcw_1), + CFG_REGS(ddr_sdram_rcw_2), + CFG_REGS(ddr_sdram_rcw_3), + CFG_REGS(ddr_cdr1), + CFG_REGS(ddr_cdr2), + CFG_REGS(dq_map_0), + CFG_REGS(dq_map_1), + CFG_REGS(dq_map_2), + CFG_REGS(dq_map_3), + CFG_REGS(err_disable), + CFG_REGS(err_int_en), + CFG_REGS(ddr_eor), + }; + static const unsigned int n_opts = ARRAY_SIZE(options); + + print_option_table(options, n_opts, ddr); + + for (i = 0; i < 64; i++) + printf("debug_%02d = 0x%08X\n", i+1, ddr->debug[i]); +} + +static void fsl_ddr_regs_edit(fsl_ddr_info_t *pinfo, + unsigned int ctrl_num, + const char *regname, + const char *value_str) +{ + unsigned int i; + fsl_ddr_cfg_regs_t *ddr; + char buf[20]; + static const struct options_string options[] = { + CFG_REGS_CS(0, bnds), + CFG_REGS_CS(0, config), + CFG_REGS_CS(0, config_2), +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 1) + CFG_REGS_CS(1, bnds), + CFG_REGS_CS(1, config), + CFG_REGS_CS(1, config_2), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 2) + CFG_REGS_CS(2, bnds), + CFG_REGS_CS(2, config), + CFG_REGS_CS(2, config_2), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 3) + CFG_REGS_CS(3, bnds), + CFG_REGS_CS(3, config), + CFG_REGS_CS(3, config_2), +#endif + CFG_REGS(timing_cfg_3), + CFG_REGS(timing_cfg_0), + CFG_REGS(timing_cfg_1), + CFG_REGS(timing_cfg_2), + CFG_REGS(ddr_sdram_cfg), + CFG_REGS(ddr_sdram_cfg_2), + CFG_REGS(ddr_sdram_cfg_3), + CFG_REGS(ddr_sdram_mode), + CFG_REGS(ddr_sdram_mode_2), + CFG_REGS(ddr_sdram_mode_3), + CFG_REGS(ddr_sdram_mode_4), + CFG_REGS(ddr_sdram_mode_5), + CFG_REGS(ddr_sdram_mode_6), + CFG_REGS(ddr_sdram_mode_7), + CFG_REGS(ddr_sdram_mode_8), +#ifdef CONFIG_SYS_FSL_DDR4 + CFG_REGS(ddr_sdram_mode_9), + CFG_REGS(ddr_sdram_mode_10), + CFG_REGS(ddr_sdram_mode_11), + CFG_REGS(ddr_sdram_mode_12), + CFG_REGS(ddr_sdram_mode_13), + CFG_REGS(ddr_sdram_mode_14), + CFG_REGS(ddr_sdram_mode_15), + CFG_REGS(ddr_sdram_mode_16), +#endif + CFG_REGS(ddr_sdram_interval), + CFG_REGS(ddr_data_init), + CFG_REGS(ddr_sdram_clk_cntl), + CFG_REGS(ddr_init_addr), + CFG_REGS(ddr_init_ext_addr), + CFG_REGS(timing_cfg_4), + CFG_REGS(timing_cfg_5), +#ifdef CONFIG_SYS_FSL_DDR4 + CFG_REGS(timing_cfg_6), + CFG_REGS(timing_cfg_7), + CFG_REGS(timing_cfg_8), + CFG_REGS(timing_cfg_9), +#endif + CFG_REGS(ddr_zq_cntl), + CFG_REGS(ddr_wrlvl_cntl), + CFG_REGS(ddr_wrlvl_cntl_2), + CFG_REGS(ddr_wrlvl_cntl_3), + CFG_REGS(ddr_sr_cntr), + CFG_REGS(ddr_sdram_rcw_1), + 
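+	/*
+	 * Editor's note: with this table a register edit looks like,
+	 * e.g. (hypothetical values):
+	 *
+	 *	FSL DDR> edit c0 regs cs0_bnds 0x0000007F
+	 *	FSL DDR> edit c0 regs debug_2 0x00000001
+	 *
+	 * Note the debug registers are matched below as debug_1 ..
+	 * debug_64, while print_fsl_memctl_config_regs() prints them
+	 * zero-padded as debug_01 .. debug_64.
+	 */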
CFG_REGS(ddr_sdram_rcw_2), + CFG_REGS(ddr_sdram_rcw_3), + CFG_REGS(ddr_cdr1), + CFG_REGS(ddr_cdr2), + CFG_REGS(dq_map_0), + CFG_REGS(dq_map_1), + CFG_REGS(dq_map_2), + CFG_REGS(dq_map_3), + CFG_REGS(err_disable), + CFG_REGS(err_int_en), + CFG_REGS(ddr_eor), + }; + static const unsigned int n_opts = ARRAY_SIZE(options); + + debug("fsl_ddr_regs_edit: ctrl_num = %u, " + "regname = %s, value = %s\n", + ctrl_num, regname, value_str); + if (ctrl_num >= CONFIG_SYS_NUM_DDR_CTLRS) + return; + + ddr = &(pinfo->fsl_ddr_config_reg[ctrl_num]); + + if (handle_option_table(options, n_opts, ddr, regname, value_str)) + return; + + for (i = 0; i < 64; i++) { + unsigned int value = simple_strtoul(value_str, NULL, 0); + sprintf(buf, "debug_%u", i + 1); + if (strcmp(buf, regname) == 0) { + ddr->debug[i] = value; + return; + } + } + printf("Error: couldn't find register string %s\n", regname); +} + +#define CTRL_OPTIONS_HEX(x) {#x, offsetof(memctl_options_t, x), \ + sizeof((memctl_options_t *)0)->x, 1} + +static void print_memctl_options(const memctl_options_t *popts) +{ + static const struct options_string options[] = { + CTRL_OPTIONS_CS(0, odt_rd_cfg), + CTRL_OPTIONS_CS(0, odt_wr_cfg), +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 1) + CTRL_OPTIONS_CS(1, odt_rd_cfg), + CTRL_OPTIONS_CS(1, odt_wr_cfg), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 2) + CTRL_OPTIONS_CS(2, odt_rd_cfg), + CTRL_OPTIONS_CS(2, odt_wr_cfg), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 3) + CTRL_OPTIONS_CS(3, odt_rd_cfg), + CTRL_OPTIONS_CS(3, odt_wr_cfg), +#endif +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + CTRL_OPTIONS_CS(0, odt_rtt_norm), + CTRL_OPTIONS_CS(0, odt_rtt_wr), +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 1) + CTRL_OPTIONS_CS(1, odt_rtt_norm), + CTRL_OPTIONS_CS(1, odt_rtt_wr), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 2) + CTRL_OPTIONS_CS(2, odt_rtt_norm), + CTRL_OPTIONS_CS(2, odt_rtt_wr), +#endif +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 3) + CTRL_OPTIONS_CS(3, odt_rtt_norm), + CTRL_OPTIONS_CS(3, odt_rtt_wr), +#endif +#endif + CTRL_OPTIONS(memctl_interleaving), + CTRL_OPTIONS(memctl_interleaving_mode), + CTRL_OPTIONS_HEX(ba_intlv_ctl), + CTRL_OPTIONS(ecc_mode), + CTRL_OPTIONS(ecc_init_using_memctl), + CTRL_OPTIONS(dqs_config), + CTRL_OPTIONS(self_refresh_in_sleep), + CTRL_OPTIONS(dynamic_power), + CTRL_OPTIONS(data_bus_width), + CTRL_OPTIONS(burst_length), + CTRL_OPTIONS(cas_latency_override), + CTRL_OPTIONS(cas_latency_override_value), + CTRL_OPTIONS(use_derated_caslat), + CTRL_OPTIONS(additive_latency_override), + CTRL_OPTIONS(additive_latency_override_value), + CTRL_OPTIONS(clk_adjust), + CTRL_OPTIONS(cpo_override), + CTRL_OPTIONS(write_data_delay), + CTRL_OPTIONS(half_strength_driver_enable), + /* + * These can probably be changed to 2T_EN and 3T_EN + * (using a leading numerical character) without problem + */ + CTRL_OPTIONS(twot_en), + CTRL_OPTIONS(threet_en), + CTRL_OPTIONS(registered_dimm_en), + CTRL_OPTIONS(mirrored_dimm), + CTRL_OPTIONS(ap_en), + CTRL_OPTIONS(x4_en), + CTRL_OPTIONS(package_3ds), + CTRL_OPTIONS(bstopre), + CTRL_OPTIONS(wrlvl_override), + CTRL_OPTIONS(wrlvl_sample), + CTRL_OPTIONS(wrlvl_start), + CTRL_OPTIONS_HEX(cswl_override), + CTRL_OPTIONS(rcw_override), + CTRL_OPTIONS_HEX(rcw_1), + CTRL_OPTIONS_HEX(rcw_2), + CTRL_OPTIONS_HEX(rcw_3), + CTRL_OPTIONS_HEX(ddr_cdr1), + CTRL_OPTIONS_HEX(ddr_cdr2), + CTRL_OPTIONS(tfaw_window_four_activates_ps), + CTRL_OPTIONS(trwt_override), + CTRL_OPTIONS(trwt), + CTRL_OPTIONS(rtt_override), + 
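+	/*
+	 * Editor's note: the _HEX variants only change how a field is
+	 * displayed by print_option_table(); on the edit side values
+	 * appear to be parsed with automatic base detection (as the
+	 * debug_N handling above does with simple_strtoul(..., 0)), so
+	 * "22" and "0x16" should be interchangeable regardless of how
+	 * an option is printed here.
+	 */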
CTRL_OPTIONS(rtt_override_value), + CTRL_OPTIONS(rtt_wr_override_value), + }; + static const unsigned int n_opts = ARRAY_SIZE(options); + + print_option_table(options, n_opts, popts); +} + +#ifdef CONFIG_SYS_FSL_DDR1 +void ddr1_spd_dump(const ddr1_spd_eeprom_t *spd) +{ + unsigned int i; + + printf("%-3d : %02x %s\n", 0, spd->info_size, + " spd->info_size, * 0 # bytes written into serial memory *"); + printf("%-3d : %02x %s\n", 1, spd->chip_size, + " spd->chip_size, * 1 Total # bytes of SPD memory device *"); + printf("%-3d : %02x %s\n", 2, spd->mem_type, + " spd->mem_type, * 2 Fundamental memory type *"); + printf("%-3d : %02x %s\n", 3, spd->nrow_addr, + " spd->nrow_addr, * 3 # of Row Addresses on this assembly *"); + printf("%-3d : %02x %s\n", 4, spd->ncol_addr, + " spd->ncol_addr, * 4 # of Column Addrs on this assembly *"); + printf("%-3d : %02x %s\n", 5, spd->nrows, + " spd->nrows * 5 # of DIMM Banks *"); + printf("%-3d : %02x %s\n", 6, spd->dataw_lsb, + " spd->dataw_lsb, * 6 Data Width lsb of this assembly *"); + printf("%-3d : %02x %s\n", 7, spd->dataw_msb, + " spd->dataw_msb, * 7 Data Width msb of this assembly *"); + printf("%-3d : %02x %s\n", 8, spd->voltage, + " spd->voltage, * 8 Voltage intf std of this assembly *"); + printf("%-3d : %02x %s\n", 9, spd->clk_cycle, + " spd->clk_cycle, * 9 SDRAM Cycle time at CL=X *"); + printf("%-3d : %02x %s\n", 10, spd->clk_access, + " spd->clk_access, * 10 SDRAM Access from Clock at CL=X *"); + printf("%-3d : %02x %s\n", 11, spd->config, + " spd->config, * 11 DIMM Configuration type *"); + printf("%-3d : %02x %s\n", 12, spd->refresh, + " spd->refresh, * 12 Refresh Rate/Type *"); + printf("%-3d : %02x %s\n", 13, spd->primw, + " spd->primw, * 13 Primary SDRAM Width *"); + printf("%-3d : %02x %s\n", 14, spd->ecw, + " spd->ecw, * 14 Error Checking SDRAM width *"); + printf("%-3d : %02x %s\n", 15, spd->min_delay, + " spd->min_delay, * 15 Back to Back Random Access *"); + printf("%-3d : %02x %s\n", 16, spd->burstl, + " spd->burstl, * 16 Burst Lengths Supported *"); + printf("%-3d : %02x %s\n", 17, spd->nbanks, + " spd->nbanks, * 17 # of Banks on Each SDRAM Device *"); + printf("%-3d : %02x %s\n", 18, spd->cas_lat, + " spd->cas_lat, * 18 CAS# Latencies Supported *"); + printf("%-3d : %02x %s\n", 19, spd->cs_lat, + " spd->cs_lat, * 19 Chip Select Latency *"); + printf("%-3d : %02x %s\n", 20, spd->write_lat, + " spd->write_lat, * 20 Write Latency/Recovery *"); + printf("%-3d : %02x %s\n", 21, spd->mod_attr, + " spd->mod_attr, * 21 SDRAM Module Attributes *"); + printf("%-3d : %02x %s\n", 22, spd->dev_attr, + " spd->dev_attr, * 22 SDRAM Device Attributes *"); + printf("%-3d : %02x %s\n", 23, spd->clk_cycle2, + " spd->clk_cycle2, * 23 Min SDRAM Cycle time at CL=X-1 *"); + printf("%-3d : %02x %s\n", 24, spd->clk_access2, + " spd->clk_access2, * 24 SDRAM Access from Clock at CL=X-1 *"); + printf("%-3d : %02x %s\n", 25, spd->clk_cycle3, + " spd->clk_cycle3, * 25 Min SDRAM Cycle time at CL=X-2 *"); + printf("%-3d : %02x %s\n", 26, spd->clk_access3, + " spd->clk_access3, * 26 Max Access from Clock at CL=X-2 *"); + printf("%-3d : %02x %s\n", 27, spd->trp, + " spd->trp, * 27 Min Row Precharge Time (tRP)*"); + printf("%-3d : %02x %s\n", 28, spd->trrd, + " spd->trrd, * 28 Min Row Active to Row Active (tRRD) *"); + printf("%-3d : %02x %s\n", 29, spd->trcd, + " spd->trcd, * 29 Min RAS to CAS Delay (tRCD) *"); + printf("%-3d : %02x %s\n", 30, spd->tras, + " spd->tras, * 30 Minimum RAS Pulse Width (tRAS) *"); + printf("%-3d : %02x %s\n", 31, spd->bank_dens, + " 
spd->bank_dens, * 31 Density of each bank on module *"); + printf("%-3d : %02x %s\n", 32, spd->ca_setup, + " spd->ca_setup, * 32 Cmd + Addr signal input setup time *"); + printf("%-3d : %02x %s\n", 33, spd->ca_hold, + " spd->ca_hold, * 33 Cmd and Addr signal input hold time *"); + printf("%-3d : %02x %s\n", 34, spd->data_setup, + " spd->data_setup, * 34 Data signal input setup time *"); + printf("%-3d : %02x %s\n", 35, spd->data_hold, + " spd->data_hold, * 35 Data signal input hold time *"); + printf("%-3d : %02x %s\n", 36, spd->res_36_40[0], + " spd->res_36_40[0], * 36 Reserved / tWR *"); + printf("%-3d : %02x %s\n", 37, spd->res_36_40[1], + " spd->res_36_40[1], * 37 Reserved / tWTR *"); + printf("%-3d : %02x %s\n", 38, spd->res_36_40[2], + " spd->res_36_40[2], * 38 Reserved / tRTP *"); + printf("%-3d : %02x %s\n", 39, spd->res_36_40[3], + " spd->res_36_40[3], * 39 Reserved / mem_probe *"); + printf("%-3d : %02x %s\n", 40, spd->res_36_40[4], + " spd->res_36_40[4], * 40 Reserved / trc,trfc extensions *"); + printf("%-3d : %02x %s\n", 41, spd->trc, + " spd->trc, * 41 Min Active to Auto refresh time tRC *"); + printf("%-3d : %02x %s\n", 42, spd->trfc, + " spd->trfc, * 42 Min Auto to Active period tRFC *"); + printf("%-3d : %02x %s\n", 43, spd->tckmax, + " spd->tckmax, * 43 Max device cycle time tCKmax *"); + printf("%-3d : %02x %s\n", 44, spd->tdqsq, + " spd->tdqsq, * 44 Max DQS to DQ skew *"); + printf("%-3d : %02x %s\n", 45, spd->tqhs, + " spd->tqhs, * 45 Max Read DataHold skew tQHS *"); + printf("%-3d : %02x %s\n", 46, spd->res_46, + " spd->res_46, * 46 Reserved/ PLL Relock time *"); + printf("%-3d : %02x %s\n", 47, spd->dimm_height, + " spd->dimm_height * 47 SDRAM DIMM Height *"); + + printf("%-3d-%3d: ", 48, 61); + + for (i = 0; i < 14; i++) + printf("%02x", spd->res_48_61[i]); + + printf(" * 48-61 IDD in SPD and Reserved space *\n"); + + printf("%-3d : %02x %s\n", 62, spd->spd_rev, + " spd->spd_rev, * 62 SPD Data Revision Code *"); + printf("%-3d : %02x %s\n", 63, spd->cksum, + " spd->cksum, * 63 Checksum for bytes 0-62 *"); + printf("%-3d-%3d: ", 64, 71); + + for (i = 0; i < 8; i++) + printf("%02x", spd->mid[i]); + + printf("* 64 Mfr's JEDEC ID code per JEP-108E *\n"); + printf("%-3d : %02x %s\n", 72, spd->mloc, + " spd->mloc, * 72 Manufacturing Location *"); + + printf("%-3d-%3d: >>", 73, 90); + + for (i = 0; i < 18; i++) + printf("%c", spd->mpart[i]); + + printf("<<* 73 Manufacturer's Part Number *\n"); + + printf("%-3d-%3d: %02x %02x %s\n", 91, 92, spd->rev[0], spd->rev[1], + "* 91 Revision Code *"); + printf("%-3d-%3d: %02x %02x %s\n", 93, 94, spd->mdate[0], spd->mdate[1], + "* 93 Manufacturing Date *"); + printf("%-3d-%3d: ", 95, 98); + + for (i = 0; i < 4; i++) + printf("%02x", spd->sernum[i]); + + printf("* 95 Assembly Serial Number *\n"); + + printf("%-3d-%3d: ", 99, 127); + + for (i = 0; i < 27; i++) + printf("%02x", spd->mspec[i]); + + printf("* 99 Manufacturer Specific Data *\n"); +} +#endif + +#ifdef CONFIG_SYS_FSL_DDR2 +void ddr2_spd_dump(const ddr2_spd_eeprom_t *spd) +{ + unsigned int i; + + printf("%-3d : %02x %s\n", 0, spd->info_size, + " spd->info_size, * 0 # bytes written into serial memory *"); + printf("%-3d : %02x %s\n", 1, spd->chip_size, + " spd->chip_size, * 1 Total # bytes of SPD memory device *"); + printf("%-3d : %02x %s\n", 2, spd->mem_type, + " spd->mem_type, * 2 Fundamental memory type *"); + printf("%-3d : %02x %s\n", 3, spd->nrow_addr, + " spd->nrow_addr, * 3 # of Row Addresses on this assembly *"); + printf("%-3d : %02x %s\n", 4, spd->ncol_addr, + " 
spd->ncol_addr, * 4 # of Column Addrs on this assembly *"); + printf("%-3d : %02x %s\n", 5, spd->mod_ranks, + " spd->mod_ranks * 5 # of Module Rows on this assembly *"); + printf("%-3d : %02x %s\n", 6, spd->dataw, + " spd->dataw, * 6 Data Width of this assembly *"); + printf("%-3d : %02x %s\n", 7, spd->res_7, + " spd->res_7, * 7 Reserved *"); + printf("%-3d : %02x %s\n", 8, spd->voltage, + " spd->voltage, * 8 Voltage intf std of this assembly *"); + printf("%-3d : %02x %s\n", 9, spd->clk_cycle, + " spd->clk_cycle, * 9 SDRAM Cycle time at CL=X *"); + printf("%-3d : %02x %s\n", 10, spd->clk_access, + " spd->clk_access, * 10 SDRAM Access from Clock at CL=X *"); + printf("%-3d : %02x %s\n", 11, spd->config, + " spd->config, * 11 DIMM Configuration type *"); + printf("%-3d : %02x %s\n", 12, spd->refresh, + " spd->refresh, * 12 Refresh Rate/Type *"); + printf("%-3d : %02x %s\n", 13, spd->primw, + " spd->primw, * 13 Primary SDRAM Width *"); + printf("%-3d : %02x %s\n", 14, spd->ecw, + " spd->ecw, * 14 Error Checking SDRAM width *"); + printf("%-3d : %02x %s\n", 15, spd->res_15, + " spd->res_15, * 15 Reserved *"); + printf("%-3d : %02x %s\n", 16, spd->burstl, + " spd->burstl, * 16 Burst Lengths Supported *"); + printf("%-3d : %02x %s\n", 17, spd->nbanks, + " spd->nbanks, * 17 # of Banks on Each SDRAM Device *"); + printf("%-3d : %02x %s\n", 18, spd->cas_lat, + " spd->cas_lat, * 18 CAS# Latencies Supported *"); + printf("%-3d : %02x %s\n", 19, spd->mech_char, + " spd->mech_char, * 19 Mechanical Characteristics *"); + printf("%-3d : %02x %s\n", 20, spd->dimm_type, + " spd->dimm_type, * 20 DIMM type *"); + printf("%-3d : %02x %s\n", 21, spd->mod_attr, + " spd->mod_attr, * 21 SDRAM Module Attributes *"); + printf("%-3d : %02x %s\n", 22, spd->dev_attr, + " spd->dev_attr, * 22 SDRAM Device Attributes *"); + printf("%-3d : %02x %s\n", 23, spd->clk_cycle2, + " spd->clk_cycle2, * 23 Min SDRAM Cycle time at CL=X-1 *"); + printf("%-3d : %02x %s\n", 24, spd->clk_access2, + " spd->clk_access2, * 24 SDRAM Access from Clock at CL=X-1 *"); + printf("%-3d : %02x %s\n", 25, spd->clk_cycle3, + " spd->clk_cycle3, * 25 Min SDRAM Cycle time at CL=X-2 *"); + printf("%-3d : %02x %s\n", 26, spd->clk_access3, + " spd->clk_access3, * 26 Max Access from Clock at CL=X-2 *"); + printf("%-3d : %02x %s\n", 27, spd->trp, + " spd->trp, * 27 Min Row Precharge Time (tRP)*"); + printf("%-3d : %02x %s\n", 28, spd->trrd, + " spd->trrd, * 28 Min Row Active to Row Active (tRRD) *"); + printf("%-3d : %02x %s\n", 29, spd->trcd, + " spd->trcd, * 29 Min RAS to CAS Delay (tRCD) *"); + printf("%-3d : %02x %s\n", 30, spd->tras, + " spd->tras, * 30 Minimum RAS Pulse Width (tRAS) *"); + printf("%-3d : %02x %s\n", 31, spd->rank_dens, + " spd->rank_dens, * 31 Density of each rank on module *"); + printf("%-3d : %02x %s\n", 32, spd->ca_setup, + " spd->ca_setup, * 32 Cmd + Addr signal input setup time *"); + printf("%-3d : %02x %s\n", 33, spd->ca_hold, + " spd->ca_hold, * 33 Cmd and Addr signal input hold time *"); + printf("%-3d : %02x %s\n", 34, spd->data_setup, + " spd->data_setup, * 34 Data signal input setup time *"); + printf("%-3d : %02x %s\n", 35, spd->data_hold, + " spd->data_hold, * 35 Data signal input hold time *"); + printf("%-3d : %02x %s\n", 36, spd->twr, + " spd->twr, * 36 Write Recovery time tWR *"); + printf("%-3d : %02x %s\n", 37, spd->twtr, + " spd->twtr, * 37 Int write to read delay tWTR *"); + printf("%-3d : %02x %s\n", 38, spd->trtp, + " spd->trtp, * 38 Int read to precharge delay tRTP *"); + printf("%-3d : %02x %s\n", 39, 
spd->mem_probe, + " spd->mem_probe, * 39 Mem analysis probe characteristics *"); + printf("%-3d : %02x %s\n", 40, spd->trctrfc_ext, + " spd->trctrfc_ext, * 40 Extensions to trc and trfc *"); + printf("%-3d : %02x %s\n", 41, spd->trc, + " spd->trc, * 41 Min Active to Auto refresh time tRC *"); + printf("%-3d : %02x %s\n", 42, spd->trfc, + " spd->trfc, * 42 Min Auto to Active period tRFC *"); + printf("%-3d : %02x %s\n", 43, spd->tckmax, + " spd->tckmax, * 43 Max device cycle time tCKmax *"); + printf("%-3d : %02x %s\n", 44, spd->tdqsq, + " spd->tdqsq, * 44 Max DQS to DQ skew *"); + printf("%-3d : %02x %s\n", 45, spd->tqhs, + " spd->tqhs, * 45 Max Read DataHold skew tQHS *"); + printf("%-3d : %02x %s\n", 46, spd->pll_relock, + " spd->pll_relock, * 46 PLL Relock time *"); + printf("%-3d : %02x %s\n", 47, spd->t_casemax, + " spd->t_casemax, * 47 t_casemax *"); + printf("%-3d : %02x %s\n", 48, spd->psi_ta_dram, + " spd->psi_ta_dram, * 48 Thermal Resistance of DRAM Package " + "from Top (Case) to Ambient (Psi T-A DRAM) *"); + printf("%-3d : %02x %s\n", 49, spd->dt0_mode, + " spd->dt0_mode, * 49 DRAM Case Temperature Rise from " + "Ambient due to Activate-Precharge/Mode Bits " + "(DT0/Mode Bits) *"); + printf("%-3d : %02x %s\n", 50, spd->dt2n_dt2q, + " spd->dt2n_dt2q, * 50 DRAM Case Temperature Rise from " + "Ambient due to Precharge/Quiet Standby " + "(DT2N/DT2Q) *"); + printf("%-3d : %02x %s\n", 51, spd->dt2p, + " spd->dt2p, * 51 DRAM Case Temperature Rise from " + "Ambient due to Precharge Power-Down (DT2P) *"); + printf("%-3d : %02x %s\n", 52, spd->dt3n, + " spd->dt3n, * 52 DRAM Case Temperature Rise from " + "Ambient due to Active Standby (DT3N) *"); + printf("%-3d : %02x %s\n", 53, spd->dt3pfast, + " spd->dt3pfast, * 53 DRAM Case Temperature Rise from " + "Ambient due to Active Power-Down with Fast PDN Exit " + "(DT3Pfast) *"); + printf("%-3d : %02x %s\n", 54, spd->dt3pslow, + " spd->dt3pslow, * 54 DRAM Case Temperature Rise from " + "Ambient due to Active Power-Down with Slow PDN Exit " + "(DT3Pslow) *"); + printf("%-3d : %02x %s\n", 55, spd->dt4r_dt4r4w, + " spd->dt4r_dt4r4w, * 55 DRAM Case Temperature Rise from " + "Ambient due to Page Open Burst Read/DT4R4W Mode Bit " + "(DT4R/DT4R4W Mode Bit) *"); + printf("%-3d : %02x %s\n", 56, spd->dt5b, + " spd->dt5b, * 56 DRAM Case Temperature Rise from " + "Ambient due to Burst Refresh (DT5B) *"); + printf("%-3d : %02x %s\n", 57, spd->dt7, + " spd->dt7, * 57 DRAM Case Temperature Rise from " + "Ambient due to Bank Interleave Reads with " + "Auto-Precharge (DT7) *"); + printf("%-3d : %02x %s\n", 58, spd->psi_ta_pll, + " spd->psi_ta_pll, * 58 Thermal Resistance of PLL Package from" + " Top (Case) to Ambient (Psi T-A PLL) *"); + printf("%-3d : %02x %s\n", 59, spd->psi_ta_reg, + " spd->psi_ta_reg, * 59 Thermal Resistance of Register Package" + " from Top (Case) to Ambient (Psi T-A Register) *"); + printf("%-3d : %02x %s\n", 60, spd->dtpllactive, + " spd->dtpllactive, * 60 PLL Case Temperature Rise from " + "Ambient due to PLL Active (DT PLL Active) *"); + printf("%-3d : %02x %s\n", 61, spd->dtregact, + " spd->dtregact, " + "* 61 Register Case Temperature Rise from Ambient due to " + "Register Active/Mode Bit (DT Register Active/Mode Bit) *"); + printf("%-3d : %02x %s\n", 62, spd->spd_rev, + " spd->spd_rev, * 62 SPD Data Revision Code *"); + printf("%-3d : %02x %s\n", 63, spd->cksum, + " spd->cksum, * 63 Checksum for bytes 0-62 *"); + + printf("%-3d-%3d: ", 64, 71); + + for (i = 0; i < 8; i++) + printf("%02x", spd->mid[i]); + + printf("* 64 Mfr's 
JEDEC ID code per JEP-108E *\n"); + + printf("%-3d : %02x %s\n", 72, spd->mloc, + " spd->mloc, * 72 Manufacturing Location *"); + + printf("%-3d-%3d: >>", 73, 90); + for (i = 0; i < 18; i++) + printf("%c", spd->mpart[i]); + + printf("<<* 73 Manufacturer's Part Number *\n"); + + printf("%-3d-%3d: %02x %02x %s\n", 91, 92, spd->rev[0], spd->rev[1], + "* 91 Revision Code *"); + printf("%-3d-%3d: %02x %02x %s\n", 93, 94, spd->mdate[0], spd->mdate[1], + "* 93 Manufacturing Date *"); + printf("%-3d-%3d: ", 95, 98); + + for (i = 0; i < 4; i++) + printf("%02x", spd->sernum[i]); + + printf("* 95 Assembly Serial Number *\n"); + + printf("%-3d-%3d: ", 99, 127); + for (i = 0; i < 27; i++) + printf("%02x", spd->mspec[i]); + + printf("* 99 Manufacturer Specific Data *\n"); +} +#endif + +#ifdef CONFIG_SYS_FSL_DDR3 +void ddr3_spd_dump(const ddr3_spd_eeprom_t *spd) +{ + unsigned int i; + + /* General Section: Bytes 0-59 */ + +#define PRINT_NXS(x, y, z...) printf("%-3d : %02x " z "\n", x, (u8)y); +#define PRINT_NNXXS(n0, n1, x0, x1, s) \ + printf("%-3d-%3d: %02x %02x " s "\n", n0, n1, x0, x1); + + PRINT_NXS(0, spd->info_size_crc, + "info_size_crc bytes written into serial memory, " + "CRC coverage"); + PRINT_NXS(1, spd->spd_rev, + "spd_rev SPD Revision"); + PRINT_NXS(2, spd->mem_type, + "mem_type Key Byte / DRAM Device Type"); + PRINT_NXS(3, spd->module_type, + "module_type Key Byte / Module Type"); + PRINT_NXS(4, spd->density_banks, + "density_banks SDRAM Density and Banks"); + PRINT_NXS(5, spd->addressing, + "addressing SDRAM Addressing"); + PRINT_NXS(6, spd->module_vdd, + "module_vdd Module Nominal Voltage, VDD"); + PRINT_NXS(7, spd->organization, + "organization Module Organization"); + PRINT_NXS(8, spd->bus_width, + "bus_width Module Memory Bus Width"); + PRINT_NXS(9, spd->ftb_div, + "ftb_div Fine Timebase (FTB) Dividend / Divisor"); + PRINT_NXS(10, spd->mtb_dividend, + "mtb_dividend Medium Timebase (MTB) Dividend"); + PRINT_NXS(11, spd->mtb_divisor, + "mtb_divisor Medium Timebase (MTB) Divisor"); + PRINT_NXS(12, spd->tck_min, + "tck_min SDRAM Minimum Cycle Time"); + PRINT_NXS(13, spd->res_13, + "res_13 Reserved"); + PRINT_NXS(14, spd->caslat_lsb, + "caslat_lsb CAS Latencies Supported, LSB"); + PRINT_NXS(15, spd->caslat_msb, + "caslat_msb CAS Latencies Supported, MSB"); + PRINT_NXS(16, spd->taa_min, + "taa_min Min CAS Latency Time"); + PRINT_NXS(17, spd->twr_min, + "twr_min Min Write Recovery Time"); + PRINT_NXS(18, spd->trcd_min, + "trcd_min Min RAS# to CAS# Delay Time"); + PRINT_NXS(19, spd->trrd_min, + "trrd_min Min Row Active to Row Active Delay Time"); + PRINT_NXS(20, spd->trp_min, + "trp_min Min Row Precharge Delay Time"); + PRINT_NXS(21, spd->tras_trc_ext, + "tras_trc_ext Upper Nibbles for tRAS and tRC"); + PRINT_NXS(22, spd->tras_min_lsb, + "tras_min_lsb Min Active to Precharge Delay Time, LSB"); + PRINT_NXS(23, spd->trc_min_lsb, + "trc_min_lsb Min Active to Active/Refresh Delay Time, LSB"); + PRINT_NXS(24, spd->trfc_min_lsb, + "trfc_min_lsb Min Refresh Recovery Delay Time LSB"); + PRINT_NXS(25, spd->trfc_min_msb, + "trfc_min_msb Min Refresh Recovery Delay Time MSB"); + PRINT_NXS(26, spd->twtr_min, + "twtr_min Min Internal Write to Read Command Delay Time"); + PRINT_NXS(27, spd->trtp_min, + "trtp_min " + "Min Internal Read to Precharge Command Delay Time"); + PRINT_NXS(28, spd->tfaw_msb, + "tfaw_msb Upper Nibble for tFAW"); + PRINT_NXS(29, spd->tfaw_min, + "tfaw_min Min Four Activate Window Delay Time"); + PRINT_NXS(30, spd->opt_features, + "opt_features SDRAM Optional Features"); + 
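+	/*
+	 * Editor's note: the raw timing bytes above are in Medium
+	 * Timebase (MTB) units, with signed Fine Timebase (FTB)
+	 * corrections in bytes 34-38.  For a typical DDR3 SPD,
+	 * MTB = 1/8 ns = 125 ps and FTB = 1 ps, so for example:
+	 *
+	 *	tCKmin [ps] = tck_min * 125 + (s8)fine_tck_min
+	 *	tck_min = 10, fine_tck_min = 0  ->  1250 ps (DDR3-1600)
+	 */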
PRINT_NXS(31, spd->therm_ref_opt, + "therm_ref_opt SDRAM Thermal and Refresh Opts"); + PRINT_NXS(32, spd->therm_sensor, + "therm_sensor SDRAM Thermal Sensor"); + PRINT_NXS(33, spd->device_type, + "device_type SDRAM Device Type"); + PRINT_NXS(34, spd->fine_tck_min, + "fine_tck_min Fine offset for tCKmin"); + PRINT_NXS(35, spd->fine_taa_min, + "fine_taa_min Fine offset for tAAmin"); + PRINT_NXS(36, spd->fine_trcd_min, + "fine_trcd_min Fine offset for tRCDmin"); + PRINT_NXS(37, spd->fine_trp_min, + "fine_trp_min Fine offset for tRPmin"); + PRINT_NXS(38, spd->fine_trc_min, + "fine_trc_min Fine offset for tRCmin"); + + printf("%-3d-%3d: ", 39, 59); /* Reserved, General Section */ + + for (i = 39; i <= 59; i++) + printf("%02x ", spd->res_39_59[i - 39]); + + puts("\n"); + + switch (spd->module_type) { + case 0x02: /* UDIMM */ + case 0x03: /* SO-DIMM */ + case 0x04: /* Micro-DIMM */ + case 0x06: /* Mini-UDIMM */ + PRINT_NXS(60, spd->mod_section.unbuffered.mod_height, + "mod_height (Unbuffered) Module Nominal Height"); + PRINT_NXS(61, spd->mod_section.unbuffered.mod_thickness, + "mod_thickness (Unbuffered) Module Maximum Thickness"); + PRINT_NXS(62, spd->mod_section.unbuffered.ref_raw_card, + "ref_raw_card (Unbuffered) Reference Raw Card Used"); + PRINT_NXS(63, spd->mod_section.unbuffered.addr_mapping, + "addr_mapping (Unbuffered) Address mapping from " + "Edge Connector to DRAM"); + break; + case 0x01: /* RDIMM */ + case 0x05: /* Mini-RDIMM */ + PRINT_NXS(60, spd->mod_section.registered.mod_height, + "mod_height (Registered) Module Nominal Height"); + PRINT_NXS(61, spd->mod_section.registered.mod_thickness, + "mod_thickness (Registered) Module Maximum Thickness"); + PRINT_NXS(62, spd->mod_section.registered.ref_raw_card, + "ref_raw_card (Registered) Reference Raw Card Used"); + PRINT_NXS(63, spd->mod_section.registered.modu_attr, + "modu_attr (Registered) DIMM Module Attributes"); + PRINT_NXS(64, spd->mod_section.registered.thermal, + "thermal (Registered) Thermal Heat " + "Spreader Solution"); + PRINT_NXS(65, spd->mod_section.registered.reg_id_lo, + "reg_id_lo (Registered) Register Manufacturer ID " + "Code, LSB"); + PRINT_NXS(66, spd->mod_section.registered.reg_id_hi, + "reg_id_hi (Registered) Register Manufacturer ID " + "Code, MSB"); + PRINT_NXS(67, spd->mod_section.registered.reg_rev, + "reg_rev (Registered) Register " + "Revision Number"); + PRINT_NXS(68, spd->mod_section.registered.reg_type, + "reg_type (Registered) Register Type"); + for (i = 69; i <= 76; i++) { + printf("%-3d : %02x rcw[%d]\n", i, + spd->mod_section.registered.rcw[i-69], i-69); + } + break; + default: + /* Module-specific Section, Unsupported Module Type */ + printf("%-3d-%3d: ", 60, 116); + + for (i = 60; i <= 116; i++) + printf("%02x", spd->mod_section.uc[i - 60]); + + break; + } + + /* Unique Module ID: Bytes 117-125 */ + PRINT_NXS(117, spd->mmid_lsb, "Module MfgID Code LSB - JEP-106"); + PRINT_NXS(118, spd->mmid_msb, "Module MfgID Code MSB - JEP-106"); + PRINT_NXS(119, spd->mloc, "Mfg Location"); + PRINT_NNXXS(120, 121, spd->mdate[0], spd->mdate[1], "Mfg Date"); + + printf("%-3d-%3d: ", 122, 125); + + for (i = 122; i <= 125; i++) + printf("%02x ", spd->sernum[i - 122]); + printf(" Module Serial Number\n"); + + /* CRC: Bytes 126-127 */ + PRINT_NNXXS(126, 127, spd->crc[0], spd->crc[1], " SPD CRC"); + + /* Other Manufacturer Fields and User Space: Bytes 128-255 */ + printf("%-3d-%3d: ", 128, 145); + for (i = 128; i <= 145; i++) + printf("%02x ", spd->mpart[i - 128]); + printf(" Mfg's Module Part Number\n"); + + 
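+	/*
+	 * Editor's note: the SPD CRC shown above (bytes 126/127) is the
+	 * JEDEC CRC16, polynomial 0x1021, initial value 0, stored
+	 * LSB-first; bit 7 of byte 0 selects whether it covers bytes
+	 * 0-116 or 0-125.  A minimal check could look like:
+	 *
+	 *	u16 crc = 0;
+	 *	int n = (spd->info_size_crc & 0x80) ? 117 : 126;
+	 *	for (i = 0; i < n; i++) {
+	 *		crc ^= (u16)(((const u8 *)spd)[i]) << 8;
+	 *		for (j = 0; j < 8; j++)
+	 *			crc = (crc & 0x8000) ?
+	 *				(crc << 1) ^ 0x1021 : (crc << 1);
+	 *	}
+	 *
+	 * then compare (crc & 0xff) with spd->crc[0] and (crc >> 8)
+	 * with spd->crc[1].
+	 */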
PRINT_NNXXS(146, 147, spd->mrev[0], spd->mrev[1], + "Module Revision code"); + + PRINT_NXS(148, spd->dmid_lsb, "DRAM MfgID Code LSB - JEP-106"); + PRINT_NXS(149, spd->dmid_msb, "DRAM MfgID Code MSB - JEP-106"); + + printf("%-3d-%3d: ", 150, 175); + for (i = 150; i <= 175; i++) + printf("%02x ", spd->msd[i - 150]); + printf(" Mfg's Specific Data\n"); + + printf("%-3d-%3d: ", 176, 255); + for (i = 176; i <= 255; i++) + printf("%02x", spd->cust[i - 176]); + printf(" Customer's Data\n"); + +} +#endif + +#ifdef CONFIG_SYS_FSL_DDR4 +void ddr4_spd_dump(const struct ddr4_spd_eeprom_s *spd) +{ + unsigned int i; + + /* General Section: Bytes 0-127 */ + +#define PRINT_NXS(x, y, z...) printf("%-3d : %02x " z "\n", x, (u8)y); +#define PRINT_NNXXS(n0, n1, x0, x1, s) \ + printf("%-3d-%3d: %02x %02x " s "\n", n0, n1, x0, x1); + + PRINT_NXS(0, spd->info_size_crc, + "info_size_crc bytes written into serial memory, CRC coverage"); + PRINT_NXS(1, spd->spd_rev, + "spd_rev SPD Revision"); + PRINT_NXS(2, spd->mem_type, + "mem_type Key Byte / DRAM Device Type"); + PRINT_NXS(3, spd->module_type, + "module_type Key Byte / Module Type"); + PRINT_NXS(4, spd->density_banks, + "density_banks SDRAM Density and Banks"); + PRINT_NXS(5, spd->addressing, + "addressing SDRAM Addressing"); + PRINT_NXS(6, spd->package_type, + "package_type Package type"); + PRINT_NXS(7, spd->opt_feature, + "opt_feature Optional features"); + PRINT_NXS(8, spd->thermal_ref, + "thermal_ref Thermal and Refresh options"); + PRINT_NXS(9, spd->oth_opt_features, + "oth_opt_features Other SDRAM optional features"); + PRINT_NXS(10, spd->res_10, + "res_10 Reserved"); + PRINT_NXS(11, spd->module_vdd, + "module_vdd Module Nominal Voltage, VDD"); + PRINT_NXS(12, spd->organization, + "organization Module Organization"); + PRINT_NXS(13, spd->bus_width, + "bus_width Module Memory Bus Width"); + PRINT_NXS(14, spd->therm_sensor, + "therm_sensor Module Thermal Sensor"); + PRINT_NXS(15, spd->ext_type, + "ext_type Extended module type"); + PRINT_NXS(16, spd->res_16, + "res_16 Reserved"); + PRINT_NXS(17, spd->timebases, + "timebases MTB and FTB"); + PRINT_NXS(18, spd->tck_min, + "tck_min tCKAVGmin"); + PRINT_NXS(19, spd->tck_max, + "tck_max tCKAVGmax"); + PRINT_NXS(20, spd->caslat_b1, + "caslat_b1 CAS latencies, 1st byte"); + PRINT_NXS(21, spd->caslat_b2, + "caslat_b2 CAS latencies, 2nd byte"); + PRINT_NXS(22, spd->caslat_b3, + "caslat_b3 CAS latencies, 3rd byte"); + PRINT_NXS(23, spd->caslat_b4, + "caslat_b4 CAS latencies, 4th byte"); + PRINT_NXS(24, spd->taa_min, + "taa_min Min CAS Latency Time"); + PRINT_NXS(25, spd->trcd_min, + "trcd_min Min RAS# to CAS# Delay Time"); + PRINT_NXS(26, spd->trp_min, + "trp_min Min Row Precharge Delay Time"); + PRINT_NXS(27, spd->tras_trc_ext, + "tras_trc_ext Upper Nibbles for tRAS and tRC"); + PRINT_NXS(28, spd->tras_min_lsb, + "tras_min_lsb tRASmin, lsb"); + PRINT_NXS(29, spd->trc_min_lsb, + "trc_min_lsb tRCmin, lsb"); + PRINT_NXS(30, spd->trfc1_min_lsb, + "trfc1_min_lsb Min Refresh Recovery Delay Time, LSB"); + PRINT_NXS(31, spd->trfc1_min_msb, + "trfc1_min_msb Min Refresh Recovery Delay Time, MSB"); + PRINT_NXS(32, spd->trfc2_min_lsb, + "trfc2_min_lsb Min Refresh Recovery Delay Time, LSB"); + PRINT_NXS(33, spd->trfc2_min_msb, + "trfc2_min_msb Min Refresh Recovery Delay Time, MSB"); + PRINT_NXS(34, spd->trfc4_min_lsb, + "trfc4_min_lsb Min Refresh Recovery Delay Time, LSB"); + PRINT_NXS(35, spd->trfc4_min_msb, + "trfc4_min_msb Min Refresh Recovery Delay Time, MSB"); + PRINT_NXS(36, spd->tfaw_msb, + "tfaw_msb Upper Nibble 
for tFAW"); + PRINT_NXS(37, spd->tfaw_min, + "tfaw_min tFAW, lsb"); + PRINT_NXS(38, spd->trrds_min, + "trrds_min tRRD_Smin, MTB"); + PRINT_NXS(39, spd->trrdl_min, + "trrdl_min tRRD_Lmin, MTB"); + PRINT_NXS(40, spd->tccdl_min, + "tccdl_min tCCD_Lmin, MTB"); + + printf("%-3d-%3d: ", 41, 59); /* Reserved, General Section */ + for (i = 41; i <= 59; i++) + printf("%02x ", spd->res_41[i - 41]); + + puts("\n"); + printf("%-3d-%3d: ", 60, 77); + for (i = 60; i <= 77; i++) + printf("%02x ", spd->mapping[i - 60]); + puts(" mapping[] Connector to SDRAM bit map\n"); + + PRINT_NXS(117, spd->fine_tccdl_min, + "fine_tccdl_min Fine offset for tCCD_Lmin"); + PRINT_NXS(118, spd->fine_trrdl_min, + "fine_trrdl_min Fine offset for tRRD_Lmin"); + PRINT_NXS(119, spd->fine_trrds_min, + "fine_trrds_min Fine offset for tRRD_Smin"); + PRINT_NXS(120, spd->fine_trc_min, + "fine_trc_min Fine offset for tRCmin"); + PRINT_NXS(121, spd->fine_trp_min, + "fine_trp_min Fine offset for tRPmin"); + PRINT_NXS(122, spd->fine_trcd_min, + "fine_trcd_min Fine offset for tRCDmin"); + PRINT_NXS(123, spd->fine_taa_min, + "fine_taa_min Fine offset for tAAmin"); + PRINT_NXS(124, spd->fine_tck_max, + "fine_tck_max Fine offset for tCKAVGmax"); + PRINT_NXS(125, spd->fine_tck_min, + "fine_tck_min Fine offset for tCKAVGmin"); + + /* CRC: Bytes 126-127 */ + PRINT_NNXXS(126, 127, spd->crc[0], spd->crc[1], " SPD CRC"); + + switch (spd->module_type) { + case 0x02: /* UDIMM */ + case 0x03: /* SO-DIMM */ + PRINT_NXS(128, spd->mod_section.unbuffered.mod_height, + "mod_height (Unbuffered) Module Nominal Height"); + PRINT_NXS(129, spd->mod_section.unbuffered.mod_thickness, + "mod_thickness (Unbuffered) Module Maximum Thickness"); + PRINT_NXS(130, spd->mod_section.unbuffered.ref_raw_card, + "ref_raw_card (Unbuffered) Reference Raw Card Used"); + PRINT_NXS(131, spd->mod_section.unbuffered.addr_mapping, + "addr_mapping (Unbuffered) Address mapping from Edge Connector to DRAM"); + PRINT_NNXXS(254, 255, spd->mod_section.unbuffered.crc[0], + spd->mod_section.unbuffered.crc[1], " Module CRC"); + break; + case 0x01: /* RDIMM */ + PRINT_NXS(128, spd->mod_section.registered.mod_height, + "mod_height (Registered) Module Nominal Height"); + PRINT_NXS(129, spd->mod_section.registered.mod_thickness, + "mod_thickness (Registered) Module Maximum Thickness"); + PRINT_NXS(130, spd->mod_section.registered.ref_raw_card, + "ref_raw_card (Registered) Reference Raw Card Used"); + PRINT_NXS(131, spd->mod_section.registered.modu_attr, + "modu_attr (Registered) DIMM Module Attributes"); + PRINT_NXS(132, spd->mod_section.registered.thermal, + "thermal (Registered) Thermal Heat Spreader Solution"); + PRINT_NXS(133, spd->mod_section.registered.reg_id_lo, + "reg_id_lo (Registered) Register Manufacturer ID Code, LSB"); + PRINT_NXS(134, spd->mod_section.registered.reg_id_hi, + "reg_id_hi (Registered) Register Manufacturer ID Code, MSB"); + PRINT_NXS(135, spd->mod_section.registered.reg_rev, + "reg_rev (Registered) Register Revision Number"); + PRINT_NXS(136, spd->mod_section.registered.reg_map, + "reg_map (Registered) Address mapping"); + PRINT_NNXXS(254, 255, spd->mod_section.registered.crc[0], + spd->mod_section.registered.crc[1], " Module CRC"); + break; + case 0x04: /* LRDIMM */ + PRINT_NXS(128, spd->mod_section.loadreduced.mod_height, + "mod_height (Loadreduced) Module Nominal Height"); + PRINT_NXS(129, spd->mod_section.loadreduced.mod_thickness, + "mod_thickness (Loadreduced) Module Maximum Thickness"); + PRINT_NXS(130, spd->mod_section.loadreduced.ref_raw_card, + 
"ref_raw_card (Loadreduced) Reference Raw Card Used"); + PRINT_NXS(131, spd->mod_section.loadreduced.modu_attr, + "modu_attr (Loadreduced) DIMM Module Attributes"); + PRINT_NXS(132, spd->mod_section.loadreduced.thermal, + "thermal (Loadreduced) Thermal Heat Spreader Solution"); + PRINT_NXS(133, spd->mod_section.loadreduced.reg_id_lo, + "reg_id_lo (Loadreduced) Register Manufacturer ID Code, LSB"); + PRINT_NXS(134, spd->mod_section.loadreduced.reg_id_hi, + "reg_id_hi (Loadreduced) Register Manufacturer ID Code, MSB"); + PRINT_NXS(135, spd->mod_section.loadreduced.reg_rev, + "reg_rev (Loadreduced) Register Revision Number"); + PRINT_NXS(136, spd->mod_section.loadreduced.reg_map, + "reg_map (Loadreduced) Address mapping"); + PRINT_NXS(137, spd->mod_section.loadreduced.reg_drv, + "reg_drv (Loadreduced) Reg output drive strength"); + PRINT_NXS(138, spd->mod_section.loadreduced.reg_drv_ck, + "reg_drv_ck (Loadreduced) Reg output drive strength for CK"); + PRINT_NXS(139, spd->mod_section.loadreduced.data_buf_rev, + "data_buf_rev (Loadreduced) Data Buffer Revision Number"); + PRINT_NXS(140, spd->mod_section.loadreduced.vrefqe_r0, + "vrefqe_r0 (Loadreduced) DRAM VrefDQ for Package Rank 0"); + PRINT_NXS(141, spd->mod_section.loadreduced.vrefqe_r1, + "vrefqe_r1 (Loadreduced) DRAM VrefDQ for Package Rank 1"); + PRINT_NXS(142, spd->mod_section.loadreduced.vrefqe_r2, + "vrefqe_r2 (Loadreduced) DRAM VrefDQ for Package Rank 2"); + PRINT_NXS(143, spd->mod_section.loadreduced.vrefqe_r3, + "vrefqe_r3 (Loadreduced) DRAM VrefDQ for Package Rank 3"); + PRINT_NXS(144, spd->mod_section.loadreduced.data_intf, + "data_intf (Loadreduced) Data Buffer VrefDQ for DRAM Interface"); + PRINT_NXS(145, spd->mod_section.loadreduced.data_drv_1866, + "data_drv_1866 (Loadreduced) Data Buffer MDQ Drive Strength and RTT"); + PRINT_NXS(146, spd->mod_section.loadreduced.data_drv_2400, + "data_drv_2400 (Loadreduced) Data Buffer MDQ Drive Strength and RTT"); + PRINT_NXS(147, spd->mod_section.loadreduced.data_drv_3200, + "data_drv_3200 (Loadreduced) Data Buffer MDQ Drive Strength and RTT"); + PRINT_NXS(148, spd->mod_section.loadreduced.dram_drv, + "dram_drv (Loadreduced) DRAM Drive Strength"); + PRINT_NXS(149, spd->mod_section.loadreduced.dram_odt_1866, + "dram_odt_1866 (Loadreduced) DRAM ODT (RTT_WR, RTT_NOM)"); + PRINT_NXS(150, spd->mod_section.loadreduced.dram_odt_2400, + "dram_odt_2400 (Loadreduced) DRAM ODT (RTT_WR, RTT_NOM)"); + PRINT_NXS(151, spd->mod_section.loadreduced.dram_odt_3200, + "dram_odt_3200 (Loadreduced) DRAM ODT (RTT_WR, RTT_NOM)"); + PRINT_NXS(152, spd->mod_section.loadreduced.dram_odt_park_1866, + "dram_odt_park_1866 (Loadreduced) DRAM ODT (RTT_PARK)"); + PRINT_NXS(153, spd->mod_section.loadreduced.dram_odt_park_2400, + "dram_odt_park_2400 (Loadreduced) DRAM ODT (RTT_PARK)"); + PRINT_NXS(154, spd->mod_section.loadreduced.dram_odt_park_3200, + "dram_odt_park_3200 (Loadreduced) DRAM ODT (RTT_PARK)"); + PRINT_NNXXS(254, 255, spd->mod_section.loadreduced.crc[0], + spd->mod_section.loadreduced.crc[1], + " Module CRC"); + break; + default: + /* Module-specific Section, Unsupported Module Type */ + printf("%-3d-%3d: ", 128, 255); + + for (i = 128; i <= 255; i++) + printf("%02x", spd->mod_section.uc[i - 128]); + + break; + } + + /* Unique Module ID: Bytes 320-383 */ + PRINT_NXS(320, spd->mmid_lsb, "Module MfgID Code LSB - JEP-106"); + PRINT_NXS(321, spd->mmid_msb, "Module MfgID Code MSB - JEP-106"); + PRINT_NXS(322, spd->mloc, "Mfg Location"); + PRINT_NNXXS(323, 324, spd->mdate[0], spd->mdate[1], "Mfg Date"); + 
printf("%-3d-%3d: ", 325, 328); + + for (i = 325; i <= 328; i++) + printf("%02x ", spd->sernum[i - 325]); + printf(" Module Serial Number\n"); + + printf("%-3d-%3d: ", 329, 348); + for (i = 329; i <= 348; i++) + printf("%02x ", spd->mpart[i - 329]); + printf(" Mfg's Module Part Number\n"); + + PRINT_NXS(349, spd->mrev, "Module Revision code"); + PRINT_NXS(350, spd->dmid_lsb, "DRAM MfgID Code LSB - JEP-106"); + PRINT_NXS(351, spd->dmid_msb, "DRAM MfgID Code MSB - JEP-106"); + PRINT_NXS(352, spd->stepping, "DRAM stepping"); + + printf("%-3d-%3d: ", 353, 381); + for (i = 353; i <= 381; i++) + printf("%02x ", spd->msd[i - 353]); + printf(" Mfg's Specific Data\n"); +} +#endif + +static inline void generic_spd_dump(const generic_spd_eeprom_t *spd) +{ +#if defined(CONFIG_SYS_FSL_DDR1) + ddr1_spd_dump(spd); +#elif defined(CONFIG_SYS_FSL_DDR2) + ddr2_spd_dump(spd); +#elif defined(CONFIG_SYS_FSL_DDR3) + ddr3_spd_dump(spd); +#elif defined(CONFIG_SYS_FSL_DDR4) + ddr4_spd_dump(spd); +#endif +} + +static void fsl_ddr_printinfo(const fsl_ddr_info_t *pinfo, + unsigned int ctrl_mask, + unsigned int dimm_mask, + unsigned int do_mask) +{ + unsigned int i, j, retval; + + /* STEP 1: DIMM SPD data */ + if (do_mask & STEP_GET_SPD) { + for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) { + if (!(ctrl_mask & (1 << i))) + continue; + + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + if (!(dimm_mask & (1 << j))) + continue; + + printf("SPD info: Controller=%u " + "DIMM=%u\n", i, j); + generic_spd_dump( + &(pinfo->spd_installed_dimms[i][j])); + printf("\n"); + } + printf("\n"); + } + printf("\n"); + } + + /* STEP 2: DIMM Parameters */ + if (do_mask & STEP_COMPUTE_DIMM_PARMS) { + for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) { + if (!(ctrl_mask & (1 << i))) + continue; + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + if (!(dimm_mask & (1 << j))) + continue; + printf("DIMM parameters: Controller=%u " + "DIMM=%u\n", i, j); + print_dimm_parameters( + &(pinfo->dimm_params[i][j])); + printf("\n"); + } + printf("\n"); + } + printf("\n"); + } + + /* STEP 3: Common Parameters */ + if (do_mask & STEP_COMPUTE_COMMON_PARMS) { + for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) { + if (!(ctrl_mask & (1 << i))) + continue; + printf("\"lowest common\" DIMM parameters: " + "Controller=%u\n", i); + print_lowest_common_dimm_parameters( + &pinfo->common_timing_params[i]); + printf("\n"); + } + printf("\n"); + } + + /* STEP 4: User Configuration Options */ + if (do_mask & STEP_GATHER_OPTS) { + for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) { + if (!(ctrl_mask & (1 << i))) + continue; + printf("User Config Options: Controller=%u\n", i); + print_memctl_options(&pinfo->memctl_opts[i]); + printf("\n"); + } + printf("\n"); + } + + /* STEP 5: Address assignment */ + if (do_mask & STEP_ASSIGN_ADDRESSES) { + for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) { + if (!(ctrl_mask & (1 << i))) + continue; + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + printf("Address Assignment: Controller=%u " + "DIMM=%u\n", i, j); + printf("Don't have this functionality yet\n"); + } + printf("\n"); + } + printf("\n"); + } + + /* STEP 6: computed controller register values */ + if (do_mask & STEP_COMPUTE_REGS) { + for (i = 0; i < CONFIG_SYS_NUM_DDR_CTLRS; i++) { + if (!(ctrl_mask & (1 << i))) + continue; + printf("Computed Register Values: Controller=%u\n", i); + print_fsl_memctl_config_regs( + &pinfo->fsl_ddr_config_reg[i]); + retval = check_fsl_memctl_config_regs( + &pinfo->fsl_ddr_config_reg[i]); + if (retval) { + printf("check_fsl_memctl_config_regs 
" + "result = %u\n", retval); + } + printf("\n"); + } + printf("\n"); + } +} + +struct data_strings { + const char *data_name; + unsigned int step_mask; + unsigned int dimm_number_required; +}; + +#define DATA_OPTIONS(name, step, dimm) {#name, step, dimm} + +static unsigned int fsl_ddr_parse_interactive_cmd( + char **argv, + int argc, + unsigned int *pstep_mask, + unsigned int *pctlr_mask, + unsigned int *pdimm_mask, + unsigned int *pdimm_number_required + ) { + + static const struct data_strings options[] = { + DATA_OPTIONS(spd, STEP_GET_SPD, 1), + DATA_OPTIONS(dimmparms, STEP_COMPUTE_DIMM_PARMS, 1), + DATA_OPTIONS(commonparms, STEP_COMPUTE_COMMON_PARMS, 0), + DATA_OPTIONS(opts, STEP_GATHER_OPTS, 0), + DATA_OPTIONS(addresses, STEP_ASSIGN_ADDRESSES, 0), + DATA_OPTIONS(regs, STEP_COMPUTE_REGS, 0), + }; + static const unsigned int n_opts = ARRAY_SIZE(options); + + unsigned int i, j; + unsigned int error = 0; + + for (i = 1; i < argc; i++) { + unsigned int matched = 0; + + for (j = 0; j < n_opts; j++) { + if (strcmp(options[j].data_name, argv[i]) != 0) + continue; + *pstep_mask |= options[j].step_mask; + *pdimm_number_required = + options[j].dimm_number_required; + matched = 1; + break; + } + + if (matched) + continue; + + if (argv[i][0] == 'c') { + char c = argv[i][1]; + if (isdigit(c)) + *pctlr_mask |= 1 << (c - '0'); + continue; + } + + if (argv[i][0] == 'd') { + char c = argv[i][1]; + if (isdigit(c)) + *pdimm_mask |= 1 << (c - '0'); + continue; + } + + printf("unknown arg %s\n", argv[i]); + *pstep_mask = 0; + error = 1; + break; + } + + return error; +} + +int fsl_ddr_interactive_env_var_exists(void) +{ + char buffer[CONFIG_SYS_CBSIZE]; + + if (env_get_f("ddr_interactive", buffer, CONFIG_SYS_CBSIZE) >= 0) + return 1; + + return 0; +} + +unsigned long long fsl_ddr_interactive(fsl_ddr_info_t *pinfo, int var_is_set) +{ + unsigned long long ddrsize; + const char *prompt = "FSL DDR>"; + char buffer[CONFIG_SYS_CBSIZE]; + char buffer2[CONFIG_SYS_CBSIZE]; + char *p = NULL; + char *argv[CONFIG_SYS_MAXARGS + 1]; /* NULL terminated */ + int argc; + unsigned int next_step = STEP_GET_SPD; + const char *usage = { + "commands:\n" + "print print SPD and intermediate computed data\n" + "reset reboot machine\n" + "recompute reload SPD and options to default and recompute regs\n" + "edit modify spd, parameter, or option\n" + "compute recompute registers from current next_step to end\n" + "copy copy parameters\n" + "next_step shows current next_step\n" + "help this message\n" + "go program the memory controller and continue with u-boot\n" + }; + + if (var_is_set) { + if (env_get_f("ddr_interactive", buffer2, + CONFIG_SYS_CBSIZE) > 0) + p = buffer2; + else + var_is_set = 0; + } + + /* + * The strategy for next_step is that it points to the next + * step in the computation process that needs to be done. 
+ */ + while (1) { + if (var_is_set) { + char *pend = strchr(p, ';'); + if (pend) { + /* found command separator, copy sub-command */ + *pend = '\0'; + strcpy(buffer, p); + p = pend + 1; + } else { + /* separator not found, copy whole string */ + strcpy(buffer, p); + p = NULL; + var_is_set = 0; + } + } else { + /* + * No need to worry for buffer overflow here in + * this function; cli_readline() maxes out at + * CFG_CBSIZE + */ + cli_readline_into_buffer(prompt, buffer, 0); + } + argc = cli_simple_parse_line(buffer, argv); + if (argc == 0) + continue; + + + if (strcmp(argv[0], "help") == 0) { + puts(usage); + continue; + } + + if (strcmp(argv[0], "next_step") == 0) { + printf("next_step = 0x%02X (%s)\n", + next_step, + step_to_string(next_step)); + continue; + } + + if (strcmp(argv[0], "copy") == 0) { + unsigned int error = 0; + unsigned int step_mask = 0; + unsigned int src_ctlr_mask = 0; + unsigned int src_dimm_mask = 0; + unsigned int dimm_number_required = 0; + unsigned int src_ctlr_num = 0; + unsigned int src_dimm_num = 0; + unsigned int dst_ctlr_num = -1; + unsigned int dst_dimm_num = -1; + unsigned int i, num_dest_parms; + + if (argc == 1) { + printf("copy <src c#> <src d#> <spd|dimmparms|commonparms|opts|addresses|regs> <dst c#> <dst d#>\n"); + continue; + } + + error = fsl_ddr_parse_interactive_cmd( + argv, argc, + &step_mask, + &src_ctlr_mask, + &src_dimm_mask, + &dimm_number_required + ); + + /* XXX: only dimm_number_required and step_mask will + be used by this function. Parse the controller and + DIMM number separately because it is easier. */ + + if (error) + continue; + + /* parse source destination controller / DIMM */ + + num_dest_parms = dimm_number_required ? 2 : 1; + + for (i = 0; i < argc; i++) { + if (argv[i][0] == 'c') { + char c = argv[i][1]; + if (isdigit(c)) { + src_ctlr_num = (c - '0'); + break; + } + } + } + + for (i = 0; i < argc; i++) { + if (argv[i][0] == 'd') { + char c = argv[i][1]; + if (isdigit(c)) { + src_dimm_num = (c - '0'); + break; + } + } + } + + /* parse destination controller / DIMM */ + + for (i = argc - 1; i >= argc - num_dest_parms; i--) { + if (argv[i][0] == 'c') { + char c = argv[i][1]; + if (isdigit(c)) { + dst_ctlr_num = (c - '0'); + break; + } + } + } + + for (i = argc - 1; i >= argc - num_dest_parms; i--) { + if (argv[i][0] == 'd') { + char c = argv[i][1]; + if (isdigit(c)) { + dst_dimm_num = (c - '0'); + break; + } + } + } + + /* TODO: validate inputs */ + + debug("src_ctlr_num = %u, src_dimm_num = %u, dst_ctlr_num = %u, dst_dimm_num = %u, step_mask = %x\n", + src_ctlr_num, src_dimm_num, dst_ctlr_num, dst_dimm_num, step_mask); + + + switch (step_mask) { + + case STEP_GET_SPD: + memcpy(&(pinfo->spd_installed_dimms[dst_ctlr_num][dst_dimm_num]), + &(pinfo->spd_installed_dimms[src_ctlr_num][src_dimm_num]), + sizeof(pinfo->spd_installed_dimms[0][0])); + break; + + case STEP_COMPUTE_DIMM_PARMS: + memcpy(&(pinfo->dimm_params[dst_ctlr_num][dst_dimm_num]), + &(pinfo->dimm_params[src_ctlr_num][src_dimm_num]), + sizeof(pinfo->dimm_params[0][0])); + break; + + case STEP_COMPUTE_COMMON_PARMS: + memcpy(&(pinfo->common_timing_params[dst_ctlr_num]), + &(pinfo->common_timing_params[src_ctlr_num]), + sizeof(pinfo->common_timing_params[0])); + break; + + case STEP_GATHER_OPTS: + memcpy(&(pinfo->memctl_opts[dst_ctlr_num]), + &(pinfo->memctl_opts[src_ctlr_num]), + sizeof(pinfo->memctl_opts[0])); + break; + + /* someday be able to have addresses to copy addresses... 
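+	 * Editor's note: a copy therefore looks like, e.g.,
+	 *
+	 *	FSL DDR> copy c0 d0 spd c0 d1
+	 *
+	 * which duplicates DIMM 0's raw SPD onto DIMM 1 of controller
+	 * 0; only one data class can be copied per invocation because
+	 * step_mask is matched exactly in this switch.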
*/ + + case STEP_COMPUTE_REGS: + memcpy(&(pinfo->fsl_ddr_config_reg[dst_ctlr_num]), + &(pinfo->fsl_ddr_config_reg[src_ctlr_num]), + sizeof(pinfo->memctl_opts[0])); + break; + + default: + printf("unexpected step_mask value\n"); + } + + continue; + + } + + if (strcmp(argv[0], "edit") == 0) { + unsigned int error = 0; + unsigned int step_mask = 0; + unsigned int ctlr_mask = 0; + unsigned int dimm_mask = 0; + char *p_element = NULL; + char *p_value = NULL; + unsigned int dimm_number_required = 0; + unsigned int ctrl_num; + unsigned int dimm_num; + + if (argc == 1) { + /* Only the element and value must be last */ + printf("edit <c#> <d#> " + "<spd|dimmparms|commonparms|opts|" + "addresses|regs> <element> <value>\n"); + printf("for spd, specify byte number for " + "element\n"); + continue; + } + + error = fsl_ddr_parse_interactive_cmd( + argv, argc - 2, + &step_mask, + &ctlr_mask, + &dimm_mask, + &dimm_number_required + ); + + if (error) + continue; + + + /* Check arguments */ + + /* ERROR: If no steps were found */ + if (step_mask == 0) { + printf("Error: No valid steps were specified " + "in argument.\n"); + continue; + } + + /* ERROR: If multiple steps were found */ + if (step_mask & (step_mask - 1)) { + printf("Error: Multiple steps specified in " + "argument.\n"); + continue; + } + + /* ERROR: Controller not specified */ + if (ctlr_mask == 0) { + printf("Error: controller number not " + "specified or no element and " + "value specified\n"); + continue; + } + + if (ctlr_mask & (ctlr_mask - 1)) { + printf("Error: multiple controllers " + "specified, %X\n", ctlr_mask); + continue; + } + + /* ERROR: DIMM number not specified */ + if (dimm_number_required && dimm_mask == 0) { + printf("Error: DIMM number number not " + "specified or no element and " + "value specified\n"); + continue; + } + + if (dimm_mask & (dimm_mask - 1)) { + printf("Error: multipled DIMMs specified\n"); + continue; + } + + p_element = argv[argc - 2]; + p_value = argv[argc - 1]; + + ctrl_num = __ilog2(ctlr_mask); + dimm_num = __ilog2(dimm_mask); + + switch (step_mask) { + case STEP_GET_SPD: + { + unsigned int element_num; + unsigned int value; + + element_num = simple_strtoul(p_element, + NULL, 0); + value = simple_strtoul(p_value, + NULL, 0); + fsl_ddr_spd_edit(pinfo, + ctrl_num, + dimm_num, + element_num, + value); + next_step = STEP_COMPUTE_DIMM_PARMS; + } + break; + + case STEP_COMPUTE_DIMM_PARMS: + fsl_ddr_dimm_parameters_edit( + pinfo, ctrl_num, dimm_num, + p_element, p_value); + next_step = STEP_COMPUTE_COMMON_PARMS; + break; + + case STEP_COMPUTE_COMMON_PARMS: + lowest_common_dimm_parameters_edit(pinfo, + ctrl_num, p_element, p_value); + next_step = STEP_GATHER_OPTS; + break; + + case STEP_GATHER_OPTS: + fsl_ddr_options_edit(pinfo, ctrl_num, + p_element, p_value); + next_step = STEP_ASSIGN_ADDRESSES; + break; + + case STEP_ASSIGN_ADDRESSES: + printf("editing of address assignment " + "not yet implemented\n"); + break; + + case STEP_COMPUTE_REGS: + { + fsl_ddr_regs_edit(pinfo, + ctrl_num, + p_element, + p_value); + next_step = STEP_PROGRAM_REGS; + } + break; + + default: + printf("programming error\n"); + while (1) + ; + break; + } + continue; + } + + if (strcmp(argv[0], "reset") == 0) { + /* + * Reboot machine. 
+ * Args don't seem to matter because this + * doesn't return + */ + do_reset(NULL, 0, 0, NULL); + printf("Reset didn't work\n"); + } + + if (strcmp(argv[0], "recompute") == 0) { + /* + * Recalculate everything, starting with + * loading SPD EEPROM from DIMMs + */ + next_step = STEP_GET_SPD; + ddrsize = fsl_ddr_compute(pinfo, next_step, 0); + continue; + } + + if (strcmp(argv[0], "compute") == 0) { + /* + * Compute rest of steps starting at + * the current next_step/ + */ + ddrsize = fsl_ddr_compute(pinfo, next_step, 0); + continue; + } + + if (strcmp(argv[0], "print") == 0) { + unsigned int error = 0; + unsigned int step_mask = 0; + unsigned int ctlr_mask = 0; + unsigned int dimm_mask = 0; + unsigned int dimm_number_required = 0; + + if (argc == 1) { + printf("print [c<n>] [d<n>] [spd] [dimmparms] " + "[commonparms] [opts] [addresses] [regs]\n"); + continue; + } + + error = fsl_ddr_parse_interactive_cmd( + argv, argc, + &step_mask, + &ctlr_mask, + &dimm_mask, + &dimm_number_required + ); + + if (error) + continue; + + /* If no particular controller was found, print all */ + if (ctlr_mask == 0) + ctlr_mask = 0xFF; + + /* If no particular dimm was found, print all dimms. */ + if (dimm_mask == 0) + dimm_mask = 0xFF; + + /* If no steps were found, print all steps. */ + if (step_mask == 0) + step_mask = STEP_ALL; + + fsl_ddr_printinfo(pinfo, ctlr_mask, + dimm_mask, step_mask); + continue; + } + + if (strcmp(argv[0], "go") == 0) { + if (next_step) + ddrsize = fsl_ddr_compute(pinfo, next_step, 0); + break; + } + + printf("unknown command %s\n", argv[0]); + } + + debug("end of memory = %llu\n", (u64)ddrsize); + + return ddrsize; +} diff --git a/roms/u-boot/drivers/ddr/fsl/lc_common_dimm_params.c b/roms/u-boot/drivers/ddr/fsl/lc_common_dimm_params.c new file mode 100644 index 000000000..d299d763d --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/lc_common_dimm_params.c @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008-2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017-2018 NXP Semiconductor + */ + +#include <common.h> +#include <fsl_ddr_sdram.h> +#include <log.h> +#include <asm/bitops.h> + +#include <fsl_ddr.h> + +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) +static unsigned int +compute_cas_latency(const unsigned int ctrl_num, + const dimm_params_t *dimm_params, + common_timing_params_t *outpdimm, + unsigned int number_of_dimms) +{ + unsigned int i; + unsigned int common_caslat; + unsigned int caslat_actual; + unsigned int retry = 16; + unsigned int tmp = ~0; + const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num); +#ifdef CONFIG_SYS_FSL_DDR3 + const unsigned int taamax = 20000; +#else + const unsigned int taamax = 18000; +#endif + + /* compute the common CAS latency supported between slots */ + for (i = 0; i < number_of_dimms; i++) { + if (dimm_params[i].n_ranks) + tmp &= dimm_params[i].caslat_x; + } + common_caslat = tmp; + + /* validate if the memory clk is in the range of dimms */ + if (mclk_ps < outpdimm->tckmin_x_ps) { + printf("DDR clock (MCLK cycle %u ps) is faster than " + "the slowest DIMM(s) (tCKmin %u ps) can support.\n", + mclk_ps, outpdimm->tckmin_x_ps); + } +#ifdef CONFIG_SYS_FSL_DDR4 + if (mclk_ps > outpdimm->tckmax_ps) { + printf("DDR clock (MCLK cycle %u ps) is slower than DIMM(s) (tCKmax %u ps) can support.\n", + mclk_ps, outpdimm->tckmax_ps); + } +#endif + /* determine the acutal cas latency */ + caslat_actual = (outpdimm->taamin_ps + mclk_ps - 1) / mclk_ps; + /* check if the dimms support the CAS latency */ + while (!(common_caslat & (1 << caslat_actual)) && retry > 0) { + caslat_actual++; + retry--; + } + /* once the caculation of caslat_actual is completed + * we must verify that this CAS latency value does not + * exceed tAAmax, which is 20 ns for all DDR3 speed grades, + * 18ns for all DDR4 speed grades. + */ + if (caslat_actual * mclk_ps > taamax) { + printf("The chosen cas latency %d is too large\n", + caslat_actual); + } + outpdimm->lowest_common_spd_caslat = caslat_actual; + debug("lowest_common_spd_caslat is 0x%x\n", caslat_actual); + + return 0; +} +#else /* for DDR1 and DDR2 */ +static unsigned int +compute_cas_latency(const unsigned int ctrl_num, + const dimm_params_t *dimm_params, + common_timing_params_t *outpdimm, + unsigned int number_of_dimms) +{ + int i; + const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num); + unsigned int lowest_good_caslat; + unsigned int not_ok; + unsigned int temp1, temp2; + + debug("using mclk_ps = %u\n", mclk_ps); + if (mclk_ps > outpdimm->tckmax_ps) { + printf("Warning: DDR clock (%u ps) is slower than DIMM(s) (tCKmax %u ps)\n", + mclk_ps, outpdimm->tckmax_ps); + } + + /* + * Compute a CAS latency suitable for all DIMMs + * + * Strategy for SPD-defined latencies: compute only + * CAS latency defined by all DIMMs. + */ + + /* + * Step 1: find CAS latency common to all DIMMs using bitwise + * operation. + */ + temp1 = 0xFF; + for (i = 0; i < number_of_dimms; i++) { + if (dimm_params[i].n_ranks) { + temp2 = 0; + temp2 |= 1 << dimm_params[i].caslat_x; + temp2 |= 1 << dimm_params[i].caslat_x_minus_1; + temp2 |= 1 << dimm_params[i].caslat_x_minus_2; + /* + * If there was no entry for X-2 (X-1) in + * the SPD, then caslat_x_minus_2 + * (caslat_x_minus_1) contains either 255 or + * 0xFFFFFFFF because that's what the glorious + * __ilog2 function returns for an input of 0. + * On 32-bit PowerPC, left shift counts with bit + * 26 set (that the value of 255 or 0xFFFFFFFF + * will have), cause the destination register to + * be 0. 
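+			 * For example, an SPD advertising only CL=X and
+			 * CL=X-1 leaves caslat_x_minus_2 at 255 (or
+			 * 0xFFFFFFFF), and "1 << 255" then contributes no
+			 * bit to temp2, so the missing latency drops out
+			 * of the intersection automatically.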
That is why this works. + */ + temp1 &= temp2; + } + } + + /* + * Step 2: check each common CAS latency against tCK of each + * DIMM's SPD. + */ + lowest_good_caslat = 0; + temp2 = 0; + while (temp1) { + not_ok = 0; + temp2 = __ilog2(temp1); + debug("checking common caslat = %u\n", temp2); + + /* Check if this CAS latency will work on all DIMMs at tCK. */ + for (i = 0; i < number_of_dimms; i++) { + if (!dimm_params[i].n_ranks) + continue; + + if (dimm_params[i].caslat_x == temp2) { + if (mclk_ps >= dimm_params[i].tckmin_x_ps) { + debug("CL = %u ok on DIMM %u at tCK=%u ps with tCKmin_X_ps of %u\n", + temp2, i, mclk_ps, + dimm_params[i].tckmin_x_ps); + continue; + } else { + not_ok++; + } + } + + if (dimm_params[i].caslat_x_minus_1 == temp2) { + unsigned int tckmin_x_minus_1_ps + = dimm_params[i].tckmin_x_minus_1_ps; + if (mclk_ps >= tckmin_x_minus_1_ps) { + debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_1_ps of %u\n", + temp2, i, mclk_ps, + tckmin_x_minus_1_ps); + continue; + } else { + not_ok++; + } + } + + if (dimm_params[i].caslat_x_minus_2 == temp2) { + unsigned int tckmin_x_minus_2_ps + = dimm_params[i].tckmin_x_minus_2_ps; + if (mclk_ps >= tckmin_x_minus_2_ps) { + debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_2_ps of %u\n", + temp2, i, mclk_ps, + tckmin_x_minus_2_ps); + continue; + } else { + not_ok++; + } + } + } + + if (!not_ok) + lowest_good_caslat = temp2; + + temp1 &= ~(1 << temp2); + } + + debug("lowest common SPD-defined CAS latency = %u\n", + lowest_good_caslat); + outpdimm->lowest_common_spd_caslat = lowest_good_caslat; + + + /* + * Compute a common 'de-rated' CAS latency. + * + * The strategy here is to find the *highest* dereated cas latency + * with the assumption that all of the DIMMs will support a dereated + * CAS latency higher than or equal to their lowest dereated value. + */ + temp1 = 0; + for (i = 0; i < number_of_dimms; i++) + temp1 = max(temp1, dimm_params[i].caslat_lowest_derated); + + outpdimm->highest_common_derated_caslat = temp1; + debug("highest common dereated CAS latency = %u\n", temp1); + + return 0; +} +#endif + +/* + * compute_lowest_common_dimm_parameters() + * + * Determine the worst-case DIMM timing parameters from the set of DIMMs + * whose parameters have been computed into the array pointed to + * by dimm_params. 
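+ *
+ * "Worst case" means taking the maximum of every minimum timing
+ * (tCKmin, tRCD, tRP, tRAS, ...) and the minimum of the maximum clock
+ * period (tCKmax). For example, mixing a DIMM with tCKmin = 1071 ps
+ * and one with tCKmin = 1250 ps yields a common tCKmin of 1250 ps:
+ * the bus can run no faster than the slower DIMM allows.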
+ */ +unsigned int +compute_lowest_common_dimm_parameters(const unsigned int ctrl_num, + const dimm_params_t *dimm_params, + common_timing_params_t *outpdimm, + const unsigned int number_of_dimms) +{ + unsigned int i, j; + + unsigned int tckmin_x_ps = 0; + unsigned int tckmax_ps = 0xFFFFFFFF; + unsigned int trcd_ps = 0; + unsigned int trp_ps = 0; + unsigned int tras_ps = 0; +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + unsigned int taamin_ps = 0; +#endif +#ifdef CONFIG_SYS_FSL_DDR4 + unsigned int twr_ps = 15000; + unsigned int trfc1_ps = 0; + unsigned int trfc2_ps = 0; + unsigned int trfc4_ps = 0; + unsigned int trrds_ps = 0; + unsigned int trrdl_ps = 0; + unsigned int tccdl_ps = 0; + unsigned int trfc_slr_ps = 0; +#else + unsigned int twr_ps = 0; + unsigned int twtr_ps = 0; + unsigned int trfc_ps = 0; + unsigned int trrd_ps = 0; + unsigned int trtp_ps = 0; +#endif + unsigned int trc_ps = 0; + unsigned int refresh_rate_ps = 0; + unsigned int extended_op_srt = 1; +#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2) + unsigned int tis_ps = 0; + unsigned int tih_ps = 0; + unsigned int tds_ps = 0; + unsigned int tdh_ps = 0; + unsigned int tdqsq_max_ps = 0; + unsigned int tqhs_ps = 0; +#endif + unsigned int temp1, temp2; + unsigned int additive_latency = 0; + + temp1 = 0; + for (i = 0; i < number_of_dimms; i++) { + /* + * If there are no ranks on this DIMM, + * it probably doesn't exist, so skip it. + */ + if (dimm_params[i].n_ranks == 0) { + temp1++; + continue; + } + if (dimm_params[i].n_ranks == 4 && i != 0) { + printf("Found Quad-rank DIMM in wrong bank, ignored." + " Software may not run as expected.\n"); + temp1++; + continue; + } + + /* + * check if quad-rank DIMM is plugged if + * CONFIG_CHIP_SELECT_QUAD_CAPABLE is not defined + * Only the board with proper design is capable + */ +#ifndef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE + if (dimm_params[i].n_ranks == 4 && \ + CONFIG_CHIP_SELECTS_PER_CTRL/CONFIG_DIMM_SLOTS_PER_CTLR < 4) { + printf("Found Quad-rank DIMM, not able to support."); + temp1++; + continue; + } +#endif + /* + * Find minimum tckmax_ps to find fastest slow speed, + * i.e., this is the slowest the whole system can go. 
+ */ + tckmax_ps = min(tckmax_ps, + (unsigned int)dimm_params[i].tckmax_ps); +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + taamin_ps = max(taamin_ps, + (unsigned int)dimm_params[i].taa_ps); +#endif + tckmin_x_ps = max(tckmin_x_ps, + (unsigned int)dimm_params[i].tckmin_x_ps); + trcd_ps = max(trcd_ps, (unsigned int)dimm_params[i].trcd_ps); + trp_ps = max(trp_ps, (unsigned int)dimm_params[i].trp_ps); + tras_ps = max(tras_ps, (unsigned int)dimm_params[i].tras_ps); +#ifdef CONFIG_SYS_FSL_DDR4 + trfc1_ps = max(trfc1_ps, + (unsigned int)dimm_params[i].trfc1_ps); + trfc2_ps = max(trfc2_ps, + (unsigned int)dimm_params[i].trfc2_ps); + trfc4_ps = max(trfc4_ps, + (unsigned int)dimm_params[i].trfc4_ps); + trrds_ps = max(trrds_ps, + (unsigned int)dimm_params[i].trrds_ps); + trrdl_ps = max(trrdl_ps, + (unsigned int)dimm_params[i].trrdl_ps); + tccdl_ps = max(tccdl_ps, + (unsigned int)dimm_params[i].tccdl_ps); + trfc_slr_ps = max(trfc_slr_ps, + (unsigned int)dimm_params[i].trfc_slr_ps); +#else + twr_ps = max(twr_ps, (unsigned int)dimm_params[i].twr_ps); + twtr_ps = max(twtr_ps, (unsigned int)dimm_params[i].twtr_ps); + trfc_ps = max(trfc_ps, (unsigned int)dimm_params[i].trfc_ps); + trrd_ps = max(trrd_ps, (unsigned int)dimm_params[i].trrd_ps); + trtp_ps = max(trtp_ps, (unsigned int)dimm_params[i].trtp_ps); +#endif + trc_ps = max(trc_ps, (unsigned int)dimm_params[i].trc_ps); +#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2) + tis_ps = max(tis_ps, (unsigned int)dimm_params[i].tis_ps); + tih_ps = max(tih_ps, (unsigned int)dimm_params[i].tih_ps); + tds_ps = max(tds_ps, (unsigned int)dimm_params[i].tds_ps); + tdh_ps = max(tdh_ps, (unsigned int)dimm_params[i].tdh_ps); + tqhs_ps = max(tqhs_ps, (unsigned int)dimm_params[i].tqhs_ps); + /* + * Find maximum tdqsq_max_ps to find slowest. + * + * FIXME: is finding the slowest value the correct + * strategy for this parameter? + */ + tdqsq_max_ps = max(tdqsq_max_ps, + (unsigned int)dimm_params[i].tdqsq_max_ps); +#endif + refresh_rate_ps = max(refresh_rate_ps, + (unsigned int)dimm_params[i].refresh_rate_ps); + /* extended_op_srt is either 0 or 1, 0 having priority */ + extended_op_srt = min(extended_op_srt, + (unsigned int)dimm_params[i].extended_op_srt); + } + + outpdimm->ndimms_present = number_of_dimms - temp1; + + if (temp1 == number_of_dimms) { + debug("no dimms this memory controller\n"); + return 0; + } + + outpdimm->tckmin_x_ps = tckmin_x_ps; + outpdimm->tckmax_ps = tckmax_ps; +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + outpdimm->taamin_ps = taamin_ps; +#endif + outpdimm->trcd_ps = trcd_ps; + outpdimm->trp_ps = trp_ps; + outpdimm->tras_ps = tras_ps; +#ifdef CONFIG_SYS_FSL_DDR4 + outpdimm->trfc1_ps = trfc1_ps; + outpdimm->trfc2_ps = trfc2_ps; + outpdimm->trfc4_ps = trfc4_ps; + outpdimm->trrds_ps = trrds_ps; + outpdimm->trrdl_ps = trrdl_ps; + outpdimm->tccdl_ps = tccdl_ps; + outpdimm->trfc_slr_ps = trfc_slr_ps; +#else + outpdimm->twtr_ps = twtr_ps; + outpdimm->trfc_ps = trfc_ps; + outpdimm->trrd_ps = trrd_ps; + outpdimm->trtp_ps = trtp_ps; +#endif + outpdimm->twr_ps = twr_ps; + outpdimm->trc_ps = trc_ps; + outpdimm->refresh_rate_ps = refresh_rate_ps; + outpdimm->extended_op_srt = extended_op_srt; +#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2) + outpdimm->tis_ps = tis_ps; + outpdimm->tih_ps = tih_ps; + outpdimm->tds_ps = tds_ps; + outpdimm->tdh_ps = tdh_ps; + outpdimm->tdqsq_max_ps = tdqsq_max_ps; + outpdimm->tqhs_ps = tqhs_ps; +#endif + + /* Determine common burst length for all DIMMs. 
*/ + temp1 = 0xff; + for (i = 0; i < number_of_dimms; i++) { + if (dimm_params[i].n_ranks) { + temp1 &= dimm_params[i].burst_lengths_bitmask; + } + } + outpdimm->all_dimms_burst_lengths_bitmask = temp1; + + /* Determine if all DIMMs registered buffered. */ + temp1 = temp2 = 0; + for (i = 0; i < number_of_dimms; i++) { + if (dimm_params[i].n_ranks) { + if (dimm_params[i].registered_dimm) { + temp1 = 1; +#ifndef CONFIG_SPL_BUILD + printf("Detected RDIMM %s\n", + dimm_params[i].mpart); +#endif + } else { + temp2 = 1; +#ifndef CONFIG_SPL_BUILD + printf("Detected UDIMM %s\n", + dimm_params[i].mpart); +#endif + } + } + } + + outpdimm->all_dimms_registered = 0; + outpdimm->all_dimms_unbuffered = 0; + if (temp1 && !temp2) { + outpdimm->all_dimms_registered = 1; + } else if (!temp1 && temp2) { + outpdimm->all_dimms_unbuffered = 1; + } else { + printf("ERROR: Mix of registered buffered and unbuffered " + "DIMMs detected!\n"); + } + + temp1 = 0; + if (outpdimm->all_dimms_registered) + for (j = 0; j < 16; j++) { + outpdimm->rcw[j] = dimm_params[0].rcw[j]; + for (i = 1; i < number_of_dimms; i++) { + if (!dimm_params[i].n_ranks) + continue; + if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) { + temp1 = 1; + break; + } + } + } + + if (temp1 != 0) + printf("ERROR: Mix different RDIMM detected!\n"); + + /* calculate cas latency for all DDR types */ + if (compute_cas_latency(ctrl_num, dimm_params, + outpdimm, number_of_dimms)) + return 1; + + /* Determine if all DIMMs ECC capable. */ + temp1 = 1; + for (i = 0; i < number_of_dimms; i++) { + if (dimm_params[i].n_ranks && + !(dimm_params[i].edc_config & EDC_ECC)) { + temp1 = 0; + break; + } + } + if (temp1) { + debug("all DIMMs ECC capable\n"); + } else { + debug("Warning: not all DIMMs ECC capable, cant enable ECC\n"); + } + outpdimm->all_dimms_ecc_capable = temp1; + + /* + * Compute additive latency. + * + * For DDR1, additive latency should be 0. + * + * For DDR2, with ODT enabled, use "a value" less than ACTTORW, + * which comes from Trcd, and also note that: + * add_lat + caslat must be >= 4 + * + * For DDR3, we use the AL=0 + * + * When to use additive latency for DDR2: + * + * I. Because you are using CL=3 and need to do ODT on writes and + * want functionality. + * 1. Are you going to use ODT? (Does your board not have + * additional termination circuitry for DQ, DQS, DQS_, + * DM, RDQS, RDQS_ for x4/x8 configs?) + * 2. If so, is your lowest supported CL going to be 3? + * 3. If so, then you must set AL=1 because + * + * WL >= 3 for ODT on writes + * RL = AL + CL + * WL = RL - 1 + * -> + * WL = AL + CL - 1 + * AL + CL - 1 >= 3 + * AL + CL >= 4 + * QED + * + * RL >= 3 for ODT on reads + * RL = AL + CL + * + * Since CL aren't usually less than 2, AL=0 is a minimum, + * so the WL-derived AL should be the -- FIXME? + * + * II. Because you are using auto-precharge globally and want to + * use additive latency (posted CAS) to get more bandwidth. + * 1. Are you going to use auto-precharge mode globally? + * + * Use addtivie latency and compute AL to be 1 cycle less than + * tRCD, i.e. the READ or WRITE command is in the cycle + * immediately following the ACTIVATE command.. + * + * III. Because you feel like it or want to do some sort of + * degraded-performance experiment. + * 1. Do you just want to use additive latency because you feel + * like it? + * + * Validation: AL is less than tRCD, and within the other + * read-to-precharge constraints. 
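+	 *
+	 * Worked DDR2 example: with CL = 3 and tRCD = 15000 ps at a
+	 * 2500 ps MCLK cycle, picos_to_mclk(tRCD) = 6 clocks > CL, so the
+	 * code below picks AL = 6 - 3 = 3; the result is then validated
+	 * against tRCD(min) before being handed to the register setup.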
+ */ + + additive_latency = 0; + +#if defined(CONFIG_SYS_FSL_DDR2) + if ((outpdimm->lowest_common_spd_caslat < 4) && + (picos_to_mclk(ctrl_num, trcd_ps) > + outpdimm->lowest_common_spd_caslat)) { + additive_latency = picos_to_mclk(ctrl_num, trcd_ps) - + outpdimm->lowest_common_spd_caslat; + if (mclk_to_picos(ctrl_num, additive_latency) > trcd_ps) { + additive_latency = picos_to_mclk(ctrl_num, trcd_ps); + debug("setting additive_latency to %u because it was " + " greater than tRCD_ps\n", additive_latency); + } + } +#endif + + /* + * Validate additive latency + * + * AL <= tRCD(min) + */ + if (mclk_to_picos(ctrl_num, additive_latency) > trcd_ps) { + printf("Error: invalid additive latency exceeds tRCD(min).\n"); + return 1; + } + + /* + * RL = CL + AL; RL >= 3 for ODT_RD_CFG to be enabled + * WL = RL - 1; WL >= 3 for ODT_WL_CFG to be enabled + * ADD_LAT (the register) must be set to a value less + * than ACTTORW if WL = 1, then AL must be set to 1 + * RD_TO_PRE (the register) must be set to a minimum + * tRTP + AL if AL is nonzero + */ + + /* + * Additive latency will be applied only if the memctl option to + * use it. + */ + outpdimm->additive_latency = additive_latency; + + debug("tCKmin_ps = %u\n", outpdimm->tckmin_x_ps); + debug("trcd_ps = %u\n", outpdimm->trcd_ps); + debug("trp_ps = %u\n", outpdimm->trp_ps); + debug("tras_ps = %u\n", outpdimm->tras_ps); +#ifdef CONFIG_SYS_FSL_DDR4 + debug("trfc1_ps = %u\n", trfc1_ps); + debug("trfc2_ps = %u\n", trfc2_ps); + debug("trfc4_ps = %u\n", trfc4_ps); + debug("trrds_ps = %u\n", trrds_ps); + debug("trrdl_ps = %u\n", trrdl_ps); + debug("tccdl_ps = %u\n", tccdl_ps); + debug("trfc_slr_ps = %u\n", trfc_slr_ps); +#else + debug("twtr_ps = %u\n", outpdimm->twtr_ps); + debug("trfc_ps = %u\n", outpdimm->trfc_ps); + debug("trrd_ps = %u\n", outpdimm->trrd_ps); +#endif + debug("twr_ps = %u\n", outpdimm->twr_ps); + debug("trc_ps = %u\n", outpdimm->trc_ps); + + return 0; +} diff --git a/roms/u-boot/drivers/ddr/fsl/main.c b/roms/u-boot/drivers/ddr/fsl/main.c new file mode 100644 index 000000000..8e147160b --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/main.c @@ -0,0 +1,939 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008-2014 Freescale Semiconductor, Inc. + */ + +/* + * Generic driver for Freescale DDR/DDR2/DDR3 memory controller. + * Based on code from spd_sdram.c + * Author: James Yang [at freescale.com] + */ + +#include <common.h> +#include <dm.h> +#include <i2c.h> +#include <fsl_ddr_sdram.h> +#include <fsl_ddr.h> +#include <init.h> +#include <log.h> +#include <asm/bitops.h> + +/* + * CONFIG_SYS_FSL_DDR_SDRAM_BASE_PHY is the physical address from the view + * of DDR controllers. It is the same as CONFIG_SYS_DDR_SDRAM_BASE for + * all Power SoCs. But it could be different for ARM SoCs. 
For example, + * fsl_lsch3 has a mapping mechanism to map DDR memory to ranges (in order) of + * 0x00_8000_0000 ~ 0x00_ffff_ffff + * 0x80_8000_0000 ~ 0xff_ffff_ffff + */ +#ifndef CONFIG_SYS_FSL_DDR_SDRAM_BASE_PHY +#ifdef CONFIG_MPC83xx +#define CONFIG_SYS_FSL_DDR_SDRAM_BASE_PHY CONFIG_SYS_SDRAM_BASE +#else +#define CONFIG_SYS_FSL_DDR_SDRAM_BASE_PHY CONFIG_SYS_DDR_SDRAM_BASE +#endif +#endif + +#ifdef CONFIG_PPC +#include <asm/fsl_law.h> + +void fsl_ddr_set_lawbar( + const common_timing_params_t *memctl_common_params, + unsigned int memctl_interleaved, + unsigned int ctrl_num); +#endif + +void fsl_ddr_set_intl3r(const unsigned int granule_size); +#if defined(SPD_EEPROM_ADDRESS) || \ + defined(SPD_EEPROM_ADDRESS1) || defined(SPD_EEPROM_ADDRESS2) || \ + defined(SPD_EEPROM_ADDRESS3) || defined(SPD_EEPROM_ADDRESS4) +#if (CONFIG_SYS_NUM_DDR_CTLRS == 1) && (CONFIG_DIMM_SLOTS_PER_CTLR == 1) +u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = { + [0][0] = SPD_EEPROM_ADDRESS, +}; +#elif (CONFIG_SYS_NUM_DDR_CTLRS == 1) && (CONFIG_DIMM_SLOTS_PER_CTLR == 2) +u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = { + [0][0] = SPD_EEPROM_ADDRESS1, /* controller 1 */ + [0][1] = SPD_EEPROM_ADDRESS2, /* controller 1 */ +}; +#elif (CONFIG_SYS_NUM_DDR_CTLRS == 2) && (CONFIG_DIMM_SLOTS_PER_CTLR == 1) +u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = { + [0][0] = SPD_EEPROM_ADDRESS1, /* controller 1 */ + [1][0] = SPD_EEPROM_ADDRESS2, /* controller 2 */ +}; +#elif (CONFIG_SYS_NUM_DDR_CTLRS == 2) && (CONFIG_DIMM_SLOTS_PER_CTLR == 2) +u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = { + [0][0] = SPD_EEPROM_ADDRESS1, /* controller 1 */ + [0][1] = SPD_EEPROM_ADDRESS2, /* controller 1 */ + [1][0] = SPD_EEPROM_ADDRESS3, /* controller 2 */ + [1][1] = SPD_EEPROM_ADDRESS4, /* controller 2 */ +}; +#elif (CONFIG_SYS_NUM_DDR_CTLRS == 3) && (CONFIG_DIMM_SLOTS_PER_CTLR == 1) +u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = { + [0][0] = SPD_EEPROM_ADDRESS1, /* controller 1 */ + [1][0] = SPD_EEPROM_ADDRESS2, /* controller 2 */ + [2][0] = SPD_EEPROM_ADDRESS3, /* controller 3 */ +}; +#elif (CONFIG_SYS_NUM_DDR_CTLRS == 3) && (CONFIG_DIMM_SLOTS_PER_CTLR == 2) +u8 spd_i2c_addr[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_DIMM_SLOTS_PER_CTLR] = { + [0][0] = SPD_EEPROM_ADDRESS1, /* controller 1 */ + [0][1] = SPD_EEPROM_ADDRESS2, /* controller 1 */ + [1][0] = SPD_EEPROM_ADDRESS3, /* controller 2 */ + [1][1] = SPD_EEPROM_ADDRESS4, /* controller 2 */ + [2][0] = SPD_EEPROM_ADDRESS5, /* controller 3 */ + [2][1] = SPD_EEPROM_ADDRESS6, /* controller 3 */ +}; + +#endif + +#if CONFIG_IS_ENABLED(DM_I2C) +#define DEV_TYPE struct udevice +#else +/* Local udevice */ +struct ludevice { + u8 chip; +}; + +#define DEV_TYPE struct ludevice + +#endif + +#define SPD_SPA0_ADDRESS 0x36 +#define SPD_SPA1_ADDRESS 0x37 + +static int ddr_i2c_read(DEV_TYPE *dev, unsigned int addr, + int alen, uint8_t *buf, int len) +{ + int ret; + +#if CONFIG_IS_ENABLED(DM_I2C) + ret = dm_i2c_read(dev, 0, buf, len); +#else + ret = i2c_read(dev->chip, addr, alen, buf, len); +#endif + + return ret; +} + +#ifdef CONFIG_SYS_FSL_DDR4 +static int ddr_i2c_dummy_write(unsigned int chip_addr) +{ + uint8_t buf = 0; + +#if CONFIG_IS_ENABLED(DM_I2C) + struct udevice *dev; + int ret; + + ret = i2c_get_chip_for_busnum(CONFIG_SYS_SPD_BUS_NUM, chip_addr, + 1, &dev); + if (ret) { + printf("%s: Cannot find udev for a bus %d\n", __func__, + CONFIG_SYS_SPD_BUS_NUM); + return ret; + } + + return 
dm_i2c_write(dev, 0, &buf, 1); +#else + return i2c_write(chip_addr, 0, 1, &buf, 1); +#endif + + return 0; +} +#endif + +static void __get_spd(generic_spd_eeprom_t *spd, u8 i2c_address) +{ + int ret; + DEV_TYPE *dev; + +#if CONFIG_IS_ENABLED(DM_I2C) + ret = i2c_get_chip_for_busnum(CONFIG_SYS_SPD_BUS_NUM, i2c_address, + 1, &dev); + if (ret) { + printf("%s: Cannot find udev for a bus %d\n", __func__, + CONFIG_SYS_SPD_BUS_NUM); + return; + } +#else /* Non DM I2C support - will be removed */ + struct ludevice ldev = { + .chip = i2c_address, + }; + dev = &ldev; + + i2c_set_bus_num(CONFIG_SYS_SPD_BUS_NUM); +#endif + +#ifdef CONFIG_SYS_FSL_DDR4 + /* + * DDR4 SPD has 384 to 512 bytes + * To access the lower 256 bytes, we need to set EE page address to 0 + * To access the upper 256 bytes, we need to set EE page address to 1 + * See Jedec standar No. 21-C for detail + */ + ddr_i2c_dummy_write(SPD_SPA0_ADDRESS); + ret = ddr_i2c_read(dev, 0, 1, (uchar *)spd, 256); + if (!ret) { + ddr_i2c_dummy_write(SPD_SPA1_ADDRESS); + ret = ddr_i2c_read(dev, 0, 1, (uchar *)((ulong)spd + 256), + min(256, + (int)sizeof(generic_spd_eeprom_t) + - 256)); + } + +#else + ret = ddr_i2c_read(dev, 0, 1, (uchar *)spd, + sizeof(generic_spd_eeprom_t)); +#endif + + if (ret) { + if (i2c_address == +#ifdef SPD_EEPROM_ADDRESS + SPD_EEPROM_ADDRESS +#elif defined(SPD_EEPROM_ADDRESS1) + SPD_EEPROM_ADDRESS1 +#endif + ) { + printf("DDR: failed to read SPD from address %u\n", + i2c_address); + } else { + debug("DDR: failed to read SPD from address %u\n", + i2c_address); + } + memset(spd, 0, sizeof(generic_spd_eeprom_t)); + } +} + +__attribute__((weak, alias("__get_spd"))) +void get_spd(generic_spd_eeprom_t *spd, u8 i2c_address); + +/* This function allows boards to update SPD address */ +__weak void update_spd_address(unsigned int ctrl_num, + unsigned int slot, + unsigned int *addr) +{ +} + +void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd, + unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl) +{ + unsigned int i; + unsigned int i2c_address = 0; + + if (ctrl_num >= CONFIG_SYS_NUM_DDR_CTLRS) { + printf("%s unexpected ctrl_num = %u\n", __FUNCTION__, ctrl_num); + return; + } + + for (i = 0; i < dimm_slots_per_ctrl; i++) { + i2c_address = spd_i2c_addr[ctrl_num][i]; + update_spd_address(ctrl_num, i, &i2c_address); + get_spd(&(ctrl_dimms_spd[i]), i2c_address); + } +} +#else +void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd, + unsigned int ctrl_num, unsigned int dimm_slots_per_ctrl) +{ +} +#endif /* SPD_EEPROM_ADDRESSx */ + +/* + * ASSUMPTIONS: + * - Same number of CONFIG_DIMM_SLOTS_PER_CTLR on each controller + * - Same memory data bus width on all controllers + * + * NOTES: + * + * The memory controller and associated documentation use confusing + * terminology when referring to the orgranization of DRAM. + * + * Here is a terminology translation table: + * + * memory controller/documention |industry |this code |signals + * -------------------------------|-----------|-----------|----------------- + * physical bank/bank |rank |rank |chip select (CS) + * logical bank/sub-bank |bank |bank |bank address (BA) + * page/row |row |page |row address + * ??? |column |column |column address + * + * The naming confusion is further exacerbated by the descriptions of the + * memory controller interleaving feature, where accesses are interleaved + * _BETWEEN_ two seperate memory controllers. This is configured only in + * CS0_CONFIG[INTLV_CTL] of each memory controller. 
+ * + * memory controller documentation | number of chip selects + * | per memory controller supported + * --------------------------------|----------------------------------------- + * cache line interleaving | 1 (CS0 only) + * page interleaving | 1 (CS0 only) + * bank interleaving | 1 (CS0 only) + * superbank interleraving | depends on bank (chip select) + * | interleraving [rank interleaving] + * | mode used on every memory controller + * + * Even further confusing is the existence of the interleaving feature + * _WITHIN_ each memory controller. The feature is referred to in + * documentation as chip select interleaving or bank interleaving, + * although it is configured in the DDR_SDRAM_CFG field. + * + * Name of field | documentation name | this code + * -----------------------------|-----------------------|------------------ + * DDR_SDRAM_CFG[BA_INTLV_CTL] | Bank (chip select) | rank interleaving + * | interleaving + */ + +const char *step_string_tbl[] = { + "STEP_GET_SPD", + "STEP_COMPUTE_DIMM_PARMS", + "STEP_COMPUTE_COMMON_PARMS", + "STEP_GATHER_OPTS", + "STEP_ASSIGN_ADDRESSES", + "STEP_COMPUTE_REGS", + "STEP_PROGRAM_REGS", + "STEP_ALL" +}; + +const char * step_to_string(unsigned int step) { + + unsigned int s = __ilog2(step); + + if ((1 << s) != step) + return step_string_tbl[7]; + + if (s >= ARRAY_SIZE(step_string_tbl)) { + printf("Error for the step in %s\n", __func__); + s = 0; + } + + return step_string_tbl[s]; +} + +static unsigned long long __step_assign_addresses(fsl_ddr_info_t *pinfo, + unsigned int dbw_cap_adj[]) +{ + unsigned int i, j; + unsigned long long total_mem, current_mem_base, total_ctlr_mem; + unsigned long long rank_density, ctlr_density = 0; + unsigned int first_ctrl = pinfo->first_ctrl; + unsigned int last_ctrl = first_ctrl + pinfo->num_ctrls - 1; + + /* + * If a reduced data width is requested, but the SPD + * specifies a physically wider device, adjust the + * computed dimm capacities accordingly before + * assigning addresses. 
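+	 * dbw_cap_adj[] is used as a right-shift amount below: e.g. a
+	 * 64-bit-wide DIMM behind a 32-bit bus exposes only half of its
+	 * SDRAM, so its rank density and capacity are shifted right by 1.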
+ */ + for (i = first_ctrl; i <= last_ctrl; i++) { + unsigned int found = 0; + + switch (pinfo->memctl_opts[i].data_bus_width) { + case 2: + /* 16-bit */ + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + unsigned int dw; + if (!pinfo->dimm_params[i][j].n_ranks) + continue; + dw = pinfo->dimm_params[i][j].primary_sdram_width; + if ((dw == 72 || dw == 64)) { + dbw_cap_adj[i] = 2; + break; + } else if ((dw == 40 || dw == 32)) { + dbw_cap_adj[i] = 1; + break; + } + } + break; + + case 1: + /* 32-bit */ + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + unsigned int dw; + dw = pinfo->dimm_params[i][j].data_width; + if (pinfo->dimm_params[i][j].n_ranks + && (dw == 72 || dw == 64)) { + /* + * FIXME: can't really do it + * like this because this just + * further reduces the memory + */ + found = 1; + break; + } + } + if (found) { + dbw_cap_adj[i] = 1; + } + break; + + case 0: + /* 64-bit */ + break; + + default: + printf("unexpected data bus width " + "specified controller %u\n", i); + return 1; + } + debug("dbw_cap_adj[%d]=%d\n", i, dbw_cap_adj[i]); + } + + current_mem_base = pinfo->mem_base; + total_mem = 0; + if (pinfo->memctl_opts[first_ctrl].memctl_interleaving) { + rank_density = pinfo->dimm_params[first_ctrl][0].rank_density >> + dbw_cap_adj[first_ctrl]; + switch (pinfo->memctl_opts[first_ctrl].ba_intlv_ctl & + FSL_DDR_CS0_CS1_CS2_CS3) { + case FSL_DDR_CS0_CS1_CS2_CS3: + ctlr_density = 4 * rank_density; + break; + case FSL_DDR_CS0_CS1: + case FSL_DDR_CS0_CS1_AND_CS2_CS3: + ctlr_density = 2 * rank_density; + break; + case FSL_DDR_CS2_CS3: + default: + ctlr_density = rank_density; + break; + } + debug("rank density is 0x%llx, ctlr density is 0x%llx\n", + rank_density, ctlr_density); + for (i = first_ctrl; i <= last_ctrl; i++) { + if (pinfo->memctl_opts[i].memctl_interleaving) { + switch (pinfo->memctl_opts[i].memctl_interleaving_mode) { + case FSL_DDR_256B_INTERLEAVING: + case FSL_DDR_CACHE_LINE_INTERLEAVING: + case FSL_DDR_PAGE_INTERLEAVING: + case FSL_DDR_BANK_INTERLEAVING: + case FSL_DDR_SUPERBANK_INTERLEAVING: + total_ctlr_mem = 2 * ctlr_density; + break; + case FSL_DDR_3WAY_1KB_INTERLEAVING: + case FSL_DDR_3WAY_4KB_INTERLEAVING: + case FSL_DDR_3WAY_8KB_INTERLEAVING: + total_ctlr_mem = 3 * ctlr_density; + break; + case FSL_DDR_4WAY_1KB_INTERLEAVING: + case FSL_DDR_4WAY_4KB_INTERLEAVING: + case FSL_DDR_4WAY_8KB_INTERLEAVING: + total_ctlr_mem = 4 * ctlr_density; + break; + default: + panic("Unknown interleaving mode"); + } + pinfo->common_timing_params[i].base_address = + current_mem_base; + pinfo->common_timing_params[i].total_mem = + total_ctlr_mem; + total_mem = current_mem_base + total_ctlr_mem; + debug("ctrl %d base 0x%llx\n", i, current_mem_base); + debug("ctrl %d total 0x%llx\n", i, total_ctlr_mem); + } else { + /* when 3rd controller not interleaved */ + current_mem_base = total_mem; + total_ctlr_mem = 0; + pinfo->common_timing_params[i].base_address = + current_mem_base; + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + unsigned long long cap = + pinfo->dimm_params[i][j].capacity >> dbw_cap_adj[i]; + pinfo->dimm_params[i][j].base_address = + current_mem_base; + debug("ctrl %d dimm %d base 0x%llx\n", i, j, current_mem_base); + current_mem_base += cap; + total_ctlr_mem += cap; + } + debug("ctrl %d total 0x%llx\n", i, total_ctlr_mem); + pinfo->common_timing_params[i].total_mem = + total_ctlr_mem; + total_mem += total_ctlr_mem; + } + } + } else { + /* + * Simple linear assignment if memory + * controllers are not interleaved. 
+ */ + for (i = first_ctrl; i <= last_ctrl; i++) { + total_ctlr_mem = 0; + pinfo->common_timing_params[i].base_address = + current_mem_base; + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + /* Compute DIMM base addresses. */ + unsigned long long cap = + pinfo->dimm_params[i][j].capacity >> dbw_cap_adj[i]; + pinfo->dimm_params[i][j].base_address = + current_mem_base; + debug("ctrl %d dimm %d base 0x%llx\n", i, j, current_mem_base); + current_mem_base += cap; + total_ctlr_mem += cap; + } + debug("ctrl %d total 0x%llx\n", i, total_ctlr_mem); + pinfo->common_timing_params[i].total_mem = + total_ctlr_mem; + total_mem += total_ctlr_mem; + } + } + debug("Total mem by %s is 0x%llx\n", __func__, total_mem); + + return total_mem; +} + +/* Use weak function to allow board file to override the address assignment */ +__attribute__((weak, alias("__step_assign_addresses"))) +unsigned long long step_assign_addresses(fsl_ddr_info_t *pinfo, + unsigned int dbw_cap_adj[]); + +unsigned long long +fsl_ddr_compute(fsl_ddr_info_t *pinfo, unsigned int start_step, + unsigned int size_only) +{ + unsigned int i, j; + unsigned long long total_mem = 0; + int assert_reset = 0; + unsigned int first_ctrl = pinfo->first_ctrl; + unsigned int last_ctrl = first_ctrl + pinfo->num_ctrls - 1; + __maybe_unused int retval; + __maybe_unused bool goodspd = false; + __maybe_unused int dimm_slots_per_ctrl = pinfo->dimm_slots_per_ctrl; + + fsl_ddr_cfg_regs_t *ddr_reg = pinfo->fsl_ddr_config_reg; + common_timing_params_t *timing_params = pinfo->common_timing_params; + if (pinfo->board_need_mem_reset) + assert_reset = pinfo->board_need_mem_reset(); + + /* data bus width capacity adjust shift amount */ + unsigned int dbw_capacity_adjust[CONFIG_SYS_NUM_DDR_CTLRS]; + + for (i = first_ctrl; i <= last_ctrl; i++) + dbw_capacity_adjust[i] = 0; + + debug("starting at step %u (%s)\n", + start_step, step_to_string(start_step)); + + switch (start_step) { + case STEP_GET_SPD: +#if defined(CONFIG_DDR_SPD) || defined(CONFIG_SPD_EEPROM) + /* STEP 1: Gather all DIMM SPD data */ + for (i = first_ctrl; i <= last_ctrl; i++) { + fsl_ddr_get_spd(pinfo->spd_installed_dimms[i], i, + dimm_slots_per_ctrl); + } + + case STEP_COMPUTE_DIMM_PARMS: + /* STEP 2: Compute DIMM parameters from SPD data */ + + for (i = first_ctrl; i <= last_ctrl; i++) { + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + generic_spd_eeprom_t *spd = + &(pinfo->spd_installed_dimms[i][j]); + dimm_params_t *pdimm = + &(pinfo->dimm_params[i][j]); + retval = compute_dimm_parameters( + i, spd, pdimm, j); +#ifdef CONFIG_SYS_DDR_RAW_TIMING + if (!j && retval) { + printf("SPD error on controller %d! " + "Trying fallback to raw timing " + "calculation\n", i); + retval = fsl_ddr_get_dimm_params(pdimm, + i, j); + } +#else + if (retval == 2) { + printf("Error: compute_dimm_parameters" + " non-zero returned FATAL value " + "for memctl=%u dimm=%u\n", i, j); + return 0; + } +#endif + if (retval) { + debug("Warning: compute_dimm_parameters" + " non-zero return value for memctl=%u " + "dimm=%u\n", i, j); + } else { + goodspd = true; + } + } + } + if (!goodspd) { + /* + * No valid SPD found + * Throw an error if this is for main memory, i.e. + * first_ctrl == 0. Otherwise, siliently return 0 + * as the memory size. 
+ */ + if (first_ctrl == 0) + printf("Error: No valid SPD detected.\n"); + + return 0; + } +#elif defined(CONFIG_SYS_DDR_RAW_TIMING) + case STEP_COMPUTE_DIMM_PARMS: + for (i = first_ctrl; i <= last_ctrl; i++) { + for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) { + dimm_params_t *pdimm = + &(pinfo->dimm_params[i][j]); + fsl_ddr_get_dimm_params(pdimm, i, j); + } + } + debug("Filling dimm parameters from board specific file\n"); +#endif + case STEP_COMPUTE_COMMON_PARMS: + /* + * STEP 3: Compute a common set of timing parameters + * suitable for all of the DIMMs on each memory controller + */ + for (i = first_ctrl; i <= last_ctrl; i++) { + debug("Computing lowest common DIMM" + " parameters for memctl=%u\n", i); + compute_lowest_common_dimm_parameters + (i, + pinfo->dimm_params[i], + &timing_params[i], + CONFIG_DIMM_SLOTS_PER_CTLR); + } + + case STEP_GATHER_OPTS: + /* STEP 4: Gather configuration requirements from user */ + for (i = first_ctrl; i <= last_ctrl; i++) { + debug("Reloading memory controller " + "configuration options for memctl=%u\n", i); + /* + * This "reloads" the memory controller options + * to defaults. If the user "edits" an option, + * next_step points to the step after this, + * which is currently STEP_ASSIGN_ADDRESSES. + */ + populate_memctl_options( + &timing_params[i], + &pinfo->memctl_opts[i], + pinfo->dimm_params[i], i); + /* + * For RDIMMs, JEDEC spec requires clocks to be stable + * before reset signal is deasserted. For the boards + * using fixed parameters, this function should be + * be called from board init file. + */ + if (timing_params[i].all_dimms_registered) + assert_reset = 1; + } + if (assert_reset && !size_only) { + if (pinfo->board_mem_reset) { + debug("Asserting mem reset\n"); + pinfo->board_mem_reset(); + } else { + debug("Asserting mem reset missing\n"); + } + } + + case STEP_ASSIGN_ADDRESSES: + /* STEP 5: Assign addresses to chip selects */ + check_interleaving_options(pinfo); + total_mem = step_assign_addresses(pinfo, dbw_capacity_adjust); + debug("Total mem %llu assigned\n", total_mem); + + case STEP_COMPUTE_REGS: + /* STEP 6: compute controller register values */ + debug("FSL Memory ctrl register computation\n"); + for (i = first_ctrl; i <= last_ctrl; i++) { + if (timing_params[i].ndimms_present == 0) { + memset(&ddr_reg[i], 0, + sizeof(fsl_ddr_cfg_regs_t)); + continue; + } + + compute_fsl_memctl_config_regs + (i, + &pinfo->memctl_opts[i], + &ddr_reg[i], &timing_params[i], + pinfo->dimm_params[i], + dbw_capacity_adjust[i], + size_only); + } + + default: + break; + } + + { + /* + * Compute the amount of memory available just by + * looking for the highest valid CSn_BNDS value. + * This allows us to also experiment with using + * only CS0 when using dual-rank DIMMs. 
+ */ + unsigned int max_end = 0; + + for (i = first_ctrl; i <= last_ctrl; i++) { + for (j = 0; j < CONFIG_CHIP_SELECTS_PER_CTRL; j++) { + fsl_ddr_cfg_regs_t *reg = &ddr_reg[i]; + if (reg->cs[j].config & 0x80000000) { + unsigned int end; + /* + * 0xfffffff is a special value we put + * for unused bnds + */ + if (reg->cs[j].bnds == 0xffffffff) + continue; + end = reg->cs[j].bnds & 0xffff; + if (end > max_end) { + max_end = end; + } + } + } + } + + total_mem = 1 + (((unsigned long long)max_end << 24ULL) | + 0xFFFFFFULL) - pinfo->mem_base; + } + + return total_mem; +} + +phys_size_t __fsl_ddr_sdram(fsl_ddr_info_t *pinfo) +{ + unsigned int i, first_ctrl, last_ctrl; +#ifdef CONFIG_PPC + unsigned int law_memctl = LAW_TRGT_IF_DDR_1; +#endif + unsigned long long total_memory; + int deassert_reset = 0; + + first_ctrl = pinfo->first_ctrl; + last_ctrl = first_ctrl + pinfo->num_ctrls - 1; + + /* Compute it once normally. */ +#ifdef CONFIG_FSL_DDR_INTERACTIVE + if (tstc() && (getchar() == 'd')) { /* we got a key press of 'd' */ + total_memory = fsl_ddr_interactive(pinfo, 0); + } else if (fsl_ddr_interactive_env_var_exists()) { + total_memory = fsl_ddr_interactive(pinfo, 1); + } else +#endif + total_memory = fsl_ddr_compute(pinfo, STEP_GET_SPD, 0); + + /* setup 3-way interleaving before enabling DDRC */ + switch (pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode) { + case FSL_DDR_3WAY_1KB_INTERLEAVING: + case FSL_DDR_3WAY_4KB_INTERLEAVING: + case FSL_DDR_3WAY_8KB_INTERLEAVING: + fsl_ddr_set_intl3r( + pinfo->memctl_opts[first_ctrl]. + memctl_interleaving_mode); + break; + default: + break; + } + + /* + * Program configuration registers. + * JEDEC specs requires clocks to be stable before deasserting reset + * for RDIMMs. Clocks start after chip select is enabled and clock + * control register is set. During step 1, all controllers have their + * registers set but not enabled. Step 2 proceeds after deasserting + * reset through board FPGA or GPIO. + * For non-registered DIMMs, initialization can go through but it is + * also OK to follow the same flow. + */ + if (pinfo->board_need_mem_reset) + deassert_reset = pinfo->board_need_mem_reset(); + for (i = first_ctrl; i <= last_ctrl; i++) { + if (pinfo->common_timing_params[i].all_dimms_registered) + deassert_reset = 1; + } + for (i = first_ctrl; i <= last_ctrl; i++) { + debug("Programming controller %u\n", i); + if (pinfo->common_timing_params[i].ndimms_present == 0) { + debug("No dimms present on controller %u; " + "skipping programming\n", i); + continue; + } + /* + * The following call with step = 1 returns before enabling + * the controller. It has to finish with step = 2 later. + */ + fsl_ddr_set_memctl_regs(&(pinfo->fsl_ddr_config_reg[i]), i, + deassert_reset ? 1 : 0); + } + if (deassert_reset) { + /* Use board FPGA or GPIO to deassert reset signal */ + if (pinfo->board_mem_de_reset) { + debug("Deasserting mem reset\n"); + pinfo->board_mem_de_reset(); + } else { + debug("Deasserting mem reset missing\n"); + } + for (i = first_ctrl; i <= last_ctrl; i++) { + /* Call with step = 2 to continue initialization */ + fsl_ddr_set_memctl_regs(&(pinfo->fsl_ddr_config_reg[i]), + i, 2); + } + } + +#ifdef CONFIG_FSL_DDR_SYNC_REFRESH + fsl_ddr_sync_memctl_refresh(first_ctrl, last_ctrl); +#endif + +#ifdef CONFIG_PPC + /* program LAWs */ + for (i = first_ctrl; i <= last_ctrl; i++) { + if (pinfo->memctl_opts[i].memctl_interleaving) { + switch (pinfo->memctl_opts[i]. 
+ memctl_interleaving_mode) { + case FSL_DDR_CACHE_LINE_INTERLEAVING: + case FSL_DDR_PAGE_INTERLEAVING: + case FSL_DDR_BANK_INTERLEAVING: + case FSL_DDR_SUPERBANK_INTERLEAVING: + if (i % 2) + break; + if (i == 0) { + law_memctl = LAW_TRGT_IF_DDR_INTRLV; + fsl_ddr_set_lawbar( + &pinfo->common_timing_params[i], + law_memctl, i); + } +#if CONFIG_SYS_NUM_DDR_CTLRS > 3 + else if (i == 2) { + law_memctl = LAW_TRGT_IF_DDR_INTLV_34; + fsl_ddr_set_lawbar( + &pinfo->common_timing_params[i], + law_memctl, i); + } +#endif + break; + case FSL_DDR_3WAY_1KB_INTERLEAVING: + case FSL_DDR_3WAY_4KB_INTERLEAVING: + case FSL_DDR_3WAY_8KB_INTERLEAVING: + law_memctl = LAW_TRGT_IF_DDR_INTLV_123; + if (i == 0) { + fsl_ddr_set_lawbar( + &pinfo->common_timing_params[i], + law_memctl, i); + } + break; + case FSL_DDR_4WAY_1KB_INTERLEAVING: + case FSL_DDR_4WAY_4KB_INTERLEAVING: + case FSL_DDR_4WAY_8KB_INTERLEAVING: + law_memctl = LAW_TRGT_IF_DDR_INTLV_1234; + if (i == 0) + fsl_ddr_set_lawbar( + &pinfo->common_timing_params[i], + law_memctl, i); + /* place holder for future 4-way interleaving */ + break; + default: + break; + } + } else { + switch (i) { + case 0: + law_memctl = LAW_TRGT_IF_DDR_1; + break; + case 1: + law_memctl = LAW_TRGT_IF_DDR_2; + break; + case 2: + law_memctl = LAW_TRGT_IF_DDR_3; + break; + case 3: + law_memctl = LAW_TRGT_IF_DDR_4; + break; + default: + break; + } + fsl_ddr_set_lawbar(&pinfo->common_timing_params[i], + law_memctl, i); + } + } +#endif + + debug("total_memory by %s = %llu\n", __func__, total_memory); + +#if !defined(CONFIG_PHYS_64BIT) + /* Check for 4G or more. Bad. */ + if ((first_ctrl == 0) && (total_memory >= (1ull << 32))) { + puts("Detected "); + print_size(total_memory, " of memory\n"); + printf(" This U-Boot only supports < 4G of DDR\n"); + printf(" You could rebuild it with CONFIG_PHYS_64BIT\n"); + printf(" "); /* re-align to match init_dram print */ + total_memory = CONFIG_MAX_MEM_MAPPED; + } +#endif + + return total_memory; +} + +/* + * fsl_ddr_sdram(void) -- this is the main function to be + * called by dram_init() in the board file. + * + * It returns amount of memory configured in bytes. + */ +phys_size_t fsl_ddr_sdram(void) +{ + fsl_ddr_info_t info; + + /* Reset info structure. */ + memset(&info, 0, sizeof(fsl_ddr_info_t)); + info.mem_base = CONFIG_SYS_FSL_DDR_SDRAM_BASE_PHY; + info.first_ctrl = 0; + info.num_ctrls = CONFIG_SYS_FSL_DDR_MAIN_NUM_CTRLS; + info.dimm_slots_per_ctrl = CONFIG_DIMM_SLOTS_PER_CTLR; + info.board_need_mem_reset = board_need_mem_reset; + info.board_mem_reset = board_assert_mem_reset; + info.board_mem_de_reset = board_deassert_mem_reset; + remove_unused_controllers(&info); + + return __fsl_ddr_sdram(&info); +} + +#ifdef CONFIG_SYS_FSL_OTHER_DDR_NUM_CTRLS +phys_size_t fsl_other_ddr_sdram(unsigned long long base, + unsigned int first_ctrl, + unsigned int num_ctrls, + unsigned int dimm_slots_per_ctrl, + int (*board_need_reset)(void), + void (*board_reset)(void), + void (*board_de_reset)(void)) +{ + fsl_ddr_info_t info; + + /* Reset info structure. */ + memset(&info, 0, sizeof(fsl_ddr_info_t)); + info.mem_base = base; + info.first_ctrl = first_ctrl; + info.num_ctrls = num_ctrls; + info.dimm_slots_per_ctrl = dimm_slots_per_ctrl; + info.board_need_mem_reset = board_need_reset; + info.board_mem_reset = board_reset; + info.board_mem_de_reset = board_de_reset; + + return __fsl_ddr_sdram(&info); +} +#endif + +/* + * fsl_ddr_sdram_size(first_ctrl, last_intlv) - This function only returns the + * size of the total memory without setting ddr control registers. 
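+ *
+ * It calls fsl_ddr_compute() with size_only = 1, so SPD is read and all
+ * parameters are computed, but no controller registers are touched.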
+ */ +phys_size_t +fsl_ddr_sdram_size(void) +{ + fsl_ddr_info_t info; + unsigned long long total_memory = 0; + + memset(&info, 0 , sizeof(fsl_ddr_info_t)); + info.mem_base = CONFIG_SYS_FSL_DDR_SDRAM_BASE_PHY; + info.first_ctrl = 0; + info.num_ctrls = CONFIG_SYS_FSL_DDR_MAIN_NUM_CTRLS; + info.dimm_slots_per_ctrl = CONFIG_DIMM_SLOTS_PER_CTLR; + info.board_need_mem_reset = NULL; + remove_unused_controllers(&info); + + /* Compute it once normally. */ + total_memory = fsl_ddr_compute(&info, STEP_GET_SPD, 1); + + return total_memory; +} diff --git a/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen1.c b/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen1.c new file mode 100644 index 000000000..572f3703d --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen1.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008 Freescale Semiconductor, Inc. + */ + +#include <common.h> +#include <log.h> +#include <asm/io.h> +#include <fsl_ddr_sdram.h> +#include <linux/delay.h> + +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 4) +#error Invalid setting for CONFIG_CHIP_SELECTS_PER_CTRL +#endif + +void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs, + unsigned int ctrl_num, int step) +{ + unsigned int i; + struct ccsr_ddr __iomem *ddr = + (struct ccsr_ddr __iomem *)CONFIG_SYS_FSL_DDR_ADDR; + + if (ctrl_num != 0) { + printf("%s unexpected ctrl_num = %u\n", __FUNCTION__, ctrl_num); + return; + } + + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (i == 0) { + out_be32(&ddr->cs0_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs0_config, regs->cs[i].config); + + } else if (i == 1) { + out_be32(&ddr->cs1_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs1_config, regs->cs[i].config); + + } else if (i == 2) { + out_be32(&ddr->cs2_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs2_config, regs->cs[i].config); + + } else if (i == 3) { + out_be32(&ddr->cs3_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs3_config, regs->cs[i].config); + } + } + + out_be32(&ddr->timing_cfg_1, regs->timing_cfg_1); + out_be32(&ddr->timing_cfg_2, regs->timing_cfg_2); + out_be32(&ddr->sdram_mode, regs->ddr_sdram_mode); + out_be32(&ddr->sdram_interval, regs->ddr_sdram_interval); +#if defined(CONFIG_ARCH_MPC8555) || defined(CONFIG_ARCH_MPC8541) + out_be32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl); +#endif + + /* + * 200 painful micro-seconds must elapse between + * the DDR clock setup and the DDR config enable. + */ + udelay(200); + asm volatile("sync;isync"); + + out_be32(&ddr->sdram_cfg, regs->ddr_sdram_cfg); + + asm("sync;isync;msync"); + udelay(500); +} + +#if defined(CONFIG_DDR_ECC) && !defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER) +/* + * Initialize all of memory for ECC, then enable errors. + */ + +void +ddr_enable_ecc(unsigned int dram_size) +{ + struct ccsr_ddr __iomem *ddr = + (struct ccsr_ddr __iomem *)(CONFIG_SYS_FSL_DDR_ADDR); + + dma_meminit(CONFIG_MEM_INIT_VALUE, dram_size); + + /* + * Enable errors for ECC. + */ + debug("DMA DDR: err_disable = 0x%08x\n", ddr->err_disable); + ddr->err_disable = 0x00000000; + asm("sync;isync;msync"); + debug("DMA DDR: err_disable = 0x%08x\n", ddr->err_disable); +} + +#endif /* CONFIG_DDR_ECC && ! CONFIG_ECC_INIT_VIA_DDRCONTROLLER */ diff --git a/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen2.c b/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen2.c new file mode 100644 index 000000000..d7b8064e5 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen2.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008-2011 Freescale Semiconductor, Inc. 
+ */ + +#include <common.h> +#include <asm/io.h> +#include <asm/processor.h> +#include <fsl_ddr_sdram.h> +#include <linux/delay.h> + +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 4) +#error Invalid setting for CONFIG_CHIP_SELECTS_PER_CTRL +#endif + +void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs, + unsigned int ctrl_num, int step) +{ + unsigned int i; + struct ccsr_ddr __iomem *ddr = + (struct ccsr_ddr __iomem *)CONFIG_SYS_FSL_DDR_ADDR; + +#if defined(CONFIG_SYS_FSL_ERRATUM_NMG_DDR120) && defined(CONFIG_MPC85xx) + ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); + uint svr; +#endif + + if (ctrl_num) { + printf("%s unexpected ctrl_num = %u\n", __FUNCTION__, ctrl_num); + return; + } + +#ifdef CONFIG_SYS_FSL_ERRATUM_NMG_DDR120 + /* + * Set the DDR IO receiver to an acceptable bias point. + * Fixed in Rev 2.1. + */ + svr = get_svr(); + if ((SVR_MAJ(svr) == 1) || IS_SVR_REV(svr, 2, 0)) { + if ((regs->ddr_sdram_cfg & SDRAM_CFG_SDRAM_TYPE_MASK) == + SDRAM_CFG_SDRAM_TYPE_DDR2) + out_be32(&gur->ddrioovcr, 0x90000000); + else + out_be32(&gur->ddrioovcr, 0xA8000000); + } +#endif + + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (i == 0) { + out_be32(&ddr->cs0_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs0_config, regs->cs[i].config); + + } else if (i == 1) { + out_be32(&ddr->cs1_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs1_config, regs->cs[i].config); + + } else if (i == 2) { + out_be32(&ddr->cs2_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs2_config, regs->cs[i].config); + + } else if (i == 3) { + out_be32(&ddr->cs3_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs3_config, regs->cs[i].config); + } + } + + out_be32(&ddr->timing_cfg_3, regs->timing_cfg_3); + out_be32(&ddr->timing_cfg_0, regs->timing_cfg_0); + out_be32(&ddr->timing_cfg_1, regs->timing_cfg_1); + out_be32(&ddr->timing_cfg_2, regs->timing_cfg_2); + out_be32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2); + out_be32(&ddr->sdram_mode, regs->ddr_sdram_mode); + out_be32(&ddr->sdram_mode_2, regs->ddr_sdram_mode_2); + out_be32(&ddr->sdram_md_cntl, regs->ddr_sdram_md_cntl); + out_be32(&ddr->sdram_interval, regs->ddr_sdram_interval); + out_be32(&ddr->sdram_data_init, regs->ddr_data_init); + out_be32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl); + out_be32(&ddr->init_addr, regs->ddr_init_addr); + out_be32(&ddr->init_ext_addr, regs->ddr_init_ext_addr); + + /* + * 200 painful micro-seconds must elapse between + * the DDR clock setup and the DDR config enable. + */ + udelay(200); + asm volatile("sync;isync"); + + out_be32(&ddr->sdram_cfg, regs->ddr_sdram_cfg); + + /* Poll DDR_SDRAM_CFG_2[D_INIT] bit until auto-data init is done. */ + while (in_be32(&ddr->sdram_cfg_2) & 0x10) { + udelay(10000); /* throttle polling rate */ + } +} diff --git a/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen3.c b/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen3.c new file mode 100644 index 000000000..1ed4d50cc --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/mpc85xx_ddr_gen3.c @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008-2020 Freescale Semiconductor, Inc. 
+ */ + +#include <common.h> +#include <log.h> +#include <asm/io.h> +#include <fsl_ddr_sdram.h> +#include <asm/processor.h> +#include <linux/delay.h> + +#if (CONFIG_CHIP_SELECTS_PER_CTRL > 4) +#error Invalid setting for CONFIG_CHIP_SELECTS_PER_CTRL +#endif + +/* + * regs has the to-be-set values for DDR controller registers + * ctrl_num is the DDR controller number + * step: 0 goes through the initialization in one pass + * 1 sets registers and returns before enabling controller + * 2 resumes from step 1 and continues to initialize + * Dividing the initialization to two steps to deassert DDR reset signal + * to comply with JEDEC specs for RDIMMs. + */ +void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs, + unsigned int ctrl_num, int step) +{ + unsigned int i, bus_width; + struct ccsr_ddr __iomem *ddr; + u32 temp_sdram_cfg; + u32 total_gb_size_per_controller; + int timeout; +#ifdef CONFIG_SYS_FSL_ERRATUM_DDR111_DDR134 + int timeout_save; + volatile ccsr_local_ecm_t *ecm = (void *)CONFIG_SYS_MPC85xx_ECM_ADDR; + unsigned int csn_bnds_backup = 0, cs_sa, cs_ea, *csn_bnds_t; + int csn = -1; +#endif +#ifdef CONFIG_SYS_FSL_ERRATUM_DDR_A003 + u32 save1, save2; +#endif +#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) || \ + (defined(CONFIG_SYS_FSL_ERRATUM_A008378) && \ + defined(CONFIG_SYS_FSL_DDRC_GEN4)) || \ + defined(CONFIG_SYS_FSL_ERRATUM_A008109) + u32 val32; +#endif +#ifdef CONFIG_SYS_FSL_ERRATUM_A009942 + unsigned int ddr_freq; +#endif + + switch (ctrl_num) { + case 0: + ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR; + break; +#if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 1) + case 1: + ddr = (void *)CONFIG_SYS_FSL_DDR2_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 2) + case 2: + ddr = (void *)CONFIG_SYS_FSL_DDR3_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 3) + case 3: + ddr = (void *)CONFIG_SYS_FSL_DDR4_ADDR; + break; +#endif + default: + printf("%s unexpected ctrl_num = %u\n", __FUNCTION__, ctrl_num); + return; + } + + if (step == 2) + goto step2; + + if (regs->ddr_eor) + out_be32(&ddr->eor, regs->ddr_eor); +#ifdef CONFIG_SYS_FSL_ERRATUM_DDR111_DDR134 + debug("Workaround for ERRATUM_DDR111_DDR134\n"); + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + cs_sa = (regs->cs[i].bnds >> 16) & 0xfff; + cs_ea = regs->cs[i].bnds & 0xfff; + if ((cs_sa <= 0xff) && (cs_ea >= 0xff)) { + csn = i; + csn_bnds_backup = regs->cs[i].bnds; + csn_bnds_t = (unsigned int *) ®s->cs[i].bnds; + if (cs_ea > 0xeff) + *csn_bnds_t = regs->cs[i].bnds + 0x01000000; + else + *csn_bnds_t = regs->cs[i].bnds + 0x01000100; + debug("Found cs%d_bns (0x%08x) covering 0xff000000, " + "change it to 0x%x\n", + csn, csn_bnds_backup, regs->cs[i].bnds); + break; + } + } +#endif + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (i == 0) { + out_be32(&ddr->cs0_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs0_config, regs->cs[i].config); + out_be32(&ddr->cs0_config_2, regs->cs[i].config_2); + + } else if (i == 1) { + out_be32(&ddr->cs1_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs1_config, regs->cs[i].config); + out_be32(&ddr->cs1_config_2, regs->cs[i].config_2); + + } else if (i == 2) { + out_be32(&ddr->cs2_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs2_config, regs->cs[i].config); + out_be32(&ddr->cs2_config_2, regs->cs[i].config_2); + + } else if (i == 3) { + out_be32(&ddr->cs3_bnds, regs->cs[i].bnds); + out_be32(&ddr->cs3_config, regs->cs[i].config); + out_be32(&ddr->cs3_config_2, regs->cs[i].config_2); + } + } + 
+ out_be32(&ddr->timing_cfg_3, regs->timing_cfg_3); + out_be32(&ddr->timing_cfg_0, regs->timing_cfg_0); + out_be32(&ddr->timing_cfg_1, regs->timing_cfg_1); + out_be32(&ddr->timing_cfg_2, regs->timing_cfg_2); + out_be32(&ddr->sdram_mode, regs->ddr_sdram_mode); + out_be32(&ddr->sdram_mode_2, regs->ddr_sdram_mode_2); + out_be32(&ddr->sdram_mode_3, regs->ddr_sdram_mode_3); + out_be32(&ddr->sdram_mode_4, regs->ddr_sdram_mode_4); + out_be32(&ddr->sdram_mode_5, regs->ddr_sdram_mode_5); + out_be32(&ddr->sdram_mode_6, regs->ddr_sdram_mode_6); + out_be32(&ddr->sdram_mode_7, regs->ddr_sdram_mode_7); + out_be32(&ddr->sdram_mode_8, regs->ddr_sdram_mode_8); + out_be32(&ddr->sdram_md_cntl, regs->ddr_sdram_md_cntl); + out_be32(&ddr->sdram_interval, regs->ddr_sdram_interval); + out_be32(&ddr->sdram_data_init, regs->ddr_data_init); + out_be32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl); + out_be32(&ddr->timing_cfg_4, regs->timing_cfg_4); + out_be32(&ddr->timing_cfg_5, regs->timing_cfg_5); + out_be32(&ddr->ddr_zq_cntl, regs->ddr_zq_cntl); + out_be32(&ddr->ddr_wrlvl_cntl, regs->ddr_wrlvl_cntl); +#ifndef CONFIG_SYS_FSL_DDR_EMU + /* + * Skip these two registers if running on emulator + * because emulator doesn't have skew between bytes. + */ + + if (regs->ddr_wrlvl_cntl_2) + out_be32(&ddr->ddr_wrlvl_cntl_2, regs->ddr_wrlvl_cntl_2); + if (regs->ddr_wrlvl_cntl_3) + out_be32(&ddr->ddr_wrlvl_cntl_3, regs->ddr_wrlvl_cntl_3); +#endif + + out_be32(&ddr->ddr_sr_cntr, regs->ddr_sr_cntr); + out_be32(&ddr->ddr_sdram_rcw_1, regs->ddr_sdram_rcw_1); + out_be32(&ddr->ddr_sdram_rcw_2, regs->ddr_sdram_rcw_2); + out_be32(&ddr->ddr_cdr1, regs->ddr_cdr1); +#ifdef CONFIG_DEEP_SLEEP + if (is_warm_boot()) { + out_be32(&ddr->sdram_cfg_2, + regs->ddr_sdram_cfg_2 & ~SDRAM_CFG2_D_INIT); + out_be32(&ddr->init_addr, CONFIG_SYS_SDRAM_BASE); + out_be32(&ddr->init_ext_addr, DDR_INIT_ADDR_EXT_UIA); + + /* DRAM VRef will not be trained */ + out_be32(&ddr->ddr_cdr2, + regs->ddr_cdr2 & ~DDR_CDR2_VREF_TRAIN_EN); + } else +#endif + { + out_be32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2); + out_be32(&ddr->init_addr, regs->ddr_init_addr); + out_be32(&ddr->init_ext_addr, regs->ddr_init_ext_addr); + out_be32(&ddr->ddr_cdr2, regs->ddr_cdr2); + } + out_be32(&ddr->err_disable, regs->err_disable); + out_be32(&ddr->err_int_en, regs->err_int_en); + for (i = 0; i < 32; i++) { + if (regs->debug[i]) { + debug("Write to debug_%d as %08x\n", i+1, regs->debug[i]); + out_be32(&ddr->debug[i], regs->debug[i]); + } + } + +#ifdef CONFIG_SYS_FSL_ERRATUM_DDR_A003474 + out_be32(&ddr->debug[12], 0x00000015); + out_be32(&ddr->debug[21], 0x24000000); +#endif /* CONFIG_SYS_FSL_ERRATUM_DDR_A003474 */ + + /* + * For RDIMMs, JEDEC spec requires clocks to be stable before reset is + * deasserted. Clocks start when any chip select is enabled and clock + * control register is set. Because all DDR components are connected to + * one reset signal, this needs to be done in two steps. Step 1 is to + * get the clocks started. Step 2 resumes after reset signal is + * deasserted. 
+ */ + if (step == 1) { + udelay(200); + return; + } + +step2: + /* Set, but do not enable the memory */ + temp_sdram_cfg = regs->ddr_sdram_cfg; + temp_sdram_cfg &= ~(SDRAM_CFG_MEM_EN); + out_be32(&ddr->sdram_cfg, temp_sdram_cfg); +#ifdef CONFIG_SYS_FSL_ERRATUM_DDR_A003 + debug("Workaround for ERRATUM_DDR_A003\n"); + if (regs->ddr_sdram_rcw_2 & 0x00f00000) { + out_be32(&ddr->timing_cfg_2, regs->timing_cfg_2 & 0xf07fffff); + out_be32(&ddr->debug[2], 0x00000400); + out_be32(&ddr->ddr_zq_cntl, regs->ddr_zq_cntl & 0x7fffffff); + out_be32(&ddr->ddr_wrlvl_cntl, regs->ddr_wrlvl_cntl & 0x7fffffff); + out_be32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2 & 0xffffffeb); + out_be32(&ddr->mtcr, 0); + save1 = in_be32(&ddr->debug[12]); + save2 = in_be32(&ddr->debug[21]); + out_be32(&ddr->debug[12], 0x00000015); + out_be32(&ddr->debug[21], 0x24000000); + out_be32(&ddr->sdram_interval, regs->ddr_sdram_interval & 0xffff); + out_be32(&ddr->sdram_cfg, temp_sdram_cfg | SDRAM_CFG_BI | SDRAM_CFG_MEM_EN); + + asm volatile("sync;isync"); + while (!(in_be32(&ddr->debug[1]) & 0x2)) + ; + + switch (regs->ddr_sdram_rcw_2 & 0x00f00000) { + case 0x00000000: + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS0_CS1 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x02)); +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + if (!(regs->cs[2].config & SDRAM_CS_CONFIG_EN)) + break; + while (in_be32(&ddr->sdram_md_cntl) & MD_CNTL_MD_EN) + ; + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS2_CS3 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x02)); +#endif + break; + case 0x00100000: + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS0_CS1 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x0a)); +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + if (!(regs->cs[2].config & SDRAM_CS_CONFIG_EN)) + break; + while (in_be32(&ddr->sdram_md_cntl) & MD_CNTL_MD_EN) + ; + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS2_CS3 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x0a)); +#endif + break; + case 0x00200000: + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS0_CS1 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x12)); +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + if (!(regs->cs[2].config & SDRAM_CS_CONFIG_EN)) + break; + while (in_be32(&ddr->sdram_md_cntl) & MD_CNTL_MD_EN) + ; + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS2_CS3 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x12)); +#endif + break; + case 0x00300000: + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS0_CS1 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x1a)); +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + if (!(regs->cs[2].config & SDRAM_CS_CONFIG_EN)) + break; + while (in_be32(&ddr->sdram_md_cntl) & MD_CNTL_MD_EN) + ; + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS2_CS3 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x1a)); +#endif + break; + default: + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS0_CS1 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x02)); +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + if (!(regs->cs[2].config & SDRAM_CS_CONFIG_EN)) + break; + while (in_be32(&ddr->sdram_md_cntl) & MD_CNTL_MD_EN) + ; + out_be32(&ddr->sdram_md_cntl, + MD_CNTL_MD_EN | + MD_CNTL_CS_SEL_CS2_CS3 | + 0x04000000 | + MD_CNTL_WRCW | + MD_CNTL_MD_VALUE(0x02)); +#endif + printf("Unsupported RC10\n"); + break; + } + + while (in_be32(&ddr->sdram_md_cntl) & 0x80000000) + ; + udelay(6); + 
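+		/*
+		 * An informal note on the sequence above (field layout
+		 * inferred from the masks, not from a register manual):
+		 * rcw_2 bits [23:20] appear to hold RDIMM register control
+		 * word RC10, and each case issues a manual control-word
+		 * write (MD_CNTL_WRCW) whose MD value tracks RC10, first to
+		 * CS0/CS1 and, when a second DIMM is populated, to CS2/CS3.
+		 * The registers borrowed for the workaround are restored to
+		 * their intended values below.
+		 */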
+		out_be32(&ddr->sdram_cfg, temp_sdram_cfg);
+		out_be32(&ddr->timing_cfg_2, regs->timing_cfg_2);
+		out_be32(&ddr->debug[2], 0x0);
+		out_be32(&ddr->ddr_zq_cntl, regs->ddr_zq_cntl);
+		out_be32(&ddr->ddr_wrlvl_cntl, regs->ddr_wrlvl_cntl);
+		out_be32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2);
+		out_be32(&ddr->debug[12], save1);
+		out_be32(&ddr->debug[21], save2);
+		out_be32(&ddr->sdram_interval, regs->ddr_sdram_interval);
+
+	}
+#endif
+
+#if defined(CONFIG_SYS_FSL_ERRATUM_A008378) && defined(CONFIG_SYS_FSL_DDRC_GEN4)
+	/* Erratum applies when accumulated ECC is used, or DBI is enabled */
+#define IS_ACC_ECC_EN(v) ((v) & 0x4)
+#define IS_DBI(v) ((((v) >> 12) & 0x3) == 0x2)
+	if (has_erratum_a008378()) {
+		if (IS_ACC_ECC_EN(regs->ddr_sdram_cfg) ||
+		    IS_DBI(regs->ddr_sdram_cfg_3)) {
+			val32 = ddr_in32(&ddr->debug[28]);
+			val32 |= (0x9 << 20);
+			ddr_out32(&ddr->debug[28], val32);
+		}
+		debug("Applied errata CONFIG_SYS_FSL_ERRATUM_A008378\n");
+	}
+#endif
+
+#if defined(CONFIG_SYS_FSL_ERRATUM_A008109)
+	val32 = in_be32(&ddr->sdram_cfg_2) | 0x800;	/* DDR_SLOW */
+	out_be32(&ddr->sdram_cfg_2, val32);
+
+	val32 = in_be32(&ddr->debug[18]) | 0x2;
+	out_be32(&ddr->debug[18], val32);
+
+	out_be32(&ddr->debug[28], 0x30000000);
+	debug("Applied errata CONFIG_SYS_FSL_ERRATUM_A008109\n");
+#endif
+
+#ifdef CONFIG_SYS_FSL_ERRATUM_A009942
+	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
+	val32 = in_be32(&ddr->debug[28]);
+	val32 &= 0xff0fff00;
+	if (ddr_freq <= 1333)
+		val32 |= 0x0080006a;
+	else if (ddr_freq <= 1600)
+		val32 |= 0x0070006f;
+	else if (ddr_freq <= 1867)
+		val32 |= 0x00700076;
+	else if (ddr_freq <= 2133)
+		val32 |= 0x0060007b;
+
+	out_be32(&ddr->debug[28], val32);
+	debug("Applied errata CONFIG_SYS_FSL_ERRATUM_A009942\n");
+#endif
+	/*
+	 * For the 8572 DDR1 erratum: the DDR controller may enter an
+	 * illegal state when operating in 32-bit bus mode with 4-beat
+	 * bursts. This erratum applies only to DDR2 mode; DDR3 mode is
+	 * not affected.
+	 */
+#ifdef CONFIG_SYS_FSL_ERRATUM_DDR_115
+	debug("Workaround for ERRATUM_DDR_115\n");
+	if ((((in_be32(&ddr->sdram_cfg) >> 24) & 0x7) == SDRAM_TYPE_DDR2)
+	    && in_be32(&ddr->sdram_cfg) & 0x80000) {
+		/* set DEBUG_1[31] */
+		setbits_be32(&ddr->debug[0], 1);
+	}
+#endif
+#ifdef CONFIG_SYS_FSL_ERRATUM_DDR111_DDR134
+	debug("Workaround for ERRATUM_DDR111_DDR134\n");
+	/*
+	 * This is the combined workaround for DDR111 and DDR134
+	 * following the published errata for MPC8572
+	 */
+
+	/* 1. Set EEBACR[3] */
+	setbits_be32(&ecm->eebacr, 0x10000000);
+	debug("Setting EEBACR[3] to 0x%08x\n", in_be32(&ecm->eebacr));
+
+	/* 2. Set DINIT in SDRAM_CFG_2 */
+	setbits_be32(&ddr->sdram_cfg_2, SDRAM_CFG2_D_INIT);
+	debug("Setting sdram_cfg_2[D_INIT] to 0x%08x\n",
+		in_be32(&ddr->sdram_cfg_2));
+
+	/* 3. Set DEBUG_3[21] */
+	setbits_be32(&ddr->debug[2], 0x400);
+	debug("Setting DEBUG_3[21] to 0x%08x\n", in_be32(&ddr->debug[2]));
+
+#endif /* part 1 of the workaround */
+
+	/*
+	 * 500 painful microseconds must elapse between
+	 * the DDR clock setup and the DDR config enable.
+	 * The spec requires 200 us for DDR2 and 500 us for DDR3;
+	 * we use the larger value, 500 us, in all cases.
+ */ + udelay(500); + asm volatile("sync;isync"); + +#ifdef CONFIG_DEEP_SLEEP + if (is_warm_boot()) { + /* enter self-refresh */ + setbits_be32(&ddr->sdram_cfg_2, SDRAM_CFG2_FRC_SR); + /* do board specific memory setup */ + board_mem_sleep_setup(); + temp_sdram_cfg = (in_be32(&ddr->sdram_cfg) | SDRAM_CFG_BI); + } else +#endif + temp_sdram_cfg = (in_be32(&ddr->sdram_cfg) & ~SDRAM_CFG_BI); + + /* Let the controller go */ + out_be32(&ddr->sdram_cfg, temp_sdram_cfg | SDRAM_CFG_MEM_EN); + asm volatile("sync;isync"); + + total_gb_size_per_controller = 0; + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { + if (!(regs->cs[i].config & 0x80000000)) + continue; + total_gb_size_per_controller += 1 << ( + ((regs->cs[i].config >> 14) & 0x3) + 2 + + ((regs->cs[i].config >> 8) & 0x7) + 12 + + ((regs->cs[i].config >> 0) & 0x7) + 8 + + 3 - ((regs->ddr_sdram_cfg >> 19) & 0x3) - + 26); /* minus 26 (count of 64M) */ + } + if (fsl_ddr_get_intl3r() & 0x80000000) /* 3-way interleaving */ + total_gb_size_per_controller *= 3; + else if (regs->cs[0].config & 0x20000000) /* 2-way interleaving */ + total_gb_size_per_controller <<= 1; + /* + * total memory / bus width = transactions needed + * transactions needed / data rate = seconds + * to add plenty of buffer, double the time + * For example, 2GB on 666MT/s 64-bit bus takes about 402ms + * Let's wait for 800ms + */ + bus_width = 3 - ((ddr->sdram_cfg & SDRAM_CFG_DBW_MASK) + >> SDRAM_CFG_DBW_SHIFT); + timeout = ((total_gb_size_per_controller << (6 - bus_width)) * 100 / + (get_ddr_freq(ctrl_num) >> 20)) << 1; +#ifdef CONFIG_SYS_FSL_ERRATUM_DDR111_DDR134 + timeout_save = timeout; +#endif + total_gb_size_per_controller >>= 4; /* shift down to gb size */ + debug("total %d GB\n", total_gb_size_per_controller); + debug("Need to wait up to %d * 10ms\n", timeout); + + /* Poll DDR_SDRAM_CFG_2[D_INIT] bit until auto-data init is done. */ + while ((in_be32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT) && + (timeout >= 0)) { + udelay(10000); /* throttle polling rate */ + timeout--; + } + + if (timeout <= 0) + printf("Waiting for D_INIT timeout. Memory may not work.\n"); + +#ifdef CONFIG_SYS_FSL_ERRATUM_DDR111_DDR134 + /* continue this workaround */ + + /* 4. 
+	   Clear DEBUG3[21] */
+	clrbits_be32(&ddr->debug[2], 0x400);
+	debug("Clearing D3[21] to 0x%08x\n", in_be32(&ddr->debug[2]));
+
+	/* DDR134 workaround starts */
+	/* A: Clear sdram_cfg_2[odt_cfg] */
+	clrbits_be32(&ddr->sdram_cfg_2, SDRAM_CFG2_ODT_CFG_MASK);
+	debug("Clearing SDRAM_CFG2[ODT_CFG] to 0x%08x\n",
+		in_be32(&ddr->sdram_cfg_2));
+
+	/* B: Set DEBUG1[15] */
+	setbits_be32(&ddr->debug[0], 0x10000);
+	debug("Setting D1[15] to 0x%08x\n", in_be32(&ddr->debug[0]));
+
+	/* C: Set timing_cfg_2[cpo] to 0b11111 */
+	setbits_be32(&ddr->timing_cfg_2, TIMING_CFG_2_CPO_MASK);
+	debug("Setting TIMING_CFG_2[CPO] to 0x%08x\n",
+		in_be32(&ddr->timing_cfg_2));
+
+	/* D: Set D6 to 0x9f9f9f9f */
+	out_be32(&ddr->debug[5], 0x9f9f9f9f);
+	debug("Setting D6 to 0x%08x\n", in_be32(&ddr->debug[5]));
+
+	/* E: Set D7 to 0x9f9f9f9f */
+	out_be32(&ddr->debug[6], 0x9f9f9f9f);
+	debug("Setting D7 to 0x%08x\n", in_be32(&ddr->debug[6]));
+
+	/* F: Set D2[20] */
+	setbits_be32(&ddr->debug[1], 0x800);
+	debug("Setting D2[20] to 0x%08x\n", in_be32(&ddr->debug[1]));
+
+	/* G: Poll on D2[20] until cleared */
+	while (in_be32(&ddr->debug[1]) & 0x800)
+		udelay(10000);	/* throttle polling rate */
+
+	/* H: Clear D1[15] */
+	clrbits_be32(&ddr->debug[0], 0x10000);
+	debug("Clearing D1[15] to 0x%08x\n", in_be32(&ddr->debug[0]));
+
+	/* I: Set sdram_cfg_2[odt_cfg] */
+	setbits_be32(&ddr->sdram_cfg_2,
+		regs->ddr_sdram_cfg_2 & SDRAM_CFG2_ODT_CFG_MASK);
+	debug("Setting sdram_cfg_2 to 0x%08x\n", in_be32(&ddr->sdram_cfg_2));
+
+	/* Continuing with the DDR111 workaround */
+	/* 5. Set D2[21] */
+	setbits_be32(&ddr->debug[1], 0x400);
+	debug("Setting D2[21] to 0x%08x\n", in_be32(&ddr->debug[1]));
+
+	/* 6. Poll D2[21] until it is cleared */
+	while (in_be32(&ddr->debug[1]) & 0x400)
+		udelay(10000);	/* throttle polling rate */
+
+	/* 7. Wait for the state machine's 2nd run, roughly 400ms/GB */
+	debug("Wait for %d * 10ms\n", timeout_save);
+	udelay(timeout_save * 10000);
+
+	/* 8. Set sdram_cfg_2[dinit] if the option requires it */
+	setbits_be32(&ddr->sdram_cfg_2,
+		regs->ddr_sdram_cfg_2 & SDRAM_CFG2_D_INIT);
+	debug("Setting sdram_cfg_2 to 0x%08x\n", in_be32(&ddr->sdram_cfg_2));
+
+	/* 9. Poll until dinit is cleared */
+	timeout = timeout_save;
+	debug("Need to wait up to %d * 10ms\n", timeout);
+	while ((in_be32(&ddr->sdram_cfg_2) & SDRAM_CFG2_D_INIT) &&
+		(timeout >= 0)) {
+		udelay(10000);	/* throttle polling rate */
+		timeout--;
+	}
+
+	if (timeout <= 0)
+		printf("Waiting for D_INIT timeout. Memory may not work.\n");
+
+	/* 10. Clear EEBACR[3] */
+	clrbits_be32(&ecm->eebacr, 0x10000000);
+	debug("Clearing EEBACR[3] to 0x%08x\n", in_be32(&ecm->eebacr));
+
+	if (csn != -1) {
+		csn_bnds_t = (unsigned int *)&regs->cs[csn].bnds;
+		*csn_bnds_t = csn_bnds_backup;
+		debug("Change cs%d_bnds back to 0x%08x\n",
+			csn, regs->cs[csn].bnds);
+		setbits_be32(&ddr->sdram_cfg, 0x2);	/* MEM_HALT */
+		switch (csn) {
+		case 0:
+			out_be32(&ddr->cs0_bnds, regs->cs[csn].bnds);
+			break;
+		case 1:
+			out_be32(&ddr->cs1_bnds, regs->cs[csn].bnds);
+			break;
+#if CONFIG_CHIP_SELECTS_PER_CTRL > 2
+		case 2:
+			out_be32(&ddr->cs2_bnds, regs->cs[csn].bnds);
+			break;
+		case 3:
+			out_be32(&ddr->cs3_bnds, regs->cs[csn].bnds);
+			break;
+#endif
+		}
+		clrbits_be32(&ddr->sdram_cfg, 0x2);
+	}
+#endif /* CONFIG_SYS_FSL_ERRATUM_DDR111_DDR134 */
+#ifdef CONFIG_DEEP_SLEEP
+	if (is_warm_boot())
+		/* exit self-refresh */
+		clrbits_be32(&ddr->sdram_cfg_2, SDRAM_CFG2_FRC_SR);
+#endif
+}
diff --git a/roms/u-boot/drivers/ddr/fsl/mpc86xx_ddr.c b/roms/u-boot/drivers/ddr/fsl/mpc86xx_ddr.c
new file mode 100644
index 000000000..43ed1ba43
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/fsl/mpc86xx_ddr.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <common.h>
+#include <log.h>
+#include <asm/io.h>
+#include <fsl_ddr_sdram.h>
+#include <linux/delay.h>
+
+#if (CONFIG_CHIP_SELECTS_PER_CTRL > 4)
+#error Invalid setting for CONFIG_CHIP_SELECTS_PER_CTRL
+#endif
+
+void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs,
+			     unsigned int ctrl_num, int step)
+{
+	unsigned int i;
+	struct ccsr_ddr __iomem *ddr;
+
+	switch (ctrl_num) {
+	case 0:
+		ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
+		break;
+	case 1:
+		ddr = (void *)CONFIG_SYS_FSL_DDR2_ADDR;
+		break;
+	default:
+		printf("%s unexpected ctrl_num = %u\n", __func__, ctrl_num);
+		return;
+	}
+
+	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
+		if (i == 0) {
+			out_be32(&ddr->cs0_bnds, regs->cs[i].bnds);
+			out_be32(&ddr->cs0_config, regs->cs[i].config);
+		} else if (i == 1) {
+			out_be32(&ddr->cs1_bnds, regs->cs[i].bnds);
+			out_be32(&ddr->cs1_config, regs->cs[i].config);
+		} else if (i == 2) {
+			out_be32(&ddr->cs2_bnds, regs->cs[i].bnds);
+			out_be32(&ddr->cs2_config, regs->cs[i].config);
+		} else if (i == 3) {
+			out_be32(&ddr->cs3_bnds, regs->cs[i].bnds);
+			out_be32(&ddr->cs3_config, regs->cs[i].config);
+		}
+	}
+
+	out_be32(&ddr->timing_cfg_3, regs->timing_cfg_3);
+	out_be32(&ddr->timing_cfg_0, regs->timing_cfg_0);
+	out_be32(&ddr->timing_cfg_1, regs->timing_cfg_1);
+	out_be32(&ddr->timing_cfg_2, regs->timing_cfg_2);
+	out_be32(&ddr->sdram_cfg_2, regs->ddr_sdram_cfg_2);
+	out_be32(&ddr->sdram_mode, regs->ddr_sdram_mode);
+	out_be32(&ddr->sdram_mode_2, regs->ddr_sdram_mode_2);
+	out_be32(&ddr->sdram_md_cntl, regs->ddr_sdram_md_cntl);
+	out_be32(&ddr->sdram_interval, regs->ddr_sdram_interval);
+	out_be32(&ddr->sdram_data_init, regs->ddr_data_init);
+	out_be32(&ddr->sdram_clk_cntl, regs->ddr_sdram_clk_cntl);
+	out_be32(&ddr->init_addr, regs->ddr_init_addr);
+	out_be32(&ddr->init_ext_addr, regs->ddr_init_ext_addr);
+
+	debug("before go\n");
+
+	/*
+	 * 200 painful microseconds must elapse between
+	 * the DDR clock setup and the DDR config enable.
+	 */
+	udelay(200);
+	asm volatile("sync;isync");
+
+	out_be32(&ddr->sdram_cfg, regs->ddr_sdram_cfg);
+
+	/*
+	 * Poll DDR_SDRAM_CFG_2[D_INIT] bit until auto-data init is done
+	 */
+	while (in_be32(&ddr->sdram_cfg_2) & 0x10) {
+		udelay(10000);	/* throttle polling rate */
+	}
+}
diff --git a/roms/u-boot/drivers/ddr/fsl/options.c b/roms/u-boot/drivers/ddr/fsl/options.c
new file mode 100644
index 000000000..c000a45f8
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/fsl/options.c
@@ -0,0 +1,1428 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ */
+
+#include <common.h>
+#include <env.h>
+#include <hwconfig.h>
+#include <fsl_ddr_sdram.h>
+#include <log.h>
+
+#include <fsl_ddr.h>
+#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
+	defined(CONFIG_ARM)
+#include <asm/arch/clock.h>
+#endif
+
+/*
+ * Use our own stack-based buffer before relocation to allow accessing longer
+ * hwconfig strings that might be in the environment before we've relocated.
+ * This is fragile, both in its use of the stack and in assuming the buffer
+ * is big enough; however, env_get_f() will at least warn about the latter.
+ */
+
+/* Board-specific functions defined in each board's ddr.c */
+void __weak fsl_ddr_board_options(memctl_options_t *popts,
+				  dimm_params_t *pdimm,
+				  unsigned int ctrl_num)
+{
+	return;
+}
+
+struct dynamic_odt {
+	unsigned int odt_rd_cfg;
+	unsigned int odt_wr_cfg;
+	unsigned int odt_rtt_norm;
+	unsigned int odt_rtt_wr;
+};
+
+#ifdef CONFIG_SYS_FSL_DDR4
+/* Quad rank is not verified yet due to availability.
+ * Replacing 20 OHM with 34 OHM since DDR4 doesn't have a 20 OHM option.
+ */
+static __maybe_unused const struct dynamic_odt single_Q[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
+		DDR4_RTT_34_OHM,	/* unverified */
+		DDR4_RTT_120_OHM
+	},
+	{	/* cs1 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR4_RTT_OFF,
+		DDR4_RTT_120_OHM
+	},
+	{	/* cs2 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_120_OHM
+	},
+	{	/* cs3 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,	/* tied high */
+		DDR4_RTT_OFF,
+		DDR4_RTT_120_OHM
+	}
+};
+
+static __maybe_unused const struct dynamic_odt single_D[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_ALL,
+		DDR4_RTT_40_OHM,
+		DDR4_RTT_OFF
+	},
+	{	/* cs1 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR4_RTT_OFF,
+		DDR4_RTT_OFF
+	},
+	{0, 0, 0, 0},
+	{0, 0, 0, 0}
+};
+
+static __maybe_unused const struct dynamic_odt single_S[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_ALL,
+		DDR4_RTT_40_OHM,
+		DDR4_RTT_OFF
+	},
+	{0, 0, 0, 0},
+	{0, 0, 0, 0},
+	{0, 0, 0, 0},
+};
+
+static __maybe_unused const struct dynamic_odt dual_DD[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_SAME_DIMM,
+		DDR4_RTT_120_OHM,
+		DDR4_RTT_OFF
+	},
+	{	/* cs1 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_OFF
+	},
+	{	/* cs2 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_SAME_DIMM,
+		DDR4_RTT_120_OHM,
+		DDR4_RTT_OFF
+	},
+	{	/* cs3 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_OFF
+	}
+};
+
+static __maybe_unused const struct dynamic_odt dual_DS[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_SAME_DIMM,
+		DDR4_RTT_120_OHM,
+		DDR4_RTT_OFF
+	},
+	{	/* cs1 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR4_RTT_34_OHM,
+		DDR4_RTT_OFF
+	},
+	{	/* cs2 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_ALL,
+		DDR4_RTT_34_OHM,
DDR4_RTT_120_OHM + }, + {0, 0, 0, 0} +}; +static __maybe_unused const struct dynamic_odt dual_SD[4] = { + { /* cs0 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_ALL, + DDR4_RTT_34_OHM, + DDR4_RTT_120_OHM + }, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR4_RTT_120_OHM, + DDR4_RTT_OFF + }, + { /* cs3 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_OTHER_DIMM, + DDR4_RTT_34_OHM, + DDR4_RTT_OFF + } +}; + +static __maybe_unused const struct dynamic_odt dual_SS[4] = { + { /* cs0 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_ALL, + DDR4_RTT_34_OHM, + DDR4_RTT_120_OHM + }, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_ALL, + DDR4_RTT_34_OHM, + DDR4_RTT_120_OHM + }, + {0, 0, 0, 0} +}; + +static __maybe_unused const struct dynamic_odt dual_D0[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR4_RTT_40_OHM, + DDR4_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, + DDR4_RTT_OFF, + DDR4_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0} +}; + +static __maybe_unused const struct dynamic_odt dual_0D[4] = { + {0, 0, 0, 0}, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR4_RTT_40_OHM, + DDR4_RTT_OFF + }, + { /* cs3 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, + DDR4_RTT_OFF, + DDR4_RTT_OFF + } +}; + +static __maybe_unused const struct dynamic_odt dual_S0[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR4_RTT_40_OHM, + DDR4_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {0, 0, 0, 0} + +}; + +static __maybe_unused const struct dynamic_odt dual_0S[4] = { + {0, 0, 0, 0}, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR4_RTT_40_OHM, + DDR4_RTT_OFF + }, + {0, 0, 0, 0} + +}; + +static __maybe_unused const struct dynamic_odt odt_unknown[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR4_RTT_120_OHM, + DDR4_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR4_RTT_120_OHM, + DDR4_RTT_OFF + }, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR4_RTT_120_OHM, + DDR4_RTT_OFF + }, + { /* cs3 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR4_RTT_120_OHM, + DDR4_RTT_OFF + } +}; +#elif defined(CONFIG_SYS_FSL_DDR3) +static __maybe_unused const struct dynamic_odt single_Q[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS_AND_OTHER_DIMM, + DDR3_RTT_20_OHM, + DDR3_RTT_120_OHM + }, + { /* cs1 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, /* tied high */ + DDR3_RTT_OFF, + DDR3_RTT_120_OHM + }, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS_AND_OTHER_DIMM, + DDR3_RTT_20_OHM, + DDR3_RTT_120_OHM + }, + { /* cs3 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, /* tied high */ + DDR3_RTT_OFF, + DDR3_RTT_120_OHM + } +}; + +static __maybe_unused const struct dynamic_odt single_D[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_ALL, + DDR3_RTT_40_OHM, + DDR3_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, + DDR3_RTT_OFF, + DDR3_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0} +}; + +static __maybe_unused const struct dynamic_odt single_S[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_ALL, + DDR3_RTT_40_OHM, + DDR3_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {0, 0, 0, 0}, +}; + +static __maybe_unused const struct dynamic_odt dual_DD[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_OTHER_DIMM, + DDR3_RTT_30_OHM, + DDR3_RTT_OFF + }, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + 
FSL_DDR_ODT_SAME_DIMM, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + }, + { /* cs3 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_OTHER_DIMM, + DDR3_RTT_30_OHM, + DDR3_RTT_OFF + } +}; + +static __maybe_unused const struct dynamic_odt dual_DS[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_OTHER_DIMM, + DDR3_RTT_30_OHM, + DDR3_RTT_OFF + }, + { /* cs2 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_ALL, + DDR3_RTT_20_OHM, + DDR3_RTT_120_OHM + }, + {0, 0, 0, 0} +}; +static __maybe_unused const struct dynamic_odt dual_SD[4] = { + { /* cs0 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_ALL, + DDR3_RTT_20_OHM, + DDR3_RTT_120_OHM + }, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + }, + { /* cs3 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_OTHER_DIMM, + DDR3_RTT_20_OHM, + DDR3_RTT_OFF + } +}; + +static __maybe_unused const struct dynamic_odt dual_SS[4] = { + { /* cs0 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_ALL, + DDR3_RTT_30_OHM, + DDR3_RTT_120_OHM + }, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_ALL, + DDR3_RTT_30_OHM, + DDR3_RTT_120_OHM + }, + {0, 0, 0, 0} +}; + +static __maybe_unused const struct dynamic_odt dual_D0[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR3_RTT_40_OHM, + DDR3_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, + DDR3_RTT_OFF, + DDR3_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0} +}; + +static __maybe_unused const struct dynamic_odt dual_0D[4] = { + {0, 0, 0, 0}, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_SAME_DIMM, + DDR3_RTT_40_OHM, + DDR3_RTT_OFF + }, + { /* cs3 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, + DDR3_RTT_OFF, + DDR3_RTT_OFF + } +}; + +static __maybe_unused const struct dynamic_odt dual_S0[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR3_RTT_40_OHM, + DDR3_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {0, 0, 0, 0} + +}; + +static __maybe_unused const struct dynamic_odt dual_0S[4] = { + {0, 0, 0, 0}, + {0, 0, 0, 0}, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR3_RTT_40_OHM, + DDR3_RTT_OFF + }, + {0, 0, 0, 0} + +}; + +static __maybe_unused const struct dynamic_odt odt_unknown[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + }, + { /* cs2 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + }, + { /* cs3 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_CS, + DDR3_RTT_120_OHM, + DDR3_RTT_OFF + } +}; +#else /* CONFIG_SYS_FSL_DDR3 */ +static __maybe_unused const struct dynamic_odt single_Q[4] = { + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {0, 0, 0, 0} +}; + +static __maybe_unused const struct dynamic_odt single_D[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_ALL, + DDR2_RTT_150_OHM, + DDR2_RTT_OFF + }, + { /* cs1 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_NEVER, + DDR2_RTT_OFF, + DDR2_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0} +}; + +static __maybe_unused const struct dynamic_odt single_S[4] = { + { /* cs0 */ + FSL_DDR_ODT_NEVER, + FSL_DDR_ODT_ALL, + DDR2_RTT_150_OHM, + DDR2_RTT_OFF + }, + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {0, 0, 0, 0}, +}; + +static __maybe_unused const struct dynamic_odt dual_DD[4] = { + { /* cs0 */ + FSL_DDR_ODT_OTHER_DIMM, + FSL_DDR_ODT_OTHER_DIMM, + DDR2_RTT_75_OHM, + DDR2_RTT_OFF + }, + { /* cs1 */ + 
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	},
+	{	/* cs2 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{	/* cs3 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	}
+};
+
+static __maybe_unused const struct dynamic_odt dual_DS[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{	/* cs1 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	},
+	{	/* cs2 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{0, 0, 0, 0}
+};
+
+static __maybe_unused const struct dynamic_odt dual_SD[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{0, 0, 0, 0},
+	{	/* cs2 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{	/* cs3 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	}
+};
+
+static __maybe_unused const struct dynamic_odt dual_SS[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{0, 0, 0, 0},
+	{	/* cs2 */
+		FSL_DDR_ODT_OTHER_DIMM,
+		FSL_DDR_ODT_OTHER_DIMM,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{0, 0, 0, 0}
+};
+
+static __maybe_unused const struct dynamic_odt dual_D0[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_ALL,
+		DDR2_RTT_150_OHM,
+		DDR2_RTT_OFF
+	},
+	{	/* cs1 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	},
+	{0, 0, 0, 0},
+	{0, 0, 0, 0}
+};
+
+static __maybe_unused const struct dynamic_odt dual_0D[4] = {
+	{0, 0, 0, 0},
+	{0, 0, 0, 0},
+	{	/* cs2 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_ALL,
+		DDR2_RTT_150_OHM,
+		DDR2_RTT_OFF
+	},
+	{	/* cs3 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	}
+};
+
+static __maybe_unused const struct dynamic_odt dual_S0[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_CS,
+		DDR2_RTT_150_OHM,
+		DDR2_RTT_OFF
+	},
+	{0, 0, 0, 0},
+	{0, 0, 0, 0},
+	{0, 0, 0, 0}
+
+};
+
+static __maybe_unused const struct dynamic_odt dual_0S[4] = {
+	{0, 0, 0, 0},
+	{0, 0, 0, 0},
+	{	/* cs2 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_CS,
+		DDR2_RTT_150_OHM,
+		DDR2_RTT_OFF
+	},
+	{0, 0, 0, 0}
+
+};
+
+static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
+	{	/* cs0 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_CS,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{	/* cs1 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	},
+	{	/* cs2 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_CS,
+		DDR2_RTT_75_OHM,
+		DDR2_RTT_OFF
+	},
+	{	/* cs3 */
+		FSL_DDR_ODT_NEVER,
+		FSL_DDR_ODT_NEVER,
+		DDR2_RTT_OFF,
+		DDR2_RTT_OFF
+	}
+};
+#endif
+
+/*
+ * Automatically select bank interleaving mode based on DIMMs
+ * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
+ * This function only deals with one or two slots per controller.
+ */ +static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm) +{ +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1) + if (pdimm[0].n_ranks == 4) + return FSL_DDR_CS0_CS1_CS2_CS3; + else if (pdimm[0].n_ranks == 2) + return FSL_DDR_CS0_CS1; +#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2) +#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE + if (pdimm[0].n_ranks == 4) + return FSL_DDR_CS0_CS1_CS2_CS3; +#endif + if (pdimm[0].n_ranks == 2) { + if (pdimm[1].n_ranks == 2) + return FSL_DDR_CS0_CS1_CS2_CS3; + else + return FSL_DDR_CS0_CS1; + } +#endif + return 0; +} + +unsigned int populate_memctl_options(const common_timing_params_t *common_dimm, + memctl_options_t *popts, + dimm_params_t *pdimm, + unsigned int ctrl_num) +{ + unsigned int i; + char buf[HWCONFIG_BUFFER_SIZE]; +#if defined(CONFIG_SYS_FSL_DDR3) || \ + defined(CONFIG_SYS_FSL_DDR2) || \ + defined(CONFIG_SYS_FSL_DDR4) + const struct dynamic_odt *pdodt = odt_unknown; +#endif +#if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4) + ulong ddr_freq; +#endif + + /* + * Extract hwconfig from environment since we have not properly setup + * the environment but need it for ddr config params + */ + if (env_get_f("hwconfig", buf, sizeof(buf)) < 0) + buf[0] = '\0'; + +#if defined(CONFIG_SYS_FSL_DDR3) || \ + defined(CONFIG_SYS_FSL_DDR2) || \ + defined(CONFIG_SYS_FSL_DDR4) + /* Chip select options. */ +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1) + switch (pdimm[0].n_ranks) { + case 1: + pdodt = single_S; + break; + case 2: + pdodt = single_D; + break; + case 4: + pdodt = single_Q; + break; + } +#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + switch (pdimm[0].n_ranks) { +#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE + case 4: + pdodt = single_Q; + if (pdimm[1].n_ranks) + printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n"); + break; +#endif + case 2: + switch (pdimm[1].n_ranks) { + case 2: + pdodt = dual_DD; + break; + case 1: + pdodt = dual_DS; + break; + case 0: + pdodt = dual_D0; + break; + } + break; + case 1: + switch (pdimm[1].n_ranks) { + case 2: + pdodt = dual_SD; + break; + case 1: + pdodt = dual_SS; + break; + case 0: + pdodt = dual_S0; + break; + } + break; + case 0: + switch (pdimm[1].n_ranks) { + case 2: + pdodt = dual_0D; + break; + case 1: + pdodt = dual_0S; + break; + } + break; + } +#endif /* CONFIG_DIMM_SLOTS_PER_CTLR */ +#endif /* CONFIG_SYS_FSL_DDR2, 3, 4 */ + + /* Pick chip-select local options. */ + for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) { +#if defined(CONFIG_SYS_FSL_DDR3) || \ + defined(CONFIG_SYS_FSL_DDR2) || \ + defined(CONFIG_SYS_FSL_DDR4) + popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg; + popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg; + popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm; + popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr; +#else + popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER; + popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS; +#endif + popts->cs_local_opts[i].auto_precharge = 0; + } + + /* Pick interleaving mode. 
+	 */
+
+	/*
+	 * 0 = no interleaving
+	 * 1 = interleaving between 2 controllers
+	 */
+	popts->memctl_interleaving = 0;
+
+	/*
+	 * 0 = cacheline
+	 * 1 = page
+	 * 2 = (logical) bank
+	 * 3 = superbank (only if CS interleaving is enabled)
+	 */
+	popts->memctl_interleaving_mode = 0;
+
+	/*
+	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
+	 * 1: page: bit to the left of the column bits selects the memctl
+	 * 2: bank: bit to the left of the bank bits selects the memctl
+	 * 3: superbank: bit to the left of the chip select selects the memctl
+	 *
+	 * NOTE: ba_intlv (rank interleaving) is independent of memory
+	 * controller interleaving; it is only within a memory controller.
+	 * Must use superbank interleaving if rank interleaving is used and
+	 * memory controller interleaving is enabled.
+	 */
+
+	/*
+	 * 0 = no
+	 * 0x40 = CS0,CS1
+	 * 0x20 = CS2,CS3
+	 * 0x60 = CS0,CS1 + CS2,CS3
+	 * 0x04 = CS0,CS1,CS2,CS3
+	 */
+	popts->ba_intlv_ctl = 0;
+
+	/* Memory Organization Parameters */
+	popts->registered_dimm_en = common_dimm->all_dimms_registered;
+
+	/* Operational Mode Parameters */
+
+	/* Pick ECC modes */
+	popts->ecc_mode = 0;	/* 0 = disabled, 1 = enabled */
+#ifdef CONFIG_DDR_ECC
+	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
+		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
+			popts->ecc_mode = 1;
+	} else
+		popts->ecc_mode = 1;
+#endif
+	/* 1 = use memory controller to init data */
+	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
+
+	/*
+	 * Choose DQS config
+	 * 0 for DDR1
+	 * 1 for DDR2
+	 */
+#if defined(CONFIG_SYS_FSL_DDR1)
+	popts->dqs_config = 0;
+#elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
+	popts->dqs_config = 1;
+#endif
+
+	/* Choose self-refresh during sleep. */
+	popts->self_refresh_in_sleep = 1;
+
+	/* Choose dynamic power management mode. */
+	popts->dynamic_power = 0;
+
+	/*
+	 * check first dimm for primary sdram width,
+	 * presuming all dimms are similar
+	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
+	 */
+#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
+	if (pdimm[0].n_ranks != 0) {
+		if ((pdimm[0].data_width >= 64) && \
+			(pdimm[0].data_width <= 72))
+			popts->data_bus_width = 0;
+		else if ((pdimm[0].data_width >= 32) && \
+			(pdimm[0].data_width <= 40))
+			popts->data_bus_width = 1;
+		else {
+			panic("Error: data width %u is invalid!\n",
+				pdimm[0].data_width);
+		}
+	}
+#else
+	if (pdimm[0].n_ranks != 0) {
+		if (pdimm[0].primary_sdram_width == 64)
+			popts->data_bus_width = 0;
+		else if (pdimm[0].primary_sdram_width == 32)
+			popts->data_bus_width = 1;
+		else if (pdimm[0].primary_sdram_width == 16)
+			popts->data_bus_width = 2;
+		else {
+			panic("Error: primary sdram width %u is invalid!\n",
+				pdimm[0].primary_sdram_width);
+		}
+	}
+#endif
+
+	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
+
+	/* Choose burst length.
*/ +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) +#if defined(CONFIG_E500MC) + popts->otf_burst_chop_en = 0; /* on-the-fly burst chop disable */ + popts->burst_length = DDR_BL8; /* Fixed 8-beat burst len */ +#else + if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) { + /* 32-bit or 16-bit bus */ + popts->otf_burst_chop_en = 0; + popts->burst_length = DDR_BL8; + } else { + popts->otf_burst_chop_en = 1; /* on-the-fly burst chop */ + popts->burst_length = DDR_OTF; /* on-the-fly BC4 and BL8 */ + } +#endif +#else + popts->burst_length = DDR_BL4; /* has to be 4 for DDR2 */ +#endif + + /* Choose ddr controller address mirror mode */ +#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4) + for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) { + if (pdimm[i].n_ranks) { + popts->mirrored_dimm = pdimm[i].mirrored_dimm; + break; + } + } +#endif + + /* Global Timing Parameters. */ + debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num)); + + /* Pick a caslat override. */ + popts->cas_latency_override = 0; + popts->cas_latency_override_value = 3; + if (popts->cas_latency_override) { + debug("using caslat override value = %u\n", + popts->cas_latency_override_value); + } + + /* Decide whether to use the computed derated latency */ + popts->use_derated_caslat = 0; + + /* Choose an additive latency. */ + popts->additive_latency_override = 0; + popts->additive_latency_override_value = 3; + if (popts->additive_latency_override) { + debug("using additive latency override value = %u\n", + popts->additive_latency_override_value); + } + + /* + * 2T_EN setting + * + * Factors to consider for 2T_EN: + * - number of DIMMs installed + * - number of components, number of active ranks + * - how much time you want to spend playing around + */ + popts->twot_en = 0; + popts->threet_en = 0; + + /* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */ + if (popts->registered_dimm_en) + popts->ap_en = 1; /* 0 = disable, 1 = enable */ + else + popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */ + + if (hwconfig_sub_f("fsl_ddr", "parity", buf)) { + if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) { + if (popts->registered_dimm_en || + (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4)) + popts->ap_en = 1; + } + } + + /* + * BSTTOPRE precharge interval + * + * Set this to 0 for global auto precharge + * The value of 0x100 has been used for DDR1, DDR2, DDR3. + * It is not wrong. Any value should be OK. The performance depends on + * applications. There is no one good value for all. One way to set + * is to use 1/4 of refint value. + */ + popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps) + >> 2; + + /* + * Window for four activates -- tFAW + * + * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only + * FIXME: varies depending upon number of column addresses or data + * FIXME: width, was considering looking at pdimm->primary_sdram_width + */ +#if defined(CONFIG_SYS_FSL_DDR1) + popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1); + +#elif defined(CONFIG_SYS_FSL_DDR2) + /* + * x4/x8; some datasheets have 35000 + * x16 wide columns only? Use 50000? 
+	 */
+	popts->tfaw_window_four_activates_ps = 37500;
+
+#else
+	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
+#endif
+	popts->zq_en = 0;
+	popts->wrlvl_en = 0;
+#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
+	/*
+	 * Because DDR3 DIMMs use a fly-by topology, we suggest enabling
+	 * write leveling to meet tDQSS under different loading.
+	 */
+	popts->wrlvl_en = 1;
+	popts->zq_en = 1;
+	popts->wrlvl_override = 0;
+#endif
+
+	/*
+	 * Check interleaving configuration from environment.
+	 * Please refer to doc/README.fsl-ddr for details.
+	 *
+	 * If memory controller interleaving is enabled, then the data
+	 * bus widths must be programmed identically for all memory
+	 * controllers.
+	 *
+	 * Attempt to set all controllers to the same chip select
+	 * interleaving mode. It will make a best effort to get the
+	 * requested ranks interleaved together such that the result
+	 * is a subset of the requested configuration.
+	 *
+	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, mandatory 256-byte
+	 * interleaving is enabled.
+	 */
+#if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
+	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
+#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
+		;
+#else
+		goto done;
+#endif
+	if (pdimm[0].n_ranks == 0) {
+		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
+		popts->memctl_interleaving = 0;
+		goto done;
+	}
+	popts->memctl_interleaving = 1;
+#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
+	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
+	popts->memctl_interleaving = 1;
+	debug("256 Byte interleaving\n");
+#else
+	/*
+	 * Test null first; if CONFIG_HWCONFIG is not defined,
+	 * hwconfig_subarg_cmp_f() returns non-zero.
+	 */
+	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
+				  "null", buf)) {
+		popts->memctl_interleaving = 0;
+		debug("memory controller interleaving disabled.\n");
+	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
+					 "ctlr_intlv",
+					 "cacheline", buf)) {
+		popts->memctl_interleaving_mode =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
+		popts->memctl_interleaving =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+			0 : 1;
+	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
+					 "ctlr_intlv",
+					 "page", buf)) {
+		popts->memctl_interleaving_mode =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+			0 : FSL_DDR_PAGE_INTERLEAVING;
+		popts->memctl_interleaving =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+			0 : 1;
+	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
+					 "ctlr_intlv",
+					 "bank", buf)) {
+		popts->memctl_interleaving_mode =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+			0 : FSL_DDR_BANK_INTERLEAVING;
+		popts->memctl_interleaving =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+			0 : 1;
+	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
+					 "ctlr_intlv",
+					 "superbank", buf)) {
+		popts->memctl_interleaving_mode =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
+		popts->memctl_interleaving =
+			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
+ 0 : 1; +#if (CONFIG_SYS_NUM_DDR_CTLRS == 3) + } else if (hwconfig_subarg_cmp_f("fsl_ddr", + "ctlr_intlv", + "3way_1KB", buf)) { + popts->memctl_interleaving_mode = + FSL_DDR_3WAY_1KB_INTERLEAVING; + } else if (hwconfig_subarg_cmp_f("fsl_ddr", + "ctlr_intlv", + "3way_4KB", buf)) { + popts->memctl_interleaving_mode = + FSL_DDR_3WAY_4KB_INTERLEAVING; + } else if (hwconfig_subarg_cmp_f("fsl_ddr", + "ctlr_intlv", + "3way_8KB", buf)) { + popts->memctl_interleaving_mode = + FSL_DDR_3WAY_8KB_INTERLEAVING; +#elif (CONFIG_SYS_NUM_DDR_CTLRS == 4) + } else if (hwconfig_subarg_cmp_f("fsl_ddr", + "ctlr_intlv", + "4way_1KB", buf)) { + popts->memctl_interleaving_mode = + FSL_DDR_4WAY_1KB_INTERLEAVING; + } else if (hwconfig_subarg_cmp_f("fsl_ddr", + "ctlr_intlv", + "4way_4KB", buf)) { + popts->memctl_interleaving_mode = + FSL_DDR_4WAY_4KB_INTERLEAVING; + } else if (hwconfig_subarg_cmp_f("fsl_ddr", + "ctlr_intlv", + "4way_8KB", buf)) { + popts->memctl_interleaving_mode = + FSL_DDR_4WAY_8KB_INTERLEAVING; +#endif + } else { + popts->memctl_interleaving = 0; + printf("hwconfig has unrecognized parameter for ctlr_intlv.\n"); + } +#endif /* CONFIG_SYS_FSL_DDR_INTLV_256B */ +done: +#endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */ + if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) && + (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) { + /* test null first. if CONFIG_HWCONFIG is not defined, + * hwconfig_subarg_cmp_f returns non-zero */ + if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv", + "null", buf)) + debug("bank interleaving disabled.\n"); + else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv", + "cs0_cs1", buf)) + popts->ba_intlv_ctl = FSL_DDR_CS0_CS1; + else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv", + "cs2_cs3", buf)) + popts->ba_intlv_ctl = FSL_DDR_CS2_CS3; + else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv", + "cs0_cs1_and_cs2_cs3", buf)) + popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3; + else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv", + "cs0_cs1_cs2_cs3", buf)) + popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3; + else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv", + "auto", buf)) + popts->ba_intlv_ctl = auto_bank_intlv(pdimm); + else + printf("hwconfig has unrecognized parameter for bank_intlv.\n"); + switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) { + case FSL_DDR_CS0_CS1_CS2_CS3: +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1) + if (pdimm[0].n_ranks < 4) { + popts->ba_intlv_ctl = 0; + printf("Not enough bank(chip-select) for " + "CS0+CS1+CS2+CS3 on controller %d, " + "interleaving disabled!\n", ctrl_num); + } +#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2) +#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE + if (pdimm[0].n_ranks == 4) + break; +#endif + if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) { + popts->ba_intlv_ctl = 0; + printf("Not enough bank(chip-select) for " + "CS0+CS1+CS2+CS3 on controller %d, " + "interleaving disabled!\n", ctrl_num); + } + if (pdimm[0].capacity != pdimm[1].capacity) { + popts->ba_intlv_ctl = 0; + printf("Not identical DIMM size for " + "CS0+CS1+CS2+CS3 on controller %d, " + "interleaving disabled!\n", ctrl_num); + } +#endif + break; + case FSL_DDR_CS0_CS1: + if (pdimm[0].n_ranks < 2) { + popts->ba_intlv_ctl = 0; + printf("Not enough bank(chip-select) for " + "CS0+CS1 on controller %d, " + "interleaving disabled!\n", ctrl_num); + } + break; + case FSL_DDR_CS2_CS3: +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1) + if (pdimm[0].n_ranks < 4) { + popts->ba_intlv_ctl = 0; + printf("Not enough bank(chip-select) for CS2+CS3 " + "on controller %d, interleaving disabled!\n", 
ctrl_num); + } +#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + if (pdimm[1].n_ranks < 2) { + popts->ba_intlv_ctl = 0; + printf("Not enough bank(chip-select) for CS2+CS3 " + "on controller %d, interleaving disabled!\n", ctrl_num); + } +#endif + break; + case FSL_DDR_CS0_CS1_AND_CS2_CS3: +#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1) + if (pdimm[0].n_ranks < 4) { + popts->ba_intlv_ctl = 0; + printf("Not enough bank(CS) for CS0+CS1 and " + "CS2+CS3 on controller %d, " + "interleaving disabled!\n", ctrl_num); + } +#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2) + if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) { + popts->ba_intlv_ctl = 0; + printf("Not enough bank(CS) for CS0+CS1 and " + "CS2+CS3 on controller %d, " + "interleaving disabled!\n", ctrl_num); + } +#endif + break; + default: + popts->ba_intlv_ctl = 0; + break; + } + } + + if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) { + if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf)) + popts->addr_hash = 0; + else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", + "true", buf)) + popts->addr_hash = 1; + } + + if (pdimm[0].n_ranks == 4) + popts->quad_rank_present = 1; + + popts->package_3ds = pdimm->package_3ds; + +#if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4) + ddr_freq = get_ddr_freq(ctrl_num) / 1000000; + if (popts->registered_dimm_en) { + popts->rcw_override = 1; + popts->rcw_1 = 0x000a5a00; + if (ddr_freq <= 800) + popts->rcw_2 = 0x00000000; + else if (ddr_freq <= 1066) + popts->rcw_2 = 0x00100000; + else if (ddr_freq <= 1333) + popts->rcw_2 = 0x00200000; + else + popts->rcw_2 = 0x00300000; + } +#endif + + fsl_ddr_board_options(popts, pdimm, ctrl_num); + + return 0; +} + +void check_interleaving_options(fsl_ddr_info_t *pinfo) +{ + int i, j, k, check_n_ranks, intlv_invalid = 0; + unsigned int check_intlv, check_n_row_addr, check_n_col_addr; + unsigned long long check_rank_density; + struct dimm_params_s *dimm; + int first_ctrl = pinfo->first_ctrl; + int last_ctrl = first_ctrl + pinfo->num_ctrls - 1; + + /* + * Check if all controllers are configured for memory + * controller interleaving. Identical dimms are recommended. At least + * the size, row and col address should be checked. + */ + j = 0; + check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks; + check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density; + check_n_row_addr = pinfo->dimm_params[first_ctrl][0].n_row_addr; + check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr; + check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode; + for (i = first_ctrl; i <= last_ctrl; i++) { + dimm = &pinfo->dimm_params[i][0]; + if (!pinfo->memctl_opts[i].memctl_interleaving) { + continue; + } else if (((check_rank_density != dimm->rank_density) || + (check_n_ranks != dimm->n_ranks) || + (check_n_row_addr != dimm->n_row_addr) || + (check_n_col_addr != dimm->n_col_addr) || + (check_intlv != + pinfo->memctl_opts[i].memctl_interleaving_mode))){ + intlv_invalid = 1; + break; + } else { + j++; + } + + } + if (intlv_invalid) { + for (i = first_ctrl; i <= last_ctrl; i++) + pinfo->memctl_opts[i].memctl_interleaving = 0; + printf("Not all DIMMs are identical. 
" + "Memory controller interleaving disabled.\n"); + } else { + switch (check_intlv) { + case FSL_DDR_256B_INTERLEAVING: + case FSL_DDR_CACHE_LINE_INTERLEAVING: + case FSL_DDR_PAGE_INTERLEAVING: + case FSL_DDR_BANK_INTERLEAVING: + case FSL_DDR_SUPERBANK_INTERLEAVING: +#if (3 == CONFIG_SYS_NUM_DDR_CTLRS) + k = 2; +#else + k = CONFIG_SYS_NUM_DDR_CTLRS; +#endif + break; + case FSL_DDR_3WAY_1KB_INTERLEAVING: + case FSL_DDR_3WAY_4KB_INTERLEAVING: + case FSL_DDR_3WAY_8KB_INTERLEAVING: + case FSL_DDR_4WAY_1KB_INTERLEAVING: + case FSL_DDR_4WAY_4KB_INTERLEAVING: + case FSL_DDR_4WAY_8KB_INTERLEAVING: + default: + k = CONFIG_SYS_NUM_DDR_CTLRS; + break; + } + debug("%d of %d controllers are interleaving.\n", j, k); + if (j && (j != k)) { + for (i = first_ctrl; i <= last_ctrl; i++) + pinfo->memctl_opts[i].memctl_interleaving = 0; + if ((last_ctrl - first_ctrl) > 1) + puts("Not all controllers have compatible interleaving mode. All disabled.\n"); + } + } + debug("Checking interleaving options completed\n"); +} + +int fsl_use_spd(void) +{ + int use_spd = 0; + +#ifdef CONFIG_DDR_SPD + char buf[HWCONFIG_BUFFER_SIZE]; + + /* + * Extract hwconfig from environment since we have not properly setup + * the environment but need it for ddr config params + */ + if (env_get_f("hwconfig", buf, sizeof(buf)) < 0) + buf[0] = '\0'; + + /* if hwconfig is not enabled, or "sdram" is not defined, use spd */ + if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) { + if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf)) + use_spd = 1; + else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", + "fixed", buf)) + use_spd = 0; + else + use_spd = 1; + } else + use_spd = 1; +#endif + + return use_spd; +} diff --git a/roms/u-boot/drivers/ddr/fsl/util.c b/roms/u-boot/drivers/ddr/fsl/util.c new file mode 100644 index 000000000..ac4f8d273 --- /dev/null +++ b/roms/u-boot/drivers/ddr/fsl/util.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2008-2014 Freescale Semiconductor, Inc. + */ + +#include <common.h> +#ifdef CONFIG_PPC +#include <asm/fsl_law.h> +#endif +#include <div64.h> +#include <linux/delay.h> + +#include <fsl_ddr.h> +#include <fsl_immap.h> +#include <log.h> +#include <asm/io.h> +#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \ + defined(CONFIG_ARM) +#include <asm/arch/clock.h> +#endif + +/* To avoid 64-bit full-divides, we factor this here */ +#define ULL_2E12 2000000000000ULL +#define UL_5POW12 244140625UL +#define UL_2POW13 (1UL << 13) + +#define ULL_8FS 0xFFFFFFFFULL + +u32 fsl_ddr_get_version(unsigned int ctrl_num) +{ + struct ccsr_ddr __iomem *ddr; + u32 ver_major_minor_errata; + + switch (ctrl_num) { + case 0: + ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR; + break; +#if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 1) + case 1: + ddr = (void *)CONFIG_SYS_FSL_DDR2_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 2) + case 2: + ddr = (void *)CONFIG_SYS_FSL_DDR3_ADDR; + break; +#endif +#if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 3) + case 3: + ddr = (void *)CONFIG_SYS_FSL_DDR4_ADDR; + break; +#endif + default: + printf("%s unexpected ctrl_num = %u\n", __func__, ctrl_num); + return 0; + } + ver_major_minor_errata = (ddr_in32(&ddr->ip_rev1) & 0xFFFF) << 8; + ver_major_minor_errata |= (ddr_in32(&ddr->ip_rev2) & 0xFF00) >> 8; + + return ver_major_minor_errata; +} + +/* + * Round up mclk_ps to nearest 1 ps in memory controller code + * if the error is 0.5ps or more. 
+ *
+ * If an imprecise data rate is too high due to rounding error
+ * propagation, use a suitably rounded mclk_ps to compute
+ * a working memory controller configuration.
+ */
+unsigned int get_memory_clk_period_ps(const unsigned int ctrl_num)
+{
+	unsigned int data_rate = get_ddr_freq(ctrl_num);
+	unsigned int result;
+
+	/* Round to nearest 1 ps, being careful about 64-bit multiply/divide */
+	unsigned long long rem, mclk_ps = ULL_2E12;
+
+	/* Now perform the big divide, the result fits in 32-bits */
+	rem = do_div(mclk_ps, data_rate);
+	result = (rem >= (data_rate >> 1)) ? mclk_ps + 1 : mclk_ps;
+
+	return result;
+}
+
+/* Convert picoseconds into DRAM clock cycles (rounding up if needed). */
+unsigned int picos_to_mclk(const unsigned int ctrl_num, unsigned int picos)
+{
+	unsigned long long clks, clks_rem;
+	unsigned long data_rate = get_ddr_freq(ctrl_num);
+
+	/* Short circuit for zero picos */
+	if (!picos)
+		return 0;
+
+	/* First multiply the time by the data rate (32x32 => 64) */
+	clks = picos * (unsigned long long)data_rate;
+	/*
+	 * Now divide by 5^12 and track the 32-bit remainder, then divide
+	 * by 2*(2^12) using shifts (and updating the remainder).
+	 */
+	clks_rem = do_div(clks, UL_5POW12);
+	clks_rem += (clks & (UL_2POW13-1)) * UL_5POW12;
+	clks >>= 13;
+
+	/* If we had a remainder greater than the 1ps error, then round up */
+	if (clks_rem > data_rate)
+		clks++;
+
+	/* Clamp to the maximum representable value */
+	if (clks > ULL_8FS)
+		clks = ULL_8FS;
+	return (unsigned int) clks;
+}
+
+unsigned int mclk_to_picos(const unsigned int ctrl_num, unsigned int mclk)
+{
+	return get_memory_clk_period_ps(ctrl_num) * mclk;
+}
+
+#ifdef CONFIG_PPC
+void
+__fsl_ddr_set_lawbar(const common_timing_params_t *memctl_common_params,
+		     unsigned int law_memctl,
+		     unsigned int ctrl_num)
+{
+	unsigned long long base = memctl_common_params->base_address;
+	unsigned long long size = memctl_common_params->total_mem;
+
+	/*
+	 * If no DIMMs on this controller, do not proceed any further.
+ */ + if (!memctl_common_params->ndimms_present) { + return; + } + +#if !defined(CONFIG_PHYS_64BIT) + if (base >= CONFIG_MAX_MEM_MAPPED) + return; + if ((base + size) >= CONFIG_MAX_MEM_MAPPED) + size = CONFIG_MAX_MEM_MAPPED - base; +#endif + if (set_ddr_laws(base, size, law_memctl) < 0) { + printf("%s: ERROR (ctrl #%d, TRGT ID=%x)\n", __func__, ctrl_num, + law_memctl); + return ; + } + debug("setup ddr law base = 0x%llx, size 0x%llx, TRGT_ID 0x%x\n", + base, size, law_memctl); +} + +__attribute__((weak, alias("__fsl_ddr_set_lawbar"))) void +fsl_ddr_set_lawbar(const common_timing_params_t *memctl_common_params, + unsigned int memctl_interleaved, + unsigned int ctrl_num); +#endif + +void fsl_ddr_set_intl3r(const unsigned int granule_size) +{ +#ifdef CONFIG_E6500 + u32 *mcintl3r = (void *) (CONFIG_SYS_IMMR + 0x18004); + *mcintl3r = 0x80000000 | (granule_size & 0x1f); + debug("Enable MCINTL3R with granule size 0x%x\n", granule_size); +#endif +} + +u32 fsl_ddr_get_intl3r(void) +{ + u32 val = 0; +#ifdef CONFIG_E6500 + u32 *mcintl3r = (void *) (CONFIG_SYS_IMMR + 0x18004); + val = *mcintl3r; +#endif + return val; +} + +void print_ddr_info(unsigned int start_ctrl) +{ + struct ccsr_ddr __iomem *ddr = + (struct ccsr_ddr __iomem *)(CONFIG_SYS_FSL_DDR_ADDR); + +#if defined(CONFIG_E6500) && (CONFIG_SYS_NUM_DDR_CTLRS == 3) + u32 *mcintl3r = (void *) (CONFIG_SYS_IMMR + 0x18004); +#endif +#if (CONFIG_SYS_NUM_DDR_CTLRS > 1) + uint32_t cs0_config = ddr_in32(&ddr->cs0_config); +#endif + uint32_t sdram_cfg = ddr_in32(&ddr->sdram_cfg); + int cas_lat; + +#if CONFIG_SYS_NUM_DDR_CTLRS >= 2 + if ((!(sdram_cfg & SDRAM_CFG_MEM_EN)) || + (start_ctrl == 1)) { + ddr = (void __iomem *)CONFIG_SYS_FSL_DDR2_ADDR; + sdram_cfg = ddr_in32(&ddr->sdram_cfg); + } +#endif +#if CONFIG_SYS_NUM_DDR_CTLRS >= 3 + if ((!(sdram_cfg & SDRAM_CFG_MEM_EN)) || + (start_ctrl == 2)) { + ddr = (void __iomem *)CONFIG_SYS_FSL_DDR3_ADDR; + sdram_cfg = ddr_in32(&ddr->sdram_cfg); + } +#endif + + if (!(sdram_cfg & SDRAM_CFG_MEM_EN)) { + puts(" (DDR not enabled)\n"); + return; + } + + puts(" (DDR"); + switch ((sdram_cfg & SDRAM_CFG_SDRAM_TYPE_MASK) >> + SDRAM_CFG_SDRAM_TYPE_SHIFT) { + case SDRAM_TYPE_DDR1: + puts("1"); + break; + case SDRAM_TYPE_DDR2: + puts("2"); + break; + case SDRAM_TYPE_DDR3: + puts("3"); + break; + case SDRAM_TYPE_DDR4: + puts("4"); + break; + default: + puts("?"); + break; + } + + if (sdram_cfg & SDRAM_CFG_32_BE) + puts(", 32-bit"); + else if (sdram_cfg & SDRAM_CFG_16_BE) + puts(", 16-bit"); + else + puts(", 64-bit"); + + /* Calculate CAS latency based on timing cfg values */ + cas_lat = ((ddr_in32(&ddr->timing_cfg_1) >> 16) & 0xf); + if (fsl_ddr_get_version(0) <= 0x40400) + cas_lat += 1; + else + cas_lat += 2; + cas_lat += ((ddr_in32(&ddr->timing_cfg_3) >> 12) & 3) << 4; + printf(", CL=%d", cas_lat >> 1); + if (cas_lat & 0x1) + puts(".5"); + + if (sdram_cfg & SDRAM_CFG_ECC_EN) + puts(", ECC on)"); + else + puts(", ECC off)"); + +#if (CONFIG_SYS_NUM_DDR_CTLRS == 3) +#ifdef CONFIG_E6500 + if (*mcintl3r & 0x80000000) { + puts("\n"); + puts(" DDR Controller Interleaving Mode: "); + switch (*mcintl3r & 0x1f) { + case FSL_DDR_3WAY_1KB_INTERLEAVING: + puts("3-way 1KB"); + break; + case FSL_DDR_3WAY_4KB_INTERLEAVING: + puts("3-way 4KB"); + break; + case FSL_DDR_3WAY_8KB_INTERLEAVING: + puts("3-way 8KB"); + break; + default: + puts("3-way UNKNOWN"); + break; + } + } +#endif +#endif +#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2) + if ((cs0_config & 0x20000000) && (start_ctrl == 0)) { + puts("\n"); + puts(" DDR Controller Interleaving Mode: "); + + 
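+		/*
+		 * A sketch of the cs0_config fields read here, inferred from
+		 * the masks used in this file rather than a register manual:
+		 * bit 29 flags memory-controller interleaving, and bits
+		 * [27:24] carry the interleaving mode decoded by the switch
+		 * below.
+		 */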
switch ((cs0_config >> 24) & 0xf) {
+ case FSL_DDR_256B_INTERLEAVING:
+ puts("256B");
+ break;
+ case FSL_DDR_CACHE_LINE_INTERLEAVING:
+ puts("cache line");
+ break;
+ case FSL_DDR_PAGE_INTERLEAVING:
+ puts("page");
+ break;
+ case FSL_DDR_BANK_INTERLEAVING:
+ puts("bank");
+ break;
+ case FSL_DDR_SUPERBANK_INTERLEAVING:
+ puts("super-bank");
+ break;
+ default:
+ puts("invalid");
+ break;
+ }
+ }
+#endif
+
+ if ((sdram_cfg >> 8) & 0x7f) {
+ puts("\n");
+ puts(" DDR Chip-Select Interleaving Mode: ");
+ switch ((sdram_cfg >> 8) & 0x7f) {
+ case FSL_DDR_CS0_CS1_CS2_CS3:
+ puts("CS0+CS1+CS2+CS3");
+ break;
+ case FSL_DDR_CS0_CS1:
+ puts("CS0+CS1");
+ break;
+ case FSL_DDR_CS2_CS3:
+ puts("CS2+CS3");
+ break;
+ case FSL_DDR_CS0_CS1_AND_CS2_CS3:
+ puts("CS0+CS1 and CS2+CS3");
+ break;
+ default:
+ puts("invalid");
+ break;
+ }
+ }
+}
+
+void __weak detail_board_ddr_info(void)
+{
+ print_ddr_info(0);
+}
+
+void board_add_ram_info(int use_default)
+{
+ detail_board_ddr_info();
+}
+
+#ifdef CONFIG_FSL_DDR_SYNC_REFRESH
+#define DDRC_DEBUG20_INIT_DONE 0x80000000
+#define DDRC_DEBUG2_RF 0x00000040
+void fsl_ddr_sync_memctl_refresh(unsigned int first_ctrl,
+ unsigned int last_ctrl)
+{
+ unsigned int i;
+ u32 ddrc_debug20;
+ u32 ddrc_debug2[CONFIG_SYS_NUM_DDR_CTLRS] = {};
+ u32 *ddrc_debug2_p[CONFIG_SYS_NUM_DDR_CTLRS] = {};
+ struct ccsr_ddr __iomem *ddr;
+
+ for (i = first_ctrl; i <= last_ctrl; i++) {
+ switch (i) {
+ case 0:
+ ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
+ break;
+#if defined(CONFIG_SYS_FSL_DDR2_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 1)
+ case 1:
+ ddr = (void *)CONFIG_SYS_FSL_DDR2_ADDR;
+ break;
+#endif
+#if defined(CONFIG_SYS_FSL_DDR3_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 2)
+ case 2:
+ ddr = (void *)CONFIG_SYS_FSL_DDR3_ADDR;
+ break;
+#endif
+#if defined(CONFIG_SYS_FSL_DDR4_ADDR) && (CONFIG_SYS_NUM_DDR_CTLRS > 3)
+ case 3:
+ ddr = (void *)CONFIG_SYS_FSL_DDR4_ADDR;
+ break;
+#endif
+ default:
+ printf("%s unexpected ctrl = %u\n", __func__, i);
+ return;
+ }
+ ddrc_debug20 = ddr_in32(&ddr->debug[19]);
+ ddrc_debug2_p[i] = &ddr->debug[1];
+ while (!(ddrc_debug20 & DDRC_DEBUG20_INIT_DONE)) {
+ /* keep polling until DDRC init is done */
+ udelay(100);
+ ddrc_debug20 = ddr_in32(&ddr->debug[19]);
+ }
+ ddrc_debug2[i] = ddr_in32(&ddr->debug[1]) | DDRC_DEBUG2_RF;
+ }
+ /*
+ * Sync refresh
+ * The writes below are grouped together so that the refresh requests
+ * reach the controllers as closely to each other as possible.
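+ * The DEBUG2 values (with the refresh-force bit DDRC_DEBUG2_RF already
+ * OR'ed in) and their register addresses were collected in the loop
+ * above, so the loop below is nothing but back-to-back writes.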
+ */
+ for (i = first_ctrl; i <= last_ctrl; i++)
+ ddr_out32(ddrc_debug2_p[i], ddrc_debug2[i]);
+}
+#endif /* CONFIG_FSL_DDR_SYNC_REFRESH */
+
+void remove_unused_controllers(fsl_ddr_info_t *info)
+{
+#ifdef CONFIG_SYS_FSL_HAS_CCN504
+ int i;
+ u64 nodeid;
+ void *hnf_sam_ctrl = (void *)(CCI_HN_F_0_BASE + CCN_HN_F_SAM_CTL);
+ bool ddr0_used = false;
+ bool ddr1_used = false;
+
+ for (i = 0; i < 8; i++) {
+ nodeid = in_le64(hnf_sam_ctrl) & CCN_HN_F_SAM_NODEID_MASK;
+ if (nodeid == CCN_HN_F_SAM_NODEID_DDR0) {
+ ddr0_used = true;
+ } else if (nodeid == CCN_HN_F_SAM_NODEID_DDR1) {
+ ddr1_used = true;
+ } else {
+ printf("Unknown nodeid in HN-F SAM control: 0x%llx\n",
+ nodeid);
+ }
+ hnf_sam_ctrl += (CCI_HN_F_1_BASE - CCI_HN_F_0_BASE);
+ }
+ if (!ddr0_used && !ddr1_used) {
+ printf("Invalid configuration in HN-F SAM control\n");
+ return;
+ }
+
+ if (!ddr0_used && info->first_ctrl == 0) {
+ info->first_ctrl = 1;
+ info->num_ctrls = 1;
+ debug("First DDR controller disabled\n");
+ return;
+ }
+
+ if (!ddr1_used && info->first_ctrl + info->num_ctrls > 1) {
+ info->num_ctrls = 1;
+ debug("Second DDR controller disabled\n");
+ }
+#endif
+}
diff --git a/roms/u-boot/drivers/ddr/imx/Kconfig b/roms/u-boot/drivers/ddr/imx/Kconfig
new file mode 100644
index 000000000..7e06fb2f7
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/imx/Kconfig
@@ -0,0 +1 @@
+source "drivers/ddr/imx/imx8m/Kconfig"
diff --git a/roms/u-boot/drivers/ddr/imx/imx8m/Kconfig b/roms/u-boot/drivers/ddr/imx/imx8m/Kconfig
new file mode 100644
index 000000000..a90b7db49
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/imx/imx8m/Kconfig
@@ -0,0 +1,47 @@
+menu "i.MX8M DDR controllers"
+ depends on ARCH_IMX8M
+
+config IMX8M_DRAM
+ bool "imx8m dram"
+
+config IMX8M_LPDDR4
+ bool "imx8m lpddr4"
+ select IMX8M_DRAM
+ help
+ Select the i.MX8M LPDDR4 driver support on i.MX8M SoCs.
+
+config IMX8M_DDR4
+ bool "imx8m ddr4"
+ select IMX8M_DRAM
+ help
+ Select the i.MX8M DDR4 driver support on i.MX8M SoCs.
+
+config IMX8M_DDR3L
+ bool "imx8m ddr3l"
+ select IMX8M_DRAM
+ help
+ Select the i.MX8M DDR3L driver support on i.MX8M SoCs.
+
+config SAVED_DRAM_TIMING_BASE
+ hex "Define the base address for saved dram timing"
+ help
+ After DRAM is trained, the DRAM-related timing info must be
+ saved into memory for low-power use. OCRAM_S is used for this
+ purpose on i.MX8MM.
+ default 0x180000
+
+config IMX8M_DRAM_INLINE_ECC
+ bool "imx8mp inline ECC"
+ depends on IMX8MP && IMX8M_LPDDR4
+ help
+ Select this config if you want to use the inline ECC feature on
+ the imx8mp-evk board.
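+
+# The ddrc_inline_ecc_scrub()/ddrc_inline_ecc_scrub_end() helpers in
+# ddr_init.c below are compiled only when this option is selected.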
+
+config IMX8M_VDD_SOC_850MV
+ bool "imx8mp: change the VDD_SOC voltage to 850 mV"
+ depends on IMX8MP
+
+config IMX8M_LPDDR4_FREQ0_2400MTS
+ bool "imx8m: change LPDDR4 freq0 from 4000 MTS to 2400 MTS"
+
+endmenu
diff --git a/roms/u-boot/drivers/ddr/imx/imx8m/Makefile b/roms/u-boot/drivers/ddr/imx/imx8m/Makefile
new file mode 100644
index 000000000..bd9bcb8d5
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/imx/imx8m/Makefile
@@ -0,0 +1,9 @@
+#
+# Copyright 2018 NXP
+#
+# SPDX-License-Identifier: GPL-2.0+
+#
+
+ifdef CONFIG_SPL_BUILD
+obj-$(CONFIG_IMX8M_DRAM) += helper.o ddrphy_utils.o ddrphy_train.o ddrphy_csr.o ddr_init.o
+endif
diff --git a/roms/u-boot/drivers/ddr/imx/imx8m/ddr_init.c b/roms/u-boot/drivers/ddr/imx/imx8m/ddr_init.c
new file mode 100644
index 000000000..b70bcc383
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/imx/imx8m/ddr_init.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2019 NXP
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <log.h>
+#include <asm/io.h>
+#include <asm/arch/ddr.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/sys_proto.h>
+
+void ddr_cfg_umctl2(struct dram_cfg_param *ddrc_cfg, int num)
+{
+ int i = 0;
+
+ for (i = 0; i < num; i++) {
+ reg32_write(ddrc_cfg->reg, ddrc_cfg->val);
+ ddrc_cfg++;
+ }
+}
+
+#ifdef CONFIG_IMX8M_DRAM_INLINE_ECC
+void ddrc_inline_ecc_scrub(unsigned int start_address,
+ unsigned int range_address)
+{
+ unsigned int tmp;
+
+ /* Step1: Enable quasi-dynamic programming */
+ reg32_write(DDRC_SWCTL(0), 0x00000000);
+ /* Step2: Set ECCCFG1.ecc_parity_region_lock to 1 */
+ reg32setbit(DDRC_ECCCFG1(0), 0x4);
+ /* Step3: Block the AXI ports from taking transactions */
+ reg32_write(DDRC_PCTRL_0(0), 0x0);
+ /* Step4: Set scrub start address */
+ reg32_write(DDRC_SBRSTART0(0), start_address);
+ /* Step5: Set scrub range address */
+ reg32_write(DDRC_SBRRANGE0(0), range_address);
+ /* Step6: Set scrub_mode to write */
+ reg32_write(DDRC_SBRCTL(0), 0x00000014);
+ /* Step7: Set the desired pattern through the SBRWDATA0 register */
+ reg32_write(DDRC_SBRWDATA0(0), 0x55aa55aa);
+ /* Step8: Enable the SBR by programming SBRCTL.scrub_en=1 */
+ reg32setbit(DDRC_SBRCTL(0), 0x0);
+ /* Step9: Poll SBRSTAT.scrub_done=1 */
+ do {
+ tmp = reg32_read(DDRC_SBRSTAT(0)) & 0x2;
+ } while (tmp != 0x2);
+ /* Step10: Poll SBRSTAT.scrub_busy=0 */
+ do {
+ tmp = reg32_read(DDRC_SBRSTAT(0)) & 0x1;
+ } while (tmp != 0x0);
+ /* Step11: Disable SBR by programming SBRCTL.scrub_en=0 */
+ clrbits_le32(DDRC_SBRCTL(0), 0x1);
+ /* Step12: Prepare for normal scrub operation (read) and set scrub_interval */
+ reg32_write(DDRC_SBRCTL(0), 0x100);
+ /* Step13: Enable the SBR by programming SBRCTL.scrub_en=1 */
+ reg32_write(DDRC_SBRCTL(0), 0x101);
+ /* Step14: Re-enable the AXI ports */
+ reg32_write(DDRC_PCTRL_0(0), 0x1);
+ /* Step15: Disable quasi-dynamic programming */
+ reg32_write(DDRC_SWCTL(0), 0x00000001);
+}
+
+void ddrc_inline_ecc_scrub_end(unsigned int start_address,
+ unsigned int range_address)
+{
+ /* Step1: Enable quasi-dynamic programming */
+ reg32_write(DDRC_SWCTL(0), 0x00000000);
+ /* Step2: Block the AXI ports from taking transactions */
+ reg32_write(DDRC_PCTRL_0(0), 0x0);
+ /* Step3: Set scrub start address */
+ reg32_write(DDRC_SBRSTART0(0), start_address);
+ /* Step4: Set scrub range address */
+ reg32_write(DDRC_SBRRANGE0(0), range_address);
+ /* Step5: Disable SBR by programming SBRCTL.scrub_en=0 */
+ clrbits_le32(DDRC_SBRCTL(0), 0x1);
+ /* Step6:
Prepare for normal scrub operation (read) and set scrub_interval */
+ reg32_write(DDRC_SBRCTL(0), 0x100);
+ /* Step7: Enable the SBR by programming SBRCTL.scrub_en=1 */
+ reg32_write(DDRC_SBRCTL(0), 0x101);
+ /* Step8: Re-enable the AXI ports */
+ reg32_write(DDRC_PCTRL_0(0), 0x1);
+ /* Step9: Disable quasi-dynamic programming */
+ reg32_write(DDRC_SWCTL(0), 0x00000001);
+}
+#endif
+
+void __weak board_dram_ecc_scrub(void)
+{
+}
+
+int ddr_init(struct dram_timing_info *dram_timing)
+{
+ unsigned int tmp, initial_drate, target_freq;
+ int ret;
+
+ debug("DDRINFO: start DRAM init\n");
+
+ /* Step1: Follow the power up procedure */
+ if (is_imx8mq()) {
+ reg32_write(SRC_DDRC_RCR_ADDR + 0x04, 0x8F00000F);
+ reg32_write(SRC_DDRC_RCR_ADDR, 0x8F00000F);
+ reg32_write(SRC_DDRC_RCR_ADDR + 0x04, 0x8F000000);
+ } else {
+ reg32_write(SRC_DDRC_RCR_ADDR, 0x8F00001F);
+ reg32_write(SRC_DDRC_RCR_ADDR, 0x8F00000F);
+ }
+
+ debug("DDRINFO: cfg clk\n");
+ /* change the clock source of dram_apb_clk_root: source 4 800MHz /4 = 200MHz */
+ clock_set_target_val(DRAM_APB_CLK_ROOT, CLK_ROOT_ON | CLK_ROOT_SOURCE_SEL(4) |
+ CLK_ROOT_PRE_DIV(CLK_ROOT_PRE_DIV4));
+
+ /* disable isolation */
+ reg32_write(0x303A00EC, 0x0000ffff); /* PGC_CPU_MAPPING */
+ reg32setbit(0x303A00F8, 5); /* PU_PGC_SW_PUP_REQ */
+
+ initial_drate = dram_timing->fsp_msg[0].drate;
+ /* default to the frequency point 0 clock */
+ ddrphy_init_set_dfi_clk(initial_drate);
+
+ /* De-assert presetn */
+ reg32_write(SRC_DDRC_RCR_ADDR, 0x8F000006);
+
+ /* Step2: Program the dwc_ddr_umctl2 registers */
+ debug("DDRINFO: ddrc config start\n");
+ ddr_cfg_umctl2(dram_timing->ddrc_cfg, dram_timing->ddrc_cfg_num);
+ debug("DDRINFO: ddrc config done\n");
+
+ /* Step3: De-assert the reset signals (core_ddrc_rstn & aresetn_n) */
+ reg32_write(SRC_DDRC_RCR_ADDR, 0x8F000004);
+ reg32_write(SRC_DDRC_RCR_ADDR, 0x8F000000);
+
+ /*
+ * Step4: Disable auto-refreshes, self-refresh, powerdown, and
+ * assertion of dfi_dram_clk_disable by setting RFSHCTL3.dis_auto_refresh = 1,
+ * PWRCTL.powerdown_en = 0, and PWRCTL.selfref_en = 0, PWRCTL.en_dfi_dram_clk_disable = 0
+ */
+ reg32_write(DDRC_DBG1(0), 0x00000000);
+ reg32_write(DDRC_RFSHCTL3(0), 0x0000001);
+ reg32_write(DDRC_PWRCTL(0), 0xa0);
+
+ /* If the DDR type is LPDDR4, set the LPDDR4 mode flag */
+ tmp = reg32_read(DDRC_MSTR(0));
+ if (tmp & (0x1 << 5) && !is_imx8mn())
+ reg32_write(DDRC_DDR_SS_GPR0, 0x01); /* LPDDR4 mode */
+
+ /* determine the initial boot frequency */
+ target_freq = reg32_read(DDRC_MSTR2(0)) & 0x3;
+ target_freq = (tmp & (0x1 << 29)) ? target_freq : 0x0;
+
+ /* Step5: Set SWCTL.sw_done to 0 */
+ reg32_write(DDRC_SWCTL(0), 0x00000000);
+
+ /* Set the default boot frequency point */
+ clrsetbits_le32(DDRC_DFIMISC(0), (0x1f << 8), target_freq << 8);
+ /* Step6: Set DFIMISC.dfi_init_complete_en to 0 */
+ clrbits_le32(DDRC_DFIMISC(0), 0x1);
+
+ /* Step7: Set SWCTL.sw_done to 1, then poll SWSTAT.sw_done_ack */
+ reg32_write(DDRC_SWCTL(0), 0x00000001);
+ do {
+ tmp = reg32_read(DDRC_SWSTAT(0));
+ } while ((tmp & 0x1) == 0x0);
+
+ /*
+ * Step8 ~ Step13: Start PHY initialization and training by
+ * accessing relevant PUB registers
+ */
+ debug("DDRINFO: ddrphy config start\n");
+
+ ret = ddr_cfg_phy(dram_timing);
+ if (ret)
+ return ret;
+
+ debug("DDRINFO: ddrphy config done\n");
+
+ /*
+ * Step14: CalBusy.0 = 1 indicates that the calibrator is actively
+ * calibrating. Wait until calibration is done.
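+ * (The do/while loop below polls bit 0 of DDRPHY_CalBusy until it
+ * reads back as 0.)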
+ */ + do { + tmp = reg32_read(DDRPHY_CalBusy(0)); + } while ((tmp & 0x1)); + + debug("DDRINFO:ddrphy calibration done\n"); + + /* Step15: Set SWCTL.sw_done to 0 */ + reg32_write(DDRC_SWCTL(0), 0x00000000); + + /* Apply rank-to-rank workaround */ + update_umctl2_rank_space_setting(dram_timing->fsp_msg_num - 1); + + /* Step16: Set DFIMISC.dfi_init_start to 1 */ + setbits_le32(DDRC_DFIMISC(0), (0x1 << 5)); + + /* Step17: Set SWCTL.sw_done to 1; need to polling SWSTAT.sw_done_ack */ + reg32_write(DDRC_SWCTL(0), 0x00000001); + do { + tmp = reg32_read(DDRC_SWSTAT(0)); + } while ((tmp & 0x1) == 0x0); + + /* Step18: Polling DFISTAT.dfi_init_complete = 1 */ + do { + tmp = reg32_read(DDRC_DFISTAT(0)); + } while ((tmp & 0x1) == 0x0); + + /* Step19: Set SWCTL.sw_done to 0 */ + reg32_write(DDRC_SWCTL(0), 0x00000000); + + /* Step20: Set DFIMISC.dfi_init_start to 0 */ + clrbits_le32(DDRC_DFIMISC(0), (0x1 << 5)); + + /* Step21: optional */ + + /* Step22: Set DFIMISC.dfi_init_complete_en to 1 */ + setbits_le32(DDRC_DFIMISC(0), 0x1); + + /* Step23: Set PWRCTL.selfref_sw to 0 */ + clrbits_le32(DDRC_PWRCTL(0), (0x1 << 5)); + + /* Step24: Set SWCTL.sw_done to 1; need polling SWSTAT.sw_done_ack */ + reg32_write(DDRC_SWCTL(0), 0x00000001); + do { + tmp = reg32_read(DDRC_SWSTAT(0)); + } while ((tmp & 0x1) == 0x0); + + /* Step25: Wait for dwc_ddr_umctl2 to move to normal operating mode by monitoring + * STAT.operating_mode signal */ + do { + tmp = reg32_read(DDRC_STAT(0)); + } while ((tmp & 0x3) != 0x1); + + /* Step26: Set back register in Step4 to the original values if desired */ + reg32_write(DDRC_RFSHCTL3(0), 0x0000000); + + /* enable port 0 */ + reg32_write(DDRC_PCTRL_0(0), 0x00000001); + debug("DDRINFO: ddrmix config done\n"); + + board_dram_ecc_scrub(); + + /* enable selfref_en by default */ + setbits_le32(DDRC_PWRCTL(0), 0x1); + + /* save the dram timing config into memory */ + dram_config_save(dram_timing, CONFIG_SAVED_DRAM_TIMING_BASE); + + return 0; +} diff --git a/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_csr.c b/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_csr.c new file mode 100644 index 000000000..67dd4e705 --- /dev/null +++ b/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_csr.c @@ -0,0 +1,732 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018 NXP + */ + +#include <linux/kernel.h> +#include <asm/arch/ddr.h> + +/* ddr phy trained csr */ +struct dram_cfg_param ddrphy_trained_csr[] = { + { 0x200b2, 0x0 }, + { 0x1200b2, 0x0 }, + { 0x2200b2, 0x0 }, + { 0x200cb, 0x0 }, + { 0x10043, 0x0 }, + { 0x110043, 0x0 }, + { 0x210043, 0x0 }, + { 0x10143, 0x0 }, + { 0x110143, 0x0 }, + { 0x210143, 0x0 }, + { 0x11043, 0x0 }, + { 0x111043, 0x0 }, + { 0x211043, 0x0 }, + { 0x11143, 0x0 }, + { 0x111143, 0x0 }, + { 0x211143, 0x0 }, + { 0x12043, 0x0 }, + { 0x112043, 0x0 }, + { 0x212043, 0x0 }, + { 0x12143, 0x0 }, + { 0x112143, 0x0 }, + { 0x212143, 0x0 }, + { 0x13043, 0x0 }, + { 0x113043, 0x0 }, + { 0x213043, 0x0 }, + { 0x13143, 0x0 }, + { 0x113143, 0x0 }, + { 0x213143, 0x0 }, + { 0x80, 0x0 }, + { 0x100080, 0x0 }, + { 0x200080, 0x0 }, + { 0x1080, 0x0 }, + { 0x101080, 0x0 }, + { 0x201080, 0x0 }, + { 0x2080, 0x0 }, + { 0x102080, 0x0 }, + { 0x202080, 0x0 }, + { 0x3080, 0x0 }, + { 0x103080, 0x0 }, + { 0x203080, 0x0 }, + { 0x4080, 0x0 }, + { 0x104080, 0x0 }, + { 0x204080, 0x0 }, + { 0x5080, 0x0 }, + { 0x105080, 0x0 }, + { 0x205080, 0x0 }, + { 0x6080, 0x0 }, + { 0x106080, 0x0 }, + { 0x206080, 0x0 }, + { 0x7080, 0x0 }, + { 0x107080, 0x0 }, + { 0x207080, 0x0 }, + { 0x8080, 0x0 }, + { 0x108080, 0x0 }, + { 0x208080, 0x0 }, + { 
0x9080, 0x0 }, + { 0x109080, 0x0 }, + { 0x209080, 0x0 }, + { 0x10080, 0x0 }, + { 0x110080, 0x0 }, + { 0x210080, 0x0 }, + { 0x10180, 0x0 }, + { 0x110180, 0x0 }, + { 0x210180, 0x0 }, + { 0x11080, 0x0 }, + { 0x111080, 0x0 }, + { 0x211080, 0x0 }, + { 0x11180, 0x0 }, + { 0x111180, 0x0 }, + { 0x211180, 0x0 }, + { 0x12080, 0x0 }, + { 0x112080, 0x0 }, + { 0x212080, 0x0 }, + { 0x12180, 0x0 }, + { 0x112180, 0x0 }, + { 0x212180, 0x0 }, + { 0x13080, 0x0 }, + { 0x113080, 0x0 }, + { 0x213080, 0x0 }, + { 0x13180, 0x0 }, + { 0x113180, 0x0 }, + { 0x213180, 0x0 }, + { 0x10081, 0x0 }, + { 0x110081, 0x0 }, + { 0x210081, 0x0 }, + { 0x10181, 0x0 }, + { 0x110181, 0x0 }, + { 0x210181, 0x0 }, + { 0x11081, 0x0 }, + { 0x111081, 0x0 }, + { 0x211081, 0x0 }, + { 0x11181, 0x0 }, + { 0x111181, 0x0 }, + { 0x211181, 0x0 }, + { 0x12081, 0x0 }, + { 0x112081, 0x0 }, + { 0x212081, 0x0 }, + { 0x12181, 0x0 }, + { 0x112181, 0x0 }, + { 0x212181, 0x0 }, + { 0x13081, 0x0 }, + { 0x113081, 0x0 }, + { 0x213081, 0x0 }, + { 0x13181, 0x0 }, + { 0x113181, 0x0 }, + { 0x213181, 0x0 }, + { 0x100d0, 0x0 }, + { 0x1100d0, 0x0 }, + { 0x2100d0, 0x0 }, + { 0x101d0, 0x0 }, + { 0x1101d0, 0x0 }, + { 0x2101d0, 0x0 }, + { 0x110d0, 0x0 }, + { 0x1110d0, 0x0 }, + { 0x2110d0, 0x0 }, + { 0x111d0, 0x0 }, + { 0x1111d0, 0x0 }, + { 0x2111d0, 0x0 }, + { 0x120d0, 0x0 }, + { 0x1120d0, 0x0 }, + { 0x2120d0, 0x0 }, + { 0x121d0, 0x0 }, + { 0x1121d0, 0x0 }, + { 0x2121d0, 0x0 }, + { 0x130d0, 0x0 }, + { 0x1130d0, 0x0 }, + { 0x2130d0, 0x0 }, + { 0x131d0, 0x0 }, + { 0x1131d0, 0x0 }, + { 0x2131d0, 0x0 }, + { 0x100d1, 0x0 }, + { 0x1100d1, 0x0 }, + { 0x2100d1, 0x0 }, + { 0x101d1, 0x0 }, + { 0x1101d1, 0x0 }, + { 0x2101d1, 0x0 }, + { 0x110d1, 0x0 }, + { 0x1110d1, 0x0 }, + { 0x2110d1, 0x0 }, + { 0x111d1, 0x0 }, + { 0x1111d1, 0x0 }, + { 0x2111d1, 0x0 }, + { 0x120d1, 0x0 }, + { 0x1120d1, 0x0 }, + { 0x2120d1, 0x0 }, + { 0x121d1, 0x0 }, + { 0x1121d1, 0x0 }, + { 0x2121d1, 0x0 }, + { 0x130d1, 0x0 }, + { 0x1130d1, 0x0 }, + { 0x2130d1, 0x0 }, + { 0x131d1, 0x0 }, + { 0x1131d1, 0x0 }, + { 0x2131d1, 0x0 }, + { 0x10068, 0x0 }, + { 0x10168, 0x0 }, + { 0x10268, 0x0 }, + { 0x10368, 0x0 }, + { 0x10468, 0x0 }, + { 0x10568, 0x0 }, + { 0x10668, 0x0 }, + { 0x10768, 0x0 }, + { 0x10868, 0x0 }, + { 0x11068, 0x0 }, + { 0x11168, 0x0 }, + { 0x11268, 0x0 }, + { 0x11368, 0x0 }, + { 0x11468, 0x0 }, + { 0x11568, 0x0 }, + { 0x11668, 0x0 }, + { 0x11768, 0x0 }, + { 0x11868, 0x0 }, + { 0x12068, 0x0 }, + { 0x12168, 0x0 }, + { 0x12268, 0x0 }, + { 0x12368, 0x0 }, + { 0x12468, 0x0 }, + { 0x12568, 0x0 }, + { 0x12668, 0x0 }, + { 0x12768, 0x0 }, + { 0x12868, 0x0 }, + { 0x13068, 0x0 }, + { 0x13168, 0x0 }, + { 0x13268, 0x0 }, + { 0x13368, 0x0 }, + { 0x13468, 0x0 }, + { 0x13568, 0x0 }, + { 0x13668, 0x0 }, + { 0x13768, 0x0 }, + { 0x13868, 0x0 }, + { 0x10069, 0x0 }, + { 0x10169, 0x0 }, + { 0x10269, 0x0 }, + { 0x10369, 0x0 }, + { 0x10469, 0x0 }, + { 0x10569, 0x0 }, + { 0x10669, 0x0 }, + { 0x10769, 0x0 }, + { 0x10869, 0x0 }, + { 0x11069, 0x0 }, + { 0x11169, 0x0 }, + { 0x11269, 0x0 }, + { 0x11369, 0x0 }, + { 0x11469, 0x0 }, + { 0x11569, 0x0 }, + { 0x11669, 0x0 }, + { 0x11769, 0x0 }, + { 0x11869, 0x0 }, + { 0x12069, 0x0 }, + { 0x12169, 0x0 }, + { 0x12269, 0x0 }, + { 0x12369, 0x0 }, + { 0x12469, 0x0 }, + { 0x12569, 0x0 }, + { 0x12669, 0x0 }, + { 0x12769, 0x0 }, + { 0x12869, 0x0 }, + { 0x13069, 0x0 }, + { 0x13169, 0x0 }, + { 0x13269, 0x0 }, + { 0x13369, 0x0 }, + { 0x13469, 0x0 }, + { 0x13569, 0x0 }, + { 0x13669, 0x0 }, + { 0x13769, 0x0 }, + { 0x13869, 0x0 }, + { 0x1008c, 0x0 }, + { 0x11008c, 0x0 }, + { 0x21008c, 0x0 }, + { 
0x1018c, 0x0 }, + { 0x11018c, 0x0 }, + { 0x21018c, 0x0 }, + { 0x1108c, 0x0 }, + { 0x11108c, 0x0 }, + { 0x21108c, 0x0 }, + { 0x1118c, 0x0 }, + { 0x11118c, 0x0 }, + { 0x21118c, 0x0 }, + { 0x1208c, 0x0 }, + { 0x11208c, 0x0 }, + { 0x21208c, 0x0 }, + { 0x1218c, 0x0 }, + { 0x11218c, 0x0 }, + { 0x21218c, 0x0 }, + { 0x1308c, 0x0 }, + { 0x11308c, 0x0 }, + { 0x21308c, 0x0 }, + { 0x1318c, 0x0 }, + { 0x11318c, 0x0 }, + { 0x21318c, 0x0 }, + { 0x1008d, 0x0 }, + { 0x11008d, 0x0 }, + { 0x21008d, 0x0 }, + { 0x1018d, 0x0 }, + { 0x11018d, 0x0 }, + { 0x21018d, 0x0 }, + { 0x1108d, 0x0 }, + { 0x11108d, 0x0 }, + { 0x21108d, 0x0 }, + { 0x1118d, 0x0 }, + { 0x11118d, 0x0 }, + { 0x21118d, 0x0 }, + { 0x1208d, 0x0 }, + { 0x11208d, 0x0 }, + { 0x21208d, 0x0 }, + { 0x1218d, 0x0 }, + { 0x11218d, 0x0 }, + { 0x21218d, 0x0 }, + { 0x1308d, 0x0 }, + { 0x11308d, 0x0 }, + { 0x21308d, 0x0 }, + { 0x1318d, 0x0 }, + { 0x11318d, 0x0 }, + { 0x21318d, 0x0 }, + { 0x100c0, 0x0 }, + { 0x1100c0, 0x0 }, + { 0x2100c0, 0x0 }, + { 0x101c0, 0x0 }, + { 0x1101c0, 0x0 }, + { 0x2101c0, 0x0 }, + { 0x102c0, 0x0 }, + { 0x1102c0, 0x0 }, + { 0x2102c0, 0x0 }, + { 0x103c0, 0x0 }, + { 0x1103c0, 0x0 }, + { 0x2103c0, 0x0 }, + { 0x104c0, 0x0 }, + { 0x1104c0, 0x0 }, + { 0x2104c0, 0x0 }, + { 0x105c0, 0x0 }, + { 0x1105c0, 0x0 }, + { 0x2105c0, 0x0 }, + { 0x106c0, 0x0 }, + { 0x1106c0, 0x0 }, + { 0x2106c0, 0x0 }, + { 0x107c0, 0x0 }, + { 0x1107c0, 0x0 }, + { 0x2107c0, 0x0 }, + { 0x108c0, 0x0 }, + { 0x1108c0, 0x0 }, + { 0x2108c0, 0x0 }, + { 0x110c0, 0x0 }, + { 0x1110c0, 0x0 }, + { 0x2110c0, 0x0 }, + { 0x111c0, 0x0 }, + { 0x1111c0, 0x0 }, + { 0x2111c0, 0x0 }, + { 0x112c0, 0x0 }, + { 0x1112c0, 0x0 }, + { 0x2112c0, 0x0 }, + { 0x113c0, 0x0 }, + { 0x1113c0, 0x0 }, + { 0x2113c0, 0x0 }, + { 0x114c0, 0x0 }, + { 0x1114c0, 0x0 }, + { 0x2114c0, 0x0 }, + { 0x115c0, 0x0 }, + { 0x1115c0, 0x0 }, + { 0x2115c0, 0x0 }, + { 0x116c0, 0x0 }, + { 0x1116c0, 0x0 }, + { 0x2116c0, 0x0 }, + { 0x117c0, 0x0 }, + { 0x1117c0, 0x0 }, + { 0x2117c0, 0x0 }, + { 0x118c0, 0x0 }, + { 0x1118c0, 0x0 }, + { 0x2118c0, 0x0 }, + { 0x120c0, 0x0 }, + { 0x1120c0, 0x0 }, + { 0x2120c0, 0x0 }, + { 0x121c0, 0x0 }, + { 0x1121c0, 0x0 }, + { 0x2121c0, 0x0 }, + { 0x122c0, 0x0 }, + { 0x1122c0, 0x0 }, + { 0x2122c0, 0x0 }, + { 0x123c0, 0x0 }, + { 0x1123c0, 0x0 }, + { 0x2123c0, 0x0 }, + { 0x124c0, 0x0 }, + { 0x1124c0, 0x0 }, + { 0x2124c0, 0x0 }, + { 0x125c0, 0x0 }, + { 0x1125c0, 0x0 }, + { 0x2125c0, 0x0 }, + { 0x126c0, 0x0 }, + { 0x1126c0, 0x0 }, + { 0x2126c0, 0x0 }, + { 0x127c0, 0x0 }, + { 0x1127c0, 0x0 }, + { 0x2127c0, 0x0 }, + { 0x128c0, 0x0 }, + { 0x1128c0, 0x0 }, + { 0x2128c0, 0x0 }, + { 0x130c0, 0x0 }, + { 0x1130c0, 0x0 }, + { 0x2130c0, 0x0 }, + { 0x131c0, 0x0 }, + { 0x1131c0, 0x0 }, + { 0x2131c0, 0x0 }, + { 0x132c0, 0x0 }, + { 0x1132c0, 0x0 }, + { 0x2132c0, 0x0 }, + { 0x133c0, 0x0 }, + { 0x1133c0, 0x0 }, + { 0x2133c0, 0x0 }, + { 0x134c0, 0x0 }, + { 0x1134c0, 0x0 }, + { 0x2134c0, 0x0 }, + { 0x135c0, 0x0 }, + { 0x1135c0, 0x0 }, + { 0x2135c0, 0x0 }, + { 0x136c0, 0x0 }, + { 0x1136c0, 0x0 }, + { 0x2136c0, 0x0 }, + { 0x137c0, 0x0 }, + { 0x1137c0, 0x0 }, + { 0x2137c0, 0x0 }, + { 0x138c0, 0x0 }, + { 0x1138c0, 0x0 }, + { 0x2138c0, 0x0 }, + { 0x100c1, 0x0 }, + { 0x1100c1, 0x0 }, + { 0x2100c1, 0x0 }, + { 0x101c1, 0x0 }, + { 0x1101c1, 0x0 }, + { 0x2101c1, 0x0 }, + { 0x102c1, 0x0 }, + { 0x1102c1, 0x0 }, + { 0x2102c1, 0x0 }, + { 0x103c1, 0x0 }, + { 0x1103c1, 0x0 }, + { 0x2103c1, 0x0 }, + { 0x104c1, 0x0 }, + { 0x1104c1, 0x0 }, + { 0x2104c1, 0x0 }, + { 0x105c1, 0x0 }, + { 0x1105c1, 0x0 }, + { 0x2105c1, 0x0 }, + { 0x106c1, 0x0 }, + { 
0x1106c1, 0x0 }, + { 0x2106c1, 0x0 }, + { 0x107c1, 0x0 }, + { 0x1107c1, 0x0 }, + { 0x2107c1, 0x0 }, + { 0x108c1, 0x0 }, + { 0x1108c1, 0x0 }, + { 0x2108c1, 0x0 }, + { 0x110c1, 0x0 }, + { 0x1110c1, 0x0 }, + { 0x2110c1, 0x0 }, + { 0x111c1, 0x0 }, + { 0x1111c1, 0x0 }, + { 0x2111c1, 0x0 }, + { 0x112c1, 0x0 }, + { 0x1112c1, 0x0 }, + { 0x2112c1, 0x0 }, + { 0x113c1, 0x0 }, + { 0x1113c1, 0x0 }, + { 0x2113c1, 0x0 }, + { 0x114c1, 0x0 }, + { 0x1114c1, 0x0 }, + { 0x2114c1, 0x0 }, + { 0x115c1, 0x0 }, + { 0x1115c1, 0x0 }, + { 0x2115c1, 0x0 }, + { 0x116c1, 0x0 }, + { 0x1116c1, 0x0 }, + { 0x2116c1, 0x0 }, + { 0x117c1, 0x0 }, + { 0x1117c1, 0x0 }, + { 0x2117c1, 0x0 }, + { 0x118c1, 0x0 }, + { 0x1118c1, 0x0 }, + { 0x2118c1, 0x0 }, + { 0x120c1, 0x0 }, + { 0x1120c1, 0x0 }, + { 0x2120c1, 0x0 }, + { 0x121c1, 0x0 }, + { 0x1121c1, 0x0 }, + { 0x2121c1, 0x0 }, + { 0x122c1, 0x0 }, + { 0x1122c1, 0x0 }, + { 0x2122c1, 0x0 }, + { 0x123c1, 0x0 }, + { 0x1123c1, 0x0 }, + { 0x2123c1, 0x0 }, + { 0x124c1, 0x0 }, + { 0x1124c1, 0x0 }, + { 0x2124c1, 0x0 }, + { 0x125c1, 0x0 }, + { 0x1125c1, 0x0 }, + { 0x2125c1, 0x0 }, + { 0x126c1, 0x0 }, + { 0x1126c1, 0x0 }, + { 0x2126c1, 0x0 }, + { 0x127c1, 0x0 }, + { 0x1127c1, 0x0 }, + { 0x2127c1, 0x0 }, + { 0x128c1, 0x0 }, + { 0x1128c1, 0x0 }, + { 0x2128c1, 0x0 }, + { 0x130c1, 0x0 }, + { 0x1130c1, 0x0 }, + { 0x2130c1, 0x0 }, + { 0x131c1, 0x0 }, + { 0x1131c1, 0x0 }, + { 0x2131c1, 0x0 }, + { 0x132c1, 0x0 }, + { 0x1132c1, 0x0 }, + { 0x2132c1, 0x0 }, + { 0x133c1, 0x0 }, + { 0x1133c1, 0x0 }, + { 0x2133c1, 0x0 }, + { 0x134c1, 0x0 }, + { 0x1134c1, 0x0 }, + { 0x2134c1, 0x0 }, + { 0x135c1, 0x0 }, + { 0x1135c1, 0x0 }, + { 0x2135c1, 0x0 }, + { 0x136c1, 0x0 }, + { 0x1136c1, 0x0 }, + { 0x2136c1, 0x0 }, + { 0x137c1, 0x0 }, + { 0x1137c1, 0x0 }, + { 0x2137c1, 0x0 }, + { 0x138c1, 0x0 }, + { 0x1138c1, 0x0 }, + { 0x2138c1, 0x0 }, + { 0x10020, 0x0 }, + { 0x110020, 0x0 }, + { 0x210020, 0x0 }, + { 0x11020, 0x0 }, + { 0x111020, 0x0 }, + { 0x211020, 0x0 }, + { 0x12020, 0x0 }, + { 0x112020, 0x0 }, + { 0x212020, 0x0 }, + { 0x13020, 0x0 }, + { 0x113020, 0x0 }, + { 0x213020, 0x0 }, + { 0x20072, 0x0 }, + { 0x20073, 0x0 }, + { 0x20074, 0x0 }, + { 0x100aa, 0x0 }, + { 0x110aa, 0x0 }, + { 0x120aa, 0x0 }, + { 0x130aa, 0x0 }, + { 0x20010, 0x0 }, + { 0x120010, 0x0 }, + { 0x220010, 0x0 }, + { 0x20011, 0x0 }, + { 0x120011, 0x0 }, + { 0x220011, 0x0 }, + { 0x100ae, 0x0 }, + { 0x1100ae, 0x0 }, + { 0x2100ae, 0x0 }, + { 0x100af, 0x0 }, + { 0x1100af, 0x0 }, + { 0x2100af, 0x0 }, + { 0x110ae, 0x0 }, + { 0x1110ae, 0x0 }, + { 0x2110ae, 0x0 }, + { 0x110af, 0x0 }, + { 0x1110af, 0x0 }, + { 0x2110af, 0x0 }, + { 0x120ae, 0x0 }, + { 0x1120ae, 0x0 }, + { 0x2120ae, 0x0 }, + { 0x120af, 0x0 }, + { 0x1120af, 0x0 }, + { 0x2120af, 0x0 }, + { 0x130ae, 0x0 }, + { 0x1130ae, 0x0 }, + { 0x2130ae, 0x0 }, + { 0x130af, 0x0 }, + { 0x1130af, 0x0 }, + { 0x2130af, 0x0 }, + { 0x20020, 0x0 }, + { 0x120020, 0x0 }, + { 0x220020, 0x0 }, + { 0x100a0, 0x0 }, + { 0x100a1, 0x0 }, + { 0x100a2, 0x0 }, + { 0x100a3, 0x0 }, + { 0x100a4, 0x0 }, + { 0x100a5, 0x0 }, + { 0x100a6, 0x0 }, + { 0x100a7, 0x0 }, + { 0x110a0, 0x0 }, + { 0x110a1, 0x0 }, + { 0x110a2, 0x0 }, + { 0x110a3, 0x0 }, + { 0x110a4, 0x0 }, + { 0x110a5, 0x0 }, + { 0x110a6, 0x0 }, + { 0x110a7, 0x0 }, + { 0x120a0, 0x0 }, + { 0x120a1, 0x0 }, + { 0x120a2, 0x0 }, + { 0x120a3, 0x0 }, + { 0x120a4, 0x0 }, + { 0x120a5, 0x0 }, + { 0x120a6, 0x0 }, + { 0x120a7, 0x0 }, + { 0x130a0, 0x0 }, + { 0x130a1, 0x0 }, + { 0x130a2, 0x0 }, + { 0x130a3, 0x0 }, + { 0x130a4, 0x0 }, + { 0x130a5, 0x0 }, + { 0x130a6, 0x0 }, + { 0x130a7, 0x0 }, + { 
0x2007c, 0x0 }, + { 0x12007c, 0x0 }, + { 0x22007c, 0x0 }, + { 0x2007d, 0x0 }, + { 0x12007d, 0x0 }, + { 0x22007d, 0x0 }, + { 0x400fd, 0x0 }, + { 0x400c0, 0x0 }, + { 0x90201, 0x0 }, + { 0x190201, 0x0 }, + { 0x290201, 0x0 }, + { 0x90202, 0x0 }, + { 0x190202, 0x0 }, + { 0x290202, 0x0 }, + { 0x90203, 0x0 }, + { 0x190203, 0x0 }, + { 0x290203, 0x0 }, + { 0x90204, 0x0 }, + { 0x190204, 0x0 }, + { 0x290204, 0x0 }, + { 0x90205, 0x0 }, + { 0x190205, 0x0 }, + { 0x290205, 0x0 }, + { 0x90206, 0x0 }, + { 0x190206, 0x0 }, + { 0x290206, 0x0 }, + { 0x90207, 0x0 }, + { 0x190207, 0x0 }, + { 0x290207, 0x0 }, + { 0x90208, 0x0 }, + { 0x190208, 0x0 }, + { 0x290208, 0x0 }, + { 0x10062, 0x0 }, + { 0x10162, 0x0 }, + { 0x10262, 0x0 }, + { 0x10362, 0x0 }, + { 0x10462, 0x0 }, + { 0x10562, 0x0 }, + { 0x10662, 0x0 }, + { 0x10762, 0x0 }, + { 0x10862, 0x0 }, + { 0x11062, 0x0 }, + { 0x11162, 0x0 }, + { 0x11262, 0x0 }, + { 0x11362, 0x0 }, + { 0x11462, 0x0 }, + { 0x11562, 0x0 }, + { 0x11662, 0x0 }, + { 0x11762, 0x0 }, + { 0x11862, 0x0 }, + { 0x12062, 0x0 }, + { 0x12162, 0x0 }, + { 0x12262, 0x0 }, + { 0x12362, 0x0 }, + { 0x12462, 0x0 }, + { 0x12562, 0x0 }, + { 0x12662, 0x0 }, + { 0x12762, 0x0 }, + { 0x12862, 0x0 }, + { 0x13062, 0x0 }, + { 0x13162, 0x0 }, + { 0x13262, 0x0 }, + { 0x13362, 0x0 }, + { 0x13462, 0x0 }, + { 0x13562, 0x0 }, + { 0x13662, 0x0 }, + { 0x13762, 0x0 }, + { 0x13862, 0x0 }, + { 0x20077, 0x0 }, + { 0x10001, 0x0 }, + { 0x11001, 0x0 }, + { 0x12001, 0x0 }, + { 0x13001, 0x0 }, + { 0x10040, 0x0 }, + { 0x10140, 0x0 }, + { 0x10240, 0x0 }, + { 0x10340, 0x0 }, + { 0x10440, 0x0 }, + { 0x10540, 0x0 }, + { 0x10640, 0x0 }, + { 0x10740, 0x0 }, + { 0x10840, 0x0 }, + { 0x10030, 0x0 }, + { 0x10130, 0x0 }, + { 0x10230, 0x0 }, + { 0x10330, 0x0 }, + { 0x10430, 0x0 }, + { 0x10530, 0x0 }, + { 0x10630, 0x0 }, + { 0x10730, 0x0 }, + { 0x10830, 0x0 }, + { 0x11040, 0x0 }, + { 0x11140, 0x0 }, + { 0x11240, 0x0 }, + { 0x11340, 0x0 }, + { 0x11440, 0x0 }, + { 0x11540, 0x0 }, + { 0x11640, 0x0 }, + { 0x11740, 0x0 }, + { 0x11840, 0x0 }, + { 0x11030, 0x0 }, + { 0x11130, 0x0 }, + { 0x11230, 0x0 }, + { 0x11330, 0x0 }, + { 0x11430, 0x0 }, + { 0x11530, 0x0 }, + { 0x11630, 0x0 }, + { 0x11730, 0x0 }, + { 0x11830, 0x0 }, + { 0x12040, 0x0 }, + { 0x12140, 0x0 }, + { 0x12240, 0x0 }, + { 0x12340, 0x0 }, + { 0x12440, 0x0 }, + { 0x12540, 0x0 }, + { 0x12640, 0x0 }, + { 0x12740, 0x0 }, + { 0x12840, 0x0 }, + { 0x12030, 0x0 }, + { 0x12130, 0x0 }, + { 0x12230, 0x0 }, + { 0x12330, 0x0 }, + { 0x12430, 0x0 }, + { 0x12530, 0x0 }, + { 0x12630, 0x0 }, + { 0x12730, 0x0 }, + { 0x12830, 0x0 }, + { 0x13040, 0x0 }, + { 0x13140, 0x0 }, + { 0x13240, 0x0 }, + { 0x13340, 0x0 }, + { 0x13440, 0x0 }, + { 0x13540, 0x0 }, + { 0x13640, 0x0 }, + { 0x13740, 0x0 }, + { 0x13840, 0x0 }, + { 0x13030, 0x0 }, + { 0x13130, 0x0 }, + { 0x13230, 0x0 }, + { 0x13330, 0x0 }, + { 0x13430, 0x0 }, + { 0x13530, 0x0 }, + { 0x13630, 0x0 }, + { 0x13730, 0x0 }, + { 0x13830, 0x0 }, +}; + +uint32_t ddrphy_trained_csr_num = ARRAY_SIZE(ddrphy_trained_csr); diff --git a/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_train.c b/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_train.c new file mode 100644 index 000000000..08fed6178 --- /dev/null +++ b/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_train.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018 NXP + */ + +#include <common.h> +#include <log.h> +#include <linux/kernel.h> +#include <asm/arch/ddr.h> +#include <asm/arch/lpddr4_define.h> +#include <asm/arch/sys_proto.h> + +int ddr_cfg_phy(struct dram_timing_info *dram_timing) +{ + struct dram_cfg_param 
*dram_cfg;
+ struct dram_fsp_msg *fsp_msg;
+ unsigned int num;
+ int i = 0;
+ int j = 0;
+ int ret;
+
+ /* initialize PHY configuration */
+ dram_cfg = dram_timing->ddrphy_cfg;
+ num = dram_timing->ddrphy_cfg_num;
+ for (i = 0; i < num; i++) {
+ /* config phy reg */
+ dwc_ddrphy_apb_wr(dram_cfg->reg, dram_cfg->val);
+ dram_cfg++;
+ }
+
+ /* load the frequency setpoint message block config */
+ fsp_msg = dram_timing->fsp_msg;
+ for (i = 0; i < dram_timing->fsp_msg_num; i++) {
+ debug("DRAM PHY training for %dMTS\n", fsp_msg->drate);
+ /* set dram PHY input clocks to desired frequency */
+ ddrphy_init_set_dfi_clk(fsp_msg->drate);
+
+ /* load the dram training firmware image */
+ dwc_ddrphy_apb_wr(0xd0000, 0x0);
+ ddr_load_train_firmware(fsp_msg->fw_type);
+
+ /* load the frequency set point message block parameters */
+ dram_cfg = fsp_msg->fsp_cfg;
+ num = fsp_msg->fsp_cfg_num;
+ for (j = 0; j < num; j++) {
+ dwc_ddrphy_apb_wr(dram_cfg->reg, dram_cfg->val);
+ dram_cfg++;
+ }
+
+ /*
+ * -------------------- execute the firmware --------------------
+ * Running the firmware is simply a matter of taking the PMU out
+ * of reset and stall; the firmware then runs:
+ * 1. reset the PMU;
+ * 2. begin execution;
+ * 3. wait for training to complete;
+ * 4. read the message block result.
+ * --------------------------------------------------------------
+ */
+ dwc_ddrphy_apb_wr(0xd0000, 0x1);
+ dwc_ddrphy_apb_wr(0xd0099, 0x9);
+ dwc_ddrphy_apb_wr(0xd0099, 0x1);
+ dwc_ddrphy_apb_wr(0xd0099, 0x0);
+
+ /* Wait for the training firmware to complete */
+ ret = wait_ddrphy_training_complete();
+ if (ret)
+ return ret;
+
+ /* Halt the microcontroller. */
+ dwc_ddrphy_apb_wr(0xd0099, 0x1);
+
+ /* Read the Message Block results */
+ dwc_ddrphy_apb_wr(0xd0000, 0x0);
+
+ ddrphy_init_read_msg_block(fsp_msg->fw_type);
+
+ if (fsp_msg->fw_type != FW_2D_IMAGE)
+ get_trained_CDD(i);
+
+ dwc_ddrphy_apb_wr(0xd0000, 0x1);
+
+ fsp_msg++;
+ }
+
+ /* Load PHY Init Engine Image */
+ dram_cfg = dram_timing->ddrphy_pie;
+ num = dram_timing->ddrphy_pie_num;
+ for (i = 0; i < num; i++) {
+ dwc_ddrphy_apb_wr(dram_cfg->reg, dram_cfg->val);
+ dram_cfg++;
+ }
+
+ /* save the ddr PHY trained CSRs in memory for low-power use */
+ ddrphy_trained_csr_save(ddrphy_trained_csr, ddrphy_trained_csr_num);
+
+ return 0;
+}
diff --git a/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_utils.c b/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_utils.c
new file mode 100644
index 000000000..0f8baefb1
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/imx/imx8m/ddrphy_utils.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <log.h>
+#include <asm/io.h>
+#include <asm/arch/ddr.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/lpddr4_define.h>
+#include <asm/arch/sys_proto.h>
+
+static unsigned int g_cdd_rr_max[4];
+static unsigned int g_cdd_rw_max[4];
+static unsigned int g_cdd_wr_max[4];
+static unsigned int g_cdd_ww_max[4];
+
+static inline void poll_pmu_message_ready(void)
+{
+ unsigned int reg;
+
+ do {
+ reg = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + 4 * 0xd0004);
+ } while (reg & 0x1);
+}
+
+static inline void ack_pmu_message_receive(void)
+{
+ unsigned int reg;
+
+ reg32_write(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + 4 * 0xd0031, 0x0);
+
+ do {
+ reg = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + 4 * 0xd0004);
+ } while (!(reg & 0x1));
+
+ reg32_write(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + 4 * 0xd0031, 0x1);
+}
+
+static inline unsigned int
get_mail(void) +{ + unsigned int reg; + + poll_pmu_message_ready(); + + reg = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + 4 * 0xd0032); + + ack_pmu_message_receive(); + + return reg; +} + +static inline unsigned int get_stream_message(void) +{ + unsigned int reg, reg2; + + poll_pmu_message_ready(); + + reg = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + 4 * 0xd0032); + + reg2 = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + 4 * 0xd0034); + + reg2 = (reg2 << 16) | reg; + + ack_pmu_message_receive(); + + return reg2; +} + +static inline void decode_major_message(unsigned int mail) +{ + debug("[PMU Major message = 0x%08x]\n", mail); +} + +static inline void decode_streaming_message(void) +{ + unsigned int string_index, arg __maybe_unused; + int i = 0; + + string_index = get_stream_message(); + debug("PMU String index = 0x%08x\n", string_index); + while (i < (string_index & 0xffff)) { + arg = get_stream_message(); + debug("arg[%d] = 0x%08x\n", i, arg); + i++; + } + + debug("\n"); +} + +int wait_ddrphy_training_complete(void) +{ + unsigned int mail; + + while (1) { + mail = get_mail(); + decode_major_message(mail); + if (mail == 0x08) { + decode_streaming_message(); + } else if (mail == 0x07) { + debug("Training PASS\n"); + return 0; + } else if (mail == 0xff) { + debug("Training FAILED\n"); + return -1; + } + } +} + +void ddrphy_init_set_dfi_clk(unsigned int drate) +{ + switch (drate) { + case 4000: + dram_pll_init(MHZ(1000)); + dram_disable_bypass(); + break; + case 3200: + dram_pll_init(MHZ(800)); + dram_disable_bypass(); + break; + case 3000: + dram_pll_init(MHZ(750)); + dram_disable_bypass(); + break; + case 2400: + dram_pll_init(MHZ(600)); + dram_disable_bypass(); + break; + case 1600: + dram_pll_init(MHZ(400)); + dram_disable_bypass(); + break; + case 1066: + dram_pll_init(MHZ(266)); + dram_disable_bypass(); + break; + case 667: + dram_pll_init(MHZ(167)); + dram_disable_bypass(); + break; + case 400: + dram_enable_bypass(MHZ(400)); + break; + case 100: + dram_enable_bypass(MHZ(100)); + break; + default: + return; + } +} + +void ddrphy_init_read_msg_block(enum fw_type type) +{ +} + +void lpddr4_mr_write(unsigned int mr_rank, unsigned int mr_addr, + unsigned int mr_data) +{ + unsigned int tmp; + /* + * 1. Poll MRSTAT.mr_wr_busy until it is 0. + * This checks that there is no outstanding MR transaction. + * No writes should be performed to MRCTRL0 and MRCTRL1 if + * MRSTAT.mr_wr_busy = 1. + */ + do { + tmp = reg32_read(DDRC_MRSTAT(0)); + } while (tmp & 0x1); + /* + * 2. Write the MRCTRL0.mr_type, MRCTRL0.mr_addr, MRCTRL0.mr_rank and + * (for MRWs) MRCTRL1.mr_data to define the MR transaction. 
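+ * 3. Trigger the transaction by setting MRCTRL0.mr_wr (bit 31), which
+ *    is what the reg32setbit() call below does.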
+ */ + reg32_write(DDRC_MRCTRL0(0), (mr_rank << 4)); + reg32_write(DDRC_MRCTRL1(0), (mr_addr << 8) | mr_data); + reg32setbit(DDRC_MRCTRL0(0), 31); +} + +unsigned int lpddr4_mr_read(unsigned int mr_rank, unsigned int mr_addr) +{ + unsigned int tmp; + + reg32_write(DRC_PERF_MON_MRR0_DAT(0), 0x1); + do { + tmp = reg32_read(DDRC_MRSTAT(0)); + } while (tmp & 0x1); + + reg32_write(DDRC_MRCTRL0(0), (mr_rank << 4) | 0x1); + reg32_write(DDRC_MRCTRL1(0), (mr_addr << 8)); + reg32setbit(DDRC_MRCTRL0(0), 31); + do { + tmp = reg32_read(DRC_PERF_MON_MRR0_DAT(0)); + } while ((tmp & 0x8) == 0); + tmp = reg32_read(DRC_PERF_MON_MRR1_DAT(0)); + tmp = tmp & 0xff; + reg32_write(DRC_PERF_MON_MRR0_DAT(0), 0x4); + + return tmp; +} + +unsigned int look_for_max(unsigned int data[], + unsigned int addr_start, unsigned int addr_end) +{ + unsigned int i, imax = 0; + + for (i = addr_start; i <= addr_end; i++) { + if (((data[i] >> 7) == 0) && (data[i] > imax)) + imax = data[i]; + } + + return imax; +} + +void get_trained_CDD(u32 fsp) +{ + unsigned int i, ddr_type, tmp; + unsigned int cdd_cha[12], cdd_chb[12]; + unsigned int cdd_cha_rr_max, cdd_cha_rw_max, cdd_cha_wr_max, cdd_cha_ww_max; + unsigned int cdd_chb_rr_max, cdd_chb_rw_max, cdd_chb_wr_max, cdd_chb_ww_max; + + ddr_type = reg32_read(DDRC_MSTR(0)) & 0x3f; + if (ddr_type == 0x20) { + for (i = 0; i < 6; i++) { + tmp = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + (0x54013 + i) * 4); + cdd_cha[i * 2] = tmp & 0xff; + cdd_cha[i * 2 + 1] = (tmp >> 8) & 0xff; + } + + for (i = 0; i < 7; i++) { + tmp = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + (0x5402c + i) * 4); + if (i == 0) { + cdd_cha[0] = (tmp >> 8) & 0xff; + } else if (i == 6) { + cdd_cha[11] = tmp & 0xff; + } else { + cdd_chb[i * 2 - 1] = tmp & 0xff; + cdd_chb[i * 2] = (tmp >> 8) & 0xff; + } + } + + cdd_cha_rr_max = look_for_max(cdd_cha, 0, 1); + cdd_cha_rw_max = look_for_max(cdd_cha, 2, 5); + cdd_cha_wr_max = look_for_max(cdd_cha, 6, 9); + cdd_cha_ww_max = look_for_max(cdd_cha, 10, 11); + cdd_chb_rr_max = look_for_max(cdd_chb, 0, 1); + cdd_chb_rw_max = look_for_max(cdd_chb, 2, 5); + cdd_chb_wr_max = look_for_max(cdd_chb, 6, 9); + cdd_chb_ww_max = look_for_max(cdd_chb, 10, 11); + g_cdd_rr_max[fsp] = cdd_cha_rr_max > cdd_chb_rr_max ? cdd_cha_rr_max : cdd_chb_rr_max; + g_cdd_rw_max[fsp] = cdd_cha_rw_max > cdd_chb_rw_max ? cdd_cha_rw_max : cdd_chb_rw_max; + g_cdd_wr_max[fsp] = cdd_cha_wr_max > cdd_chb_wr_max ? cdd_cha_wr_max : cdd_chb_wr_max; + g_cdd_ww_max[fsp] = cdd_cha_ww_max > cdd_chb_ww_max ? cdd_cha_ww_max : cdd_chb_ww_max; + } else { + unsigned int ddr4_cdd[64]; + + for (i = 0; i < 29; i++) { + tmp = reg32_read(IP2APB_DDRPHY_IPS_BASE_ADDR(0) + (0x54012 + i) * 4); + ddr4_cdd[i * 2] = tmp & 0xff; + ddr4_cdd[i * 2 + 1] = (tmp >> 8) & 0xff; + } + + g_cdd_rr_max[fsp] = look_for_max(ddr4_cdd, 1, 12); + g_cdd_ww_max[fsp] = look_for_max(ddr4_cdd, 13, 24); + g_cdd_rw_max[fsp] = look_for_max(ddr4_cdd, 25, 40); + g_cdd_wr_max[fsp] = look_for_max(ddr4_cdd, 41, 56); + } +} + +void update_umctl2_rank_space_setting(unsigned int pstat_num) +{ + unsigned int i, ddr_type; + unsigned int addr_slot, rdata, tmp, tmp_t; + unsigned int ddrc_w2r, ddrc_r2w, ddrc_wr_gap, ddrc_rd_gap; + + ddr_type = reg32_read(DDRC_MSTR(0)) & 0x3f; + for (i = 0; i < pstat_num; i++) { + addr_slot = i ? 
(i + 1) * 0x1000 : 0;
+ if (ddr_type == 0x20) {
+ /* update r2w:[13:8], w2r:[5:0] */
+ rdata = reg32_read(DDRC_DRAMTMG2(0) + addr_slot);
+ ddrc_w2r = rdata & 0x3f;
+ if (is_imx8mp())
+ tmp = ddrc_w2r + (g_cdd_wr_max[i] >> 1);
+ else
+ tmp = ddrc_w2r + (g_cdd_wr_max[i] >> 1) + 1;
+ ddrc_w2r = (tmp > 0x3f) ? 0x3f : tmp;
+
+ ddrc_r2w = (rdata >> 8) & 0x3f;
+ if (is_imx8mp())
+ tmp = ddrc_r2w + (g_cdd_rw_max[i] >> 1);
+ else
+ tmp = ddrc_r2w + (g_cdd_rw_max[i] >> 1) + 1;
+ ddrc_r2w = (tmp > 0x3f) ? 0x3f : tmp;
+
+ tmp_t = (rdata & 0xffffc0c0) | (ddrc_r2w << 8) | ddrc_w2r;
+ reg32_write((DDRC_DRAMTMG2(0) + addr_slot), tmp_t);
+ } else {
+ /* update w2r:[5:0] */
+ rdata = reg32_read(DDRC_DRAMTMG9(0) + addr_slot);
+ ddrc_w2r = rdata & 0x3f;
+ if (is_imx8mp())
+ tmp = ddrc_w2r + (g_cdd_wr_max[i] >> 1);
+ else
+ tmp = ddrc_w2r + (g_cdd_wr_max[i] >> 1) + 1;
+ ddrc_w2r = (tmp > 0x3f) ? 0x3f : tmp;
+ tmp_t = (rdata & 0xffffffc0) | ddrc_w2r;
+ reg32_write((DDRC_DRAMTMG9(0) + addr_slot), tmp_t);
+
+ /* update r2w:[13:8] */
+ rdata = reg32_read(DDRC_DRAMTMG2(0) + addr_slot);
+ ddrc_r2w = (rdata >> 8) & 0x3f;
+ if (is_imx8mp())
+ tmp = ddrc_r2w + (g_cdd_rw_max[i] >> 1);
+ else
+ tmp = ddrc_r2w + (g_cdd_rw_max[i] >> 1) + 1;
+ ddrc_r2w = (tmp > 0x3f) ? 0x3f : tmp;
+
+ tmp_t = (rdata & 0xffffc0ff) | (ddrc_r2w << 8);
+ reg32_write((DDRC_DRAMTMG2(0) + addr_slot), tmp_t);
+ }
+
+ if (!is_imx8mq()) {
+ /* update rankctl: wr_gap [11:8], rd_gap [7:4]; quasi-dynamic (doc wrongly says static) */
+ rdata = reg32_read(DDRC_RANKCTL(0) + addr_slot);
+ ddrc_wr_gap = (rdata >> 8) & 0xf;
+ if (is_imx8mp())
+ tmp = ddrc_wr_gap + (g_cdd_ww_max[i] >> 1);
+ else
+ tmp = ddrc_wr_gap + (g_cdd_ww_max[i] >> 1) + 1;
+ ddrc_wr_gap = (tmp > 0xf) ? 0xf : tmp;
+
+ ddrc_rd_gap = (rdata >> 4) & 0xf;
+ if (is_imx8mp())
+ tmp = ddrc_rd_gap + (g_cdd_rr_max[i] >> 1);
+ else
+ tmp = ddrc_rd_gap + (g_cdd_rr_max[i] >> 1) + 1;
+ ddrc_rd_gap = (tmp > 0xf) ? 0xf : tmp;
+
+ tmp_t = (rdata & 0xfffff00f) | (ddrc_wr_gap << 8) | (ddrc_rd_gap << 4);
+ reg32_write((DDRC_RANKCTL(0) + addr_slot), tmp_t);
+ }
+ }
+
+ if (is_imx8mq()) {
+ /* update rankctl: wr_gap [11:8], rd_gap [7:4]; quasi-dynamic (doc wrongly says static) */
+ rdata = reg32_read(DDRC_RANKCTL(0));
+ ddrc_wr_gap = (rdata >> 8) & 0xf;
+ tmp = ddrc_wr_gap + (g_cdd_ww_max[0] >> 1) + 1;
+ ddrc_wr_gap = (tmp > 0xf) ? 0xf : tmp;
+
+ ddrc_rd_gap = (rdata >> 4) & 0xf;
+ tmp = ddrc_rd_gap + (g_cdd_rr_max[0] >> 1) + 1;
+ ddrc_rd_gap = (tmp > 0xf) ?
0xf : tmp;
+
+ tmp_t = (rdata & 0xfffff00f) | (ddrc_wr_gap << 8) | (ddrc_rd_gap << 4);
+ reg32_write(DDRC_RANKCTL(0), tmp_t);
+ }
+}
diff --git a/roms/u-boot/drivers/ddr/imx/imx8m/helper.c b/roms/u-boot/drivers/ddr/imx/imx8m/helper.c
new file mode 100644
index 000000000..f23904bf7
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/imx/imx8m/helper.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+#include <common.h>
+#include <log.h>
+#include <spl.h>
+#include <asm/global_data.h>
+#include <asm/io.h>
+#include <errno.h>
+#include <asm/arch/ddr.h>
+#include <asm/arch/lpddr4_define.h>
+#include <asm/sections.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define IMEM_LEN 32768 /* byte */
+#define DMEM_LEN 16384 /* byte */
+#define IMEM_2D_OFFSET 49152
+
+#define IMEM_OFFSET_ADDR 0x00050000
+#define DMEM_OFFSET_ADDR 0x00054000
+#define DDR_TRAIN_CODE_BASE_ADDR IP2APB_DDRPHY_IPS_BASE_ADDR(0)
+
+/* The PHY iMEM image is padded to 32 KB, so load the full region */
+void ddr_load_train_firmware(enum fw_type type)
+{
+ u32 tmp32, i;
+ u32 error = 0;
+ unsigned long pr_to32, pr_from32;
+ unsigned long fw_offset = type ? IMEM_2D_OFFSET : 0;
+ unsigned long imem_start = (unsigned long)&_end + fw_offset;
+ unsigned long dmem_start;
+
+#ifdef CONFIG_SPL_OF_CONTROL
+ if (gd->fdt_blob && !fdt_check_header(gd->fdt_blob)) {
+ imem_start = roundup((unsigned long)&_end +
+ fdt_totalsize(gd->fdt_blob), 4) +
+ fw_offset;
+ }
+#endif
+
+ dmem_start = imem_start + IMEM_LEN;
+
+ pr_from32 = imem_start;
+ pr_to32 = DDR_TRAIN_CODE_BASE_ADDR + 4 * IMEM_OFFSET_ADDR;
+ for (i = 0x0; i < IMEM_LEN; ) {
+ tmp32 = readl(pr_from32);
+ writew(tmp32 & 0x0000ffff, pr_to32);
+ pr_to32 += 4;
+ writew((tmp32 >> 16) & 0x0000ffff, pr_to32);
+ pr_to32 += 4;
+ pr_from32 += 4;
+ i += 4;
+ }
+
+ pr_from32 = dmem_start;
+ pr_to32 = DDR_TRAIN_CODE_BASE_ADDR + 4 * DMEM_OFFSET_ADDR;
+ for (i = 0x0; i < DMEM_LEN; ) {
+ tmp32 = readl(pr_from32);
+ writew(tmp32 & 0x0000ffff, pr_to32);
+ pr_to32 += 4;
+ writew((tmp32 >> 16) & 0x0000ffff, pr_to32);
+ pr_to32 += 4;
+ pr_from32 += 4;
+ i += 4;
+ }
+
+ debug("check ddr_pmu_train_imem code\n");
+ pr_from32 = imem_start;
+ pr_to32 = DDR_TRAIN_CODE_BASE_ADDR + 4 * IMEM_OFFSET_ADDR;
+ for (i = 0x0; i < IMEM_LEN; ) {
+ tmp32 = (readw(pr_to32) & 0x0000ffff);
+ pr_to32 += 4;
+ tmp32 += ((readw(pr_to32) & 0x0000ffff) << 16);
+
+ if (tmp32 != readl(pr_from32)) {
+ debug("%lx %lx\n", pr_from32, pr_to32);
+ error++;
+ }
+ pr_from32 += 4;
+ pr_to32 += 4;
+ i += 4;
+ }
+ if (error)
+ printf("check ddr_pmu_train_imem code fail=%d\n", error);
+ else
+ debug("check ddr_pmu_train_imem code pass\n");
+
+ debug("check ddr_pmu_train_dmem code\n");
+ pr_from32 = dmem_start;
+ pr_to32 = DDR_TRAIN_CODE_BASE_ADDR + 4 * DMEM_OFFSET_ADDR;
+ for (i = 0x0; i < DMEM_LEN;) {
+ tmp32 = (readw(pr_to32) & 0x0000ffff);
+ pr_to32 += 4;
+ tmp32 += ((readw(pr_to32) & 0x0000ffff) << 16);
+ if (tmp32 != readl(pr_from32)) {
+ debug("%lx %lx\n", pr_from32, pr_to32);
+ error++;
+ }
+ pr_from32 += 4;
+ pr_to32 += 4;
+ i += 4;
+ }
+
+ if (error)
+ printf("check ddr_pmu_train_dmem code fail=%d\n", error);
+ else
+ debug("check ddr_pmu_train_dmem code pass\n");
+}
+
+void ddrphy_trained_csr_save(struct dram_cfg_param *ddrphy_csr,
+ unsigned int num)
+{
+ int i = 0;
+
+ /* enable the ddrphy apb */
+ dwc_ddrphy_apb_wr(0xd0000, 0x0);
+ dwc_ddrphy_apb_wr(0xc0080, 0x3);
+ for (i = 0; i < num; i++) {
+ ddrphy_csr->val = dwc_ddrphy_apb_rd(ddrphy_csr->reg);
+ ddrphy_csr++;
+ }
+ /* disable the ddrphy apb
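(restoring 0xc0080/0xd0000 to their run-time values)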
*/ + dwc_ddrphy_apb_wr(0xc0080, 0x2); + dwc_ddrphy_apb_wr(0xd0000, 0x1); +} + +void dram_config_save(struct dram_timing_info *timing_info, + unsigned long saved_timing_base) +{ + int i = 0; + struct dram_timing_info *saved_timing = (struct dram_timing_info *)saved_timing_base; + struct dram_cfg_param *cfg; + + saved_timing->ddrc_cfg_num = timing_info->ddrc_cfg_num; + saved_timing->ddrphy_cfg_num = timing_info->ddrphy_cfg_num; + saved_timing->ddrphy_trained_csr_num = ddrphy_trained_csr_num; + saved_timing->ddrphy_pie_num = timing_info->ddrphy_pie_num; + + /* save the fsp table */ + for (i = 0; i < 4; i++) + saved_timing->fsp_table[i] = timing_info->fsp_table[i]; + + cfg = (struct dram_cfg_param *)(saved_timing_base + + sizeof(*timing_info)); + + /* save ddrc config */ + saved_timing->ddrc_cfg = cfg; + for (i = 0; i < timing_info->ddrc_cfg_num; i++) { + cfg->reg = timing_info->ddrc_cfg[i].reg; + cfg->val = timing_info->ddrc_cfg[i].val; + cfg++; + } + + /* save ddrphy config */ + saved_timing->ddrphy_cfg = cfg; + for (i = 0; i < timing_info->ddrphy_cfg_num; i++) { + cfg->reg = timing_info->ddrphy_cfg[i].reg; + cfg->val = timing_info->ddrphy_cfg[i].val; + cfg++; + } + + /* save the ddrphy csr */ + saved_timing->ddrphy_trained_csr = cfg; + for (i = 0; i < ddrphy_trained_csr_num; i++) { + cfg->reg = ddrphy_trained_csr[i].reg; + cfg->val = ddrphy_trained_csr[i].val; + cfg++; + } + + /* save the ddrphy pie */ + saved_timing->ddrphy_pie = cfg; + for (i = 0; i < timing_info->ddrphy_pie_num; i++) { + cfg->reg = timing_info->ddrphy_pie[i].reg; + cfg->val = timing_info->ddrphy_pie[i].val; + cfg++; + } +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/Makefile b/roms/u-boot/drivers/ddr/marvell/a38x/Makefile new file mode 100644 index 000000000..8251d6db6 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0+ + +obj-$(CONFIG_SPL_BUILD) += mv_ddr_plat.o +obj-$(CONFIG_SPL_BUILD) += mv_ddr_sys_env_lib.o +obj-$(CONFIG_SPL_BUILD) += ddr3_debug.o +obj-$(CONFIG_SPL_BUILD) += ddr3_init.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training_bist.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training_centralization.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training_db.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training_hw_algo.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training_ip_engine.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training_leveling.o +obj-$(CONFIG_SPL_BUILD) += ddr3_training_pbs.o +obj-$(CONFIG_SPL_BUILD) += mv_ddr_build_message.o +obj-$(CONFIG_SPL_BUILD) += mv_ddr_common.o +obj-$(CONFIG_SPL_BUILD) += mv_ddr_spd.o +obj-$(CONFIG_SPL_BUILD) += mv_ddr_topology.o +obj-$(CONFIG_SPL_BUILD) += xor.o diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_debug.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_debug.c new file mode 100644 index 000000000..f5fc964d6 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_debug.c @@ -0,0 +1,1386 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_training_db.h" +#include "mv_ddr_regs.h" + +u8 is_reg_dump = 0; +u8 debug_pbs = DEBUG_LEVEL_ERROR; + +/* + * API to change flags outside of the lib + */ +#if defined(SILENT_LIB) +void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level) +{ + /* do nothing */ +} +#else /* SILENT_LIB */ +/* Debug flags for other Training modules */ +u8 debug_training_static = DEBUG_LEVEL_ERROR; +u8 debug_training = DEBUG_LEVEL_ERROR; +u8 debug_leveling = DEBUG_LEVEL_ERROR; +u8 debug_centralization = DEBUG_LEVEL_ERROR; +u8 debug_training_ip = DEBUG_LEVEL_ERROR; +u8 debug_training_bist = DEBUG_LEVEL_ERROR; +u8 debug_training_hw_alg = DEBUG_LEVEL_ERROR; +u8 debug_training_access = DEBUG_LEVEL_ERROR; +u8 debug_training_device = DEBUG_LEVEL_ERROR; + + +void mv_ddr_user_log_level_set(enum ddr_lib_debug_block block) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + ddr3_hws_set_log_level(block, tm->debug_level); +}; + +void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level) +{ + switch (block) { + case DEBUG_BLOCK_STATIC: + debug_training_static = level; + break; + case DEBUG_BLOCK_TRAINING_MAIN: + debug_training = level; + break; + case DEBUG_BLOCK_LEVELING: + debug_leveling = level; + break; + case DEBUG_BLOCK_CENTRALIZATION: + debug_centralization = level; + break; + case DEBUG_BLOCK_PBS: + debug_pbs = level; + break; + case DEBUG_BLOCK_ALG: + debug_training_hw_alg = level; + break; + case DEBUG_BLOCK_DEVICE: + debug_training_device = level; + break; + case DEBUG_BLOCK_ACCESS: + debug_training_access = level; + break; + case DEBUG_STAGES_REG_DUMP: + if (level == DEBUG_LEVEL_TRACE) + is_reg_dump = 1; + else + is_reg_dump = 0; + break; + case DEBUG_BLOCK_ALL: + default: + debug_training_static = level; + debug_training = level; + debug_leveling = level; + debug_centralization = level; + debug_pbs = level; + debug_training_hw_alg = level; + debug_training_access = level; + debug_training_device = level; + } +} +#endif /* SILENT_LIB */ + +#if defined(DDR_VIEWER_TOOL) +static char *convert_freq(enum mv_ddr_freq freq); +#if defined(EXCLUDE_SWITCH_DEBUG) +u32 ctrl_sweepres[ADLL_LENGTH][MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u32 ctrl_adll[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM]; +u32 ctrl_adll1[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM]; +u32 ctrl_level_phase[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM]; +#endif /* EXCLUDE_SWITCH_DEBUG */ +#endif /* DDR_VIEWER_TOOL */ + +struct hws_tip_config_func_db config_func_info[MAX_DEVICE_NUM]; +u8 is_default_centralization = 0; +u8 is_tune_result = 0; +u8 is_validate_window_per_if = 0; +u8 is_validate_window_per_pup = 0; +u8 sweep_cnt = 1; +u32 is_bist_reset_bit = 1; +u8 is_run_leveling_sweep_tests; + +static struct hws_xsb_info xsb_info[MAX_DEVICE_NUM]; + +/* + * Dump Dunit & Phy registers + */ +int ddr3_tip_reg_dump(u32 dev_num) +{ + u32 if_id, reg_addr, data_value, bus_id; + u32 read_data[MAX_INTERFACE_NUM]; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + printf("-- dunit registers --\n"); + for (reg_addr = 0x1400; reg_addr < 0x19f0; reg_addr += 4) { + printf("0x%x ", reg_addr); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_UNICAST, + if_id, reg_addr, read_data, + MASK_ALL_BITS)); + printf("0x%x ", read_data[if_id]); + } + printf("\n"); + } + + 
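/*
+ * Now dump the PHY register file: for each interface, read every
+ * register once per data octet and once per control octet.
+ */
+ 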
printf("-- Phy registers --\n"); + for (reg_addr = 0; reg_addr <= 0xff; reg_addr++) { + printf("0x%x ", reg_addr); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_id = 0; + bus_id < octets_per_if_num; + bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_id, + DDR_PHY_DATA, reg_addr, + &data_value)); + printf("0x%x ", data_value); + } + for (bus_id = 0; + bus_id < octets_per_if_num; + bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_id, + DDR_PHY_CONTROL, reg_addr, + &data_value)); + printf("0x%x ", data_value); + } + } + printf("\n"); + } + + return MV_OK; +} + +/* + * Register access func registration + */ +int ddr3_tip_init_config_func(u32 dev_num, + struct hws_tip_config_func_db *config_func) +{ + if (config_func == NULL) + return MV_BAD_PARAM; + + memcpy(&config_func_info[dev_num], config_func, + sizeof(struct hws_tip_config_func_db)); + + return MV_OK; +} + +/* + * Get training result info pointer + */ +enum hws_result *ddr3_tip_get_result_ptr(u32 stage) +{ + return training_result[stage]; +} + +/* + * Device info read + */ +int ddr3_tip_get_device_info(u32 dev_num, struct ddr3_device_info *info_ptr) +{ + if (config_func_info[dev_num].tip_get_device_info_func != NULL) { + return config_func_info[dev_num]. + tip_get_device_info_func((u8) dev_num, info_ptr); + } + + return MV_FAIL; +} + +#if defined(DDR_VIEWER_TOOL) +/* + * Convert freq to character string + */ +static char *convert_freq(enum mv_ddr_freq freq) +{ + switch (freq) { + case MV_DDR_FREQ_LOW_FREQ: + return "MV_DDR_FREQ_LOW_FREQ"; + + case MV_DDR_FREQ_400: + return "400"; + + case MV_DDR_FREQ_533: + return "533"; + + case MV_DDR_FREQ_667: + return "667"; + + case MV_DDR_FREQ_800: + return "800"; + + case MV_DDR_FREQ_933: + return "933"; + + case MV_DDR_FREQ_1066: + return "1066"; + + case MV_DDR_FREQ_311: + return "311"; + + case MV_DDR_FREQ_333: + return "333"; + + case MV_DDR_FREQ_467: + return "467"; + + case MV_DDR_FREQ_850: + return "850"; + + case MV_DDR_FREQ_900: + return "900"; + + case MV_DDR_FREQ_360: + return "MV_DDR_FREQ_360"; + + case MV_DDR_FREQ_1000: + return "MV_DDR_FREQ_1000"; + + default: + return "Unknown Frequency"; + } +} + +/* + * Convert device ID to character string + */ +static char *convert_dev_id(u32 dev_id) +{ + switch (dev_id) { + case 0x6800: + return "A38xx"; + case 0x6900: + return "A39XX"; + case 0xf400: + return "AC3"; + case 0xfc00: + return "BC2"; + + default: + return "Unknown Device"; + } +} + +/* + * Convert device ID to character string + */ +static char *convert_mem_size(u32 dev_id) +{ + switch (dev_id) { + case 0: + return "512 MB"; + case 1: + return "1 GB"; + case 2: + return "2 GB"; + case 3: + return "4 GB"; + case 4: + return "8 GB"; + + default: + return "wrong mem size"; + } +} + +int print_device_info(u8 dev_num) +{ + struct ddr3_device_info info_ptr; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + CHECK_STATUS(ddr3_tip_get_device_info(dev_num, &info_ptr)); + printf("=== DDR setup START===\n"); + printf("\tDevice ID: %s\n", convert_dev_id(info_ptr.device_id)); + printf("\tDDR3 CK delay: %d\n", info_ptr.ck_delay); + print_topology(tm); + printf("=== DDR setup END===\n"); + + return MV_OK; +} + +void hws_ddr3_tip_sweep_test(int enable) +{ + if (enable) { + is_validate_window_per_if = 1; + is_validate_window_per_pup 
= 1; + debug_training = DEBUG_LEVEL_TRACE; + } else { + is_validate_window_per_if = 0; + is_validate_window_per_pup = 0; + } +} +#endif /* DDR_VIEWER_TOOL */ + +char *ddr3_tip_convert_tune_result(enum hws_result tune_result) +{ + switch (tune_result) { + case TEST_FAILED: + return "FAILED"; + case TEST_SUCCESS: + return "PASS"; + case NO_TEST_DONE: + return "NOT COMPLETED"; + default: + return "Un-KNOWN"; + } +} + +/* + * Print log info + */ +int ddr3_tip_print_log(u32 dev_num, u32 mem_addr) +{ + u32 if_id = 0; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + +#if defined(DDR_VIEWER_TOOL) + if ((is_validate_window_per_if != 0) || + (is_validate_window_per_pup != 0)) { + u32 is_pup_log = 0; + enum mv_ddr_freq freq; + + freq = tm->interface_params[first_active_if].memory_freq; + + is_pup_log = (is_validate_window_per_pup != 0) ? 1 : 0; + printf("===VALIDATE WINDOW LOG START===\n"); + printf("DDR Frequency: %s ======\n", convert_freq(freq)); + /* print sweep windows */ + ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 1, is_pup_log); + ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 0, is_pup_log); +#if defined(EXCLUDE_SWITCH_DEBUG) + if (is_run_leveling_sweep_tests == 1) { + ddr3_tip_run_leveling_sweep_test(dev_num, sweep_cnt, 0, is_pup_log); + ddr3_tip_run_leveling_sweep_test(dev_num, sweep_cnt, 1, is_pup_log); + } +#endif /* EXCLUDE_SWITCH_DEBUG */ + ddr3_tip_print_all_pbs_result(dev_num); + ddr3_tip_print_wl_supp_result(dev_num); + printf("===VALIDATE WINDOW LOG END ===\n"); + CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num)); + ddr3_tip_reg_dump(dev_num); + } +#endif /* DDR_VIEWER_TOOL */ + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("IF %d Status:\n", if_id)); + + if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tInit Controller: %s\n", + ddr3_tip_convert_tune_result + (training_result[INIT_CONTROLLER] + [if_id]))); + } + if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tLow freq Config: %s\n", + ddr3_tip_convert_tune_result + (training_result[SET_LOW_FREQ] + [if_id]))); + } + if (mask_tune_func & LOAD_PATTERN_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tLoad Pattern: %s\n", + ddr3_tip_convert_tune_result + (training_result[LOAD_PATTERN] + [if_id]))); + } + if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tMedium freq Config: %s\n", + ddr3_tip_convert_tune_result + (training_result[SET_MEDIUM_FREQ] + [if_id]))); + } + if (mask_tune_func & WRITE_LEVELING_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tWL: %s\n", + ddr3_tip_convert_tune_result + (training_result[WRITE_LEVELING] + [if_id]))); + } + if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tLoad Pattern: %s\n", + ddr3_tip_convert_tune_result + (training_result[LOAD_PATTERN_2] + [if_id]))); + } + if (mask_tune_func & READ_LEVELING_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tRL: %s\n", + ddr3_tip_convert_tune_result + (training_result[READ_LEVELING] + [if_id]))); + } + if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tWL Supp: %s\n", + ddr3_tip_convert_tune_result + (training_result[WRITE_LEVELING_SUPP] + [if_id]))); + } + if (mask_tune_func & PBS_RX_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tPBS RX: %s\n", + ddr3_tip_convert_tune_result + (training_result[PBS_RX] + 
[if_id]))); + } + if (mask_tune_func & PBS_TX_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tPBS TX: %s\n", + ddr3_tip_convert_tune_result + (training_result[PBS_TX] + [if_id]))); + } + if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tTarget freq Config: %s\n", + ddr3_tip_convert_tune_result + (training_result[SET_TARGET_FREQ] + [if_id]))); + } + if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tWL TF: %s\n", + ddr3_tip_convert_tune_result + (training_result[WRITE_LEVELING_TF] + [if_id]))); + } + if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tRL TF: %s\n", + ddr3_tip_convert_tune_result + (training_result[READ_LEVELING_TF] + [if_id]))); + } + if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tWL TF Supp: %s\n", + ddr3_tip_convert_tune_result + (training_result + [WRITE_LEVELING_SUPP_TF] + [if_id]))); + } + if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tCentr RX: %s\n", + ddr3_tip_convert_tune_result + (training_result[CENTRALIZATION_RX] + [if_id]))); + } + if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tVREF_CALIBRATION: %s\n", + ddr3_tip_convert_tune_result + (training_result[VREF_CALIBRATION] + [if_id]))); + } + if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("\tCentr TX: %s\n", + ddr3_tip_convert_tune_result + (training_result[CENTRALIZATION_TX] + [if_id]))); + } + } + + return MV_OK; +} + +#if !defined(EXCLUDE_DEBUG_PRINTS) +/* + * Print stability log info + */ +int ddr3_tip_print_stability_log(u32 dev_num) +{ + u8 if_id = 0, csindex = 0, bus_id = 0, idx = 0; + u32 reg_data; + u32 read_data[MAX_INTERFACE_NUM]; + unsigned int max_cs = mv_ddr_cs_num_get(); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* Title print */ + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + printf("Title: I/F# , Tj, Calibration_n0, Calibration_p0, Calibration_n1, Calibration_p1, Calibration_n2, Calibration_p2,"); + for (csindex = 0; csindex < max_cs; csindex++) { + printf("CS%d , ", csindex); + printf("\n"); + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + printf("VWTx, VWRx, WL_tot, WL_ADLL, WL_PH, RL_Tot, RL_ADLL, RL_PH, RL_Smp, Cen_tx, Cen_rx, Vref, DQVref,"); + printf("\t\t"); + for (idx = 0; idx < 11; idx++) + printf("PBSTx-Pad%d,", idx); + printf("\t\t"); + for (idx = 0; idx < 11; idx++) + printf("PBSRx-Pad%d,", idx); + } + } + printf("\n"); + + /* Data print */ + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + + printf("Data: %d,%d,", if_id, + (config_func_info[dev_num].tip_get_temperature != NULL) + ? (config_func_info[dev_num]. 
+		       tip_get_temperature(dev_num)) : (0));
+
+		CHECK_STATUS(ddr3_tip_if_read
+			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x14c8,
+			      read_data, MASK_ALL_BITS));
+		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
+		       ((read_data[if_id] & 0xfc00) >> 10));
+		CHECK_STATUS(ddr3_tip_if_read
+			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x17c8,
+			      read_data, MASK_ALL_BITS));
+		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
+		       ((read_data[if_id] & 0xfc00) >> 10));
+		CHECK_STATUS(ddr3_tip_if_read
+			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1dc8,
+			      read_data, MASK_ALL_BITS));
+		printf("%d,%d,", ((read_data[if_id] & 0x3f0000) >> 16),
+		       ((read_data[if_id] & 0xfc00000) >> 22));
+
+		for (csindex = 0; csindex < max_cs; csindex++) {
+			printf("CS%d , ", csindex);
+			for (bus_id = 0; bus_id < MAX_BUS_NUM; bus_id++) {
+				printf("\n");
+				VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
+				ddr3_tip_bus_read(dev_num, if_id,
+						  ACCESS_TYPE_UNICAST,
+						  bus_id, DDR_PHY_DATA,
+						  RESULT_PHY_REG +
+						  csindex, &reg_data);
+				printf("%d,%d,", (reg_data & 0x1f),
+				       ((reg_data & 0x3e0) >> 5));
+				/* WL */
+				ddr3_tip_bus_read(dev_num, if_id,
+						  ACCESS_TYPE_UNICAST,
+						  bus_id, DDR_PHY_DATA,
+						  WL_PHY_REG(csindex),
+						  &reg_data);
+				printf("%d,%d,%d,",
+				       (reg_data & 0x1f) +
+				       ((reg_data & 0x1c0) >> 6) * 32,
+				       (reg_data & 0x1f),
+				       (reg_data & 0x1c0) >> 6);
+				/* RL */
+				CHECK_STATUS(ddr3_tip_if_read
+					     (dev_num, ACCESS_TYPE_UNICAST,
+					      if_id,
+					      RD_DATA_SMPL_DLYS_REG,
+					      read_data, MASK_ALL_BITS));
+				read_data[if_id] =
+					(read_data[if_id] &
+					 (0x1f << (8 * csindex))) >>
+					(8 * csindex);
+				ddr3_tip_bus_read(dev_num, if_id,
+						  ACCESS_TYPE_UNICAST, bus_id,
+						  DDR_PHY_DATA,
+						  RL_PHY_REG(csindex),
+						  &reg_data);
+				printf("%d,%d,%d,%d,",
+				       (reg_data & 0x1f) +
+				       ((reg_data & 0x1c0) >> 6) * 32 +
+				       read_data[if_id] * 64,
+				       (reg_data & 0x1f),
+				       ((reg_data & 0x1c0) >> 6),
+				       read_data[if_id]);
+				/* Centralization */
+				ddr3_tip_bus_read(dev_num, if_id,
+						  ACCESS_TYPE_UNICAST, bus_id,
+						  DDR_PHY_DATA,
+						  CTX_PHY_REG(csindex),
+						  &reg_data);
+				printf("%d,", (reg_data & 0x3f));
+				ddr3_tip_bus_read(dev_num, if_id,
+						  ACCESS_TYPE_UNICAST, bus_id,
+						  DDR_PHY_DATA,
+						  CRX_PHY_REG(csindex),
+						  &reg_data);
+				printf("%d,", (reg_data & 0x1f));
+				/* Vref */
+				ddr3_tip_bus_read(dev_num, if_id,
+						  ACCESS_TYPE_UNICAST, bus_id,
+						  DDR_PHY_DATA,
+						  PAD_CFG_PHY_REG,
+						  &reg_data);
+				printf("%d,", (reg_data & 0x7));
+				/* DQVref */
+				/* Need to add the Read Function from device */
+				printf("%d,", 0);
+				printf("\t\t");
+				for (idx = 0; idx < 11; idx++) {
+					ddr3_tip_bus_read(dev_num, if_id,
+							  ACCESS_TYPE_UNICAST,
+							  bus_id, DDR_PHY_DATA,
+							  0x10 +
+							  16 * csindex +
+							  idx, &reg_data);
+					printf("%d,", (reg_data & 0x3f));
+				}
+				printf("\t\t");
+				for (idx = 0; idx < 11; idx++) {
+					ddr3_tip_bus_read(dev_num, if_id,
+							  ACCESS_TYPE_UNICAST,
+							  bus_id, DDR_PHY_DATA,
+							  0x50 +
+							  16 * csindex +
+							  idx, &reg_data);
+					printf("%d,", (reg_data & 0x3f));
+				}
+			}
+		}
+	}
+	printf("\n");
+
+	return MV_OK;
+}
+#endif /* EXCLUDE_DEBUG_PRINTS */
+
+/*
+ * Register XSB information
+ */
+int ddr3_tip_register_xsb_info(u32 dev_num, struct hws_xsb_info *xsb_info_table)
+{
+	memcpy(&xsb_info[dev_num], xsb_info_table, sizeof(struct hws_xsb_info));
+	return MV_OK;
+}
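
The stability log above keeps re-deriving the same packed layout: a leveling PHY register carries a 5-bit ADLL fine delay in bits [4:0] and a 3-bit phase in bits [8:6], each phase step being worth 32 ADLL taps (the read-leveling print adds a further sample-stage term worth 64 taps). A minimal standalone sketch of that decoding; the names wl_total_taps and reg_data are illustrative, not part of the driver:

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Decode a leveling PHY register the way the stability log does:
	 * bits [4:0] carry the ADLL fine delay, bits [8:6] the phase, and
	 * each phase step is worth 32 ADLL taps.
	 */
	static unsigned int wl_total_taps(uint32_t reg_data)
	{
		unsigned int adll = reg_data & 0x1f;		/* bits [4:0] */
		unsigned int phase = (reg_data & 0x1c0) >> 6;	/* bits [8:6] */

		return adll + phase * 32;
	}

	int main(void)
	{
		/* phase 3, adll 19 -> 3 * 32 + 19 = 115 taps */
		printf("total taps: %u\n", wl_total_taps(0x0d3));
		return 0;
	}
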
+
+/*
+ * Read ADLL Value
+ */
+int ddr3_tip_read_adll_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
+			     u32 reg_addr, u32 mask)
+{
+	u32 data_value;
+	u32 if_id = 0, bus_id = 0;
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	/*
+	 * multi CS support - reg_addr is calculated in calling function
+	 * with CS offset
+	 */
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (bus_id = 0; bus_id < octets_per_if_num;
+		     bus_id++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
+			CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
+						       ACCESS_TYPE_UNICAST,
+						       bus_id,
+						       DDR_PHY_DATA, reg_addr,
+						       &data_value));
+			pup_values[if_id *
+				   octets_per_if_num + bus_id] =
+				data_value & mask;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Write ADLL Value
+ */
+int ddr3_tip_write_adll_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
+			      u32 reg_addr)
+{
+	u32 if_id = 0, bus_id = 0;
+	u32 data;
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	/*
+	 * multi CS support - reg_addr is calculated in calling function
+	 * with CS offset
+	 */
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (bus_id = 0; bus_id < octets_per_if_num;
+		     bus_id++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
+			data = pup_values[if_id *
+					  octets_per_if_num +
+					  bus_id];
+			CHECK_STATUS(ddr3_tip_bus_write(dev_num,
+							ACCESS_TYPE_UNICAST,
+							if_id,
+							ACCESS_TYPE_UNICAST,
+							bus_id, DDR_PHY_DATA,
+							reg_addr, data));
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Read Phase Value
+ */
+int read_phase_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
+		     int reg_addr, u32 mask)
+{
+	u32 data_value;
+	u32 if_id = 0, bus_id = 0;
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	/* multi CS support - reg_addr is calculated in calling function with CS offset */
+	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
+			CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
+						       ACCESS_TYPE_UNICAST,
+						       bus_id,
+						       DDR_PHY_DATA, reg_addr,
+						       &data_value));
+			pup_values[if_id * octets_per_if_num + bus_id] = data_value & mask;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * Write Leveling Value
+ */
+int write_leveling_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
+			 u32 pup_ph_values[MAX_INTERFACE_NUM * MAX_BUS_NUM], int reg_addr)
+{
+	u32 if_id = 0, bus_id = 0;
+	u32 data;
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	/* multi CS support - reg_addr is calculated in calling function with CS offset */
+	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (bus_id = 0 ; bus_id < octets_per_if_num ; bus_id++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
+			data = pup_values[if_id * octets_per_if_num + bus_id] +
+			       pup_ph_values[if_id * octets_per_if_num + bus_id];
+			CHECK_STATUS(ddr3_tip_bus_write(dev_num,
+							ACCESS_TYPE_UNICAST,
+							if_id,
+							ACCESS_TYPE_UNICAST,
+							bus_id,
+							DDR_PHY_DATA,
+							reg_addr,
+							data));
+		}
+	}
+
+	return 0;
+}
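
The sweep code below uses this read/write pair as a save-and-restore mechanism around experiments that deliberately clobber the tuned delays. A hedged, self-contained sketch of that pattern and of the flat if_id * octets_per_if_num + bus_id indexing; the array bounds and the read_all/write_all stand-ins are illustrative only:

	#include <stdio.h>

	#define IFS	2	/* interfaces, illustration only */
	#define PUPS	4	/* octets per interface, illustration only */

	static unsigned int phy_reg[IFS][PUPS];	/* stand-in for the real PHY */

	/*
	 * Stand-ins for ddr3_tip_read_adll_value()/ddr3_tip_write_adll_value():
	 * both walk every interface/octet and address the flat result array
	 * with if_id * octets_per_if_num + bus_id, as the driver code does.
	 */
	static void read_all(unsigned int *vals)
	{
		for (int i = 0; i < IFS; i++)
			for (int p = 0; p < PUPS; p++)
				vals[i * PUPS + p] = phy_reg[i][p];
	}

	static void write_all(const unsigned int *vals)
	{
		for (int i = 0; i < IFS; i++)
			for (int p = 0; p < PUPS; p++)
				phy_reg[i][p] = vals[i * PUPS + p];
	}

	int main(void)
	{
		unsigned int saved[IFS * PUPS];

		read_all(saved);	/* snapshot the tuned delays */
		/* ... a sweep would clobber phy_reg here ... */
		write_all(saved);	/* restore the tuned delays */
		printf("restored %d entries\n", IFS * PUPS);
		return 0;
	}
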
+
+#if !defined(EXCLUDE_SWITCH_DEBUG)
+struct hws_tip_config_func_db config_func_info[MAX_DEVICE_NUM];
+u32 start_xsb_offset = 0;
+u8 is_rl_old = 0;
+u8 is_freq_old = 0;
+u8 is_dfs_disabled = 0;
+u32 default_centrlization_value = 0x12;
+u32 activate_select_before_run_alg = 1, activate_deselect_after_run_alg = 1,
+	rl_test = 0, reset_read_fifo = 0;
+int debug_acc = 0;
+u32 ctrl_sweepres[ADLL_LENGTH][MAX_INTERFACE_NUM][MAX_BUS_NUM];
+u32 ctrl_adll[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
+
+u32 xsb_test_table[][8] = {
+	{0x00000000, 0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555,
+	 0x66666666, 0x77777777},
+	{0x88888888, 0x99999999, 0xaaaaaaaa, 0xbbbbbbbb, 0xcccccccc, 0xdddddddd,
+	 0xeeeeeeee, 0xffffffff},
+	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+	 0x00000000, 0xffffffff},
+	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+	 0x00000000, 0xffffffff},
+	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+	 0x00000000, 0xffffffff},
+	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+	 0x00000000, 0xffffffff},
+	{0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
+	 0xffffffff, 0xffffffff},
+	{0x00000000, 0x00000000, 0x00000000, 0xffffffff, 0x00000000, 0x00000000,
+	 0x00000000, 0x00000000},
+	{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
+	 0xffffffff, 0xffffffff}
+};
+
+int ddr3_tip_print_adll(void)
+{
+	u32 bus_cnt = 0, if_id, data_p1, data_p2, ui_data3, dev_num = 0;
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (bus_cnt = 0; bus_cnt < octets_per_if_num;
+		     bus_cnt++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt);
+			CHECK_STATUS(ddr3_tip_bus_read
+				     (dev_num, if_id,
+				      ACCESS_TYPE_UNICAST, bus_cnt,
+				      DDR_PHY_DATA, 0x1, &data_p1));
+			CHECK_STATUS(ddr3_tip_bus_read
+				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
+				      bus_cnt, DDR_PHY_DATA, 0x2, &data_p2));
+			CHECK_STATUS(ddr3_tip_bus_read
+				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
+				      bus_cnt, DDR_PHY_DATA, 0x3, &ui_data3));
+			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
+					  (" IF %d bus_cnt %d phy_reg_1_data 0x%x phy_reg_2_data 0x%x phy_reg_3_data 0x%x\n",
+					   if_id, bus_cnt, data_p1, data_p2,
+					   ui_data3));
+		}
+	}
+
+	return MV_OK;
+}
+
+#endif /* EXCLUDE_SWITCH_DEBUG */
+
+#if defined(DDR_VIEWER_TOOL)
+/*
+ * Print ADLL
+ */
+int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM])
+{
+	u32 i, j;
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	for (j = 0; j < octets_per_if_num; j++) {
+		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, j);
+		for (i = 0; i < MAX_INTERFACE_NUM; i++)
+			printf("%d ,", adll[i * octets_per_if_num + j]);
+	}
+	printf("\n");
+
+	return MV_OK;
+}
+
+int print_ph(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM])
+{
+	u32 i, j;
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	for (j = 0; j < octets_per_if_num; j++) {
+		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, j);
+		for (i = 0; i < MAX_INTERFACE_NUM; i++)
+			printf("%d ,", adll[i * octets_per_if_num + j] >> 6);
+	}
+	printf("\n");
+
+	return MV_OK;
+}
+#endif /* DDR_VIEWER_TOOL */
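
ddr3_tip_compare() below restricts the 32-bit data compare to a single byte lane: byte_index 0xff selects all lanes, otherwise a 0xff mask is shifted into lane 0..3. A self-contained sketch of just that mask rule (lane_mask is an illustrative name, not a driver function):

	#include <stdio.h>

	/*
	 * Same lane-select rule as ddr3_tip_compare(): 0xff means "test all
	 * bytes", otherwise only byte lane 0..3 of the 32-bit word is compared.
	 */
	static unsigned int lane_mask(unsigned int byte_index)
	{
		return (byte_index == 0xff) ? 0xffffffffu
					    : 0xffu << (byte_index * 8);
	}

	int main(void)
	{
		for (unsigned int b = 0; b < 4; b++)
			printf("byte %u -> mask 0x%08x\n", b, lane_mask(b));
		printf("all    -> mask 0x%08x\n", lane_mask(0xff));
		return 0;
	}
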
+
+#if !defined(EXCLUDE_SWITCH_DEBUG)
+/* byte_index - only byte 0, 1, 2, or 3, 0xff - test all bytes */
+static u32 ddr3_tip_compare(u32 if_id, u32 *p_src, u32 *p_dst,
+			    u32 byte_index)
+{
+	u32 burst_cnt = 0, addr_offset, i_id;
+	int b_is_fail = 0;
+
+	addr_offset =
+		(byte_index ==
+		 0xff) ? (u32) 0xffffffff : (u32) (0xff << (byte_index * 8));
+	for (burst_cnt = 0; burst_cnt < EXT_ACCESS_BURST_LENGTH; burst_cnt++) {
+		if ((p_src[burst_cnt] & addr_offset) !=
+		    (p_dst[if_id] & addr_offset))
+			b_is_fail = 1;
+	}
+
+	if (b_is_fail == 1) {
+		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
+				  ("IF %d exp: ", if_id));
+		for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
+			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
+					  ("0x%8x ", p_src[i_id]));
+		}
+		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
+				  ("\n_i_f %d rcv: ", if_id));
+		for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
+			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
+					  ("(0x%8x ", p_dst[i_id]));
+		}
+		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("\n "));
+	}
+
+	return b_is_fail;
+}
+#endif /* EXCLUDE_SWITCH_DEBUG */
+
+#if defined(DDR_VIEWER_TOOL)
+/*
+ * Sweep validation
+ */
+int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
+			    u32 mode)
+{
+	u32 pup = 0, start_pup = 0, end_pup = 0;
+	u32 adll = 0, rep = 0, pattern_idx = 0;
+	u32 res[MAX_INTERFACE_NUM] = { 0 };
+	int if_id = 0;
+	u32 adll_value = 0;
+	u32 reg;
+	enum hws_access_type pup_access;
+	u32 cs;
+	unsigned int max_cs = mv_ddr_cs_num_get();
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	repeat_num = 2;
+
+	if (mode == 1) {
+		/* per pup */
+		start_pup = 0;
+		end_pup = octets_per_if_num - 1;
+		pup_access = ACCESS_TYPE_UNICAST;
+	} else {
+		start_pup = 0;
+		end_pup = 0;
+		pup_access = ACCESS_TYPE_MULTICAST;
+	}
+
+	for (cs = 0; cs < max_cs; cs++) {
+		reg = (direction == 0) ? CTX_PHY_REG(cs) : CRX_PHY_REG(cs);
+		for (adll = 0; adll < ADLL_LENGTH; adll++) {
+			for (if_id = 0;
+			     if_id <= MAX_INTERFACE_NUM - 1;
+			     if_id++) {
+				VALIDATE_IF_ACTIVE
+					(tm->if_act_mask,
+					 if_id);
+				for (pup = start_pup; pup <= end_pup; pup++) {
+					ctrl_sweepres[adll][if_id][pup] =
+						0;
+				}
+			}
+		}
+
+		for (adll = 0; adll < (MAX_INTERFACE_NUM * MAX_BUS_NUM); adll++)
+			ctrl_adll[adll] = 0;
+		/* Save DQS value (after algorithm run) */
+		ddr3_tip_read_adll_value(dev_num, ctrl_adll,
+					 reg, MASK_ALL_BITS);
+
+		/*
+		 * Sweep ADLL from 0 to 31 on all I/Fs and all pups, and
+		 * perform BIST on each stage.
+		 */
+		for (pup = start_pup; pup <= end_pup; pup++) {
+			for (adll = 0; adll < ADLL_LENGTH; adll++) {
+				for (rep = 0; rep < repeat_num; rep++) {
+					for (pattern_idx = PATTERN_KILLER_DQ0;
+					     pattern_idx < PATTERN_LAST;
+					     pattern_idx++) {
+						adll_value =
+							(direction == 0) ? (adll * 2) : adll;
+						CHECK_STATUS(ddr3_tip_bus_write
+							     (dev_num, ACCESS_TYPE_MULTICAST, 0,
+							      pup_access, pup, DDR_PHY_DATA,
+							      reg, adll_value));
+						hws_ddr3_run_bist(dev_num, sweep_pattern, res,
+								  cs);
+						/* ddr3_tip_reset_fifo_ptr(dev_num); */
+						for (if_id = 0;
+						     if_id < MAX_INTERFACE_NUM;
+						     if_id++) {
+							VALIDATE_IF_ACTIVE
+								(tm->if_act_mask,
+								 if_id);
+							ctrl_sweepres[adll][if_id][pup]
+								+= res[if_id];
+							if (mode == 1) {
+								CHECK_STATUS
+									(ddr3_tip_bus_write
+									 (dev_num,
+									  ACCESS_TYPE_UNICAST,
+									  if_id,
+									  ACCESS_TYPE_UNICAST,
+									  pup,
+									  DDR_PHY_DATA,
+									  reg,
+									  ctrl_adll[if_id *
+										    cs *
+										    octets_per_if_num +
+										    pup]));
+							}
+						}
+					}
+				}
+			}
+		}
+		printf("Final, CS %d,%s, Sweep, Result, Adll,", cs,
+		       ((direction == 0) ? 
"TX" : "RX")); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (mode == 1) { + for (pup = start_pup; pup <= end_pup; pup++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup); + printf("I/F%d-PHY%d , ", if_id, pup); + } + } else { + printf("I/F%d , ", if_id); + } + } + printf("\n"); + + for (adll = 0; adll < ADLL_LENGTH; adll++) { + adll_value = (direction == 0) ? (adll * 2) : adll; + printf("Final,%s, Sweep, Result, %d ,", + ((direction == 0) ? "TX" : "RX"), adll_value); + + for (if_id = 0; + if_id <= MAX_INTERFACE_NUM - 1; + if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup = start_pup; pup <= end_pup; pup++) { + printf("%8d , ", + ctrl_sweepres[adll][if_id] + [pup]); + } + } + printf("\n"); + } + + /* + * Write back to the phy the Rx DQS value, we store in + * the beginning. + */ + ddr3_tip_write_adll_value(dev_num, ctrl_adll, reg); + /* print adll results */ + ddr3_tip_read_adll_value(dev_num, ctrl_adll, reg, MASK_ALL_BITS); + printf("%s, DQS, ADLL,,,", (direction == 0) ? "Tx" : "Rx"); + print_adll(dev_num, ctrl_adll); + } + ddr3_tip_reset_fifo_ptr(dev_num); + + return 0; +} + +#if defined(EXCLUDE_SWITCH_DEBUG) +int ddr3_tip_run_leveling_sweep_test(int dev_num, u32 repeat_num, + u32 direction, u32 mode) +{ + u32 pup = 0, start_pup = 0, end_pup = 0, start_adll = 0; + u32 adll = 0, rep = 0, pattern_idx = 0; + u32 read_data[MAX_INTERFACE_NUM]; + u32 res[MAX_INTERFACE_NUM] = { 0 }; + int if_id = 0, gap = 0; + u32 adll_value = 0; + u32 reg; + enum hws_access_type pup_access; + u32 cs; + unsigned int max_cs = mv_ddr_cs_num_get(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (mode == 1) { /* per pup */ + start_pup = 0; + end_pup = octets_per_if_num - 1; + pup_access = ACCESS_TYPE_UNICAST; + } else { + start_pup = 0; + end_pup = 0; + pup_access = ACCESS_TYPE_MULTICAST; + } + + for (cs = 0; cs < max_cs; cs++) { + reg = (direction == 0) ? WL_PHY_REG(cs) : RL_PHY_REG(cs); + for (adll = 0; adll < ADLL_LENGTH; adll++) { + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup = start_pup; pup <= end_pup; pup++) + ctrl_sweepres[adll][if_id][pup] = 0; + } + } + + for (adll = 0; adll < MAX_INTERFACE_NUM * MAX_BUS_NUM; adll++) { + ctrl_adll[adll] = 0; + ctrl_level_phase[adll] = 0; + ctrl_adll1[adll] = 0; + } + + /* save leveling value after running algorithm */ + ddr3_tip_read_adll_value(dev_num, ctrl_adll, reg, 0x1f); + read_phase_value(dev_num, ctrl_level_phase, reg, 0x7 << 6); + + if (direction == 0) + ddr3_tip_read_adll_value(dev_num, ctrl_adll1, + CTX_PHY_REG(cs), MASK_ALL_BITS); + + /* Sweep ADLL from 0 to 31 on all interfaces, all pups, + * and perform BIST on each stage + */ + for (pup = start_pup; pup <= end_pup; pup++) { + for (adll = 0; adll < ADLL_LENGTH; adll++) { + for (rep = 0; rep < repeat_num; rep++) { + adll_value = (direction == 0) ? (adll * 2) : (adll * 3); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + start_adll = ctrl_adll[if_id * cs * octets_per_if_num + pup] + + (ctrl_level_phase[if_id * cs * + octets_per_if_num + + pup] >> 6) * 32; + + if (direction == 0) + start_adll = (start_adll > 32) ? (start_adll - 32) : 0; + else + start_adll = (start_adll > 48) ? 
(start_adll - 48) : 0; + + adll_value += start_adll; + + gap = ctrl_adll1[if_id * cs * octets_per_if_num + pup] - + ctrl_adll[if_id * cs * octets_per_if_num + pup]; + gap = (((adll_value % 32) + gap) % 64); + + adll_value = ((adll_value % 32) + + (((adll_value - (adll_value % 32)) / 32) << 6)); + + CHECK_STATUS(ddr3_tip_bus_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + pup_access, + pup, + DDR_PHY_DATA, + reg, + adll_value)); + if (direction == 0) + CHECK_STATUS(ddr3_tip_bus_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + pup_access, + pup, + DDR_PHY_DATA, + CTX_PHY_REG(cs), + gap)); + } + + for (pattern_idx = PATTERN_KILLER_DQ0; + pattern_idx < PATTERN_LAST; + pattern_idx++) { + hws_ddr3_run_bist(dev_num, sweep_pattern, res, cs); + ddr3_tip_reset_fifo_ptr(dev_num); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (pup != 4) { /* TODO: remove literal */ + ctrl_sweepres[adll][if_id][pup] += res[if_id]; + } else { + CHECK_STATUS(ddr3_tip_if_read(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + 0x1458, + read_data, + MASK_ALL_BITS)); + ctrl_sweepres[adll][if_id][pup] += read_data[if_id]; + CHECK_STATUS(ddr3_tip_if_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + 0x1458, + 0x0, + 0xFFFFFFFF)); + CHECK_STATUS(ddr3_tip_if_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + 0x145C, + 0x0, + 0xFFFFFFFF)); + } + } + } + } + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + start_adll = ctrl_adll[if_id * cs * octets_per_if_num + pup] + + ctrl_level_phase[if_id * cs * octets_per_if_num + pup]; + CHECK_STATUS(ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id, pup_access, pup, + DDR_PHY_DATA, reg, start_adll)); + if (direction == 0) + CHECK_STATUS(ddr3_tip_bus_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + pup_access, + pup, + DDR_PHY_DATA, + CTX_PHY_REG(cs), + ctrl_adll1[if_id * + cs * + octets_per_if_num + + pup])); + } + } + + printf("Final,CS %d,%s,Leveling,Result,Adll,", cs, ((direction == 0) ? "TX" : "RX")); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (mode == 1) { + for (pup = start_pup; pup <= end_pup; pup++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup); + printf("I/F%d-PHY%d , ", if_id, pup); + } + } else { + printf("I/F%d , ", if_id); + } + } + printf("\n"); + + for (adll = 0; adll < ADLL_LENGTH; adll++) { + adll_value = (direction == 0) ? ((adll * 2) - 32) : ((adll * 3) - 48); + printf("Final,%s,LevelingSweep,Result, %d ,", ((direction == 0) ? "TX" : "RX"), adll_value); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup = start_pup; pup <= end_pup; pup++) + printf("%8d , ", ctrl_sweepres[adll][if_id][pup]); + } + printf("\n"); + } + + /* write back to the phy the Rx DQS value, we store in the beginning */ + write_leveling_value(dev_num, ctrl_adll, ctrl_level_phase, reg); + if (direction == 0) + ddr3_tip_write_adll_value(dev_num, ctrl_adll1, CTX_PHY_REG(cs)); + + /* print adll results */ + ddr3_tip_read_adll_value(dev_num, ctrl_adll, reg, MASK_ALL_BITS); + printf("%s,DQS,Leveling,,,", (direction == 0) ? 
"Tx" : "Rx"); + print_adll(dev_num, ctrl_adll); + print_ph(dev_num, ctrl_level_phase); + } + ddr3_tip_reset_fifo_ptr(dev_num); + + return 0; +} +#endif /* EXCLUDE_SWITCH_DEBUG */ + +void print_topology(struct mv_ddr_topology_map *topology_db) +{ + u32 ui, uj; + u32 dev_num = 0; + + printf("\tinterface_mask: 0x%x\n", topology_db->if_act_mask); + printf("\tNumber of buses: 0x%x\n", + ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE)); + printf("\tbus_act_mask: 0x%x\n", topology_db->bus_act_mask); + + for (ui = 0; ui < MAX_INTERFACE_NUM; ui++) { + VALIDATE_IF_ACTIVE(topology_db->if_act_mask, ui); + printf("\n\tInterface ID: %d\n", ui); + printf("\t\tDDR Frequency: %s\n", + convert_freq(topology_db-> + interface_params[ui].memory_freq)); + printf("\t\tSpeed_bin: %d\n", + topology_db->interface_params[ui].speed_bin_index); + printf("\t\tBus_width: %d\n", + (4 << topology_db->interface_params[ui].bus_width)); + printf("\t\tMem_size: %s\n", + convert_mem_size(topology_db-> + interface_params[ui].memory_size)); + printf("\t\tCAS-WL: %d\n", + topology_db->interface_params[ui].cas_wl); + printf("\t\tCAS-L: %d\n", + topology_db->interface_params[ui].cas_l); + printf("\t\tTemperature: %d\n", + topology_db->interface_params[ui].interface_temp); + printf("\n"); + for (uj = 0; uj < 4; uj++) { + printf("\t\tBus %d parameters- CS Mask: 0x%x\t", uj, + topology_db->interface_params[ui]. + as_bus_params[uj].cs_bitmask); + printf("Mirror: 0x%x\t", + topology_db->interface_params[ui]. + as_bus_params[uj].mirror_enable_bitmask); + printf("DQS Swap is %s \t", + (topology_db-> + interface_params[ui].as_bus_params[uj]. + is_dqs_swap == 1) ? "enabled" : "disabled"); + printf("Ck Swap:%s\t", + (topology_db-> + interface_params[ui].as_bus_params[uj]. + is_ck_swap == 1) ? "enabled" : "disabled"); + printf("\n"); + } + } +} +#endif /* DDR_VIEWER_TOOL */ + +#if !defined(EXCLUDE_SWITCH_DEBUG) +/* + * Execute XSB Test transaction (rd/wr/both) + */ +int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type, + u32 read_type, u32 burst_length) +{ + u32 seq = 0, if_id = 0, addr, cnt; + int ret = MV_OK, ret_tmp; + u32 data_read[MAX_INTERFACE_NUM]; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + addr = mem_addr; + for (cnt = 0; cnt <= burst_length; cnt++) { + seq = (seq + 1) % 8; + if (write_type != 0) { + CHECK_STATUS(ddr3_tip_ext_write + (dev_num, if_id, addr, 1, + xsb_test_table[seq])); + } + if (read_type != 0) { + CHECK_STATUS(ddr3_tip_ext_read + (dev_num, if_id, addr, 1, + data_read)); + } + if ((read_type != 0) && (write_type != 0)) { + ret_tmp = + ddr3_tip_compare(if_id, + xsb_test_table[seq], + data_read, + 0xff); + addr += (EXT_ACCESS_BURST_LENGTH * 4); + ret = (ret != MV_OK) ? ret : ret_tmp; + } + } + } + + return ret; +} + +#else /*EXCLUDE_SWITCH_DEBUG */ +u32 start_xsb_offset = 0; + +int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type, + u32 read_type, u32 burst_length) +{ + return MV_OK; +} + +#endif /* EXCLUDE_SWITCH_DEBUG */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_init.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_init.c new file mode 100644 index 000000000..f878b4512 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_init.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates
+ */
+
+#include "ddr3_init.h"
+#include "mv_ddr_common.h"
+
+static char *ddr_type = "DDR3";
+
+/*
+ * generic_init_controller controls D-unit configuration:
+ * '1' - dynamic D-unit configuration,
+ * '0' - static D-unit configuration.
+ */
+u8 generic_init_controller = 1;
+
+static int mv_ddr_training_params_set(u8 dev_num);
+
+/*
+ * Name:	ddr3_init - Main DDR3 Init function
+ * Desc:	This routine initializes the DDR3 MC and runs HW training.
+ * Args:	None.
+ * Notes:
+ * Returns:	MV_OK on success, error code otherwise.
+ */
+int ddr3_init(void)
+{
+	int status;
+	int is_manual_cal_done;
+
+	/* Print mv_ddr version */
+	mv_ddr_ver_print();
+
+	mv_ddr_pre_training_fixup();
+
+	/* SoC/Board special initializations */
+	mv_ddr_pre_training_soc_config(ddr_type);
+
+	/* Set log level for training library */
+	mv_ddr_user_log_level_set(DEBUG_BLOCK_ALL);
+
+	mv_ddr_early_init();
+
+	if (mv_ddr_topology_map_update()) {
+		printf("mv_ddr: failed to update topology\n");
+		return MV_FAIL;
+	}
+
+	if (mv_ddr_early_init2() != MV_OK)
+		return MV_FAIL;
+
+	/* Set training algorithm's parameters */
+	status = mv_ddr_training_params_set(0);
+	if (MV_OK != status)
+		return status;
+
+	mv_ddr_mc_config();
+
+	is_manual_cal_done = mv_ddr_manual_cal_do();
+
+	mv_ddr_mc_init();
+
+	if (!is_manual_cal_done) {
+	}
+
+	status = ddr3_silicon_post_init();
+	if (MV_OK != status) {
+		printf("DDR3 Post Init - FAILED 0x%x\n", status);
+		return status;
+	}
+
+	/* PHY initialization (Training) */
+	status = hws_ddr3_tip_run_alg(0, ALGO_TYPE_DYNAMIC);
+	if (MV_OK != status) {
+		printf("%s Training Sequence - FAILED\n", ddr_type);
+		return status;
+	}
+
+	/* Post MC/PHY initializations */
+	mv_ddr_post_training_soc_config(ddr_type);
+
+	mv_ddr_post_training_fixup();
+
+	if (mv_ddr_is_ecc_ena())
+		mv_ddr_mem_scrubbing();
+
+	printf("mv_ddr: completed successfully\n");
+
+	return MV_OK;
+}
+
+/*
+ * Name:	mv_ddr_training_params_set
+ * Desc:	sets internal training params
+ * Args:	dev_num - device number
+ * Notes:
+ * Returns:	MV_OK on success, error code otherwise.
+ */
+static int mv_ddr_training_params_set(u8 dev_num)
+{
+	struct tune_train_params params;
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+	int status;
+	u32 cs_num;
+	int ck_delay;
+
+	cs_num = mv_ddr_cs_num_get();
+	ck_delay = mv_ddr_ck_delay_get();
+
+	/* NOTE: do not remove any field initialization */
+	params.ck_delay = TUNE_TRAINING_PARAMS_CK_DELAY;
+	params.phy_reg3_val = TUNE_TRAINING_PARAMS_PHYREG3VAL;
+	params.g_zpri_data = TUNE_TRAINING_PARAMS_PRI_DATA;
+	params.g_znri_data = TUNE_TRAINING_PARAMS_NRI_DATA;
+	params.g_zpri_ctrl = TUNE_TRAINING_PARAMS_PRI_CTRL;
+	params.g_znri_ctrl = TUNE_TRAINING_PARAMS_NRI_CTRL;
+	params.g_znodt_data = TUNE_TRAINING_PARAMS_N_ODT_DATA;
+	params.g_zpodt_ctrl = TUNE_TRAINING_PARAMS_P_ODT_CTRL;
+	params.g_znodt_ctrl = TUNE_TRAINING_PARAMS_N_ODT_CTRL;
+
+	params.g_zpodt_data = TUNE_TRAINING_PARAMS_P_ODT_DATA;
+	params.g_dic = TUNE_TRAINING_PARAMS_DIC;
+	params.g_rtt_nom = TUNE_TRAINING_PARAMS_RTT_NOM;
+	if (cs_num == 1) {
+		params.g_rtt_wr = TUNE_TRAINING_PARAMS_RTT_WR_1CS;
+		params.g_odt_config = TUNE_TRAINING_PARAMS_ODT_CONFIG_1CS;
+	} else {
+		params.g_rtt_wr = TUNE_TRAINING_PARAMS_RTT_WR_2CS;
+		params.g_odt_config = TUNE_TRAINING_PARAMS_ODT_CONFIG_2CS;
+	}
+
+	if (ck_delay > 0)
+		params.ck_delay = ck_delay;
+
+	/* Use platform specific override ODT value */
+	if (tm->odt_config)
+		params.g_odt_config = tm->odt_config;
+
+	status = ddr3_tip_tune_training_params(dev_num, &params);
+	if (MV_OK != status) {
+		printf("%s Training Sequence - FAILED\n", ddr_type);
+		return status;
+	}
+
+	return MV_OK;
+}
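
mv_ddr_training_params_set() layers its settings: the compile-time TUNE_TRAINING_PARAMS_* defaults come first, then the CS-count-dependent RTT/ODT pair, then the run-time ck_delay and topology ODT overrides. A compact sketch of that precedence; the struct layout and numeric values below are made up for illustration and are not the driver's:

	#include <stdio.h>

	/*
	 * Illustration of the override order: compile-time default first,
	 * then the CS-count choice, then the run-time overrides.
	 */
	struct params {
		int ck_delay;
		unsigned int odt_config;
	};

	static struct params pick_params(unsigned int cs_num, int ck_delay_override,
					 unsigned int topology_odt)
	{
		struct params p = { .ck_delay = 160, .odt_config = 0 };

		p.odt_config = (cs_num == 1) ? 0x10000 : 0x30000;

		if (ck_delay_override > 0)	/* board-specific CK delay wins */
			p.ck_delay = ck_delay_override;
		if (topology_odt)		/* platform ODT wins last */
			p.odt_config = topology_odt;

		return p;
	}

	int main(void)
	{
		struct params p = pick_params(2, 0, 0x20000);

		printf("ck_delay=%d odt_config=0x%x\n", p.ck_delay, p.odt_config);
		return 0;
	}

diff --git 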
a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_init.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_init.h new file mode 100644 index 000000000..055516b67 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_init.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _DDR3_INIT_H +#define _DDR3_INIT_H + +#include "ddr_ml_wrapper.h" +#include "mv_ddr_plat.h" + +#include "seq_exec.h" +#include "ddr3_logging_def.h" +#include "ddr3_training_hw_algo.h" +#include "ddr3_training_ip.h" +#include "ddr3_training_ip_centralization.h" +#include "ddr3_training_ip_engine.h" +#include "ddr3_training_ip_flow.h" +#include "ddr3_training_ip_pbs.h" +#include "ddr3_training_ip_prv_if.h" +#include "ddr3_training_leveling.h" +#include "xor.h" + +/* For checking function return values */ +#define CHECK_STATUS(orig_func) \ + { \ + int status; \ + status = orig_func; \ + if (MV_OK != status) \ + return status; \ + } + +#define SUB_VERSION 0 + +enum log_level { + MV_LOG_LEVEL_0, + MV_LOG_LEVEL_1, + MV_LOG_LEVEL_2, + MV_LOG_LEVEL_3 +}; + +/* TODO: consider to move to misl phy driver */ +#define MISL_PHY_DRV_P_OFFS 0x7 +#define MISL_PHY_DRV_N_OFFS 0x0 +#define MISL_PHY_ODT_P_OFFS 0x6 +#define MISL_PHY_ODT_N_OFFS 0x0 + +/* Globals */ +extern u8 debug_training, debug_calibration, debug_ddr4_centralization, + debug_tap_tuning, debug_dm_tuning; +extern u8 is_reg_dump; +extern u8 generic_init_controller; +/* list of allowed frequency listed in order of enum mv_ddr_freq */ +extern u32 is_pll_old; +extern struct pattern_info pattern_table[]; +extern u8 debug_centralization, debug_training_ip, debug_training_bist, + debug_pbs, debug_training_static, debug_leveling; +extern struct hws_tip_config_func_db config_func_info[]; +extern u8 twr_mask_table[]; +extern u8 cl_mask_table[]; +extern u8 cwl_mask_table[]; +extern u32 speed_bin_table_t_rc[]; +extern u32 speed_bin_table_t_rcd_t_rp[]; + +extern u32 vref_init_val; +extern u32 g_zpri_data; +extern u32 g_znri_data; +extern u32 g_zpri_ctrl; +extern u32 g_znri_ctrl; +extern u32 g_zpodt_data; +extern u32 g_znodt_data; +extern u32 g_zpodt_ctrl; +extern u32 g_znodt_ctrl; +extern u32 g_dic; +extern u32 g_odt_config; +extern u32 g_rtt_nom; +extern u32 g_rtt_wr; +extern u32 g_rtt_park; + +extern u8 debug_training_access; +extern u32 first_active_if; +extern u32 delay_enable, ck_delay, ca_delay; +extern u32 mask_tune_func; +extern u32 rl_version; +extern int rl_mid_freq_wa; +extern u8 calibration_update_control; /* 2 external only, 1 is internal only */ +extern enum mv_ddr_freq medium_freq; + +extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]; +extern enum mv_ddr_freq low_freq; +extern enum auto_tune_stage training_stage; +extern u32 is_pll_before_init; +extern u32 is_adll_calib_before_init; +extern u32 is_dfs_in_init; +extern int wl_debug_delay; +extern u32 silicon_delay[MAX_DEVICE_NUM]; +extern u32 start_pattern, end_pattern; +extern u32 phy_reg0_val; +extern u32 phy_reg1_val; +extern u32 phy_reg2_val; +extern u32 phy_reg3_val; +extern enum hws_pattern sweep_pattern; +extern enum hws_pattern pbs_pattern; +extern u32 g_znri_data; +extern u32 g_zpri_data; +extern u32 g_znri_ctrl; +extern u32 g_zpri_ctrl; +extern u32 finger_test, p_finger_start, p_finger_end, n_finger_start, + n_finger_end, p_finger_step, n_finger_step; +extern u32 mode_2t; +extern u32 xsb_validate_type; +extern u32 xsb_validation_base_address; +extern u32 odt_additional; +extern u32 debug_mode; +extern u32 
debug_dunit; +extern u32 clamp_tbl[]; +extern u32 freq_mask[MAX_DEVICE_NUM][MV_DDR_FREQ_LAST]; + +extern u32 maxt_poll_tries; +extern u32 is_bist_reset_bit; + +extern u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +extern u32 effective_cs; +extern int ddr3_tip_centr_skip_min_win_check; +extern u32 *dq_map_table; + +extern u8 debug_training_hw_alg; + +extern u32 start_xsb_offset; +extern u32 odt_config; + +extern u16 mask_results_dq_reg_map[]; + +extern u32 target_freq; +extern u32 dfs_low_freq; + +extern u32 nominal_avs; +extern u32 extension_avs; + + +/* Prototypes */ +int ddr3_init(void); +int ddr3_tip_enable_init_sequence(u32 dev_num); + +int ddr3_hws_hw_training(enum hws_algo_type algo_mode); +int mv_ddr_early_init(void); +int mv_ddr_early_init2(void); +int ddr3_silicon_post_init(void); +int ddr3_post_run_alg(void); +void ddr3_new_tip_ecc_scrub(void); + +int ddr3_tip_reg_write(u32 dev_num, u32 reg_addr, u32 data); +int ddr3_tip_reg_read(u32 dev_num, u32 reg_addr, u32 *data, u32 reg_mask); +int ddr3_silicon_get_ddr_target_freq(u32 *ddr_freq); + +int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]); +int print_ph(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]); +int read_phase_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM], + int reg_addr, u32 mask); +int write_leveling_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM], + u32 pup_ph_values[MAX_INTERFACE_NUM * MAX_BUS_NUM], int reg_addr); +int ddr3_tip_restore_dunit_regs(u32 dev_num); +void print_topology(struct mv_ddr_topology_map *tm); + +u32 mv_board_id_get(void); + +int ddr3_load_topology_map(void); +void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level); +void mv_ddr_user_log_level_set(enum ddr_lib_debug_block block); +int ddr3_tip_tune_training_params(u32 dev_num, + struct tune_train_params *params); +void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps); +void ddr3_fast_path_static_cs_size_config(u32 cs_ena); +u32 mv_board_id_index_get(u32 board_id); +void ddr3_set_log_level(u32 n_log_level); + +int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr); + +int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode); +int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode); +void mv_ddr_mc_config(void); +int mv_ddr_mc_init(void); +void mv_ddr_set_calib_controller(void); +/* TODO: consider to move to misl phy driver */ +unsigned int mv_ddr_misl_phy_drv_data_p_get(void); +unsigned int mv_ddr_misl_phy_drv_data_n_get(void); +unsigned int mv_ddr_misl_phy_drv_ctrl_p_get(void); +unsigned int mv_ddr_misl_phy_drv_ctrl_n_get(void); +unsigned int mv_ddr_misl_phy_odt_p_get(void); +unsigned int mv_ddr_misl_phy_odt_n_get(void); + +#endif /* _DDR3_INIT_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_logging_def.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_logging_def.h new file mode 100644 index 000000000..ad9da1cff --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_logging_def.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates
+ */
+
+#ifndef _DDR3_LOGGING_CONFIG_H
+#define _DDR3_LOGGING_CONFIG_H
+
+#ifdef SILENT_LIB
+#define DEBUG_TRAINING_BIST_ENGINE(level, s)
+#define DEBUG_TRAINING_IP(level, s)
+#define DEBUG_CENTRALIZATION_ENGINE(level, s)
+#define DEBUG_TRAINING_HW_ALG(level, s)
+#define DEBUG_TRAINING_IP_ENGINE(level, s)
+#define DEBUG_LEVELING(level, s)
+#define DEBUG_PBS_ENGINE(level, s)
+#define DEBUG_TRAINING_STATIC_IP(level, s)
+#define DEBUG_TRAINING_ACCESS(level, s)
+#else
+#ifdef LIB_FUNCTIONAL_DEBUG_ONLY
+#define DEBUG_TRAINING_BIST_ENGINE(level, s)
+#define DEBUG_TRAINING_IP_ENGINE(level, s)
+#define DEBUG_TRAINING_IP(level, s) \
+	if (level >= debug_training) \
+		printf s
+#define DEBUG_CENTRALIZATION_ENGINE(level, s) \
+	if (level >= debug_centralization) \
+		printf s
+#define DEBUG_TRAINING_HW_ALG(level, s) \
+	if (level >= debug_training_hw_alg) \
+		printf s
+#define DEBUG_LEVELING(level, s) \
+	if (level >= debug_leveling) \
+		printf s
+#define DEBUG_PBS_ENGINE(level, s) \
+	if (level >= debug_pbs) \
+		printf s
+#define DEBUG_TRAINING_STATIC_IP(level, s) \
+	if (level >= debug_training_static) \
+		printf s
+#define DEBUG_TRAINING_ACCESS(level, s) \
+	if (level >= debug_training_access) \
+		printf s
+#else
+#define DEBUG_TRAINING_BIST_ENGINE(level, s) \
+	if (level >= debug_training_bist) \
+		printf s
+
+#define DEBUG_TRAINING_IP_ENGINE(level, s) \
+	if (level >= debug_training_ip) \
+		printf s
+#define DEBUG_TRAINING_IP(level, s) \
+	if (level >= debug_training) \
+		printf s
+#define DEBUG_CENTRALIZATION_ENGINE(level, s) \
+	if (level >= debug_centralization) \
+		printf s
+#define DEBUG_TRAINING_HW_ALG(level, s) \
+	if (level >= debug_training_hw_alg) \
+		printf s
+#define DEBUG_LEVELING(level, s) \
+	if (level >= debug_leveling) \
+		printf s
+#define DEBUG_PBS_ENGINE(level, s) \
+	if (level >= debug_pbs) \
+		printf s
+#define DEBUG_TRAINING_STATIC_IP(level, s) \
+	if (level >= debug_training_static) \
+		printf s
+#define DEBUG_TRAINING_ACCESS(level, s) \
+	if (level >= debug_training_access) \
+		printf s
+#endif
+#endif
+
+
+/* Logging defines */
+enum mv_ddr_debug_level {
+	DEBUG_LEVEL_TRACE = 1,
+	DEBUG_LEVEL_INFO = 2,
+	DEBUG_LEVEL_ERROR = 3,
+	DEBUG_LEVEL_LAST
+};
+
+enum ddr_lib_debug_block {
+	DEBUG_BLOCK_STATIC,
+	DEBUG_BLOCK_TRAINING_MAIN,
+	DEBUG_BLOCK_LEVELING,
+	DEBUG_BLOCK_CENTRALIZATION,
+	DEBUG_BLOCK_PBS,
+	DEBUG_BLOCK_IP,
+	DEBUG_BLOCK_BIST,
+	DEBUG_BLOCK_ALG,
+	DEBUG_BLOCK_DEVICE,
+	DEBUG_BLOCK_ACCESS,
+	DEBUG_STAGES_REG_DUMP,
+	/* All excluding IP and REG_DUMP; those should be enabled separately */
+	DEBUG_BLOCK_ALL
+};
+
+int ddr3_tip_print_log(u32 dev_num, u32 mem_addr);
+int ddr3_tip_print_stability_log(u32 dev_num);
+
+#endif /* _DDR3_LOGGING_CONFIG_H */
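
All of these macros rely on the call sites passing the printf argument list in its own parentheses, so the single macro parameter s can expand to a full (fmt, args...) call. A self-contained model of the gating; it is wrapped in do { } while (0) here for if/else safety, which the bare driver macros above omit, so those can misbehave inside an unbraced if/else:

	#include <stdio.h>

	static int debug_training = 2;	/* stand-in for the driver's global */

	#define DEBUG_LEVEL_TRACE	1
	#define DEBUG_LEVEL_INFO	2
	#define DEBUG_LEVEL_ERROR	3

	/* "printf s" expands to a full call because s carries its own parens */
	#define DEBUG_TRAINING_IP(level, s)		\
		do {					\
			if ((level) >= debug_training)	\
				printf s;		\
		} while (0)

	int main(void)
	{
		DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, ("dropped at level %d\n", 1));
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("printed at level %d\n", 3));
		return 0;
	}
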
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_patterns_64bit.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_patterns_64bit.h new file mode 100644 index 000000000..1e2260b87 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_patterns_64bit.h @@ -0,0 +1,924 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates */
+
+#ifndef __DDR3_PATTERNS_64_H
+#define __DDR3_PATTERNS_64_H
+
+#define FAB_OPT 21
+/*
+ * Patterns Declarations
+ */
+
+u32 wl_sup_pattern[LEN_WL_SUP_PATTERN] __aligned(32) = {
+	0x04030201, 0x08070605, 0x0c0b0a09, 0x100f0e0d,
+	0x14131211, 0x18171615, 0x1c1b1a19, 0x201f1e1d,
+	0x24232221, 0x28272625, 0x2c2b2a29, 0x302f2e2d,
+	0x34333231, 0x38373635, 0x3c3b3a39, 0x403f3e3d,
+	0x44434241, 0x48474645, 0x4c4b4a49, 0x504f4e4d,
+	0x54535251, 0x58575655, 0x5c5b5a59, 0x605f5e5d,
+	0x64636261, 0x68676665, 0x6c6b6a69, 0x706f6e6d,
+	0x74737271, 0x78777675, 0x7c7b7a79, 0x807f7e7d
+};
+
+u32 pbs_pattern_32b[2][LEN_PBS_PATTERN] __aligned(32) = {
+	{
+		0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555,
+		0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555,
+		0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555,
+		0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555
+	},
+	{
+		0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa,
+		0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa,
+		0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa,
+		0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa
+	}
+};
+
+u32 pbs_pattern_64b[2][LEN_PBS_PATTERN] __aligned(32) = {
+	{
+		0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555,
+		0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555,
+		0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555,
+		0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555
+	},
+	{
+		0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa,
+		0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa,
+		0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa,
+		0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa
+	}
+};
+
+u32 rl_pattern[LEN_STD_PATTERN] __aligned(32) = {
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x00000000, 0x00000000, 0x00000000, 0x00000000,
+	0x01010101, 0x01010101, 0x01010101, 0x01010101
+};
+
+u32 killer_pattern_32b[DQ_NUM][LEN_KILLER_PATTERN] __aligned(32) = {
+	{
+		0x01010101, 0x00000000, 0x01010101, 0xffffffff,
+		0x01010101, 0x00000000, 0x01010101, 0xffffffff,
+		0xfefefefe, 0xfefefefe, 0x01010101, 0xfefefefe,
+		0xfefefefe, 0xfefefefe, 0x01010101, 0xfefefefe,
+		0x01010101, 0xfefefefe, 0x01010101, 0x01010101,
+		0x01010101, 0xfefefefe, 0x01010101, 0x01010101,
+		0xfefefefe, 0x01010101, 0xfefefefe, 0x00000000,
+		0xfefefefe, 0x01010101, 0xfefefefe, 0x00000000,
+		0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
+		0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
+		0xffffffff, 0x00000000, 0xffffffff, 0x01010101,
+		0xffffffff, 0x00000000, 0xffffffff, 0x01010101,
+		0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+		0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+		0x00000000, 0x00000000, 0x00000000, 0xfefefefe,
+		0x00000000, 0x00000000, 0x00000000, 0xfefefefe,
+		0xfefefefe, 0xffffffff, 0x00000000, 0x00000000,
+		0xfefefefe, 0xffffffff, 0x00000000, 0x00000000,
+		0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
+		0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
+		0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
+		0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
+		0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+		0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
+		0xfefefefe, 0x00000000, 0xfefefefe, 0x00000000,
+		0xfefefefe, 0x00000000, 0xfefefefe, 0x00000000,
+		0x00000000, 0xffffffff, 0xffffffff, 0x01010101,
+		0x00000000, 0xffffffff, 0xffffffff, 0x01010101,
+		0xffffffff, 0xffffffff, 0x01010101, 0x00000000,
+		0xffffffff, 0xffffffff, 0x01010101, 0x00000000,
+		0x01010101, 0xffffffff, 0xfefefefe, 0xfefefefe,
+		0x01010101, 0xffffffff, 0xfefefefe, 0xfefefefe
+	},
+	{
+		0x02020202, 0x00000000, 0x02020202, 
0xffffffff, + 0x02020202, 0x00000000, 0x02020202, 0xffffffff, + 0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0xfdfdfdfd, + 0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0xfdfdfdfd, + 0x02020202, 0xfdfdfdfd, 0x02020202, 0x02020202, + 0x02020202, 0xfdfdfdfd, 0x02020202, 0x02020202, + 0xfdfdfdfd, 0x02020202, 0xfdfdfdfd, 0x00000000, + 0xfdfdfdfd, 0x02020202, 0xfdfdfdfd, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x02020202, + 0xffffffff, 0x00000000, 0xffffffff, 0x02020202, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0xfdfdfdfd, + 0x00000000, 0x00000000, 0x00000000, 0xfdfdfdfd, + 0xfdfdfdfd, 0xffffffff, 0x00000000, 0x00000000, + 0xfdfdfdfd, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0xfdfdfdfd, 0x00000000, 0xfdfdfdfd, 0x00000000, + 0xfdfdfdfd, 0x00000000, 0xfdfdfdfd, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x02020202, + 0x00000000, 0xffffffff, 0xffffffff, 0x02020202, + 0xffffffff, 0xffffffff, 0x02020202, 0x00000000, + 0xffffffff, 0xffffffff, 0x02020202, 0x00000000, + 0x02020202, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd, + 0x02020202, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd + }, + { + 0x04040404, 0x00000000, 0x04040404, 0xffffffff, + 0x04040404, 0x00000000, 0x04040404, 0xffffffff, + 0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0xfbfbfbfb, + 0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0xfbfbfbfb, + 0x04040404, 0xfbfbfbfb, 0x04040404, 0x04040404, + 0x04040404, 0xfbfbfbfb, 0x04040404, 0x04040404, + 0xfbfbfbfb, 0x04040404, 0xfbfbfbfb, 0x00000000, + 0xfbfbfbfb, 0x04040404, 0xfbfbfbfb, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x04040404, + 0xffffffff, 0x00000000, 0xffffffff, 0x04040404, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0xfbfbfbfb, + 0x00000000, 0x00000000, 0x00000000, 0xfbfbfbfb, + 0xfbfbfbfb, 0xffffffff, 0x00000000, 0x00000000, + 0xfbfbfbfb, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0xfbfbfbfb, 0x00000000, 0xfbfbfbfb, 0x00000000, + 0xfbfbfbfb, 0x00000000, 0xfbfbfbfb, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x04040404, + 0x00000000, 0xffffffff, 0xffffffff, 0x04040404, + 0xffffffff, 0xffffffff, 0x04040404, 0x00000000, + 0xffffffff, 0xffffffff, 0x04040404, 0x00000000, + 0x04040404, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb, + 0x04040404, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb + }, + { + 0x08080808, 0x00000000, 0x08080808, 0xffffffff, + 0x08080808, 0x00000000, 0x08080808, 0xffffffff, + 0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0xf7f7f7f7, + 0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0xf7f7f7f7, + 0x08080808, 0xf7f7f7f7, 0x08080808, 0x08080808, + 0x08080808, 0xf7f7f7f7, 0x08080808, 0x08080808, + 0xf7f7f7f7, 0x08080808, 0xf7f7f7f7, 0x00000000, + 0xf7f7f7f7, 0x08080808, 
0xf7f7f7f7, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x08080808, + 0xffffffff, 0x00000000, 0xffffffff, 0x08080808, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0xf7f7f7f7, + 0x00000000, 0x00000000, 0x00000000, 0xf7f7f7f7, + 0xf7f7f7f7, 0xffffffff, 0x00000000, 0x00000000, + 0xf7f7f7f7, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0xf7f7f7f7, 0x00000000, 0xf7f7f7f7, 0x00000000, + 0xf7f7f7f7, 0x00000000, 0xf7f7f7f7, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x08080808, + 0x00000000, 0xffffffff, 0xffffffff, 0x08080808, + 0xffffffff, 0xffffffff, 0x08080808, 0x00000000, + 0xffffffff, 0xffffffff, 0x08080808, 0x00000000, + 0x08080808, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7, + 0x08080808, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7 + }, + { + 0x10101010, 0x00000000, 0x10101010, 0xffffffff, + 0x10101010, 0x00000000, 0x10101010, 0xffffffff, + 0xefefefef, 0xefefefef, 0x10101010, 0xefefefef, + 0xefefefef, 0xefefefef, 0x10101010, 0xefefefef, + 0x10101010, 0xefefefef, 0x10101010, 0x10101010, + 0x10101010, 0xefefefef, 0x10101010, 0x10101010, + 0xefefefef, 0x10101010, 0xefefefef, 0x00000000, + 0xefefefef, 0x10101010, 0xefefefef, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x10101010, + 0xffffffff, 0x00000000, 0xffffffff, 0x10101010, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0xefefefef, + 0x00000000, 0x00000000, 0x00000000, 0xefefefef, + 0xefefefef, 0xffffffff, 0x00000000, 0x00000000, + 0xefefefef, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0xefefefef, 0x00000000, 0xefefefef, 0x00000000, + 0xefefefef, 0x00000000, 0xefefefef, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x10101010, + 0x00000000, 0xffffffff, 0xffffffff, 0x10101010, + 0xffffffff, 0xffffffff, 0x10101010, 0x00000000, + 0xffffffff, 0xffffffff, 0x10101010, 0x00000000, + 0x10101010, 0xffffffff, 0xefefefef, 0xefefefef, + 0x10101010, 0xffffffff, 0xefefefef, 0xefefefef + }, + { + 0x20202020, 0x00000000, 0x20202020, 0xffffffff, + 0x20202020, 0x00000000, 0x20202020, 0xffffffff, + 0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0xdfdfdfdf, + 0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0xdfdfdfdf, + 0x20202020, 0xdfdfdfdf, 0x20202020, 0x20202020, + 0x20202020, 0xdfdfdfdf, 0x20202020, 0x20202020, + 0xdfdfdfdf, 0x20202020, 0xdfdfdfdf, 0x00000000, + 0xdfdfdfdf, 0x20202020, 0xdfdfdfdf, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x20202020, + 0xffffffff, 0x00000000, 0xffffffff, 0x20202020, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 
0x00000000, 0x00000000, 0xdfdfdfdf, + 0x00000000, 0x00000000, 0x00000000, 0xdfdfdfdf, + 0xdfdfdfdf, 0xffffffff, 0x00000000, 0x00000000, + 0xdfdfdfdf, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0xdfdfdfdf, 0x00000000, 0xdfdfdfdf, 0x00000000, + 0xdfdfdfdf, 0x00000000, 0xdfdfdfdf, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x20202020, + 0x00000000, 0xffffffff, 0xffffffff, 0x20202020, + 0xffffffff, 0xffffffff, 0x20202020, 0x00000000, + 0xffffffff, 0xffffffff, 0x20202020, 0x00000000, + 0x20202020, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf, + 0x20202020, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf + }, + { + 0x40404040, 0x00000000, 0x40404040, 0xffffffff, + 0x40404040, 0x00000000, 0x40404040, 0xffffffff, + 0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0xbfbfbfbf, + 0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0xbfbfbfbf, + 0x40404040, 0xbfbfbfbf, 0x40404040, 0x40404040, + 0x40404040, 0xbfbfbfbf, 0x40404040, 0x40404040, + 0xbfbfbfbf, 0x40404040, 0xbfbfbfbf, 0x00000000, + 0xbfbfbfbf, 0x40404040, 0xbfbfbfbf, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x40404040, + 0xffffffff, 0x00000000, 0xffffffff, 0x40404040, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0xbfbfbfbf, + 0x00000000, 0x00000000, 0x00000000, 0xbfbfbfbf, + 0xbfbfbfbf, 0xffffffff, 0x00000000, 0x00000000, + 0xbfbfbfbf, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0xbfbfbfbf, 0x00000000, 0xbfbfbfbf, 0x00000000, + 0xbfbfbfbf, 0x00000000, 0xbfbfbfbf, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x40404040, + 0x00000000, 0xffffffff, 0xffffffff, 0x40404040, + 0xffffffff, 0xffffffff, 0x40404040, 0x00000000, + 0xffffffff, 0xffffffff, 0x40404040, 0x00000000, + 0x40404040, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf, + 0x40404040, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf + }, + { + 0x80808080, 0x00000000, 0x80808080, 0xffffffff, + 0x80808080, 0x00000000, 0x80808080, 0xffffffff, + 0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x7f7f7f7f, + 0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x7f7f7f7f, + 0x80808080, 0x7f7f7f7f, 0x80808080, 0x80808080, + 0x80808080, 0x7f7f7f7f, 0x80808080, 0x80808080, + 0x7f7f7f7f, 0x80808080, 0x7f7f7f7f, 0x00000000, + 0x7f7f7f7f, 0x80808080, 0x7f7f7f7f, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, + 0xffffffff, 0x00000000, 0xffffffff, 0x80808080, + 0xffffffff, 0x00000000, 0xffffffff, 0x80808080, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x7f7f7f7f, + 0x00000000, 0x00000000, 0x00000000, 0x7f7f7f7f, + 0x7f7f7f7f, 0xffffffff, 0x00000000, 0x00000000, + 0x7f7f7f7f, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 
0x00000000, 0xffffffff, 0xffffffff, 0x00000000, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, + 0x7f7f7f7f, 0x00000000, 0x7f7f7f7f, 0x00000000, + 0x7f7f7f7f, 0x00000000, 0x7f7f7f7f, 0x00000000, + 0x00000000, 0xffffffff, 0xffffffff, 0x80808080, + 0x00000000, 0xffffffff, 0xffffffff, 0x80808080, + 0xffffffff, 0xffffffff, 0x80808080, 0x00000000, + 0xffffffff, 0xffffffff, 0x80808080, 0x00000000, + 0x80808080, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f, + 0x80808080, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f + } +}; + +u32 killer_pattern_64b[DQ_NUM][LEN_KILLER_PATTERN] __aligned(32) = { + { + 0x01010101, 0x01010101, 0x00000000, 0x00000000, + 0x01010101, 0x01010101, 0xffffffff, 0xffffffff, + 0xfefefefe, 0xfefefefe, 0xfefefefe, 0xfefefefe, + 0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe, + 0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe, + 0x01010101, 0x01010101, 0x01010101, 0x01010101, + 0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101, + 0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x01010101, 0x01010101, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xfefefefe, 0xfefefefe, + 0xfefefefe, 0xfefefefe, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000, + 0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x01010101, 0x01010101, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x01010101, 0x01010101, 0x00000000, 0x00000000, + 0x01010101, 0x01010101, 0xffffffff, 0xffffffff, + 0xfefefefe, 0xfefefefe, 0xfefefefe, 0xfefefefe + }, + { + 0x02020202, 0x02020202, 0x00000000, 0x00000000, + 0x02020202, 0x02020202, 0xffffffff, 0xffffffff, + 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, + 0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd, + 0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd, + 0x02020202, 0x02020202, 0x02020202, 0x02020202, + 0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202, + 0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x02020202, 0x02020202, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xfdfdfdfd, 0xfdfdfdfd, + 0xfdfdfdfd, 0xfdfdfdfd, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000, + 0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000, + 0x00000000, 
0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x02020202, 0x02020202, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x02020202, 0x02020202, 0x00000000, 0x00000000, + 0x02020202, 0x02020202, 0xffffffff, 0xffffffff, + 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd + }, + { + 0x04040404, 0x04040404, 0x00000000, 0x00000000, + 0x04040404, 0x04040404, 0xffffffff, 0xffffffff, + 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, + 0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb, + 0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb, + 0x04040404, 0x04040404, 0x04040404, 0x04040404, + 0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404, + 0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x04040404, 0x04040404, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xfbfbfbfb, 0xfbfbfbfb, + 0xfbfbfbfb, 0xfbfbfbfb, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000, + 0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x04040404, 0x04040404, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x04040404, 0x04040404, 0x00000000, 0x00000000, + 0x04040404, 0x04040404, 0xffffffff, 0xffffffff, + 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb + }, + { + 0x08080808, 0x08080808, 0x00000000, 0x00000000, + 0x08080808, 0x08080808, 0xffffffff, 0xffffffff, + 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, + 0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7, + 0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7, + 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808, + 0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x08080808, 0x08080808, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xf7f7f7f7, 0xf7f7f7f7, + 0xf7f7f7f7, 0xf7f7f7f7, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000, + 0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x08080808, 0x08080808, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x08080808, 0x08080808, 0x00000000, 0x00000000, + 0x08080808, 0x08080808, 0xffffffff, 0xffffffff, + 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7 + }, + { + 0x10101010, 0x10101010, 0x00000000, 
0x00000000, + 0x10101010, 0x10101010, 0xffffffff, 0xffffffff, + 0xefefefef, 0xefefefef, 0xefefefef, 0xefefefef, + 0x10101010, 0x10101010, 0xefefefef, 0xefefefef, + 0x10101010, 0x10101010, 0xefefefef, 0xefefefef, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0xefefefef, 0xefefefef, 0x10101010, 0x10101010, + 0xefefefef, 0xefefefef, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x10101010, 0x10101010, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xefefefef, 0xefefefef, + 0xefefefef, 0xefefefef, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xefefefef, 0xefefefef, 0x00000000, 0x00000000, + 0xefefefef, 0xefefefef, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x10101010, 0x10101010, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x10101010, 0x10101010, 0x00000000, 0x00000000, + 0x10101010, 0x10101010, 0xffffffff, 0xffffffff, + 0xefefefef, 0xefefefef, 0xefefefef, 0xefefefef + }, + { + 0x20202020, 0x20202020, 0x00000000, 0x00000000, + 0x20202020, 0x20202020, 0xffffffff, 0xffffffff, + 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, + 0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf, + 0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf, + 0x20202020, 0x20202020, 0x20202020, 0x20202020, + 0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020, + 0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x20202020, 0x20202020, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xdfdfdfdf, 0xdfdfdfdf, + 0xdfdfdfdf, 0xdfdfdfdf, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000, + 0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x20202020, 0x20202020, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x20202020, 0x20202020, 0x00000000, 0x00000000, + 0x20202020, 0x20202020, 0xffffffff, 0xffffffff, + 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf + }, + { + 0x40404040, 0x40404040, 0x00000000, 0x00000000, + 0x40404040, 0x40404040, 0xffffffff, 0xffffffff, + 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, + 0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf, + 0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf, + 0x40404040, 0x40404040, 0x40404040, 0x40404040, + 0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040, + 0xbfbfbfbf, 0xbfbfbfbf, 
0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x40404040, 0x40404040, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xbfbfbfbf, 0xbfbfbfbf, + 0xbfbfbfbf, 0xbfbfbfbf, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xbfbfbfbf, 0xbfbfbfbf, 0x00000000, 0x00000000, + 0xbfbfbfbf, 0xbfbfbfbf, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x40404040, 0x40404040, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x40404040, 0x40404040, 0x00000000, 0x00000000, + 0x40404040, 0x40404040, 0xffffffff, 0xffffffff, + 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf + }, + { + 0x80808080, 0x80808080, 0x00000000, 0x00000000, + 0x80808080, 0x80808080, 0xffffffff, 0xffffffff, + 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, + 0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f, + 0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f, + 0x80808080, 0x80808080, 0x80808080, 0x80808080, + 0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080, + 0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x80808080, 0x80808080, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7f7f7f7f, 0x7f7f7f7f, + 0x7f7f7f7f, 0x7f7f7f7f, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000, + 0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x80808080, 0x80808080, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x80808080, 0x80808080, 0x00000000, 0x00000000, + 0x80808080, 0x80808080, 0xffffffff, 0xffffffff, + 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f + } +}; + +u32 special_pattern[DQ_NUM][LEN_SPECIAL_PATTERN] __aligned(32) = { + { + 0x00000000, 0x00000000, 0x01010101, 0x01010101, + 0xffffffff, 0xffffffff, 0xfefefefe, 0xfefefefe, + 0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101, + 0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101, + 0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101, + 0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe, + 0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x01010101, 0x01010101, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 
0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfefefefe, 0xfefefefe, 0xfefefefe, 0xfefefefe, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xfefefefe, 0xfefefefe, + 0x00000000, 0x00000000, 0xfefefefe, 0xfefefefe, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x01010101, 0x01010101, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x01010101, 0x01010101, + 0x00000000, 0x00000000, 0x01010101, 0x01010101, + 0xffffffff, 0xffffffff, 0xfefefefe, 0xfefefefe, + 0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x02020202, 0x02020202, + 0xffffffff, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd, + 0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202, + 0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202, + 0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202, + 0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd, + 0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x02020202, 0x02020202, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd, + 0x00000000, 0x00000000, 0xfdfdfdfd, 0xfdfdfdfd, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x02020202, 0x02020202, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x02020202, 0x02020202, + 0x00000000, 0x00000000, 0x02020202, 0x02020202, + 0xffffffff, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd, + 0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x04040404, 0x04040404, + 0xffffffff, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb, + 0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404, + 0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404, + 0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404, + 0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb, + 0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x04040404, 0x04040404, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 
0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb, + 0x00000000, 0x00000000, 0xfbfbfbfb, 0xfbfbfbfb, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x04040404, 0x04040404, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x04040404, 0x04040404, + 0x00000000, 0x00000000, 0x04040404, 0x04040404, + 0xffffffff, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb, + 0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x08080808, 0x08080808, + 0xffffffff, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7, + 0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808, + 0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808, + 0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7, + 0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x08080808, 0x08080808, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7, + 0x00000000, 0x00000000, 0xf7f7f7f7, 0xf7f7f7f7, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x08080808, 0x08080808, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x08080808, 0x08080808, + 0x00000000, 0x00000000, 0x08080808, 0x08080808, + 0xffffffff, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7, + 0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x10101010, 0x10101010, + 0xffffffff, 0xffffffff, 0xefefefef, 0xefefefef, + 0xefefefef, 0xefefefef, 0x10101010, 0x10101010, + 0xefefefef, 0xefefefef, 0x10101010, 0x10101010, + 0xefefefef, 0xefefefef, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0xefefefef, 0xefefefef, + 0x10101010, 0x10101010, 0xefefefef, 0xefefefef, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x10101010, 0x10101010, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xefefefef, 0xefefefef, 0xefefefef, 0xefefefef, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xefefefef, 0xefefefef, + 0x00000000, 0x00000000, 0xefefefef, 0xefefefef, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 
0xffffffff, 0xffffffff, 0xffffffff, + 0x10101010, 0x10101010, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x10101010, 0x10101010, + 0x00000000, 0x00000000, 0x10101010, 0x10101010, + 0xffffffff, 0xffffffff, 0xefefefef, 0xefefefef, + 0xefefefef, 0xefefefef, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x20202020, 0x20202020, + 0xffffffff, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf, + 0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020, + 0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020, + 0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020, + 0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf, + 0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x20202020, 0x20202020, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf, + 0x00000000, 0x00000000, 0xdfdfdfdf, 0xdfdfdfdf, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x20202020, 0x20202020, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x20202020, 0x20202020, + 0x00000000, 0x00000000, 0x20202020, 0x20202020, + 0xffffffff, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf, + 0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x40404040, 0x40404040, + 0xffffffff, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf, + 0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040, + 0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040, + 0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040, + 0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf, + 0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x40404040, 0x40404040, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf, + 0x00000000, 0x00000000, 0xbfbfbfbf, 0xbfbfbfbf, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x40404040, 0x40404040, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x40404040, 0x40404040, + 0x00000000, 0x00000000, 0x40404040, 0x40404040, + 0xffffffff, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf, + 0xbfbfbfbf, 0xbfbfbfbf, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x80808080, 
0x80808080, + 0xffffffff, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f, + 0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080, + 0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080, + 0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080, + 0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f, + 0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0x80808080, 0x80808080, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f, + 0x00000000, 0x00000000, 0x7f7f7f7f, 0x7f7f7f7f, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, + 0x80808080, 0x80808080, 0xffffffff, 0xffffffff, + 0xffffffff, 0xffffffff, 0x80808080, 0x80808080, + 0x00000000, 0x00000000, 0x80808080, 0x80808080, + 0xffffffff, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f, + 0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000 + } +}; + +/* Fabric ratios table */ +u32 fabric_ratio[FAB_OPT] = { + 0x04010204, + 0x04020202, + 0x08020306, + 0x08020303, + 0x04020303, + 0x04020204, + 0x04010202, + 0x08030606, + 0x08030505, + 0x04020306, + 0x0804050a, + 0x04030606, + 0x04020404, + 0x04030306, + 0x04020505, + 0x08020505, + 0x04010303, + 0x08050a0a, + 0x04030408, + 0x04010102, + 0x08030306 +}; + +u32 pbs_dq_mapping[PUP_NUM_64BIT + 1][DQ_NUM] = { + {3, 2, 5, 7, 1, 0, 6, 4}, + {2, 3, 6, 7, 1, 0, 4, 5}, + {1, 3, 5, 6, 0, 2, 4, 7}, + {0, 2, 4, 7, 1, 3, 5, 6}, + {3, 0, 4, 6, 1, 2, 5, 7}, + {0, 3, 5, 7, 1, 2, 4, 6}, + {2, 3, 5, 7, 1, 0, 4, 6}, + {0, 2, 5, 4, 1, 3, 6, 7}, + {2, 3, 4, 7, 0, 1, 5, 6} +}; + +#endif /* __DDR3_PATTERNS_64_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training.c new file mode 100644 index 000000000..2512b58cb --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training.c @@ -0,0 +1,2898 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_common.h" +#include "mv_ddr_training_db.h" +#include "mv_ddr_regs.h" + +#define GET_CS_FROM_MASK(mask) (cs_mask2_num[mask]) +#define CS_CBE_VALUE(cs_num) (cs_cbe_reg[cs_num]) + +u32 window_mem_addr = 0; +u32 phy_reg0_val = 0; +u32 phy_reg1_val = 8; +u32 phy_reg2_val = 0; +u32 phy_reg3_val = PARAM_UNDEFINED; +enum mv_ddr_freq low_freq = MV_DDR_FREQ_LOW_FREQ; +enum mv_ddr_freq medium_freq; +u32 debug_dunit = 0; +u32 odt_additional = 1; +u32 *dq_map_table = NULL; + +/* In case of ddr4, do not run the ddr3_tip_write_additional_odt_setting function - mc odt is always 'on'; + * in the ddr4 case the terminations are rttWR and rttPARK, and the odt must always be 'on' (0x1498 = 0xf) + */ +u32 odt_config = 1; + +u32 nominal_avs; +u32 extension_avs; + +u32 is_pll_before_init = 0, is_adll_calib_before_init = 1, is_dfs_in_init = 0; +u32 dfs_low_freq; + +u32 g_rtt_nom_cs0, g_rtt_nom_cs1; +u8 calibration_update_control; /* 2 - external only, 1 - internal only */ + +enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]; +enum auto_tune_stage training_stage = INIT_CONTROLLER; +u32 finger_test = 0, p_finger_start = 11, p_finger_end = 64, + n_finger_start = 11, n_finger_end = 64, + p_finger_step = 3, n_finger_step = 3; +u32 clamp_tbl[] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }; + +/* Initialized to 0xff; this variable is defined by the user in debug mode */ +u32 mode_2t = 0xff; +u32 xsb_validate_type = 0; +u32 xsb_validation_base_address = 0xf000; +u32 first_active_if = 0; +u32 dfs_low_phy1 = 0x1f; +u32 multicast_id = 0; +int use_broadcast = 0; +struct hws_tip_freq_config_info *freq_info_table = NULL; +u8 is_cbe_required = 0; +u32 debug_mode = 0; +u32 delay_enable = 0; +int rl_mid_freq_wa = 0; + +u32 effective_cs = 0; + +u32 vref_init_val = 0x4; +u32 ck_delay = PARAM_UNDEFINED; + +/* Design guidelines parameters */ +u32 g_zpri_data = PARAM_UNDEFINED; /* controller data - P drive strength */ +u32 g_znri_data = PARAM_UNDEFINED; /* controller data - N drive strength */ +u32 g_zpri_ctrl = PARAM_UNDEFINED; /* controller C/A - P drive strength */ +u32 g_znri_ctrl = PARAM_UNDEFINED; /* controller C/A - N drive strength */ + +u32 g_zpodt_data = PARAM_UNDEFINED; /* controller data - P ODT */ +u32 g_znodt_data = PARAM_UNDEFINED; /* controller data - N ODT */ +u32 g_zpodt_ctrl = PARAM_UNDEFINED; /* controller C/A - P ODT */ +u32 g_znodt_ctrl = PARAM_UNDEFINED; /* controller C/A - N ODT */ + +u32 g_odt_config = PARAM_UNDEFINED; +u32 g_rtt_nom = PARAM_UNDEFINED; +u32 g_rtt_wr = PARAM_UNDEFINED; +u32 g_dic = PARAM_UNDEFINED; +u32 g_rtt_park = PARAM_UNDEFINED; + +u32 mask_tune_func = (SET_MEDIUM_FREQ_MASK_BIT | + WRITE_LEVELING_MASK_BIT | + LOAD_PATTERN_2_MASK_BIT | + READ_LEVELING_MASK_BIT | + SET_TARGET_FREQ_MASK_BIT | + WRITE_LEVELING_TF_MASK_BIT | + READ_LEVELING_TF_MASK_BIT | + CENTRALIZATION_RX_MASK_BIT | + CENTRALIZATION_TX_MASK_BIT); + +static int ddr3_tip_ddr3_training_main_flow(u32 dev_num); +static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, + u32 if_id, u32 cl_value, u32 cwl_value); +static int ddr3_tip_ddr3_auto_tune(u32 dev_num); + +#ifdef ODT_TEST_SUPPORT +static int odt_test(u32 dev_num, enum hws_algo_type algo_type); +#endif + +int adll_calibration(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum mv_ddr_freq frequency); +static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum mv_ddr_freq frequency); + +static u8 mem_size_config[MV_DDR_DIE_CAP_LAST] = { + 0x2, /* 
512Mbit */ + 0x3, /* 1Gbit */ + 0x0, /* 2Gbit */ + 0x4, /* 4Gbit */ + 0x5, /* 8Gbit */ + 0x0, /* TODO: placeholder for 16-Gbit die capacity */ + 0x0, /* TODO: placeholder for 32-Gbit die capacity */ + 0x0, /* TODO: placeholder for 12-Gbit die capacity */ + 0x0 /* TODO: placeholder for 24-Gbit die capacity */ +}; + +static u8 cs_mask2_num[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 }; + +static struct reg_data odpg_default_value[] = { + {0x1034, 0x38000, MASK_ALL_BITS}, + {0x1038, 0x0, MASK_ALL_BITS}, + {0x10b0, 0x0, MASK_ALL_BITS}, + {0x10b8, 0x0, MASK_ALL_BITS}, + {0x10c0, 0x0, MASK_ALL_BITS}, + {0x10f0, 0x0, MASK_ALL_BITS}, + {0x10f4, 0x0, MASK_ALL_BITS}, + {0x10f8, 0xff, MASK_ALL_BITS}, + {0x10fc, 0xffff, MASK_ALL_BITS}, + {0x1130, 0x0, MASK_ALL_BITS}, + {0x1830, 0x2000000, MASK_ALL_BITS}, + {0x14d0, 0x0, MASK_ALL_BITS}, + {0x14d4, 0x0, MASK_ALL_BITS}, + {0x14d8, 0x0, MASK_ALL_BITS}, + {0x14dc, 0x0, MASK_ALL_BITS}, + {0x1454, 0x0, MASK_ALL_BITS}, + {0x1594, 0x0, MASK_ALL_BITS}, + {0x1598, 0x0, MASK_ALL_BITS}, + {0x159c, 0x0, MASK_ALL_BITS}, + {0x15a0, 0x0, MASK_ALL_BITS}, + {0x15a4, 0x0, MASK_ALL_BITS}, + {0x15a8, 0x0, MASK_ALL_BITS}, + {0x15ac, 0x0, MASK_ALL_BITS}, + {0x1600, 0x0, MASK_ALL_BITS}, + {0x1604, 0x0, MASK_ALL_BITS}, + {0x1608, 0x0, MASK_ALL_BITS}, + {0x160c, 0x0, MASK_ALL_BITS}, + {0x1610, 0x0, MASK_ALL_BITS}, + {0x1614, 0x0, MASK_ALL_BITS}, + {0x1618, 0x0, MASK_ALL_BITS}, + {0x1624, 0x0, MASK_ALL_BITS}, + {0x1690, 0x0, MASK_ALL_BITS}, + {0x1694, 0x0, MASK_ALL_BITS}, + {0x1698, 0x0, MASK_ALL_BITS}, + {0x169c, 0x0, MASK_ALL_BITS}, + {0x14b8, 0x6f67, MASK_ALL_BITS}, + {0x1630, 0x0, MASK_ALL_BITS}, + {0x1634, 0x0, MASK_ALL_BITS}, + {0x1638, 0x0, MASK_ALL_BITS}, + {0x163c, 0x0, MASK_ALL_BITS}, + {0x16b0, 0x0, MASK_ALL_BITS}, + {0x16b4, 0x0, MASK_ALL_BITS}, + {0x16b8, 0x0, MASK_ALL_BITS}, + {0x16bc, 0x0, MASK_ALL_BITS}, + {0x16c0, 0x0, MASK_ALL_BITS}, + {0x16c4, 0x0, MASK_ALL_BITS}, + {0x16c8, 0x0, MASK_ALL_BITS}, + {0x16cc, 0x1, MASK_ALL_BITS}, + {0x16f0, 0x1, MASK_ALL_BITS}, + {0x16f4, 0x0, MASK_ALL_BITS}, + {0x16f8, 0x0, MASK_ALL_BITS}, + {0x16fc, 0x0, MASK_ALL_BITS} +}; + +/* MR cmd and addr definitions */ +struct mv_ddr_mr_data mr_data[] = { + {MRS0_CMD, MR0_REG}, + {MRS1_CMD, MR1_REG}, + {MRS2_CMD, MR2_REG}, + {MRS3_CMD, MR3_REG} +}; + +/* invert pads */ +static int ddr3_tip_pad_inv(void) +{ + u32 sphy, data; + u32 sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); + u32 ck_swap_ctrl_sphy; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (sphy = 0; sphy < sphy_max; sphy++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sphy); + if (tm->interface_params[0]. + as_bus_params[sphy].is_dqs_swap == 1) { + data = (INVERT_PAD << INV_PAD4_OFFS | + INVERT_PAD << INV_PAD5_OFFS); + /* dqs swap */ + ddr3_tip_bus_read_modify_write(0, ACCESS_TYPE_UNICAST, + 0, sphy, + DDR_PHY_DATA, + PHY_CTRL_PHY_REG, + data, data); + } + + if (tm->interface_params[0].as_bus_params[sphy]. 
+ is_ck_swap == 1 && sphy == 0) { +/* TODO: move this code to per-platform code */ + /* clock swap for both cs0 and cs1 */ + data = (INVERT_PAD << INV_PAD2_OFFS | + INVERT_PAD << INV_PAD6_OFFS | + INVERT_PAD << INV_PAD4_OFFS | + INVERT_PAD << INV_PAD5_OFFS); + ck_swap_ctrl_sphy = CK_SWAP_CTRL_PHY_NUM; + ddr3_tip_bus_read_modify_write(0, ACCESS_TYPE_UNICAST, + 0, ck_swap_ctrl_sphy, + DDR_PHY_CONTROL, + PHY_CTRL_PHY_REG, + data, data); + } + } + + return MV_OK; +} + +static int ddr3_tip_rank_control(u32 dev_num, u32 if_id); + +/* + * Update global training parameters with data from the user + */ +int ddr3_tip_tune_training_params(u32 dev_num, + struct tune_train_params *params) +{ + if (params->ck_delay != PARAM_UNDEFINED) + ck_delay = params->ck_delay; + if (params->phy_reg3_val != PARAM_UNDEFINED) + phy_reg3_val = params->phy_reg3_val; + if (params->g_rtt_nom != PARAM_UNDEFINED) + g_rtt_nom = params->g_rtt_nom; + if (params->g_rtt_wr != PARAM_UNDEFINED) + g_rtt_wr = params->g_rtt_wr; + if (params->g_dic != PARAM_UNDEFINED) + g_dic = params->g_dic; + if (params->g_odt_config != PARAM_UNDEFINED) + g_odt_config = params->g_odt_config; + if (params->g_zpri_data != PARAM_UNDEFINED) + g_zpri_data = params->g_zpri_data; + if (params->g_znri_data != PARAM_UNDEFINED) + g_znri_data = params->g_znri_data; + if (params->g_zpri_ctrl != PARAM_UNDEFINED) + g_zpri_ctrl = params->g_zpri_ctrl; + if (params->g_znri_ctrl != PARAM_UNDEFINED) + g_znri_ctrl = params->g_znri_ctrl; + if (params->g_zpodt_data != PARAM_UNDEFINED) + g_zpodt_data = params->g_zpodt_data; + if (params->g_znodt_data != PARAM_UNDEFINED) + g_znodt_data = params->g_znodt_data; + if (params->g_zpodt_ctrl != PARAM_UNDEFINED) + g_zpodt_ctrl = params->g_zpodt_ctrl; + if (params->g_znodt_ctrl != PARAM_UNDEFINED) + g_znodt_ctrl = params->g_znodt_ctrl; + if (params->g_rtt_park != PARAM_UNDEFINED) + g_rtt_park = params->g_rtt_park; + + + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("DGL parameters: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n", + g_zpri_data, g_znri_data, g_zpri_ctrl, g_znri_ctrl, g_zpodt_data, g_znodt_data, + g_zpodt_ctrl, g_znodt_ctrl, g_rtt_nom, g_dic, g_odt_config, g_rtt_wr)); + + return MV_OK; +} + +/* + * Configure CS + */ +int ddr3_tip_configure_cs(u32 dev_num, u32 if_id, u32 cs_num, u32 enable) +{ + u32 data, addr_hi, data_high; + u32 mem_index; + u32 clk_enable; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (tm->clk_enable & (1 << cs_num)) + clk_enable = 1; + else + clk_enable = enable; + + if (enable == 1) { + data = (tm->interface_params[if_id].bus_width == + MV_DDR_DEV_WIDTH_8BIT) ? 
0 : 1; + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + SDRAM_ADDR_CTRL_REG, (data << (cs_num * 4)), + 0x3 << (cs_num * 4))); + mem_index = tm->interface_params[if_id].memory_size; + + addr_hi = mem_size_config[mem_index] & 0x3; + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + SDRAM_ADDR_CTRL_REG, + (addr_hi << (2 + cs_num * 4)), + 0x3 << (2 + cs_num * 4))); + + data_high = (mem_size_config[mem_index] & 0x4) >> 2; + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + SDRAM_ADDR_CTRL_REG, + data_high << (20 + cs_num), 1 << (20 + cs_num))); + + /* Enable Address Select Mode */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + SDRAM_ADDR_CTRL_REG, 1 << (16 + cs_num), + 1 << (16 + cs_num))); + } + switch (cs_num) { + case 0: + case 1: + case 2: + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUNIT_CTRL_LOW_REG, (clk_enable << (cs_num + 11)), + 1 << (cs_num + 11))); + break; + case 3: + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUNIT_CTRL_LOW_REG, (clk_enable << 15), 1 << 15)); + break; + } + + return MV_OK; +} + +/* + * Init Controller Flow + */ +int hws_ddr3_tip_init_controller(u32 dev_num, struct init_cntr_param *init_cntr_prm) +{ + u32 if_id; + u32 cs_num; + u32 t_ckclk = 0, t_wr = 0, t2t = 0; + u32 data_value = 0, cs_cnt = 0, + mem_mask = 0, bus_index = 0; + enum mv_ddr_speed_bin speed_bin_index = SPEED_BIN_DDR_2133N; + u32 cs_mask = 0; + u32 cl_value = 0, cwl_val = 0; + u32 bus_cnt = 0, adll_tap = 0; + enum hws_access_type access_type = ACCESS_TYPE_UNICAST; + u32 data_read[MAX_INTERFACE_NUM]; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + enum mv_ddr_timing timing; + enum mv_ddr_freq freq = tm->interface_params[0].memory_freq; + + DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, + ("Init_controller, do_mrs_phy=%d, is_ctrl64_bit=%d\n", + init_cntr_prm->do_mrs_phy, + init_cntr_prm->is_ctrl64_bit)); + + if (init_cntr_prm->init_phy == 1) { + CHECK_STATUS(ddr3_tip_configure_phy(dev_num)); + } + + if (generic_init_controller == 1) { + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, + ("active IF %d\n", if_id)); + mem_mask = 0; + for (bus_index = 0; + bus_index < octets_per_if_num; + bus_index++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_index); + mem_mask |= + tm->interface_params[if_id]. + as_bus_params[bus_index].mirror_enable_bitmask; + } + + if (mem_mask != 0) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, + if_id, DUAL_DUNIT_CFG_REG, 0, + 0x8)); + } + + speed_bin_index = + tm->interface_params[if_id]. 
+ speed_bin_index; + + /* t_ckclk is external clock */ + t_ckclk = (MEGA / mv_ddr_freq_get(freq)); + + if (MV_DDR_IS_HALF_BUS_DRAM_MODE(tm->bus_act_mask, octets_per_if_num)) + data_value = (0x4000 | 0 | 0x1000000) & ~(1 << 26); + else + data_value = (0x4000 | 0x8000 | 0x1000000) & ~(1 << 26); + + /* Interface Bus Width */ + /* SRMode */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + SDRAM_CFG_REG, data_value, + 0x100c000)); + + /* Interleave first command pre-charge enable (TBD) */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + SDRAM_OPEN_PAGES_CTRL_REG, (1 << 10), + (1 << 10))); + + /* Reset divider_b assert -> de-assert */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + SDRAM_CFG_REG, + 0x0 << PUP_RST_DIVIDER_OFFS, + PUP_RST_DIVIDER_MASK << PUP_RST_DIVIDER_OFFS)); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + SDRAM_CFG_REG, + 0x1 << PUP_RST_DIVIDER_OFFS, + PUP_RST_DIVIDER_MASK << PUP_RST_DIVIDER_OFFS)); + + /* PHY configuration */ + /* + * Postamble Length = 1.5cc, Addresscntl to clk skew + * 1/2, Preamble length normal, parallel ADLL enable + */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DRAM_PHY_CFG_REG, 0x28, 0x3e)); + if (init_cntr_prm->is_ctrl64_bit) { + /* positive edge */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DRAM_PHY_CFG_REG, 0x0, + 0xff80)); + } + + /* calibration block disable */ + /* Xbar Read buffer select (for Internal access) */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MAIN_PADS_CAL_MACH_CTRL_REG, 0x1200c, + 0x7dffe01c)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MAIN_PADS_CAL_MACH_CTRL_REG, + calibration_update_control << 3, 0x3 << 3)); + + /* Pad calibration control - enable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MAIN_PADS_CAL_MACH_CTRL_REG, 0x1, 0x1)); + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) < MV_TIP_REV_3) { + /* DDR3 rank ctrl - part of the generic code */ + /* CS1 mirroring enable + w/a for JIRA DUNIT-14581 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DDR3_RANK_CTRL_REG, 0x27, MASK_ALL_BITS)); + } + + cs_mask = 0; + data_value = 0x7; + /* + * Address ctrl - Part of the Generic code + * The next configuration is done: + * 1) Memory Size + * 2) Bus_width + * 3) CS# + * 4) Page Number + * Per Dunit, get the parameters from the Map_topology: + * Bus_width + */ + + data_value = + (tm->interface_params[if_id]. + bus_width == MV_DDR_DEV_WIDTH_8BIT) ? 0 : 1; + + /* create merge cs mask for all cs available in dunit */ + for (bus_cnt = 0; + bus_cnt < octets_per_if_num; + bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + cs_mask |= + tm->interface_params[if_id]. + as_bus_params[bus_cnt].cs_bitmask; + } + DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, + ("Init_controller IF %d cs_mask %d\n", + if_id, cs_mask)); + /* + * Configure the next upon the Map Topology - If the + * Dunit is CS0, configure CS0; if it is multi CS, + * configure them both. The Bus_width is the + * Memory Bus width - x8 or x16 + */ + for (cs_cnt = 0; cs_cnt < MAX_CS_NUM; cs_cnt++) { + ddr3_tip_configure_cs(dev_num, if_id, cs_cnt, + ((cs_mask & (1 << cs_cnt)) ? 1 + : 0)); + } + + if (init_cntr_prm->do_mrs_phy) { + /* + * MR0 - Part of the Generic code + * The next configuration is done: + * 1) Burst Length + * 2) CAS Latency + * get for each dunit its Speed_bin & + * Target Frequency. From both parameters + * get the appropriate Cas_l from the CL table + */ + cl_value = + tm->interface_params[if_id]. + cas_l; + cwl_val = + tm->interface_params[if_id]. + cas_wl; + DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, + ("cl_value 0x%x cwl_val 0x%x\n", + cl_value, cwl_val)); + + t_wr = time_to_nclk(mv_ddr_speed_bin_timing_get + (speed_bin_index, + SPEED_BIN_TWR), t_ckclk); + + data_value = + ((cl_mask_table[cl_value] & 0x1) << 2) | + ((cl_mask_table[cl_value] & 0xe) << 3); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MR0_REG, data_value, + (0x7 << 4) | (1 << 2))); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MR0_REG, twr_mask_table[t_wr] << 9, + 0x7 << 9)); + + /* + * MR1: Set RTT and DIC Design GL values + * configured by user + */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, MR1_REG, + g_dic | g_rtt_nom, 0x266)); + + /* MR2 - Part of the Generic code */ + /* + * The next configuration is done: + * 1) SRT + * 2) CAS Write Latency + */ + data_value = (cwl_mask_table[cwl_val] << 3); + data_value |= + ((tm->interface_params[if_id]. + interface_temp == + MV_DDR_TEMP_HIGH) ? (1 << 7) : 0); + data_value |= g_rtt_wr; + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MR2_REG, data_value, + (0x7 << 3) | (0x1 << 7) | (0x3 << + 9))); + } + + ddr3_tip_write_odt(dev_num, access_type, if_id, + cl_value, cwl_val); + ddr3_tip_set_timing(dev_num, access_type, if_id, freq); + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) < MV_TIP_REV_3) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DUNIT_CTRL_HIGH_REG, 0x1000119, + 0x100017F)); + } else { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DUNIT_CTRL_HIGH_REG, 0x600177 | + (init_cntr_prm->is_ctrl64_bit ? + CPU_INTERJECTION_ENA_SPLIT_ENA << CPU_INTERJECTION_ENA_OFFS : + CPU_INTERJECTION_ENA_SPLIT_DIS << CPU_INTERJECTION_ENA_OFFS), + 0x1600177 | CPU_INTERJECTION_ENA_MASK << + CPU_INTERJECTION_ENA_OFFS)); + } + + /* reset bit 7 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DUNIT_CTRL_HIGH_REG, + (init_cntr_prm->msys_init << 7), (1 << 7))); + + timing = tm->interface_params[if_id].timing; + + if (mode_2t != 0xff) { + t2t = mode_2t; + } else if (timing != MV_DDR_TIM_DEFAULT) { + t2t = (timing == MV_DDR_TIM_2T) ? 1 : 0; + } else { + /* calculate number of CS (per interface) */ + cs_num = mv_ddr_cs_num_get(); + t2t = (cs_num == 1) ? 0 : 1; + } + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DUNIT_CTRL_LOW_REG, t2t << 3, + 0x3 << 3)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DDR_TIMING_REG, 0x28 << 9, 0x3f << 9)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DDR_TIMING_REG, 0xa << 21, 0xff << 21)); + + /* move the block to ddr3_tip_set_timing - end */ + /* AUTO_ZQC_TIMING */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + ZQC_CFG_REG, (AUTO_ZQC_TIMING | (2 << 20)), + 0x3fffff)); + CHECK_STATUS(ddr3_tip_if_read + (dev_num, access_type, if_id, + DRAM_PHY_CFG_REG, data_read, 0x30)); + data_value = + (data_read[if_id] == 0) ? (1 << 11) : 0;
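+ /* set DUNIT_CTRL_HIGH bit 11 below only when DRAM_PHY_CFG bits [5:4] (mask 0x30) read back as zero */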
+ CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DUNIT_CTRL_HIGH_REG, data_value, + (1 << 11))); + + /* Set Active control for ODT write transactions */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, 0x1494, g_odt_config, + MASK_ALL_BITS)); + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) == MV_TIP_REV_3) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + 0x14a8, 0x900, 0x900)); + /* w/a: keep control sub-phy outputs floating during self-refresh */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + 0x16d0, 0, 0x8000)); + } + } + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + CHECK_STATUS(ddr3_tip_rank_control(dev_num, if_id)); + + if (init_cntr_prm->do_mrs_phy) + ddr3_tip_pad_inv(); + + /* Pad calibration control - disable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MAIN_PADS_CAL_MACH_CTRL_REG, 0x0, 0x1)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + MAIN_PADS_CAL_MACH_CTRL_REG, + calibration_update_control << 3, 0x3 << 3)); + } + + + if (delay_enable != 0) { + adll_tap = MEGA / (mv_ddr_freq_get(freq) * 64); + ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap); + } + + return MV_OK; +} + +/* + * Rank Control Flow + */ +static int ddr3_tip_rev2_rank_control(u32 dev_num, u32 if_id) +{ + u32 data_value = 0, bus_cnt = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + data_value |= tm->interface_params[if_id].as_bus_params[bus_cnt]. + cs_bitmask; + + if (tm->interface_params[if_id].as_bus_params[bus_cnt]. + mirror_enable_bitmask == 1) { + /* + * Check mirror_enable_bitmask: + * if it is enabled, set the CS + 4 bit in the word to '1' + */ + if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. + cs_bitmask & 0x1) != 0) { + data_value |= tm->interface_params[if_id]. + as_bus_params[bus_cnt]. + mirror_enable_bitmask << 4; + } + + if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. + cs_bitmask & 0x2) != 0) { + data_value |= tm->interface_params[if_id]. + as_bus_params[bus_cnt]. + mirror_enable_bitmask << 5; + } + + if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. + cs_bitmask & 0x4) != 0) { + data_value |= tm->interface_params[if_id]. + as_bus_params[bus_cnt]. + mirror_enable_bitmask << 6; + } + + if ((tm->interface_params[if_id].as_bus_params[bus_cnt]. + cs_bitmask & 0x8) != 0) { + data_value |= tm->interface_params[if_id]. + as_bus_params[bus_cnt]. + mirror_enable_bitmask << 7; + } + } + } + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, DDR3_RANK_CTRL_REG, + data_value, 0xff)); + + return MV_OK; +} + +static int ddr3_tip_rev3_rank_control(u32 dev_num, u32 if_id) +{ + u32 data_value = 0, bus_cnt; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (bus_cnt = 1; bus_cnt < octets_per_if_num; bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + if ((tm->interface_params[if_id]. + as_bus_params[0].cs_bitmask != + tm->interface_params[if_id]. + as_bus_params[bus_cnt].cs_bitmask) || + (tm->interface_params[if_id]. + as_bus_params[0].mirror_enable_bitmask != + tm->interface_params[if_id]. 
+ as_bus_params[bus_cnt].mirror_enable_bitmask)) + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("WARNING: Wrong configuration for pup #%d: CS mask and CS mirroring should be the same for all pups\n", + bus_cnt)); + } + + data_value |= tm->interface_params[if_id]. + as_bus_params[0].cs_bitmask; + data_value |= tm->interface_params[if_id]. + as_bus_params[0].mirror_enable_bitmask << 4; + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, DDR3_RANK_CTRL_REG, + data_value, 0xff)); + + return MV_OK; +} + +static int ddr3_tip_rank_control(u32 dev_num, u32 if_id) +{ + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) == MV_TIP_REV_2) + return ddr3_tip_rev2_rank_control(dev_num, if_id); + else + return ddr3_tip_rev3_rank_control(dev_num, if_id); +} + +/* + * Algorithm Parameters Validation + */ +int ddr3_tip_validate_algo_var(u32 value, u32 fail_value, char *var_name) +{ + if (value == fail_value) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Error: %s is not initialized (Algo Components Validation)\n", + var_name)); + return 0; + } + + return 1; +} + +int ddr3_tip_validate_algo_ptr(void *ptr, void *fail_value, char *ptr_name) +{ + if (ptr == fail_value) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Error: %s is not initialized (Algo Components Validation)\n", + ptr_name)); + return 0; + } + + return 1; +} + +int ddr3_tip_validate_algo_components(u8 dev_num) +{ + int status = 1; + + /* Check DGL parameters */ + status &= ddr3_tip_validate_algo_var(ck_delay, PARAM_UNDEFINED, "ck_delay"); + status &= ddr3_tip_validate_algo_var(phy_reg3_val, PARAM_UNDEFINED, "phy_reg3_val"); + status &= ddr3_tip_validate_algo_var(g_rtt_nom, PARAM_UNDEFINED, "g_rtt_nom"); + status &= ddr3_tip_validate_algo_var(g_dic, PARAM_UNDEFINED, "g_dic"); + status &= ddr3_tip_validate_algo_var(odt_config, PARAM_UNDEFINED, "odt_config"); + status &= ddr3_tip_validate_algo_var(g_zpri_data, PARAM_UNDEFINED, "g_zpri_data"); + status &= ddr3_tip_validate_algo_var(g_znri_data, PARAM_UNDEFINED, "g_znri_data"); + status &= ddr3_tip_validate_algo_var(g_zpri_ctrl, PARAM_UNDEFINED, "g_zpri_ctrl"); + status &= ddr3_tip_validate_algo_var(g_znri_ctrl, PARAM_UNDEFINED, "g_znri_ctrl"); + status &= ddr3_tip_validate_algo_var(g_zpodt_data, PARAM_UNDEFINED, "g_zpodt_data"); + status &= ddr3_tip_validate_algo_var(g_znodt_data, PARAM_UNDEFINED, "g_znodt_data"); + status &= ddr3_tip_validate_algo_var(g_zpodt_ctrl, PARAM_UNDEFINED, "g_zpodt_ctrl"); + status &= ddr3_tip_validate_algo_var(g_znodt_ctrl, PARAM_UNDEFINED, "g_znodt_ctrl"); + + /* Check function pointers */ + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_dunit_mux_select_func, + NULL, "tip_dunit_mux_select_func"); + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_dunit_write, + NULL, "mv_ddr_dunit_write"); + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_dunit_read, + NULL, "mv_ddr_dunit_read"); + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_phy_write, + NULL, "mv_ddr_phy_write"); + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].mv_ddr_phy_read, + NULL, "mv_ddr_phy_read"); + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_get_freq_config_info_func, + NULL, "tip_get_freq_config_info_func"); + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_set_freq_divider_func, + NULL, "tip_set_freq_divider_func"); + status &= ddr3_tip_validate_algo_ptr(config_func_info[dev_num].tip_get_clock_ratio, + NULL, "tip_get_clock_ratio"); + + status &= ddr3_tip_validate_algo_ptr(dq_map_table, NULL, "dq_map_table"); + status &= ddr3_tip_validate_algo_var(dfs_low_freq, 0, "dfs_low_freq"); + + return (status == 1) ? MV_OK : MV_NOT_INITIALIZED; +}
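+ +/* + * Note on the pattern above: each validate helper returns 1 on success and + * 0 on failure, so the bitwise-AND accumulation yields MV_OK only if every + * check passed; a single failed check forces MV_NOT_INITIALIZED. + */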
+ +int ddr3_pre_algo_config(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* Set Bus3 ECC training mode */ + if (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) { + /* Set Bus3 ECC MUX */ + CHECK_STATUS(ddr3_tip_if_write + (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, + DRAM_PINS_MUX_REG, 0x100, 0x100)); + } + + /* Set regular ECC training mode (bus4 and bus3) */ + if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) || + (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) || + (DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))) { + /* Enable ECC Write MUX */ + CHECK_STATUS(ddr3_tip_if_write + (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x100, 0x100)); + /* General ECC enable */ + CHECK_STATUS(ddr3_tip_if_write + (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, + SDRAM_CFG_REG, 0x40000, 0x40000)); + /* Disable Read Data ECC MUX */ + CHECK_STATUS(ddr3_tip_if_write + (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x0, 0x2)); + } + + return MV_OK; +} + +int ddr3_post_algo_config(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + int status; + + status = ddr3_post_run_alg(); + if (MV_OK != status) { + printf("DDR3 Post Run Alg - FAILED 0x%x\n", status); + return status; + } + + /* Un-set ECC training mode */ + if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) || + (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) || + (DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))) { + /* Disable ECC Write MUX */ + CHECK_STATUS(ddr3_tip_if_write + (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x0, 0x100)); + /* General ECC and Bus3 ECC MUX remains enabled */ + } + + return MV_OK; +} + +/* + * Run Training Flow + */ +int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type) +{ + int status = MV_OK; + + status = ddr3_pre_algo_config(); + if (MV_OK != status) { + printf("DDR3 Pre Algo Config - FAILED 0x%x\n", status); + return status; + } + +#ifdef ODT_TEST_SUPPORT + if (finger_test == 1) + return odt_test(dev_num, algo_type); +#endif + + if (algo_type == ALGO_TYPE_DYNAMIC) { + status = ddr3_tip_ddr3_auto_tune(dev_num); + } + + if (status != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("******** DRAM initialization Failed (res 0x%x) ********\n", + status)); + return status; + } + + status = ddr3_post_algo_config(); + if (MV_OK != status) { + printf("DDR3 Post Algo Config - FAILED 0x%x\n", status); + return status; + } + + return status; +} + +#ifdef ODT_TEST_SUPPORT +/* + * ODT Test + */ +static int odt_test(u32 dev_num, enum hws_algo_type algo_type) +{ + int ret = MV_OK, ret_tune = MV_OK; + int pfinger_val = 0, nfinger_val; + + for (pfinger_val = p_finger_start; pfinger_val <= p_finger_end; + pfinger_val += p_finger_step) { + for (nfinger_val = n_finger_start; nfinger_val <= n_finger_end; + nfinger_val += n_finger_step) { + if (finger_test != 0) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("pfinger_val %d nfinger_val %d\n", + pfinger_val, nfinger_val)); + /* + * TODO: need to check the correctness + * of the following two lines. + */ + g_zpodt_data = pfinger_val; + g_znodt_data = nfinger_val; + } + + if (algo_type == ALGO_TYPE_DYNAMIC) { + ret = ddr3_tip_ddr3_auto_tune(dev_num); + } + } + } + + if (ret_tune != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Run_alg: tuning failed %d\n", ret_tune)); + ret = (ret == MV_OK) ? 
ret_tune : ret; + } + + return ret; +} +#endif + +/* + * Select Controller + */ +int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable) +{ + return config_func_info[dev_num]. + tip_dunit_mux_select_func((u8)dev_num, enable); +} + +/* + * Dunit Register Write + */ +int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access, + u32 if_id, u32 reg_addr, u32 data_value, u32 mask) +{ + config_func_info[dev_num].mv_ddr_dunit_write(reg_addr, mask, data_value); + + return MV_OK; +} + +/* + * Dunit Register Read + */ +int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access, + u32 if_id, u32 reg_addr, u32 *data, u32 mask) +{ + config_func_info[dev_num].mv_ddr_dunit_read(reg_addr, mask, data); + + return MV_OK; +} + +/* + * Dunit Register Polling + */ +int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type, + u32 if_id, u32 exp_value, u32 mask, u32 offset, + u32 poll_tries) +{ + u32 poll_cnt = 0, interface_num = 0, start_if, end_if; + u32 read_data[MAX_INTERFACE_NUM]; + int ret; + int is_fail = 0, is_if_fail; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (access_type == ACCESS_TYPE_MULTICAST) { + start_if = 0; + end_if = MAX_INTERFACE_NUM - 1; + } else { + start_if = if_id; + end_if = if_id; + } + + for (interface_num = start_if; interface_num <= end_if; interface_num++) { + /* polling bit 3 for n times */ + VALIDATE_IF_ACTIVE(tm->if_act_mask, interface_num); + + is_if_fail = 0; + for (poll_cnt = 0; poll_cnt < poll_tries; poll_cnt++) { + ret = + ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, + interface_num, offset, read_data, + mask); + if (ret != MV_OK) + return ret; + + if (read_data[interface_num] == exp_value) + break; + } + + if (poll_cnt >= poll_tries) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("max poll IF #%d\n", interface_num)); + is_fail = 1; + is_if_fail = 1; + } + + training_result[training_stage][interface_num] = + (is_if_fail == 1) ? TEST_FAILED : TEST_SUCCESS; + } + + return (is_fail == 0) ? MV_OK : MV_FAIL; +} + +/* + * Bus read access + */ +int ddr3_tip_bus_read(u32 dev_num, u32 if_id, + enum hws_access_type phy_access, u32 phy_id, + enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data) +{ + return config_func_info[dev_num]. + mv_ddr_phy_read(phy_access, phy_id, phy_type, reg_addr, data); +} + +/* + * Bus write access + */ +int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type interface_access, + u32 if_id, enum hws_access_type phy_access, + u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, + u32 data_value) +{ + return config_func_info[dev_num]. 
+ mv_ddr_phy_write(phy_access, phy_id, phy_type, reg_addr, data_value, OPERATION_WRITE); +} + + +/* + * Phy read-modify-write + */ +int ddr3_tip_bus_read_modify_write(u32 dev_num, enum hws_access_type access_type, + u32 interface_id, u32 phy_id, + enum hws_ddr_phy phy_type, u32 reg_addr, + u32 data_value, u32 reg_mask) +{ + u32 data_val = 0, if_id, start_if, end_if; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (access_type == ACCESS_TYPE_MULTICAST) { + start_if = 0; + end_if = MAX_INTERFACE_NUM - 1; + } else { + start_if = interface_id; + end_if = interface_id; + } + + for (if_id = start_if; if_id <= end_if; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, ACCESS_TYPE_UNICAST, phy_id, + phy_type, reg_addr, &data_val)); + /* preserve the bits outside reg_mask, update only the bits inside it */ + data_value = (data_val & (~reg_mask)) | (data_value & reg_mask); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, phy_type, reg_addr, + data_value)); + } + + return MV_OK; +} + +/* + * ADLL Calibration + */ +int adll_calibration(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum mv_ddr_freq frequency) +{ + struct hws_tip_freq_config_info freq_config_info; + u32 bus_cnt = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* Reset divider_b assert -> de-assert */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, SDRAM_CFG_REG, + 0, 0x10000000)); + mdelay(10); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, SDRAM_CFG_REG, + 0x10000000, 0x10000000)); + + CHECK_STATUS(config_func_info[dev_num]. + tip_get_freq_config_info_func((u8)dev_num, frequency, + &freq_config_info)); + + for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + CHECK_STATUS(ddr3_tip_bus_read_modify_write + (dev_num, access_type, if_id, bus_cnt, + DDR_PHY_DATA, ADLL_CFG0_PHY_REG, + freq_config_info.bw_per_freq << 8, 0x700)); + CHECK_STATUS(ddr3_tip_bus_read_modify_write + (dev_num, access_type, if_id, bus_cnt, + DDR_PHY_DATA, ADLL_CFG2_PHY_REG, + freq_config_info.rate_per_freq, 0x7)); + } + + for (bus_cnt = 0; bus_cnt < DDR_IF_CTRL_SUBPHYS_NUM; bus_cnt++) { + CHECK_STATUS(ddr3_tip_bus_read_modify_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, bus_cnt, + DDR_PHY_CONTROL, ADLL_CFG0_PHY_REG, + freq_config_info.bw_per_freq << 8, 0x700)); + CHECK_STATUS(ddr3_tip_bus_read_modify_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, bus_cnt, + DDR_PHY_CONTROL, ADLL_CFG2_PHY_REG, + freq_config_info.rate_per_freq, 0x7)); + } + + /* DUnit to Phy drive post edge, ADLL reset assert de-assert */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, DRAM_PHY_CFG_REG, + 0, (0x80000000 | 0x40000000))); + mdelay(100 / (mv_ddr_freq_get(frequency)) / mv_ddr_freq_get(MV_DDR_FREQ_LOW_FREQ)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, DRAM_PHY_CFG_REG, + (0x80000000 | 0x40000000), (0x80000000 | 0x40000000))); + + /* polling for ADLL Done */ + if (ddr3_tip_if_polling(dev_num, access_type, if_id, + 0x3ff03ff, 0x3ff03ff, PHY_LOCK_STATUS_REG, + MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Freq_set: DDR3 poll failed (1)")); + } + + /* pup data_pup reset assert -> de-assert */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, SDRAM_CFG_REG, + 0, 0x60000000)); + mdelay(10); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, SDRAM_CFG_REG, + 0x60000000, 0x60000000)); + + return MV_OK; +}
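+ +/* + * Worked example (illustrative only; the numbers are assumptions, not values + * taken from this code): at a target frequency of 800 MHz, + * t_ckclk = MEGA / 800 = 1250 ps. With a speed-bin tWR of 15000 ps, + * time_to_nclk(15000, 1250) rounds up to 12 clock cycles, which is the kind + * of value ddr3_tip_freq_set() below encodes via twr_mask_table[]. + */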
+		      0x60000000, 0x60000000));
+
+	return MV_OK;
+}
+
+int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type,
+		      u32 if_id, enum mv_ddr_freq frequency)
+{
+	u32 cl_value = 0, cwl_value = 0, mem_mask = 0, val = 0,
+		bus_cnt = 0, t_wr = 0, t_ckclk = 0,
+		cnt_id;
+	u32 end_if, start_if;
+	u32 bus_index = 0;
+	int is_dll_off = 0;
+	enum mv_ddr_speed_bin speed_bin_index = 0;
+	struct hws_tip_freq_config_info freq_config_info;
+	enum hws_result *flow_result = training_result[training_stage];
+	u32 adll_tap = 0;
+	u32 cs_num;
+	u32 t2t;
+	u32 cs_mask[MAX_INTERFACE_NUM];
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+	unsigned int tclk;
+	enum mv_ddr_timing timing = tm->interface_params[if_id].timing;
+	u32 freq = mv_ddr_freq_get(frequency);
+
+	DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
+			  ("dev %d access %d IF %d freq %d\n", dev_num,
+			   access_type, if_id, frequency));
+
+	if (frequency == MV_DDR_FREQ_LOW_FREQ)
+		is_dll_off = 1;
+	if (access_type == ACCESS_TYPE_MULTICAST) {
+		start_if = 0;
+		end_if = MAX_INTERFACE_NUM - 1;
+	} else {
+		start_if = if_id;
+		end_if = if_id;
+	}
+
+	/* calculate interface cs mask */
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		/* cs enable is active low */
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		cs_mask[if_id] = CS_BIT_MASK;
+		training_result[training_stage][if_id] = TEST_SUCCESS;
+		ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs,
+				      &cs_mask[if_id]);
+	}
+
+	/* speed bin can be different for each interface */
+	/*
+	 * TODO: remove this loop for the multicast access functions
+	 * and loop over the unicast access functions instead
+	 */
+	for (if_id = start_if; if_id <= end_if; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+
+		flow_result[if_id] = TEST_SUCCESS;
+		speed_bin_index =
+			tm->interface_params[if_id].speed_bin_index;
+		if (tm->interface_params[if_id].memory_freq ==
+		    frequency) {
+			cl_value =
+				tm->interface_params[if_id].cas_l;
+			cwl_value =
+				tm->interface_params[if_id].cas_wl;
+		} else if (tm->cfg_src == MV_DDR_CFG_SPD) {
+			tclk = 1000000 / freq;
+			cl_value = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk);
+			if (cl_value == 0) {
+				printf("mv_ddr: unsupported cas latency value found\n");
+				return MV_FAIL;
+			}
+			cwl_value = mv_ddr_cwl_calc(tclk);
+			if (cwl_value == 0) {
+				printf("mv_ddr: unsupported cas write latency value found\n");
+				return MV_FAIL;
+			}
+		} else {
+			cl_value = mv_ddr_cl_val_get(speed_bin_index, frequency);
+			cwl_value = mv_ddr_cwl_val_get(speed_bin_index, frequency);
+		}
+
+		DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
+				  ("Freq_set dev 0x%x access 0x%x if 0x%x freq 0x%x speed %d:\n\t",
+				   dev_num, access_type, if_id,
+				   frequency, speed_bin_index));
+
+		for (cnt_id = 0; cnt_id < MV_DDR_FREQ_LAST; cnt_id++) {
+			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
+					  ("%d ", mv_ddr_cl_val_get(speed_bin_index, cnt_id)));
+		}
+
+		DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, ("\n"));
+		mem_mask = 0;
+		for (bus_index = 0; bus_index < octets_per_if_num;
+		     bus_index++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_index);
+			mem_mask |=
+				tm->interface_params[if_id].
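/*
 * When the configuration source is SPD (MV_DDR_CFG_SPD), CAS latency is
 * derived from the DIMM's tAAmin and the clock period instead of the
 * speed-bin tables. A worked example, assuming mv_ddr_cl_calc() is a
 * ceiling division of tAA by tCK (the conventional definition):
 *
 *	freq = 800 MHz           -> tclk = 1000000 / 800 = 1250 ps
 *	tAAmin = 13750 ps (DDR3-1600)
 *	cl_value = ceil(13750 / 1250) = 11
 *
 * A zero return from mv_ddr_cl_calc() or mv_ddr_cwl_calc() means the
 * period falls outside the supported range, and the frequency change is
 * aborted with MV_FAIL.
 */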
+ as_bus_params[bus_index].mirror_enable_bitmask; + } + + if (mem_mask != 0) { + /* motib redundent in KW28 */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, + if_id, + DUAL_DUNIT_CFG_REG, 0, 0x8)); + } + + /* dll state after exiting SR */ + if (is_dll_off == 1) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DFS_REG, 0x1, 0x1)); + } else { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DFS_REG, 0, 0x1)); + } + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DUNIT_MMASK_REG, 0, 0x1)); + /* DFS - block transactions */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + DFS_REG, 0x2, 0x2)); + + /* disable ODT in case of dll off */ + if (is_dll_off == 1) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + 0x1874, 0, 0x244)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + 0x1884, 0, 0x244)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + 0x1894, 0, 0x244)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + 0x18a4, 0, 0x244)); + } + + /* DFS - Enter Self-Refresh */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, DFS_REG, 0x4, + 0x4)); + /* polling on self refresh entry */ + if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, + if_id, 0x8, 0x8, DFS_REG, + MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Freq_set: DDR3 poll failed on SR entry\n")); + } + + /* Calculate 2T mode */ + if (mode_2t != 0xff) { + t2t = mode_2t; + } else if (timing != MV_DDR_TIM_DEFAULT) { + t2t = (timing == MV_DDR_TIM_2T) ? 1 : 0; + } else { + /* Calculate number of CS per interface */ + cs_num = mv_ddr_cs_num_get(); + t2t = (cs_num == 1) ? 0 : 1; + } + + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_INTERLEAVE_WA) == 1) { + /* Use 1T mode if 1:1 ratio configured */ + if (config_func_info[dev_num].tip_get_clock_ratio(frequency) == 1) { + /* Low freq*/ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + SDRAM_OPEN_PAGES_CTRL_REG, 0x0, 0x3C0)); + t2t = 0; + } else { + /* Middle or target freq */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + SDRAM_OPEN_PAGES_CTRL_REG, 0x3C0, 0x3C0)); + } + } + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + DUNIT_CTRL_LOW_REG, t2t << 3, 0x3 << 3)); + + /* PLL configuration */ + config_func_info[dev_num].tip_set_freq_divider_func(dev_num, if_id, + frequency); + + /* DFS - CL/CWL/WR parameters after exiting SR */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, DFS_REG, + (cl_mask_table[cl_value] << 8), 0xf00)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, DFS_REG, + (cwl_mask_table[cwl_value] << 12), 0x7000)); + + t_ckclk = (MEGA / freq); + t_wr = time_to_nclk(mv_ddr_speed_bin_timing_get + (speed_bin_index, + SPEED_BIN_TWR), t_ckclk); + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, DFS_REG, + (twr_mask_table[t_wr] << 16), 0x70000)); + + /* Restore original RTT values if returning from DLL OFF mode */ + if (is_dll_off == 1) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, 0x1874, + g_dic | g_rtt_nom, 0x266)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, 0x1884, + g_dic | g_rtt_nom, 0x266)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, 0x1894, + g_dic | g_rtt_nom, 0x266)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, 0x18a4, + g_dic | g_rtt_nom, 0x266)); + } + + /* Reset divider_b assert -> de-assert */ + 
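/*
 * The write-recovery field placed in DFS_REG bits [18:16] above is first
 * converted from picoseconds to clock cycles. A short worked example,
 * assuming time_to_nclk() rounds up to whole clocks:
 *
 *	freq = 800 MHz  -> t_ckclk = MEGA / 800 = 1250 ps
 *	tWR (speed bin) = 15000 ps
 *	t_wr = ceil(15000 / 1250) = 12 clocks
 *
 * twr_mask_table[12] then supplies the register encoding.
 */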
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id,
+			      SDRAM_CFG_REG, 0, 0x10000000));
+		mdelay(10);
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id,
+			      SDRAM_CFG_REG, 0x10000000, 0x10000000));
+
+		/* ADLL configuration as a function of process and frequency */
+		CHECK_STATUS(config_func_info[dev_num].
+			     tip_get_freq_config_info_func(dev_num, frequency,
+							   &freq_config_info));
+
+		/* TBD: check milo5 using device ID? */
+		for (bus_cnt = 0; bus_cnt < octets_per_if_num;
+		     bus_cnt++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt);
+			CHECK_STATUS(ddr3_tip_bus_read_modify_write
+				     (dev_num, ACCESS_TYPE_UNICAST,
+				      if_id, bus_cnt, DDR_PHY_DATA,
+				      0x92,
+				      freq_config_info.
+				      bw_per_freq << 8
+				      /*freq_mask[dev_num][frequency] << 8 */
+				      , 0x700));
+			CHECK_STATUS(ddr3_tip_bus_read_modify_write
+				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
+				      bus_cnt, DDR_PHY_DATA, 0x94,
+				      freq_config_info.rate_per_freq, 0x7));
+		}
+
+		/* Dunit to PHY drive post edge, ADLL reset assert -> de-assert */
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id,
+			      DRAM_PHY_CFG_REG, 0,
+			      (0x80000000 | 0x40000000)));
+		mdelay(100 / (freq / mv_ddr_freq_get(MV_DDR_FREQ_LOW_FREQ)));
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id,
+			      DRAM_PHY_CFG_REG, (0x80000000 | 0x40000000),
+			      (0x80000000 | 0x40000000)));
+
+		/* polling for ADLL Done */
+		if (ddr3_tip_if_polling
+		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3ff03ff,
+		     0x3ff03ff, PHY_LOCK_STATUS_REG,
+		     MAX_POLLING_ITERATIONS) != MV_OK) {
+			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
+					  ("Freq_set: DDR3 poll failed(1)\n"));
+		}
+
+		/* pup data_pup reset assert -> de-assert */
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id,
+			      SDRAM_CFG_REG, 0, 0x60000000));
+		mdelay(10);
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id,
+			      SDRAM_CFG_REG, 0x60000000, 0x60000000));
+
+		/* Set proper timing params before exiting Self-Refresh */
+		ddr3_tip_set_timing(dev_num, access_type, if_id, frequency);
+		if (delay_enable != 0) {
+			adll_tap = (is_dll_off == 1) ? 1000 : (MEGA / (freq * 64));
+			ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap);
+		}
+
+		/* Exit SR */
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id, DFS_REG, 0,
+			      0x4));
+		if (ddr3_tip_if_polling
+		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x8, DFS_REG,
+		     MAX_POLLING_ITERATIONS) != MV_OK) {
+			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
+					  ("Freq_set: DDR3 poll failed(2)\n"));
+		}
+
+		/* Refresh Command */
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id,
+			      SDRAM_OP_REG, 0x2, 0xf1f));
+		if (ddr3_tip_if_polling
+		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f,
+		     SDRAM_OP_REG, MAX_POLLING_ITERATIONS) != MV_OK) {
+			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
+					  ("Freq_set: DDR3 poll failed(3)\n"));
+		}
+
+		/* Release DFS Block */
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id, DFS_REG, 0,
+			      0x2));
+		/* Controller to MBUS Retry - normal */
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id, DUNIT_MMASK_REG,
+			      0x1, 0x1));
+
+		/* MR0: Burst Length 8, CL, Auto_precharge 0x16cc */
+		val =
+			((cl_mask_table[cl_value] & 0x1) << 2) |
+			((cl_mask_table[cl_value] & 0xe) << 3);
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, access_type, if_id, MR0_REG,
+			      val, (0x7 << 4) | (1 << 2)));
+		/* MR2: CWL, Auto Self-Refresh - disable */
+		val = (cwl_mask_table[cwl_value] << 3) | g_rtt_wr;
+		/*
+		 * nklein 24.10.13 - should not be here - leave value as set in
+		 * the init configuration val |= (1 << 9);
+		 * val |= ((tm->interface_params[if_id].
+ * interface_temp == MV_DDR_TEMP_HIGH) ? (1 << 7) : 0); + */ + /* nklein 24.10.13 - see above comment */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, + if_id, MR2_REG, + val, (0x7 << 3) | (0x3 << 9))); + + /* ODT TIMING */ + val = ((cl_value - cwl_value + 1) << 4) | + ((cl_value - cwl_value + 6) << 8) | + ((cl_value - 1) << 12) | ((cl_value + 6) << 16); + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, + if_id, DDR_ODT_TIMING_LOW_REG, + val, 0xffff0)); + val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, + if_id, DDR_ODT_TIMING_HIGH_REG, + val, 0xffff)); + + /* in case of ddr4 need to set the receiver to odt always 'on' (odt_config = '0') + * in case of ddr3 configure the odt through the timing + */ + if (odt_config != 0) { + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, DUNIT_ODT_CTRL_REG, 0xf, 0xf)); + } + else { + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, DUNIT_ODT_CTRL_REG, + 0x30f, 0x30f)); + } + + /* re-write CL */ + val = ((cl_mask_table[cl_value] & 0x1) << 2) | + ((cl_mask_table[cl_value] & 0xe) << 3); + + cs_mask[0] = 0xc; + + CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MR_CMD0, + val, (0x7 << 4) | (0x1 << 2))); + + /* re-write CWL */ + val = (cwl_mask_table[cwl_value] << 3) | g_rtt_wr; + CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MR_CMD2, + val, (0x7 << 3) | (0x3 << 9))); + + if (mem_mask != 0) { + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, + if_id, + DUAL_DUNIT_CFG_REG, + 1 << 3, 0x8)); + } + } + + return MV_OK; +} + +/* + * Set ODT values + */ +static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type, + u32 if_id, u32 cl_value, u32 cwl_value) +{ + /* ODT TIMING */ + u32 val = (cl_value - cwl_value + 6); + + val = ((cl_value - cwl_value + 1) << 4) | ((val & 0xf) << 8) | + (((cl_value - 1) & 0xf) << 12) | + (((cl_value + 6) & 0xf) << 16) | (((val & 0x10) >> 4) << 21); + val |= (((cl_value - 1) >> 4) << 22) | (((cl_value + 6) >> 4) << 23); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + DDR_ODT_TIMING_LOW_REG, val, 0xffff0)); + val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12); + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + DDR_ODT_TIMING_HIGH_REG, val, 0xffff)); + if (odt_additional == 1) { + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, + if_id, + SDRAM_ODT_CTRL_HIGH_REG, + 0xf, 0xf)); + } + + /* ODT Active */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + DUNIT_ODT_CTRL_REG, 0xf, 0xf)); + + return MV_OK; +} + +/* + * Set Timing values for training + */ +static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum mv_ddr_freq frequency) +{ + u32 t_ckclk = 0, t_ras = 0; + u32 t_rcd = 0, t_rp = 0, t_wr = 0, t_wtr = 0, t_rrd = 0, t_rtp = 0, + t_rfc = 0, t_mod = 0, t_r2r = 0x3, t_r2r_high = 0, + t_r2w_w2r = 0x3, t_r2w_w2r_high = 0x1, t_w2w = 0x3; + u32 refresh_interval_cnt, t_hclk, t_refi, t_faw, t_pd, t_xpdll; + u32 val = 0, page_size = 0, mask = 0; + enum mv_ddr_speed_bin speed_bin_index; + enum mv_ddr_die_capacity memory_size = MV_DDR_DIE_CAP_2GBIT; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + u32 freq = mv_ddr_freq_get(frequency); + + speed_bin_index = tm->interface_params[if_id].speed_bin_index; + memory_size = tm->interface_params[if_id].memory_size; + page_size = mv_ddr_page_size_get(tm->interface_params[if_id].bus_width, memory_size); + t_ckclk = (MEGA / freq); + /* HCLK in[ps] */ + t_hclk 
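/*
 * The ODT timing windows above are pure functions of CL and CWL. For
 * CL = 11 and CWL = 8 (DDR3-1600), the four packed fields come out as:
 *
 *	CL - CWL + 1 = 4   -> bits [7:4]
 *	CL - CWL + 6 = 9   -> bits [11:8]
 *	CL - 1       = 10  -> bits [15:12]
 *	CL + 6       = 17  -> bits [19:16]
 *
 * Note that CL + 6 can exceed a 4-bit field; ddr3_tip_write_odt() below
 * extends the same packing with explicit carry bits for that case.
 */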
= MEGA / (freq / config_func_info[dev_num].tip_get_clock_ratio(frequency)); + + t_refi = (tm->interface_params[if_id].interface_temp == MV_DDR_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW; + t_refi *= 1000; /* psec */ + refresh_interval_cnt = t_refi / t_hclk; /* no units */ + + if (page_size == 1) { + t_faw = mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TFAW1K); + t_faw = time_to_nclk(t_faw, t_ckclk); + t_faw = GET_MAX_VALUE(20, t_faw); + } else { /* page size =2, we do not support page size 0.5k */ + t_faw = mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TFAW2K); + t_faw = time_to_nclk(t_faw, t_ckclk); + t_faw = GET_MAX_VALUE(28, t_faw); + } + + t_pd = GET_MAX_VALUE(t_ckclk * 3, mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TPD)); + t_pd = time_to_nclk(t_pd, t_ckclk); + + t_xpdll = GET_MAX_VALUE(t_ckclk * 10, mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TXPDLL)); + t_xpdll = time_to_nclk(t_xpdll, t_ckclk); + + t_rrd = (page_size == 1) ? mv_ddr_speed_bin_timing_get(speed_bin_index, + SPEED_BIN_TRRD1K) : + mv_ddr_speed_bin_timing_get(speed_bin_index, SPEED_BIN_TRRD2K); + t_rrd = GET_MAX_VALUE(t_ckclk * 4, t_rrd); + t_rtp = GET_MAX_VALUE(t_ckclk * 4, mv_ddr_speed_bin_timing_get(speed_bin_index, + SPEED_BIN_TRTP)); + t_mod = GET_MAX_VALUE(t_ckclk * 12, 15000); + t_wtr = GET_MAX_VALUE(t_ckclk * 4, mv_ddr_speed_bin_timing_get(speed_bin_index, + SPEED_BIN_TWTR)); + t_ras = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, + SPEED_BIN_TRAS), + t_ckclk); + t_rcd = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, + SPEED_BIN_TRCD), + t_ckclk); + t_rp = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, + SPEED_BIN_TRP), + t_ckclk); + t_wr = time_to_nclk(mv_ddr_speed_bin_timing_get(speed_bin_index, + SPEED_BIN_TWR), + t_ckclk); + t_wtr = time_to_nclk(t_wtr, t_ckclk); + t_rrd = time_to_nclk(t_rrd, t_ckclk); + t_rtp = time_to_nclk(t_rtp, t_ckclk); + t_rfc = time_to_nclk(mv_ddr_rfc_get(memory_size) * 1000, t_ckclk); + t_mod = time_to_nclk(t_mod, t_ckclk); + + /* SDRAM Timing Low */ + val = (((t_ras - 1) & SDRAM_TIMING_LOW_TRAS_MASK) << SDRAM_TIMING_LOW_TRAS_OFFS) | + (((t_rcd - 1) & SDRAM_TIMING_LOW_TRCD_MASK) << SDRAM_TIMING_LOW_TRCD_OFFS) | + (((t_rcd - 1) >> SDRAM_TIMING_LOW_TRCD_OFFS & SDRAM_TIMING_HIGH_TRCD_MASK) + << SDRAM_TIMING_HIGH_TRCD_OFFS) | + (((t_rp - 1) & SDRAM_TIMING_LOW_TRP_MASK) << SDRAM_TIMING_LOW_TRP_OFFS) | + (((t_rp - 1) >> SDRAM_TIMING_LOW_TRP_MASK & SDRAM_TIMING_HIGH_TRP_MASK) + << SDRAM_TIMING_HIGH_TRP_OFFS) | + (((t_wr - 1) & SDRAM_TIMING_LOW_TWR_MASK) << SDRAM_TIMING_LOW_TWR_OFFS) | + (((t_wtr - 1) & SDRAM_TIMING_LOW_TWTR_MASK) << SDRAM_TIMING_LOW_TWTR_OFFS) | + ((((t_ras - 1) >> 4) & SDRAM_TIMING_LOW_TRAS_HIGH_MASK) << SDRAM_TIMING_LOW_TRAS_HIGH_OFFS) | + (((t_rrd - 1) & SDRAM_TIMING_LOW_TRRD_MASK) << SDRAM_TIMING_LOW_TRRD_OFFS) | + (((t_rtp - 1) & SDRAM_TIMING_LOW_TRTP_MASK) << SDRAM_TIMING_LOW_TRTP_OFFS); + + mask = (SDRAM_TIMING_LOW_TRAS_MASK << SDRAM_TIMING_LOW_TRAS_OFFS) | + (SDRAM_TIMING_LOW_TRCD_MASK << SDRAM_TIMING_LOW_TRCD_OFFS) | + (SDRAM_TIMING_HIGH_TRCD_MASK << SDRAM_TIMING_HIGH_TRCD_OFFS) | + (SDRAM_TIMING_LOW_TRP_MASK << SDRAM_TIMING_LOW_TRP_OFFS) | + (SDRAM_TIMING_HIGH_TRP_MASK << SDRAM_TIMING_HIGH_TRP_OFFS) | + (SDRAM_TIMING_LOW_TWR_MASK << SDRAM_TIMING_LOW_TWR_OFFS) | + (SDRAM_TIMING_LOW_TWTR_MASK << SDRAM_TIMING_LOW_TWTR_OFFS) | + (SDRAM_TIMING_LOW_TRAS_HIGH_MASK << SDRAM_TIMING_LOW_TRAS_HIGH_OFFS) | + (SDRAM_TIMING_LOW_TRRD_MASK << SDRAM_TIMING_LOW_TRRD_OFFS) | + (SDRAM_TIMING_LOW_TRTP_MASK << 
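/*
 * refresh_interval_cnt expresses the tREFI period in controller (HCLK)
 * cycles. Worked example, assuming TREFI_LOW is 7800 ns and a 1:2
 * DRAM-to-HCLK clock ratio at 800 MHz (illustrative numbers only;
 * TREFI_HIGH halves the interval for high-temperature operation):
 *
 *	t_hclk = MEGA / (800 / 2) = 2500 ps
 *	t_refi = 7800 * 1000      = 7800000 ps
 *	refresh_interval_cnt = 7800000 / 2500 = 3120
 *
 * The count is written into the REFRESH field of SDRAM_CFG_REG below.
 */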
SDRAM_TIMING_LOW_TRTP_OFFS); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + SDRAM_TIMING_LOW_REG, val, mask)); + + /* SDRAM Timing High */ + val = 0; + mask = 0; + + val = (((t_rfc - 1) & SDRAM_TIMING_HIGH_TRFC_MASK) << SDRAM_TIMING_HIGH_TRFC_OFFS) | + ((t_r2r & SDRAM_TIMING_HIGH_TR2R_MASK) << SDRAM_TIMING_HIGH_TR2R_OFFS) | + ((t_r2w_w2r & SDRAM_TIMING_HIGH_TR2W_W2R_MASK) << SDRAM_TIMING_HIGH_TR2W_W2R_OFFS) | + ((t_w2w & SDRAM_TIMING_HIGH_TW2W_MASK) << SDRAM_TIMING_HIGH_TW2W_OFFS) | + ((((t_rfc - 1) >> 7) & SDRAM_TIMING_HIGH_TRFC_HIGH_MASK) << SDRAM_TIMING_HIGH_TRFC_HIGH_OFFS) | + ((t_r2r_high & SDRAM_TIMING_HIGH_TR2R_HIGH_MASK) << SDRAM_TIMING_HIGH_TR2R_HIGH_OFFS) | + ((t_r2w_w2r_high & SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_MASK) << SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_OFFS) | + (((t_mod - 1) & SDRAM_TIMING_HIGH_TMOD_MASK) << SDRAM_TIMING_HIGH_TMOD_OFFS) | + ((((t_mod - 1) >> 4) & SDRAM_TIMING_HIGH_TMOD_HIGH_MASK) << SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS); + + mask = (SDRAM_TIMING_HIGH_TRFC_MASK << SDRAM_TIMING_HIGH_TRFC_OFFS) | + (SDRAM_TIMING_HIGH_TR2R_MASK << SDRAM_TIMING_HIGH_TR2R_OFFS) | + (SDRAM_TIMING_HIGH_TR2W_W2R_MASK << SDRAM_TIMING_HIGH_TR2W_W2R_OFFS) | + (SDRAM_TIMING_HIGH_TW2W_MASK << SDRAM_TIMING_HIGH_TW2W_OFFS) | + (SDRAM_TIMING_HIGH_TRFC_HIGH_MASK << SDRAM_TIMING_HIGH_TRFC_HIGH_OFFS) | + (SDRAM_TIMING_HIGH_TR2R_HIGH_MASK << SDRAM_TIMING_HIGH_TR2R_HIGH_OFFS) | + (SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_MASK << SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_OFFS) | + (SDRAM_TIMING_HIGH_TMOD_MASK << SDRAM_TIMING_HIGH_TMOD_OFFS) | + (SDRAM_TIMING_HIGH_TMOD_HIGH_MASK << SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + SDRAM_TIMING_HIGH_REG, val, mask)); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + SDRAM_CFG_REG, + refresh_interval_cnt << REFRESH_OFFS, + REFRESH_MASK << REFRESH_OFFS)); + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + SDRAM_ADDR_CTRL_REG, (t_faw - 1) << T_FAW_OFFS, + T_FAW_MASK << T_FAW_OFFS)); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, DDR_TIMING_REG, + (t_pd - 1) << DDR_TIMING_TPD_OFFS | + (t_xpdll - 1) << DDR_TIMING_TXPDLL_OFFS, + DDR_TIMING_TPD_MASK << DDR_TIMING_TPD_OFFS | + DDR_TIMING_TXPDLL_MASK << DDR_TIMING_TXPDLL_OFFS)); + + + return MV_OK; +} + + +/* + * Write CS Result + */ +int ddr3_tip_write_cs_result(u32 dev_num, u32 offset) +{ + u32 if_id, bus_num, cs_bitmask, data_val, cs_num; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_num = 0; bus_num < octets_per_if_num; + bus_num++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_num); + cs_bitmask = + tm->interface_params[if_id]. 
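/*
 * All of the SDRAM_TIMING_* programming above relies on the val/mask
 * convention of ddr3_tip_if_write(): only bits set in mask are updated.
 * A minimal model of that contract (sketch only; the real implementation
 * sits behind config_func_info[].mv_ddr_dunit_write):
 *
 *	u32 reg = reg_read(addr);	// current register value
 *	reg &= ~mask;			// clear only the masked field
 *	reg |= (val & mask);		// merge the new field value
 *	reg_write(addr, reg);
 *
 * Building val and mask from the same MASK/OFFS pairs keeps the two in
 * lockstep and avoids clobbering neighbouring fields.
 */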
+ as_bus_params[bus_num].cs_bitmask; + if (cs_bitmask != effective_cs) { + cs_num = GET_CS_FROM_MASK(cs_bitmask); + ddr3_tip_bus_read(dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_num, + DDR_PHY_DATA, + offset + + (effective_cs * 0x4), + &data_val); + ddr3_tip_bus_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, + bus_num, DDR_PHY_DATA, + offset + + (cs_num * 0x4), + data_val); + } + } + } + + return MV_OK; +} + +/* + * Write MRS + */ +int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, enum mr_number mr_num, u32 data, u32 mask) +{ + u32 if_id; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, mr_data[mr_num].reg_addr, data, mask)); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + SDRAM_OP_REG, + (cs_mask_arr[if_id] << 8) | mr_data[mr_num].cmd, 0xf1f)); + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, + 0x1f, SDRAM_OP_REG, + MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("write_mrs_cmd: Poll cmd fail")); + } + } + + return MV_OK; +} + +/* + * Reset XSB Read FIFO + */ +int ddr3_tip_reset_fifo_ptr(u32 dev_num) +{ + u32 if_id = 0; + + /* Configure PHY reset value to 0 in order to "clean" the FIFO */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + if_id, 0x15c8, 0, 0xff000000)); + /* + * Move PHY to RL mode (only in RL mode the PHY overrides FIFO values + * during FIFO reset) + */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + if_id, TRAINING_SW_2_REG, + 0x1, 0x9)); + /* In order that above configuration will influence the PHY */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + if_id, 0x15b0, + 0x80000000, 0x80000000)); + /* Reset read fifo assertion */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + if_id, 0x1400, 0, 0x40000000)); + /* Reset read fifo deassertion */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + if_id, 0x1400, + 0x40000000, 0x40000000)); + /* Move PHY back to functional mode */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + if_id, TRAINING_SW_2_REG, + 0x8, 0x9)); + /* Stop training machine */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + if_id, 0x15b4, 0x10000, 0x10000)); + + return MV_OK; +} + +/* + * Reset Phy registers + */ +int ddr3_tip_ddr3_reset_phy_regs(u32 dev_num) +{ + u32 if_id, phy_id, cs; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (phy_id = 0; phy_id < octets_per_if_num; + phy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + phy_id, DDR_PHY_DATA, + WL_PHY_REG(effective_cs), + phy_reg0_val)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + RL_PHY_REG(effective_cs), + phy_reg2_val)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + CRX_PHY_REG(effective_cs), 
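/*
 * ddr3_tip_write_mrs_cmd() stages the mode-register value first, then
 * issues an SDRAM_OP_REG command per interface and polls bits [4:0] back
 * to zero to confirm the MRS cycle completed. A typical call site, as
 * used in the frequency-set flow above (cs_mask selects the chip selects
 * that receive the command):
 *
 *	u32 cs_mask[MAX_INTERFACE_NUM] = { 0xc };
 *
 *	CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MR_CMD0,
 *					    val, (0x7 << 4) | (0x1 << 2)));
 */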
phy_reg3_val)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), phy_reg1_val)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + PBS_TX_BCAST_PHY_REG(effective_cs), 0x0)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + PBS_RX_BCAST_PHY_REG(effective_cs), 0)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + PBS_TX_PHY_REG(effective_cs, DQSP_PAD), 0)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, DQSP_PAD), 0)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + PBS_TX_PHY_REG(effective_cs, DQSN_PAD), 0)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, DQSN_PAD), 0)); + } + } + + /* Set Receiver Calibration value */ + for (cs = 0; cs < MAX_CS_NUM; cs++) { + /* PHY register 0xdb bits[5:0] - configure to 63 */ + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + DDR_PHY_DATA, VREF_BCAST_PHY_REG(cs), 63)); + } + + return MV_OK; +} + +/* + * Restore Dunit registers + */ +int ddr3_tip_restore_dunit_regs(u32 dev_num) +{ + u32 index_cnt; + + mv_ddr_set_calib_controller(); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, MAIN_PADS_CAL_MACH_CTRL_REG, + 0x1, 0x1)); + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, MAIN_PADS_CAL_MACH_CTRL_REG, + calibration_update_control << 3, + 0x3 << 3)); + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, + ODPG_WR_RD_MODE_ENA_REG, + 0xffff, MASK_ALL_BITS)); + + for (index_cnt = 0; index_cnt < ARRAY_SIZE(odpg_default_value); + index_cnt++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + odpg_default_value[index_cnt].reg_addr, + odpg_default_value[index_cnt].reg_data, + odpg_default_value[index_cnt].reg_mask)); + } + + return MV_OK; +} + +int ddr3_tip_adll_regs_bypass(u32 dev_num, u32 reg_val1, u32 reg_val2) +{ + u32 if_id, phy_id; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (phy_id = 0; phy_id < octets_per_if_num; phy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), reg_val1)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA, + PBS_TX_BCAST_PHY_REG(effective_cs), reg_val2)); + } + } + + return MV_OK; +} + +/* + * Auto tune main flow + */ +static int ddr3_tip_ddr3_training_main_flow(u32 dev_num) +{ +/* TODO: enable this functionality for other platforms */ + struct init_cntr_param init_cntr_prm; + int ret = MV_OK; + int adll_bypass_flag = 0; + u32 if_id; + unsigned int max_cs = mv_ddr_cs_num_get(); + struct mv_ddr_topology_map *tm = 
mv_ddr_topology_map_get(); + enum mv_ddr_freq freq = tm->interface_params[0].memory_freq; + unsigned int *freq_tbl = mv_ddr_freq_tbl_get(); + +#ifdef DDR_VIEWER_TOOL + if (debug_training == DEBUG_LEVEL_TRACE) { + CHECK_STATUS(print_device_info((u8)dev_num)); + } +#endif + + ddr3_tip_validate_algo_components(dev_num); + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + CHECK_STATUS(ddr3_tip_ddr3_reset_phy_regs(dev_num)); + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + freq_tbl[MV_DDR_FREQ_LOW_FREQ] = dfs_low_freq; + + if (is_pll_before_init != 0) { + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + config_func_info[dev_num].tip_set_freq_divider_func( + (u8)dev_num, if_id, freq); + } + } + +/* TODO: enable this functionality for other platforms */ + if (is_adll_calib_before_init != 0) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("with adll calib before init\n")); + adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq); + } + + if (is_reg_dump != 0) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("Dump before init controller\n")); + ddr3_tip_reg_dump(dev_num); + } + + if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) { + training_stage = INIT_CONTROLLER; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("INIT_CONTROLLER_MASK_BIT\n")); + init_cntr_prm.do_mrs_phy = 1; + init_cntr_prm.is_ctrl64_bit = 0; + init_cntr_prm.init_phy = 1; + init_cntr_prm.msys_init = 0; + ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("hws_ddr3_tip_init_controller failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + ret = adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("adll_calibration failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + + if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) { + training_stage = SET_LOW_FREQ; + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + ddr3_tip_adll_regs_bypass(dev_num, 0, 0x1f); + adll_bypass_flag = 1; + } + effective_cs = 0; + + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("SET_LOW_FREQ_MASK_BIT %d\n", + freq_tbl[low_freq])); + ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, low_freq); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_freq_set failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & WRITE_LEVELING_LF_MASK_BIT) { + training_stage = WRITE_LEVELING_LF; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("WRITE_LEVELING_LF_MASK_BIT\n")); + ret = ddr3_tip_dynamic_write_leveling(dev_num, 1); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_dynamic_write_leveling LF failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + if (mask_tune_func & LOAD_PATTERN_MASK_BIT) { + training_stage = LOAD_PATTERN; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("LOAD_PATTERN_MASK_BIT #%d\n", + effective_cs)); + ret = ddr3_tip_load_all_pattern_to_mem(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", + effective_cs)); + if (debug_mode == 0) + return 
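/*
 * Each training stage in the main flow follows the same gating pattern:
 * a bit in mask_tune_func enables the stage, training_stage records
 * where the flow is for later failure attribution, and debug_mode turns
 * hard failures into logged-but-continuing ones (SOME_STAGE_* below is a
 * placeholder, not a real mask bit):
 *
 *	if (mask_tune_func & SOME_STAGE_MASK_BIT) {
 *		training_stage = SOME_STAGE;
 *		ret = some_stage_handler(dev_num);
 *		if (is_reg_dump != 0)
 *			ddr3_tip_reg_dump(dev_num);
 *		if (ret != MV_OK && debug_mode == 0)
 *			return MV_FAIL;
 *	}
 */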
MV_FAIL; + } + } + } + + if (adll_bypass_flag == 1) { + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + ddr3_tip_adll_regs_bypass(dev_num, phy_reg1_val, 0); + adll_bypass_flag = 0; + } + } + + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) { + training_stage = SET_MEDIUM_FREQ; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("SET_MEDIUM_FREQ_MASK_BIT %d\n", + freq_tbl[medium_freq])); + ret = + ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, medium_freq); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_freq_set failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & WRITE_LEVELING_MASK_BIT) { + training_stage = WRITE_LEVELING; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("WRITE_LEVELING_MASK_BIT\n")); + if ((rl_mid_freq_wa == 0) || (freq_tbl[medium_freq] == 533)) { + ret = ddr3_tip_dynamic_write_leveling(dev_num, 0); + } else { + /* Use old WL */ + ret = ddr3_tip_legacy_dynamic_write_leveling(dev_num); + } + + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_dynamic_write_leveling failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) { + training_stage = LOAD_PATTERN_2; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("LOAD_PATTERN_2_MASK_BIT CS #%d\n", + effective_cs)); + ret = ddr3_tip_load_all_pattern_to_mem(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n", + effective_cs)); + if (debug_mode == 0) + return MV_FAIL; + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + if (mask_tune_func & READ_LEVELING_MASK_BIT) { + training_stage = READ_LEVELING; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("READ_LEVELING_MASK_BIT\n")); + if ((rl_mid_freq_wa == 0) || (freq_tbl[medium_freq] == 533)) { + ret = ddr3_tip_dynamic_read_leveling(dev_num, medium_freq); + } else { + /* Use old RL */ + ret = ddr3_tip_legacy_dynamic_read_leveling(dev_num); + } + + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_dynamic_read_leveling failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) { + training_stage = WRITE_LEVELING_SUPP; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("WRITE_LEVELING_SUPP_MASK_BIT\n")); + ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_dynamic_write_leveling_supp failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + if (mask_tune_func & PBS_RX_MASK_BIT) { + training_stage = PBS_RX; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("PBS_RX_MASK_BIT CS #%d\n", + effective_cs)); + ret = ddr3_tip_pbs_rx(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_pbs_rx failure CS #%d\n", + effective_cs)); + if (debug_mode == 0) + return MV_FAIL; + } + } + } + + for (effective_cs = 0; effective_cs < 
max_cs; effective_cs++) { + if (mask_tune_func & PBS_TX_MASK_BIT) { + training_stage = PBS_TX; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("PBS_TX_MASK_BIT CS #%d\n", + effective_cs)); + ret = ddr3_tip_pbs_tx(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_pbs_tx failure CS #%d\n", + effective_cs)); + if (debug_mode == 0) + return MV_FAIL; + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) { + training_stage = SET_TARGET_FREQ; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("SET_TARGET_FREQ_MASK_BIT %d\n", + freq_tbl[tm-> + interface_params[first_active_if]. + memory_freq])); + ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, + tm->interface_params[first_active_if]. + memory_freq); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_freq_set failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) { + training_stage = WRITE_LEVELING_TF; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("WRITE_LEVELING_TF_MASK_BIT\n")); + ret = ddr3_tip_dynamic_write_leveling(dev_num, 0); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_dynamic_write_leveling TF failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & LOAD_PATTERN_HIGH_MASK_BIT) { + training_stage = LOAD_PATTERN_HIGH; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("LOAD_PATTERN_HIGH\n")); + ret = ddr3_tip_load_all_pattern_to_mem(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_load_all_pattern_to_mem failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) { + training_stage = READ_LEVELING_TF; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("READ_LEVELING_TF_MASK_BIT\n")); + ret = ddr3_tip_dynamic_read_leveling(dev_num, tm-> + interface_params[first_active_if]. 
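/*
 * Per-chip-select stages (load pattern, PBS, VREF, centralization) run
 * in loops over effective_cs, and the flow resets effective_cs to 0
 * after every such loop. This matters because several helpers index PHY
 * registers through macros such as:
 *
 *	WL_PHY_REG(effective_cs)
 *	CTX_PHY_REG(effective_cs)
 *
 * so a stale effective_cs would silently redirect later register
 * accesses to the wrong chip select's register copy.
 */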
+ memory_freq); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_dynamic_read_leveling TF failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & RL_DQS_BURST_MASK_BIT) { + training_stage = READ_LEVELING_TF; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("RL_DQS_BURST_MASK_BIT\n")); + ret = mv_ddr_rl_dqs_burst(0, 0, tm->interface_params[0].memory_freq); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("mv_ddr_rl_dqs_burst TF failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + + if (mask_tune_func & DM_PBS_TX_MASK_BIT) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("DM_PBS_TX_MASK_BIT\n")); + } + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) { + training_stage = VREF_CALIBRATION; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("VREF\n")); + ret = ddr3_tip_vref(dev_num); + if (is_reg_dump != 0) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("VREF Dump\n")); + ddr3_tip_reg_dump(dev_num); + } + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_vref failure\n")); + if (debug_mode == 0) + return MV_FAIL; + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) { + training_stage = CENTRALIZATION_RX; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("CENTRALIZATION_RX_MASK_BIT CS #%d\n", + effective_cs)); + ret = ddr3_tip_centralization_rx(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_centralization_rx failure CS #%d\n", + effective_cs)); + if (debug_mode == 0) + return MV_FAIL; + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) { + training_stage = WRITE_LEVELING_SUPP_TF; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("WRITE_LEVELING_SUPP_TF_MASK_BIT CS #%d\n", + effective_cs)); + ret = ddr3_tip_dynamic_write_leveling_supp(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_dynamic_write_leveling_supp TF failure CS #%d\n", + effective_cs)); + if (debug_mode == 0) + return MV_FAIL; + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) { + training_stage = CENTRALIZATION_TX; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("CENTRALIZATION_TX_MASK_BIT CS #%d\n", + effective_cs)); + ret = ddr3_tip_centralization_tx(dev_num); + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + if (ret != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("ddr3_tip_centralization_tx failure CS #%d\n", + effective_cs)); + if (debug_mode == 0) + return MV_FAIL; + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("restore registers to default\n")); + /* restore register values */ + CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num)); + + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + + return MV_OK; +} + +/* + * DDR3 
Dynamic training flow + */ +static int ddr3_tip_ddr3_auto_tune(u32 dev_num) +{ + int status; + u32 if_id, stage; + int is_if_fail = 0, is_auto_tune_fail = 0; + + training_stage = INIT_CONTROLLER; + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) + training_result[stage][if_id] = NO_TEST_DONE; + } + + status = ddr3_tip_ddr3_training_main_flow(dev_num); + + /* activate XSB test */ + if (xsb_validate_type != 0) { + run_xsb_test(dev_num, xsb_validation_base_address, 1, 1, + 0x1024); + } + + if (is_reg_dump != 0) + ddr3_tip_reg_dump(dev_num); + + /* print log */ + CHECK_STATUS(ddr3_tip_print_log(dev_num, window_mem_addr)); + +#ifndef EXCLUDE_DEBUG_PRINTS + if (status != MV_OK) { + CHECK_STATUS(ddr3_tip_print_stability_log(dev_num)); + } +#endif /* EXCLUDE_DEBUG_PRINTS */ + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + is_if_fail = 0; + for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) { + if (training_result[stage][if_id] == TEST_FAILED) + is_if_fail = 1; + } + if (is_if_fail == 1) { + is_auto_tune_fail = 1; + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("Auto Tune failed for IF %d\n", + if_id)); + } + } + + if (((status == MV_FAIL) && (is_auto_tune_fail == 0)) || + ((status == MV_OK) && (is_auto_tune_fail == 1))) { + /* + * If MainFlow result and trainingResult DB not in sync, + * issue warning (caused by no update of trainingResult DB + * when failed) + */ + DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, + ("Warning: Algorithm return value and Result DB" + "are not synced (status 0x%x result DB %d)\n", + status, is_auto_tune_fail)); + } + + if ((status != MV_OK) || (is_auto_tune_fail == 1)) + return MV_FAIL; + else + return MV_OK; +} + +/* + * Enable init sequence + */ +int ddr3_tip_enable_init_sequence(u32 dev_num) +{ + int is_fail = 0; + u32 if_id = 0, mem_mask = 0, bus_index = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* Enable init sequence */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 0, + SDRAM_INIT_CTRL_REG, 0x1, 0x1)); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + + if (ddr3_tip_if_polling + (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1, + SDRAM_INIT_CTRL_REG, + MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("polling failed IF %d\n", + if_id)); + is_fail = 1; + continue; + } + + mem_mask = 0; + for (bus_index = 0; bus_index < octets_per_if_num; + bus_index++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_index); + mem_mask |= + tm->interface_params[if_id]. + as_bus_params[bus_index].mirror_enable_bitmask; + } + + if (mem_mask != 0) { + /* Disable Multi CS */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, + if_id, DUAL_DUNIT_CFG_REG, 1 << 3, + 1 << 3)); + } + } + + return (is_fail == 0) ? 
MV_OK : MV_FAIL; +} + +int ddr3_tip_register_dq_table(u32 dev_num, u32 *table) +{ + dq_map_table = table; + + return MV_OK; +} + +/* + * Check if pup search is locked + */ +int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode) +{ + u32 bit_start = 0, bit_end = 0, bit_id; + + if (read_mode == RESULT_PER_BIT) { + bit_start = 0; + bit_end = BUS_WIDTH_IN_BITS - 1; + } else { + bit_start = 0; + bit_end = 0; + } + + for (bit_id = bit_start; bit_id <= bit_end; bit_id++) { + if (GET_LOCK_RESULT(pup_buf[bit_id]) == 0) + return 0; + } + + return 1; +} + +/* + * Get minimum buffer value + */ +u8 ddr3_tip_get_buf_min(u8 *buf_ptr) +{ + u8 min_val = 0xff; + u8 cnt = 0; + + for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { + if (buf_ptr[cnt] < min_val) + min_val = buf_ptr[cnt]; + } + + return min_val; +} + +/* + * Get maximum buffer value + */ +u8 ddr3_tip_get_buf_max(u8 *buf_ptr) +{ + u8 max_val = 0; + u8 cnt = 0; + + for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) { + if (buf_ptr[cnt] > max_val) + max_val = buf_ptr[cnt]; + } + + return max_val; +} + +/* + * The following functions return memory parameters: + * bus and device width, device size + */ + +u32 hws_ddr3_get_bus_width(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + return (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == + 1) ? 16 : 32; +} + +u32 hws_ddr3_get_device_width(u32 if_id) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + return (tm->interface_params[if_id].bus_width == + MV_DDR_DEV_WIDTH_8BIT) ? 8 : 16; +} + +u32 hws_ddr3_get_device_size(u32 if_id) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (tm->interface_params[if_id].memory_size >= + MV_DDR_DIE_CAP_LAST) { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Error: Wrong device size of Cs: %d", + tm->interface_params[if_id].memory_size)); + return 0; + } else { + return 1 << tm->interface_params[if_id].memory_size; + } +} + +int hws_ddr3_calc_mem_cs_size(u32 if_id, u32 cs, u32 *cs_size) +{ + u32 cs_mem_size, dev_size; + + dev_size = hws_ddr3_get_device_size(if_id); + if (dev_size != 0) { + cs_mem_size = ((hws_ddr3_get_bus_width() / + hws_ddr3_get_device_width(if_id)) * dev_size); + + /* the calculated result in Gbytex16 to avoid float using */ + + if (cs_mem_size == 2) { + *cs_size = _128M; + } else if (cs_mem_size == 4) { + *cs_size = _256M; + } else if (cs_mem_size == 8) { + *cs_size = _512M; + } else if (cs_mem_size == 16) { + *cs_size = _1G; + } else if (cs_mem_size == 32) { + *cs_size = _2G; + } else { + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Error: Wrong Memory size of Cs: %d", cs)); + return MV_FAIL; + } + return MV_OK; + } else { + return MV_FAIL; + } +} + +int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr) +{ + u32 cs_mem_size = 0; +#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE + u32 physical_mem_size; + u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE; +#endif + + if (hws_ddr3_calc_mem_cs_size(if_id, cs, &cs_mem_size) != MV_OK) + return MV_FAIL; + +#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + /* + * if number of address pins doesn't allow to use max mem size that + * is defined in topology mem size is defined by + * DEVICE_MAX_DRAM_ADDRESS_SIZE + */ + physical_mem_size = mem_size[tm->interface_params[0].memory_size]; + + if (hws_ddr3_get_device_width(cs) == 16) { + /* + * 16bit mem device can be twice more - no need in less + * significant pin + */ + max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2; + } + + if (physical_mem_size > 
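/*
 * hws_ddr3_calc_mem_cs_size() stays in integer math by counting in
 * 512 Mbit units (the "Gbytex16" of the comment above). Worked example,
 * assuming 1 << memory_size yields the die capacity in 512 Mbit units:
 *
 *	32-bit bus of x16 devices -> 32 / 16 = 2 devices per CS
 *	4 Gbit die                -> dev_size = 8
 *	cs_mem_size = 2 * 8 = 16  -> *cs_size = _1G
 *
 * i.e. two x16 4 Gbit devices provide 8 Gbit = 1 GB per chip select.
 */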
max_mem_size) { + cs_mem_size = max_mem_size * + (hws_ddr3_get_bus_width() / + hws_ddr3_get_device_width(if_id)); + DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, + ("Updated Physical Mem size is from 0x%x to %x\n", + physical_mem_size, + DEVICE_MAX_DRAM_ADDRESS_SIZE)); + } +#endif + + /* calculate CS base addr */ + *cs_base_addr = ((cs_mem_size) * cs) & 0xffff0000; + + return MV_OK; +} + +/* TODO: consider to move to misl phy driver */ +enum { + MISL_PHY_DRV_OHM_30 = 0xf, + MISL_PHY_DRV_OHM_48 = 0xa, + MISL_PHY_DRV_OHM_80 = 0x6, + MISL_PHY_DRV_OHM_120 = 0x4 +}; + +enum { + MISL_PHY_ODT_OHM_60 = 0x8, + MISL_PHY_ODT_OHM_80 = 0x6, + MISL_PHY_ODT_OHM_120 = 0x4, + MISL_PHY_ODT_OHM_240 = 0x2 +}; + +static unsigned int mv_ddr_misl_phy_drv_calc(unsigned int cfg) +{ + unsigned int val; + + switch (cfg) { + case MV_DDR_OHM_30: + val = MISL_PHY_DRV_OHM_30; + break; + case MV_DDR_OHM_48: + val = MISL_PHY_DRV_OHM_48; + break; + case MV_DDR_OHM_80: + val = MISL_PHY_DRV_OHM_80; + break; + case MV_DDR_OHM_120: + val = MISL_PHY_DRV_OHM_120; + break; + default: + val = PARAM_UNDEFINED; + } + + return val; +} + +static unsigned int mv_ddr_misl_phy_odt_calc(unsigned int cfg) +{ + unsigned int val; + + switch (cfg) { + case MV_DDR_OHM_60: + val = MISL_PHY_ODT_OHM_60; + break; + case MV_DDR_OHM_80: + val = MISL_PHY_ODT_OHM_80; + break; + case MV_DDR_OHM_120: + val = MISL_PHY_ODT_OHM_120; + break; + case MV_DDR_OHM_240: + val = MISL_PHY_ODT_OHM_240; + break; + default: + val = PARAM_UNDEFINED; + } + + return val; +} + +unsigned int mv_ddr_misl_phy_drv_data_p_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int drv_data_p = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_data_p); + + if (drv_data_p == PARAM_UNDEFINED) + printf("error: %s: unsupported drv_data_p parameter found\n", __func__); + + return drv_data_p; +} + +unsigned int mv_ddr_misl_phy_drv_data_n_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int drv_data_n = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_data_n); + + if (drv_data_n == PARAM_UNDEFINED) + printf("error: %s: unsupported drv_data_n parameter found\n", __func__); + + return drv_data_n; +} + +unsigned int mv_ddr_misl_phy_drv_ctrl_p_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int drv_ctrl_p = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_ctrl_p); + + if (drv_ctrl_p == PARAM_UNDEFINED) + printf("error: %s: unsupported drv_ctrl_p parameter found\n", __func__); + + return drv_ctrl_p; +} + +unsigned int mv_ddr_misl_phy_drv_ctrl_n_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int drv_ctrl_n = mv_ddr_misl_phy_drv_calc(tm->edata.phy_edata.drv_ctrl_n); + + if (drv_ctrl_n == PARAM_UNDEFINED) + printf("error: %s: unsupported drv_ctrl_n parameter found\n", __func__); + + return drv_ctrl_n; +} + +unsigned int mv_ddr_misl_phy_odt_p_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int cs_num = mv_ddr_cs_num_get(); + unsigned int odt_p = PARAM_UNDEFINED; + + if (cs_num > 0 && cs_num <= MAX_CS_NUM) + odt_p = mv_ddr_misl_phy_odt_calc(tm->edata.phy_edata.odt_p[cs_num - 1]); + + if (odt_p == PARAM_UNDEFINED) + printf("error: %s: unsupported odt_p parameter found\n", __func__); + + return odt_p; +} + +unsigned int mv_ddr_misl_phy_odt_n_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int cs_num = mv_ddr_cs_num_get(); + unsigned int odt_n = PARAM_UNDEFINED; + + if (cs_num > 0 && cs_num 
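/*
 * The MISL PHY tables map nominal drive-strength/termination values in
 * ohms onto 4-bit register codes; any unsupported value collapses to
 * PARAM_UNDEFINED and is reported by the *_get() wrappers. Examples
 * straight from the tables above:
 *
 *	mv_ddr_misl_phy_drv_calc(MV_DDR_OHM_48)  -> 0xa
 *	mv_ddr_misl_phy_odt_calc(MV_DDR_OHM_120) -> 0x4
 *	mv_ddr_misl_phy_drv_calc(MV_DDR_OHM_60)  -> PARAM_UNDEFINED
 *						    (60 ohm is ODT-only)
 */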
<= MAX_CS_NUM) + odt_n = mv_ddr_misl_phy_odt_calc(tm->edata.phy_edata.odt_n[cs_num - 1]); + + if (odt_n == PARAM_UNDEFINED) + printf("error: %s: unsupported odt_n parameter found\n", __func__); + + return odt_n; +} + diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_bist.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_bist.c new file mode 100644 index 000000000..d388a1729 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_bist.c @@ -0,0 +1,603 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_regs.h" + +static u32 bist_offset = 32; +enum hws_pattern sweep_pattern = PATTERN_KILLER_DQ0; + +static int ddr3_tip_bist_operation(u32 dev_num, + enum hws_access_type access_type, + u32 if_id, + enum hws_bist_operation oper_type); + +/* + * BIST activate + */ +int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern, + enum hws_access_type access_type, u32 if_num, + enum hws_dir dir, + enum hws_stress_jump addr_stress_jump, + enum hws_pattern_duration duration, + enum hws_bist_operation oper_type, + u32 offset, u32 cs_num, u32 pattern_addr_length) +{ + u32 tx_burst_size; + u32 delay_between_burst; + u32 rd_mode; + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + + /* odpg bist write enable */ + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, + (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS), + (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS)); + + /* odpg bist read enable/disable */ + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, + (dir == OPER_READ) ? (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) : + (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS), + (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS)); + + ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset); + + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_length, MASK_ALL_BITS); + tx_burst_size = (dir == OPER_WRITE) ? + pattern_table[pattern].tx_burst_size : 0; + delay_between_burst = (dir == OPER_WRITE) ? 2 : 0; + rd_mode = (dir == OPER_WRITE) ? 
1 : 0; + ddr3_tip_configure_odpg(0, access_type, 0, dir, + pattern_table[pattern].num_of_phases_tx, tx_burst_size, + pattern_table[pattern].num_of_phases_rx, + delay_between_burst, + rd_mode, cs_num, addr_stress_jump, duration); + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_OFFS_REG, offset, MASK_ALL_BITS); + + if (oper_type == BIST_STOP) { + ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP); + } else { + ddr3_tip_bist_operation(0, access_type, 0, BIST_START); + if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK) + return MV_FAIL; + ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP); + } + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS); + + return MV_OK; +} + +/* + * BIST read result + */ +int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id, + struct bist_result *pst_bist_result) +{ + int ret; + u32 read_data[MAX_INTERFACE_NUM]; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (IS_IF_ACTIVE(tm->if_act_mask, if_id) == 0) + return MV_NOT_SUPPORTED; + DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE, + ("ddr3_tip_bist_read_result if_id %d\n", + if_id)); + ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, + ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG, read_data, + MASK_ALL_BITS); + if (ret != MV_OK) + return ret; + pst_bist_result->bist_fail_high = read_data[if_id]; + ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, + ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG, read_data, + MASK_ALL_BITS); + if (ret != MV_OK) + return ret; + pst_bist_result->bist_fail_low = read_data[if_id]; + + ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, + ODPG_DATA_RX_WORD_ERR_ADDR_REG, read_data, + MASK_ALL_BITS); + if (ret != MV_OK) + return ret; + pst_bist_result->bist_last_fail_addr = read_data[if_id]; + ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, + ODPG_DATA_RX_WORD_ERR_CNTR_REG, read_data, + MASK_ALL_BITS); + if (ret != MV_OK) + return ret; + pst_bist_result->bist_error_cnt = read_data[if_id]; + + return MV_OK; +} + +/* + * BIST flow - Activate & read result + */ +int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result, + u32 cs_num) +{ + int ret; + u32 i = 0; + u32 win_base; + struct bist_result st_bist_result; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (i = 0; i < MAX_INTERFACE_NUM; i++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, i); + hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base); + ret = ddr3_tip_bist_activate(dev_num, pattern, + ACCESS_TYPE_UNICAST, + i, OPER_WRITE, STRESS_NONE, + DURATION_SINGLE, BIST_START, + bist_offset + win_base, + cs_num, 15); + if (ret != MV_OK) { + printf("ddr3_tip_bist_activate failed (0x%x)\n", ret); + return ret; + } + + ret = ddr3_tip_bist_activate(dev_num, pattern, + ACCESS_TYPE_UNICAST, + i, OPER_READ, STRESS_NONE, + DURATION_SINGLE, BIST_START, + bist_offset + win_base, + cs_num, 15); + if (ret != MV_OK) { + printf("ddr3_tip_bist_activate failed (0x%x)\n", ret); + return ret; + } + + ret = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result); + if (ret != MV_OK) { + printf("ddr3_tip_bist_read_result failed\n"); + return ret; + } + result[i] = st_bist_result.bist_error_cnt; + } + + return MV_OK; +} + +/* + * Set BIST Operation + */ + +static int ddr3_tip_bist_operation(u32 dev_num, + enum hws_access_type access_type, + u32 if_id, enum hws_bist_operation oper_type) +{ + if (oper_type == BIST_STOP) + mv_ddr_odpg_disable(); + else + mv_ddr_odpg_enable(); + + return MV_OK; +} + +/* + * Print BIST result + */ +void 
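/*
 * hws_ddr3_run_bist() performs a write pass followed by a read pass of
 * the same pattern per active interface and reports the per-interface
 * error counter. Minimal usage sketch (hypothetical caller):
 *
 *	u32 err[MAX_INTERFACE_NUM] = { 0 };
 *
 *	if (hws_ddr3_run_bist(0, PATTERN_KILLER_DQ0, err, 0) == MV_OK &&
 *	    err[0] == 0)
 *		printf("BIST passed on interface 0\n");
 */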
ddr3_tip_print_bist_res(void) +{ + u32 dev_num = 0; + u32 i; + struct bist_result st_bist_result[MAX_INTERFACE_NUM]; + int res; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (i = 0; i < MAX_INTERFACE_NUM; i++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, i); + + res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]); + if (res != MV_OK) { + DEBUG_TRAINING_BIST_ENGINE( + DEBUG_LEVEL_ERROR, + ("ddr3_tip_bist_read_result failed\n")); + return; + } + } + + DEBUG_TRAINING_BIST_ENGINE( + DEBUG_LEVEL_INFO, + ("interface | error_cnt | fail_low | fail_high | fail_addr\n")); + + for (i = 0; i < MAX_INTERFACE_NUM; i++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, i); + + DEBUG_TRAINING_BIST_ENGINE( + DEBUG_LEVEL_INFO, + ("%d | 0x%08x | 0x%08x | 0x%08x | 0x%08x\n", + i, st_bist_result[i].bist_error_cnt, + st_bist_result[i].bist_fail_low, + st_bist_result[i].bist_fail_high, + st_bist_result[i].bist_last_fail_addr)); + } +} + +enum { + PASS, + FAIL +}; +#define TIP_ITERATION_NUM 31 +static int mv_ddr_tip_bist(enum hws_dir dir, u32 val, enum hws_pattern pattern, u32 cs, u32 *result) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + enum hws_training_ip_stat training_result; + u16 *reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u32 max_subphy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); + u32 subphy, read_data; + + ddr3_tip_ip_training(0, ACCESS_TYPE_MULTICAST, 0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + RESULT_PER_BYTE, HWS_CONTROL_ELEMENT_ADLL, HWS_LOW2HIGH, dir, tm->if_act_mask, val, + TIP_ITERATION_NUM, pattern, EDGE_FP, CS_SINGLE, cs, &training_result); + + for (subphy = 0; subphy < max_subphy; subphy++) { + ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, reg_map[subphy], &read_data, MASK_ALL_BITS); + if (((read_data >> BLOCK_STATUS_OFFS) & BLOCK_STATUS_MASK) == BLOCK_STATUS_NOT_LOCKED) + *result |= (FAIL << subphy); + } + + return MV_OK; +} + +struct interval { + u8 *vector; + u8 lendpnt; /* interval's left endpoint */ + u8 rendpnt; /* interval's right endpoint */ + u8 size; /* interval's size */ + u8 lmarker; /* left marker */ + u8 rmarker; /* right marker */ + u8 pass_lendpnt; /* left endpoint of internal pass interval */ + u8 pass_rendpnt; /* right endpoint of internal pass interval */ +}; + +static int interval_init(u8 *vector, u8 lendpnt, u8 rendpnt, + u8 lmarker, u8 rmarker, struct interval *intrvl) +{ + if (intrvl == NULL) { + printf("%s: NULL intrvl pointer found\n", __func__); + return MV_FAIL; + } + + if (vector == NULL) { + printf("%s: NULL vector pointer found\n", __func__); + return MV_FAIL; + } + intrvl->vector = vector; + + if (lendpnt >= rendpnt) { + printf("%s: incorrect lendpnt and/or rendpnt parameters found\n", __func__); + return MV_FAIL; + } + intrvl->lendpnt = lendpnt; + intrvl->rendpnt = rendpnt; + intrvl->size = rendpnt - lendpnt + 1; + + if ((lmarker < lendpnt) || (lmarker > rendpnt)) { + printf("%s: incorrect lmarker parameter found\n", __func__); + return MV_FAIL; + } + intrvl->lmarker = lmarker; + + if ((rmarker < lmarker) || (rmarker > (intrvl->rendpnt + intrvl->size))) { + printf("%s: incorrect rmarker parameter found\n", __func__); + return MV_FAIL; + } + intrvl->rmarker = rmarker; + + return MV_OK; +} +static int interval_set(u8 pass_lendpnt, u8 pass_rendpnt, struct interval *intrvl) +{ + if (intrvl == NULL) { + printf("%s: NULL intrvl pointer found\n", __func__); + return MV_FAIL; + } + + intrvl->pass_lendpnt = pass_lendpnt; + intrvl->pass_rendpnt = pass_rendpnt; + + return MV_OK; +} + +static int 
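/*
 * struct interval models one period of ADLL taps as a circular
 * PASS/FAIL vector; interval_proc() below searches around rmarker for
 * the contiguous pass window, letting indices wrap past rendpnt via the
 * "% size" arithmetic. A tiny illustration with an 8-tap vector:
 *
 *	index : 0 1 2 3 4 5 6 7
 *	vector: F P P P F F F F
 *
 * Here the pass window is [1..3], so a successful interval_proc() run
 * records pass_lendpnt = 1 and pass_rendpnt = 3.
 */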
interval_proc(struct interval *intrvl) +{ + int curr; + int pass_lendpnt, pass_rendpnt; + int lmt; + int fcnt = 0, pcnt = 0; + + if (intrvl == NULL) { + printf("%s: NULL intrvl pointer found\n", __func__); + return MV_FAIL; + } + + /* count fails and passes */ + curr = intrvl->lendpnt; + while (curr <= intrvl->rendpnt) { + if (intrvl->vector[curr] == PASS) + pcnt++; + else + fcnt++; + curr++; + } + + /* check for all fail */ + if (fcnt == intrvl->size) { + printf("%s: no pass found\n", __func__); + return MV_FAIL; + } + + /* check for all pass */ + if (pcnt == intrvl->size) { + if (interval_set(intrvl->lendpnt, intrvl->rendpnt, intrvl) != MV_OK) + return MV_FAIL; + return MV_OK; + } + + /* proceed with rmarker */ + curr = intrvl->rmarker; + if (intrvl->vector[curr % intrvl->size] == PASS) { /* pass at rmarker */ + /* search for fail on right */ + if (intrvl->rmarker > intrvl->rendpnt) + lmt = intrvl->rendpnt + intrvl->size; + else + lmt = intrvl->rmarker + intrvl->size - 1; + while ((curr <= lmt) && + (intrvl->vector[curr % intrvl->size] == PASS)) + curr++; + if (curr > lmt) { /* fail not found */ + printf("%s: rmarker: fail following pass not found\n", __func__); + return MV_FAIL; + } + /* fail found */ + pass_rendpnt = curr - 1; + } else { /* fail at rmarker */ + /* search for pass on left */ + if (intrvl->rmarker > intrvl->rendpnt) + lmt = intrvl->rmarker - intrvl->size + 1; + else + lmt = intrvl->lendpnt; + while ((curr >= lmt) && + (intrvl->vector[curr % intrvl->size] == FAIL)) + curr--; + if (curr < lmt) { /* pass not found */ + printf("%s: rmarker: pass preceding fail not found\n", __func__); + return MV_FAIL; + } + /* pass found */ + pass_rendpnt = curr; + } + + /* search for fail on left */ + curr = pass_rendpnt; + if (pass_rendpnt > intrvl->rendpnt) + lmt = pass_rendpnt - intrvl->size + 1; + else + lmt = intrvl->lendpnt; + while ((curr >= lmt) && + (intrvl->vector[curr % intrvl->size] == PASS)) + curr--; + if (curr < lmt) { /* fail not found */ + printf("%s: rmarker: fail preceding pass not found\n", __func__); + return MV_FAIL; + } + /* fail found */ + pass_lendpnt = curr + 1; + if (interval_set(pass_lendpnt, pass_rendpnt, intrvl) != MV_OK) + return MV_FAIL; + + return MV_OK; +} + +#define ADLL_TAPS_PER_PERIOD 64 +int mv_ddr_dm_to_dq_diff_get(u8 vw_sphy_hi_lmt, u8 vw_sphy_lo_lmt, u8 *vw_vector, + int *vw_sphy_hi_diff, int *vw_sphy_lo_diff) +{ + struct interval intrvl; + + /* init interval structure */ + if (interval_init(vw_vector, 0, ADLL_TAPS_PER_PERIOD - 1, + vw_sphy_lo_lmt, vw_sphy_hi_lmt, &intrvl) != MV_OK) + return MV_FAIL; + + /* find pass sub-interval */ + if (interval_proc(&intrvl) != MV_OK) + return MV_FAIL; + + /* check for all pass */ + if ((intrvl.pass_rendpnt == intrvl.rendpnt) && + (intrvl.pass_lendpnt == intrvl.lendpnt)) { + printf("%s: no fail found\n", __func__); + return MV_FAIL; + } + + *vw_sphy_hi_diff = intrvl.pass_rendpnt - vw_sphy_hi_lmt; + *vw_sphy_lo_diff = vw_sphy_lo_lmt - intrvl.pass_lendpnt; + + return MV_OK; +} + +static int mv_ddr_bist_tx(enum hws_access_type access_type) +{ + mv_ddr_odpg_done_clr(); + + ddr3_tip_bist_operation(0, access_type, 0, BIST_START); + + if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK) + return MV_FAIL; + + ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP); + + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS); + + return MV_OK; +} + +/* prepare odpg for bist operation */ +#define WR_OP_ODPG_DATA_CMD_BURST_DLY 2 +static int mv_ddr_odpg_bist_prepare(enum hws_pattern pattern, enum 
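/*
 * Editorial worked example for mv_ddr_dm_to_dq_diff_get() above (numbers are
 * illustrative): with a DQ window of [20, 40] (vw_sphy_lo_lmt = 20,
 * vw_sphy_hi_lmt = 40) and a DM pass interval of [15, 45] found in vw_vector,
 * the outputs are vw_sphy_hi_diff = 45 - 40 = 5 and vw_sphy_lo_diff =
 * 20 - 15 = 5. A positive diff means the DM pass window extends past the DQ
 * edge on that side; a negative diff flags the side where DM is the tighter
 * constraint. If no fail exists at all, the function returns MV_FAIL.
 */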
hws_access_type access_type, + enum hws_dir dir, enum hws_stress_jump stress_jump_addr, + enum hws_pattern_duration duration, u32 offset, u32 cs, + u32 pattern_addr_len, enum dm_direction dm_dir) +{ + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + u32 tx_burst_size; + u32 burst_delay; + u32 rd_mode; + + /* odpg bist write enable */ + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, + (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS), + (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS)); + + /* odpg bist read enable/disable */ + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, + (dir == OPER_READ) ? (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) : + (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS), + (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS)); + + if (pattern == PATTERN_00 || pattern == PATTERN_FF) + ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset); + else + mv_ddr_load_dm_pattern_to_odpg(access_type, pattern, dm_dir); + + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_len, MASK_ALL_BITS); + if (dir == OPER_WRITE) { + tx_burst_size = pattern_table[pattern].tx_burst_size; + burst_delay = WR_OP_ODPG_DATA_CMD_BURST_DLY; + rd_mode = ODPG_MODE_TX; + } else { + tx_burst_size = 0; + burst_delay = 0; + rd_mode = ODPG_MODE_RX; + } + ddr3_tip_configure_odpg(0, access_type, 0, dir, pattern_table[pattern].num_of_phases_tx, + tx_burst_size, pattern_table[pattern].num_of_phases_rx, burst_delay, + rd_mode, cs, stress_jump_addr, duration); + + return MV_OK; +} + +#define BYTES_PER_BURST_64BIT 0x20 +#define BYTES_PER_BURST_32BIT 0x10 +int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + u32 adll_tap; + u32 wr_ctrl_adll[MAX_BUS_NUM] = {0}; + u32 rd_ctrl_adll[MAX_BUS_NUM] = {0}; + u32 subphy; + u32 subphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); + u32 odpg_addr = 0x0; + u32 result; + u32 idx; + /* burst length in bytes */ + u32 burst_len = (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ? + BYTES_PER_BURST_64BIT : BYTES_PER_BURST_32BIT); + + /* save dqs values to restore after algorithm's run */ + ddr3_tip_read_adll_value(0, wr_ctrl_adll, CTX_PHY_REG(cs), MASK_ALL_BITS); + ddr3_tip_read_adll_value(0, rd_ctrl_adll, CRX_PHY_REG(cs), MASK_ALL_BITS); + + /* fill memory with base pattern */ + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS); + mv_ddr_odpg_bist_prepare(pattern, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE, + bist_offset, cs, pattern_table[pattern].num_of_phases_tx, + (pattern == PATTERN_00) ? 
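/*
 * Editorial note: in the loops below each ADLL tap gets its own burst-sized
 * slot in the ODPG buffer, odpg_addr = adll_tap * burst_len. With the 64-bit
 * bus burst of 0x20 bytes, the 64 taps of one period span 64 * 0x20 = 0x800
 * bytes (2 KiB); on a 32-bit bus it is 64 * 0x10 = 0x400 bytes (1 KiB).
 */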
DM_DIR_DIRECT : DM_DIR_INVERSE); + + for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) { + /* change target odpg address */ + odpg_addr = adll_tap * burst_len; + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG, + odpg_addr, MASK_ALL_BITS); + + ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE, + pattern_table[pattern].num_of_phases_tx, + pattern_table[pattern].tx_burst_size, + pattern_table[pattern].num_of_phases_rx, + WR_OP_ODPG_DATA_CMD_BURST_DLY, + ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE); + + /* odpg bist write enable */ + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, + (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS), + (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS)); + + /* odpg bist read disable */ + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, + (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS), + (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS)); + + /* trigger odpg */ + mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST); + } + + /* fill memory with vref pattern to increment addr using odpg bist */ + mv_ddr_odpg_bist_prepare(PATTERN_VREF, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE, + bist_offset, cs, pattern_table[pattern].num_of_phases_tx, + (pattern == PATTERN_00) ? DM_DIR_DIRECT : DM_DIR_INVERSE); + + for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) { + ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_MULTICAST, 0, + DDR_PHY_DATA, CTX_PHY_REG(cs), adll_tap); + /* change target odpg address */ + odpg_addr = adll_tap * burst_len; + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG, + odpg_addr, MASK_ALL_BITS); + ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE, + pattern_table[pattern].num_of_phases_tx, + pattern_table[pattern].tx_burst_size, + pattern_table[pattern].num_of_phases_rx, + WR_OP_ODPG_DATA_CMD_BURST_DLY, + ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE); + + /* odpg bist write enable */ + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, + (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS), + (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS)); + + /* odpg bist read disable */ + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, + (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS), + (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS)); + + /* trigger odpg */ + mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST); + } + + /* restore subphy's tx adll_tap to its position */ + for (subphy = 0; subphy < subphy_max; subphy++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy); + ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST, + subphy, DDR_PHY_DATA, CTX_PHY_REG(cs), + wr_ctrl_adll[subphy]); + } + + /* read and validate bist (comparing with the base pattern) */ + for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) { + result = 0; + odpg_addr = adll_tap * burst_len; + /* change addr to fit write */ + mv_ddr_pattern_start_addr_set(pattern_table, pattern, odpg_addr); + mv_ddr_tip_bist(OPER_READ, 0, pattern, 0, &result); + for (subphy = 0; subphy < subphy_max; subphy++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy); + idx = ADLL_TAPS_PER_PERIOD * subphy + adll_tap; + vw_vector[idx] |= ((result >> subphy) & 0x1); + } + } + + /* restore subphy's rx adll_tap to its position */ + for (subphy = 0; subphy < subphy_max; subphy++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy); + ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST, + subphy, 
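/*
 * Editorial note: vw_vector built above is a flat pass/fail map, one byte per
 * (subphy, tap) pair at index ADLL_TAPS_PER_PERIOD * subphy + adll_tap, with
 * a non-zero value meaning FAIL. An illustrative dump, assuming the same
 * subphy_max and vw_vector as in mv_ddr_dm_vw_get():
 *
 *	for (subphy = 0; subphy < subphy_max; subphy++) {
 *		for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++)
 *			printf("%c", vw_vector[ADLL_TAPS_PER_PERIOD * subphy +
 *					       adll_tap] ? 'F' : '.');
 *		printf("\n");
 *	}
 */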
DDR_PHY_DATA, CRX_PHY_REG(cs), + rd_ctrl_adll[subphy]); + } + + return MV_OK; +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_centralization.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_centralization.c new file mode 100644 index 000000000..648b37ef6 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_centralization.c @@ -0,0 +1,716 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_regs.h" + +#define VALIDATE_WIN_LENGTH(e1, e2, maxsize) \ + (((e2) + 1 > (e1) + (u8)MIN_WINDOW_SIZE) && \ + ((e2) + 1 < (e1) + (u8)maxsize)) +#define IS_WINDOW_OUT_BOUNDARY(e1, e2, maxsize) \ + (((e1) == 0 && (e2) != 0) || \ + ((e1) != (maxsize - 1) && (e2) == (maxsize - 1))) +#define CENTRAL_TX 0 +#define CENTRAL_RX 1 +#define NUM_OF_CENTRAL_TYPES 2 + +u32 start_pattern = PATTERN_KILLER_DQ0, end_pattern = PATTERN_KILLER_DQ7; + +u32 start_if = 0, end_if = (MAX_INTERFACE_NUM - 1); +u8 bus_end_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u8 bus_start_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u8 centralization_state[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +static u8 ddr3_tip_special_rx_run_once_flag; + +static int ddr3_tip_centralization(u32 dev_num, u32 mode); + +/* + * Centralization RX Flow + */ +int ddr3_tip_centralization_rx(u32 dev_num) +{ + CHECK_STATUS(ddr3_tip_special_rx(dev_num)); + CHECK_STATUS(ddr3_tip_centralization(dev_num, CENTRAL_RX)); + + return MV_OK; +} + +/* + * Centralization TX Flow + */ +int ddr3_tip_centralization_tx(u32 dev_num) +{ + CHECK_STATUS(ddr3_tip_centralization(dev_num, CENTRAL_TX)); + + return MV_OK; +} + +/* + * Centralization Flow + */ +static int ddr3_tip_centralization(u32 dev_num, u32 mode) +{ + enum hws_training_ip_stat training_result[MAX_INTERFACE_NUM]; + u32 if_id, pattern_id, bit_id; + u8 bus_id; + u8 cur_start_win[BUS_WIDTH_IN_BITS]; + u8 centralization_result[MAX_INTERFACE_NUM][BUS_WIDTH_IN_BITS]; + u8 cur_end_win[BUS_WIDTH_IN_BITS]; + u8 current_window[BUS_WIDTH_IN_BITS]; + u8 opt_window, waste_window, start_window_skew, end_window_skew; + u8 final_pup_window[MAX_INTERFACE_NUM][BUS_WIDTH_IN_BITS]; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + enum hws_training_result result_type = RESULT_PER_BIT; + enum hws_dir direction; + u32 *result[HWS_SEARCH_DIR_LIMIT]; + u32 reg_phy_off, reg; + u8 max_win_size; + int lock_success = 1; + u8 cur_end_win_min, cur_start_win_max; + u32 cs_enable_reg_val[MAX_INTERFACE_NUM]; + int is_if_fail = 0; + enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage); + u32 pup_win_length = 0; + enum hws_search_dir search_dir_id; + u8 cons_tap = (mode == CENTRAL_TX) ? 
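/*
 * Editorial notes on the window bookkeeping in this flow (numbers are
 * illustrative): VALIDATE_WIN_LENGTH() accepts a window [e1, e2] only when
 * its length e2 - e1 + 1 is strictly greater than MIN_WINDOW_SIZE and
 * strictly smaller than maxsize, while IS_WINDOW_OUT_BOUNDARY() flags a
 * window pinned to either edge, e.g. e1 = 0, e2 = 10. Per pup, with three
 * bit windows [10, 40], [12, 38] and [8, 36], the per-bit lengths are 31, 27
 * and 29, so opt_window = 27; final_pup_window = min(end) - max(start) + 1 =
 * 36 - 12 + 1 = 25; waste_window = 27 - 25 = 2; the start/end skews are
 * 12 - 8 = 4 and 40 - 36 = 4. cons_tap just below biases the TX search by
 * one ADLL period (64 taps) and is subtracted again when the final midpoint
 * is taken, e.g. (70 + 90) / 2 - 64 = 16; for RX the bias is 0.
 */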
(64) : (0); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* save current cs enable reg val */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, cs_enable_reg_val, MASK_ALL_BITS)); + /* enable single cs */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3))); + } + + if (mode == CENTRAL_TX) { + max_win_size = MAX_WINDOW_SIZE_TX; + reg_phy_off = CTX_PHY_REG(effective_cs); + direction = OPER_WRITE; + } else { + max_win_size = MAX_WINDOW_SIZE_RX; + reg_phy_off = CRX_PHY_REG(effective_cs); + direction = OPER_READ; + } + + /* DB initialization */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_id = 0; + bus_id < octets_per_if_num; bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + centralization_state[if_id][bus_id] = 0; + bus_end_window[mode][if_id][bus_id] = + (max_win_size - 1) + cons_tap; + bus_start_window[mode][if_id][bus_id] = 0; + centralization_result[if_id][bus_id] = 0; + } + } + + /* start flow */ + for (pattern_id = start_pattern; pattern_id <= end_pattern; + pattern_id++) { + ddr3_tip_ip_training_wrapper(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, result_type, + HWS_CONTROL_ELEMENT_ADLL, + PARAM_NOT_CARE, direction, + tm-> + if_act_mask, 0x0, + max_win_size - 1, + max_win_size - 1, + pattern_id, EDGE_FPF, CS_SINGLE, + PARAM_NOT_CARE, training_result); + + for (if_id = start_if; if_id <= end_if; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_id = 0; + bus_id <= octets_per_if_num - 1; + bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + + for (search_dir_id = HWS_LOW2HIGH; + search_dir_id <= HWS_HIGH2LOW; + search_dir_id++) { + CHECK_STATUS + (ddr3_tip_read_training_result + (dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_id, + ALL_BITS_PER_PUP, + search_dir_id, + direction, result_type, + TRAINING_LOAD_OPERATION_UNLOAD, + CS_SINGLE, + &result[search_dir_id], + 1, 0, 0)); + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_INFO, + ("%s pat %d IF %d pup %d Regs: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + ((mode == + CENTRAL_TX) ?
"TX" : "RX"), + pattern_id, if_id, bus_id, + result[search_dir_id][0], + result[search_dir_id][1], + result[search_dir_id][2], + result[search_dir_id][3], + result[search_dir_id][4], + result[search_dir_id][5], + result[search_dir_id][6], + result[search_dir_id][7])); + } + + for (bit_id = 0; bit_id < BUS_WIDTH_IN_BITS; + bit_id++) { + /* check if this code is valid for 2 edge, probably not :( */ + cur_start_win[bit_id] = + GET_TAP_RESULT(result + [HWS_LOW2HIGH] + [bit_id], + EDGE_1); + cur_end_win[bit_id] = + GET_TAP_RESULT(result + [HWS_HIGH2LOW] + [bit_id], + EDGE_1); + /* window length */ + current_window[bit_id] = + cur_end_win[bit_id] - + cur_start_win[bit_id] + 1; + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_TRACE, + ("cs %x pattern %d IF %d pup %d cur_start_win %d cur_end_win %d current_window %d\n", + effective_cs, pattern_id, + if_id, bus_id, + cur_start_win[bit_id], + cur_end_win[bit_id], + current_window[bit_id])); + } + + if ((ddr3_tip_is_pup_lock + (result[HWS_LOW2HIGH], result_type)) && + (ddr3_tip_is_pup_lock + (result[HWS_HIGH2LOW], result_type))) { + /* read result success */ + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_INFO, + ("Pup locked, pat %d IF %d pup %d\n", + pattern_id, if_id, bus_id)); + } else { + /* read result failure */ + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_INFO, + ("fail Lock, pat %d IF %d pup %d\n", + pattern_id, if_id, bus_id)); + if (centralization_state[if_id][bus_id] + == 1) { + /* continue with next pup */ + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_TRACE, + ("continue to next pup %d %d\n", + if_id, bus_id)); + continue; + } + + for (bit_id = 0; + bit_id < BUS_WIDTH_IN_BITS; + bit_id++) { + /* + * the next check is relevant + * only when using search + * machine 2 edges + */ + if (cur_start_win[bit_id] > 0 && + cur_end_win[bit_id] == 0) { + cur_end_win + [bit_id] = + max_win_size - 1; + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_TRACE, + ("fail, IF %d pup %d bit %d fail #1\n", + if_id, bus_id, + bit_id)); + /* the next bit */ + continue; + } else { + centralization_state + [if_id][bus_id] = 1; + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_TRACE, + ("fail, IF %d pup %d bit %d fail #2\n", + if_id, bus_id, + bit_id)); + } + } + + if (centralization_state[if_id][bus_id] + == 1) { + /* going to next pup */ + continue; + } + } /* bit */ + + opt_window = + ddr3_tip_get_buf_min(current_window); + /* final pup window length */ + final_pup_window[if_id][bus_id] = + ddr3_tip_get_buf_min(cur_end_win) - + ddr3_tip_get_buf_max(cur_start_win) + + 1; + waste_window = + opt_window - + final_pup_window[if_id][bus_id]; + start_window_skew = + ddr3_tip_get_buf_max(cur_start_win) - + ddr3_tip_get_buf_min( + cur_start_win); + end_window_skew = + ddr3_tip_get_buf_max( + cur_end_win) - + ddr3_tip_get_buf_min( + cur_end_win); + /* min/max updated with pattern change */ + cur_end_win_min = + ddr3_tip_get_buf_min( + cur_end_win); + cur_start_win_max = + ddr3_tip_get_buf_max( + cur_start_win); + bus_end_window[mode][if_id][bus_id] = + GET_MIN(bus_end_window[mode][if_id] + [bus_id], + cur_end_win_min); + bus_start_window[mode][if_id][bus_id] = + GET_MAX(bus_start_window[mode][if_id] + [bus_id], + cur_start_win_max); + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_INFO, + ("pat %d IF %d pup %d opt_win %d final_win %d waste_win %d st_win_skew %d end_win_skew %d cur_st_win_max %d cur_end_win_min %d bus_st_win %d bus_end_win %d\n", + pattern_id, if_id, bus_id, opt_window, + final_pup_window[if_id][bus_id], + waste_window, start_window_skew, + end_window_skew, + cur_start_win_max, +
cur_end_win_min, + bus_start_window[mode][if_id][bus_id], + bus_end_window[mode][if_id][bus_id])); + + /* check if window is valid */ + if (ddr3_tip_centr_skip_min_win_check == 0) { + if ((VALIDATE_WIN_LENGTH + (bus_start_window[mode][if_id] + [bus_id], + bus_end_window[mode][if_id] + [bus_id], + max_win_size) == 1) || + (IS_WINDOW_OUT_BOUNDARY + (bus_start_window[mode][if_id] + [bus_id], + bus_end_window[mode][if_id] + [bus_id], + max_win_size) == 1)) { + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_INFO, + ("win valid, pat %d IF %d pup %d\n", + pattern_id, if_id, + bus_id)); + /* window is valid */ + } else { + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_INFO, + ("fail win, pat %d IF %d pup %d bus_st_win %d bus_end_win %d\n", + pattern_id, if_id, bus_id, + bus_start_window[mode] + [if_id][bus_id], + bus_end_window[mode] + [if_id][bus_id])); + centralization_state[if_id] + [bus_id] = 1; + if (debug_mode == 0) { + flow_result[if_id] = TEST_FAILED; + return MV_FAIL; + } + } + } /* ddr3_tip_centr_skip_min_win_check */ + } /* pup */ + } /* interface */ + } /* pattern */ + + for (if_id = start_if; if_id <= end_if; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + + is_if_fail = 0; + flow_result[if_id] = TEST_SUCCESS; + + for (bus_id = 0; + bus_id <= (octets_per_if_num - 1); bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + + /* continue only if lock */ + if (centralization_state[if_id][bus_id] != 1) { + if (ddr3_tip_centr_skip_min_win_check == 0) { + if ((bus_end_window + [mode][if_id][bus_id] == + (max_win_size - 1)) && + ((bus_end_window + [mode][if_id][bus_id] - + bus_start_window[mode][if_id] + [bus_id]) < MIN_WINDOW_SIZE) && + ((bus_end_window[mode][if_id] + [bus_id] - bus_start_window + [mode][if_id][bus_id]) > 2)) { + /* prevent false lock */ + /* TBD change to enum */ + centralization_state + [if_id][bus_id] = 2; + } + + if ((bus_end_window[mode][if_id][bus_id] + == 0) && + ((bus_end_window[mode][if_id] + [bus_id] - + bus_start_window[mode][if_id] + [bus_id]) < MIN_WINDOW_SIZE) && + ((bus_end_window[mode][if_id] + [bus_id] - + bus_start_window[mode][if_id] + [bus_id]) > 2)) + /*prevent false lock */ + centralization_state[if_id] + [bus_id] = 3; + } + + if ((bus_end_window[mode][if_id][bus_id] > + (max_win_size - 1)) && direction == + OPER_WRITE) { + DEBUG_CENTRALIZATION_ENGINE + (DEBUG_LEVEL_INFO, + ("Tx special pattern\n")); + cons_tap = 64; + } + } + + /* check states */ + if (centralization_state[if_id][bus_id] == 3) { + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_INFO, + ("SSW - TBD IF %d pup %d\n", + if_id, bus_id)); + lock_success = 1; + } else if (centralization_state[if_id][bus_id] == 2) { + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_INFO, + ("SEW - TBD IF %d pup %d\n", + if_id, bus_id)); + lock_success = 1; + } else if (centralization_state[if_id][bus_id] == 0) { + lock_success = 1; + } else { + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_ERROR, + ("fail, IF %d pup %d\n", + if_id, bus_id)); + lock_success = 0; + } + + if (lock_success == 1) { + centralization_result[if_id][bus_id] = + (bus_end_window[mode][if_id][bus_id] + + bus_start_window[mode][if_id][bus_id]) + / 2 - cons_tap; + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_TRACE, + (" bus_id %d Res= %d\n", bus_id, + centralization_result[if_id][bus_id])); + /* copy results to registers */ + pup_win_length = + bus_end_window[mode][if_id][bus_id] - + bus_start_window[mode][if_id][bus_id] + + 1; + + ddr3_tip_bus_read(dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_id, + DDR_PHY_DATA, + RESULT_PHY_REG + + 
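/*
 * Editorial observation on the read-modify-write just below, not a confirmed
 * bug report: because of operator precedence, (~0x1f << offs) shifts an
 * already-inverted mask, so the AND clears the bits beneath the field as well
 * as the 5-bit field itself whenever offs is non-zero. If only the result
 * field is meant to change, the conventional form would be:
 *
 *	reg &= ~(0x1f << offs);		// clear just the 5-bit field
 *	reg |= pup_win_length << offs;	// insert the new window length
 *
 * The original behavior is left untouched here.
 */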
effective_cs, &reg); + reg = (reg & (~0x1f << + ((mode == CENTRAL_TX) ? + (RESULT_PHY_TX_OFFS) : + (RESULT_PHY_RX_OFFS)))) + | pup_win_length << + ((mode == CENTRAL_TX) ? + (RESULT_PHY_TX_OFFS) : + (RESULT_PHY_RX_OFFS)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + bus_id, DDR_PHY_DATA, + RESULT_PHY_REG + + effective_cs, reg)); + + /* offset per CS is calculated earlier */ + CHECK_STATUS( + ddr3_tip_bus_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, + bus_id, + DDR_PHY_DATA, + reg_phy_off, + centralization_result + [if_id] + [bus_id])); + } else { + is_if_fail = 1; + } + } + + if (is_if_fail == 1) + flow_result[if_id] = TEST_FAILED; + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + /* restore cs enable value */ + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, + if_id, DUAL_DUNIT_CFG_REG, + cs_enable_reg_val[if_id], + MASK_ALL_BITS)); + } + + return is_if_fail; +} + +/* + * Special RX Flow + */ +int ddr3_tip_special_rx(u32 dev_num) +{ + enum hws_training_ip_stat training_result[MAX_INTERFACE_NUM]; + u32 if_id, pup_id, pattern_id, bit_id; + u8 cur_start_win[BUS_WIDTH_IN_BITS]; + u8 cur_end_win[BUS_WIDTH_IN_BITS]; + enum hws_training_result result_type = RESULT_PER_BIT; + enum hws_dir direction; + enum hws_search_dir search_dir_id; + u32 *result[HWS_SEARCH_DIR_LIMIT]; + u32 max_win_size; + u8 cur_end_win_min, cur_start_win_max; + u32 cs_enable_reg_val[MAX_INTERFACE_NUM]; + u32 temp = 0; + int pad_num = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if ((ddr3_tip_special_rx_run_once_flag & (1 << effective_cs)) == (1 << effective_cs)) + return MV_OK; + + ddr3_tip_special_rx_run_once_flag |= (1 << effective_cs); + + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* save current cs enable reg val */ + CHECK_STATUS(ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, + if_id, DUAL_DUNIT_CFG_REG, + cs_enable_reg_val, + MASK_ALL_BITS)); + /* enable single cs */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, + if_id, DUAL_DUNIT_CFG_REG, + (1 << 3), (1 << 3))); + } + + max_win_size = MAX_WINDOW_SIZE_RX; + direction = OPER_READ; + pattern_id = PATTERN_FULL_SSO1; + + /* start flow */ + ddr3_tip_ip_training_wrapper(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, result_type, + HWS_CONTROL_ELEMENT_ADLL, + PARAM_NOT_CARE, direction, + tm->if_act_mask, 0x0, + max_win_size - 1, max_win_size - 1, + pattern_id, EDGE_FPF, CS_SINGLE, + PARAM_NOT_CARE, training_result); + + for (if_id = start_if; if_id <= end_if; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup_id = 0; + pup_id <= octets_per_if_num; pup_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup_id); + + for (search_dir_id = HWS_LOW2HIGH; + search_dir_id <= HWS_HIGH2LOW; + search_dir_id++) { + CHECK_STATUS(ddr3_tip_read_training_result + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup_id, + ALL_BITS_PER_PUP, search_dir_id, + direction, result_type, + TRAINING_LOAD_OPERATION_UNLOAD, + CS_SINGLE, &result[search_dir_id], + 1, 0, 0)); + DEBUG_CENTRALIZATION_ENGINE(DEBUG_LEVEL_INFO, + ("Special: pat %d IF %d pup %d Regs: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + pattern_id, if_id, + pup_id, + result + [search_dir_id][0], + result + [search_dir_id][1], + result +
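/*
 * Editorial note: the "align left" fix-up further below bumps each pad's PBS
 * RX delay by 0xa and saturates at the 5-bit register limit,
 * temp = (temp + 0xa > 31) ? (31) : (temp + 0xa); for example a current
 * value of 25 saturates to 31 rather than wrapping to (25 + 10) - 32 = 3.
 */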
[search_dir_id][2], + result + [search_dir_id][3], + result + [search_dir_id][4], + result + [search_dir_id][5], + result + [search_dir_id][6], + result + [search_dir_id] + [7])); + } + + for (bit_id = 0; bit_id < BUS_WIDTH_IN_BITS; bit_id++) { + /* + * check if this code is valid for 2 edge, + * probably not :( + */ + cur_start_win[bit_id] = + GET_TAP_RESULT(result[HWS_LOW2HIGH] + [bit_id], EDGE_1); + cur_end_win[bit_id] = + GET_TAP_RESULT(result[HWS_HIGH2LOW] + [bit_id], EDGE_1); + } + if (!((ddr3_tip_is_pup_lock + (result[HWS_LOW2HIGH], result_type)) && + (ddr3_tip_is_pup_lock + (result[HWS_HIGH2LOW], result_type)))) { + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_ERROR, + ("Special: Pup lock fail, pat %d IF %d pup %d\n", + pattern_id, if_id, pup_id)); + return MV_FAIL; + } + + cur_end_win_min = + ddr3_tip_get_buf_min(cur_end_win); + cur_start_win_max = + ddr3_tip_get_buf_max(cur_start_win); + + if (cur_start_win_max <= 1) { /* Align left */ + for (bit_id = 0; bit_id < BUS_WIDTH_IN_BITS; + bit_id++) { + pad_num = + dq_map_table[bit_id + + pup_id * + BUS_WIDTH_IN_BITS + + if_id * + BUS_WIDTH_IN_BITS * + MAX_BUS_NUM]; + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, + pup_id, DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, pad_num), + &temp)); + temp = (temp + 0xa > 31) ? + (31) : (temp + 0xa); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, + pup_id, DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, pad_num), + temp)); + } + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_INFO, + ("Special: PBS:: I/F# %d , Bus# %d fix align to the Left\n", + if_id, pup_id)); + } + + if (cur_end_win_min > 30) { /* Align right */ + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup_id, + DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, 4), + &temp)); + temp += 0xa; + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + pup_id, DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, 4), + temp)); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup_id, + DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, 5), + &temp)); + temp += 0xa; + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + pup_id, DDR_PHY_DATA, + PBS_RX_PHY_REG(effective_cs, 5), + temp)); + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_INFO, + ("Special: PBS:: I/F# %d , Bus# %d fix align to the right\n", + if_id, pup_id)); + } + + vref_window_size[if_id][pup_id] = + cur_end_win_min - + cur_start_win_max + 1; + DEBUG_CENTRALIZATION_ENGINE( + DEBUG_LEVEL_INFO, + ("Special: Winsize I/F# %d , Bus# %d is %d\n", + if_id, pup_id, vref_window_size + [if_id][pup_id])); + } /* pup */ + } /* end of interface */ + + return MV_OK; +} + +/* + * Print Centralization Result + */ +int ddr3_tip_print_centralization_result(u32 dev_num) +{ + u32 if_id = 0, bus_id = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + dev_num = dev_num; + + printf("Centralization Results\n"); + printf("I/F0 Result[0 - success 1-fail 2 - state_2 3 - state_3] ...\n"); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_id = 0; bus_id < octets_per_if_num; + bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + printf("%d ,\n", centralization_state[if_id][bus_id]); + } + } + + return MV_OK; +} diff --git 
a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_db.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_db.c new file mode 100644 index 000000000..6aa7b6069 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_db.c @@ -0,0 +1,874 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include "ddr_ml_wrapper.h" + +#include "ddr3_training_ip_flow.h" +#include "mv_ddr_topology.h" +#include "mv_ddr_training_db.h" +#include "ddr3_training_ip_db.h" + +/* Device attributes structures */ +enum mv_ddr_dev_attribute ddr_dev_attributes[MV_ATTR_LAST]; +int ddr_dev_attr_init_done = 0; + +static inline u32 pattern_table_get_killer_word16(u8 dqs, u8 index); +static inline u32 pattern_table_get_sso_word(u8 sso, u8 index); +static inline u32 pattern_table_get_vref_word(u8 index); +static inline u32 pattern_table_get_vref_word16(u8 index); +static inline u32 pattern_table_get_sso_full_xtalk_word(u8 bit, u8 index); +static inline u32 pattern_table_get_sso_full_xtalk_word16(u8 bit, u8 index); +static inline u32 pattern_table_get_sso_xtalk_free_word(u8 bit, u8 index); +static inline u32 pattern_table_get_sso_xtalk_free_word16(u8 bit, u8 index); +static inline u32 pattern_table_get_isi_word(u8 index); +static inline u32 pattern_table_get_isi_word16(u8 index); + +/* List of allowed frequency listed in order of enum mv_ddr_freq */ +static unsigned int freq_val[MV_DDR_FREQ_LAST] = { + 0, /*MV_DDR_FREQ_LOW_FREQ */ + 400, /*MV_DDR_FREQ_400, */ + 533, /*MV_DDR_FREQ_533, */ + 666, /*MV_DDR_FREQ_667, */ + 800, /*MV_DDR_FREQ_800, */ + 933, /*MV_DDR_FREQ_933, */ + 1066, /*MV_DDR_FREQ_1066, */ + 311, /*MV_DDR_FREQ_311, */ + 333, /*MV_DDR_FREQ_333, */ + 467, /*MV_DDR_FREQ_467, */ + 850, /*MV_DDR_FREQ_850, */ + 600, /*MV_DDR_FREQ_600 */ + 300, /*MV_DDR_FREQ_300 */ + 900, /*MV_DDR_FREQ_900 */ + 360, /*MV_DDR_FREQ_360 */ + 1000 /*MV_DDR_FREQ_1000 */ +}; + +unsigned int *mv_ddr_freq_tbl_get(void) +{ + return &freq_val[0]; +} + +u32 mv_ddr_freq_get(enum mv_ddr_freq freq) +{ + return freq_val[freq]; +} + +/* cas latency values per frequency for each speed bin index */ +static struct mv_ddr_cl_val_per_freq cl_table[] = { + /* + * 400M 667M 933M 311M 467M 600M 360 + * 100M 533M 800M 1066M 333M 850M 900 + * 1000 (the order is 100, 400, 533 etc.) 
+ */ + /* DDR3-800D */ + { {6, 5, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 5, 0, 5, 0} }, + /* DDR3-800E */ + { {6, 6, 0, 0, 0, 0, 0, 6, 6, 0, 0, 0, 6, 0, 6, 0} }, + /* DDR3-1066E */ + { {6, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 0, 5, 0, 5, 0} }, + /* DDR3-1066F */ + { {6, 6, 7, 0, 0, 0, 0, 6, 6, 7, 0, 0, 6, 0, 6, 0} }, + /* DDR3-1066G */ + { {6, 6, 8, 0, 0, 0, 0, 6, 6, 8, 0, 0, 6, 0, 6, 0} }, + /* DDR3-1333F* */ + { {6, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1333G */ + { {6, 5, 7, 8, 0, 0, 0, 5, 5, 7, 0, 8, 5, 0, 5, 0} }, + /* DDR3-1333H */ + { {6, 6, 8, 9, 0, 0, 0, 6, 6, 8, 0, 9, 6, 0, 6, 0} }, + /* DDR3-1333J* */ + { {6, 6, 8, 10, 0, 0, 0, 6, 6, 8, 0, 10, 6, 0, 6, 0} }, + /* DDR3-1600G* */ + { {6, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1600H */ + { {6, 5, 6, 8, 9, 0, 0, 5, 5, 6, 0, 8, 5, 0, 5, 0} }, + /* DDR3-1600J */ + { {6, 5, 7, 9, 10, 0, 0, 5, 5, 7, 0, 9, 5, 0, 5, 0} }, + /* DDR3-1600K */ + { {6, 6, 8, 10, 11, 0, 0, 6, 6, 8, 0, 10, 6, 0, 6, 0} }, + /* DDR3-1866J* */ + { {6, 5, 6, 8, 9, 11, 0, 5, 5, 6, 11, 8, 5, 0, 5, 0} }, + /* DDR3-1866K */ + { {6, 5, 7, 8, 10, 11, 0, 5, 5, 7, 11, 8, 5, 11, 5, 11} }, + /* DDR3-1866L */ + { {6, 6, 7, 9, 11, 12, 0, 6, 6, 7, 12, 9, 6, 12, 6, 12} }, + /* DDR3-1866M* */ + { {6, 6, 8, 10, 11, 13, 0, 6, 6, 8, 13, 10, 6, 13, 6, 13} }, + /* DDR3-2133K* */ + { {6, 5, 6, 7, 9, 10, 11, 5, 5, 6, 10, 7, 5, 11, 5, 11} }, + /* DDR3-2133L */ + { {6, 5, 6, 8, 9, 11, 12, 5, 5, 6, 11, 8, 5, 12, 5, 12} }, + /* DDR3-2133M */ + { {6, 5, 7, 9, 10, 12, 13, 5, 5, 7, 12, 9, 5, 13, 5, 13} }, + /* DDR3-2133N* */ + { {6, 6, 7, 9, 11, 13, 14, 6, 6, 7, 13, 9, 6, 14, 6, 14} }, + /* DDR3-1333H-ext */ + { {6, 6, 7, 9, 0, 0, 0, 6, 6, 7, 0, 9, 6, 0, 6, 0} }, + /* DDR3-1600K-ext */ + { {6, 6, 7, 9, 11, 0, 0, 6, 6, 7, 0, 9, 6, 0, 6, 0} }, + /* DDR3-1866M-ext */ + { {6, 6, 7, 9, 11, 13, 0, 6, 6, 7, 13, 9, 6, 13, 6, 13} }, +}; + +u32 mv_ddr_cl_val_get(u32 index, u32 freq) +{ + return cl_table[index].cl_val[freq]; +} + +/* cas write latency values per frequency for each speed bin index */ +static struct mv_ddr_cl_val_per_freq cwl_table[] = { + /* + * 400M 667M 933M 311M 467M 600M 360 + * 100M 533M 800M 1066M 333M 850M 900 + * (the order is 100, 400, 533 etc.)
+ */ + /* DDR3-800D */ + { {5, 5, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 5, 0, 5, 0} }, + /* DDR3-800E */ + { {5, 5, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 5, 0, 5, 0} }, + /* DDR3-1066E */ + { {5, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1066F */ + { {5, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1066G */ + { {5, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1333F* */ + { {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1333G */ + { {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1333H */ + { {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1333J* */ + { {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1600G* */ + { {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1600H */ + { {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1600J */ + { {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1600K */ + { {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1866J* */ + { {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 0, 5, 0} }, + /* DDR3-1866K */ + { {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 0, 5, 0} }, + /* DDR3-1866L */ + { {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 9, 5, 9} }, + /* DDR3-1866M* */ + { {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 9, 5, 9} }, + /* DDR3-2133K* */ + { {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} }, + /* DDR3-2133L */ + { {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} }, + /* DDR3-2133M */ + { {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} }, + /* DDR3-2133N* */ + { {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} }, + /* DDR3-1333H-ext */ + { {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1600K-ext */ + { {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} }, + /* DDR3-1866M-ext */ + { {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 9, 5, 9} }, +}; + +u32 mv_ddr_cwl_val_get(u32 index, u32 freq) +{ + return cwl_table[index].cl_val[freq]; +} + +u8 twr_mask_table[] = { + 10, + 10, + 10, + 10, + 10, + 1, /* 5 */ + 2, /* 6 */ + 3, /* 7 */ + 4, /* 8 */ + 10, + 5, /* 10 */ + 10, + 6, /* 12 */ + 10, + 7, /* 14 */ + 10, + 0 /* 16 */ +}; + +u8 cl_mask_table[] = { + 0, + 0, + 0, + 0, + 0, + 0x2, + 0x4, + 0x6, + 0x8, + 0xa, + 0xc, + 0xe, + 0x1, + 0x3, + 0x5, + 0x5 +}; + +u8 cwl_mask_table[] = { + 0, + 0, + 0, + 0, + 0, + 0, + 0x1, + 0x2, + 0x3, + 0x4, + 0x5, + 0x6, + 0x7, + 0x8, + 0x9, + 0x9 +}; + +/* RFC values (in ns) */ +static unsigned int rfc_table[] = { + 90, /* 512M */ + 110, /* 1G */ + 160, /* 2G */ + 260, /* 4G */ + 350, /* 8G */ + 0, /* TODO: placeholder for 16-Mbit dev width */ + 0, /* TODO: placeholder for 32-Mbit dev width */ + 0, /* TODO: placeholder for 12-Mbit dev width */ + 0 /* TODO: placeholder for 24-Mbit dev width */ +}; + +u32 mv_ddr_rfc_get(u32 mem) +{ + return rfc_table[mem]; +} + +u32 speed_bin_table_t_rc[] = { + 50000, + 52500, + 48750, + 50625, + 52500, + 46500, + 48000, + 49500, + 51000, + 45000, + 46250, + 47500, + 48750, + 44700, + 45770, + 46840, + 47910, + 43285, + 44220, + 45155, + 46090 +}; + +u32 speed_bin_table_t_rcd_t_rp[] = { + 12500, + 15000, + 11250, + 13125, + 15000, + 10500, + 12000, + 13500, + 15000, + 10000, + 11250, + 12500, + 13750, + 10700, + 11770, + 12840, + 13910, + 10285, + 11220, + 12155, + 13090, +}; + +enum { + PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR = 0, + PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM +}; + +static u8 pattern_killer_pattern_table_map[KILLER_PATTERN_LENGTH * 2][2] = { + /*Aggressor / Victim */ + {1, 0}, + {0, 0}, + {1, 0}, + {1, 1}, + {0, 1}, + {0, 1}, + {1, 0}, + {0, 
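/*
 * Editorial interpretation of the mask tables above (the tables themselves
 * are authoritative): they appear to follow the JEDEC DDR3 MR0 encodings.
 * cl_mask_table maps CAS latency to the MR0 CAS field, with CL5..CL11 taking
 * the even codes 0x2..0xe and CL12..CL14 wrapping to 0x1, 0x3, 0x5, while
 * twr_mask_table maps tWR in clocks to the MR0 WR bits (5 -> 1, 6 -> 2,
 * 7 -> 3, 8 -> 4, 10 -> 5, 12 -> 6, 14 -> 7, 16 -> 0); unsupported tWR
 * values map to the filler value 10.
 */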
1}, + {1, 0}, + {0, 1}, + {1, 0}, + {1, 0}, + {0, 1}, + {1, 0}, + {0, 1}, + {0, 0}, + {1, 1}, + {0, 0}, + {1, 1}, + {0, 0}, + {1, 1}, + {0, 0}, + {1, 1}, + {1, 0}, + {0, 0}, + {1, 1}, + {0, 0}, + {1, 1}, + {0, 0}, + {0, 0}, + {0, 0}, + {0, 1}, + {0, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + {1, 1}, + {0, 0}, + {1, 1}, + {0, 0}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + {0, 0}, + {1, 1}, + {0, 1}, + {0, 0}, + {0, 1}, + {0, 1}, + {0, 0}, + {1, 1}, + {1, 1}, + {1, 0}, + {1, 0}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1} +}; + +static u8 pattern_vref_pattern_table_map[] = { + /* 1 means 0xffffffff, 0 is 0x0 */ + 0xb8, + 0x52, + 0x55, + 0x8a, + 0x33, + 0xa6, + 0x6d, + 0xfe +}; + +static struct mv_ddr_page_element page_tbl[] = { + /* 8-bit, 16-bit page size */ + {MV_DDR_PAGE_SIZE_1K, MV_DDR_PAGE_SIZE_2K}, /* 512M */ + {MV_DDR_PAGE_SIZE_1K, MV_DDR_PAGE_SIZE_2K}, /* 1G */ + {MV_DDR_PAGE_SIZE_1K, MV_DDR_PAGE_SIZE_2K}, /* 2G */ + {MV_DDR_PAGE_SIZE_1K, MV_DDR_PAGE_SIZE_2K}, /* 4G */ + {MV_DDR_PAGE_SIZE_2K, MV_DDR_PAGE_SIZE_2K}, /* 8G */ + {0, 0}, /* TODO: placeholder for 16-Mbit die capacity */ + {0, 0}, /* TODO: placeholder for 32-Mbit die capacity */ + {0, 0}, /* TODO: placeholder for 12-Mbit die capacity */ + {0, 0} /* TODO: placeholder for 24-Mbit die capacity */ +}; + +u32 mv_ddr_page_size_get(enum mv_ddr_dev_width bus_width, enum mv_ddr_die_capacity mem_size) +{ + if (bus_width == MV_DDR_DEV_WIDTH_8BIT) + return page_tbl[mem_size].page_size_8bit; + else + return page_tbl[mem_size].page_size_16bit; +} + +/* Return speed Bin value for selected index and t* element */ +unsigned int mv_ddr_speed_bin_timing_get(enum mv_ddr_speed_bin index, enum mv_ddr_speed_bin_timing element) +{ + u32 result = 0; + + switch (element) { + case SPEED_BIN_TRCD: + case SPEED_BIN_TRP: + result = speed_bin_table_t_rcd_t_rp[index]; + break; + case SPEED_BIN_TRAS: + if (index <= SPEED_BIN_DDR_1066G) + result = 37500; + else if (index <= SPEED_BIN_DDR_1333J) + result = 36000; + else if (index <= SPEED_BIN_DDR_1600K) + result = 35000; + else if (index <= SPEED_BIN_DDR_1866M) + result = 34000; + else + result = 33000; + break; + case SPEED_BIN_TRC: + result = speed_bin_table_t_rc[index]; + break; + case SPEED_BIN_TRRD1K: + if (index <= SPEED_BIN_DDR_800E) + result = 10000; + else if (index <= SPEED_BIN_DDR_1066G) + result = 7500; + else if (index <= SPEED_BIN_DDR_1600K) + result = 6000; + else + result = 5000; + break; + case SPEED_BIN_TRRD2K: + if (index <= SPEED_BIN_DDR_1066G) + result = 10000; + else if (index <= SPEED_BIN_DDR_1600K) + result = 7500; + else + result = 6000; + break; + case SPEED_BIN_TPD: + if (index < SPEED_BIN_DDR_800E) + result = 7500; + else if (index < SPEED_BIN_DDR_1333J) + result = 5625; + else + result = 5000; + break; + case SPEED_BIN_TFAW1K: + if (index <= SPEED_BIN_DDR_800E) + result = 40000; + else if (index <= SPEED_BIN_DDR_1066G) + result = 37500; + else if (index <= SPEED_BIN_DDR_1600K) + result = 30000; + else if (index <= SPEED_BIN_DDR_1866M) + result = 27000; + else + result = 25000; + break; + case SPEED_BIN_TFAW2K: + if (index <= SPEED_BIN_DDR_1066G) + result = 50000; + else if (index <= SPEED_BIN_DDR_1333J) + result = 45000; + else if (index <= SPEED_BIN_DDR_1600K) + result = 40000; + else + result = 35000; + break; + case SPEED_BIN_TWTR: + result = 7500; + break; + case SPEED_BIN_TRTP: + result = 7500; + break; + case SPEED_BIN_TWR: + result = 15000; + break; + case SPEED_BIN_TMOD: + result = 15000; + break; + case SPEED_BIN_TXPDLL: + result = 
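/*
 * Editorial note: the speed-bin timing values returned here read as
 * picoseconds, e.g. for SPEED_BIN_DDR_1600K the code returns tRAS =
 * 35000 ps = 35 ns and tRRD (1K page) = 6000 ps = 6 ns. The exception is
 * SPEED_BIN_TXSDLL, whose value of 512 looks like a clock-cycle count
 * rather than a time.
 */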
24000; + break; + case SPEED_BIN_TXSDLL: + result = 512; + break; + default: + break; + } + + return result; +} + +static inline u32 pattern_table_get_killer_word(u8 dqs, u8 index) +{ + u8 i, byte = 0; + u8 role; + + for (i = 0; i < 8; i++) { + role = (i == dqs) ? + (PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) : + (PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM); + byte |= pattern_killer_pattern_table_map[index][role] << i; + } + + return byte | (byte << 8) | (byte << 16) | (byte << 24); +} + +static inline u32 pattern_table_get_killer_word16(u8 dqs, u8 index) +{ + u8 i, byte0 = 0, byte1 = 0; + u8 role; + + for (i = 0; i < 8; i++) { + role = (i == dqs) ? + (PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) : + (PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM); + byte0 |= pattern_killer_pattern_table_map[index * 2][role] << i; + byte1 |= pattern_killer_pattern_table_map[index * 2 + 1][role] << i; + } + + return byte0 | (byte0 << 8) | (byte1 << 16) | (byte1 << 24); +} + +static inline u32 pattern_table_get_sso_word(u8 sso, u8 index) +{ + u8 step = sso + 1; + + if (0 == ((index / step) & 1)) + return 0x0; + else + return 0xffffffff; +} + +static inline u32 pattern_table_get_sso_full_xtalk_word(u8 bit, u8 index) +{ + u8 byte = (1 << bit); + + if ((index & 1) == 1) + byte = ~byte; + + return byte | (byte << 8) | (byte << 16) | (byte << 24); + +} + +static inline u32 pattern_table_get_sso_xtalk_free_word(u8 bit, u8 index) +{ + u8 byte = (1 << bit); + + if ((index & 1) == 1) + byte = 0; + + return byte | (byte << 8) | (byte << 16) | (byte << 24); +} + +static inline u32 pattern_table_get_isi_word(u8 index) +{ + u8 i0 = index % 32; + u8 i1 = index % 8; + u32 word; + + if (i0 > 15) + word = ((i1 == 5) | (i1 == 7)) ? 0xffffffff : 0x0; + else + word = (i1 == 6) ? 0xffffffff : 0x0; + + word = ((i0 % 16) > 7) ? ~word : word; + + return word; +} + +static inline u32 pattern_table_get_sso_full_xtalk_word16(u8 bit, u8 index) +{ + u8 byte = (1 << bit); + + if ((index & 1) == 1) + byte = ~byte; + + return byte | (byte << 8) | ((~byte) << 16) | ((~byte) << 24); +} + +static inline u32 pattern_table_get_sso_xtalk_free_word16(u8 bit, u8 index) +{ + u8 byte = (1 << bit); + + if ((index & 1) == 0) + return (byte << 16) | (byte << 24); + else + return byte | (byte << 8); +} + +static inline u32 pattern_table_get_isi_word16(u8 index) +{ + u8 i0 = index % 16; + u8 i1 = index % 4; + u32 word; + + if (i0 > 7) + word = (i1 > 1) ? 0x0000ffff : 0x0; + else + word = (i1 == 3) ? 0xffff0000 : 0x0; + + word = ((i0 % 8) > 3) ? 
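/*
 * Editorial worked example for pattern_table_get_killer_word() above: for
 * dqs = 0 and an index whose map entry is {1, 0}, bit 0 takes the aggressor
 * value 1 and bits 1..7 take the victim value 0, giving byte = 0x01 and a
 * returned word of 0x01010101 (the byte replicated across all four lanes).
 */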
~word : word; + + return word; +} + +static inline u32 pattern_table_get_vref_word(u8 index) +{ + if (0 == ((pattern_vref_pattern_table_map[index / 8] >> + (index % 8)) & 1)) + return 0x0; + else + return 0xffffffff; +} + +static inline u32 pattern_table_get_vref_word16(u8 index) +{ + if (0 == pattern_killer_pattern_table_map + [PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2] && + 0 == pattern_killer_pattern_table_map + [PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2 + 1]) + return 0x00000000; + else if (1 == pattern_killer_pattern_table_map + [PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2] && + 0 == pattern_killer_pattern_table_map + [PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2 + 1]) + return 0xffff0000; + else if (0 == pattern_killer_pattern_table_map + [PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2] && + 1 == pattern_killer_pattern_table_map + [PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2 + 1]) + return 0x0000ffff; + else + return 0xffffffff; +} + +static inline u32 pattern_table_get_static_pbs_word(u8 index) +{ + u16 temp; + + temp = ((0x00ff << (index / 3)) & 0xff00) >> 8; + + return temp | (temp << 8) | (temp << 16) | (temp << 24); +} + +u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index) +{ + u32 pattern = 0; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 0) { + /* 32/64-bit patterns */ + switch (type) { + case PATTERN_PBS1: + case PATTERN_PBS2: + if (index == 0 || index == 2 || index == 5 || + index == 7) + pattern = PATTERN_55; + else + pattern = PATTERN_AA; + break; + case PATTERN_PBS3: + if (0 == (index & 1)) + pattern = PATTERN_55; + else + pattern = PATTERN_AA; + break; + case PATTERN_RL: + if (index < 6) + pattern = PATTERN_00; + else + pattern = PATTERN_80; + break; + case PATTERN_STATIC_PBS: + pattern = pattern_table_get_static_pbs_word(index); + break; + case PATTERN_KILLER_DQ0: + case PATTERN_KILLER_DQ1: + case PATTERN_KILLER_DQ2: + case PATTERN_KILLER_DQ3: + case PATTERN_KILLER_DQ4: + case PATTERN_KILLER_DQ5: + case PATTERN_KILLER_DQ6: + case PATTERN_KILLER_DQ7: + pattern = pattern_table_get_killer_word( + (u8)(type - PATTERN_KILLER_DQ0), index); + break; + case PATTERN_RL2: + if (index < 6) + pattern = PATTERN_00; + else + pattern = PATTERN_01; + break; + case PATTERN_TEST: + if (index > 1 && index < 6) + pattern = PATTERN_00; + else + pattern = PATTERN_FF; + break; + case PATTERN_FULL_SSO0: + case PATTERN_FULL_SSO1: + case PATTERN_FULL_SSO2: + case PATTERN_FULL_SSO3: + pattern = pattern_table_get_sso_word( + (u8)(type - PATTERN_FULL_SSO0), index); + break; + case PATTERN_VREF: + pattern = pattern_table_get_vref_word(index); + break; + case PATTERN_SSO_FULL_XTALK_DQ0: + case PATTERN_SSO_FULL_XTALK_DQ1: + case PATTERN_SSO_FULL_XTALK_DQ2: + case PATTERN_SSO_FULL_XTALK_DQ3: + case PATTERN_SSO_FULL_XTALK_DQ4: + case PATTERN_SSO_FULL_XTALK_DQ5: + case PATTERN_SSO_FULL_XTALK_DQ6: + case PATTERN_SSO_FULL_XTALK_DQ7: + pattern = pattern_table_get_sso_full_xtalk_word( + (u8)(type - PATTERN_SSO_FULL_XTALK_DQ0), index); + break; + case PATTERN_SSO_XTALK_FREE_DQ0: + case PATTERN_SSO_XTALK_FREE_DQ1: + case PATTERN_SSO_XTALK_FREE_DQ2: + case PATTERN_SSO_XTALK_FREE_DQ3: + case PATTERN_SSO_XTALK_FREE_DQ4: + case PATTERN_SSO_XTALK_FREE_DQ5: + case PATTERN_SSO_XTALK_FREE_DQ6: + case PATTERN_SSO_XTALK_FREE_DQ7: + pattern = pattern_table_get_sso_xtalk_free_word( + (u8)(type - PATTERN_SSO_XTALK_FREE_DQ0), index); + break; + case 
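/*
 * Editorial worked example for pattern_table_get_static_pbs_word() above:
 * index = 3 gives temp = ((0x00ff << 1) & 0xff00) >> 8 = 0x01 and a word of
 * 0x01010101, while index = 24 gives temp = 0xff and 0xffffffff; the byte
 * fills with ones from the LSB up (0x00, 0x01, 0x03, ..., 0xff) as the
 * index grows in steps of three.
 */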
PATTERN_ISI_XTALK_FREE: + pattern = pattern_table_get_isi_word(index); + break; + default: + printf("error: %s: unsupported pattern type [%d] found\n", + __func__, (int)type); + pattern = 0; + break; + } + } else { + /* 16bit patterns */ + switch (type) { + case PATTERN_PBS1: + case PATTERN_PBS2: + case PATTERN_PBS3: + pattern = PATTERN_55AA; + break; + case PATTERN_RL: + if (index < 3) + pattern = PATTERN_00; + else + pattern = PATTERN_80; + break; + case PATTERN_STATIC_PBS: + pattern = PATTERN_00FF; + break; + case PATTERN_KILLER_DQ0: + case PATTERN_KILLER_DQ1: + case PATTERN_KILLER_DQ2: + case PATTERN_KILLER_DQ3: + case PATTERN_KILLER_DQ4: + case PATTERN_KILLER_DQ5: + case PATTERN_KILLER_DQ6: + case PATTERN_KILLER_DQ7: + pattern = pattern_table_get_killer_word16( + (u8)(type - PATTERN_KILLER_DQ0), index); + break; + case PATTERN_RL2: + if (index < 3) + pattern = PATTERN_00; + else + pattern = PATTERN_01; + break; + case PATTERN_TEST: + if ((index == 0) || (index == 3)) + pattern = 0x00000000; + else + pattern = 0xFFFFFFFF; + break; + case PATTERN_FULL_SSO0: + pattern = 0x0000ffff; + break; + case PATTERN_FULL_SSO1: + case PATTERN_FULL_SSO2: + case PATTERN_FULL_SSO3: + pattern = pattern_table_get_sso_word( + (u8)(type - PATTERN_FULL_SSO1), index); + break; + case PATTERN_VREF: + pattern = pattern_table_get_vref_word16(index); + break; + case PATTERN_SSO_FULL_XTALK_DQ0: + case PATTERN_SSO_FULL_XTALK_DQ1: + case PATTERN_SSO_FULL_XTALK_DQ2: + case PATTERN_SSO_FULL_XTALK_DQ3: + case PATTERN_SSO_FULL_XTALK_DQ4: + case PATTERN_SSO_FULL_XTALK_DQ5: + case PATTERN_SSO_FULL_XTALK_DQ6: + case PATTERN_SSO_FULL_XTALK_DQ7: + pattern = pattern_table_get_sso_full_xtalk_word16( + (u8)(type - PATTERN_SSO_FULL_XTALK_DQ0), index); + break; + case PATTERN_SSO_XTALK_FREE_DQ0: + case PATTERN_SSO_XTALK_FREE_DQ1: + case PATTERN_SSO_XTALK_FREE_DQ2: + case PATTERN_SSO_XTALK_FREE_DQ3: + case PATTERN_SSO_XTALK_FREE_DQ4: + case PATTERN_SSO_XTALK_FREE_DQ5: + case PATTERN_SSO_XTALK_FREE_DQ6: + case PATTERN_SSO_XTALK_FREE_DQ7: + pattern = pattern_table_get_sso_xtalk_free_word16( + (u8)(type - PATTERN_SSO_XTALK_FREE_DQ0), index); + break; + case PATTERN_ISI_XTALK_FREE: + pattern = pattern_table_get_isi_word16(index); + break; + default: + if (((int)type == 29) || ((int)type == 30)) + break; + + printf("error: %s: unsupported pattern type [%d] found\n", + __func__, (int)type); + pattern = 0; + break; + } + } + + return pattern; +} + +/* Device attribute functions */ +void ddr3_tip_dev_attr_init(u32 dev_num) +{ + u32 attr_id; + + for (attr_id = 0; attr_id < MV_ATTR_LAST; attr_id++) + ddr_dev_attributes[attr_id] = 0xFF; + + ddr_dev_attr_init_done = 1; +} + +u32 ddr3_tip_dev_attr_get(u32 dev_num, enum mv_ddr_dev_attribute attr_id) +{ + if (ddr_dev_attr_init_done == 0) + ddr3_tip_dev_attr_init(dev_num); + + return ddr_dev_attributes[attr_id]; +} + +void ddr3_tip_dev_attr_set(u32 dev_num, enum mv_ddr_dev_attribute attr_id, u32 value) +{ + if (ddr_dev_attr_init_done == 0) + ddr3_tip_dev_attr_init(dev_num); + + ddr_dev_attributes[attr_id] = value; +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_hw_algo.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_hw_algo.c new file mode 100644 index 000000000..58ffb2050 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_hw_algo.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_regs.h" + +#define VREF_INITIAL_STEP 3 +#define VREF_SECOND_STEP 1 +#define VREF_MAX_INDEX 7 +#define MAX_VALUE (1024 - 1) +#define MIN_VALUE (-MAX_VALUE) +#define GET_RD_SAMPLE_DELAY(data, cs) ((data >> rd_sample_mask[cs]) & 0x1f) + +u32 ca_delay; +int ddr3_tip_centr_skip_min_win_check = 0; +u8 current_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM]; +u8 last_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM]; +u16 current_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM]; +u16 last_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM]; +u8 lim_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM]; +u8 interface_state[MAX_INTERFACE_NUM]; +u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u8 vref_window_size_th = 12; + +static u8 pup_st[MAX_BUS_NUM][MAX_INTERFACE_NUM]; + +static u32 rd_sample_mask[] = { + 0, + 8, + 16, + 24 +}; + +#define VREF_STEP_1 0 +#define VREF_STEP_2 1 +#define VREF_CONVERGE 2 + +/* + * ODT additional timing + */ +int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id) +{ + u32 cs_num = 0, max_read_sample = 0, min_read_sample = 0x1f; + u32 data_read[MAX_INTERFACE_NUM] = { 0 }; + u32 read_sample[MAX_CS_NUM]; + u32 val; + u32 pup_index; + int max_phase = MIN_VALUE, current_phase; + enum hws_access_type access_type = ACCESS_TYPE_UNICAST; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + unsigned int max_cs = mv_ddr_cs_num_get(); + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + DUNIT_ODT_CTRL_REG, + 0 << 8, 0x3 << 8)); + CHECK_STATUS(ddr3_tip_if_read(dev_num, access_type, if_id, + RD_DATA_SMPL_DLYS_REG, + data_read, MASK_ALL_BITS)); + val = data_read[if_id]; + + for (cs_num = 0; cs_num < max_cs; cs_num++) { + read_sample[cs_num] = GET_RD_SAMPLE_DELAY(val, cs_num); + + /* find maximum of read_samples */ + if (read_sample[cs_num] >= max_read_sample) { + if (read_sample[cs_num] == max_read_sample) + max_phase = MIN_VALUE; + else + max_read_sample = read_sample[cs_num]; + + for (pup_index = 0; + pup_index < octets_per_if_num; + pup_index++) { + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup_index, + DDR_PHY_DATA, + RL_PHY_REG(cs_num), + &val)); + + current_phase = ((int)val & 0xe0) >> 6; + if (current_phase >= max_phase) + max_phase = current_phase; + } + } + + /* find minimum */ + if (read_sample[cs_num] < min_read_sample) + min_read_sample = read_sample[cs_num]; + } + + min_read_sample = min_read_sample + 2; + max_read_sample = max_read_sample + 7 + (max_phase + 1) / 2 + 1; + if (min_read_sample >= 0xf) + min_read_sample = 0xf; + if (max_read_sample >= 0x1f) + max_read_sample = 0x1f; + + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + DDR_ODT_TIMING_LOW_REG, + ((min_read_sample - 1) << 12), + 0xf << 12)); + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id, + DDR_ODT_TIMING_LOW_REG, + (max_read_sample << 16), + 0x1f << 16)); + + return MV_OK; +} + +int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4]) +{ + u32 reg_pup = RESULT_PHY_REG; + u32 reg_data; + u32 cs_num; + int i; + + cs_num = 0; + + /* TBD */ + reg_pup += cs_num; + + for (i = 0; i < 4; i++) { + CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id, + ACCESS_TYPE_UNICAST, i, + DDR_PHY_DATA, reg_pup, + ®_data)); + res[i] = (reg_data >> RESULT_PHY_RX_OFFS) & 0x1f; + } + + return 0; +} + +/* + * This algorithm deals with the vertical optimum from Voltage point of view + * of the sample signal. + * Voltage sample point can improve the Eye / window size of the bit and the + * pup. 
 * The problem is that it is tuned the same for all DQs, so there is no + * PBS-like code. + * It is more like centralization. + * But because we don't have the training SM support, we do a somewhat + * smarter search to save time. + */ +int ddr3_tip_vref(u32 dev_num) +{ + /* + * The Vref register has a non-linear order. Need to check what it will + * be in future projects. + */ + u32 vref_map[8] = { + 1, 2, 3, 4, 5, 6, 7, 0 + }; + /* State and parameter definitions */ + u32 initial_step = VREF_INITIAL_STEP; + /* does this need to be assigned a negative value? */ + u32 second_step = VREF_SECOND_STEP; + u32 algo_run_flag = 0, currrent_vref = 0; + u32 while_count = 0; + u32 pup = 0, if_id = 0, num_pup = 0, rep = 0; + u32 val = 0; + u32 reg_addr = 0xa8; + u32 copy_start_pattern, copy_end_pattern; + enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage); + u8 res[4]; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + CHECK_STATUS(ddr3_tip_special_rx(dev_num)); + + /* save start/end pattern */ + copy_start_pattern = start_pattern; + copy_end_pattern = end_pattern; + + /* set vref as centralization pattern */ + start_pattern = PATTERN_VREF; + end_pattern = PATTERN_VREF; + + /* init params */ + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup = 0; + pup < octets_per_if_num; pup++) { + current_vref[pup][if_id] = 0; + last_vref[pup][if_id] = 0; + lim_vref[pup][if_id] = 0; + current_valid_window[pup][if_id] = 0; + last_valid_window[pup][if_id] = 0; + if (vref_window_size[if_id][pup] > + vref_window_size_th) { + pup_st[pup][if_id] = VREF_CONVERGE; + DEBUG_TRAINING_HW_ALG( + DEBUG_LEVEL_INFO, + ("VREF config, IF[ %d ]pup[ %d ] - Vref tune not required (%d)\n", + if_id, pup, __LINE__)); + } else { + pup_st[pup][if_id] = VREF_STEP_1; + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, &val)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + pup, DDR_PHY_DATA, reg_addr, + (val & (~0xf)) | vref_map[0])); + DEBUG_TRAINING_HW_ALG( + DEBUG_LEVEL_INFO, + ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n", + if_id, pup, + (val & (~0xf)) | vref_map[0], + __LINE__)); + } + } + interface_state[if_id] = 0; + } + + /* TODO: Set number of active interfaces */ + num_pup = octets_per_if_num * MAX_INTERFACE_NUM; + + while ((algo_run_flag <= num_pup) && (while_count < 10)) { + while_count++; + for (rep = 1; rep < 4; rep++) { + ddr3_tip_centr_skip_min_win_check = 1; + ddr3_tip_centralization_rx(dev_num); + ddr3_tip_centr_skip_min_win_check = 0; + + /* Read valid window results only for non-converged pups */ + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (interface_state[if_id] != 4) { + get_valid_win_rx(dev_num, if_id, res); + for (pup = 0; + pup < octets_per_if_num; + pup++) { + VALIDATE_BUS_ACTIVE + (tm->bus_act_mask, pup); + if (pup_st[pup] + [if_id] == + VREF_CONVERGE) + continue; + + current_valid_window[pup] + [if_id] = + (current_valid_window[pup] + [if_id] * (rep - 1) + + 1000 * res[pup]) / rep; + } + } + } + } + + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + DEBUG_TRAINING_HW_ALG( + DEBUG_LEVEL_TRACE, + ("current_valid_window: IF[ %d ] - ", if_id)); + + for (pup = 0; + pup < octets_per_if_num; pup++) { +
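/*
 * Editorial note: the rep loop above keeps a running average of the window
 * size scaled by 1000, current = (current * (rep - 1) + 1000 * res) / rep
 * in integer arithmetic; for example res readings of 20, 22 and 21 taps
 * average to 21000, i.e. 21.0 taps.
 */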
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup); + DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE, + ("%d ", + current_valid_window + [pup][if_id])); + } + DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE, ("\n")); + } + + /* Compare results and respond as function of state */ + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup = 0; + pup < octets_per_if_num; pup++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup); + DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE, + ("I/F[ %d ], pup[ %d ] STATE #%d (%d)\n", + if_id, pup, + pup_st[pup] + [if_id], __LINE__)); + + if (pup_st[pup][if_id] == VREF_CONVERGE) + continue; + + DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE, + ("I/F[ %d ], pup[ %d ] CHECK progress - Current %d Last %d, limit VREF %d (%d)\n", + if_id, pup, + current_valid_window[pup] + [if_id], + last_valid_window[pup] + [if_id], lim_vref[pup] + [if_id], __LINE__)); + + /* + * The -1 is for solution resolution +/- 1 tap + * of ADLL + */ + if (current_valid_window[pup][if_id] + 200 >= + (last_valid_window[pup][if_id])) { + if (pup_st[pup][if_id] == VREF_STEP_1) { + /* + * We stay in the same state and + * step just update the window + * size (take the max) and Vref + */ + if (current_vref[pup] + [if_id] == VREF_MAX_INDEX) { + /* + * If we step to the end + * and didn't converge + * to some particular + * better Vref value + * define the pup as + * converge and step + * back to nominal + * Vref. + */ + pup_st[pup] + [if_id] = + VREF_CONVERGE; + algo_run_flag++; + interface_state + [if_id]++; + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n", + if_id, pup, + current_vref[pup] + [if_id], + __LINE__)); + } else { + /* continue to update the Vref index */ + current_vref[pup] + [if_id] = + ((current_vref[pup] + [if_id] + + initial_step) > + VREF_MAX_INDEX) ? + VREF_MAX_INDEX + : (current_vref[pup] + [if_id] + + initial_step); + if (current_vref[pup] + [if_id] == + VREF_MAX_INDEX) { + pup_st[pup] + [if_id] + = + VREF_STEP_2; + } + lim_vref[pup] + [if_id] = + last_vref[pup] + [if_id] = + current_vref[pup] + [if_id]; + } + + last_valid_window[pup] + [if_id] = + GET_MAX(current_valid_window + [pup][if_id], + last_valid_window + [pup] + [if_id]); + + /* update the Vref for next stage */ + currrent_vref = + current_vref[pup] + [if_id]; + CHECK_STATUS + (ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + &val)); + CHECK_STATUS + (ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + (val & (~0xf)) | + vref_map[currrent_vref])); + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n", + if_id, pup, + (val & (~0xf)) | + vref_map[currrent_vref], + __LINE__)); + } else if (pup_st[pup][if_id] + == VREF_STEP_2) { + /* + * We keep on search back with + * the same step size. + */ + last_valid_window[pup] + [if_id] = + GET_MAX(current_valid_window + [pup][if_id], + last_valid_window + [pup] + [if_id]); + last_vref[pup][if_id] = + current_vref[pup] + [if_id]; + + /* we finish all search space */ + if ((current_vref[pup] + [if_id] - second_step) == lim_vref[pup][if_id]) { + /* + * If we step to the end + * and didn't converge + * to some particular + * better Vref value + * define the pup as + * converge and step + * back to nominal + * Vref. 
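+							 * (i.e. the step-2
+							 * scan hit the lower
+							 * bound recorded in
+							 * step 1, lim_vref,
+							 * so take this value
+							 * as the final one.)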
+ */ + pup_st[pup] + [if_id] = + VREF_CONVERGE; + algo_run_flag++; + + interface_state + [if_id]++; + + current_vref[pup] + [if_id] = + (current_vref[pup] + [if_id] - + second_step); + + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n", + if_id, pup, + current_vref[pup] + [if_id], + __LINE__)); + } else + /* we finish all search space */ + if (current_vref[pup] + [if_id] == + lim_vref[pup] + [if_id]) { + /* + * If we step to the end + * and didn't converge + * to some particular + * better Vref value + * define the pup as + * converge and step + * back to nominal + * Vref. + */ + pup_st[pup] + [if_id] = + VREF_CONVERGE; + + algo_run_flag++; + interface_state + [if_id]++; + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n", + if_id, pup, + current_vref[pup] + [if_id], + __LINE__)); + } else { + current_vref[pup] + [if_id] = + current_vref[pup] + [if_id] - + second_step; + } + + /* Update the Vref for next stage */ + currrent_vref = + current_vref[pup] + [if_id]; + CHECK_STATUS + (ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + &val)); + CHECK_STATUS + (ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + (val & (~0xf)) | + vref_map[currrent_vref])); + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n", + if_id, pup, + (val & (~0xf)) | + vref_map[currrent_vref], + __LINE__)); + } + } else { + /* we change state and change step */ + if (pup_st[pup][if_id] == VREF_STEP_1) { + pup_st[pup][if_id] = + VREF_STEP_2; + lim_vref[pup][if_id] = + current_vref[pup] + [if_id] - initial_step; + last_valid_window[pup] + [if_id] = + current_valid_window[pup] + [if_id]; + last_vref[pup][if_id] = + current_vref[pup] + [if_id]; + current_vref[pup][if_id] = + last_vref[pup][if_id] - + second_step; + + /* Update the Vref for next stage */ + CHECK_STATUS + (ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + &val)); + CHECK_STATUS + (ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + (val & (~0xf)) | + vref_map[current_vref[pup] + [if_id]])); + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n", + if_id, pup, + (val & (~0xf)) | + vref_map[current_vref[pup] + [if_id]], + __LINE__)); + + } else if (pup_st[pup][if_id] == VREF_STEP_2) { + /* + * The last search was the max + * point set value and exit + */ + CHECK_STATUS + (ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + &val)); + CHECK_STATUS + (ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + (val & (~0xf)) | + vref_map[last_vref[pup] + [if_id]])); + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n", + if_id, pup, + (val & (~0xf)) | + vref_map[last_vref[pup] + [if_id]], + __LINE__)); + pup_st[pup][if_id] = + VREF_CONVERGE; + algo_run_flag++; + interface_state[if_id]++; + DEBUG_TRAINING_HW_ALG + (DEBUG_LEVEL_TRACE, + ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n", + if_id, pup, + current_vref[pup] + [if_id], __LINE__)); + } + } + } + } + } + + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup = 0; + pup < 
octets_per_if_num; pup++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+			CHECK_STATUS(ddr3_tip_bus_read
+				     (dev_num, if_id,
+				      ACCESS_TYPE_UNICAST, pup,
+				      DDR_PHY_DATA, reg_addr, &val));
+			DEBUG_TRAINING_HW_ALG(
+				DEBUG_LEVEL_INFO,
+				("FINAL values: I/F[ %d ], pup[ %d ] - Vref = %X (%d)\n",
+				 if_id, pup, val, __LINE__));
+		}
+	}
+
+	flow_result[if_id] = TEST_SUCCESS;
+
+	/* restore start/end pattern */
+	start_pattern = copy_start_pattern;
+	end_pattern = copy_end_pattern;
+
+	return 0;
+}
+
+/*
+ * CK/CA Delay
+ */
+int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
+{
+	u32 if_id = 0;
+	u32 ck_num_adll_tap = 0, ca_num_adll_tap = 0, data = 0;
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	/*
+	 * ck_delay_table delays the clock signal only
+	 * (to overcome timing issues between CK & command/address signals).
+	 */
+	/*
+	 * ca_delay delays the entire command & address signals
+	 * (including the clock signal, to overcome DGL error on the clock
+	 * versus the DQS).
+	 */
+
+	/* Calc ADLL Tap */
+	if (ck_delay == PARAM_UNDEFINED)
+		DEBUG_TRAINING_HW_ALG(
+			DEBUG_LEVEL_ERROR,
+			("ERROR: ck_delay is not initialized!\n"));
+
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+
+		/* Calc delay ps in ADLL tap */
+		ck_num_adll_tap = ck_delay / adll_tap;
+		ca_num_adll_tap = ca_delay / adll_tap;
+
+		data = (ck_num_adll_tap & 0x3f) +
+			((ca_num_adll_tap & 0x3f) << 10);
+
+		/*
+		 * Set the ADLL number to the CK ADLL for Interfaces for
+		 * all Pup
+		 */
+		DEBUG_TRAINING_HW_ALG(
+			DEBUG_LEVEL_TRACE,
+			("ck_num_adll_tap %d ca_num_adll_tap %d adll_tap %d\n",
+			 ck_num_adll_tap, ca_num_adll_tap, adll_tap));
+
+		CHECK_STATUS(ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST,
+						if_id, ACCESS_TYPE_MULTICAST,
+						PARAM_NOT_CARE, DDR_PHY_CONTROL,
+						0x0, data));
+	}
+
+	return MV_OK;
+}
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_hw_algo.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_hw_algo.h
new file mode 100644
index 000000000..fe0436668
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_hw_algo.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef _DDR3_TRAINING_HW_ALGO_H_
+#define _DDR3_TRAINING_HW_ALGO_H_
+
+int ddr3_tip_vref(u32 dev_num);
+int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id);
+int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap);
+
+#endif /* _DDR3_TRAINING_HW_ALGO_H_ */
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip.h
new file mode 100644
index 000000000..056c21497
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef _DDR3_TRAINING_IP_H_
+#define _DDR3_TRAINING_IP_H_
+
+#include "ddr_topology_def.h"
+
+#define TIP_ENG_LOCK	0x02000000
+#define TIP_TX_DLL_RANGE_MAX	64
+
+#define GET_MIN(arg1, arg2)	((arg1) < (arg2)) ? (arg1) : (arg2)
+#define GET_MAX(arg1, arg2)	((arg1) < (arg2)) ?
(arg2) : (arg1) + +#define INIT_CONTROLLER_MASK_BIT 0x00000001 +#define STATIC_LEVELING_MASK_BIT 0x00000002 +#define SET_LOW_FREQ_MASK_BIT 0x00000004 +#define LOAD_PATTERN_MASK_BIT 0x00000008 +#define SET_MEDIUM_FREQ_MASK_BIT 0x00000010 +#define WRITE_LEVELING_MASK_BIT 0x00000020 +#define LOAD_PATTERN_2_MASK_BIT 0x00000040 +#define READ_LEVELING_MASK_BIT 0x00000080 +#define SW_READ_LEVELING_MASK_BIT 0x00000100 +#define WRITE_LEVELING_SUPP_MASK_BIT 0x00000200 +#define PBS_RX_MASK_BIT 0x00000400 +#define PBS_TX_MASK_BIT 0x00000800 +#define SET_TARGET_FREQ_MASK_BIT 0x00001000 +#define ADJUST_DQS_MASK_BIT 0x00002000 +#define WRITE_LEVELING_TF_MASK_BIT 0x00004000 +#define LOAD_PATTERN_HIGH_MASK_BIT 0x00008000 +#define READ_LEVELING_TF_MASK_BIT 0x00010000 +#define WRITE_LEVELING_SUPP_TF_MASK_BIT 0x00020000 +#define DM_PBS_TX_MASK_BIT 0x00040000 +#define RL_DQS_BURST_MASK_BIT 0x00080000 +#define CENTRALIZATION_RX_MASK_BIT 0x00100000 +#define CENTRALIZATION_TX_MASK_BIT 0x00200000 +#define TX_EMPHASIS_MASK_BIT 0x00400000 +#define PER_BIT_READ_LEVELING_TF_MASK_BIT 0x00800000 +#define VREF_CALIBRATION_MASK_BIT 0x01000000 +#define WRITE_LEVELING_LF_MASK_BIT 0x02000000 + +/* DDR4 Specific Training Mask bits */ + +enum hws_result { + TEST_FAILED = 0, + TEST_SUCCESS = 1, + NO_TEST_DONE = 2 +}; + +enum hws_training_result { + RESULT_PER_BIT, + RESULT_PER_BYTE +}; + +enum auto_tune_stage { + INIT_CONTROLLER, + STATIC_LEVELING, + SET_LOW_FREQ, + LOAD_PATTERN, + SET_MEDIUM_FREQ, + WRITE_LEVELING, + LOAD_PATTERN_2, + READ_LEVELING, + WRITE_LEVELING_SUPP, + PBS_RX, + PBS_TX, + SET_TARGET_FREQ, + ADJUST_DQS, + WRITE_LEVELING_TF, + READ_LEVELING_TF, + WRITE_LEVELING_SUPP_TF, + DM_PBS_TX, + VREF_CALIBRATION, + CENTRALIZATION_RX, + CENTRALIZATION_TX, + TX_EMPHASIS, + LOAD_PATTERN_HIGH, + PER_BIT_READ_LEVELING_TF, + WRITE_LEVELING_LF, + MAX_STAGE_LIMIT +}; + +enum hws_access_type { + ACCESS_TYPE_UNICAST = 0, + ACCESS_TYPE_MULTICAST = 1 +}; + +enum hws_algo_type { + ALGO_TYPE_DYNAMIC, + ALGO_TYPE_STATIC +}; + +struct init_cntr_param { + int is_ctrl64_bit; + int do_mrs_phy; + int init_phy; + int msys_init; +}; + +struct pattern_info { + u8 num_of_phases_tx; + u8 tx_burst_size; + u8 delay_between_bursts; + u8 num_of_phases_rx; + u32 start_addr; + u8 pattern_len; +}; + +struct cs_element { + u8 cs_num; + u8 num_of_cs; +}; + +struct hws_tip_freq_config_info { + u8 is_supported; + u8 bw_per_freq; + u8 rate_per_freq; +}; + +struct hws_cs_config_info { + u32 cs_reg_value; + u32 cs_cbe_value; +}; + +struct dfx_access { + u8 pipe; + u8 client; +}; + +struct hws_xsb_info { + struct dfx_access *dfx_table; +}; + +int ddr3_tip_register_dq_table(u32 dev_num, u32 *table); +int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable); +int hws_ddr3_tip_init_controller(u32 dev_num, + struct init_cntr_param *init_cntr_prm); +int hws_ddr3_tip_load_topology_map(u32 dev_num, + struct mv_ddr_topology_map *topology); +int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type); +int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode); +u8 ddr3_tip_get_buf_min(u8 *buf_ptr); +u8 ddr3_tip_get_buf_max(u8 *buf_ptr); +#endif /* _DDR3_TRAINING_IP_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_bist.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_bist.h new file mode 100644 index 000000000..38058cba8 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_bist.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International 
Ltd. and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_BIST_H_ +#define _DDR3_TRAINING_IP_BIST_H_ + +#include "ddr3_training_ip.h" + +enum hws_bist_operation { + BIST_STOP = 0, + BIST_START = 1 +}; + +enum hws_stress_jump { + STRESS_NONE = 0, + STRESS_ENABLE = 1 +}; + +enum hws_pattern_duration { + DURATION_SINGLE = 0, + DURATION_STOP_AT_FAIL = 1, + DURATION_ADDRESS = 2, + DURATION_CONT = 4 +}; + +struct bist_result { + u32 bist_error_cnt; + u32 bist_fail_low; + u32 bist_fail_high; + u32 bist_last_fail_addr; +}; + +int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id, + struct bist_result *pst_bist_result); +int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern, + enum hws_access_type access_type, + u32 if_num, enum hws_dir direction, + enum hws_stress_jump addr_stress_jump, + enum hws_pattern_duration duration, + enum hws_bist_operation oper_type, + u32 offset, u32 cs_num, u32 pattern_addr_length); +int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result, + u32 cs_num); +int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction, + u32 mode); +int ddr3_tip_run_leveling_sweep_test(int dev_num, u32 repeat_num, + u32 direction, u32 mode); +int ddr3_tip_print_regs(u32 dev_num); +int ddr3_tip_reg_dump(u32 dev_num); +int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type, u32 read_type, + u32 burst_length); +int mv_ddr_dm_to_dq_diff_get(u8 adll_byte_high, u8 adll_byte_low, u8 *vw_vector, + int *delta_h_adll, int *delta_l_adll); +int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector); +#endif /* _DDR3_TRAINING_IP_BIST_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_centralization.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_centralization.h new file mode 100644 index 000000000..392842a10 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_centralization.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_CENTRALIZATION_H +#define _DDR3_TRAINING_IP_CENTRALIZATION_H + +int ddr3_tip_centralization_tx(u32 dev_num); +int ddr3_tip_centralization_rx(u32 dev_num); +int ddr3_tip_print_centralization_result(u32 dev_num); +int ddr3_tip_special_rx(u32 dev_num); + +#endif /* _DDR3_TRAINING_IP_CENTRALIZATION_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_db.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_db.h new file mode 100644 index 000000000..e28b7ecee --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_db.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_DB_H_ +#define _DDR3_TRAINING_IP_DB_H_ + +enum hws_pattern { + PATTERN_PBS1, + PATTERN_PBS2, + PATTERN_PBS3, + PATTERN_TEST, + PATTERN_RL, + PATTERN_RL2, + PATTERN_STATIC_PBS, + PATTERN_KILLER_DQ0, + PATTERN_KILLER_DQ1, + PATTERN_KILLER_DQ2, + PATTERN_KILLER_DQ3, + PATTERN_KILLER_DQ4, + PATTERN_KILLER_DQ5, + PATTERN_KILLER_DQ6, + PATTERN_KILLER_DQ7, + PATTERN_VREF, + PATTERN_FULL_SSO0, + PATTERN_FULL_SSO1, + PATTERN_FULL_SSO2, + PATTERN_FULL_SSO3, + PATTERN_LAST, + PATTERN_SSO_FULL_XTALK_DQ0, + PATTERN_SSO_FULL_XTALK_DQ1, + PATTERN_SSO_FULL_XTALK_DQ2, + PATTERN_SSO_FULL_XTALK_DQ3, + PATTERN_SSO_FULL_XTALK_DQ4, + PATTERN_SSO_FULL_XTALK_DQ5, + PATTERN_SSO_FULL_XTALK_DQ6, + PATTERN_SSO_FULL_XTALK_DQ7, + PATTERN_SSO_XTALK_FREE_DQ0, + PATTERN_SSO_XTALK_FREE_DQ1, + PATTERN_SSO_XTALK_FREE_DQ2, + PATTERN_SSO_XTALK_FREE_DQ3, + PATTERN_SSO_XTALK_FREE_DQ4, + PATTERN_SSO_XTALK_FREE_DQ5, + PATTERN_SSO_XTALK_FREE_DQ6, + PATTERN_SSO_XTALK_FREE_DQ7, + PATTERN_ISI_XTALK_FREE +}; + +enum mv_wl_supp_mode { + WRITE_LEVELING_SUPP_REG_MODE, + WRITE_LEVELING_SUPP_ECC_MODE_DATA_PUPS, + WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP4, + WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP3, + WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP8 +}; + +enum mv_ddr_dev_attribute { + MV_ATTR_TIP_REV, + MV_ATTR_PHY_EDGE, + MV_ATTR_OCTET_PER_INTERFACE, + MV_ATTR_PLL_BEFORE_INIT, + MV_ATTR_TUNE_MASK, + MV_ATTR_INIT_FREQ, + MV_ATTR_MID_FREQ, + MV_ATTR_DFS_LOW_FREQ, + MV_ATTR_DFS_LOW_PHY, + MV_ATTR_DELAY_ENABLE, + MV_ATTR_CK_DELAY, + MV_ATTR_CA_DELAY, + MV_ATTR_INTERLEAVE_WA, + MV_ATTR_LAST +}; + +enum mv_ddr_tip_revison { + MV_TIP_REV_NA, + MV_TIP_REV_1, /* NP5 */ + MV_TIP_REV_2, /* BC2 */ + MV_TIP_REV_3, /* AC3 */ + MV_TIP_REV_4, /* A-380/A-390 */ + MV_TIP_REV_LAST +}; + +enum mv_ddr_phy_edge { + MV_DDR_PHY_EDGE_POSITIVE, + MV_DDR_PHY_EDGE_NEGATIVE +}; + +/* Device attribute functions */ +void ddr3_tip_dev_attr_init(u32 dev_num); +u32 ddr3_tip_dev_attr_get(u32 dev_num, enum mv_ddr_dev_attribute attr_id); +void ddr3_tip_dev_attr_set(u32 dev_num, enum mv_ddr_dev_attribute attr_id, u32 value); + +#endif /* _DDR3_TRAINING_IP_DB_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_def.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_def.h new file mode 100644 index 000000000..8765df7cf --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_def.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_DEF_H +#define _DDR3_TRAINING_IP_DEF_H + +#define PATTERN_55 0x55555555 +#define PATTERN_AA 0xaaaaaaaa +#define PATTERN_80 0x80808080 +#define PATTERN_20 0x20202020 +#define PATTERN_01 0x01010101 +#define PATTERN_FF 0xffffffff +#define PATTERN_00 0x00000000 + +/* 16bit bus width patterns */ +#define PATTERN_55AA 0x5555aaaa +#define PATTERN_00FF 0x0000ffff +#define PATTERN_0080 0x00008080 + +#define INVALID_VALUE 0xffffffff +#define MAX_NUM_OF_DUNITS 32 +/* + * length *2 = length in words of pattern, first low address, + * second high address + */ +#define TEST_PATTERN_LENGTH 4 +#define KILLER_PATTERN_DQ_NUMBER 8 +#define SSO_DQ_NUMBER 4 +#define PATTERN_MAXIMUM_LENGTH 64 +#define ADLL_TX_LENGTH 64 +#define ADLL_RX_LENGTH 32 + +#define PARAM_NOT_CARE 0 +#define PARAM_UNDEFINED 0xffffffff + +#define READ_LEVELING_PHY_OFFSET 2 +#define WRITE_LEVELING_PHY_OFFSET 0 + +#define MASK_ALL_BITS 0xffffffff + +#define CS_BIT_MASK 0xf + +/* DFX access */ +#define BROADCAST_ID 28 +#define MULTICAST_ID 29 + +#define XSB_BASE_ADDR 0x00004000 +#define XSB_CTRL_0_REG 0x00000000 +#define XSB_CTRL_1_REG 0x00000004 +#define XSB_CMD_REG 0x00000008 +#define XSB_ADDRESS_REG 0x0000000c +#define XSB_DATA_REG 0x00000010 +#define PIPE_ENABLE_ADDR 0x000f8000 +#define ENABLE_DDR_TUNING_ADDR 0x000f829c + +#define CLIENT_BASE_ADDR 0x00002000 +#define CLIENT_CTRL_REG 0x00000000 + +#define TARGET_INT 0x1801 +#define TARGET_EXT 0x180e +#define BYTE_EN 0 +#define CMD_READ 0 +#define CMD_WRITE 1 + +#define INTERNAL_ACCESS_PORT 1 +#define EXECUTING 1 +#define ACCESS_EXT 1 +#define CS2_EXIST_BIT 2 +#define TRAINING_ID 0xf +#define EXT_TRAINING_ID 1 +#define EXT_MODE 0x4 + +#define GET_RESULT_STATE(res) (res) +#define SET_RESULT_STATE(res, state) (res = state) + +#define ADDR_SIZE_512MB 0x04000000 +#define ADDR_SIZE_1GB 0x08000000 +#define ADDR_SIZE_2GB 0x10000000 +#define ADDR_SIZE_4GB 0x20000000 +#define ADDR_SIZE_8GB 0x40000000 +#define ADDR_SIZE_16GB 0x80000000 + + +enum hws_edge_compare { + EDGE_PF, + EDGE_FP, + EDGE_FPF, + EDGE_PFP +}; + +enum hws_control_element { + HWS_CONTROL_ELEMENT_ADLL, /* per bit 1 edge */ + HWS_CONTROL_ELEMENT_DQ_SKEW, + HWS_CONTROL_ELEMENT_DQS_SKEW +}; + +enum hws_search_dir { + HWS_LOW2HIGH, + HWS_HIGH2LOW, + HWS_SEARCH_DIR_LIMIT +}; + +enum hws_operation { + OPERATION_READ = 0, + OPERATION_WRITE = 1 +}; + +enum hws_training_ip_stat { + HWS_TRAINING_IP_STATUS_FAIL, + HWS_TRAINING_IP_STATUS_SUCCESS, + HWS_TRAINING_IP_STATUS_TIMEOUT +}; + +enum hws_ddr_cs { + CS_SINGLE, + CS_NON_SINGLE +}; + +enum hws_ddr_phy { + DDR_PHY_DATA = 0, + DDR_PHY_CONTROL = 1 +}; + +enum hws_dir { + OPER_WRITE, + OPER_READ, + OPER_WRITE_AND_READ +}; + +enum hws_wl_supp { + PHASE_SHIFT, + CLOCK_SHIFT, + ALIGN_SHIFT +}; + +enum mv_ddr_tip_bit_state { + BIT_LOW_UI, + BIT_HIGH_UI, + BIT_SPLIT_IN, + BIT_SPLIT_OUT, + BIT_STATE_LAST +}; + +enum mv_ddr_tip_byte_state{ + BYTE_NOT_DEFINED, + BYTE_HOMOGENEOUS_LOW = 0x1, + BYTE_HOMOGENEOUS_HIGH = 0x2, + BYTE_HOMOGENEOUS_SPLIT_IN = 0x4, + BYTE_HOMOGENEOUS_SPLIT_OUT = 0x8, + BYTE_SPLIT_OUT_MIX = 0x10, + BYTE_STATE_LAST +}; + +struct reg_data { + unsigned int reg_addr; + unsigned int reg_data; + unsigned int reg_mask; +}; + +enum dm_direction { + DM_DIR_INVERSE, + DM_DIR_DIRECT +}; + +#endif /* _DDR3_TRAINING_IP_DEF_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_engine.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_engine.c new file mode 100644 index 000000000..102f9bd63 --- /dev/null +++ 
b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_engine.c @@ -0,0 +1,1680 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_regs.h" +#include "ddr_training_ip_db.h" + +#define PATTERN_1 0x55555555 +#define PATTERN_2 0xaaaaaaaa + +#define VALIDATE_TRAINING_LIMIT(e1, e2) \ + ((((e2) - (e1) + 1) > 33) && ((e1) < 67)) + +u32 phy_reg_bk[MAX_INTERFACE_NUM][MAX_BUS_NUM][BUS_WIDTH_IN_BITS]; + +u32 training_res[MAX_INTERFACE_NUM * MAX_BUS_NUM * BUS_WIDTH_IN_BITS * + HWS_SEARCH_DIR_LIMIT]; +u8 byte_status[MAX_INTERFACE_NUM][MAX_BUS_NUM]; /* holds the bit status in the byte in wrapper function*/ + +u16 mask_results_dq_reg_map[] = { + RESULT_CONTROL_PUP_0_BIT_0_REG, RESULT_CONTROL_PUP_0_BIT_1_REG, + RESULT_CONTROL_PUP_0_BIT_2_REG, RESULT_CONTROL_PUP_0_BIT_3_REG, + RESULT_CONTROL_PUP_0_BIT_4_REG, RESULT_CONTROL_PUP_0_BIT_5_REG, + RESULT_CONTROL_PUP_0_BIT_6_REG, RESULT_CONTROL_PUP_0_BIT_7_REG, + RESULT_CONTROL_PUP_1_BIT_0_REG, RESULT_CONTROL_PUP_1_BIT_1_REG, + RESULT_CONTROL_PUP_1_BIT_2_REG, RESULT_CONTROL_PUP_1_BIT_3_REG, + RESULT_CONTROL_PUP_1_BIT_4_REG, RESULT_CONTROL_PUP_1_BIT_5_REG, + RESULT_CONTROL_PUP_1_BIT_6_REG, RESULT_CONTROL_PUP_1_BIT_7_REG, + RESULT_CONTROL_PUP_2_BIT_0_REG, RESULT_CONTROL_PUP_2_BIT_1_REG, + RESULT_CONTROL_PUP_2_BIT_2_REG, RESULT_CONTROL_PUP_2_BIT_3_REG, + RESULT_CONTROL_PUP_2_BIT_4_REG, RESULT_CONTROL_PUP_2_BIT_5_REG, + RESULT_CONTROL_PUP_2_BIT_6_REG, RESULT_CONTROL_PUP_2_BIT_7_REG, + RESULT_CONTROL_PUP_3_BIT_0_REG, RESULT_CONTROL_PUP_3_BIT_1_REG, + RESULT_CONTROL_PUP_3_BIT_2_REG, RESULT_CONTROL_PUP_3_BIT_3_REG, + RESULT_CONTROL_PUP_3_BIT_4_REG, RESULT_CONTROL_PUP_3_BIT_5_REG, + RESULT_CONTROL_PUP_3_BIT_6_REG, RESULT_CONTROL_PUP_3_BIT_7_REG, + RESULT_CONTROL_PUP_4_BIT_0_REG, RESULT_CONTROL_PUP_4_BIT_1_REG, + RESULT_CONTROL_PUP_4_BIT_2_REG, RESULT_CONTROL_PUP_4_BIT_3_REG, + RESULT_CONTROL_PUP_4_BIT_4_REG, RESULT_CONTROL_PUP_4_BIT_5_REG, + RESULT_CONTROL_PUP_4_BIT_6_REG, RESULT_CONTROL_PUP_4_BIT_7_REG, +#if MAX_BUS_NUM == 9 + RESULT_CONTROL_PUP_5_BIT_0_REG, RESULT_CONTROL_PUP_5_BIT_1_REG, + RESULT_CONTROL_PUP_5_BIT_2_REG, RESULT_CONTROL_PUP_5_BIT_3_REG, + RESULT_CONTROL_PUP_5_BIT_4_REG, RESULT_CONTROL_PUP_5_BIT_5_REG, + RESULT_CONTROL_PUP_5_BIT_6_REG, RESULT_CONTROL_PUP_5_BIT_7_REG, + RESULT_CONTROL_PUP_6_BIT_0_REG, RESULT_CONTROL_PUP_6_BIT_1_REG, + RESULT_CONTROL_PUP_6_BIT_2_REG, RESULT_CONTROL_PUP_6_BIT_3_REG, + RESULT_CONTROL_PUP_6_BIT_4_REG, RESULT_CONTROL_PUP_6_BIT_5_REG, + RESULT_CONTROL_PUP_6_BIT_6_REG, RESULT_CONTROL_PUP_6_BIT_7_REG, + RESULT_CONTROL_PUP_7_BIT_0_REG, RESULT_CONTROL_PUP_7_BIT_1_REG, + RESULT_CONTROL_PUP_7_BIT_2_REG, RESULT_CONTROL_PUP_7_BIT_3_REG, + RESULT_CONTROL_PUP_7_BIT_4_REG, RESULT_CONTROL_PUP_7_BIT_5_REG, + RESULT_CONTROL_PUP_7_BIT_6_REG, RESULT_CONTROL_PUP_7_BIT_7_REG, + RESULT_CONTROL_PUP_8_BIT_0_REG, RESULT_CONTROL_PUP_8_BIT_1_REG, + RESULT_CONTROL_PUP_8_BIT_2_REG, RESULT_CONTROL_PUP_8_BIT_3_REG, + RESULT_CONTROL_PUP_8_BIT_4_REG, RESULT_CONTROL_PUP_8_BIT_5_REG, + RESULT_CONTROL_PUP_8_BIT_6_REG, RESULT_CONTROL_PUP_8_BIT_7_REG, +#endif + 0xffff +}; + +u16 mask_results_pup_reg_map[] = { + RESULT_CONTROL_BYTE_PUP_0_REG, RESULT_CONTROL_BYTE_PUP_1_REG, + RESULT_CONTROL_BYTE_PUP_2_REG, RESULT_CONTROL_BYTE_PUP_3_REG, + RESULT_CONTROL_BYTE_PUP_4_REG, +#if MAX_BUS_NUM == 9 + RESULT_CONTROL_BYTE_PUP_5_REG, RESULT_CONTROL_BYTE_PUP_6_REG, + RESULT_CONTROL_BYTE_PUP_7_REG, RESULT_CONTROL_BYTE_PUP_8_REG, +#endif + 0xffff +}; + +#if MAX_BUS_NUM == 
5 +u16 mask_results_dq_reg_map_pup3_ecc[] = { + RESULT_CONTROL_PUP_0_BIT_0_REG, RESULT_CONTROL_PUP_0_BIT_1_REG, + RESULT_CONTROL_PUP_0_BIT_2_REG, RESULT_CONTROL_PUP_0_BIT_3_REG, + RESULT_CONTROL_PUP_0_BIT_4_REG, RESULT_CONTROL_PUP_0_BIT_5_REG, + RESULT_CONTROL_PUP_0_BIT_6_REG, RESULT_CONTROL_PUP_0_BIT_7_REG, + RESULT_CONTROL_PUP_1_BIT_0_REG, RESULT_CONTROL_PUP_1_BIT_1_REG, + RESULT_CONTROL_PUP_1_BIT_2_REG, RESULT_CONTROL_PUP_1_BIT_3_REG, + RESULT_CONTROL_PUP_1_BIT_4_REG, RESULT_CONTROL_PUP_1_BIT_5_REG, + RESULT_CONTROL_PUP_1_BIT_6_REG, RESULT_CONTROL_PUP_1_BIT_7_REG, + RESULT_CONTROL_PUP_2_BIT_0_REG, RESULT_CONTROL_PUP_2_BIT_1_REG, + RESULT_CONTROL_PUP_2_BIT_2_REG, RESULT_CONTROL_PUP_2_BIT_3_REG, + RESULT_CONTROL_PUP_2_BIT_4_REG, RESULT_CONTROL_PUP_2_BIT_5_REG, + RESULT_CONTROL_PUP_2_BIT_6_REG, RESULT_CONTROL_PUP_2_BIT_7_REG, + RESULT_CONTROL_PUP_4_BIT_0_REG, RESULT_CONTROL_PUP_4_BIT_1_REG, + RESULT_CONTROL_PUP_4_BIT_2_REG, RESULT_CONTROL_PUP_4_BIT_3_REG, + RESULT_CONTROL_PUP_4_BIT_4_REG, RESULT_CONTROL_PUP_4_BIT_5_REG, + RESULT_CONTROL_PUP_4_BIT_6_REG, RESULT_CONTROL_PUP_4_BIT_7_REG, + RESULT_CONTROL_PUP_3_BIT_0_REG, RESULT_CONTROL_PUP_3_BIT_1_REG, + RESULT_CONTROL_PUP_3_BIT_2_REG, RESULT_CONTROL_PUP_3_BIT_3_REG, + RESULT_CONTROL_PUP_3_BIT_4_REG, RESULT_CONTROL_PUP_3_BIT_5_REG, + RESULT_CONTROL_PUP_3_BIT_6_REG, RESULT_CONTROL_PUP_3_BIT_7_REG +}; +#endif + +#if MAX_BUS_NUM == 5 +u16 mask_results_pup_reg_map_pup3_ecc[] = { + RESULT_CONTROL_BYTE_PUP_0_REG, RESULT_CONTROL_BYTE_PUP_1_REG, + RESULT_CONTROL_BYTE_PUP_2_REG, RESULT_CONTROL_BYTE_PUP_4_REG, + RESULT_CONTROL_BYTE_PUP_4_REG +}; +#endif + +struct pattern_info pattern_table_64[] = { + /* + * num_of_phases_tx, tx_burst_size; + * delay_between_bursts, num_of_phases_rx, + * start_addr, pattern_len + */ + {0x7, 0x7, 2, 0x7, 0x00000, 8}, /* PATTERN_PBS1 */ + {0x7, 0x7, 2, 0x7, 0x00080, 8}, /* PATTERN_PBS2 */ + {0x7, 0x7, 2, 0x7, 0x00100, 8}, /* PATTERN_PBS3 */ + {0x7, 0x7, 2, 0x7, 0x00030, 8}, /* PATTERN_TEST */ + {0x7, 0x7, 2, 0x7, 0x00100, 8}, /* PATTERN_RL */ + {0x7, 0x7, 2, 0x7, 0x00100, 8}, /* PATTERN_RL2 */ + {0x1f, 0xf, 2, 0xf, 0x00680, 32}, /* PATTERN_STATIC_PBS */ + {0x1f, 0xf, 2, 0xf, 0x00a80, 32}, /* PATTERN_KILLER_DQ0 */ + {0x1f, 0xf, 2, 0xf, 0x01280, 32}, /* PATTERN_KILLER_DQ1 */ + {0x1f, 0xf, 2, 0xf, 0x01a80, 32}, /* PATTERN_KILLER_DQ2 */ + {0x1f, 0xf, 2, 0xf, 0x02280, 32}, /* PATTERN_KILLER_DQ3 */ + {0x1f, 0xf, 2, 0xf, 0x02a80, 32}, /* PATTERN_KILLER_DQ4 */ + {0x1f, 0xf, 2, 0xf, 0x03280, 32}, /* PATTERN_KILLER_DQ5 */ + {0x1f, 0xf, 2, 0xf, 0x03a80, 32}, /* PATTERN_KILLER_DQ6 */ + {0x1f, 0xf, 2, 0xf, 0x04280, 32}, /* PATTERN_KILLER_DQ7 */ + {0x1f, 0xf, 2, 0xf, 0x00e80, 32}, /* PATTERN_KILLER_DQ0_64 */ + {0x1f, 0xf, 2, 0xf, 0x01680, 32}, /* PATTERN_KILLER_DQ1_64 */ + {0x1f, 0xf, 2, 0xf, 0x01e80, 32}, /* PATTERN_KILLER_DQ2_64 */ + {0x1f, 0xf, 2, 0xf, 0x02680, 32}, /* PATTERN_KILLER_DQ3_64 */ + {0x1f, 0xf, 2, 0xf, 0x02e80, 32}, /* PATTERN_KILLER_DQ4_64 */ + {0x1f, 0xf, 2, 0xf, 0x03680, 32}, /* PATTERN_KILLER_DQ5_64 */ + {0x1f, 0xf, 2, 0xf, 0x03e80, 32}, /* PATTERN_KILLER_DQ6_64 */ + {0x1f, 0xf, 2, 0xf, 0x04680, 32}, /* PATTERN_KILLER_DQ7_64 */ + {0x1f, 0xf, 2, 0xf, 0x04a80, 32}, /* PATTERN_KILLER_DQ0_INV */ + {0x1f, 0xf, 2, 0xf, 0x05280, 32}, /* PATTERN_KILLER_DQ1_INV */ + {0x1f, 0xf, 2, 0xf, 0x05a80, 32}, /* PATTERN_KILLER_DQ2_INV */ + {0x1f, 0xf, 2, 0xf, 0x06280, 32}, /* PATTERN_KILLER_DQ3_INV */ + {0x1f, 0xf, 2, 0xf, 0x06a80, 32}, /* PATTERN_KILLER_DQ4_INV */ + {0x1f, 0xf, 2, 0xf, 0x07280, 32}, /* PATTERN_KILLER_DQ5_INV */ + 
{0x1f, 0xf, 2, 0xf, 0x07a80, 32}, /* PATTERN_KILLER_DQ6_INV */ + {0x1f, 0xf, 2, 0xf, 0x08280, 32}, /* PATTERN_KILLER_DQ7_INV */ + {0x1f, 0xf, 2, 0xf, 0x04e80, 32}, /* PATTERN_KILLER_DQ0_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x05680, 32}, /* PATTERN_KILLER_DQ1_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x05e80, 32}, /* PATTERN_KILLER_DQ2_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x06680, 32}, /* PATTERN_KILLER_DQ3_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x06e80, 32}, /* PATTERN_KILLER_DQ4_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x07680, 32}, /* PATTERN_KILLER_DQ5_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x07e80, 32}, /* PATTERN_KILLER_DQ6_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x08680, 32}, /* PATTERN_KILLER_DQ7_INV_64 */ + {0x1f, 0xf, 2, 0xf, 0x08a80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ0 */ + {0x1f, 0xf, 2, 0xf, 0x09280, 32}, /* PATTERN_SSO_FULL_XTALK_DQ1 */ + {0x1f, 0xf, 2, 0xf, 0x09a80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ2 */ + {0x1f, 0xf, 2, 0xf, 0x0a280, 32}, /* PATTERN_SSO_FULL_XTALK_DQ3 */ + {0x1f, 0xf, 2, 0xf, 0x0aa80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ4 */ + {0x1f, 0xf, 2, 0xf, 0x0b280, 32}, /* PATTERN_SSO_FULL_XTALK_DQ5 */ + {0x1f, 0xf, 2, 0xf, 0x0ba80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ6 */ + {0x1f, 0xf, 2, 0xf, 0x0c280, 32}, /* PATTERN_SSO_FULL_XTALK_DQ7 */ + {0x1f, 0xf, 2, 0xf, 0x08e80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ0_64 */ + {0x1f, 0xf, 2, 0xf, 0x09680, 32}, /* PATTERN_SSO_FULL_XTALK_DQ1_64 */ + {0x1f, 0xf, 2, 0xf, 0x09e80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ2_64 */ + {0x1f, 0xf, 2, 0xf, 0x0a680, 32}, /* PATTERN_SSO_FULL_XTALK_DQ3_64 */ + {0x1f, 0xf, 2, 0xf, 0x0ae80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ4_64 */ + {0x1f, 0xf, 2, 0xf, 0x0b680, 32}, /* PATTERN_SSO_FULL_XTALK_DQ5_64 */ + {0x1f, 0xf, 2, 0xf, 0x0be80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ6_64 */ + {0x1f, 0xf, 2, 0xf, 0x0c680, 32}, /* PATTERN_SSO_FULL_XTALK_DQ7_64 */ + {0x1f, 0xf, 2, 0xf, 0x0ca80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ0 */ + {0x1f, 0xf, 2, 0xf, 0x0d280, 32}, /* PATTERN_SSO_XTALK_FREE_DQ1 */ + {0x1f, 0xf, 2, 0xf, 0x0da80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ2 */ + {0x1f, 0xf, 2, 0xf, 0x0e280, 32}, /* PATTERN_SSO_XTALK_FREE_DQ3 */ + {0x1f, 0xf, 2, 0xf, 0x0ea80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ4 */ + {0x1f, 0xf, 2, 0xf, 0x0f280, 32}, /* PATTERN_SSO_XTALK_FREE_DQ5 */ + {0x1f, 0xf, 2, 0xf, 0x0fa80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ6 */ + {0x1f, 0xf, 2, 0xf, 0x10280, 32}, /* PATTERN_SSO_XTALK_FREE_DQ7 */ + {0x1f, 0xf, 2, 0xf, 0x0ce80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ0_64 */ + {0x1f, 0xf, 2, 0xf, 0x0d680, 32}, /* PATTERN_SSO_XTALK_FREE_DQ1_64 */ + {0x1f, 0xf, 2, 0xf, 0x0de80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ2_64 */ + {0x1f, 0xf, 2, 0xf, 0x0e680, 32}, /* PATTERN_SSO_XTALK_FREE_DQ3_64 */ + {0x1f, 0xf, 2, 0xf, 0x0ee80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ4_64 */ + {0x1f, 0xf, 2, 0xf, 0x0f680, 32}, /* PATTERN_SSO_XTALK_FREE_DQ5_64 */ + {0x1f, 0xf, 2, 0xf, 0x0fe80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ6_64 */ + {0x1f, 0xf, 2, 0xf, 0x10680, 32}, /* PATTERN_SSO_XTALK_FREE_DQ7_64 */ + {0x1f, 0xf, 2, 0xf, 0x10a80, 32}, /* PATTERN_ISI_XTALK_FREE */ + {0x1f, 0xf, 2, 0xf, 0x10e80, 32}, /* PATTERN_ISI_XTALK_FREE_64 */ + {0x1f, 0xf, 2, 0xf, 0x11280, 32}, /* PATTERN_VREF */ + {0x1f, 0xf, 2, 0xf, 0x11680, 32}, /* PATTERN_VREF_64 */ + {0x1f, 0xf, 2, 0xf, 0x11a80, 32}, /* PATTERN_VREF_INV */ + {0x1f, 0xf, 2, 0xf, 0x11e80, 32}, /* PATTERN_FULL_SSO_0T */ + {0x1f, 0xf, 2, 0xf, 0x12280, 32}, /* PATTERN_FULL_SSO_1T */ + {0x1f, 0xf, 2, 0xf, 0x12680, 32}, /* PATTERN_FULL_SSO_2T */ + {0x1f, 0xf, 2, 0xf, 0x12a80, 32}, /* PATTERN_FULL_SSO_3T */ + {0x1f, 0xf, 2, 0xf, 0x12e80, 32}, /* PATTERN_RESONANCE_1T */ + {0x1f, 
0xf, 2, 0xf, 0x13280, 32}, /* PATTERN_RESONANCE_2T */ + {0x1f, 0xf, 2, 0xf, 0x13680, 32}, /* PATTERN_RESONANCE_3T */ + {0x1f, 0xf, 2, 0xf, 0x13a80, 32}, /* PATTERN_RESONANCE_4T */ + {0x1f, 0xf, 2, 0xf, 0x13e80, 32}, /* PATTERN_RESONANCE_5T */ + {0x1f, 0xf, 2, 0xf, 0x14280, 32}, /* PATTERN_RESONANCE_6T */ + {0x1f, 0xf, 2, 0xf, 0x14680, 32}, /* PATTERN_RESONANCE_7T */ + {0x1f, 0xf, 2, 0xf, 0x14a80, 32}, /* PATTERN_RESONANCE_8T */ + {0x1f, 0xf, 2, 0xf, 0x14e80, 32}, /* PATTERN_RESONANCE_9T */ + {0x1f, 0xf, 2, 0xf, 0x15280, 32}, /* PATTERN_ZERO */ + {0x1f, 0xf, 2, 0xf, 0x15680, 32} /* PATTERN_ONE */ + /* Note: actual start_address is "<< 3" of defined address */ +}; + +struct pattern_info pattern_table_16[] = { + /* + * num tx phases, tx burst, delay between, rx pattern, + * start_address, pattern_len + */ + {1, 1, 2, 1, 0x0080, 2}, /* PATTERN_PBS1 */ + {1, 1, 2, 1, 0x00c0, 2}, /* PATTERN_PBS2 */ + {1, 1, 2, 1, 0x0380, 2}, /* PATTERN_PBS3 */ + {1, 1, 2, 1, 0x0040, 2}, /* PATTERN_TEST */ + {1, 1, 2, 1, 0x0100, 2}, /* PATTERN_RL */ + {1, 1, 2, 1, 0x0000, 2}, /* PATTERN_RL2 */ + {0xf, 0x7, 2, 0x7, 0x0140, 16}, /* PATTERN_STATIC_PBS */ + {0xf, 0x7, 2, 0x7, 0x0190, 16}, /* PATTERN_KILLER_DQ0 */ + {0xf, 0x7, 2, 0x7, 0x01d0, 16}, /* PATTERN_KILLER_DQ1 */ + {0xf, 0x7, 2, 0x7, 0x0210, 16}, /* PATTERN_KILLER_DQ2 */ + {0xf, 0x7, 2, 0x7, 0x0250, 16}, /* PATTERN_KILLER_DQ3 */ + {0xf, 0x7, 2, 0x7, 0x0290, 16}, /* PATTERN_KILLER_DQ4 */ + {0xf, 0x7, 2, 0x7, 0x02d0, 16}, /* PATTERN_KILLER_DQ5 */ + {0xf, 0x7, 2, 0x7, 0x0310, 16}, /* PATTERN_KILLER_DQ6 */ + {0xf, 0x7, 2, 0x7, 0x0350, 16}, /* PATTERN_KILLER_DQ7 */ + {0xf, 0x7, 2, 0x7, 0x04c0, 16}, /* PATTERN_VREF */ + {0xf, 0x7, 2, 0x7, 0x03c0, 16}, /* PATTERN_FULL_SSO_1T */ + {0xf, 0x7, 2, 0x7, 0x0400, 16}, /* PATTERN_FULL_SSO_2T */ + {0xf, 0x7, 2, 0x7, 0x0440, 16}, /* PATTERN_FULL_SSO_3T */ + {0xf, 0x7, 2, 0x7, 0x0480, 16}, /* PATTERN_FULL_SSO_4T */ + {0xf, 7, 2, 7, 0x6280, 16}, /* PATTERN_SSO_FULL_XTALK_DQ1 */ + {0xf, 7, 2, 7, 0x6680, 16}, /* PATTERN_SSO_FULL_XTALK_DQ1 */ + {0xf, 7, 2, 7, 0x6A80, 16}, /* PATTERN_SSO_FULL_XTALK_DQ2 */ + {0xf, 7, 2, 7, 0x6E80, 16}, /* PATTERN_SSO_FULL_XTALK_DQ3 */ + {0xf, 7, 2, 7, 0x7280, 16}, /* PATTERN_SSO_FULL_XTALK_DQ4 */ + {0xf, 7, 2, 7, 0x7680, 16}, /* PATTERN_SSO_FULL_XTALK_DQ5 */ + {0xf, 7, 2, 7, 0x7A80, 16}, /* PATTERN_SSO_FULL_XTALK_DQ6 */ + {0xf, 7, 2, 7, 0x7E80, 16}, /* PATTERN_SSO_FULL_XTALK_DQ7 */ + {0xf, 7, 2, 7, 0x8280, 16}, /* PATTERN_SSO_XTALK_FREE_DQ0 */ + {0xf, 7, 2, 7, 0x8680, 16}, /* PATTERN_SSO_XTALK_FREE_DQ1 */ + {0xf, 7, 2, 7, 0x8A80, 16}, /* PATTERN_SSO_XTALK_FREE_DQ2 */ + {0xf, 7, 2, 7, 0x8E80, 16}, /* PATTERN_SSO_XTALK_FREE_DQ3 */ + {0xf, 7, 2, 7, 0x9280, 16}, /* PATTERN_SSO_XTALK_FREE_DQ4 */ + {0xf, 7, 2, 7, 0x9680, 16}, /* PATTERN_SSO_XTALK_FREE_DQ5 */ + {0xf, 7, 2, 7, 0x9A80, 16}, /* PATTERN_SSO_XTALK_FREE_DQ6 */ + {0xf, 7, 2, 7, 0x9E80, 16}, /* PATTERN_SSO_XTALK_FREE_DQ7 */ + {0xf, 7, 2, 7, 0xA280, 16} /* PATTERN_ISI_XTALK_FREE */ + /* Note: actual start_address is "<< 3" of defined address */ +}; + +struct pattern_info pattern_table_32[] = { + /* + * num tx phases, tx burst, delay between, rx pattern, + * start_address, pattern_len + */ + {3, 3, 2, 3, 0x0080, 4}, /* PATTERN_PBS1 */ + {3, 3, 2, 3, 0x00c0, 4}, /* PATTERN_PBS2 */ + {3, 3, 2, 3, 0x0380, 4}, /* PATTERN_PBS3 */ + {3, 3, 2, 3, 0x0040, 4}, /* PATTERN_TEST */ + {3, 3, 2, 3, 0x0100, 4}, /* PATTERN_RL */ + {3, 3, 2, 3, 0x0000, 4}, /* PATTERN_RL2 */ + {0x1f, 0xf, 2, 0xf, 0x0140, 32}, /* PATTERN_STATIC_PBS */ + {0x1f, 0xf, 2, 0xf, 0x0190, 
32}, /* PATTERN_KILLER_DQ0 */ + {0x1f, 0xf, 2, 0xf, 0x01d0, 32}, /* PATTERN_KILLER_DQ1 */ + {0x1f, 0xf, 2, 0xf, 0x0210, 32}, /* PATTERN_KILLER_DQ2 */ + {0x1f, 0xf, 2, 0xf, 0x0250, 32}, /* PATTERN_KILLER_DQ3 */ + {0x1f, 0xf, 2, 0xf, 0x0290, 32}, /* PATTERN_KILLER_DQ4 */ + {0x1f, 0xf, 2, 0xf, 0x02d0, 32}, /* PATTERN_KILLER_DQ5 */ + {0x1f, 0xf, 2, 0xf, 0x0310, 32}, /* PATTERN_KILLER_DQ6 */ + {0x1f, 0xf, 2, 0xf, 0x0350, 32}, /* PATTERN_KILLER_DQ7 */ + {0x1f, 0xf, 2, 0xf, 0x04c0, 32}, /* PATTERN_VREF */ + {0x1f, 0xf, 2, 0xf, 0x03c0, 32}, /* PATTERN_FULL_SSO_1T */ + {0x1f, 0xf, 2, 0xf, 0x0400, 32}, /* PATTERN_FULL_SSO_2T */ + {0x1f, 0xf, 2, 0xf, 0x0440, 32}, /* PATTERN_FULL_SSO_3T */ + {0x1f, 0xf, 2, 0xf, 0x0480, 32}, /* PATTERN_FULL_SSO_4T */ + {0x1f, 0xF, 2, 0xf, 0x6280, 32}, /* PATTERN_SSO_FULL_XTALK_DQ0 */ + {0x1f, 0xF, 2, 0xf, 0x6680, 32}, /* PATTERN_SSO_FULL_XTALK_DQ1 */ + {0x1f, 0xF, 2, 0xf, 0x6A80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ2 */ + {0x1f, 0xF, 2, 0xf, 0x6E80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ3 */ + {0x1f, 0xF, 2, 0xf, 0x7280, 32}, /* PATTERN_SSO_FULL_XTALK_DQ4 */ + {0x1f, 0xF, 2, 0xf, 0x7680, 32}, /* PATTERN_SSO_FULL_XTALK_DQ5 */ + {0x1f, 0xF, 2, 0xf, 0x7A80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ6 */ + {0x1f, 0xF, 2, 0xf, 0x7E80, 32}, /* PATTERN_SSO_FULL_XTALK_DQ7 */ + {0x1f, 0xF, 2, 0xf, 0x8280, 32}, /* PATTERN_SSO_XTALK_FREE_DQ0 */ + {0x1f, 0xF, 2, 0xf, 0x8680, 32}, /* PATTERN_SSO_XTALK_FREE_DQ1 */ + {0x1f, 0xF, 2, 0xf, 0x8A80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ2 */ + {0x1f, 0xF, 2, 0xf, 0x8E80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ3 */ + {0x1f, 0xF, 2, 0xf, 0x9280, 32}, /* PATTERN_SSO_XTALK_FREE_DQ4 */ + {0x1f, 0xF, 2, 0xf, 0x9680, 32}, /* PATTERN_SSO_XTALK_FREE_DQ5 */ + {0x1f, 0xF, 2, 0xf, 0x9A80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ6 */ + {0x1f, 0xF, 2, 0xf, 0x9E80, 32}, /* PATTERN_SSO_XTALK_FREE_DQ7 */ + {0x1f, 0xF, 2, 0xf, 0xA280, 32} /* PATTERN_ISI_XTALK_FREE */ + /* Note: actual start_address is "<< 3" of defined address */ +}; + +u32 train_dev_num; +enum hws_ddr_cs traintrain_cs_type; +u32 train_pup_num; +enum hws_training_result train_result_type; +enum hws_control_element train_control_element; +enum hws_search_dir traine_search_dir; +enum hws_dir train_direction; +u32 train_if_select; +u32 train_init_value; +u32 train_number_iterations; +enum hws_pattern train_pattern; +enum hws_edge_compare train_edge_compare; +u32 train_cs_num; +u32 train_if_acess, train_if_id, train_pup_access; +u32 max_polling_for_done = 1000000; + +u32 *ddr3_tip_get_buf_ptr(u32 dev_num, enum hws_search_dir search, + enum hws_training_result result_type, + u32 interface_num) +{ + u32 *buf_ptr = NULL; + + buf_ptr = &training_res + [MAX_INTERFACE_NUM * MAX_BUS_NUM * BUS_WIDTH_IN_BITS * search + + interface_num * MAX_BUS_NUM * BUS_WIDTH_IN_BITS]; + + return buf_ptr; +} + +enum { + PASS, + FAIL +}; +/* + * IP Training search + * Note: for one edge search only from fail to pass, else jitter can + * be be entered into solution. 
+ */ +int ddr3_tip_ip_training(u32 dev_num, enum hws_access_type access_type, + u32 interface_num, + enum hws_access_type pup_access_type, + u32 pup_num, enum hws_training_result result_type, + enum hws_control_element control_element, + enum hws_search_dir search_dir, enum hws_dir direction, + u32 interface_mask, u32 init_value, u32 num_iter, + enum hws_pattern pattern, + enum hws_edge_compare edge_comp, + enum hws_ddr_cs cs_type, u32 cs_num, + enum hws_training_ip_stat *train_status) +{ + u32 mask_dq_num_of_regs, mask_pup_num_of_regs, index_cnt, + reg_data, pup_id; + u32 tx_burst_size; + u32 delay_between_burst; + u32 rd_mode; + u32 data; + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (pup_num >= octets_per_if_num) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("pup_num %d not valid\n", pup_num)); + } + if (interface_num >= MAX_INTERFACE_NUM) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("if_id %d not valid\n", + interface_num)); + } + if (train_status == NULL) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("error param 4\n")); + return MV_BAD_PARAM; + } + + /* load pattern */ + if (cs_type == CS_SINGLE) { + /* All CSs to CS0 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + DUAL_DUNIT_CFG_REG, 1 << 3, 1 << 3)); + /* All CSs to CS0 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + ODPG_DATA_CTRL_REG, + (0x3 | (effective_cs << 26)), 0xc000003)); + } else { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + DUAL_DUNIT_CFG_REG, 0, 1 << 3)); + /* CS select */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + ODPG_DATA_CTRL_REG, 0x3 | cs_num << 26, + 0x3 | 3 << 26)); + } + + /* load pattern to ODPG */ + ddr3_tip_load_pattern_to_odpg(dev_num, access_type, interface_num, + pattern, + pattern_table[pattern].start_addr); + tx_burst_size = (direction == OPER_WRITE) ? + pattern_table[pattern].tx_burst_size : 0; + delay_between_burst = (direction == OPER_WRITE) ? 2 : 0; + rd_mode = (direction == OPER_WRITE) ? 1 : 0; + CHECK_STATUS(ddr3_tip_configure_odpg + (dev_num, access_type, interface_num, direction, + pattern_table[pattern].num_of_phases_tx, tx_burst_size, + pattern_table[pattern].num_of_phases_rx, + delay_between_burst, rd_mode, effective_cs, STRESS_NONE, + DURATION_SINGLE)); + reg_data = (direction == OPER_READ) ? 0 : (0x3 << 30); + reg_data |= (direction == OPER_READ) ? 0x60 : 0xfa; + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + ODPG_WR_RD_MODE_ENA_REG, reg_data, + MASK_ALL_BITS)); + reg_data = (edge_comp == EDGE_PF || edge_comp == EDGE_FP) ? 0 : 1 << 6; + reg_data |= (edge_comp == EDGE_PF || edge_comp == EDGE_PFP) ? 
+ (1 << 7) : 0; + + /* change from Pass to Fail will lock the result */ + if (pup_access_type == ACCESS_TYPE_MULTICAST) + reg_data |= 0xe << 14; + else + reg_data |= pup_num << 14; + + if (edge_comp == EDGE_FP) { + /* don't search for readl edge change, only the state */ + reg_data |= (0 << 20); + } else if (edge_comp == EDGE_FPF) { + reg_data |= (0 << 20); + } else { + reg_data |= (3 << 20); + } + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + GENERAL_TRAINING_OPCODE_REG, + reg_data | (0x7 << 8) | (0x7 << 11), + (0x3 | (0x3 << 2) | (0x3 << 6) | (1 << 5) | (0x7 << 8) | + (0x7 << 11) | (0xf << 14) | (0x3 << 18) | (3 << 20)))); + reg_data = (search_dir == HWS_LOW2HIGH) ? 0 : (1 << 8); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, OPCODE_REG0_REG(1), + 1 | reg_data | init_value << 9 | (1 << 25) | (1 << 26), + 0xff | (1 << 8) | (0xffff << 9) | (1 << 25) | (1 << 26))); + + /* + * Write2_dunit(0x10b4, Number_iteration , [15:0]) + * Max number of iterations + */ + CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, interface_num, + OPCODE_REG1_REG(1), num_iter, + 0xffff)); + if (control_element == HWS_CONTROL_ELEMENT_DQ_SKEW && + direction == OPER_READ) { + /* + * Write2_dunit(0x10c0, 0x5f , [7:0]) + * MC PBS Reg Address at DDR PHY + */ + reg_data = PBS_RX_BCAST_PHY_REG(effective_cs); + } else if (control_element == HWS_CONTROL_ELEMENT_DQ_SKEW && + direction == OPER_WRITE) { + reg_data = PBS_TX_BCAST_PHY_REG(effective_cs); + } else if (control_element == HWS_CONTROL_ELEMENT_ADLL && + direction == OPER_WRITE) { + /* + * LOOP 0x00000001 + 4*n: + * where n (0-3) represents M_CS number + */ + /* + * Write2_dunit(0x10c0, 0x1 , [7:0]) + * ADLL WR Reg Address at DDR PHY + */ + reg_data = CTX_PHY_REG(effective_cs); + } else if (control_element == HWS_CONTROL_ELEMENT_ADLL && + direction == OPER_READ) { + /* ADLL RD Reg Address at DDR PHY */ + reg_data = CRX_PHY_REG(effective_cs); + } else if (control_element == HWS_CONTROL_ELEMENT_DQS_SKEW && + direction == OPER_WRITE) { + /* TBD not defined in 0.5.0 requirement */ + } else if (control_element == HWS_CONTROL_ELEMENT_DQS_SKEW && + direction == OPER_READ) { + /* TBD not defined in 0.5.0 requirement */ + } + + reg_data |= (0x6 << 28); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, CAL_PHY_REG(1), + reg_data | (init_value << 8), + 0xff | (0xffff << 8) | (0xf << 24) | (u32) (0xf << 28))); + + mask_dq_num_of_regs = octets_per_if_num * BUS_WIDTH_IN_BITS; + mask_pup_num_of_regs = octets_per_if_num; + + if (result_type == RESULT_PER_BIT) { + for (index_cnt = 0; index_cnt < mask_dq_num_of_regs; + index_cnt++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + mask_results_dq_reg_map[index_cnt], 0, + 1 << 24)); + } + + /* Mask disabled buses */ + for (pup_id = 0; pup_id < octets_per_if_num; + pup_id++) { + if (IS_BUS_ACTIVE(tm->bus_act_mask, pup_id) == 1) + continue; + + for (index_cnt = (pup_id * 8); index_cnt < (pup_id + 1) * 8; index_cnt++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, + interface_num, + mask_results_dq_reg_map + [index_cnt], (1 << 24), 1 << 24)); + } + } + + for (index_cnt = 0; index_cnt < mask_pup_num_of_regs; + index_cnt++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + mask_results_pup_reg_map[index_cnt], + (1 << 24), 1 << 24)); + } + } else if (result_type == RESULT_PER_BYTE) { + /* write to adll */ + for (index_cnt = 0; index_cnt < mask_pup_num_of_regs; + index_cnt++) { + 
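+			/* unmask the per-byte (pup) result registers; bit 24 set means the result is masked out */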
CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + mask_results_pup_reg_map[index_cnt], 0, + 1 << 24)); + } + for (index_cnt = 0; index_cnt < mask_dq_num_of_regs; + index_cnt++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, interface_num, + mask_results_dq_reg_map[index_cnt], + (1 << 24), (1 << 24))); + } + } + + /* trigger training */ + mv_ddr_training_enable(); + + /* wa for 16-bit mode: wait for all rfu tests to finish or timeout */ + mdelay(1); + + /* check for training done */ + if (mv_ddr_is_training_done(MAX_POLLING_ITERATIONS, &data) != MV_OK) { + train_status[0] = HWS_TRAINING_IP_STATUS_TIMEOUT; + } else { /* training done; check for pass */ + if (data == PASS) + train_status[0] = HWS_TRAINING_IP_STATUS_SUCCESS; + else + train_status[0] = HWS_TRAINING_IP_STATUS_FAIL; + } + + ddr3_tip_if_write(0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS); + + return MV_OK; +} + +/* + * Load expected Pattern to ODPG + */ +int ddr3_tip_load_pattern_to_odpg(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum hws_pattern pattern, + u32 load_addr) +{ + u32 pattern_length_cnt = 0; + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (pattern_length_cnt = 0; + pattern_length_cnt < pattern_table[pattern].pattern_len; + pattern_length_cnt++) { /* FIXME: the ecc patch below is only for a7040 A0 */ + if (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask)/* || tm->bus_act_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK*/) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + ODPG_DATA_WR_DATA_LOW_REG, + pattern_table_get_word(dev_num, pattern, + (u8) (pattern_length_cnt)), + MASK_ALL_BITS)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + ODPG_DATA_WR_DATA_HIGH_REG, + pattern_table_get_word(dev_num, pattern, + (u8) (pattern_length_cnt)), + MASK_ALL_BITS)); + } else { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + ODPG_DATA_WR_DATA_LOW_REG, + pattern_table_get_word(dev_num, pattern, + (u8) (pattern_length_cnt * 2)), + MASK_ALL_BITS)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + ODPG_DATA_WR_DATA_HIGH_REG, + pattern_table_get_word(dev_num, pattern, + (u8) (pattern_length_cnt * 2 + 1)), + MASK_ALL_BITS)); + } + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + ODPG_DATA_WR_ADDR_REG, pattern_length_cnt, + MASK_ALL_BITS)); + } + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, access_type, if_id, + ODPG_DATA_BUFFER_OFFS_REG, load_addr, MASK_ALL_BITS)); + + return MV_OK; +} + +/* + * Configure ODPG + */ +int ddr3_tip_configure_odpg(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum hws_dir direction, u32 tx_phases, + u32 tx_burst_size, u32 rx_phases, + u32 delay_between_burst, u32 rd_mode, u32 cs_num, + u32 addr_stress_jump, u32 single_pattern) +{ + u32 data_value = 0; + int ret; + + data_value = ((single_pattern << 2) | (tx_phases << 5) | + (tx_burst_size << 11) | (delay_between_burst << 15) | + (rx_phases << 21) | (rd_mode << 25) | (cs_num << 26) | + (addr_stress_jump << 29)); + ret = ddr3_tip_if_write(dev_num, access_type, if_id, + ODPG_DATA_CTRL_REG, data_value, 0xaffffffc); + if (ret != MV_OK) + return ret; + + return MV_OK; +} + +int ddr3_tip_process_result(u32 *ar_result, enum hws_edge e_edge, + enum hws_edge_search e_edge_search, + u32 *edge_result) +{ + u32 i, res; + int tap_val, max_val = -10000, min_val = 10000; + int lock_success = 1; + + for (i = 0; 
i < BUS_WIDTH_IN_BITS; i++) { + res = GET_LOCK_RESULT(ar_result[i]); + if (res == 0) { + lock_success = 0; + break; + } + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("lock failed for bit %d\n", i)); + } + + if (lock_success == 1) { + for (i = 0; i < BUS_WIDTH_IN_BITS; i++) { + tap_val = GET_TAP_RESULT(ar_result[i], e_edge); + if (tap_val > max_val) + max_val = tap_val; + if (tap_val < min_val) + min_val = tap_val; + if (e_edge_search == TRAINING_EDGE_MAX) + *edge_result = (u32) max_val; + else + *edge_result = (u32) min_val; + + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("i %d ar_result[i] 0x%x tap_val %d max_val %d min_val %d Edge_result %d\n", + i, ar_result[i], tap_val, + max_val, min_val, + *edge_result)); + } + } else { + return MV_FAIL; + } + + return MV_OK; +} + +/* + * Read training search result + */ +int ddr3_tip_read_training_result(u32 dev_num, u32 if_id, + enum hws_access_type pup_access_type, + u32 pup_num, u32 bit_num, + enum hws_search_dir search, + enum hws_dir direction, + enum hws_training_result result_type, + enum hws_training_load_op operation, + u32 cs_num_type, u32 **load_res, + int is_read_from_db, u8 cons_tap, + int is_check_result_validity) +{ + u32 reg_offset, pup_cnt, start_pup, end_pup, start_reg, end_reg; + u32 *interface_train_res = NULL; + u16 *reg_addr = NULL; + u32 read_data[MAX_INTERFACE_NUM]; + u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* + * Agreed assumption: all CS mask contain same number of bits, + * i.e. in multi CS, the number of CS per memory is the same for + * all pups + */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, DUAL_DUNIT_CFG_REG, + (cs_num_type == 0) ? 
1 << 3 : 0, (1 << 3))); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ODPG_DATA_CTRL_REG, (cs_num_type << 26), (3 << 26))); + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_TRACE, + ("Read_from_d_b %d cs_type %d oper %d result_type %d direction %d search %d pup_num %d if_id %d pup_access_type %d\n", + is_read_from_db, cs_num_type, operation, + result_type, direction, search, pup_num, + if_id, pup_access_type)); + + if ((load_res == NULL) && (is_read_from_db == 1)) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("ddr3_tip_read_training_result load_res = NULL")); + return MV_FAIL; + } + if (pup_num >= octets_per_if_num) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("pup_num %d not valid\n", pup_num)); + } + if (if_id >= MAX_INTERFACE_NUM) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("if_id %d not valid\n", if_id)); + } + if (result_type == RESULT_PER_BIT) + reg_addr = mask_results_dq_reg_map; + else + reg_addr = mask_results_pup_reg_map; + if (pup_access_type == ACCESS_TYPE_UNICAST) { + start_pup = pup_num; + end_pup = pup_num; + } else { /*pup_access_type == ACCESS_TYPE_MULTICAST) */ + + start_pup = 0; + end_pup = octets_per_if_num - 1; + } + + for (pup_cnt = start_pup; pup_cnt <= end_pup; pup_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup_cnt); + DEBUG_TRAINING_IP_ENGINE( + DEBUG_LEVEL_TRACE, + ("if_id %d start_pup %d end_pup %d pup_cnt %d\n", + if_id, start_pup, end_pup, pup_cnt)); + if (result_type == RESULT_PER_BIT) { + if (bit_num == ALL_BITS_PER_PUP) { + start_reg = pup_cnt * BUS_WIDTH_IN_BITS; + end_reg = (pup_cnt + 1) * BUS_WIDTH_IN_BITS - 1; + } else { + start_reg = + pup_cnt * BUS_WIDTH_IN_BITS + bit_num; + end_reg = pup_cnt * BUS_WIDTH_IN_BITS + bit_num; + } + } else { + start_reg = pup_cnt; + end_reg = pup_cnt; + } + + interface_train_res = + ddr3_tip_get_buf_ptr(dev_num, search, result_type, + if_id); + DEBUG_TRAINING_IP_ENGINE( + DEBUG_LEVEL_TRACE, + ("start_reg %d end_reg %d interface %p\n", + start_reg, end_reg, interface_train_res)); + if (interface_train_res == NULL) { + DEBUG_TRAINING_IP_ENGINE( + DEBUG_LEVEL_ERROR, + ("interface_train_res is NULL\n")); + return MV_FAIL; + } + + for (reg_offset = start_reg; reg_offset <= end_reg; + reg_offset++) { + if (operation == TRAINING_LOAD_OPERATION_UNLOAD) { + if (is_read_from_db == 0) { + CHECK_STATUS(ddr3_tip_if_read + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + reg_addr[reg_offset], + read_data, + MASK_ALL_BITS)); + if (is_check_result_validity == 1) { + if ((read_data[if_id] & + TIP_ENG_LOCK) == 0) { + interface_train_res + [reg_offset] = + TIP_ENG_LOCK + + TIP_TX_DLL_RANGE_MAX; + } else { + interface_train_res + [reg_offset] = + read_data + [if_id] + + cons_tap; + } + } else { + interface_train_res[reg_offset] + = read_data[if_id] + + cons_tap; + } + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("reg_offset %d value 0x%x addr %p\n", + reg_offset, + interface_train_res + [reg_offset], + &interface_train_res + [reg_offset])); + } else { + *load_res = + &interface_train_res[start_reg]; + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("*load_res %p\n", *load_res)); + } + } else { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_TRACE, + ("not supported\n")); + } + } + } + + return MV_OK; +} + +/* + * Load all pattern to memory using ODPG + */ +int ddr3_tip_load_all_pattern_to_mem(u32 dev_num) +{ + u32 pattern = 0, if_id; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + 
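+		/* mark every active interface as passed before the pattern load begins */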
training_result[training_stage][if_id] = TEST_SUCCESS; + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* enable single cs */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3))); + } + + for (pattern = 0; pattern < PATTERN_LAST; pattern++) { + if (pattern == PATTERN_TEST) + continue; + ddr3_tip_load_pattern_to_mem(dev_num, pattern); + } + + return MV_OK; +} + +/* + * Load specific pattern to memory using ODPG + */ +int ddr3_tip_load_pattern_to_mem(u32 dev_num, enum hws_pattern pattern) +{ + u32 reg_data, if_id; + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* load pattern to memory */ + /* + * Write Tx mode, CS0, phases, Tx burst size, delay between burst, + * rx pattern phases + */ + reg_data = + 0x1 | (pattern_table[pattern].num_of_phases_tx << 5) | + (pattern_table[pattern].tx_burst_size << 11) | + (pattern_table[pattern].delay_between_bursts << 15) | + (pattern_table[pattern].num_of_phases_rx << 21) | (0x1 << 25) | + (effective_cs << 26); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, reg_data, MASK_ALL_BITS)); + /* ODPG Write enable from BIST */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, (0x1 | (effective_cs << 26)), + 0xc000003)); + /* disable error injection */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_WR_DATA_ERR_REG, 0, 0x1)); + /* load pattern to ODPG */ + ddr3_tip_load_pattern_to_odpg(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, pattern, + pattern_table[pattern].start_addr); + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) >= MV_TIP_REV_3) { + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + SDRAM_ODT_CTRL_HIGH_REG, + 0x3, 0xf)); + } + + mv_ddr_odpg_enable(); + } else { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, (u32)(0x1 << 31), + (u32)(0x1 << 31))); + } + mdelay(1); + + if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK) + return MV_FAIL; + + /* Disable ODPG and stop write to memory */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, (0x1 << 30), (u32) (0x3 << 30))); + + /* return to default */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS)); + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) >= MV_TIP_REV_3) { + /* Disable odt0 for CS0 training - need to adjust for multy CS */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + SDRAM_ODT_CTRL_HIGH_REG, 0x0, 0xf)); + } + /* temporary added */ + mdelay(1); + + return MV_OK; +} + +/* + * Training search routine + */ +int ddr3_tip_ip_training_wrapper_int(u32 dev_num, + enum hws_access_type access_type, + u32 if_id, + enum hws_access_type pup_access_type, + u32 pup_num, u32 bit_num, + enum hws_training_result result_type, + enum hws_control_element control_element, + enum hws_search_dir search_dir, + enum hws_dir direction, + u32 interface_mask, u32 init_value_l2h, + u32 init_value_h2l, u32 num_iter, + enum hws_pattern pattern, + enum hws_edge_compare edge_comp, + 
enum hws_ddr_cs train_cs_type, u32 cs_num, + enum hws_training_ip_stat *train_status) +{ + u32 interface_num = 0, start_if, end_if, init_value_used; + enum hws_search_dir search_dir_id, start_search, end_search; + enum hws_edge_compare edge_comp_used; + u8 cons_tap = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (train_status == NULL) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("train_status is NULL\n")); + return MV_FAIL; + } + + if ((train_cs_type > CS_NON_SINGLE) || + (edge_comp >= EDGE_PFP) || + (pattern >= PATTERN_LAST) || + (direction > OPER_WRITE_AND_READ) || + (search_dir > HWS_HIGH2LOW) || + (control_element > HWS_CONTROL_ELEMENT_DQS_SKEW) || + (result_type > RESULT_PER_BYTE) || + (pup_num >= octets_per_if_num) || + (pup_access_type > ACCESS_TYPE_MULTICAST) || + (if_id > 11) || (access_type > ACCESS_TYPE_MULTICAST)) { + DEBUG_TRAINING_IP_ENGINE( + DEBUG_LEVEL_ERROR, + ("wrong parameter train_cs_type %d edge_comp %d pattern %d direction %d search_dir %d control_element %d result_type %d pup_num %d pup_access_type %d if_id %d access_type %d\n", + train_cs_type, edge_comp, pattern, direction, + search_dir, control_element, result_type, pup_num, + pup_access_type, if_id, access_type)); + return MV_FAIL; + } + + if (edge_comp == EDGE_FPF) { + start_search = HWS_LOW2HIGH; + end_search = HWS_HIGH2LOW; + edge_comp_used = EDGE_FP; + } else { + start_search = search_dir; + end_search = search_dir; + edge_comp_used = edge_comp; + } + + for (search_dir_id = start_search; search_dir_id <= end_search; + search_dir_id++) { + init_value_used = (search_dir_id == HWS_LOW2HIGH) ? + init_value_l2h : init_value_h2l; + DEBUG_TRAINING_IP_ENGINE( + DEBUG_LEVEL_TRACE, + ("dev_num %d, access_type %d, if_id %d, pup_access_type %d,pup_num %d, result_type %d, control_element %d search_dir_id %d, direction %d, interface_mask %d,init_value_used %d, num_iter %d, pattern %d, edge_comp_used %d, train_cs_type %d, cs_num %d\n", + dev_num, access_type, if_id, pup_access_type, pup_num, + result_type, control_element, search_dir_id, + direction, interface_mask, init_value_used, num_iter, + pattern, edge_comp_used, train_cs_type, cs_num)); + + ddr3_tip_ip_training(dev_num, access_type, if_id, + pup_access_type, pup_num, result_type, + control_element, search_dir_id, direction, + interface_mask, init_value_used, num_iter, + pattern, edge_comp_used, train_cs_type, + cs_num, train_status); + if (access_type == ACCESS_TYPE_MULTICAST) { + start_if = 0; + end_if = MAX_INTERFACE_NUM - 1; + } else { + start_if = if_id; + end_if = if_id; + } + + for (interface_num = start_if; interface_num <= end_if; + interface_num++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, interface_num); + cs_num = 0; + CHECK_STATUS(ddr3_tip_read_training_result + (dev_num, interface_num, pup_access_type, + pup_num, bit_num, search_dir_id, + direction, result_type, + TRAINING_LOAD_OPERATION_UNLOAD, + train_cs_type, NULL, 0, cons_tap, + 0)); + } + } + + return MV_OK; +} +/* + * Training search & read result routine + * This function implements the search algorithm + * first it calls the function ddr3_tip_ip_training_wrapper_int which triggers the search from l2h and h2l + * this function handles rx and tx search cases + * in case of rx it only triggers the search (l2h and h2l) + * in case of tx there are 3 optional algorithm phases: + * phase 1: + * it first triggers the search and handles the results as following (phase 1): + * each bit, 
which is defined by the two search edges (e1, or VW_L, and e2, or VW_H), matches one of the following cases:
+ * 1. BIT_LOW_UI 0 <= VW <= 31 in case of jitter use: VW_L <= 31, VW_H <= 31
+ * 2. BIT_HIGH_UI 32 <= VW <= 63 in case of jitter use: VW_L >= 32, VW_H >= 32
+ * 3. BIT_SPLIT_IN VW_L <= 31 & VW_H >= 32
+ * 4. BIT_SPLIT_OUT* VW_H < 32 & VW_L > 32
+ * note: the VW unit is ADLL taps
+ * phase 2:
+ * only the BIT_SPLIT_OUT case requires another search (phase 2) from the middle of the range, in two directions, h2l and l2h,
+ * because only this case is not locked by the search engine in the first search trigger (phase 1).
+ * phase 3:
+ * each subphy is categorized according to the states of its bits.
+ * the sub-phy cases are as follows:
+ * 1. BYTE_NOT_DEFINED the byte has not yet been categorized
+ * 2. BYTE_HOMOGENEOUS_LOW 0 <= VW <= 31
+ * 3. BYTE_HOMOGENEOUS_HIGH 32 <= VW <= 63
+ * 4. BYTE_HOMOGENEOUS_SPLIT_IN VW_L <= 31 & VW_H >= 32
+ * or the center of all bits in the byte <= 31
+ * 5. BYTE_HOMOGENEOUS_SPLIT_OUT VW_H < 32 & VW_L > 32
+ * 6. BYTE_SPLIT_OUT_MIX at least one bit is in the split-out state and another bit is in some other state,
+ * or the center of all bits in the byte >= 32
+ * after the two phases above, a center valid window is calculated for each subphy:
+ * center valid window = maximum center of all bits in the subphy - minimum center of all bits in the subphy.
+ * decisions are then made in each subphy as follows:
+ * all subphys which are homogeneous remain as is;
+ * all subphys which are homogeneous low or homogeneous high, and whose center valid window is less than 32,
+ * are marked as homogeneous split in.
+ * the bits in the bytes which are BYTE_SPLIT_OUT_MIX then need to be reorganized and are handled as follows:
+ * 64 ADLL taps are added to all bits which are BIT_LOW_UI;
+ * this should ensure that all the bits in the subphy can be sampled by the DQS
+ */
+int ddr3_tip_ip_training_wrapper(u32 dev_num, enum hws_access_type access_type,
+ u32 if_id,
+ enum hws_access_type pup_access_type,
+ u32 pup_num,
+ enum hws_training_result result_type,
+ enum hws_control_element control_element,
+ enum hws_search_dir search_dir,
+ enum hws_dir direction, u32 interface_mask,
+ u32 init_value_l2h, u32 init_value_h2l,
+ u32 num_iter, enum hws_pattern pattern,
+ enum hws_edge_compare edge_comp,
+ enum hws_ddr_cs train_cs_type, u32 cs_num,
+ enum hws_training_ip_stat *train_status)
+{
+ u8 e1, e2;
+ u32 bit_id, start_if, end_if, bit_end = 0;
+ u32 *result[HWS_SEARCH_DIR_LIMIT] = { 0 };
+ u8 cons_tap = (direction == OPER_WRITE) ?
(64) : (0); + u8 bit_bit_mask[MAX_BUS_NUM] = { 0 }, bit_bit_mask_active = 0; + u8 bit_state[MAX_BUS_NUM * BUS_WIDTH_IN_BITS] = {0}; + u8 h2l_adll_value[MAX_BUS_NUM][BUS_WIDTH_IN_BITS]; + u8 l2h_adll_value[MAX_BUS_NUM][BUS_WIDTH_IN_BITS]; + u8 center_subphy_adll_window[MAX_BUS_NUM]; + u8 min_center_subphy_adll[MAX_BUS_NUM]; + u8 max_center_subphy_adll[MAX_BUS_NUM]; + u32 *l2h_if_train_res = NULL; + u32 *h2l_if_train_res = NULL; + enum hws_search_dir search_dir_id; + int status; + u32 bit_lock_result; + + u8 sybphy_id; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (pup_num >= octets_per_if_num) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("pup_num %d not valid\n", pup_num)); + } + + if (if_id >= MAX_INTERFACE_NUM) { + DEBUG_TRAINING_IP_ENGINE(DEBUG_LEVEL_ERROR, + ("if_id %d not valid\n", if_id)); + } + + status = ddr3_tip_ip_training_wrapper_int + (dev_num, access_type, if_id, pup_access_type, pup_num, + ALL_BITS_PER_PUP, result_type, control_element, + search_dir, direction, interface_mask, init_value_l2h, + init_value_h2l, num_iter, pattern, edge_comp, + train_cs_type, cs_num, train_status); + + if (MV_OK != status) + return status; + + if (access_type == ACCESS_TYPE_MULTICAST) { + start_if = 0; + end_if = MAX_INTERFACE_NUM - 1; + } else { + start_if = if_id; + end_if = if_id; + } + + for (if_id = start_if; if_id <= end_if; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* zero the database */ + bit_bit_mask_active = 0; /* clean the flag for level2 search */ + memset(bit_state, 0, sizeof(bit_state)); + /* phase 1 */ + for (sybphy_id = 0; sybphy_id < octets_per_if_num; sybphy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sybphy_id); + if (result_type == RESULT_PER_BIT) + bit_end = BUS_WIDTH_IN_BITS; + else + bit_end = 0; + + /* zero the data base */ + bit_bit_mask[sybphy_id] = 0; + byte_status[if_id][sybphy_id] = BYTE_NOT_DEFINED; + for (bit_id = 0; bit_id < bit_end; bit_id++) { + h2l_adll_value[sybphy_id][bit_id] = 64; + l2h_adll_value[sybphy_id][bit_id] = 0; + for (search_dir_id = HWS_LOW2HIGH; search_dir_id <= HWS_HIGH2LOW; + search_dir_id++) { + status = ddr3_tip_read_training_result + (dev_num, if_id, + ACCESS_TYPE_UNICAST, sybphy_id, bit_id, + search_dir_id, direction, result_type, + TRAINING_LOAD_OPERATION_UNLOAD, CS_SINGLE, + &result[search_dir_id], 1, 0, 0); + + if (MV_OK != status) + return status; + } + + e1 = GET_TAP_RESULT(result[HWS_LOW2HIGH][0], EDGE_1); + e2 = GET_TAP_RESULT(result[HWS_HIGH2LOW][0], EDGE_1); + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_INFO, + ("if_id %d sybphy_id %d bit %d l2h 0x%x (e1 0x%x) h2l 0x%x (e2 0x%x)\n", + if_id, sybphy_id, bit_id, result[HWS_LOW2HIGH][0], e1, + result[HWS_HIGH2LOW][0], e2)); + bit_lock_result = + (GET_LOCK_RESULT(result[HWS_LOW2HIGH][0]) && + GET_LOCK_RESULT(result[HWS_HIGH2LOW][0])); + + if (bit_lock_result) { + /* in case of read operation set the byte status as homogeneous low */ + if (direction == OPER_READ) { + byte_status[if_id][sybphy_id] |= BYTE_HOMOGENEOUS_LOW; + } else if ((e2 - e1) > 32) { /* oper_write */ + /* split out */ + bit_state[sybphy_id * BUS_WIDTH_IN_BITS + bit_id] = + BIT_SPLIT_OUT; + byte_status[if_id][sybphy_id] |= BYTE_HOMOGENEOUS_SPLIT_OUT; + /* mark problem bits */ + bit_bit_mask[sybphy_id] |= (1 << bit_id); + bit_bit_mask_active = 1; + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d bit %d BIT_SPLIT_OUT\n", + if_id, sybphy_id, bit_id)); + } else { + /* 
low ui */ + if (e1 <= 31 && e2 <= 31) { + bit_state[sybphy_id * BUS_WIDTH_IN_BITS + bit_id] = + BIT_LOW_UI; + byte_status[if_id][sybphy_id] |= BYTE_HOMOGENEOUS_LOW; + l2h_adll_value[sybphy_id][bit_id] = e1; + h2l_adll_value[sybphy_id][bit_id] = e2; + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d bit %d BIT_LOW_UI\n", + if_id, sybphy_id, bit_id)); + } + /* high ui */ + if (e1 >= 32 && e2 >= 32) { + bit_state[sybphy_id * BUS_WIDTH_IN_BITS + bit_id] = + BIT_HIGH_UI; + byte_status[if_id][sybphy_id] |= BYTE_HOMOGENEOUS_HIGH; + l2h_adll_value[sybphy_id][bit_id] = e1; + h2l_adll_value[sybphy_id][bit_id] = e2; + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d bit %d BIT_HIGH_UI\n", + if_id, sybphy_id, bit_id)); + } + /* split in */ + if (e1 <= 31 && e2 >= 32) { + bit_state[sybphy_id * BUS_WIDTH_IN_BITS + bit_id] = + BIT_SPLIT_IN; + byte_status[if_id][sybphy_id] |= + BYTE_HOMOGENEOUS_SPLIT_IN; + l2h_adll_value[sybphy_id][bit_id] = e1; + h2l_adll_value[sybphy_id][bit_id] = e2; + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d bit %d BIT_SPLIT_IN\n", + if_id, sybphy_id, bit_id)); + } + } + } else { + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_INFO, + ("if_id %d sybphy_id %d bit %d l2h 0x%x (e1 0x%x)" + "h2l 0x%x (e2 0x%x): bit cannot be categorized\n", + if_id, sybphy_id, bit_id, result[HWS_LOW2HIGH][0], e1, + result[HWS_HIGH2LOW][0], e2)); + /* mark the byte as not defined */ + byte_status[if_id][sybphy_id] = BYTE_NOT_DEFINED; + break; /* continue to next pup - no reason to analyze this byte */ + } + } /* for all bits */ + } /* for all PUPs */ + + /* phase 2 will occur only in write operation */ + if (bit_bit_mask_active != 0) { + l2h_if_train_res = ddr3_tip_get_buf_ptr(dev_num, HWS_LOW2HIGH, result_type, if_id); + h2l_if_train_res = ddr3_tip_get_buf_ptr(dev_num, HWS_HIGH2LOW, result_type, if_id); + /* search from middle to end */ + ddr3_tip_ip_training + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, result_type, + control_element, HWS_LOW2HIGH, + direction, interface_mask, + num_iter / 2, num_iter / 2, + pattern, EDGE_FP, train_cs_type, + cs_num, train_status); + + for (sybphy_id = 0; sybphy_id < octets_per_if_num; sybphy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sybphy_id); + if (byte_status[if_id][sybphy_id] != BYTE_NOT_DEFINED) { + if (bit_bit_mask[sybphy_id] == 0) + continue; /* this byte bits have no split out state */ + + for (bit_id = 0; bit_id < bit_end; bit_id++) { + if ((bit_bit_mask[sybphy_id] & (1 << bit_id)) == 0) + continue; /* this bit is non split goto next bit */ + + /* enter the result to the data base */ + status = ddr3_tip_read_training_result + (dev_num, if_id, ACCESS_TYPE_UNICAST, sybphy_id, + bit_id, HWS_LOW2HIGH, direction, result_type, + TRAINING_LOAD_OPERATION_UNLOAD, CS_SINGLE, + &l2h_if_train_res, 0, 0, 1); + + if (MV_OK != status) + return status; + + l2h_adll_value[sybphy_id][bit_id] = + l2h_if_train_res[sybphy_id * + BUS_WIDTH_IN_BITS + bit_id] & PUP_RESULT_EDGE_1_MASK; + } + } + } + /* Search from middle to start */ + ddr3_tip_ip_training + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, result_type, + control_element, HWS_HIGH2LOW, + direction, interface_mask, + num_iter / 2, num_iter / 2, + pattern, EDGE_FP, train_cs_type, + cs_num, train_status); + + for (sybphy_id = 0; sybphy_id < octets_per_if_num; sybphy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sybphy_id); + if (byte_status[if_id][sybphy_id] != 
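/* illustrative note: phase 2 above re-arms the training engine from the middle of the tap range (init value num_iter / 2) once per direction, l2h and then h2l, so split-out bits that could not lock in phase 1 get a second chance */ 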
BYTE_NOT_DEFINED) { + if (bit_bit_mask[sybphy_id] == 0) + continue; + + for (bit_id = 0; bit_id < bit_end; bit_id++) { + if ((bit_bit_mask[sybphy_id] & (1 << bit_id)) == 0) + continue; + + status = ddr3_tip_read_training_result + (dev_num, if_id, ACCESS_TYPE_UNICAST, sybphy_id, + bit_id, HWS_HIGH2LOW, direction, result_type, + TRAINING_LOAD_OPERATION_UNLOAD, CS_SINGLE, + &h2l_if_train_res, 0, cons_tap, 1); + + if (MV_OK != status) + return status; + + h2l_adll_value[sybphy_id][bit_id] = + h2l_if_train_res[sybphy_id * + BUS_WIDTH_IN_BITS + bit_id] & PUP_RESULT_EDGE_1_MASK; + } + } + } + } /* end if bit_bit_mask_active */ + /* + * phase 3 will occur only in write operation + * find the maximum and the minimum center of each subphy + */ + for (sybphy_id = 0; sybphy_id < octets_per_if_num; sybphy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sybphy_id); + + if ((byte_status[if_id][sybphy_id] != BYTE_NOT_DEFINED) && (direction == OPER_WRITE)) { + /* clear the arrays and parameters */ + center_subphy_adll_window[sybphy_id] = 0; + max_center_subphy_adll[sybphy_id] = 0; + min_center_subphy_adll[sybphy_id] = 64; + /* find the max and min center adll value in the current subphy */ + for (bit_id = 0; bit_id < bit_end; bit_id++) { + /* debug print all the bit edges after alignment */ + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d bit %d l2h %d h2l %d\n", + if_id, sybphy_id, bit_id, l2h_adll_value[sybphy_id][bit_id], + h2l_adll_value[sybphy_id][bit_id])); + + if (((l2h_adll_value[sybphy_id][bit_id] + + h2l_adll_value[sybphy_id][bit_id]) / 2) > + max_center_subphy_adll[sybphy_id]) + max_center_subphy_adll[sybphy_id] = + (l2h_adll_value[sybphy_id][bit_id] + + h2l_adll_value[sybphy_id][bit_id]) / 2; + if (((l2h_adll_value[sybphy_id][bit_id] + + h2l_adll_value[sybphy_id][bit_id]) / 2) < + min_center_subphy_adll[sybphy_id]) + min_center_subphy_adll[sybphy_id] = + (l2h_adll_value[sybphy_id][bit_id] + + h2l_adll_value[sybphy_id][bit_id]) / 2; + } + + /* calculate the center of the current subphy */ + center_subphy_adll_window[sybphy_id] = + max_center_subphy_adll[sybphy_id] - + min_center_subphy_adll[sybphy_id]; + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d min center %d max center %d center %d\n", + if_id, sybphy_id, min_center_subphy_adll[sybphy_id], + max_center_subphy_adll[sybphy_id], + center_subphy_adll_window[sybphy_id])); + } + } + /* + * check byte state and fix bits state if needed + * in case the level 1 and 2 above subphy results are + * homogeneous continue to the next subphy + */ + for (sybphy_id = 0; sybphy_id < octets_per_if_num; sybphy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sybphy_id); + if ((byte_status[if_id][sybphy_id] == BYTE_HOMOGENEOUS_LOW) || + (byte_status[if_id][sybphy_id] == BYTE_HOMOGENEOUS_HIGH) || + (byte_status[if_id][sybphy_id] == BYTE_HOMOGENEOUS_SPLIT_IN) || + (byte_status[if_id][sybphy_id] == BYTE_HOMOGENEOUS_SPLIT_OUT) || + (byte_status[if_id][sybphy_id] == BYTE_NOT_DEFINED)) + continue; + + /* + * in case all of the bits in the current subphy are + * less than 32 which will find alignment in the subphy bits + * mark this subphy as homogeneous split in + */ + if (center_subphy_adll_window[sybphy_id] <= 31) + byte_status[if_id][sybphy_id] = BYTE_HOMOGENEOUS_SPLIT_IN; + + /* + * in case the current byte is split_out and the center is bigger than 31 + * the byte can be aligned. 
in this case add 64 to the the low ui bits aligning it + * to the other ui bits + */ + if (center_subphy_adll_window[sybphy_id] >= 32) { + byte_status[if_id][sybphy_id] = BYTE_SPLIT_OUT_MIX; + + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d byte state 0x%x\n", + if_id, sybphy_id, byte_status[if_id][sybphy_id])); + for (bit_id = 0; bit_id < bit_end; bit_id++) { + if (bit_state[sybphy_id * BUS_WIDTH_IN_BITS + bit_id] == BIT_LOW_UI) { + l2h_if_train_res[sybphy_id * BUS_WIDTH_IN_BITS + bit_id] += 64; + h2l_if_train_res[sybphy_id * BUS_WIDTH_IN_BITS + bit_id] += 64; + } + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_TRACE, + ("if_id %d sybphy_id %d bit_id %d added 64 adlls\n", + if_id, sybphy_id, bit_id)); + } + } + } + } /* for all interfaces */ + + return MV_OK; +} + +u8 mv_ddr_tip_sub_phy_byte_status_get(u32 if_id, u32 subphy_id) +{ + return byte_status[if_id][subphy_id]; +} + +void mv_ddr_tip_sub_phy_byte_status_set(u32 if_id, u32 subphy_id, u8 byte_status_data) +{ + byte_status[if_id][subphy_id] = byte_status_data; +} + +/* + * Load phy values + */ +int ddr3_tip_load_phy_values(int b_load) +{ + u32 bus_cnt = 0, if_id, dev_num = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + if (b_load == 1) { + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_cnt, + DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), + &phy_reg_bk[if_id][bus_cnt] + [0])); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_cnt, + DDR_PHY_DATA, + RL_PHY_REG(effective_cs), + &phy_reg_bk[if_id][bus_cnt] + [1])); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, + ACCESS_TYPE_UNICAST, bus_cnt, + DDR_PHY_DATA, + CRX_PHY_REG(effective_cs), + &phy_reg_bk[if_id][bus_cnt] + [2])); + } else { + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + bus_cnt, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), + phy_reg_bk[if_id][bus_cnt] + [0])); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + bus_cnt, DDR_PHY_DATA, + RL_PHY_REG(effective_cs), + phy_reg_bk[if_id][bus_cnt] + [1])); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, + bus_cnt, DDR_PHY_DATA, + CRX_PHY_REG(effective_cs), + phy_reg_bk[if_id][bus_cnt] + [2])); + } + } + } + + return MV_OK; +} + +int ddr3_tip_training_ip_test(u32 dev_num, enum hws_training_result result_type, + enum hws_search_dir search_dir, + enum hws_dir direction, + enum hws_edge_compare edge, + u32 init_val1, u32 init_val2, + u32 num_of_iterations, + u32 start_pattern, u32 end_pattern) +{ + u32 pattern, if_id, pup_id; + enum hws_training_ip_stat train_status[MAX_INTERFACE_NUM]; + u32 *res = NULL; + u32 search_state = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + ddr3_tip_load_phy_values(1); + + for (pattern = start_pattern; pattern <= end_pattern; pattern++) { + for (search_state = 0; search_state < HWS_SEARCH_DIR_LIMIT; + search_state++) { + ddr3_tip_ip_training_wrapper(dev_num, + ACCESS_TYPE_MULTICAST, 0, + ACCESS_TYPE_MULTICAST, 0, + result_type, + HWS_CONTROL_ELEMENT_ADLL, 
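+ /* + * Illustrative note: the ddr3_tip_load_phy_values(1) call above snapshots the CTX, RL and CRX phy registers into phy_reg_bk, and the matching ddr3_tip_load_phy_values(0) call at the end of this function restores them, so the search below may clobber those registers freely. + */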
+ search_dir, direction, + 0xfff, init_val1, + init_val2, + num_of_iterations, pattern, + edge, CS_SINGLE, + PARAM_NOT_CARE, + train_status); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; + if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup_id = 0; pup_id < + octets_per_if_num; + pup_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, + pup_id); + CHECK_STATUS + (ddr3_tip_read_training_result + (dev_num, if_id, + ACCESS_TYPE_UNICAST, pup_id, + ALL_BITS_PER_PUP, + search_state, + direction, result_type, + TRAINING_LOAD_OPERATION_UNLOAD, + CS_SINGLE, &res, 1, 0, + 0)); + if (result_type == RESULT_PER_BYTE) { + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_INFO, + ("search_state %d if_id %d pup_id %d 0x%x\n", + search_state, if_id, + pup_id, res[0])); + } else { + DEBUG_TRAINING_IP_ENGINE + (DEBUG_LEVEL_INFO, + ("search_state %d if_id %d pup_id %d 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", + search_state, if_id, + pup_id, res[0], + res[1], res[2], + res[3], res[4], + res[5], res[6], + res[7])); + } + } + } /* interface */ + } /* search */ + } /* pattern */ + + ddr3_tip_load_phy_values(0); + + return MV_OK; +} + +int mv_ddr_pattern_start_addr_set(struct pattern_info *pattern_tbl, enum hws_pattern pattern, u32 addr) +{ + pattern_tbl[pattern].start_addr = addr; + + return 0; +} + +struct pattern_info *ddr3_tip_get_pattern_table() +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask)) + return pattern_table_64; + else if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 0) + return pattern_table_32; + else + return pattern_table_16; +} + +u16 *ddr3_tip_get_mask_results_dq_reg() +{ +#if MAX_BUS_NUM == 5 + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) + return mask_results_dq_reg_map_pup3_ecc; + else +#endif + return mask_results_dq_reg_map; +} + +u16 *ddr3_tip_get_mask_results_pup_reg_map() +{ +#if MAX_BUS_NUM == 5 + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + if (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) + return mask_results_pup_reg_map_pup3_ecc; + else +#endif + return mask_results_pup_reg_map; +} + +/* load expected dm pattern to odpg */ +#define LOW_NIBBLE_BYTE_MASK 0xf +#define HIGH_NIBBLE_BYTE_MASK 0xf0 +int mv_ddr_load_dm_pattern_to_odpg(enum hws_access_type access_type, enum hws_pattern pattern, + enum dm_direction dm_dir) +{ + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + u32 pattern_len = 0; + u32 data_low, data_high; + u8 dm_data; + + for (pattern_len = 0; + pattern_len < pattern_table[pattern].pattern_len; + pattern_len++) { + if (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask)) { + data_low = pattern_table_get_word(0, pattern, (u8)pattern_len); + data_high = data_low; + } else { + data_low = pattern_table_get_word(0, pattern, (u8)(pattern_len * 2)); + data_high = pattern_table_get_word(0, pattern, (u8)(pattern_len * 2 + 1)); + } + + /* odpg mbus dm definition is opposite to ddr4 protocol */ + if (dm_dir == DM_DIR_INVERSE) + dm_data = ~((data_low & LOW_NIBBLE_BYTE_MASK) | (data_high & HIGH_NIBBLE_BYTE_MASK)); + else + dm_data = (data_low & LOW_NIBBLE_BYTE_MASK) | (data_high & HIGH_NIBBLE_BYTE_MASK); + + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_WR_DATA_LOW_REG, data_low, MASK_ALL_BITS); + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_WR_DATA_HIGH_REG, data_high, MASK_ALL_BITS); + ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_WR_ADDR_REG, + 
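/* + * Worked example for the dm_data math above (input values chosen purely for illustration): data_low = 0xa5 and data_high = 0x3c give dm_data = (0xa5 & 0x0f) | (0x3c & 0xf0) = 0x35 in the direct case, and ~0x35 = 0xca (as a u8) when dm_dir == DM_DIR_INVERSE. + */ +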
pattern_len | ((dm_data & ODPG_DATA_WR_DATA_MASK) << ODPG_DATA_WR_DATA_OFFS), + MASK_ALL_BITS); + } + + return MV_OK; +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_engine.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_engine.h new file mode 100644 index 000000000..2d40e68db --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_engine.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_ENGINE_H_ +#define _DDR3_TRAINING_IP_ENGINE_H_ + +#include "ddr3_training_ip_def.h" +#include "ddr3_training_ip_flow.h" +#include "ddr3_training_ip_pbs.h" + +#define EDGE_1 0 +#define EDGE_2 1 +#define ALL_PUP_TRAINING 0xe +#define PUP_RESULT_EDGE_1_MASK 0xff +#define PUP_RESULT_EDGE_2_MASK (0xff << 8) +#define PUP_LOCK_RESULT_BIT 25 + +#define GET_TAP_RESULT(reg, edge) \ + (((edge) == EDGE_1) ? ((reg) & PUP_RESULT_EDGE_1_MASK) : \ + (((reg) & PUP_RESULT_EDGE_2_MASK) >> 8)); +#define GET_LOCK_RESULT(reg) \ + (((reg) & (1<<PUP_LOCK_RESULT_BIT)) >> PUP_LOCK_RESULT_BIT) + +#define EDGE_FAILURE 128 +#define ALL_BITS_PER_PUP 128 + +#define MIN_WINDOW_SIZE 6 +#define MAX_WINDOW_SIZE_RX 32 +#define MAX_WINDOW_SIZE_TX 64 + +int ddr3_tip_training_ip_test(u32 dev_num, enum hws_training_result result_type, + enum hws_search_dir search_dir, + enum hws_dir direction, + enum hws_edge_compare edge, + u32 init_val1, u32 init_val2, + u32 num_of_iterations, u32 start_pattern, + u32 end_pattern); +int ddr3_tip_load_pattern_to_mem(u32 dev_num, enum hws_pattern pattern); +int ddr3_tip_load_all_pattern_to_mem(u32 dev_num); +int ddr3_tip_read_training_result(u32 dev_num, u32 if_id, + enum hws_access_type pup_access_type, + u32 pup_num, u32 bit_num, + enum hws_search_dir search, + enum hws_dir direction, + enum hws_training_result result_type, + enum hws_training_load_op operation, + u32 cs_num_type, u32 **load_res, + int is_read_from_db, u8 cons_tap, + int is_check_result_validity); +int ddr3_tip_ip_training(u32 dev_num, enum hws_access_type access_type, + u32 interface_num, + enum hws_access_type pup_access_type, + u32 pup_num, enum hws_training_result result_type, + enum hws_control_element control_element, + enum hws_search_dir search_dir, enum hws_dir direction, + u32 interface_mask, u32 init_value, u32 num_iter, + enum hws_pattern pattern, + enum hws_edge_compare edge_comp, + enum hws_ddr_cs cs_type, u32 cs_num, + enum hws_training_ip_stat *train_status); +int ddr3_tip_ip_training_wrapper(u32 dev_num, enum hws_access_type access_type, + u32 if_id, + enum hws_access_type pup_access_type, + u32 pup_num, + enum hws_training_result result_type, + enum hws_control_element control_element, + enum hws_search_dir search_dir, + enum hws_dir direction, + u32 interface_mask, u32 init_value1, + u32 init_value2, u32 num_iter, + enum hws_pattern pattern, + enum hws_edge_compare edge_comp, + enum hws_ddr_cs train_cs_type, u32 cs_num, + enum hws_training_ip_stat *train_status); +u8 mv_ddr_tip_sub_phy_byte_status_get(u32 if_id, u32 subphy_id); +void mv_ddr_tip_sub_phy_byte_status_set(u32 if_id, u32 subphy_id, u8 byte_status_data); +void ddr3_tip_print_bist_res(void); +struct pattern_info *ddr3_tip_get_pattern_table(void); +u16 *ddr3_tip_get_mask_results_dq_reg(void); +u16 *ddr3_tip_get_mask_results_pup_reg_map(void); +int mv_ddr_load_dm_pattern_to_odpg(enum hws_access_type access_type, enum hws_pattern pattern, + enum dm_direction dm_dir); +int mv_ddr_pattern_start_addr_set(struct 
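/* worked example for the result macros above (value chosen for illustration): reg = 0x02001522 gives GET_LOCK_RESULT(reg) = 1 (bit 25 set), GET_TAP_RESULT(reg, EDGE_1) = 0x22 and GET_TAP_RESULT(reg, EDGE_2) = 0x15 */ 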
pattern_info *pattern_tbl, enum hws_pattern pattern, u32 addr); +#endif /* _DDR3_TRAINING_IP_ENGINE_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_flow.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_flow.h new file mode 100644 index 000000000..ab152cb45 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_flow.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_FLOW_H_ +#define _DDR3_TRAINING_IP_FLOW_H_ + +#include "ddr3_training_ip.h" +#include "ddr3_training_ip_db.h" + +#define KILLER_PATTERN_LENGTH 32 +#define EXT_ACCESS_BURST_LENGTH 8 + +#define ECC_READ_BUS_0 0 +#define ECC_PHY_ACCESS_3 3 +#define ECC_PHY_ACCESS_4 4 +#define ECC_PHY_ACCESS_8 8 +#define BUS_WIDTH_IN_BITS 8 +#define MAX_POLLING_ITERATIONS 1000000 +#define ADLL_LENGTH 32 + +#define GP_RSVD0_REG 0x182e0 + +/* + * DFX address Space + * Table 2: DFX address space + * Address Bits Value Description + * [31 : 20] 0x? DFX base address bases PCIe mapping + * [19 : 15] 0...Number_of_client-1 Client Index inside pipe. + * See also Table 1 Multi_cast = 29 Broadcast = 28 + * [14 : 13] 2'b01 Access to Client Internal Register + * [12 : 0] Client Internal Register offset See related Client Registers + * [14 : 13] 2'b00 Access to Ram Wrappers Internal Register + * [12 : 6] 0 Number_of_rams-1 Ram Index inside Client + * [5 : 0] Ram Wrapper Internal Register offset See related Ram Wrappers + * Registers + */ + +/* nsec */ +#define AUTO_ZQC_TIMING 15384 + +enum mr_number { + MR_CMD0, + MR_CMD1, + MR_CMD2, + MR_CMD3, + MR_LAST +}; + +struct mv_ddr_mr_data { + u32 cmd; + u32 reg_addr; +}; + +struct write_supp_result { + enum hws_wl_supp stage; + int is_pup_fail; +}; + +int ddr3_tip_write_leveling_static_config(u32 dev_num, u32 if_id, + enum mv_ddr_freq frequency, + u32 *round_trip_delay_arr); +int ddr3_tip_read_leveling_static_config(u32 dev_num, u32 if_id, + enum mv_ddr_freq frequency, + u32 *total_round_trip_delay_arr); +int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access, + u32 if_id, u32 reg_addr, u32 data_value, u32 mask); +int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type, + u32 if_id, u32 exp_value, u32 mask, u32 offset, + u32 poll_tries); +int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access, + u32 if_id, u32 reg_addr, u32 *data, u32 mask); +int ddr3_tip_bus_read_modify_write(u32 dev_num, + enum hws_access_type access_type, + u32 if_id, u32 phy_id, + enum hws_ddr_phy phy_type, + u32 reg_addr, u32 data_value, u32 reg_mask); +int ddr3_tip_bus_read(u32 dev_num, u32 if_id, enum hws_access_type phy_access, + u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, + u32 *data); +int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type e_interface_access, + u32 if_id, enum hws_access_type e_phy_access, u32 phy_id, + enum hws_ddr_phy e_phy_type, u32 reg_addr, + u32 data_value); +int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type e_access, u32 if_id, + enum mv_ddr_freq memory_freq); +int ddr3_tip_adjust_dqs(u32 dev_num); +int ddr3_tip_init_controller(u32 dev_num); +int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr, + u32 num_of_bursts, u32 *addr); +int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr, + u32 num_of_bursts, u32 *addr); +int ddr3_tip_dynamic_read_leveling(u32 dev_num, u32 ui_freq); +int mv_ddr_rl_dqs_burst(u32 dev_num, u32 if_id, u32 freq); +int 
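/* illustrative encoding per the DFX address-space table above: a client internal register access would be dfx_base | (client_index << 15) | (0x1 << 13) | reg_offset, with client_index 28 = broadcast and 29 = multi-cast */ 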
ddr3_tip_legacy_dynamic_read_leveling(u32 dev_num); +int ddr3_tip_dynamic_per_bit_read_leveling(u32 dev_num, u32 ui_freq); +int ddr3_tip_legacy_dynamic_write_leveling(u32 dev_num); +int ddr3_tip_dynamic_write_leveling(u32 dev_num, int phase_remove); +int ddr3_tip_dynamic_write_leveling_supp(u32 dev_num); +int ddr3_tip_static_init_controller(u32 dev_num); +int ddr3_tip_configure_phy(u32 dev_num); +int ddr3_tip_load_pattern_to_odpg(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum hws_pattern pattern, + u32 load_addr); +int ddr3_tip_load_pattern_to_mem(u32 dev_num, enum hws_pattern e_pattern); +int ddr3_tip_configure_odpg(u32 dev_num, enum hws_access_type access_type, + u32 if_id, enum hws_dir direction, u32 tx_phases, + u32 tx_burst_size, u32 rx_phases, + u32 delay_between_burst, u32 rd_mode, u32 cs_num, + u32 addr_stress_jump, u32 single_pattern); +int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, enum mr_number mr_num, u32 data, u32 mask); +int ddr3_tip_write_cs_result(u32 dev_num, u32 offset); +int ddr3_tip_reset_fifo_ptr(u32 dev_num); +int ddr3_tip_read_pup_value(u32 dev_num, u32 pup_values[], int reg_addr, u32 mask); +int ddr3_tip_read_adll_value(u32 dev_num, u32 pup_values[], u32 reg_addr, u32 mask); +int ddr3_tip_write_adll_value(u32 dev_num, u32 pup_values[], u32 reg_addr); +int ddr3_tip_tune_training_params(u32 dev_num, struct tune_train_params *params); + +#endif /* _DDR3_TRAINING_IP_FLOW_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_pbs.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_pbs.h new file mode 100644 index 000000000..323c67aa5 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_pbs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_PBS_H_ +#define _DDR3_TRAINING_IP_PBS_H_ + +enum { + EBA_CONFIG, + EEBA_CONFIG, + SBA_CONFIG +}; + +enum hws_training_load_op { + TRAINING_LOAD_OPERATION_UNLOAD, + TRAINING_LOAD_OPERATION_LOAD +}; + +enum hws_edge { + TRAINING_EDGE_1, + TRAINING_EDGE_2 +}; + +enum hws_edge_search { + TRAINING_EDGE_MAX, + TRAINING_EDGE_MIN +}; + +enum pbs_dir { + PBS_TX_MODE = 0, + PBS_RX_MODE, + NUM_OF_PBS_MODES +}; + +int ddr3_tip_pbs_rx(u32 dev_num); +int ddr3_tip_print_all_pbs_result(u32 dev_num); +int ddr3_tip_pbs_tx(u32 dev_num); + +#endif /* _DDR3_TRAINING_IP_PBS_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_prv_if.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_prv_if.h new file mode 100644 index 000000000..2df592e1b --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_ip_prv_if.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef _DDR3_TRAINING_IP_PRV_IF_H +#define _DDR3_TRAINING_IP_PRV_IF_H + +#include "ddr3_training_ip.h" +#include "ddr3_training_ip_flow.h" +#include "ddr3_training_ip_bist.h" + +enum hws_static_config_type { + WRITE_LEVELING_STATIC, + READ_LEVELING_STATIC +}; + +struct ddr3_device_info { + u32 device_id; + u32 ck_delay; +}; + +typedef int (*HWS_TIP_DUNIT_MUX_SELECT_FUNC_PTR)(u8 dev_num, int enable); +typedef int (*HWS_TIP_DUNIT_REG_READ_FUNC_PTR)( + u8 dev_num, enum hws_access_type interface_access, u32 if_id, + u32 offset, u32 *data, u32 mask); +typedef int (*HWS_TIP_DUNIT_REG_WRITE_FUNC_PTR)( + u8 dev_num, enum hws_access_type interface_access, u32 if_id, + u32 offset, u32 data, u32 mask); +typedef int (*HWS_TIP_GET_FREQ_CONFIG_INFO)( + u8 dev_num, enum mv_ddr_freq freq, + struct hws_tip_freq_config_info *freq_config_info); +typedef int (*HWS_TIP_GET_DEVICE_INFO)( + u8 dev_num, struct ddr3_device_info *info_ptr); +typedef int (*HWS_GET_CS_CONFIG_FUNC_PTR)( + u8 dev_num, u32 cs_mask, struct hws_cs_config_info *cs_info); +typedef int (*HWS_SET_FREQ_DIVIDER_FUNC_PTR)( + u8 dev_num, u32 if_id, enum mv_ddr_freq freq); +typedef int (*HWS_GET_INIT_FREQ)(u8 dev_num, enum mv_ddr_freq *freq); +typedef int (*HWS_TRAINING_IP_IF_WRITE_FUNC_PTR)( + u32 dev_num, enum hws_access_type access_type, u32 dunit_id, + u32 reg_addr, u32 data, u32 mask); +typedef int (*HWS_TRAINING_IP_IF_READ_FUNC_PTR)( + u32 dev_num, enum hws_access_type access_type, u32 dunit_id, + u32 reg_addr, u32 *data, u32 mask); +typedef int (*HWS_TRAINING_IP_BUS_WRITE_FUNC_PTR)( + u32 dev_num, enum hws_access_type dunit_access_type, u32 if_id, + enum hws_access_type phy_access_type, u32 phy_id, + enum hws_ddr_phy phy_type, u32 reg_addr, u32 data); +typedef int (*HWS_TRAINING_IP_BUS_READ_FUNC_PTR)( + u32 dev_num, u32 if_id, enum hws_access_type phy_access_type, + u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data); +typedef int (*HWS_TRAINING_IP_ALGO_RUN_FUNC_PTR)( + u32 dev_num, enum hws_algo_type algo_type); +typedef int (*HWS_TRAINING_IP_SET_FREQ_FUNC_PTR)( + u32 dev_num, enum hws_access_type access_type, u32 if_id, + enum mv_ddr_freq frequency); +typedef int (*HWS_TRAINING_IP_INIT_CONTROLLER_FUNC_PTR)( + u32 dev_num, struct init_cntr_param *init_cntr_prm); +typedef int (*HWS_TRAINING_IP_PBS_RX_FUNC_PTR)(u32 dev_num); +typedef int (*HWS_TRAINING_IP_PBS_TX_FUNC_PTR)(u32 dev_num); +typedef int (*HWS_TRAINING_IP_SELECT_CONTROLLER_FUNC_PTR)( + u32 dev_num, int enable); +typedef int (*HWS_TRAINING_IP_TOPOLOGY_MAP_LOAD_FUNC_PTR)( + u32 dev_num, struct mv_ddr_topology_map *tm); +typedef int (*HWS_TRAINING_IP_STATIC_CONFIG_FUNC_PTR)( + u32 dev_num, enum mv_ddr_freq frequency, + enum hws_static_config_type static_config_type, u32 if_id); +typedef int (*HWS_TRAINING_IP_EXTERNAL_READ_PTR)( + u32 dev_num, u32 if_id, u32 ddr_addr, u32 num_bursts, u32 *data); +typedef int (*HWS_TRAINING_IP_EXTERNAL_WRITE_PTR)( + u32 dev_num, u32 if_id, u32 ddr_addr, u32 num_bursts, u32 *data); +typedef int (*HWS_TRAINING_IP_BIST_ACTIVATE)( + u32 dev_num, enum hws_pattern pattern, enum hws_access_type access_type, + u32 if_num, enum hws_dir direction, + enum hws_stress_jump addr_stress_jump, + enum hws_pattern_duration duration, + enum hws_bist_operation oper_type, u32 offset, u32 cs_num, + u32 pattern_addr_length); +typedef int (*HWS_TRAINING_IP_BIST_READ_RESULT)( + u32 dev_num, u32 if_id, struct bist_result *pst_bist_result); +typedef int (*HWS_TRAINING_IP_LOAD_TOPOLOGY)(u32 dev_num, u32 config_num); +typedef int 
(*HWS_TRAINING_IP_READ_LEVELING)(u32 dev_num, u32 config_num); +typedef int (*HWS_TRAINING_IP_WRITE_LEVELING)(u32 dev_num, u32 config_num); +typedef u32 (*HWS_TRAINING_IP_GET_TEMP)(u8 dev_num); +typedef u8 (*HWS_TRAINING_IP_GET_RATIO)(u32 freq); + +struct hws_tip_config_func_db { + HWS_TIP_DUNIT_MUX_SELECT_FUNC_PTR tip_dunit_mux_select_func; + void (*mv_ddr_dunit_read)(u32 addr, u32 mask, u32 *data); + void (*mv_ddr_dunit_write)(u32 addr, u32 mask, u32 data); + HWS_TIP_GET_FREQ_CONFIG_INFO tip_get_freq_config_info_func; + HWS_TIP_GET_DEVICE_INFO tip_get_device_info_func; + HWS_SET_FREQ_DIVIDER_FUNC_PTR tip_set_freq_divider_func; + HWS_GET_CS_CONFIG_FUNC_PTR tip_get_cs_config_info; + HWS_TRAINING_IP_GET_TEMP tip_get_temperature; + HWS_TRAINING_IP_GET_RATIO tip_get_clock_ratio; + HWS_TRAINING_IP_EXTERNAL_READ_PTR tip_external_read; + HWS_TRAINING_IP_EXTERNAL_WRITE_PTR tip_external_write; + int (*mv_ddr_phy_read)(enum hws_access_type phy_access, + u32 phy, enum hws_ddr_phy phy_type, + u32 reg_addr, u32 *data); + int (*mv_ddr_phy_write)(enum hws_access_type phy_access, + u32 phy, enum hws_ddr_phy phy_type, + u32 reg_addr, u32 data, + enum hws_operation op_type); +}; + +int ddr3_tip_init_config_func(u32 dev_num, + struct hws_tip_config_func_db *config_func); +int ddr3_tip_register_xsb_info(u32 dev_num, + struct hws_xsb_info *xsb_info_table); +enum hws_result *ddr3_tip_get_result_ptr(u32 stage); +int ddr3_set_freq_config_info(struct hws_tip_freq_config_info *table); +int print_device_info(u8 dev_num); + +#endif /* _DDR3_TRAINING_IP_PRV_IF_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_leveling.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_leveling.c new file mode 100644 index 000000000..6523281f2 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_leveling.c @@ -0,0 +1,1963 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_training_db.h" +#include "ddr_training_ip_db.h" +#include "mv_ddr_regs.h" + +#define WL_ITERATION_NUM 10 + +static u32 pup_mask_table[] = { + 0x000000ff, + 0x0000ff00, + 0x00ff0000, + 0xff000000 +}; + +static struct write_supp_result wr_supp_res[MAX_INTERFACE_NUM][MAX_BUS_NUM]; + +static int ddr3_tip_dynamic_write_leveling_seq(u32 dev_num); +static int ddr3_tip_dynamic_read_leveling_seq(u32 dev_num); +static int ddr3_tip_dynamic_per_bit_read_leveling_seq(u32 dev_num); +static int ddr3_tip_wl_supp_align_phase_shift(u32 dev_num, u32 if_id, + u32 bus_id); +static int ddr3_tip_xsb_compare_test(u32 dev_num, u32 if_id, u32 bus_id, + u32 edge_offset); + +enum { + PASS, + FAIL +}; +/***************************************************************************** +Dynamic read leveling +******************************************************************************/ +int ddr3_tip_dynamic_read_leveling(u32 dev_num, u32 freq) +{ + u32 data, mask; + unsigned int max_cs = mv_ddr_cs_num_get(); + u32 bus_num, if_id, cl_val; + enum mv_ddr_speed_bin speed_bin_index; + /* save current CS value */ + u32 cs_enable_reg_val[MAX_INTERFACE_NUM] = { 0 }; + int is_any_pup_fail = 0; + u32 data_read[MAX_INTERFACE_NUM + 1] = { 0 }; + u8 rl_values[MAX_CS_NUM][MAX_BUS_NUM][MAX_INTERFACE_NUM]; + struct pattern_info *pattern_table = ddr3_tip_get_pattern_table(); + u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (effective_cs = 0; effective_cs < MAX_CS_NUM; effective_cs++) + for (bus_num = 0; bus_num < MAX_BUS_NUM; bus_num++) + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) + rl_values[effective_cs][bus_num][if_id] = 0; + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + training_result[training_stage][if_id] = TEST_SUCCESS; + + /* save current cs enable reg val */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, cs_enable_reg_val, + MASK_ALL_BITS)); + /* enable single cs */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3))); + } + + ddr3_tip_reset_fifo_ptr(dev_num); + + /* + * Phase 1: Load pattern (using ODPG) + * + * enter Read Leveling mode + * only 27 bits are masked + * assuming non multi-CS configuration + * write to CS = 0 for the non multi CS configuration, note + * that the results shall be read back to the required CS !!! + */ + + /* BUS count is 0 shifted 26 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0x3, 0x3)); + CHECK_STATUS(ddr3_tip_configure_odpg + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0, + pattern_table[PATTERN_RL].num_of_phases_tx, 0, + pattern_table[PATTERN_RL].num_of_phases_rx, 0, 0, + effective_cs, STRESS_NONE, DURATION_SINGLE)); + + /* load pattern to ODPG */ + ddr3_tip_load_pattern_to_odpg(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, PATTERN_RL, + pattern_table[PATTERN_RL]. 
+ start_addr); + + /* + * Phase 2: ODPG to Read Leveling mode + */ + + /* General Training Opcode register */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_WR_RD_MODE_ENA_REG, 0, + MASK_ALL_BITS)); + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + GENERAL_TRAINING_OPCODE_REG, + (0x301b01 | effective_cs << 2), 0x3c3fef)); + + /* Object1 opcode register 0 & 1 */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + speed_bin_index = + tm->interface_params[if_id].speed_bin_index; + cl_val = mv_ddr_cl_val_get(speed_bin_index, freq); + data = (cl_val << 17) | (0x3 << 25); + mask = (0xff << 9) | (0x1f << 17) | (0x3 << 25); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + OPCODE_REG0_REG(1), data, mask)); + } + + /* Set iteration count to max value */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + OPCODE_REG1_REG(1), 0xd00, 0xd00)); + + /* + * Phase 2: Mask config + */ + + ddr3_tip_dynamic_read_leveling_seq(dev_num); + + /* + * Phase 3: Read Leveling execution + */ + + /* temporary jira dunit=14751 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_DBG_1_REG, 0, (u32)(1 << 31))); + /* configure phy reset value */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_DBG_3_REG, (0x7f << 24), + (u32)(0xff << 24))); + /* data pup rd reset enable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + SDRAM_CFG_REG, 0, (1 << 30))); + /* data pup rd reset disable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + SDRAM_CFG_REG, (1 << 30), (1 << 30))); + /* training SW override & training RL mode */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x1, 0x9)); + /* training enable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_REG, (1 << 24) | (1 << 20), + (1 << 24) | (1 << 20))); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_REG, (u32)(1 << 31), (u32)(1 << 31))); + + /* trigger training */ + mv_ddr_training_enable(); + + /* check for training done */ + if (mv_ddr_is_training_done(MAX_POLLING_ITERATIONS, &data) != MV_OK) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, ("training done failed\n")); + return MV_FAIL; + } + /* check for training pass */ + if (data != PASS) + DEBUG_LEVELING(DEBUG_LEVEL_INFO, ("training result failed\n")); + + /* disable odpg; switch back to functional mode */ + mv_ddr_odpg_disable(); + + if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, ("odpg disable failed\n")); + return MV_FAIL; + } + + ddr3_tip_if_write(0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS); + + /* double loop on bus, pup */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* check training done */ + is_any_pup_fail = 0; + for (bus_num = 0; + bus_num < octets_per_if_num; + bus_num++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_num); + if (ddr3_tip_if_polling + (dev_num, ACCESS_TYPE_UNICAST, + if_id, (1 << 25), (1 << 25), + mask_results_pup_reg_map[bus_num], + MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, + ("\n_r_l: DDR3 poll failed(2) for IF %d CS 
%d bus %d", + if_id, effective_cs, bus_num)); + is_any_pup_fail = 1; + } else { + /* read result per pup */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + mask_results_pup_reg_map + [bus_num], data_read, + 0xff)); + rl_values[effective_cs][bus_num] + [if_id] = (u8)data_read[if_id]; + } + } + + if (is_any_pup_fail == 1) { + training_result[training_stage][if_id] = + TEST_FAILED; + if (debug_mode == 0) + return MV_FAIL; + } + } + + DEBUG_LEVELING(DEBUG_LEVEL_INFO, ("RL exit read leveling\n")); + + /* + * Phase 3: Exit Read Leveling + */ + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, (1 << 3), (1 << 3))); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_SW_1_REG, (1 << 16), (1 << 16))); + /* set ODPG to functional */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0x0, MASK_ALL_BITS)); + + /* + * Copy the result from the effective CS search to the + * real Functional CS + */ + /*ddr3_tip_write_cs_result(dev_num, RL_PHY_REG(0); */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0x0, MASK_ALL_BITS)); + } + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + /* double loop on bus, pup */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_num = 0; + bus_num < octets_per_if_num; + bus_num++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_num); + /* read result per pup from arry */ + data = rl_values[effective_cs][bus_num][if_id]; + data = (data & 0x1f) | + (((data & 0xe0) >> 5) << 6); + ddr3_tip_bus_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, + bus_num, DDR_PHY_DATA, + RL_PHY_REG(effective_cs), + data); + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* restore cs enable value */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, cs_enable_reg_val[if_id], + MASK_ALL_BITS)); + if (odt_config != 0) { + CHECK_STATUS(ddr3_tip_write_additional_odt_setting + (dev_num, if_id)); + } + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (training_result[training_stage][if_id] == TEST_FAILED) + return MV_FAIL; + } + + return MV_OK; +} + +/* + * Legacy Dynamic write leveling + */ +int ddr3_tip_legacy_dynamic_write_leveling(u32 dev_num) +{ + u32 c_cs, if_id, cs_mask = 0; + unsigned int max_cs = mv_ddr_cs_num_get(); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* + * In TRAINIUNG reg (0x15b0) write 0x80000008 | cs_mask: + * Trn_start + * cs_mask = 0x1 <<20 Trn_CS0 - CS0 is included in the DDR3 training + * cs_mask = 0x1 <<21 Trn_CS1 - CS1 is included in the DDR3 training + * cs_mask = 0x1 <<22 Trn_CS2 - CS2 is included in the DDR3 training + * cs_mask = 0x1 <<23 Trn_CS3 - CS3 is included in the DDR3 training + * Trn_auto_seq = write leveling + */ + for (c_cs = 0; c_cs < max_cs; c_cs++) + cs_mask = cs_mask | 1 << (20 + c_cs); + + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, 0, + TRAINING_REG, (0x80000008 | cs_mask), + 0xffffffff)); + mdelay(20); + if 
(ddr3_tip_if_polling
+ (dev_num, ACCESS_TYPE_UNICAST, if_id, 0,
+ (u32)0x80000000, TRAINING_REG,
+ MAX_POLLING_ITERATIONS) != MV_OK) {
+ DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
+ ("polling failed for Old WL result\n"));
+ return MV_FAIL;
+ }
+ }
+
+ return MV_OK;
+}
+
+/*
+ * Legacy Dynamic read leveling
+ */
+int ddr3_tip_legacy_dynamic_read_leveling(u32 dev_num)
+{
+ u32 c_cs, if_id, cs_mask = 0;
+ unsigned int max_cs = mv_ddr_cs_num_get();
+ struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+ /*
+ * In TRAINING reg (0x15b0) write 0x80000040 | cs_mask:
+ * Trn_start
+ * cs_mask = 0x1 << 20 Trn_CS0 - CS0 is included in the DDR3 training
+ * cs_mask = 0x1 << 21 Trn_CS1 - CS1 is included in the DDR3 training
+ * cs_mask = 0x1 << 22 Trn_CS2 - CS2 is included in the DDR3 training
+ * cs_mask = 0x1 << 23 Trn_CS3 - CS3 is included in the DDR3 training
+ * Trn_auto_seq = Read Leveling using training pattern
+ */
+ for (c_cs = 0; c_cs < max_cs; c_cs++)
+ cs_mask = cs_mask | 1 << (20 + c_cs);
+
+ CHECK_STATUS(ddr3_tip_if_write
+ (dev_num, ACCESS_TYPE_MULTICAST, 0, TRAINING_REG,
+ (0x80000040 | cs_mask), 0xffffffff));
+ mdelay(100);
+
+ for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ if (ddr3_tip_if_polling
+ (dev_num, ACCESS_TYPE_UNICAST, if_id, 0,
+ (u32)0x80000000, TRAINING_REG,
+ MAX_POLLING_ITERATIONS) != MV_OK) {
+ DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
+ ("polling failed for Old RL result\n"));
+ return MV_FAIL;
+ }
+ }
+
+ return MV_OK;
+}
+
+/*
+ * Dynamic per bit read leveling
+ */
+int ddr3_tip_dynamic_per_bit_read_leveling(u32 dev_num, u32 freq)
+{
+ u32 data, mask;
+ u32 bus_num, if_id, cl_val, bit_num;
+ u32 curr_numb, curr_min_delay;
+ int adll_array[3] = { 0, -0xa, 0x14 };
+ u32 phyreg3_arr[MAX_INTERFACE_NUM][MAX_BUS_NUM];
+ enum mv_ddr_speed_bin speed_bin_index;
+ int is_any_pup_fail = 0;
+ int break_loop = 0;
+ u32 cs_enable_reg_val[MAX_INTERFACE_NUM]; /* save current CS value */
+ u32 data_read[MAX_INTERFACE_NUM];
+ int per_bit_rl_pup_status[MAX_INTERFACE_NUM][MAX_BUS_NUM];
+ u32 data2_write[MAX_INTERFACE_NUM][MAX_BUS_NUM];
+ struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
+ u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
+ u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+ struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+ for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ for (bus_num = 0;
+ bus_num < octets_per_if_num; bus_num++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_num);
+ per_bit_rl_pup_status[if_id][bus_num] = 0;
+ data2_write[if_id][bus_num] = 0;
+ /* read current value of phy register 0x3 */
+ CHECK_STATUS(ddr3_tip_bus_read
+ (dev_num, if_id, ACCESS_TYPE_UNICAST,
+ bus_num, DDR_PHY_DATA,
+ CRX_PHY_REG(0),
+ &phyreg3_arr[if_id][bus_num]));
+ }
+ }
+
+ /* NEW RL machine */
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ training_result[training_stage][if_id] = TEST_SUCCESS;
+
+ /* save current cs enable reg val */
+ CHECK_STATUS(ddr3_tip_if_read
+ (dev_num, ACCESS_TYPE_UNICAST, if_id,
+ DUAL_DUNIT_CFG_REG, &cs_enable_reg_val[if_id],
+ MASK_ALL_BITS));
+ /* enable single cs */
+ CHECK_STATUS(ddr3_tip_if_write
+ (dev_num, ACCESS_TYPE_UNICAST, if_id,
+ DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3)));
+ }
+
+ ddr3_tip_reset_fifo_ptr(dev_num);
+ for (curr_numb = 0; curr_numb < 3; curr_numb++) {
+ /*
+ * Phase 1: Load pattern 
(using ODPG) + * + * enter Read Leveling mode + * only 27 bits are masked + * assuming non multi-CS configuration + * write to CS = 0 for the non multi CS configuration, note that + * the results shall be read back to the required CS !!! + */ + + /* BUS count is 0 shifted 26 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0x3, 0x3)); + CHECK_STATUS(ddr3_tip_configure_odpg + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0, + pattern_table[PATTERN_TEST].num_of_phases_tx, 0, + pattern_table[PATTERN_TEST].num_of_phases_rx, 0, + 0, 0, STRESS_NONE, DURATION_SINGLE)); + + /* load pattern to ODPG */ + ddr3_tip_load_pattern_to_odpg(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, PATTERN_TEST, + pattern_table[PATTERN_TEST]. + start_addr); + + /* + * Phase 2: ODPG to Read Leveling mode + */ + + /* General Training Opcode register */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_WR_RD_MODE_ENA_REG, 0, + MASK_ALL_BITS)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + GENERAL_TRAINING_OPCODE_REG, 0x301b01, 0x3c3fef)); + + /* Object1 opcode register 0 & 1 */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + speed_bin_index = + tm->interface_params[if_id].speed_bin_index; + cl_val = mv_ddr_cl_val_get(speed_bin_index, freq); + data = (cl_val << 17) | (0x3 << 25); + mask = (0xff << 9) | (0x1f << 17) | (0x3 << 25); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + OPCODE_REG0_REG(1), data, mask)); + } + + /* Set iteration count to max value */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + OPCODE_REG1_REG(1), 0xd00, 0xd00)); + + /* + * Phase 2: Mask config + */ + + ddr3_tip_dynamic_per_bit_read_leveling_seq(dev_num); + + /* + * Phase 3: Read Leveling execution + */ + + /* temporary jira dunit=14751 */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_DBG_1_REG, 0, (u32)(1 << 31))); + /* configure phy reset value */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_DBG_3_REG, (0x7f << 24), + (u32)(0xff << 24))); + /* data pup rd reset enable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + SDRAM_CFG_REG, 0, (1 << 30))); + /* data pup rd reset disable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + SDRAM_CFG_REG, (1 << 30), (1 << 30))); + /* training SW override & training RL mode */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x1, 0x9)); + /* training enable */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_REG, (1 << 24) | (1 << 20), + (1 << 24) | (1 << 20))); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_REG, (u32)(1 << 31), (u32)(1 << 31))); + + /* trigger training */ + mv_ddr_training_enable(); + + /* check for training done */ + if (mv_ddr_is_training_done(MAX_POLLING_ITERATIONS, &data) != MV_OK) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, ("training done failed\n")); + return MV_FAIL; + } + /* check for training pass */ + if (data != PASS) + DEBUG_LEVELING(DEBUG_LEVEL_INFO, ("training result failed\n")); + + /* disable odpg; switch back to functional mode */ + mv_ddr_odpg_disable(); + + if 
(mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, ("odpg disable failed\n")); + return MV_FAIL; + } + + ddr3_tip_if_write(0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS); + + /* double loop on bus, pup */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* check training done */ + for (bus_num = 0; + bus_num < octets_per_if_num; + bus_num++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_num); + + if (per_bit_rl_pup_status[if_id][bus_num] + == 0) { + curr_min_delay = 0; + for (bit_num = 0; bit_num < 8; + bit_num++) { + if (ddr3_tip_if_polling + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, (1 << 25), + (1 << 25), + mask_results_dq_reg_map + [bus_num * 8 + bit_num], + MAX_POLLING_ITERATIONS) != + MV_OK) { + DEBUG_LEVELING + (DEBUG_LEVEL_ERROR, + ("\n_r_l: DDR3 poll failed(2) for bus %d bit %d\n", + bus_num, + bit_num)); + } else { + /* read result per pup */ + CHECK_STATUS + (ddr3_tip_if_read + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + mask_results_dq_reg_map + [bus_num * 8 + + bit_num], + data_read, + MASK_ALL_BITS)); + data = + (data_read + [if_id] & + 0x1f) | + ((data_read + [if_id] & + 0xe0) << 1); + if (curr_min_delay == 0) + curr_min_delay = + data; + else if (data < + curr_min_delay) + curr_min_delay = + data; + if (data > data2_write[if_id][bus_num]) + data2_write + [if_id] + [bus_num] = + data; + } + } + + if (data2_write[if_id][bus_num] <= + (curr_min_delay + + MAX_DQ_READ_LEVELING_DELAY)) { + per_bit_rl_pup_status[if_id] + [bus_num] = 1; + } + } + } + } + + /* check if there is need to search new phyreg3 value */ + if (curr_numb < 2) { + /* if there is DLL that is not checked yet */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; + if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_num = 0; + bus_num < octets_per_if_num; + bus_num++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, + bus_num); + if (per_bit_rl_pup_status[if_id] + [bus_num] != 1) { + /* go to next ADLL value */ + CHECK_STATUS + (ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, + bus_num, DDR_PHY_DATA, + CRX_PHY_REG(0), + (phyreg3_arr[if_id] + [bus_num] + + adll_array[curr_numb]))); + break_loop = 1; + break; + } + } + if (break_loop) + break; + } + } /* if (curr_numb < 2) */ + if (!break_loop) + break; + } /* for ( curr_numb = 0; curr_numb <3; curr_numb++) */ + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_num = 0; bus_num < octets_per_if_num; + bus_num++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_num); + if (per_bit_rl_pup_status[if_id][bus_num] == 1) + ddr3_tip_bus_write(dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, + bus_num, DDR_PHY_DATA, + RL_PHY_REG(effective_cs), + data2_write[if_id] + [bus_num]); + else + is_any_pup_fail = 1; + } + + /* TBD flow does not support multi CS */ + /* + * cs_bitmask = tm->interface_params[if_id]. + * as_bus_params[bus_num].cs_bitmask; + */ + /* divide by 4 is used for retrieving the CS number */ + /* + * TBD BC2 - what is the PHY address for other + * CS ddr3_tip_write_cs_result() ??? 
+ */
+ /*
+ * find what should be written to PHY
+ * - max delay that is less than threshold
+ */
+ if (is_any_pup_fail == 1) {
+ training_result[training_stage][if_id] = TEST_FAILED;
+ if (debug_mode == 0)
+ return MV_FAIL;
+ }
+ }
+ DEBUG_LEVELING(DEBUG_LEVEL_INFO, ("RL exit read leveling\n"));
+
+ /*
+ * Phase 3: Exit Read Leveling
+ */
+
+ CHECK_STATUS(ddr3_tip_if_write
+ (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+ TRAINING_SW_2_REG, (1 << 3), (1 << 3)));
+ CHECK_STATUS(ddr3_tip_if_write
+ (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+ TRAINING_SW_1_REG, (1 << 16), (1 << 16)));
+ /* set ODPG to functional */
+ CHECK_STATUS(ddr3_tip_if_write
+ (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+ ODPG_DATA_CTRL_REG, 0x0, MASK_ALL_BITS));
+ /*
+ * Copy the result from the effective CS search to the real
+ * Functional CS
+ */
+ ddr3_tip_write_cs_result(dev_num, RL_PHY_REG(0));
+ CHECK_STATUS(ddr3_tip_if_write
+ (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+ ODPG_DATA_CTRL_REG, 0x0, MASK_ALL_BITS));
+
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ /* restore cs enable value */
+ CHECK_STATUS(ddr3_tip_if_write
+ (dev_num, ACCESS_TYPE_UNICAST, if_id,
+ DUAL_DUNIT_CFG_REG, cs_enable_reg_val[if_id],
+ MASK_ALL_BITS));
+ if (odt_config != 0) {
+ CHECK_STATUS(ddr3_tip_write_additional_odt_setting
+ (dev_num, if_id));
+ }
+ }
+
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ if (training_result[training_stage][if_id] == TEST_FAILED)
+ return MV_FAIL;
+ }
+
+ return MV_OK;
+}
+
+int ddr3_tip_calc_cs_mask(u32 dev_num, u32 if_id, u32 effective_cs,
+ u32 *cs_mask)
+{
+ u32 all_bus_cs = 0, same_bus_cs;
+ u32 bus_cnt;
+ u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+ struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+ *cs_mask = same_bus_cs = CS_BIT_MASK;
+
+ /*
+ * In some devices (such as BC2) the CS is per pup, and therefore
+ * mixed mode is valid, unlike other devices where the CS
+ * configuration is per interface.
+ * In order to know which case this is, we do 'OR' and 'AND'
+ * operations between the CS bitmasks of all the pups.
+ * If the results are not the same, it is mixed mode, so all CS
+ * should be configured (when configuring the MRS).
+ */
+ for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt);
+
+ all_bus_cs |= tm->interface_params[if_id].
+ as_bus_params[bus_cnt].cs_bitmask;
+ same_bus_cs &= tm->interface_params[if_id].
+ as_bus_params[bus_cnt].cs_bitmask;
+
+ /* cs enable is active low */
+ *cs_mask &= ~tm->interface_params[if_id].
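+ /*
+ * Worked example (illustrative): with two pups whose cs_bitmask
+ * values are 0x1 and 0x3, all_bus_cs = 0x1 | 0x3 = 0x3 while
+ * same_bus_cs = 0x1 & 0x3 = 0x1; the results differ, so this is
+ * mixed mode. Note the accumulation around this point is active
+ * low: a CS selected by some pup clears its bit in *cs_mask.
+ */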
+ as_bus_params[bus_cnt].cs_bitmask; + } + + if (all_bus_cs == same_bus_cs) + *cs_mask = (*cs_mask | (~(1 << effective_cs))) & CS_BIT_MASK; + + return MV_OK; +} + +/* + * Dynamic write leveling + */ +int ddr3_tip_dynamic_write_leveling(u32 dev_num, int phase_remove) +{ + u32 reg_data = 0, temp = 0, iter, if_id, bus_cnt; + u32 cs_enable_reg_val[MAX_INTERFACE_NUM] = { 0 }; + u32 cs_mask[MAX_INTERFACE_NUM]; + u32 read_data_sample_delay_vals[MAX_INTERFACE_NUM] = { 0 }; + u32 read_data_ready_delay_vals[MAX_INTERFACE_NUM] = { 0 }; + /* 0 for failure */ + u32 res_values[MAX_INTERFACE_NUM * MAX_BUS_NUM] = { 0 }; + u32 test_res = 0; /* 0 - success for all pup */ + u32 data_read[MAX_INTERFACE_NUM]; + u8 wl_values[MAX_CS_NUM][MAX_BUS_NUM][MAX_INTERFACE_NUM]; + u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u32 cs_mask0[MAX_INTERFACE_NUM] = { 0 }; + unsigned int max_cs = mv_ddr_cs_num_get(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + + training_result[training_stage][if_id] = TEST_SUCCESS; + + /* save Read Data Sample Delay */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_UNICAST, if_id, + RD_DATA_SMPL_DLYS_REG, + read_data_sample_delay_vals, MASK_ALL_BITS)); + /* save Read Data Ready Delay */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_UNICAST, if_id, + RD_DATA_RDY_DLYS_REG, read_data_ready_delay_vals, + MASK_ALL_BITS)); + /* save current cs reg val */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, cs_enable_reg_val, MASK_ALL_BITS)); + } + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) < MV_TIP_REV_3) { + /* Enable multi-CS */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, 0, (1 << 3))); + } + + /* + * Phase 1: DRAM 2 Write Leveling mode + */ + + /*Assert 10 refresh commands to DRAM to all CS */ + for (iter = 0; iter < WL_ITERATION_NUM; iter++) { + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, SDRAM_OP_REG, + (u32)((~(0xf) << 8) | 0x2), 0xf1f)); + } + } + /* check controller back to normal */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (ddr3_tip_if_polling + (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, + SDRAM_OP_REG, MAX_POLLING_ITERATIONS) != MV_OK) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, + ("WL: DDR3 poll failed(3)")); + } + } + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + /*enable write leveling to all cs - Q off , WL n */ + /* calculate interface cs mask */ + CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask0, MR_CMD1, + 0x1000, 0x1080)); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* cs enable is active low */ + ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs, + &cs_mask[if_id]); + } + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) >= MV_TIP_REV_3) { + /* Enable Output buffer to relevant CS - Q on , WL on */ + CHECK_STATUS(ddr3_tip_write_mrs_cmd + (dev_num, cs_mask, MR_CMD1, 0x80, 0x1080)); + + /*enable odt for relevant CS */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + 0x1498, (0x3 << 
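+ /*
+ * For reference, the MR1 value/mask pairs used in this phase match
+ * the JEDEC DDR3 MR1 layout, where A7 is Write Leveling Enable and
+ * A12 is Qoff (a sketch, assuming the standard mode-register bits):
+ *
+ *	data 0x1000, mask 0x1080  ->  Qoff = 1, WL = 0  (outputs off)
+ *	data 0x0080, mask 0x1080  ->  Qoff = 0, WL = 1  (Q on, WL on)
+ */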
(effective_cs * 2)), 0xf)); + } else { + /* FIXME: should be the same as _CPU case */ + CHECK_STATUS(ddr3_tip_write_mrs_cmd + (dev_num, cs_mask, MR_CMD1, 0xc0, 0x12c4)); + } + + /* + * Phase 2: Set training IP to write leveling mode + */ + + CHECK_STATUS(ddr3_tip_dynamic_write_leveling_seq(dev_num)); + + /* phase 3: trigger training */ + mv_ddr_training_enable(); + + /* check for training done */ + if (mv_ddr_is_training_done(MAX_POLLING_ITERATIONS, data_read) != MV_OK) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, ("training done failed\n")); + } else { /* check for training pass */ + reg_data = data_read[0]; + if (tm->bus_act_mask == 0xb) /* set to data to 0 to skip the check */ + reg_data = 0; + if (reg_data != PASS) + DEBUG_LEVELING(DEBUG_LEVEL_INFO, ("training result failed\n")); + + /* check for training completion per bus */ + for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + /* training status */ + ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, + mask_results_pup_reg_map[bus_cnt], + data_read, MASK_ALL_BITS); + reg_data = data_read[0]; + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, ("WL: IF %d BUS %d reg 0x%x\n", + 0, bus_cnt, reg_data)); + if ((reg_data & (1 << 25)) == 0) + res_values[bus_cnt] = 1; + ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, + mask_results_pup_reg_map[bus_cnt], + data_read, 0xff); + /* + * Save the read value that should be + * write to PHY register + */ + wl_values[effective_cs][bus_cnt][0] = (u8)data_read[0]; + } + } + + /* + * Phase 3.5: Validate result + */ + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_cnt = 0; bus_cnt < octets_per_if_num; bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + /* + * Read result control register according to subphy + * "16" below is for a half-phase + */ + reg_data = wl_values[effective_cs][bus_cnt][if_id] + 16; + /* + * Write to WL register: ADLL [4:0], Phase [8:6], + * Centralization ADLL [15:10] + 0x10 + */ + reg_data = (reg_data & 0x1f) | + (((reg_data & 0xe0) >> 5) << 6) | + (((reg_data & 0x1f) + phy_reg1_val) << 10); + /* Search with WL CS0 subphy reg */ + ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, bus_cnt, + DDR_PHY_DATA, WL_PHY_REG(0), reg_data); + /* + * Check for change in data read from DRAM. 
+ * If changed, fix the result + */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + TRAINING_WL_REG, + data_read, MASK_ALL_BITS)); + if (((data_read[if_id] & (1 << (bus_cnt + 20))) >> + (bus_cnt + 20)) == 0) { + DEBUG_LEVELING( + DEBUG_LEVEL_ERROR, + ("WLValues was changed from 0x%X", + wl_values[effective_cs] + [bus_cnt][if_id])); + wl_values[effective_cs] + [bus_cnt][if_id] += 32; + DEBUG_LEVELING( + DEBUG_LEVEL_ERROR, + ("to 0x%X", + wl_values[effective_cs] + [bus_cnt][if_id])); + } + } + } + + /* + * Phase 4: Exit write leveling mode + */ + + /* disable DQs toggling */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + WL_DQS_PATTERN_REG, 0x0, 0x1)); + + /* Update MRS 1 (WL off) */ + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) >= MV_TIP_REV_3) { + CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask0, MR_CMD1, + 0x1000, 0x1080)); + } else { + /* FIXME: should be same as _CPU case */ + CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask0, MR_CMD1, + 0x1000, 0x12c4)); + } + + /* Update MRS 1 (return to functional mode - Q on , WL off) */ + CHECK_STATUS(ddr3_tip_write_mrs_cmd + (dev_num, cs_mask0, MR_CMD1, 0x0, 0x1080)); + + /* set phy to normal mode */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x5, 0x7)); + + /* exit sw override mode */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x4, 0x7)); + } + + /* + * Phase 5: Load WL values to each PHY + */ + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + test_res = 0; + for (bus_cnt = 0; + bus_cnt < octets_per_if_num; + bus_cnt++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt); + /* check if result == pass */ + if (res_values + [(if_id * + octets_per_if_num) + + bus_cnt] == 0) { + /* + * read result control register + * according to pup + */ + reg_data = + wl_values[effective_cs][bus_cnt] + [if_id]; + /* + * Write into write leveling register + * ([4:0] ADLL, [8:6] Phase, [15:10] + * (centralization) ADLL + 0x10) + */ + reg_data = + (reg_data & 0x1f) | + (((reg_data & 0xe0) >> 5) << 6) | + (((reg_data & 0x1f) + + phy_reg1_val) << 10); + /* + * in case phase remove should be executed + * need to remove more than one phase. 
+ * this will take place only in low frequency, + * where there could be more than one phase between sub-phys + */ + if (phase_remove == 1) { + temp = (reg_data >> WR_LVL_PH_SEL_OFFS) & WR_LVL_PH_SEL_PHASE1; + reg_data &= ~(WR_LVL_PH_SEL_MASK << WR_LVL_PH_SEL_OFFS); + reg_data |= (temp << WR_LVL_PH_SEL_OFFS); + } + + ddr3_tip_bus_write( + dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, + bus_cnt, + DDR_PHY_DATA, + WL_PHY_REG(effective_cs), + reg_data); + } else { + test_res = 1; + /* + * read result control register + * according to pup + */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + mask_results_pup_reg_map + [bus_cnt], data_read, + 0xff)); + reg_data = data_read[if_id]; + DEBUG_LEVELING( + DEBUG_LEVEL_ERROR, + ("WL: IF %d BUS %d failed, reg 0x%x\n", + if_id, bus_cnt, reg_data)); + } + } + + if (test_res != 0) { + training_result[training_stage][if_id] = + TEST_FAILED; + } + } + } + /* Set to 0 after each loop to avoid illegal value may be used */ + effective_cs = 0; + + /* + * Copy the result from the effective CS search to the real + * Functional CS + */ + /* ddr3_tip_write_cs_result(dev_num, WL_PHY_REG(0); */ + /* restore saved values */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* restore Read Data Sample Delay */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + RD_DATA_SMPL_DLYS_REG, + read_data_sample_delay_vals[if_id], + MASK_ALL_BITS)); + + /* restore Read Data Ready Delay */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + RD_DATA_RDY_DLYS_REG, + read_data_ready_delay_vals[if_id], + MASK_ALL_BITS)); + + /* enable multi cs */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, cs_enable_reg_val[if_id], + MASK_ALL_BITS)); + } + + if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_TIP_REV) >= MV_TIP_REV_3) { + /* Disable modt0 for CS0 training - need to adjust for multi-CS + * in case of ddr4 set 0xf else 0 + */ + if (odt_config != 0) { + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + SDRAM_ODT_CTRL_HIGH_REG, 0x0, 0xf)); + } + else { + CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + SDRAM_ODT_CTRL_HIGH_REG, 0xf, 0xf)); + } + + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (training_result[training_stage][if_id] == TEST_FAILED) + return MV_FAIL; + } + + return MV_OK; +} + +/* + * Dynamic write leveling supplementary + */ +int ddr3_tip_dynamic_write_leveling_supp(u32 dev_num) +{ + int adll_offset; + u32 if_id, bus_id, data, data_tmp; + int is_if_fail = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + is_if_fail = 0; + + for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + wr_supp_res[if_id][bus_id].is_pup_fail = 1; + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, ACCESS_TYPE_UNICAST, + bus_id, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), + &data)); + DEBUG_LEVELING( + DEBUG_LEVEL_TRACE, + ("WL Supp: adll_offset=0 data delay = %d\n", + data)); + if (ddr3_tip_wl_supp_align_phase_shift + (dev_num, if_id, bus_id) == MV_OK) { + DEBUG_LEVELING( + DEBUG_LEVEL_TRACE, + ("WL Supp: IF %d bus_id 
%d adll_offset=0 Success !\n", + if_id, bus_id)); + continue; + } + + /* change adll */ + adll_offset = 5; + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), + data + adll_offset)); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, ACCESS_TYPE_UNICAST, + bus_id, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), + &data_tmp)); + DEBUG_LEVELING( + DEBUG_LEVEL_TRACE, + ("WL Supp: adll_offset= %d data delay = %d\n", + adll_offset, data_tmp)); + + if (ddr3_tip_wl_supp_align_phase_shift + (dev_num, if_id, bus_id) == MV_OK) { + DEBUG_LEVELING( + DEBUG_LEVEL_TRACE, + ("WL Supp: IF %d bus_id %d adll_offset= %d Success !\n", + if_id, bus_id, adll_offset)); + continue; + } + + /* change adll */ + adll_offset = -5; + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), + data + adll_offset)); + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, ACCESS_TYPE_UNICAST, + bus_id, DDR_PHY_DATA, + CTX_PHY_REG(effective_cs), + &data_tmp)); + DEBUG_LEVELING( + DEBUG_LEVEL_TRACE, + ("WL Supp: adll_offset= %d data delay = %d\n", + adll_offset, data_tmp)); + if (ddr3_tip_wl_supp_align_phase_shift + (dev_num, if_id, bus_id) == MV_OK) { + DEBUG_LEVELING( + DEBUG_LEVEL_TRACE, + ("WL Supp: IF %d bus_id %d adll_offset= %d Success !\n", + if_id, bus_id, adll_offset)); + continue; + } else { + DEBUG_LEVELING( + DEBUG_LEVEL_ERROR, + ("WL Supp: IF %d bus_id %d Failed !\n", + if_id, bus_id)); + is_if_fail = 1; + } + } + + if (is_if_fail == 1) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, + ("WL Supp: CS# %d: IF %d failed\n", + effective_cs, if_id)); + training_result[training_stage][if_id] = TEST_FAILED; + } else { + training_result[training_stage][if_id] = TEST_SUCCESS; + } + } + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (training_result[training_stage][if_id] == TEST_FAILED) + return MV_FAIL; + } + + return MV_OK; +} + +/* + * Phase Shift + */ +static int ddr3_tip_wl_supp_align_phase_shift(u32 dev_num, u32 if_id, + u32 bus_id) +{ + u32 original_phase; + u32 data, write_data; + + wr_supp_res[if_id][bus_id].stage = PHASE_SHIFT; + if (ddr3_tip_xsb_compare_test + (dev_num, if_id, bus_id, 0) == MV_OK) + return MV_OK; + + /* Read current phase */ + CHECK_STATUS(ddr3_tip_bus_read + (dev_num, if_id, ACCESS_TYPE_UNICAST, bus_id, + DDR_PHY_DATA, WL_PHY_REG(effective_cs), &data)); + original_phase = (data >> 6) & 0x7; + + /* Set phase (0x0[6-8]) -2 */ + if (original_phase >= 1) { + if (original_phase == 1) + write_data = data & ~0x1df; + else + write_data = (data & ~0x1c0) | + ((original_phase - 2) << 6); + ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA, + WL_PHY_REG(effective_cs), write_data); + if (ddr3_tip_xsb_compare_test + (dev_num, if_id, bus_id, -2) == MV_OK) + return MV_OK; + } + + /* Set phase (0x0[6-8]) +2 */ + if (original_phase <= 5) { + write_data = (data & ~0x1c0) | + ((original_phase + 2) << 6); + ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA, + WL_PHY_REG(effective_cs), write_data); + if (ddr3_tip_xsb_compare_test + (dev_num, if_id, bus_id, 2) == MV_OK) + return MV_OK; + } + + /* Set phase (0x0[6-8]) +4 */ + if (original_phase <= 3) { + write_data = (data & ~0x1c0) | + ((original_phase + 4) << 6); + ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id, + 
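+ /*
+ * The supplementary pass retries the phase alignment at up to three
+ * ADLL offsets: 0 first, then +5, and here -5 taps around the
+ * trained centralization value; a sketch of the flow, assuming
+ * ddr3_tip_wl_supp_align_phase_shift() returns MV_OK on a match:
+ *
+ *	for (off in {0, +5, -5}) {
+ *		write CTX ADLL = data + off;
+ *		if (align OK)
+ *			break;	/* subphy aligned */
+ *	}
+ */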
ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA,
+ WL_PHY_REG(effective_cs), write_data);
+ if (ddr3_tip_xsb_compare_test
+ (dev_num, if_id, bus_id, 4) == MV_OK)
+ return MV_OK;
+ }
+
+ /* Set phase (0x0[6-8]) +6 */
+ if (original_phase <= 1) {
+ write_data = (data & ~0x1c0) |
+ ((original_phase + 6) << 6);
+ ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id,
+ ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA,
+ WL_PHY_REG(effective_cs), write_data);
+ if (ddr3_tip_xsb_compare_test
+ (dev_num, if_id, bus_id, 6) == MV_OK)
+ return MV_OK;
+ }
+
+ /* Write original WL result back */
+ ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id,
+ ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA,
+ WL_PHY_REG(effective_cs), data);
+ wr_supp_res[if_id][bus_id].is_pup_fail = 1;
+
+ return MV_FAIL;
+}
+
+/*
+ * Compare Test
+ */
+static int ddr3_tip_xsb_compare_test(u32 dev_num, u32 if_id, u32 bus_id,
+ u32 edge_offset)
+{
+ u32 num_of_succ_byte_compare, word_in_pattern;
+ u32 word_offset, i, num_of_word_mult;
+ u32 read_pattern[TEST_PATTERN_LENGTH * 2];
+ struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
+ u32 pattern_test_pattern_table[8];
+ struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+ /* 3 below for INTERFACE_BUS_MASK_16BIT */
+ num_of_word_mult = (tm->bus_act_mask == 3) ? 1 : 2;
+
+ for (i = 0; i < 8; i++) {
+ pattern_test_pattern_table[i] =
+ pattern_table_get_word(dev_num, PATTERN_TEST, (u8)i);
+ }
+
+ /* External write, read and compare */
+ CHECK_STATUS(ddr3_tip_load_pattern_to_mem(dev_num, PATTERN_TEST));
+
+ CHECK_STATUS(ddr3_tip_reset_fifo_ptr(dev_num));
+
+ CHECK_STATUS(ddr3_tip_ext_read
+ (dev_num, if_id,
+ ((pattern_table[PATTERN_TEST].start_addr << 3) +
+ ((SDRAM_CS_SIZE + 1) * effective_cs)), 1, read_pattern));
+
+ DEBUG_LEVELING(
+ DEBUG_LEVEL_TRACE,
+ ("XSB-compt CS#%d: IF %d bus_id %d 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ effective_cs, if_id, bus_id,
+ read_pattern[0], read_pattern[1],
+ read_pattern[2], read_pattern[3],
+ read_pattern[4], read_pattern[5],
+ read_pattern[6], read_pattern[7]));
+
+ /* compare byte per pup */
+ num_of_succ_byte_compare = 0;
+ for (word_in_pattern = start_xsb_offset;
+ word_in_pattern < (TEST_PATTERN_LENGTH * num_of_word_mult);
+ word_in_pattern++) {
+ word_offset = word_in_pattern;
+ if ((word_offset > (TEST_PATTERN_LENGTH * 2 - 1)))
+ continue;
+
+ if ((read_pattern[word_in_pattern] & pup_mask_table[bus_id]) ==
+ (pattern_test_pattern_table[word_offset] &
+ pup_mask_table[bus_id]))
+ num_of_succ_byte_compare++;
+ }
+
+ if ((TEST_PATTERN_LENGTH * num_of_word_mult - start_xsb_offset) ==
+ num_of_succ_byte_compare) {
+ wr_supp_res[if_id][bus_id].stage = edge_offset;
+ DEBUG_LEVELING(DEBUG_LEVEL_TRACE,
+ ("supplementary: shift to %d for if %d pup %d success\n",
+ edge_offset, if_id, bus_id));
+ wr_supp_res[if_id][bus_id].is_pup_fail = 0;
+
+ return MV_OK;
+ } else {
+ DEBUG_LEVELING(
+ DEBUG_LEVEL_TRACE,
+ ("XSB-compt CS#%d: IF %d bus_id %d num_of_succ_byte_compare %d - Fail!\n",
+ effective_cs, if_id, bus_id, num_of_succ_byte_compare));
+
+ DEBUG_LEVELING(
+ DEBUG_LEVEL_TRACE,
+ ("XSB-compt: expected 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ pattern_test_pattern_table[0],
+ pattern_test_pattern_table[1],
+ pattern_test_pattern_table[2],
+ pattern_test_pattern_table[3],
+ pattern_test_pattern_table[4],
+ pattern_test_pattern_table[5],
+ pattern_test_pattern_table[6],
+ pattern_test_pattern_table[7]));
+ DEBUG_LEVELING(
+ DEBUG_LEVEL_TRACE,
+ ("XSB-compt: received 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+
read_pattern[0], read_pattern[1], + read_pattern[2], read_pattern[3], + read_pattern[4], read_pattern[5], + read_pattern[6], read_pattern[7])); + + return MV_FAIL; + } +} + +/* + * Dynamic write leveling sequence + */ +static int ddr3_tip_dynamic_write_leveling_seq(u32 dev_num) +{ + u32 bus_id, dq_id; + u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_SW_2_REG, 0x1, 0x5)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_WL_REG, 0x50, 0xff)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_WL_REG, 0x5c, 0xff)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + GENERAL_TRAINING_OPCODE_REG, 0x381b82, 0x3c3faf)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + OPCODE_REG0_REG(1), (0x3 << 25), (0x3ffff << 9))); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + OPCODE_REG1_REG(1), 0x80, 0xffff)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + WL_DONE_CNTR_REF_REG, 0x14, 0xff)); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + TRAINING_WL_REG, 0xff5c, 0xffff)); + + /* mask PBS */ + for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_dq_reg_map[dq_id], 0x1 << 24, + 0x1 << 24)); + } + + /* Mask all results */ + for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_pup_reg_map[bus_id], 0x1 << 24, + 0x1 << 24)); + } + + /* Unmask only wanted */ + for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_pup_reg_map[bus_id], 0, 0x1 << 24)); + } + + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + WL_DQS_PATTERN_REG, 0x1, 0x1)); + + return MV_OK; +} + +/* + * Dynamic read leveling sequence + */ +static int ddr3_tip_dynamic_read_leveling_seq(u32 dev_num) +{ + u32 bus_id, dq_id; + u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* mask PBS */ + for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_dq_reg_map[dq_id], 0x1 << 24, + 0x1 << 24)); + } + + /* Mask all results */ + for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_pup_reg_map[bus_id], 0x1 << 24, + 0x1 << 24)); + } + + /* Unmask only wanted */ + for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 
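+ /*
+ * Bit 24 of each result register acts as a mask bit: the sequences
+ * in this file first set it for every DQ and pup result register,
+ * then clear it only for the octets present in bus_act_mask, so the
+ * training engine reports results just for the wanted lanes (a
+ * reading of the 0x1 << 24 writes above).
+ */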
+ mask_results_pup_reg_map[bus_id], 0, 0x1 << 24)); + } + + return MV_OK; +} + +/* + * Dynamic read leveling sequence + */ +static int ddr3_tip_dynamic_per_bit_read_leveling_seq(u32 dev_num) +{ + u32 bus_id, dq_id; + u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map(); + u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg(); + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* mask PBS */ + for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_dq_reg_map[dq_id], 0x1 << 24, + 0x1 << 24)); + } + + /* Mask all results */ + for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) { + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_pup_reg_map[bus_id], 0x1 << 24, + 0x1 << 24)); + } + + /* Unmask only wanted */ + for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, dq_id / 8); + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + mask_results_dq_reg_map[dq_id], 0x0 << 24, + 0x1 << 24)); + } + + return MV_OK; +} + +/* + * Print write leveling supplementary results + */ +int ddr3_tip_print_wl_supp_result(u32 dev_num) +{ + u32 bus_id = 0, if_id = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + DEBUG_LEVELING(DEBUG_LEVEL_INFO, + ("I/F0 PUP0 Result[0 - success, 1-fail] ...\n")); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_id = 0; bus_id < octets_per_if_num; + bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + DEBUG_LEVELING(DEBUG_LEVEL_INFO, + ("%d ,", wr_supp_res[if_id] + [bus_id].is_pup_fail)); + } + } + DEBUG_LEVELING( + DEBUG_LEVEL_INFO, + ("I/F0 PUP0 Stage[0-phase_shift, 1-clock_shift, 2-align_shift] ...\n")); + + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bus_id = 0; bus_id < octets_per_if_num; + bus_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id); + DEBUG_LEVELING(DEBUG_LEVEL_INFO, + ("%d ,", wr_supp_res[if_id] + [bus_id].stage)); + } + } + + return MV_OK; +} + +#define RD_FIFO_PTR_LOW_STAT_INDIR_ADDR 0x9a +#define RD_FIFO_PTR_HIGH_STAT_INDIR_ADDR 0x9b +/* position of falling dqs edge in fifo; walking 1 */ +#define RD_FIFO_DQS_FALL_EDGE_POS_0 0x1 +#define RD_FIFO_DQS_FALL_EDGE_POS_1 0x2 +#define RD_FIFO_DQS_FALL_EDGE_POS_2 0x4 +#define RD_FIFO_DQS_FALL_EDGE_POS_3 0x8 +#define RD_FIFO_DQS_FALL_EDGE_POS_4 0x10 /* lock */ +/* position of rising dqs edge in fifo; walking 0 */ +#define RD_FIFO_DQS_RISE_EDGE_POS_0 0x1fff +#define RD_FIFO_DQS_RISE_EDGE_POS_1 0x3ffe +#define RD_FIFO_DQS_RISE_EDGE_POS_2 0x3ffd +#define RD_FIFO_DQS_RISE_EDGE_POS_3 0x3ffb +#define RD_FIFO_DQS_RISE_EDGE_POS_4 0x3ff7 /* lock */ +#define TEST_ADDR 0x8 +#define TAPS_PER_UI 32 +#define UI_PER_RD_SAMPLE 4 +#define TAPS_PER_RD_SAMPLE ((UI_PER_RD_SAMPLE) * (TAPS_PER_UI)) +#define MAX_RD_SAMPLES 32 +#define MAX_RL_VALUE ((MAX_RD_SAMPLES) * (TAPS_PER_RD_SAMPLE)) +#define RD_FIFO_DLY 8 +#define STEP_SIZE 64 +#define RL_JITTER_WIDTH_LMT 20 +#define ADLL_TAPS_IN_CYCLE 64 + +enum rl_dqs_burst_state { + RL_AHEAD = 0, + RL_INSIDE, + RL_BEHIND +}; + + +int mv_ddr_rl_dqs_burst(u32 dev_num, u32 if_id, u32 freq) +{ + enum 
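+ /*
+ * The RD_FIFO_DQS_*_POS_n values above can be read as edge positions
+ * in the read-fifo status pair: the falling DQS edge is tracked as a
+ * walking one (0x1, 0x2, 0x4, 0x8, 0x10) and the rising edge as a
+ * walking zero (0x1fff, 0x3ffe, 0x3ffd, 0x3ffb, 0x3ff7); when both
+ * reach position 4 the burst sits fully inside the fifo and the
+ * subphy may lock (an interpretation of the comments above).
+ */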
rl_dqs_burst_state rl_state[MAX_CS_NUM][MAX_BUS_NUM][MAX_INTERFACE_NUM] = { { {0} } }; + enum hws_ddr_phy subphy_type = DDR_PHY_DATA; + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + int cl_val = tm->interface_params[0].cas_l; + int rl_adll_val, rl_phase_val, sdr_cycle_incr, rd_sample, rd_ready; + int final_rd_sample, final_rd_ready; + int i, subphy_id, step; + int pass_lock_num = 0; + int init_pass_lock_num; + int phase_delta; + int min_phase, max_phase; + unsigned int max_cs = mv_ddr_cs_num_get(); + u32 rl_values[MAX_CS_NUM][MAX_BUS_NUM][MAX_INTERFACE_NUM] = { { {0} } }; + u32 rl_min_values[MAX_CS_NUM][MAX_BUS_NUM][MAX_INTERFACE_NUM] = { { {0} } }; + u32 rl_max_values[MAX_CS_NUM][MAX_BUS_NUM][MAX_INTERFACE_NUM] = { { {0} } }; + u32 rl_val, rl_min_val[MAX_CS_NUM], rl_max_val[MAX_CS_NUM]; + u32 reg_val_low, reg_val_high; + u32 reg_val, reg_mask; + uintptr_t test_addr = TEST_ADDR; + + + /* initialization */ + if (mv_ddr_is_ecc_ena()) { + ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, TRAINING_SW_2_REG, + ®_val, MASK_ALL_BITS); + reg_mask = (TRAINING_ECC_MUX_MASK << TRAINING_ECC_MUX_OFFS) | + (TRAINING_SW_OVRD_MASK << TRAINING_SW_OVRD_OFFS); + reg_val &= ~reg_mask; + reg_val |= (TRAINING_ECC_MUX_DIS << TRAINING_ECC_MUX_OFFS) | + (TRAINING_SW_OVRD_ENA << TRAINING_SW_OVRD_OFFS); + ddr3_tip_if_write(0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, TRAINING_SW_2_REG, + reg_val, MASK_ALL_BITS); + ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, TRAINING_REG, + ®_val, MASK_ALL_BITS); + reg_mask = (TRN_START_MASK << TRN_START_OFFS); + reg_val &= ~reg_mask; + reg_val |= TRN_START_ENA << TRN_START_OFFS; + ddr3_tip_if_write(0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, TRAINING_REG, + reg_val, MASK_ALL_BITS); + } + + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) + for (subphy_id = 0; subphy_id < MAX_BUS_NUM; subphy_id++) + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) + if (IS_BUS_ACTIVE(tm->bus_act_mask, subphy_id) == 0) + pass_lock_num++; /* increment on inactive subphys */ + + init_pass_lock_num = pass_lock_num / max_cs; + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + training_result[training_stage][if_id] = TEST_SUCCESS; + } + } + + /* search for dqs edges per subphy */ + if_id = 0; + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + + pass_lock_num = init_pass_lock_num; + ddr3_tip_if_write(0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, ODPG_DATA_CTRL_REG, + effective_cs << ODPG_DATA_CS_OFFS, + ODPG_DATA_CS_MASK << ODPG_DATA_CS_OFFS); + rl_min_val[effective_cs] = MAX_RL_VALUE; + rl_max_val[effective_cs] = 0; + step = STEP_SIZE; + for (i = 0; i < MAX_RL_VALUE; i += step) { + rl_val = 0; + sdr_cycle_incr = i / TAPS_PER_RD_SAMPLE; /* sdr cycle increment */ + rd_sample = cl_val + 2 * sdr_cycle_incr; + /* fifo out to in delay in search is constant */ + rd_ready = rd_sample + RD_FIFO_DLY; + + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, RD_DATA_SMPL_DLYS_REG, + rd_sample << RD_SMPL_DLY_CS_OFFS(effective_cs), + RD_SMPL_DLY_CS_MASK << RD_SMPL_DLY_CS_OFFS(effective_cs)); + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, RD_DATA_RDY_DLYS_REG, + rd_ready << RD_RDY_DLY_CS_OFFS(effective_cs), + RD_RDY_DLY_CS_MASK << RD_RDY_DLY_CS_OFFS(effective_cs)); + + /* one sdr (single data rate) cycle incremented on every four phases of ddr clock */ + sdr_cycle_incr = i % TAPS_PER_RD_SAMPLE; + rl_adll_val = sdr_cycle_incr % MAX_RD_SAMPLES; + rl_phase_val = 
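+ /*
+ * Worked numbers for the tap bookkeeping (illustrative): with
+ * TAPS_PER_UI = 32 and UI_PER_RD_SAMPLE = 4, one read sample spans
+ * TAPS_PER_RD_SAMPLE = 128 taps; e.g. i = 300 gives 300 / 128 = 2
+ * whole sdr cycles (rd_sample = cl_val + 4), and the remaining
+ * 300 % 128 = 44 taps split into adll = 44 % 32 = 12 and
+ * phase = 44 / 32 = 1 here.
+ */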
sdr_cycle_incr / MAX_RD_SAMPLES; + rl_val = ((rl_adll_val & RL_REF_DLY_MASK) << RL_REF_DLY_OFFS) | + ((rl_phase_val & RL_PH_SEL_MASK) << RL_PH_SEL_OFFS); + + /* write to all subphys (even to not connected or locked) */ + ddr3_tip_bus_write(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, ACCESS_TYPE_MULTICAST, + 0, DDR_PHY_DATA, RL_PHY_REG(effective_cs), rl_val); + + /* reset read fifo assertion */ + ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, if_id, SDRAM_CFG_REG, + DATA_PUP_RD_RESET_ENA << DATA_PUP_RD_RESET_OFFS, + DATA_PUP_RD_RESET_MASK << DATA_PUP_RD_RESET_OFFS); + + /* reset read fifo deassertion */ + ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, if_id, SDRAM_CFG_REG, + DATA_PUP_RD_RESET_DIS << DATA_PUP_RD_RESET_OFFS, + DATA_PUP_RD_RESET_MASK << DATA_PUP_RD_RESET_OFFS); + + /* perform one read burst */ + if (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask)) + readq(test_addr); + else + readl(test_addr); + + /* progress read ptr; decide on rl state per byte */ + for (subphy_id = 0; subphy_id < MAX_BUS_NUM; subphy_id++) { + if (rl_state[effective_cs][subphy_id][if_id] == RL_BEHIND) + continue; /* skip locked subphys */ + ddr3_tip_bus_read(dev_num, if_id, ACCESS_TYPE_UNICAST, subphy_id, DDR_PHY_DATA, + RD_FIFO_PTR_LOW_STAT_INDIR_ADDR, ®_val_low); + ddr3_tip_bus_read(dev_num, if_id, ACCESS_TYPE_UNICAST, subphy_id, DDR_PHY_DATA, + RD_FIFO_PTR_HIGH_STAT_INDIR_ADDR, ®_val_high); + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, + ("%s: cs %d, step %d, subphy %d, state %d, low 0x%04x, high 0x%04x; move to ", + __func__, effective_cs, i, subphy_id, + rl_state[effective_cs][subphy_id][if_id], + reg_val_low, reg_val_high)); + + switch (rl_state[effective_cs][subphy_id][if_id]) { + case RL_AHEAD: + /* improve search resolution getting closer to the window */ + if (reg_val_low == RD_FIFO_DQS_FALL_EDGE_POS_4 && + reg_val_high == RD_FIFO_DQS_RISE_EDGE_POS_4) { + rl_state[effective_cs][subphy_id][if_id] = RL_INSIDE; + rl_values[effective_cs][subphy_id][if_id] = i; + rl_min_values[effective_cs][subphy_id][if_id] = i; + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, + ("new state %d\n", + rl_state[effective_cs][subphy_id][if_id])); + } else if (reg_val_low == RD_FIFO_DQS_FALL_EDGE_POS_3 && + reg_val_high == RD_FIFO_DQS_RISE_EDGE_POS_3) { + step = (step < 2) ? step : 2; + } else if (reg_val_low == RD_FIFO_DQS_FALL_EDGE_POS_2 && + reg_val_high == RD_FIFO_DQS_RISE_EDGE_POS_2) { + step = (step < 16) ? step : 16; + } else if (reg_val_low == RD_FIFO_DQS_FALL_EDGE_POS_1 && + reg_val_high == RD_FIFO_DQS_RISE_EDGE_POS_1) { + step = (step < 32) ? step : 32; + } else if (reg_val_low == RD_FIFO_DQS_FALL_EDGE_POS_0 && + reg_val_high == RD_FIFO_DQS_RISE_EDGE_POS_0) { + step = (step < 64) ? 
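+ /*
+ * The scan step adapts to how close the DQS edges are: far from the
+ * window it advances STEP_SIZE (64) taps at a time and is clamped to
+ * 32, 16 and finally 2 taps as the edges walk through fifo positions
+ * 0..3, so the window boundary is located at fine resolution without
+ * stepping through all MAX_RL_VALUE taps one by one.
+ */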
step : 64; + } else { + /* otherwise, step is unchanged */ + } + break; + case RL_INSIDE: + if (reg_val_low == RD_FIFO_DQS_FALL_EDGE_POS_4 && + reg_val_high == RD_FIFO_DQS_RISE_EDGE_POS_4) { + rl_max_values[effective_cs][subphy_id][if_id] = i; + if ((rl_max_values[effective_cs][subphy_id][if_id] - + rl_min_values[effective_cs][subphy_id][if_id]) > + ADLL_TAPS_IN_CYCLE) { + rl_state[effective_cs][subphy_id][if_id] = RL_BEHIND; + rl_values[effective_cs][subphy_id][if_id] = + (i + rl_values[effective_cs][subphy_id][if_id]) / 2; + pass_lock_num++; + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, + ("new lock %d\n", pass_lock_num)); + if (rl_min_val[effective_cs] > + rl_values[effective_cs][subphy_id][if_id]) + rl_min_val[effective_cs] = + rl_values[effective_cs][subphy_id][if_id]; + if (rl_max_val[effective_cs] < + rl_values[effective_cs][subphy_id][if_id]) + rl_max_val[effective_cs] = + rl_values[effective_cs][subphy_id][if_id]; + step = 2; + } + } + if (reg_val_low != RD_FIFO_DQS_FALL_EDGE_POS_4 || + reg_val_high != RD_FIFO_DQS_RISE_EDGE_POS_4) { + if ((i - rl_values[effective_cs][subphy_id][if_id]) < + RL_JITTER_WIDTH_LMT) { + /* inside the jitter; not valid segment */ + rl_state[effective_cs][subphy_id][if_id] = RL_AHEAD; + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, + ("new state %d; jitter on mask\n", + rl_state[effective_cs][subphy_id][if_id])); + } else { /* finished valid segment */ + rl_state[effective_cs][subphy_id][if_id] = RL_BEHIND; + rl_values[effective_cs][subphy_id][if_id] = + (i + rl_values[effective_cs][subphy_id][if_id]) / 2; + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, + ("new state %d, solution %d\n", + rl_state[effective_cs][subphy_id][if_id], + rl_values[effective_cs][subphy_id][if_id])); + pass_lock_num++; + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, + ("new lock %d\n", pass_lock_num)); + if (rl_min_val[effective_cs] > + rl_values[effective_cs][subphy_id][if_id]) + rl_min_val[effective_cs] = + rl_values[effective_cs][subphy_id][if_id]; + if (rl_max_val[effective_cs] < + rl_values[effective_cs][subphy_id][if_id]) + rl_max_val[effective_cs] = + rl_values[effective_cs][subphy_id][if_id]; + step = 2; + } + } + break; + case RL_BEHIND: /* do nothing */ + break; + } + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, ("\n")); + } + DEBUG_LEVELING(DEBUG_LEVEL_TRACE, ("pass_lock_num %d\n", pass_lock_num)); + /* exit condition */ + if (pass_lock_num == MAX_BUS_NUM) + break; + } /* for-loop on i */ + + if (pass_lock_num != MAX_BUS_NUM) { + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, + ("%s: cs %d, pass_lock_num %d, max_bus_num %d, init_pass_lock_num %d\n", + __func__, effective_cs, pass_lock_num, MAX_BUS_NUM, init_pass_lock_num)); + for (subphy_id = 0; subphy_id < MAX_BUS_NUM; subphy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy_id); + DEBUG_LEVELING(DEBUG_LEVEL_ERROR, + ("%s: subphy %d %s\n", + __func__, subphy_id, + (rl_state[effective_cs][subphy_id][if_id] == RL_BEHIND) ? 
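+ /*
+ * Summary of the lock logic above (illustrative): a subphy moves
+ * RL_AHEAD -> RL_INSIDE when both edge patterns first match at
+ * position 4, and RL_INSIDE -> RL_BEHIND once the matching segment
+ * either grows past ADLL_TAPS_IN_CYCLE or ends after at least
+ * RL_JITTER_WIDTH_LMT (20) taps; the recorded solution is the
+ * segment midpoint, (segment_start + segment_end) / 2.
+ */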
+ "locked" : "not locked")); + } + } + } /* for-loop on effective_cs */ + + /* post-processing read leveling results */ + if_id = 0; + for (effective_cs = 0; effective_cs < max_cs; effective_cs++) { + phase_delta = 0; + i = rl_min_val[effective_cs]; + sdr_cycle_incr = i / TAPS_PER_RD_SAMPLE; /* sdr cycle increment */ + rd_sample = cl_val + 2 * sdr_cycle_incr; + rd_ready = rd_sample + RD_FIFO_DLY; + min_phase = (rl_min_val[effective_cs] - (sdr_cycle_incr * TAPS_PER_RD_SAMPLE)) % MAX_RD_SAMPLES; + max_phase = (rl_max_val[effective_cs] - (sdr_cycle_incr * TAPS_PER_RD_SAMPLE)) % MAX_RD_SAMPLES; + final_rd_sample = rd_sample; + final_rd_ready = rd_ready; + + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, RD_DATA_SMPL_DLYS_REG, + rd_sample << RD_SMPL_DLY_CS_OFFS(effective_cs), + RD_SMPL_DLY_CS_MASK << RD_SMPL_DLY_CS_OFFS(effective_cs)); + ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, RD_DATA_RDY_DLYS_REG, + rd_ready << RD_RDY_DLY_CS_OFFS(effective_cs), + RD_RDY_DLY_CS_MASK << RD_RDY_DLY_CS_OFFS(effective_cs)); + DEBUG_LEVELING(DEBUG_LEVEL_INFO, + ("%s: cs %d, min phase %d, max phase %d, read sample %d\n", + __func__, effective_cs, min_phase, max_phase, rd_sample)); + + for (subphy_id = 0; subphy_id < MAX_BUS_NUM; subphy_id++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy_id); + /* reduce sdr cycle per cs; extract rl adll and phase values */ + i = rl_values[effective_cs][subphy_id][if_id] - (sdr_cycle_incr * TAPS_PER_RD_SAMPLE); + rl_adll_val = i % MAX_RD_SAMPLES; + rl_phase_val = i / MAX_RD_SAMPLES; + rl_phase_val -= phase_delta; + DEBUG_LEVELING(DEBUG_LEVEL_INFO, + ("%s: final results: cs %d, subphy %d, read sample %d read ready %d, rl_phase_val %d, rl_adll_val %d\n", + __func__, effective_cs, subphy_id, final_rd_sample, + final_rd_ready, rl_phase_val, rl_adll_val)); + + rl_val = ((rl_adll_val & RL_REF_DLY_MASK) << RL_REF_DLY_OFFS) | + ((rl_phase_val & RL_PH_SEL_MASK) << RL_PH_SEL_OFFS); + ddr3_tip_bus_write(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, ACCESS_TYPE_UNICAST, + subphy_id, subphy_type, RL_PHY_REG(effective_cs), rl_val); + } + } /* for-loop on effective cs */ + + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + if (odt_config != 0) + CHECK_STATUS(ddr3_tip_write_additional_odt_setting(dev_num, if_id)); + } + + + /* reset read fifo assertion */ + ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, if_id, SDRAM_CFG_REG, + DATA_PUP_RD_RESET_ENA << DATA_PUP_RD_RESET_OFFS, + DATA_PUP_RD_RESET_MASK << DATA_PUP_RD_RESET_OFFS); + + /* reset read fifo deassertion */ + ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, if_id, SDRAM_CFG_REG, + DATA_PUP_RD_RESET_DIS << DATA_PUP_RD_RESET_OFFS, + DATA_PUP_RD_RESET_MASK << DATA_PUP_RD_RESET_OFFS); + + return MV_OK; +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_leveling.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_leveling.h new file mode 100644 index 000000000..6719fb83f --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_leveling.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef _DDR3_TRAINING_LEVELING_H_ +#define _DDR3_TRAINING_LEVELING_H_ + +#define MAX_DQ_READ_LEVELING_DELAY 15 + +int ddr3_tip_print_wl_supp_result(u32 dev_num); +int ddr3_tip_calc_cs_mask(u32 dev_num, u32 if_id, u32 effective_cs, + u32 *cs_mask); + +#endif /* _DDR3_TRAINING_LEVELING_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_pbs.c b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_pbs.c new file mode 100644 index 000000000..b7dfebd93 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr3_training_pbs.c @@ -0,0 +1,1012 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include "ddr3_init.h" +#include "mv_ddr_training_db.h" +#include "mv_ddr_common.h" +#include "mv_ddr_regs.h" + +#define TYPICAL_PBS_VALUE 12 + +u32 nominal_adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]; +enum hws_training_ip_stat train_status[MAX_INTERFACE_NUM]; +u8 result_mat[MAX_INTERFACE_NUM][MAX_BUS_NUM][BUS_WIDTH_IN_BITS]; +u8 result_mat_rx_dqs[MAX_INTERFACE_NUM][MAX_BUS_NUM][MAX_CS_NUM]; +/* 4-EEWA, 3-EWA, 2-SWA, 1-Fail, 0-Pass */ +u8 result_all_bit[MAX_BUS_NUM * BUS_WIDTH_IN_BITS * MAX_INTERFACE_NUM]; +u8 max_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u8 min_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u8 max_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u8 min_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u32 pbsdelay_per_pup[NUM_OF_PBS_MODES][MAX_INTERFACE_NUM][MAX_BUS_NUM][MAX_CS_NUM]; +u8 adll_shift_lock[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +u8 adll_shift_val[MAX_INTERFACE_NUM][MAX_BUS_NUM]; +enum hws_pattern pbs_pattern = PATTERN_VREF; +static u8 pup_state[MAX_INTERFACE_NUM][MAX_BUS_NUM]; + +/* + * Name: ddr3_tip_pbs + * Desc: PBS + * Args: TBD + * Notes: + * Returns: OK if success, other error code if fail. + */ +int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode) +{ + u32 res0[MAX_INTERFACE_NUM]; + int adll_tap = MEGA / mv_ddr_freq_get(medium_freq) / 64; + int pad_num = 0; + enum hws_search_dir search_dir = + (pbs_mode == PBS_RX_MODE) ? HWS_HIGH2LOW : HWS_LOW2HIGH; + enum hws_dir dir = (pbs_mode == PBS_RX_MODE) ? OPER_READ : OPER_WRITE; + int iterations = (pbs_mode == PBS_RX_MODE) ? 31 : 63; + u32 res_valid_mask = (pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f; + int init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations; + enum hws_edge_compare search_edge = EDGE_FP; + u32 pup = 0, bit = 0, if_id = 0, all_lock = 0, cs_num = 0; + u32 reg_addr = 0; + u32 validation_val = 0; + u32 cs_enable_reg_val[MAX_INTERFACE_NUM]; + u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg(); + u8 temp = 0; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + /* save current cs enable reg val */ + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + + /* save current cs enable reg val */ + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, cs_enable_reg_val, MASK_ALL_BITS)); + + /* enable single cs */ + CHECK_STATUS(ddr3_tip_if_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3))); + } + + reg_addr = (pbs_mode == PBS_RX_MODE) ? 
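+ /*
+ * RX/TX asymmetry in the setup above: RX PBS searches high-to-low
+ * over a 5-bit range (31 steps, result mask 0x1f) and lands on the
+ * CRX register, while TX searches low-to-high over a 6-bit range
+ * (63 steps, mask 0x3f) on CTX; adll_tap estimates one tap in ps,
+ * e.g. MEGA / 400 / 64 is roughly 39 ps at a 400 MHz medium
+ * frequency (a sketch, assuming mv_ddr_freq_get() returns MHz).
+ */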
+ CRX_PHY_REG(effective_cs) :
+ CTX_PHY_REG(effective_cs);
+ ddr3_tip_read_adll_value(dev_num, nominal_adll, reg_addr, MASK_ALL_BITS);
+
+ /* stage 1 shift ADLL */
+ ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST,
+ PARAM_NOT_CARE, ACCESS_TYPE_MULTICAST,
+ PARAM_NOT_CARE, RESULT_PER_BIT,
+ HWS_CONTROL_ELEMENT_ADLL, search_dir, dir,
+ tm->if_act_mask, init_val, iterations,
+ pbs_pattern, search_edge, CS_SINGLE, cs_num,
+ train_status);
+ validation_val = (pbs_mode == PBS_RX_MODE) ? 0x1f : 0;
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ min_adll_per_pup[if_id][pup] =
+ (pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f;
+ pup_state[if_id][pup] = 0x3;
+ adll_shift_lock[if_id][pup] = 1;
+ max_adll_per_pup[if_id][pup] = 0x0;
+ }
+ }
+
+ /* EBA */
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
+ CHECK_STATUS(ddr3_tip_if_read
+ (dev_num, ACCESS_TYPE_MULTICAST,
+ PARAM_NOT_CARE,
+ mask_results_dq_reg_map[
+ bit + pup * BUS_WIDTH_IN_BITS],
+ res0, MASK_ALL_BITS));
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
+ if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
+ ("FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
+ if_id, bit, pup,
+ res0[if_id]));
+ if (pup_state[if_id][pup] != 3)
+ continue;
+ /* if not in EBA state, then move to next pup */
+
+ if ((res0[if_id] & 0x2000000) == 0) {
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
+ ("-- Fail Training IP\n"));
+ /* training machine failed */
+ pup_state[if_id][pup] = 1;
+ adll_shift_lock[if_id][pup] = 0;
+ continue;
+ }
+
+ else if ((res0[if_id] & res_valid_mask) ==
+ validation_val) {
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
+ ("-- FAIL EBA %d %d %d %d\n",
+ if_id, bit, pup,
+ res0[if_id]));
+ pup_state[if_id][pup] = 4;
+ /* this pup moves to EEBA */
+ adll_shift_lock[if_id][pup] = 0;
+ continue;
+ } else {
+ /*
+ * The search ended in Pass; we need
+ * a Fail
+ */
+ res0[if_id] =
+ (pbs_mode == PBS_RX_MODE) ?
+ ((res0[if_id] &
+ res_valid_mask) + 1) :
+ ((res0[if_id] &
+ res_valid_mask) - 1);
+ max_adll_per_pup[if_id][pup] =
+ (max_adll_per_pup[if_id][pup] <
+ res0[if_id]) ?
+ (u8)res0[if_id] :
+ max_adll_per_pup[if_id][pup];
+ min_adll_per_pup[if_id][pup] =
+ (res0[if_id] >
+ min_adll_per_pup[if_id][pup]) ?
+ min_adll_per_pup[if_id][pup] :
+ (u8)
+ res0[if_id];
+ /*
+ * vs. the Rx, we are searching for the
+ * smallest value of DQ shift, so all
+ * buses would fail
+ */
+ adll_shift_val[if_id][pup] =
+ (pbs_mode == PBS_RX_MODE) ?
+ max_adll_per_pup[if_id][pup] :
+ min_adll_per_pup[if_id][pup];
+ }
+ }
+ }
+ }
+
+ /* EEBA */
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+
+ if (pup_state[if_id][pup] != 4)
+ continue;
+ /*
+ * if pup state is different from EEBA, then move to
+ * next pup
+ */
+ reg_addr = (pbs_mode == PBS_RX_MODE) ?
+ (0x54 + effective_cs * 0x10) :
+ (0x14 + effective_cs * 0x10);
+ CHECK_STATUS(ddr3_tip_bus_write
+ (dev_num, ACCESS_TYPE_UNICAST, if_id,
+ ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA,
+ reg_addr, 0x1f));
+ reg_addr = (pbs_mode == PBS_RX_MODE) ?
+ (0x55 + effective_cs * 0x10) : + (0x15 + effective_cs * 0x10); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, if_id, + ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA, + reg_addr, 0x1f)); + /* initialize the Edge2 Max. */ + adll_shift_val[if_id][pup] = 0; + min_adll_per_pup[if_id][pup] = + (pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f; + max_adll_per_pup[if_id][pup] = 0x0; + + ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, RESULT_PER_BIT, + HWS_CONTROL_ELEMENT_ADLL, + search_dir, dir, + tm->if_act_mask, init_val, + iterations, pbs_pattern, + search_edge, CS_SINGLE, cs_num, + train_status); + DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO, + ("ADLL shift results:\n")); + + for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) { + CHECK_STATUS(ddr3_tip_if_read + (dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, + mask_results_dq_reg_map[ + bit + pup * + BUS_WIDTH_IN_BITS], + res0, MASK_ALL_BITS)); + DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE, + ("FP I/F %d, bit:%d, pup:%d res0 0x%x\n", + if_id, bit, pup, + res0[if_id])); + + if ((res0[if_id] & 0x2000000) == 0) { + DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE, + (" -- EEBA Fail\n")); + bit = BUS_WIDTH_IN_BITS; + /* exit bit loop */ + DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE, + ("-- EEBA Fail Training IP\n")); + /* + * training machine failed but pass + * before in the EBA so maybe the DQS + * shift change env. + */ + pup_state[if_id][pup] = 2; + adll_shift_lock[if_id][pup] = 0; + reg_addr = (pbs_mode == PBS_RX_MODE) ? + (0x54 + effective_cs * 0x10) : + (0x14 + effective_cs * 0x10); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + 0x0)); + reg_addr = (pbs_mode == PBS_RX_MODE) ? + (0x55 + effective_cs * 0x10) : + (0x15 + effective_cs * 0x10); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + 0x0)); + continue; + } else if ((res0[if_id] & res_valid_mask) == + validation_val) { + /* exit bit loop */ + bit = BUS_WIDTH_IN_BITS; + DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE, + ("-- FAIL EEBA\n")); + /* this pup move to SBA */ + pup_state[if_id][pup] = 2; + adll_shift_lock[if_id][pup] = 0; + reg_addr = (pbs_mode == PBS_RX_MODE) ? + (0x54 + effective_cs * 0x10) : + (0x14 + effective_cs * 0x10); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + 0x0)); + reg_addr = (pbs_mode == PBS_RX_MODE) ? + (0x55 + effective_cs * 0x10) : + (0x15 + effective_cs * 0x10); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, + ACCESS_TYPE_UNICAST, + if_id, + ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + 0x0)); + continue; + } else { + adll_shift_lock[if_id][pup] = 1; + /* + * The search ended in Pass we need + * Fail + */ + res0[if_id] = + (pbs_mode == PBS_RX_MODE) ? + ((res0[if_id] & + res_valid_mask) + 1) : + ((res0[if_id] & + res_valid_mask) - 1); + max_adll_per_pup[if_id][pup] = + (max_adll_per_pup[if_id][pup] < + res0[if_id]) ? + (u8)res0[if_id] : + max_adll_per_pup[if_id][pup]; + min_adll_per_pup[if_id][pup] = + (res0[if_id] > + min_adll_per_pup[if_id][pup]) ? + min_adll_per_pup[if_id][pup] : + (u8)res0[if_id]; + /* + * vs the Rx we are searching for the + * smallest value of DQ shift so all Bus + * would fail + */ + adll_shift_val[if_id][pup] = + (pbs_mode == PBS_RX_MODE) ? 
+ max_adll_per_pup[if_id][pup] :
+ min_adll_per_pup[if_id][pup];
+ }
+ }
+ }
+ }
+
+ /* Print Stage result */
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
+ ("FP I/F %d, ADLL Shift for EBA: pup[%d] Lock status = %d Lock Val = %d,%d\n",
+ if_id, pup,
+ adll_shift_lock[if_id][pup],
+ max_adll_per_pup[if_id][pup],
+ min_adll_per_pup[if_id][pup]));
+ }
+ }
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
+ ("Update ADLL Shift of all pups:\n"));
+
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ if (adll_shift_lock[if_id][pup] != 1)
+ continue;
+ /* if pup not locked continue to next pup */
+
+ reg_addr = (pbs_mode == PBS_RX_MODE) ?
+ (0x3 + effective_cs * 4) :
+ (0x1 + effective_cs * 4);
+ CHECK_STATUS(ddr3_tip_bus_write
+ (dev_num, ACCESS_TYPE_UNICAST, if_id,
+ ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA,
+ reg_addr, adll_shift_val[if_id][pup]));
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
+ ("FP I/F %d, Pup[%d] = %d\n", if_id,
+ pup, adll_shift_val[if_id][pup]));
+ }
+ }
+
+ /* PBS EEBA&EBA */
+ /* Start the Per Bit Skew search */
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ max_pbs_per_pup[if_id][pup] = 0x0;
+ min_pbs_per_pup[if_id][pup] = 0x1f;
+ for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
+ /* reset result for PBS */
+ result_all_bit[bit + pup * BUS_WIDTH_IN_BITS +
+ if_id * MAX_BUS_NUM *
+ BUS_WIDTH_IN_BITS] = 0;
+ }
+ }
+ }
+
+ iterations = 31;
+ search_dir = HWS_LOW2HIGH;
+ /* !!!!! ran sh (search_dir == HWS_LOW2HIGH)?0:iterations; */
+ init_val = 0;
+
+ ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+ ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+ RESULT_PER_BIT, HWS_CONTROL_ELEMENT_DQ_SKEW,
+ search_dir, dir, tm->if_act_mask, init_val,
+ iterations, pbs_pattern, search_edge,
+ CS_SINGLE, cs_num, train_status);
+
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ if (adll_shift_lock[if_id][pup] != 1) {
+ /* if pup not locked, continue to next pup */
+ continue;
+ }
+
+ for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
+ CHECK_STATUS(ddr3_tip_if_read
+ (dev_num, ACCESS_TYPE_MULTICAST,
+ PARAM_NOT_CARE,
+ mask_results_dq_reg_map[
+ bit +
+ pup * BUS_WIDTH_IN_BITS],
+ res0, MASK_ALL_BITS));
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
+ ("Per Bit Skew search, FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
+ if_id, bit, pup,
+ res0[if_id]));
+ if ((res0[if_id] & 0x2000000) == 0) {
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
+ ("--EBA PBS Fail - Training IP machine\n"));
+ /* exit the bit loop */
+ bit = BUS_WIDTH_IN_BITS;
+ /*
+ * ADLL is no longer locked; a new
+ * search is needed
+ */
+ adll_shift_lock[if_id][pup] = 0;
+ /* Move to SBA */
+ pup_state[if_id][pup] = 2;
+ max_pbs_per_pup[if_id][pup] = 0x0;
+ min_pbs_per_pup[if_id][pup] = 0x1f;
+ continue;
+ } else {
+ temp = (u8)(res0[if_id] &
+ res_valid_mask);
+ max_pbs_per_pup[if_id][pup] =
+ (temp >
+ max_pbs_per_pup[if_id][pup]) ?
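+ /*
+ * pup_state values used through this stage, following the
+ * surrounding comments: 3 = EBA (initial edge search), 4 = EEBA
+ * (retry with a widened DQS window), 2 = SBA (per-pup fallback
+ * search), 1 = failed; min/max PBS statistics are only accumulated
+ * while the pup's ADLL shift is still locked.
+ */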
temp :
+ max_pbs_per_pup[if_id][pup];
+ min_pbs_per_pup[if_id][pup] =
+ (temp <
+ min_pbs_per_pup[if_id][pup]) ?
+ temp :
+ min_pbs_per_pup[if_id][pup];
+ result_all_bit[bit +
+ pup * BUS_WIDTH_IN_BITS +
+ if_id * MAX_BUS_NUM *
+ BUS_WIDTH_IN_BITS] =
+ temp;
+ }
+ }
+ }
+
+ /* Check all Pup lock */
+ all_lock = 1;
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ all_lock = all_lock * adll_shift_lock[if_id][pup];
+ }
+ }
+
+ /* Only if not all Pups Lock */
+ if (all_lock == 0) {
+ DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
+ ("##########ADLL shift for SBA###########\n"));
+
+ /* ADLL shift for SBA */
+ search_dir = (pbs_mode == PBS_RX_MODE) ? HWS_LOW2HIGH :
+ HWS_HIGH2LOW;
+ init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations;
+ for (pup = 0; pup < octets_per_if_num; pup++) {
+ VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+ for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
+ if_id++) {
+ VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+ if (adll_shift_lock[if_id][pup] == 1) {
+ /* if pup locked, continue to next pup */
+ continue;
+ }
+ /* re-initialize the variables before the new search */
+ adll_shift_lock[if_id][pup] = 0;
+ reg_addr = (pbs_mode == PBS_RX_MODE) ?
+ (0x54 + effective_cs * 0x10) :
+ (0x14 + effective_cs * 0x10);
+ CHECK_STATUS(ddr3_tip_bus_write
+ (dev_num, ACCESS_TYPE_UNICAST,
+ if_id, ACCESS_TYPE_UNICAST, pup,
+ DDR_PHY_DATA, reg_addr, 0));
+ reg_addr = (pbs_mode == PBS_RX_MODE) ?
+ (0x55 + effective_cs * 0x10) :
+ (0x15 + effective_cs * 0x10);
+ CHECK_STATUS(ddr3_tip_bus_write
+ (dev_num, ACCESS_TYPE_UNICAST,
+ if_id, ACCESS_TYPE_UNICAST, pup,
+ DDR_PHY_DATA, reg_addr, 0));
+ reg_addr = (pbs_mode == PBS_RX_MODE) ?
+ (0x5f + effective_cs * 0x10) :
+ (0x1f + effective_cs * 0x10);
+ CHECK_STATUS(ddr3_tip_bus_write
+ (dev_num, ACCESS_TYPE_UNICAST,
+ if_id, ACCESS_TYPE_UNICAST, pup,
+ DDR_PHY_DATA, reg_addr, 0));
+ /* initialize the Edge2 Max. */
+ adll_shift_val[if_id][pup] = 0;
+ min_adll_per_pup[if_id][pup] = 0x1f;
+ max_adll_per_pup[if_id][pup] = 0x0;
+
+ ddr3_tip_ip_training(dev_num,
+ ACCESS_TYPE_MULTICAST,
+ PARAM_NOT_CARE,
+ ACCESS_TYPE_MULTICAST,
+ PARAM_NOT_CARE,
+ RESULT_PER_BIT,
+ HWS_CONTROL_ELEMENT_ADLL,
+ search_dir, dir,
+ tm->if_act_mask,
+ init_val, iterations,
+ pbs_pattern,
+ search_edge, CS_SINGLE,
+ cs_num, train_status);
+
+ for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
+ CHECK_STATUS(ddr3_tip_if_read
+ (dev_num,
+ ACCESS_TYPE_MULTICAST,
+ PARAM_NOT_CARE,
+ mask_results_dq_reg_map
+ [bit +
+ pup *
+ BUS_WIDTH_IN_BITS],
+ res0, MASK_ALL_BITS));
+ DEBUG_PBS_ENGINE(
+ DEBUG_LEVEL_INFO,
+ ("FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
+ if_id, bit, pup, res0[if_id]));
+ if ((res0[if_id] & 0x2000000) == 0) {
+ /* exit the bit loop */
+ bit = BUS_WIDTH_IN_BITS;
+ /* Fail SBA --> Fail PBS */
+ pup_state[if_id][pup] = 1;
+ DEBUG_PBS_ENGINE
+ (DEBUG_LEVEL_INFO,
+ (" SBA Fail\n"));
+ continue;
+ } else {
+ /*
+ * - increment to get all
+ * 8 bit lock.
+ */
+ adll_shift_lock[if_id][pup]++;
+ /*
+ * The search ended in Pass;
+ * we need a Fail
+ */
+ res0[if_id] =
+ (pbs_mode == PBS_RX_MODE) ?
+ ((res0[if_id] & res_valid_mask) + 1) :
+ ((res0[if_id] & res_valid_mask) - 1);
+ max_adll_per_pup[if_id][pup] =
+ (max_adll_per_pup[if_id]
+ [pup] < res0[if_id]) ?
+ (u8)res0[if_id] :
+ max_adll_per_pup[if_id][pup];
+ min_adll_per_pup[if_id][pup] =
+ (res0[if_id] >
+ min_adll_per_pup[if_id]
+ [pup]) ?
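+ /*
+ * In the SBA branch the lock flag doubles as a per-bit tally:
+ * adll_shift_lock is incremented once per passing bit, and only a
+ * final count of 8 (all bits of the octet) is converted into a real
+ * lock just below; a training-IP failure instead marks the pup with
+ * pup_state = 1 (failed).
+ */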
+ min_adll_per_pup[if_id][pup] : + (u8)res0[if_id]; + /* + * vs the Rx we are searching for + * the smallest value of DQ shift + * so all Bus would fail + */ + adll_shift_val[if_id][pup] = + (pbs_mode == PBS_RX_MODE) ? + max_adll_per_pup[if_id][pup] : + min_adll_per_pup[if_id][pup]; + } + } + /* 1 is lock */ + adll_shift_lock[if_id][pup] = + (adll_shift_lock[if_id][pup] == 8) ? + 1 : 0; + reg_addr = (pbs_mode == PBS_RX_MODE) ? + (0x3 + effective_cs * 4) : + (0x1 + effective_cs * 4); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_UNICAST, + if_id, ACCESS_TYPE_UNICAST, pup, + DDR_PHY_DATA, reg_addr, + adll_shift_val[if_id][pup])); + DEBUG_PBS_ENGINE( + DEBUG_LEVEL_INFO, + ("adll_shift_lock[%x][%x] = %x\n", + if_id, pup, + adll_shift_lock[if_id][pup])); + } + } + + /* End ADLL Shift for SBA */ + /* Start the Per Bit Skew search */ + /* The ADLL shift finished with a Pass */ + search_edge = (pbs_mode == PBS_RX_MODE) ? EDGE_PF : EDGE_FP; + search_dir = (pbs_mode == PBS_RX_MODE) ? + HWS_LOW2HIGH : HWS_HIGH2LOW; + iterations = 0x1f; + /* - The initial value is different in Rx and Tx mode */ + init_val = (pbs_mode == PBS_RX_MODE) ? 0 : iterations; + + ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, RESULT_PER_BIT, + HWS_CONTROL_ELEMENT_DQ_SKEW, + search_dir, dir, tm->if_act_mask, + init_val, iterations, pbs_pattern, + search_edge, CS_SINGLE, cs_num, + train_status); + + for (pup = 0; pup < octets_per_if_num; pup++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup); + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; + if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) { + CHECK_STATUS(ddr3_tip_if_read + (dev_num, + ACCESS_TYPE_MULTICAST, + PARAM_NOT_CARE, + mask_results_dq_reg_map + [bit + + pup * + BUS_WIDTH_IN_BITS], + res0, MASK_ALL_BITS)); + if (pup_state[if_id][pup] != 2) { + /* + * if pup is not SBA continue + * to next pup + */ + bit = BUS_WIDTH_IN_BITS; + continue; + } + DEBUG_PBS_ENGINE( + DEBUG_LEVEL_INFO, + ("Per Bit Skew search, PF I/F %d, bit:%d, pup:%d res0 0x%x\n", + if_id, bit, pup, res0[if_id])); + if ((res0[if_id] & 0x2000000) == 0) { + DEBUG_PBS_ENGINE + (DEBUG_LEVEL_INFO, + ("SBA Fail\n")); + + max_pbs_per_pup[if_id][pup] = + 0x1f; + result_all_bit[ + bit + pup * + BUS_WIDTH_IN_BITS + + if_id * MAX_BUS_NUM * + BUS_WIDTH_IN_BITS] = + 0x1f; + } else { + temp = (u8)(res0[if_id] & + res_valid_mask); + max_pbs_per_pup[if_id][pup] = + (temp > + max_pbs_per_pup[if_id] + [pup]) ? temp : + max_pbs_per_pup + [if_id][pup]; + min_pbs_per_pup[if_id][pup] = + (temp < + min_pbs_per_pup[if_id] + [pup]) ? 
temp : + min_pbs_per_pup + [if_id][pup]; + result_all_bit[ + bit + pup * + BUS_WIDTH_IN_BITS + + if_id * MAX_BUS_NUM * + BUS_WIDTH_IN_BITS] = + temp; + adll_shift_lock[if_id][pup] = 1; + } + } + } + } + + /* Check all Pup state */ + all_lock = 1; + for (pup = 0; pup < octets_per_if_num; pup++) { + /* + * DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO, + * ("pup_state[%d][%d] = %d\n",if_id,pup,pup_state + * [if_id][pup])); + */ + } + } + + /* END OF SBA */ + /* Norm */ + for (pup = 0; pup < octets_per_if_num; pup++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup); + for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) { + for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; + if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + /* if pup not lock continue to next pup */ + if (adll_shift_lock[if_id][pup] != 1) { + DEBUG_PBS_ENGINE( + DEBUG_LEVEL_ERROR, + ("PBS failed for IF #%d\n", + if_id)); + training_result[training_stage][if_id] + = TEST_FAILED; + + result_mat[if_id][pup][bit] = 0; + max_pbs_per_pup[if_id][pup] = 0; + min_pbs_per_pup[if_id][pup] = 0; + } else { + training_result[ + training_stage][if_id] = + (training_result[training_stage] + [if_id] == TEST_FAILED) ? + TEST_FAILED : TEST_SUCCESS; + result_mat[if_id][pup][bit] = + result_all_bit[ + bit + pup * + BUS_WIDTH_IN_BITS + + if_id * MAX_BUS_NUM * + BUS_WIDTH_IN_BITS] - + min_pbs_per_pup[if_id][pup]; + } + DEBUG_PBS_ENGINE( + DEBUG_LEVEL_INFO, + ("The abs min_pbs[%d][%d] = %d\n", + if_id, pup, + min_pbs_per_pup[if_id][pup])); + } + } + } + + /* Clean all results */ + ddr3_tip_clean_pbs_result(dev_num, pbs_mode); + + /* DQ PBS register update with the final result */ + for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) { + VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id); + for (pup = 0; pup < octets_per_if_num; pup++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup); + + DEBUG_PBS_ENGINE( + DEBUG_LEVEL_INFO, + ("Final Results: if_id %d, pup %d, Pup State: %d\n", + if_id, pup, pup_state[if_id][pup])); + for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) { + if (dq_map_table == NULL) { + DEBUG_PBS_ENGINE( + DEBUG_LEVEL_ERROR, + ("dq_map_table not initialized\n")); + return MV_FAIL; + } + pad_num = dq_map_table[ + bit + pup * BUS_WIDTH_IN_BITS + + if_id * BUS_WIDTH_IN_BITS * + MAX_BUS_NUM]; + DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO, + ("result_mat: %d ", + result_mat[if_id][pup] + [bit])); + reg_addr = (pbs_mode == PBS_RX_MODE) ? 
+					PBS_RX_PHY_REG(effective_cs, 0) :
+					PBS_TX_PHY_REG(effective_cs, 0);
+				CHECK_STATUS(ddr3_tip_bus_write
+					     (dev_num, ACCESS_TYPE_UNICAST,
+					      if_id, ACCESS_TYPE_UNICAST, pup,
+					      DDR_PHY_DATA, reg_addr + pad_num,
+					      result_mat[if_id][pup][bit]));
+			}
+
+			if (max_pbs_per_pup[if_id][pup] == min_pbs_per_pup[if_id][pup]) {
+				temp = TYPICAL_PBS_VALUE;
+			} else {
+				temp = ((max_adll_per_pup[if_id][pup] -
+					 min_adll_per_pup[if_id][pup]) *
+					adll_tap /
+					(max_pbs_per_pup[if_id][pup] -
+					 min_pbs_per_pup[if_id][pup]));
+			}
+			pbsdelay_per_pup[pbs_mode]
+				[if_id][pup][effective_cs] = temp;
+
+			/* RX results ready, write RX also */
+			if (pbs_mode == PBS_TX_MODE) {
+				/* Write TX results */
+				reg_addr = (0x14 + effective_cs * 0x10);
+				CHECK_STATUS(ddr3_tip_bus_write
+					     (dev_num, ACCESS_TYPE_UNICAST,
+					      if_id, ACCESS_TYPE_UNICAST, pup,
+					      DDR_PHY_DATA, reg_addr,
+					      (max_pbs_per_pup[if_id][pup] -
+					       min_pbs_per_pup[if_id][pup]) /
+					      2));
+				reg_addr = (0x15 + effective_cs * 0x10);
+				CHECK_STATUS(ddr3_tip_bus_write
+					     (dev_num, ACCESS_TYPE_UNICAST,
+					      if_id, ACCESS_TYPE_UNICAST, pup,
+					      DDR_PHY_DATA, reg_addr,
+					      (max_pbs_per_pup[if_id][pup] -
+					       min_pbs_per_pup[if_id][pup]) /
+					      2));
+
+				/* Write previously stored RX results */
+				reg_addr = (0x54 + effective_cs * 0x10);
+				CHECK_STATUS(ddr3_tip_bus_write
+					     (dev_num, ACCESS_TYPE_UNICAST,
+					      if_id, ACCESS_TYPE_UNICAST, pup,
+					      DDR_PHY_DATA, reg_addr,
+					      result_mat_rx_dqs[if_id][pup]
+					      [effective_cs]));
+				reg_addr = (0x55 + effective_cs * 0x10);
+				CHECK_STATUS(ddr3_tip_bus_write
+					     (dev_num, ACCESS_TYPE_UNICAST,
+					      if_id, ACCESS_TYPE_UNICAST, pup,
+					      DDR_PHY_DATA, reg_addr,
+					      result_mat_rx_dqs[if_id][pup]
+					      [effective_cs]));
+			} else {
+				/*
+				 * RX results may affect RL results
+				 * correctness, so just store the results
+				 * that will be written in the TX stage
+				 */
+				result_mat_rx_dqs[if_id][pup][effective_cs] =
+					(max_pbs_per_pup[if_id][pup] -
+					 min_pbs_per_pup[if_id][pup]) / 2;
+			}
+			DEBUG_PBS_ENGINE(
+				DEBUG_LEVEL_INFO,
+				(", PBS tap=%d [psec] ==> skew observed = %d\n",
+				 temp,
+				 ((max_pbs_per_pup[if_id][pup] -
+				   min_pbs_per_pup[if_id][pup]) *
+				  temp)));
+		}
+	}
+
+	/* Write back to the phy the default values */
+	reg_addr = (pbs_mode == PBS_RX_MODE) ?
+		CRX_PHY_REG(effective_cs) :
+		CTX_PHY_REG(effective_cs);
+	ddr3_tip_write_adll_value(dev_num, nominal_adll, reg_addr);
+
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		reg_addr = (pbs_mode == PBS_RX_MODE) ?
+			(0x5a + effective_cs * 0x10) :
+			(0x1a + effective_cs * 0x10);
+		CHECK_STATUS(ddr3_tip_bus_write
+			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
+			      ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA, reg_addr,
+			      0));
+
+		/* restore cs enable value */
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		CHECK_STATUS(ddr3_tip_if_write
+			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
+			      DUAL_DUNIT_CFG_REG, cs_enable_reg_val[if_id],
+			      MASK_ALL_BITS));
+	}
+
+	/* exit test mode */
+	CHECK_STATUS(ddr3_tip_if_write
+		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+		      ODPG_WR_RD_MODE_ENA_REG, 0xffff, MASK_ALL_BITS));
+
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (pup = 0; pup < octets_per_if_num; pup++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+			/*
+			 * no valid window found
+			 * (no lock at EBA ADLL shift at EBS)
+			 */
+			if (pup_state[if_id][pup] == 1)
+				return MV_FAIL;
+		}
+	}
+
+	return MV_OK;
+}
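+
+/*
+ * A worked example of the PBS tap-size estimate above (illustrative
+ * numbers, not measured values): with an ADLL window of max_adll = 20
+ * and min_adll = 8 taps, an ADLL tap of adll_tap = 20 ps and a PBS
+ * range of max_pbs = 24, min_pbs = 4 steps, the per-step delay becomes
+ * (20 - 8) * 20 / (24 - 4) = 12 ps, and the observed skew printed above
+ * is (24 - 4) * 12 = 240 ps.
+ */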
+
+/*
+ * Name: ddr3_tip_pbs_rx.
+ * Desc: PBS RX
+ * Args: TBD
+ * Notes:
+ * Returns: OK if success, other error code if fail.
+ */
+int ddr3_tip_pbs_rx(u32 uidev_num)
+{
+	return ddr3_tip_pbs(uidev_num, PBS_RX_MODE);
+}
+
+/*
+ * Name: ddr3_tip_pbs_tx.
+ * Desc: PBS TX
+ * Args: TBD
+ * Notes:
+ * Returns: OK if success, other error code if fail.
+ */
+int ddr3_tip_pbs_tx(u32 uidev_num)
+{
+	return ddr3_tip_pbs(uidev_num, PBS_TX_MODE);
+}
+
+#ifdef DDR_VIEWER_TOOL
+/*
+ * Print PBS Result
+ */
+int ddr3_tip_print_all_pbs_result(u32 dev_num)
+{
+	u32 curr_cs;
+	unsigned int max_cs = mv_ddr_cs_num_get();
+
+	for (curr_cs = 0; curr_cs < max_cs; curr_cs++) {
+		ddr3_tip_print_pbs_result(dev_num, curr_cs, PBS_RX_MODE);
+		ddr3_tip_print_pbs_result(dev_num, curr_cs, PBS_TX_MODE);
+	}
+
+	return MV_OK;
+}
+
+/*
+ * Print PBS Result
+ */
+int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode)
+{
+	u32 data_value = 0, bit = 0, if_id = 0, pup = 0;
+	u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
+		PBS_RX_PHY_REG(cs_num, 0) :
+		PBS_TX_PHY_REG(cs_num, 0);
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	printf("%s,CS%d,PBS,ADLLRATIO,,,",
+	       (pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx", cs_num);
+
+	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (pup = 0; pup < octets_per_if_num; pup++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+			printf("%d,",
+			       pbsdelay_per_pup[pbs_mode][if_id][pup][cs_num]);
+		}
+	}
+	printf("CS%d, %s ,PBS\n", cs_num,
+	       (pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
+
+	for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
+		printf("%s, DQ", (pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
+		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+			VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+			printf("%d ,PBS,,, ", bit);
+			for (pup = 0; pup <= octets_per_if_num;
+			     pup++) {
+				VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
+				CHECK_STATUS(ddr3_tip_bus_read
+					     (dev_num, if_id,
+					      ACCESS_TYPE_UNICAST, pup,
+					      DDR_PHY_DATA, reg_addr + bit,
+					      &data_value));
+				printf("%d , ", data_value);
+			}
+		}
+		printf("\n");
+	}
+	printf("\n");
+
+	return MV_OK;
+}
+#endif /* DDR_VIEWER_TOOL */
+
+/*
+ * Fixup PBS Result
+ */
+int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode)
+{
+	u32 if_id, pup, bit;
+	u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
+		PBS_RX_PHY_REG(effective_cs, 0) :
+		PBS_TX_PHY_REG(effective_cs, 0);
+	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+		for (pup = 0; pup <= octets_per_if_num; pup++) {
+			for (bit = 0; bit <= BUS_WIDTH_IN_BITS + 3; bit++) {
+				CHECK_STATUS(ddr3_tip_bus_write
+					     (dev_num, ACCESS_TYPE_UNICAST,
+					      if_id, ACCESS_TYPE_UNICAST, pup,
+					      DDR_PHY_DATA, reg_addr + bit, 0));
+			}
+		}
+	}
+
+	return MV_OK;
+}
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr_ml_wrapper.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr_ml_wrapper.h
new file mode 100644
index 000000000..735731196
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr_ml_wrapper.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef _DDR_ML_WRAPPER_H
+#define _DDR_ML_WRAPPER_H
+
+#include <common.h>
+#include <i2c.h>
+#include <spl.h>
+#include <asm/io.h>
+#include <asm/arch/cpu.h>
+#include <asm/arch/soc.h>
+
+#define INTER_REGS_BASE	SOC_REGS_PHY_BASE
+
+/*
+ * MV_DEBUG_INIT needs to be defined, otherwise the output of the
+ * DDR training code is incomplete and misleading
+ */
+#define MV_DEBUG_INIT
+
+#ifdef MV_DEBUG_INIT
+#define DEBUG_INIT_S(s)			puts(s)
+#define DEBUG_INIT_D(d, l)		printf("%x", d)
+#define DEBUG_INIT_D_10(d, l)		printf("%d", d)
+#else
+#define DEBUG_INIT_S(s)
+#define DEBUG_INIT_D(d, l)
+#define DEBUG_INIT_D_10(d, l)
+#endif
+
+#ifdef MV_DEBUG_INIT_FULL
+#define DEBUG_INIT_FULL_S(s)		puts(s)
+#define DEBUG_INIT_FULL_D(d, l)		printf("%x", d)
+#define DEBUG_INIT_FULL_D_10(d, l)	printf("%d", d)
+#define DEBUG_WR_REG(reg, val) \
+	{ DEBUG_INIT_S("Write Reg: 0x"); DEBUG_INIT_D((reg), 8); \
+	  DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
+#define DEBUG_RD_REG(reg, val) \
+	{ DEBUG_INIT_S("Read Reg: 0x"); DEBUG_INIT_D((reg), 8); \
+	  DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
+#else
+#define DEBUG_INIT_FULL_S(s)
+#define DEBUG_INIT_FULL_D(d, l)
+#define DEBUG_INIT_FULL_D_10(d, l)
+#define DEBUG_WR_REG(reg, val)
+#define DEBUG_RD_REG(reg, val)
+#endif
+
+#define DEBUG_INIT_FULL_C(s, d, l) \
+	{ DEBUG_INIT_FULL_S(s); \
+	  DEBUG_INIT_FULL_D(d, l); \
+	  DEBUG_INIT_FULL_S("\n"); }
+#define DEBUG_INIT_C(s, d, l) \
+	{ DEBUG_INIT_S(s); DEBUG_INIT_D(d, l); DEBUG_INIT_S("\n"); }
+
+/*
+ * Debug (Enable/Disable modules) and Error report
+ */
+
+#ifdef BASIC_DEBUG
+#define MV_DEBUG_WL
+#define MV_DEBUG_RL
+#define MV_DEBUG_DQS_RESULTS
+#endif
+
+#ifdef FULL_DEBUG
+#define MV_DEBUG_WL
+#define MV_DEBUG_RL
+#define MV_DEBUG_DQS
+
+#define MV_DEBUG_PBS
+#define MV_DEBUG_DFS
+#define MV_DEBUG_MAIN_FULL
+#define MV_DEBUG_DFS_FULL
+#define MV_DEBUG_DQS_FULL
+#define MV_DEBUG_RL_FULL
+#define MV_DEBUG_WL_FULL
+#endif
+
+
+/* The following is a list of Marvell status codes */
+#define MV_ERROR		(-1)
+#define MV_OK			(0x00)	/* Operation succeeded */
+#define MV_FAIL			(0x01)	/* Operation failed */
+#define MV_BAD_VALUE		(0x02)	/* Illegal value (general) */
+#define MV_OUT_OF_RANGE		(0x03)	/* The value is out of range */
+#define MV_BAD_PARAM		(0x04)	/* Illegal parameter in function called */
+#define MV_BAD_PTR		(0x05)	/* Illegal pointer value */
+#define MV_BAD_SIZE		(0x06)	/* Illegal size */
+#define MV_BAD_STATE		(0x07)	/* Illegal state of state machine */
+#define MV_SET_ERROR		(0x08)	/* Set operation failed */
+#define MV_GET_ERROR		(0x09)	/* Get operation failed */
+#define MV_CREATE_ERROR		(0x0a)	/* Failure while creating an item */
+#define MV_NOT_FOUND		(0x0b)	/* Item not found */
+#define MV_NO_MORE		(0x0c)	/* No more items found */
+#define MV_NO_SUCH		(0x0d)	/* No such item */
+#define MV_TIMEOUT		(0x0e)	/* Time out */
+#define MV_NO_CHANGE		(0x0f)	/* Parameter(s) already has this value */
+#define MV_NOT_SUPPORTED	(0x10)	/* This request is not supported */
+#define MV_NOT_IMPLEMENTED	(0x11)	/* Request supported but not implemented */
+#define MV_NOT_INITIALIZED	(0x12)	/* The item is not initialized */
+#define MV_NO_RESOURCE		(0x13)	/* Resource not available (memory ...) */
+#define MV_FULL			(0x14)	/* Item is full (Queue or table etc...) */
+#define MV_EMPTY		(0x15)	/* Item is empty (Queue or table etc...) */
+#define MV_INIT_ERROR		(0x16)	/* Error occurred during INIT process */
+#define MV_HW_ERROR		(0x17)	/* Hardware error */
+#define MV_TX_ERROR		(0x18)	/* Transmit operation not succeeded */
+#define MV_RX_ERROR		(0x19)	/* Receive operation not succeeded */
+#define MV_NOT_READY		(0x1a)	/* The other side is not ready yet */
+#define MV_ALREADY_EXIST	(0x1b)	/* Tried to create an existing item */
+#define MV_OUT_OF_CPU_MEM	(0x1c)	/* CPU memory allocation failed */
+#define MV_NOT_STARTED		(0x1d)	/* Not started yet */
+#define MV_BUSY			(0x1e)	/* Item is busy */
+#define MV_TERMINATE		(0x1f)	/* Item terminates its work */
+#define MV_NOT_ALIGNED		(0x20)	/* Wrong alignment */
+#define MV_NOT_ALLOWED		(0x21)	/* Operation NOT allowed */
+#define MV_WRITE_PROTECT	(0x22)	/* Write protected */
+#define MV_INVALID		(int)(-1)
+
+/*
+ * Accessor functions for the registers
+ */
+static inline void reg_write(u32 addr, u32 val)
+{
+	writel(val, INTER_REGS_BASE + addr);
+}
+
+static inline u32 reg_read(u32 addr)
+{
+	return readl(INTER_REGS_BASE + addr);
+}
+
+static inline void reg_bit_set(u32 addr, u32 mask)
+{
+	setbits_le32(INTER_REGS_BASE + addr, mask);
+}
+
+static inline void reg_bit_clr(u32 addr, u32 mask)
+{
+	clrbits_le32(INTER_REGS_BASE + addr, mask);
+}
+
+#endif /* _DDR_ML_WRAPPER_H */
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr_topology_def.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr_topology_def.h
new file mode 100644
index 000000000..7f2317edf
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr_topology_def.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef _DDR_TOPOLOGY_DEF_H
+#define _DDR_TOPOLOGY_DEF_H
+
+#include "ddr3_training_ip_def.h"
+#include "mv_ddr_topology.h"
+#include "mv_ddr_spd.h"
+#include "ddr3_logging_def.h"
+
+#define MV_DDR_MAX_BUS_NUM	9
+#define MV_DDR_MAX_IFACE_NUM	1
+
+enum mv_ddr_twin_die {
+	NOT_COMBINED,
+	COMBINED,
+};
+
+struct bus_params {
+	/* Chip Select (CS) bitmask (bit 0 - CS0, bit 1 - CS1 ...) */
+	u8 cs_bitmask;
+
+	/*
+	 * mirror enable/disable
+	 * (bit 0 - CS0 mirroring, bit 1 - CS1 mirroring ...)
+ */ + int mirror_enable_bitmask; + + /* DQS Swap (polarity) - true if enable */ + int is_dqs_swap; + + /* CK swap (polarity) - true if enable */ + int is_ck_swap; +}; + +struct if_params { + /* bus configuration */ + struct bus_params as_bus_params[MV_DDR_MAX_BUS_NUM]; + + /* Speed Bin Table */ + enum mv_ddr_speed_bin speed_bin_index; + + /* sdram device width */ + enum mv_ddr_dev_width bus_width; + + /* total sdram capacity per die, megabits */ + enum mv_ddr_die_capacity memory_size; + + /* The DDR frequency for each interfaces */ + enum mv_ddr_freq memory_freq; + + /* + * delay CAS Write Latency + * - 0 for using default value (jedec suggested) + */ + u8 cas_wl; + + /* + * delay CAS Latency + * - 0 for using default value (jedec suggested) + */ + u8 cas_l; + + /* operation temperature */ + enum mv_ddr_temperature interface_temp; + + /* 2T vs 1T mode (by default computed from number of CSs) */ + enum mv_ddr_timing timing; +}; + +/* memory electrical configuration */ +struct mv_ddr_mem_edata { + enum mv_ddr_rtt_nom_park_evalue rtt_nom; + enum mv_ddr_rtt_nom_park_evalue rtt_park[MAX_CS_NUM]; + enum mv_ddr_rtt_wr_evalue rtt_wr[MAX_CS_NUM]; + enum mv_ddr_dic_evalue dic; +}; + +/* phy electrical configuration */ +struct mv_ddr_phy_edata { + enum mv_ddr_ohm_evalue drv_data_p; + enum mv_ddr_ohm_evalue drv_data_n; + enum mv_ddr_ohm_evalue drv_ctrl_p; + enum mv_ddr_ohm_evalue drv_ctrl_n; + enum mv_ddr_ohm_evalue odt_p[MAX_CS_NUM]; + enum mv_ddr_ohm_evalue odt_n[MAX_CS_NUM]; +}; + +/* mac electrical configuration */ +struct mv_ddr_mac_edata { + enum mv_ddr_odt_cfg_evalue odt_cfg_pat; + enum mv_ddr_odt_cfg_evalue odt_cfg_wr; + enum mv_ddr_odt_cfg_evalue odt_cfg_rd; +}; + +struct mv_ddr_edata { + struct mv_ddr_mem_edata mem_edata; + struct mv_ddr_phy_edata phy_edata; + struct mv_ddr_mac_edata mac_edata; +}; + +struct mv_ddr_topology_map { + /* debug level configuration */ + enum mv_ddr_debug_level debug_level; + + /* Number of interfaces (default is 12) */ + u8 if_act_mask; + + /* Controller configuration per interface */ + struct if_params interface_params[MV_DDR_MAX_IFACE_NUM]; + + /* Bit mask for active buses */ + u16 bus_act_mask; + + /* source of ddr configuration data */ + enum mv_ddr_cfg_src cfg_src; + + /* ddr twin-die */ + enum mv_ddr_twin_die twin_die_combined; + + /* raw spd data */ + union mv_ddr_spd_data spd_data; + + /* timing parameters */ + unsigned int timing_data[MV_DDR_TDATA_LAST]; + + /* electrical configuration */ + struct mv_ddr_edata edata; + + /* electrical parameters */ + unsigned int electrical_data[MV_DDR_EDATA_LAST]; + + /* ODT configuration */ + u32 odt_config; + + /* Clock enable mask */ + u32 clk_enable; + + /* Clock delay */ + int ck_delay; +}; + +enum mv_ddr_iface_mode { + MV_DDR_RAR_ENA, + MV_DDR_RAR_DIS, +}; + +enum mv_ddr_iface_state { + MV_DDR_IFACE_NRDY, /* not ready */ + MV_DDR_IFACE_INIT, /* init'd */ + MV_DDR_IFACE_RDY, /* ready */ + MV_DDR_IFACE_DNE /* does not exist */ +}; + +enum mv_ddr_validation { + MV_DDR_VAL_DIS, + MV_DDR_VAL_RX, + MV_DDR_VAL_TX, + MV_DDR_VAL_RX_TX, + MV_DDR_MEMORY_CHECK +}; + +enum mv_ddr_sscg { + SSCG_EN, + SSCG_DIS, +}; + +struct mv_ddr_iface { + /* base addr of ap ddr interface belongs to */ + unsigned int ap_base; + + /* ddr interface id */ + unsigned int id; + + /* ddr interface state */ + enum mv_ddr_iface_state state; + + /* ddr interface mode (rar enabled/disabled) */ + enum mv_ddr_iface_mode iface_mode; + + /* ddr interface base address */ + unsigned long long iface_base_addr; + + /* ddr interface size - ddr flow will update 
this parameter */ + unsigned long long iface_byte_size; + + /* ddr i2c spd data address */ + unsigned int spd_data_addr; + + /* ddr i2c spd page 0 select address */ + unsigned int spd_page_sel_addr; + + /* ddr interface validation mode */ + enum mv_ddr_validation validation; + + /* ddr interface validation mode */ + enum mv_ddr_sscg sscg; + + /* ddr interface topology map */ + struct mv_ddr_topology_map tm; + +}; + +struct mv_ddr_iface *mv_ddr_iface_get(void); + +/* DDR3 training global configuration parameters */ +struct tune_train_params { + u32 ck_delay; + u32 phy_reg3_val; + u32 g_zpri_data; + u32 g_znri_data; + u32 g_zpri_ctrl; + u32 g_znri_ctrl; + u32 g_zpodt_data; + u32 g_znodt_data; + u32 g_zpodt_ctrl; + u32 g_znodt_ctrl; + u32 g_dic; + u32 g_odt_config; + u32 g_rtt_nom; + u32 g_rtt_wr; + u32 g_rtt_park; +}; + +#endif /* _DDR_TOPOLOGY_DEF_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/ddr_training_ip_db.h b/roms/u-boot/drivers/ddr/marvell/a38x/ddr_training_ip_db.h new file mode 100644 index 000000000..f1b4d8efc --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/ddr_training_ip_db.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _DDR_TRAINING_IP_DB_H_ +#define _DDR_TRAINING_IP_DB_H_ + +#include "ddr3_training_ip_db.h" + +u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index); + +#endif /* _DDR3_TRAINING_IP_DB_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/dram_if.h b/roms/u-boot/drivers/ddr/marvell/a38x/dram_if.h new file mode 100644 index 000000000..4d0846489 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/dram_if.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2016 Marvell International Ltd. + */ + +#ifndef _DRAM_IF_H_ +#define _DRAM_IF_H_ + +/* TODO: update atf to this new prototype */ +int dram_init(void); +void dram_mmap_config(void); +unsigned long long dram_iface_mem_sz_get(void); +#endif /* _DRAM_IF_H_ */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_build_message.c b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_build_message.c new file mode 100644 index 000000000..a2bb8a96a --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_build_message.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +const char mv_ddr_build_message[] = ""; +const char mv_ddr_version_string[] = "mv_ddr: 14.0.0"; diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_common.c b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_common.c new file mode 100644 index 000000000..7afabbfd8 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_common.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include "mv_ddr_common.h" +#include "ddr_ml_wrapper.h" + +void mv_ddr_ver_print(void) +{ + printf("%s %s\n", mv_ddr_version_string, mv_ddr_build_message); +} + +/* ceiling division for positive integers */ +unsigned int ceil_div(unsigned int x, unsigned int y) +{ + return (x % y) ? (x / y + 1) : (x / y); +} + +/* + * time to number of clocks calculation based on the rounding algorithm + * using 97.4% inverse factor per JEDEC Standard No. 
21-C, 4.1.2.L-4:
+ * Serial Presence Detect (SPD) for DDR4 SDRAM Modules
+ */
+unsigned int time_to_nclk(unsigned int t, unsigned int tclk)
+{
+	/* t and tclk parameters are in ps */
+	return ((unsigned long)t * 1000 / tclk + 974) / 1000;
+}
+
+/* round division of two positive integers to the nearest whole number */
+int round_div(unsigned int dividend, unsigned int divisor, unsigned int *quotient)
+{
+	if (quotient == NULL) {
+		printf("%s: error: NULL quotient pointer found\n", __func__);
+		return MV_FAIL;
+	}
+
+	if (divisor == 0) {
+		printf("%s: error: division by zero\n", __func__);
+		return MV_FAIL;
+	} else {
+		*quotient = (dividend + divisor / 2) / divisor;
+	}
+
+	return MV_OK;
+}
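+
+/*
+ * Worked example for time_to_nclk() (illustrative values): with
+ * tclk = 1250 ps (an 800 MHz clock) and t = 13762 ps, the intermediate
+ * value is 13762 * 1000 / 1250 = 11009, so (11009 + 974) / 1000 = 11
+ * clocks. The +974 term implements the JEDEC rounding rule: a timing
+ * value is only rounded up to n + 1 clocks once it exceeds n clocks
+ * by at least 2.6% (i.e. t/tclk >= n + 0.026).
+ */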
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_common.h b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_common.h
new file mode 100644
index 000000000..321a390c0
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_common.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef _MV_DDR_COMMON_H
+#define _MV_DDR_COMMON_H
+
+extern const char mv_ddr_build_message[];
+extern const char mv_ddr_version_string[];
+
+#define _1K	0x00000400
+#define _4K	0x00001000
+#define _8K	0x00002000
+#define _16K	0x00004000
+#define _32K	0x00008000
+#define _64K	0x00010000
+#define _128K	0x00020000
+#define _256K	0x00040000
+#define _512K	0x00080000
+
+#define _1M	0x00100000
+#define _2M	0x00200000
+#define _4M	0x00400000
+#define _8M	0x00800000
+#define _16M	0x01000000
+#define _32M	0x02000000
+#define _64M	0x04000000
+#define _128M	0x08000000
+#define _256M	0x10000000
+#define _512M	0x20000000
+
+#define _1G	0x40000000
+#define _2G	0x80000000
+#define _4G	0x100000000
+#define _8G	0x200000000
+#define _16G	0x400000000
+#define _32G	0x800000000
+#define _64G	0x1000000000
+#define _128G	0x2000000000
+
+#define MEGA			1000000
+#define MV_DDR_MEGABYTE		(1024 * 1024)
+#define MV_DDR_32_BITS_MASK	0xffffffff
+
+#define GET_MAX_VALUE(x, y) \
+	(((x) > (y)) ? (x) : (y))
+
+void mv_ddr_ver_print(void);
+unsigned int ceil_div(unsigned int x, unsigned int y);
+unsigned int time_to_nclk(unsigned int t, unsigned int tclk);
+int round_div(unsigned int dividend, unsigned int divisor, unsigned int *quotient);
+
+#endif /* _MV_DDR_COMMON_H */
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_plat.c b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_plat.c
new file mode 100644
index 000000000..faafc86ea
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_plat.c
@@ -0,0 +1,1446 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#include "ddr3_init.h"
+#include "mv_ddr_common.h"
+#include "mv_ddr_training_db.h"
+#include "mv_ddr_regs.h"
+#include "mv_ddr_sys_env_lib.h"
+
+#define DDR_INTERFACES_NUM		1
+#define DDR_INTERFACE_OCTETS_NUM	5
+
+/*
+ * 1. The L2 filter should be set at the binary header to 0xD000000,
+ *    to avoid a conflict with internal register IO.
+ * 2. U-Boot modifies the internal registers base to 0xf100000,
+ *    and then should update the L2 filter accordingly to
+ *    0xf000000 (3.75 GB)
+ */
+#define L2_FILTER_FOR_MAX_MEMORY_SIZE	0xC0000000 /* temporarily limit the L2 filter to 3 GB (LSP issue) */
+#define ADDRESS_FILTERING_END_REGISTER	0x8c04
+
+#define DYNAMIC_CS_SIZE_CONFIG
+#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
+
+/* Thermal Sensor Registers */
+#define TSEN_CONTROL_LSB_REG		0xE4070
+#define TSEN_CONTROL_LSB_TC_TRIM_OFFSET	0
+#define TSEN_CONTROL_LSB_TC_TRIM_MASK	(0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
+#define TSEN_CONTROL_MSB_REG		0xE4074
+#define TSEN_CONTROL_MSB_RST_OFFSET	8
+#define TSEN_CONTROL_MSB_RST_MASK	(0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
+#define TSEN_STATUS_REG			0xe4078
+#define TSEN_STATUS_READOUT_VALID_OFFSET	10
+#define TSEN_STATUS_READOUT_VALID_MASK	(0x1 << \
+					 TSEN_STATUS_READOUT_VALID_OFFSET)
+#define TSEN_STATUS_TEMP_OUT_OFFSET	0
+#define TSEN_STATUS_TEMP_OUT_MASK	(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
+
+static struct dlb_config ddr3_dlb_config_table[] = {
+	{DLB_CTRL_REG, 0x2000005c},
+	{DLB_BUS_OPT_WT_REG, 0x00880000},
+	{DLB_AGING_REG, 0x0f7f007f},
+	{DLB_EVICTION_CTRL_REG, 0x0000129f},
+	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
+	{DLB_WTS_DIFF_CS_REG, 0x04030802},
+	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
+	{DLB_WTS_SAME_BG_REG, 0x09000a01},
+	{DLB_WTS_CMDS_REG, 0x00020005},
+	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
+	{DLB_QUEUE_MAP_REG, 0x00000543},
+	{DLB_SPLIT_REG, 0x00000000},
+	{DLB_USER_CMD_REG, 0x00000000},
+	{0x0, 0x0}
+};
+
+static struct dlb_config *sys_env_dlb_config_ptr_get(void)
+{
+	return &ddr3_dlb_config_table[0];
+}
+
+static u8 a38x_bw_per_freq[MV_DDR_FREQ_LAST] = {
+	0x3,			/* MV_DDR_FREQ_100 */
+	0x4,			/* MV_DDR_FREQ_400 */
+	0x4,			/* MV_DDR_FREQ_533 */
+	0x5,			/* MV_DDR_FREQ_667 */
+	0x5,			/* MV_DDR_FREQ_800 */
+	0x5,			/* MV_DDR_FREQ_933 */
+	0x5,			/* MV_DDR_FREQ_1066 */
+	0x3,			/* MV_DDR_FREQ_311 */
+	0x3,			/* MV_DDR_FREQ_333 */
+	0x4,			/* MV_DDR_FREQ_467 */
+	0x5,			/* MV_DDR_FREQ_850 */
+	0x5,			/* MV_DDR_FREQ_600 */
+	0x3,			/* MV_DDR_FREQ_300 */
+	0x5,			/* MV_DDR_FREQ_900 */
+	0x3,			/* MV_DDR_FREQ_360 */
+	0x5			/* MV_DDR_FREQ_1000 */
+};
+
+static u8 a38x_rate_per_freq[MV_DDR_FREQ_LAST] = {
+	0x1,			/* MV_DDR_FREQ_100 */
+	0x2,			/* MV_DDR_FREQ_400 */
+	0x2,			/* MV_DDR_FREQ_533 */
+	0x2,			/* MV_DDR_FREQ_667 */
+	0x2,			/* MV_DDR_FREQ_800 */
+	0x3,			/* MV_DDR_FREQ_933 */
+	0x3,			/* MV_DDR_FREQ_1066 */
+	0x1,			/* MV_DDR_FREQ_311 */
+	0x1,			/* MV_DDR_FREQ_333 */
+	0x2,			/* MV_DDR_FREQ_467 */
+	0x2,			/* MV_DDR_FREQ_850 */
+	0x2,			/* MV_DDR_FREQ_600 */
+	0x1,			/* MV_DDR_FREQ_300 */
+	0x2,			/* MV_DDR_FREQ_900 */
+	0x1,			/* MV_DDR_FREQ_360 */
+	0x2			/* MV_DDR_FREQ_1000 */
+};
+
+static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
+	666,			/* 0 */
+	1332,
+	800,
+	1600,
+	1066,
+	2132,
+	1200,
+	2400,
+	1332,
+	1332,
+	1500,
+	1500,
+	1600,			/* 12 */
+	1600,
+	1700,
+	1700,
+	1866,
+	1866,
+	1800,			/* 18 */
+	2000,
+	2000,
+	4000,
+	2132,
+	2132,
+	2300,
+	2300,
+	2400,
+	2400,
+	2500,
+	2500,
+	800
+};
+
+static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
+	666,			/* 0 */
+	1332,
+	800,
+	800,			/* 0x3 */
+	1066,
+	1066,			/* 0x5 */
+	1200,
+	2400,
+	1332,
+	1332,
+	1500,			/* 10 */
+	1600,			/* 0xB */
+	1600,
+	1600,
+	1700,
+	1560,			/* 0xF */
+	1866,
+	1866,
+	1800,
+	2000,
+	2000,			/* 20 */
+	4000,
+	2132,
+	2132,
+	2300,
+	2300,
+	2400,
+	2400,
+	2500,
+	2500,
+	1800			/* 30 - 0x1E */
+};
+
+
+static u32 async_mode_at_tf;
+
+static u32 dq_bit_map_2_phy_pin[] = {
+	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
+	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
+	3, 9, 7, 8, 1, 0, 2, 6,	/*
2 */ + 1, 0, 6, 2, 8, 3, 7, 9, /* 3 */ + 0, 1, 2, 9, 7, 8, 3, 6, /* 4 */ +}; + +void mv_ddr_mem_scrubbing(void) +{ + ddr3_new_tip_ecc_scrub(); +} + +static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id, + enum mv_ddr_freq freq); + +/* + * Read temperature TJ value + */ +static u32 ddr3_ctrl_get_junc_temp(u8 dev_num) +{ + int reg = 0; + + /* Initiates TSEN hardware reset once */ + if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) { + reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK); + /* set Tsen Tc Trim to correct default value (errata #132698) */ + reg = reg_read(TSEN_CONTROL_LSB_REG); + reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK; + reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET; + reg_write(TSEN_CONTROL_LSB_REG, reg); + } + mdelay(10); + + /* Check if the readout field is valid */ + if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) { + printf("%s: TSEN not ready\n", __func__); + return 0; + } + + reg = reg_read(TSEN_STATUS_REG); + reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET; + + return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000; +} + +/* + * Name: ddr3_tip_a38x_get_freq_config. + * Desc: + * Args: + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum mv_ddr_freq freq, + struct hws_tip_freq_config_info + *freq_config_info) +{ + if (a38x_bw_per_freq[freq] == 0xff) + return MV_NOT_SUPPORTED; + + if (freq_config_info == NULL) + return MV_BAD_PARAM; + + freq_config_info->bw_per_freq = a38x_bw_per_freq[freq]; + freq_config_info->rate_per_freq = a38x_rate_per_freq[freq]; + freq_config_info->is_supported = 1; + + return MV_OK; +} + +static void dunit_read(u32 addr, u32 mask, u32 *data) +{ + *data = reg_read(addr) & mask; +} + +static void dunit_write(u32 addr, u32 mask, u32 data) +{ + u32 reg_val = data; + + if (mask != MASK_ALL_BITS) { + dunit_read(addr, MASK_ALL_BITS, ®_val); + reg_val &= (~mask); + reg_val |= (data & mask); + } + + reg_write(addr, reg_val); +} + +#define ODPG_ENABLE_REG 0x186d4 +#define ODPG_EN_OFFS 0 +#define ODPG_EN_MASK 0x1 +#define ODPG_EN_ENA 1 +#define ODPG_EN_DONE 0 +#define ODPG_DIS_OFFS 8 +#define ODPG_DIS_MASK 0x1 +#define ODPG_DIS_DIS 1 +void mv_ddr_odpg_enable(void) +{ + dunit_write(ODPG_ENABLE_REG, + ODPG_EN_MASK << ODPG_EN_OFFS, + ODPG_EN_ENA << ODPG_EN_OFFS); +} + +void mv_ddr_odpg_disable(void) +{ + dunit_write(ODPG_ENABLE_REG, + ODPG_DIS_MASK << ODPG_DIS_OFFS, + ODPG_DIS_DIS << ODPG_DIS_OFFS); +} + +void mv_ddr_odpg_done_clr(void) +{ + return; +} + +int mv_ddr_is_odpg_done(u32 count) +{ + u32 i, data; + + for (i = 0; i < count; i++) { + dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data); + if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) == + ODPG_EN_DONE) + break; + } + + if (i >= count) { + printf("%s: timeout\n", __func__); + return MV_FAIL; + } + + return MV_OK; +} + +void mv_ddr_training_enable(void) +{ + dunit_write(GLOB_CTRL_STATUS_REG, + TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS, + TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS); +} + +#define DRAM_INIT_CTRL_STATUS_REG 0x18488 +#define TRAINING_TRIGGER_OFFS 0 +#define TRAINING_TRIGGER_MASK 0x1 +#define TRAINING_TRIGGER_ENA 1 +#define TRAINING_DONE_OFFS 1 +#define TRAINING_DONE_MASK 0x1 +#define TRAINING_DONE_DONE 1 +#define TRAINING_DONE_NOT_DONE 0 +#define TRAINING_RESULT_OFFS 2 +#define TRAINING_RESULT_MASK 0x1 +#define TRAINING_RESULT_PASS 0 +#define TRAINING_RESULT_FAIL 1 +int mv_ddr_is_training_done(u32 count, u32 *result) +{ + u32 i, 
data; + + if (result == NULL) { + printf("%s: NULL result pointer found\n", __func__); + return MV_FAIL; + } + + for (i = 0; i < count; i++) { + dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data); + if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) == + TRAINING_DONE_DONE) + break; + } + + if (i >= count) { + printf("%s: timeout\n", __func__); + return MV_FAIL; + } + + *result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK; + + return MV_OK; +} + +#define DM_PAD 10 +u32 mv_ddr_dm_pad_get(void) +{ + return DM_PAD; +} + +/* + * Name: ddr3_tip_a38x_select_ddr_controller. + * Desc: Enable/Disable access to Marvell's server. + * Args: dev_num - device number + * enable - whether to enable or disable the server + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable) +{ + u32 reg; + + reg = reg_read(DUAL_DUNIT_CFG_REG); + + if (enable) + reg |= (1 << 6); + else + reg &= ~(1 << 6); + + reg_write(DUAL_DUNIT_CFG_REG, reg); + + return MV_OK; +} + +static u8 ddr3_tip_clock_mode(u32 frequency) +{ + if ((frequency == MV_DDR_FREQ_LOW_FREQ) || (mv_ddr_freq_get(frequency) <= 400)) + return 1; + + return 2; +} + +static int mv_ddr_sar_freq_get(int dev_num, enum mv_ddr_freq *freq) +{ + u32 reg, ref_clk_satr; + + /* Read sample at reset setting */ + reg = (reg_read(REG_DEVICE_SAR1_ADDR) >> + RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) & + RST2_CPU_DDR_CLOCK_SELECT_IN_MASK; + + ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG); + if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) == + DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) { + switch (reg) { + case 0x1: + DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR, + ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n", + reg)); + /* fallthrough */ + case 0x0: + *freq = MV_DDR_FREQ_333; + break; + case 0x3: + DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR, + ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n", + reg)); + /* fallthrough */ + case 0x2: + *freq = MV_DDR_FREQ_400; + break; + case 0xd: + DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR, + ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n", + reg)); + /* fallthrough */ + case 0x4: + *freq = MV_DDR_FREQ_533; + break; + case 0x6: + *freq = MV_DDR_FREQ_600; + break; + case 0x11: + case 0x14: + DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR, + ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n", + reg)); + /* fallthrough */ + case 0x8: + *freq = MV_DDR_FREQ_667; + break; + case 0x15: + case 0x1b: + DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR, + ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n", + reg)); + /* fallthrough */ + case 0xc: + *freq = MV_DDR_FREQ_800; + break; + case 0x10: + *freq = MV_DDR_FREQ_933; + break; + case 0x12: + *freq = MV_DDR_FREQ_900; + break; + case 0x13: + *freq = MV_DDR_FREQ_933; + break; + default: + *freq = 0; + return MV_NOT_SUPPORTED; + } + } else { /* REFCLK 40MHz case */ + switch (reg) { + case 0x3: + *freq = MV_DDR_FREQ_400; + break; + case 0x5: + *freq = MV_DDR_FREQ_533; + break; + case 0xb: + *freq = MV_DDR_FREQ_800; + break; + case 0x1e: + *freq = MV_DDR_FREQ_900; + break; + default: + *freq = 0; + return MV_NOT_SUPPORTED; + } + } + + return MV_OK; +} + +static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum mv_ddr_freq *freq) +{ + u32 reg, ref_clk_satr; + + /* Read sample at reset setting */ + reg = (reg_read(REG_DEVICE_SAR1_ADDR) >> + RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) & + RST2_CPU_DDR_CLOCK_SELECT_IN_MASK; + + ref_clk_satr 
= reg_read(DEVICE_SAMPLE_AT_RESET2_REG); + if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) == + DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) { + switch (reg) { + case 0x0: + case 0x1: + /* Medium is same as TF to run PBS in this freq */ + *freq = MV_DDR_FREQ_333; + break; + case 0x2: + case 0x3: + /* Medium is same as TF to run PBS in this freq */ + *freq = MV_DDR_FREQ_400; + break; + case 0x4: + case 0xd: + /* Medium is same as TF to run PBS in this freq */ + *freq = MV_DDR_FREQ_533; + break; + case 0x8: + case 0x10: + case 0x11: + case 0x14: + *freq = MV_DDR_FREQ_333; + break; + case 0xc: + case 0x15: + case 0x1b: + *freq = MV_DDR_FREQ_400; + break; + case 0x6: + *freq = MV_DDR_FREQ_300; + break; + case 0x12: + *freq = MV_DDR_FREQ_360; + break; + case 0x13: + *freq = MV_DDR_FREQ_400; + break; + default: + *freq = 0; + return MV_NOT_SUPPORTED; + } + } else { /* REFCLK 40MHz case */ + switch (reg) { + case 0x3: + /* Medium is same as TF to run PBS in this freq */ + *freq = MV_DDR_FREQ_400; + break; + case 0x5: + /* Medium is same as TF to run PBS in this freq */ + *freq = MV_DDR_FREQ_533; + break; + case 0xb: + *freq = MV_DDR_FREQ_400; + break; + case 0x1e: + *freq = MV_DDR_FREQ_360; + break; + default: + *freq = 0; + return MV_NOT_SUPPORTED; + } + } + + return MV_OK; +} + +static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr) +{ + info_ptr->device_id = 0x6800; + info_ptr->ck_delay = ck_delay; + + return MV_OK; +} + +/* check indirect access to phy register file completed */ +static int is_prfa_done(void) +{ + u32 reg_val; + u32 iter = 0; + + do { + if (iter++ > MAX_POLLING_ITERATIONS) { + printf("error: %s: polling timeout\n", __func__); + return MV_FAIL; + } + dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, ®_val); + reg_val >>= PRFA_REQ_OFFS; + reg_val &= PRFA_REQ_MASK; + } while (reg_val == PRFA_REQ_ENA); /* request pending */ + + return MV_OK; +} + +/* write to phy register thru indirect access */ +static int prfa_write(enum hws_access_type phy_access, u32 phy, + enum hws_ddr_phy phy_type, u32 addr, + u32 data, enum hws_operation op_type) +{ + u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) | + ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) | + ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) | + ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) | + ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) | + (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) | + ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS); + dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val); + reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS); + dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val); + + /* polling for prfa request completion */ + if (is_prfa_done() != MV_OK) + return MV_FAIL; + + return MV_OK; +} + +/* read from phy register thru indirect access */ +static int prfa_read(enum hws_access_type phy_access, u32 phy, + enum hws_ddr_phy phy_type, u32 addr, u32 *data) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); + u32 i, reg_val; + + if (phy_access == ACCESS_TYPE_MULTICAST) { + for (i = 0; i < max_phy; i++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i); + if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK) + return MV_FAIL; + dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, ®_val); + data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK; + } + } else { + if 
(prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK) + return MV_FAIL; + dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, ®_val); + *data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK; + } + + return MV_OK; +} + +static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id) +{ + struct hws_tip_config_func_db config_func; + + /* new read leveling version */ + config_func.mv_ddr_dunit_read = dunit_read; + config_func.mv_ddr_dunit_write = dunit_write; + config_func.tip_dunit_mux_select_func = + ddr3_tip_a38x_select_ddr_controller; + config_func.tip_get_freq_config_info_func = + ddr3_tip_a38x_get_freq_config; + config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider; + config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info; + config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp; + config_func.tip_get_clock_ratio = ddr3_tip_clock_mode; + config_func.tip_external_read = ddr3_tip_ext_read; + config_func.tip_external_write = ddr3_tip_ext_write; + config_func.mv_ddr_phy_read = prfa_read; + config_func.mv_ddr_phy_write = prfa_write; + + ddr3_tip_init_config_func(dev_num, &config_func); + + ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin); + + /* set device attributes*/ + ddr3_tip_dev_attr_init(dev_num); + ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4); + ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE); + ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM); + ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0); + + ca_delay = 0; + delay_enable = 1; + dfs_low_freq = DFS_LOW_FREQ_VALUE; + calibration_update_control = 1; + + ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq); + + return MV_OK; +} + +static int mv_ddr_training_mask_set(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + enum mv_ddr_freq ddr_freq = tm->interface_params[0].memory_freq; + + mask_tune_func = (SET_LOW_FREQ_MASK_BIT | + LOAD_PATTERN_MASK_BIT | + SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT | + WRITE_LEVELING_SUPP_MASK_BIT | + READ_LEVELING_MASK_BIT | + PBS_RX_MASK_BIT | + PBS_TX_MASK_BIT | + SET_TARGET_FREQ_MASK_BIT | + WRITE_LEVELING_TF_MASK_BIT | + WRITE_LEVELING_SUPP_TF_MASK_BIT | + READ_LEVELING_TF_MASK_BIT | + CENTRALIZATION_RX_MASK_BIT | + CENTRALIZATION_TX_MASK_BIT); + rl_mid_freq_wa = 1; + + if ((ddr_freq == MV_DDR_FREQ_333) || (ddr_freq == MV_DDR_FREQ_400)) { + mask_tune_func = (WRITE_LEVELING_MASK_BIT | + LOAD_PATTERN_2_MASK_BIT | + WRITE_LEVELING_SUPP_MASK_BIT | + READ_LEVELING_MASK_BIT | + PBS_RX_MASK_BIT | + PBS_TX_MASK_BIT | + CENTRALIZATION_RX_MASK_BIT | + CENTRALIZATION_TX_MASK_BIT); + rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */ + } + + /* Supplementary not supported for ECC modes */ + if (mv_ddr_is_ecc_ena()) { + mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT; + mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT; + mask_tune_func &= ~PBS_TX_MASK_BIT; + mask_tune_func &= ~PBS_RX_MASK_BIT; + } + + return MV_OK; +} + +/* function: mv_ddr_set_calib_controller + * this function sets the controller which will control + * the calibration cycle in the end of the training. 
+ * 1 - internal controller
+ * 2 - external controller
+ */
+void mv_ddr_set_calib_controller(void)
+{
+	calibration_update_control = CAL_UPDATE_CTRL_INT;
+}
+
+static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
+				     enum mv_ddr_freq frequency)
+{
+	u32 divider = 0;
+	u32 sar_val, ref_clk_satr;
+	u32 async_val;
+	u32 freq = mv_ddr_freq_get(frequency);
+
+	if (if_id != 0) {
+		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
+				      ("A38x does not support interface 0x%x\n",
+				       if_id));
+		return MV_BAD_PARAM;
+	}
+
+	/* get VCO freq index */
+	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
+		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
+		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
+
+	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
+	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
+	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
+		divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq;
+	else
+		divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq;
+
+	if ((async_mode_at_tf == 1) && (freq > 400)) {
+		/* Set async mode */
+		dunit_write(0x20220, 0x1000, 0x1000);
+		dunit_write(0xe42f4, 0x200, 0x200);
+
+		/* Wait for async mode setup */
+		mdelay(5);
+
+		/* Set KNL values */
+		switch (frequency) {
+		case MV_DDR_FREQ_467:
+			async_val = 0x806f012;
+			break;
+		case MV_DDR_FREQ_533:
+			async_val = 0x807f012;
+			break;
+		case MV_DDR_FREQ_600:
+			async_val = 0x805f00a;
+			break;
+		case MV_DDR_FREQ_667:
+			async_val = 0x809f012;
+			break;
+		case MV_DDR_FREQ_800:
+			async_val = 0x807f00a;
+			break;
+		case MV_DDR_FREQ_850:
+			async_val = 0x80cb012;
+			break;
+		case MV_DDR_FREQ_900:
+			async_val = 0x80d7012;
+			break;
+		case MV_DDR_FREQ_933:
+			async_val = 0x80df012;
+			break;
+		case MV_DDR_FREQ_1000:
+			async_val = 0x80ef012;
+			break;
+		case MV_DDR_FREQ_1066:
+			async_val = 0x80ff012;
+			break;
+		default:
+			/* set MV_DDR_FREQ_667 as default */
+			async_val = 0x809f012;
+		}
+		dunit_write(0xe42f0, 0xffffffff, async_val);
+	} else {
+		/* Set sync mode */
+		dunit_write(0x20220, 0x1000, 0x0);
+		dunit_write(0xe42f4, 0x200, 0x0);
+
+		/* cpupll_clkdiv_reset_mask */
+		dunit_write(0xe4264, 0xff, 0x1f);
+
+		/* cpupll_clkdiv_reload_smooth */
+		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));
+
+		/* cpupll_clkdiv_relax_en */
+		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));
+
+		/* write the divider */
+		dunit_write(0xe4268, (0x3f << 8), (divider << 8));
+
+		/* set cpupll_clkdiv_reload_ratio */
+		dunit_write(0xe4264, (1 << 8), (1 << 8));
+
+		/* unset cpupll_clkdiv_reload_ratio */
+		dunit_write(0xe4264, (1 << 8), 0x0);
+
+		/* clear cpupll_clkdiv_reload_force */
+		dunit_write(0xe4260, (0xff << 8), 0x0);
+
+		/* clear cpupll_clkdiv_relax_en */
+		dunit_write(0xe4260, (0xff << 24), 0x0);
+
+		/* clear cpupll_clkdiv_reset_mask */
+		dunit_write(0xe4264, 0xff, 0x0);
+	}
+
+	/* Dunit training clock + 1:1/2:1 mode */
+	dunit_write(0x18488, (1 << 16),
+		    ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
+	dunit_write(0x1524, (1 << 15),
+		    ((ddr3_tip_clock_mode(frequency) - 1) << 15));
+
+	return MV_OK;
+}
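+
+/*
+ * Worked example for the divider selection above (illustrative): with a
+ * 25 MHz reference clock and a SAR value of 12, the VCO table gives
+ * 1600 MHz, so running the DDR interface at 800 MHz results in
+ * divider = 1600 / 800 = 2.
+ */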
+
+/*
+ * external read from memory
+ */
+int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
+		      u32 num_of_bursts, u32 *data)
+{
+	u32 burst_num;
+
+	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
+		data[burst_num] = readl(reg_addr + 4 * burst_num);
+
+	return MV_OK;
+}
+
+/*
+ * external write to memory
+ */
+int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
+		       u32 num_of_bursts, u32 *data)
+{
+	u32 burst_num;
+
+	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
+		writel(data[burst_num], reg_addr + 4 * burst_num);
+
+	return MV_OK;
+}
+
+int mv_ddr_early_init(void)
+{
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	/* FIXME: change this configuration per ddr type
+	 * configure a380 and a390 to work with receiver odt timing;
+	 * odt_config is defined as:
+	 * '1' in ddr4
+	 * '0' in ddr3
+	 * here the parameter is overridden to '1' for both ddr4 and ddr3
+	 * (in ddr4 the default is already '1')
+	 * to configure the odt to work with timing restrictions
+	 */
+
+	mv_ddr_sw_db_init(0, 0);
+
+	if (tm->interface_params[0].memory_freq != MV_DDR_FREQ_SAR)
+		async_mode_at_tf = 1;
+
+	return MV_OK;
+}
+
+int mv_ddr_early_init2(void)
+{
+	mv_ddr_training_mask_set();
+
+	return MV_OK;
+}
+
+int mv_ddr_pre_training_fixup(void)
+{
+	return 0;
+}
+
+int mv_ddr_post_training_fixup(void)
+{
+	return 0;
+}
+
+int ddr3_post_run_alg(void)
+{
+	return MV_OK;
+}
+
+int ddr3_silicon_post_init(void)
+{
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	/* Set half bus width */
+	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
+		CHECK_STATUS(ddr3_tip_if_write
+			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
+			      SDRAM_CFG_REG, 0x0, 0x8000));
+	}
+
+	return MV_OK;
+}
+
+u32 mv_ddr_init_freq_get(void)
+{
+	enum mv_ddr_freq freq;
+
+	mv_ddr_sar_freq_get(0, &freq);
+
+	return freq;
+}
+
+static u32 ddr3_get_bus_width(void)
+{
+	u32 bus_width;
+
+	bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
+		BUS_IN_USE_OFFS;
+
+	return (bus_width == 0) ? 16 : 32;
+}
+
+static u32 ddr3_get_device_width(u32 cs)
+{
+	u32 device_width;
+
+	device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
+			(CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
+		CS_STRUCT_OFFS(cs);
+
+	return (device_width == 0) ? 8 : 16;
+}
+
+static u32 ddr3_get_device_size(u32 cs)
+{
+	u32 device_size_low, device_size_high, device_size;
+	u32 data, cs_low_offset, cs_high_offset;
+
+	cs_low_offset = CS_SIZE_OFFS(cs);
+	cs_high_offset = CS_SIZE_HIGH_OFFS(cs);
+
+	data = reg_read(SDRAM_ADDR_CTRL_REG);
+	device_size_low = (data >> cs_low_offset) & 0x3;
+	device_size_high = (data >> cs_high_offset) & 0x1;
+
+	device_size = device_size_low | (device_size_high << 2);
+
+	switch (device_size) {
+	case 0:
+		return 2048;
+	case 2:
+		return 512;
+	case 3:
+		return 1024;
+	case 4:
+		return 4096;
+	case 5:
+		return 8192;
+	case 1:
+	default:
+		DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
+		/* zeroes mem size in ddr3_calc_mem_cs_size */
+		return 0;
+	}
+}
+
+int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
+{
+	u32 cs_mem_size;
+
+	/* Calculate in MiB */
+	cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
+		       ddr3_get_device_size(cs)) / 8;
+
+	/*
+	 * Multiply by the controller bus width multiplier, 2x for 64 bit
+	 * (the SoC controller may be 32 or 64 bit, so bit 15 in register
+	 * 0x1400, which tells whether the whole bus or only half of it
+	 * is used, has a different meaning)
+	 */
+	cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;
+
+	if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
+		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
+		return MV_BAD_VALUE;
+	}
+
+	*cs_size = cs_mem_size;
+
+	return MV_OK;
+}
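+
+/*
+ * Worked example for ddr3_calc_mem_cs_size() (illustrative values):
+ * with a 32-bit bus, x16 devices and a device size of 4096 Mbit, the
+ * CS size is (32 / 16) * 4096 / 8 = 1024 MiB; multiplied by the bus
+ * width multiplier (1 on this platform) it stays 1024 MiB, which is
+ * inside the accepted 128..4096 MiB range.
+ */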
+
+static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
+{
+	u32 reg, cs;
+	uint64_t mem_total_size = 0;
+	uint64_t cs_mem_size_mb = 0;
+	uint64_t cs_mem_size = 0;
+	uint64_t mem_total_size_c, cs_mem_size_c;
+
+#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
+	u32 physical_mem_size;
+	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+#endif
+
+	/* Open fast path windows */
+	for (cs = 0; cs < MAX_CS_NUM; cs++) {
+		if (cs_ena & (1 << cs)) {
+			/* get CS size */
+			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size_mb) != MV_OK)
+				return MV_FAIL;
+			cs_mem_size = cs_mem_size_mb * _1M;
+
+#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
+			/*
+			 * if the number of address pins doesn't allow using
+			 * the max mem size that is defined in the topology,
+			 * the mem size is defined by
+			 * DEVICE_MAX_DRAM_ADDRESS_SIZE
+			 */
+			physical_mem_size = mem_size
+				[tm->interface_params[0].memory_size];
+
+			if (ddr3_get_device_width(cs) == 16) {
+				/*
+				 * a 16bit mem device can be twice as big -
+				 * no need for the least significant pin
+				 */
+				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
+			}
+
+			if (physical_mem_size > max_mem_size) {
+				cs_mem_size = max_mem_size *
+					(ddr3_get_bus_width() /
+					 ddr3_get_device_width(cs));
+				printf("Updated physical mem size from 0x%x to 0x%x\n",
+				       physical_mem_size,
+				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
+			}
+#endif
+
+			/* set fast path window control for the cs */
+			reg = 0xffffe1;
+			reg |= (cs << 2);
+			reg |= (cs_mem_size - 1) & 0xffff0000;
+			/* open the fast path window */
+			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);
+
+			/* Set fast path window base address for the cs */
+			reg = ((cs_mem_size) * cs) & 0xffff0000;
+			/* Set base address */
+			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);
+
+			/*
+			 * Since the memory size may be bigger than 4G, the
+			 * sum may not fit into a 32 bit word; to estimate
+			 * the result, divide mem_total_size and cs_mem_size
+			 * by 0x10000 (equivalent to >> 16)
+			 */
+			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
+			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;
+
+			/* if the sum is less than 4 G, accumulate the value */
+			if (mem_total_size_c + cs_mem_size_c < 0x10000)
+				mem_total_size += cs_mem_size;
+			else	/* put the max possible size */
+				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
+		}
+	}
+
+	/* Set L2 filtering to Max Memory size */
+	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);
+
+	return MV_OK;
+}
+
+static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
+{
+	u32 win_ctrl_reg, num_of_win_regs;
+	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
+	u32 ui;
+
+	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
+	num_of_win_regs = 16;
+
+	/* Return XBAR windows 4-7 or 16-19 init configuration */
+	for (ui = 0; ui < num_of_win_regs; ui++)
+		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);
+
+	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
+	       ddr_type);
+
+#if defined DYNAMIC_CS_SIZE_CONFIG
+	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
+		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
+#else
+	u32 reg, cs;
+	reg = 0x1fffffe1;
+	for (cs = 0; cs < MAX_CS_NUM; cs++) {
+		if (cs_ena & (1 << cs)) {
+			reg |= (cs << 2);
+			break;
+		}
+	}
+	/* Open fast path Window to - 0.5G */
+	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
+#endif
+
+	return MV_OK;
+}
+
+static int ddr3_save_and_set_training_windows(u32 *win)
+{
+	u32 cs_ena;
+	u32 reg, tmp_count, cs, ui;
+	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
+	u32 num_of_win_regs, win_jump_index;
+	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
+	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
+	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
+	win_jump_index = 0x10;
+	num_of_win_regs = 16;
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
+	/*
+	 * Disable L2 filtering during DDR training
+	 * (when the Cross Bar window is open)
+	 */
+	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
+#endif
+
+	cs_ena =
tm->interface_params[0].as_bus_params[0].cs_bitmask; + + /* Close XBAR Window 19 - Not needed */ + /* {0x000200e8} - Open Mbus Window - 2G */ + reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0); + + /* Save XBAR Windows 4-19 init configurations */ + for (ui = 0; ui < num_of_win_regs; ui++) + win[ui] = reg_read(win_ctrl_reg + 0x4 * ui); + + /* Open XBAR Windows 4-7 or 16-19 for other CS */ + reg = 0; + tmp_count = 0; + for (cs = 0; cs < MAX_CS_NUM; cs++) { + if (cs_ena & (1 << cs)) { + switch (cs) { + case 0: + reg = 0x0e00; + break; + case 1: + reg = 0x0d00; + break; + case 2: + reg = 0x0b00; + break; + case 3: + reg = 0x0700; + break; + } + reg |= (1 << 0); + reg |= (SDRAM_CS_SIZE & 0xffff0000); + + reg_write(win_ctrl_reg + win_jump_index * tmp_count, + reg); + reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) & + 0xffff0000); + reg_write(win_base_reg + win_jump_index * tmp_count, + reg); + + if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR) + reg_write(win_remap_reg + + win_jump_index * tmp_count, 0); + + tmp_count++; + } + } + + return MV_OK; +} + +static u32 win[16]; + +int mv_ddr_pre_training_soc_config(const char *ddr_type) +{ + u32 soc_num; + u32 reg_val; + + /* Switching CPU to MRVL ID */ + soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >> + SAR1_CPU_CORE_OFFSET; + switch (soc_num) { + case 0x3: + reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET); + reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET); + /* fallthrough */ + case 0x1: + reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET); + /* fallthrough */ + case 0x0: + reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET); + /* fallthrough */ + default: + break; + } + + /* + * Set DRAM Reset Mask in case detected GPIO indication of wakeup from + * suspend i.e the DRAM values will not be overwritten / reset when + * waking from suspend + */ + if (mv_ddr_sys_env_suspend_wakeup_check() == + SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) { + reg_bit_set(SDRAM_INIT_CTRL_REG, + DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS); + } + + /* Check if DRAM is already initialized */ + if (reg_read(REG_BOOTROM_ROUTINE_ADDR) & + (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) { + printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type); + return MV_OK; + } + + /* Fix read ready phases for all SOC in reg 0x15c8 */ + reg_val = reg_read(TRAINING_DBG_3_REG); + + reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0)); + reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0)); /* phase 0 */ + + reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1)); + reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1)); /* phase 1 */ + + reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3)); + reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3)); /* phase 3 */ + + reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4)); + reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4)); /* phase 4 */ + + reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5)); + reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5)); /* phase 5 */ + + reg_write(TRAINING_DBG_3_REG, reg_val); + + /* + * Axi_bresp_mode[8] = Compliant, + * Axi_addr_decode_cntrl[11] = Internal, + * Axi_data_bus_width[0] = 128bit + * */ + /* 0x14a8 - AXI Control Register */ + reg_write(AXI_CTRL_REG, 0); + + /* + * Stage 2 - Training Values Setup + */ + /* Set X-BAR windows for the training sequence */ + ddr3_save_and_set_training_windows(win); + + return MV_OK; +} + +static int 
ddr3_new_tip_dlb_config(void) +{ + u32 reg, i = 0; + struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get(); + + /* Write the configuration */ + while (config_table_ptr[i].reg_addr != 0) { + reg_write(config_table_ptr[i].reg_addr, + config_table_ptr[i].reg_data); + i++; + } + + + /* Enable DLB */ + reg = reg_read(DLB_CTRL_REG); + reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) & + ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) & + ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) & + ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) & + ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS); + + reg |= (DLB_EN_ENA << DLB_EN_OFFS) | + (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) | + (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) | + (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) | + (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS); + + reg_write(DLB_CTRL_REG, reg); + + return MV_OK; +} + +int mv_ddr_post_training_soc_config(const char *ddr_type) +{ + u32 reg_val; + + /* Restore and set windows */ + ddr3_restore_and_set_final_windows(win, ddr_type); + + /* Update DRAM init indication in bootROM register */ + reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR); + reg_write(REG_BOOTROM_ROUTINE_ADDR, + reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)); + + /* DLB config */ + ddr3_new_tip_dlb_config(); + + return MV_OK; +} + +void mv_ddr_mc_config(void) +{ + /* Memory controller initializations */ + struct init_cntr_param init_param; + int status; + + init_param.do_mrs_phy = 1; + init_param.is_ctrl64_bit = 0; + init_param.init_phy = 1; + init_param.msys_init = 1; + status = hws_ddr3_tip_init_controller(0, &init_param); + if (status != MV_OK) + printf("DDR3 init controller - FAILED 0x%x\n", status); + + status = mv_ddr_mc_init(); + if (status != MV_OK) + printf("DDR3 init_sequence - FAILED 0x%x\n", status); +} +/* function: mv_ddr_mc_init + * this function enables the dunit after init controller configuration + */ +int mv_ddr_mc_init(void) +{ + CHECK_STATUS(ddr3_tip_enable_init_sequence(0)); + + return MV_OK; +} + +/* function: ddr3_tip_configure_phy + * configures phy and electrical parameters + */ +int ddr3_tip_configure_phy(u32 dev_num) +{ + u32 if_id, phy_id; + u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, + PAD_ZRI_CAL_PHY_REG, + ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data)))); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL, + PAD_ZRI_CAL_PHY_REG, + ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl)))); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, + PAD_ODT_CAL_PHY_REG, + ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data)))); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL, + PAD_ODT_CAL_PHY_REG, + ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl)))); + + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, + PAD_PRE_DISABLE_PHY_REG, 0)); + CHECK_STATUS(ddr3_tip_bus_write + (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, + ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 
DDR_PHY_DATA,
+		      CMOS_CONFIG_PHY_REG, 0));
+	CHECK_STATUS(ddr3_tip_bus_write
+		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
+		      CMOS_CONFIG_PHY_REG, 0));
+
+	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
+		/* check if the interface is enabled */
+		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
+
+		for (phy_id = 0;
+		     phy_id < octets_per_if_num;
+		     phy_id++) {
+			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
+			/* Vref & clamp */
+			CHECK_STATUS(ddr3_tip_bus_read_modify_write
+				     (dev_num, ACCESS_TYPE_UNICAST,
+				      if_id, phy_id, DDR_PHY_DATA,
+				      PAD_CFG_PHY_REG,
+				      ((clamp_tbl[if_id] << 4) | vref_init_val),
+				      ((0x7 << 4) | 0x7)));
+			/* clamp not relevant for control */
+			CHECK_STATUS(ddr3_tip_bus_read_modify_write
+				     (dev_num, ACCESS_TYPE_UNICAST,
+				      if_id, phy_id, DDR_PHY_CONTROL,
+				      PAD_CFG_PHY_REG, 0x4, 0x7));
+		}
+	}
+
+	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
+	    MV_DDR_PHY_EDGE_POSITIVE)
+		CHECK_STATUS(ddr3_tip_bus_write
+			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
+			      DDR_PHY_DATA, 0x90, 0x6002));
+
+	return MV_OK;
+}
+
+int mv_ddr_manual_cal_do(void)
+{
+	return 0;
+}
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_plat.h b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_plat.h
new file mode 100644
index 000000000..44998847c
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_plat.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef _MV_DDR_PLAT_H
+#define _MV_DDR_PLAT_H
+
+#include <linux/delay.h>
+
+#define MAX_DEVICE_NUM			1
+#define MAX_INTERFACE_NUM		1
+#define MAX_BUS_NUM			5
+#define DDR_IF_CTRL_SUBPHYS_NUM		3
+
+#define DFS_LOW_FREQ_VALUE		120
+#define SDRAM_CS_SIZE			0xfffffff	/* FIXME: implement a function for cs size for each platform */
+
+#define INTER_REGS_BASE			SOC_REGS_PHY_BASE
+#define AP_INT_REG_START_ADDR		0xd0000000
+#define AP_INT_REG_END_ADDR		0xd0100000
+
+/* Controller bus divider: 1 for 32 bit, 2 for 64 bit */
+#define DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER	1
+
+/* Tune internal training params values */
+#define TUNE_TRAINING_PARAMS_CK_DELAY		160
+#define TUNE_TRAINING_PARAMS_PHYREG3VAL		0xA
+#define TUNE_TRAINING_PARAMS_PRI_DATA		123
+#define TUNE_TRAINING_PARAMS_NRI_DATA		123
+#define TUNE_TRAINING_PARAMS_PRI_CTRL		74
+#define TUNE_TRAINING_PARAMS_NRI_CTRL		74
+#define TUNE_TRAINING_PARAMS_P_ODT_DATA		45
+#define TUNE_TRAINING_PARAMS_N_ODT_DATA		45
+#define TUNE_TRAINING_PARAMS_P_ODT_CTRL		45
+#define TUNE_TRAINING_PARAMS_N_ODT_CTRL		45
+#define TUNE_TRAINING_PARAMS_DIC		0x2
+#define TUNE_TRAINING_PARAMS_ODT_CONFIG_2CS	0x120012
+#define TUNE_TRAINING_PARAMS_ODT_CONFIG_1CS	0x10000
+#define TUNE_TRAINING_PARAMS_RTT_NOM		0x44
+
+#define TUNE_TRAINING_PARAMS_RTT_WR_1CS		0x0	/* off */
+#define TUNE_TRAINING_PARAMS_RTT_WR_2CS		0x0	/* off */
+
+#define MARVELL_BOARD				MARVELL_BOARD_ID_BASE
+
+#define REG_DEVICE_SAR1_ADDR			0xe4204
+#define RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET	17
+#define RST2_CPU_DDR_CLOCK_SELECT_IN_MASK	0x1f
+#define DEVICE_SAMPLE_AT_RESET2_REG		0x18604
+
+#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET	0
+#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ	0
+#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_40MHZ	1
+
+/* DRAM Windows */
+#define REG_XBAR_WIN_5_CTRL_ADDR		0x20050
+#define REG_XBAR_WIN_5_BASE_ADDR		0x20054
+
+/* DRAM Windows */
+#define REG_XBAR_WIN_4_CTRL_ADDR		0x20040
+#define REG_XBAR_WIN_4_BASE_ADDR		0x20044
+#define REG_XBAR_WIN_4_REMAP_ADDR		0x20048
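+/*
+ * Note: windows 4-7 each use a 0x10 register stride (CTRL at +0x0, BASE at
+ * +0x4, REMAP at +0x8), as the window 4/5 addresses above show. That stride
+ * is the win_jump_index = 0x10 step used by
+ * ddr3_save_and_set_training_windows(), and only windows up to 7 carry a
+ * REMAP register, hence the win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR
+ * check there.
+ */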
+#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078 +#define REG_XBAR_WIN_16_CTRL_ADDR 0x200d0 +#define REG_XBAR_WIN_16_BASE_ADDR 0x200d4 +#define REG_XBAR_WIN_16_REMAP_ADDR 0x200dc +#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8 + +#define REG_FASTPATH_WIN_BASE_ADDR(win) (0x20180 + (0x8 * win)) +#define REG_FASTPATH_WIN_CTRL_ADDR(win) (0x20184 + (0x8 * win)) + +#define CPU_CONFIGURATION_REG(id) (0x21800 + (id * 0x100)) +#define CPU_MRVL_ID_OFFSET 0x10 +#define SAR1_CPU_CORE_MASK 0x00000018 +#define SAR1_CPU_CORE_OFFSET 3 + +/* SatR defined too change topology busWidth and ECC configuration */ +#define DDR_SATR_CONFIG_MASK_WIDTH 0x8 +#define DDR_SATR_CONFIG_MASK_ECC 0x10 +#define DDR_SATR_CONFIG_MASK_ECC_PUP 0x20 + +#define REG_SAMPLE_RESET_HIGH_ADDR 0x18600 + +#define MV_BOARD_REFCLK_25MHZ 25000000 +#define MV_BOARD_REFCLK MV_BOARD_REFCLK_25MHZ + +#define MAX_DQ_NUM 40 + +/* dram line buffer registers */ +#define DLB_CTRL_REG 0x1700 +#define DLB_EN_OFFS 0 +#define DLB_EN_MASK 0x1 +#define DLB_EN_ENA 1 +#define DLB_EN_DIS 0 +#define WR_COALESCE_EN_OFFS 2 +#define WR_COALESCE_EN_MASK 0x1 +#define WR_COALESCE_EN_ENA 1 +#define WR_COALESCE_EN_DIS 0 +#define AXI_PREFETCH_EN_OFFS 3 +#define AXI_PREFETCH_EN_MASK 0x1 +#define AXI_PREFETCH_EN_ENA 1 +#define AXI_PREFETCH_EN_DIS 0 +#define MBUS_PREFETCH_EN_OFFS 4 +#define MBUS_PREFETCH_EN_MASK 0x1 +#define MBUS_PREFETCH_EN_ENA 1 +#define MBUS_PREFETCH_EN_DIS 0 +#define PREFETCH_NXT_LN_SZ_TRIG_OFFS 6 +#define PREFETCH_NXT_LN_SZ_TRIG_MASK 0x1 +#define PREFETCH_NXT_LN_SZ_TRIG_ENA 1 +#define PREFETCH_NXT_LN_SZ_TRIG_DIS 0 + +#define DLB_BUS_OPT_WT_REG 0x1704 +#define DLB_AGING_REG 0x1708 +#define DLB_EVICTION_CTRL_REG 0x170c +#define DLB_EVICTION_TIMERS_REG 0x1710 +#define DLB_USER_CMD_REG 0x1714 +#define DLB_WTS_DIFF_CS_REG 0x1770 +#define DLB_WTS_DIFF_BG_REG 0x1774 +#define DLB_WTS_SAME_BG_REG 0x1778 +#define DLB_WTS_CMDS_REG 0x177c +#define DLB_WTS_ATTR_PRIO_REG 0x1780 +#define DLB_QUEUE_MAP_REG 0x1784 +#define DLB_SPLIT_REG 0x1788 + +/* ck swap control subphy number */ +#define CK_SWAP_CTRL_PHY_NUM 2 + +/* Subphy result control per byte registers */ +#define RESULT_CONTROL_BYTE_PUP_0_REG 0x1830 +#define RESULT_CONTROL_BYTE_PUP_1_REG 0x1834 +#define RESULT_CONTROL_BYTE_PUP_2_REG 0x1838 +#define RESULT_CONTROL_BYTE_PUP_3_REG 0x183c +#define RESULT_CONTROL_BYTE_PUP_4_REG 0x18b0 + +/* Subphy result control per bit registers */ +#define RESULT_CONTROL_PUP_0_BIT_0_REG 0x18b4 +#define RESULT_CONTROL_PUP_0_BIT_1_REG 0x18b8 +#define RESULT_CONTROL_PUP_0_BIT_2_REG 0x18bc +#define RESULT_CONTROL_PUP_0_BIT_3_REG 0x18c0 +#define RESULT_CONTROL_PUP_0_BIT_4_REG 0x18c4 +#define RESULT_CONTROL_PUP_0_BIT_5_REG 0x18c8 +#define RESULT_CONTROL_PUP_0_BIT_6_REG 0x18cc +#define RESULT_CONTROL_PUP_0_BIT_7_REG 0x18f0 + +#define RESULT_CONTROL_PUP_1_BIT_0_REG 0x18f4 +#define RESULT_CONTROL_PUP_1_BIT_1_REG 0x18f8 +#define RESULT_CONTROL_PUP_1_BIT_2_REG 0x18fc +#define RESULT_CONTROL_PUP_1_BIT_3_REG 0x1930 +#define RESULT_CONTROL_PUP_1_BIT_4_REG 0x1934 +#define RESULT_CONTROL_PUP_1_BIT_5_REG 0x1938 +#define RESULT_CONTROL_PUP_1_BIT_6_REG 0x193c +#define RESULT_CONTROL_PUP_1_BIT_7_REG 0x19b0 + +#define RESULT_CONTROL_PUP_2_BIT_0_REG 0x19b4 +#define RESULT_CONTROL_PUP_2_BIT_1_REG 0x19b8 +#define RESULT_CONTROL_PUP_2_BIT_2_REG 0x19bc +#define RESULT_CONTROL_PUP_2_BIT_3_REG 0x19c0 +#define RESULT_CONTROL_PUP_2_BIT_4_REG 0x19c4 +#define RESULT_CONTROL_PUP_2_BIT_5_REG 0x19c8 +#define RESULT_CONTROL_PUP_2_BIT_6_REG 0x19cc +#define RESULT_CONTROL_PUP_2_BIT_7_REG 0x19f0 + +#define 
RESULT_CONTROL_PUP_3_BIT_0_REG 0x19f4 +#define RESULT_CONTROL_PUP_3_BIT_1_REG 0x19f8 +#define RESULT_CONTROL_PUP_3_BIT_2_REG 0x19fc +#define RESULT_CONTROL_PUP_3_BIT_3_REG 0x1a30 +#define RESULT_CONTROL_PUP_3_BIT_4_REG 0x1a34 +#define RESULT_CONTROL_PUP_3_BIT_5_REG 0x1a38 +#define RESULT_CONTROL_PUP_3_BIT_6_REG 0x1a3c +#define RESULT_CONTROL_PUP_3_BIT_7_REG 0x1ab0 + +#define RESULT_CONTROL_PUP_4_BIT_0_REG 0x1ab4 +#define RESULT_CONTROL_PUP_4_BIT_1_REG 0x1ab8 +#define RESULT_CONTROL_PUP_4_BIT_2_REG 0x1abc +#define RESULT_CONTROL_PUP_4_BIT_3_REG 0x1ac0 +#define RESULT_CONTROL_PUP_4_BIT_4_REG 0x1ac4 +#define RESULT_CONTROL_PUP_4_BIT_5_REG 0x1ac8 +#define RESULT_CONTROL_PUP_4_BIT_6_REG 0x1acc +#define RESULT_CONTROL_PUP_4_BIT_7_REG 0x1af0 + +/* CPU */ +#define REG_BOOTROM_ROUTINE_ADDR 0x182d0 +#define REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS 12 + +/* Matrix enables DRAM modes (bus width/ECC) per boardId */ +#define TOPOLOGY_UPDATE_32BIT 0 +#define TOPOLOGY_UPDATE_32BIT_ECC 1 +#define TOPOLOGY_UPDATE_16BIT 2 +#define TOPOLOGY_UPDATE_16BIT_ECC 3 +#define TOPOLOGY_UPDATE_16BIT_ECC_PUP3 4 +#define TOPOLOGY_UPDATE { \ + /* 32Bit, 32bit ECC, 16bit, 16bit ECC PUP4, 16bit ECC PUP3 */ \ + {1, 1, 1, 1, 1}, /* RD_NAS_68XX_ID */ \ + {1, 1, 1, 1, 1}, /* DB_68XX_ID */ \ + {1, 0, 1, 0, 1}, /* RD_AP_68XX_ID */ \ + {1, 0, 1, 0, 1}, /* DB_AP_68XX_ID */ \ + {1, 0, 1, 0, 1}, /* DB_GP_68XX_ID */ \ + {0, 0, 1, 1, 0}, /* DB_BP_6821_ID */ \ + {1, 1, 1, 1, 1} /* DB_AMC_6820_ID */ \ + }; + +enum { + CPU_1066MHZ_DDR_400MHZ, + CPU_RESERVED_DDR_RESERVED0, + CPU_667MHZ_DDR_667MHZ, + CPU_800MHZ_DDR_800MHZ, + CPU_RESERVED_DDR_RESERVED1, + CPU_RESERVED_DDR_RESERVED2, + CPU_RESERVED_DDR_RESERVED3, + LAST_FREQ +}; + +/* struct used for DLB configuration array */ +struct dlb_config { + u32 reg_addr; + u32 reg_data; +}; + +#define ACTIVE_INTERFACE_MASK 0x1 + +extern u32 dmin_phy_reg_table[][2]; +extern u16 odt_slope[]; +extern u16 odt_intercept[]; + +int mv_ddr_pre_training_soc_config(const char *ddr_type); +int mv_ddr_post_training_soc_config(const char *ddr_type); +void mv_ddr_mem_scrubbing(void); +u32 mv_ddr_init_freq_get(void); +void mv_ddr_odpg_enable(void); +void mv_ddr_odpg_disable(void); +void mv_ddr_odpg_done_clr(void); +int mv_ddr_is_odpg_done(u32 count); +void mv_ddr_training_enable(void); +int mv_ddr_is_training_done(u32 count, u32 *result); +u32 mv_ddr_dm_pad_get(void); +int mv_ddr_pre_training_fixup(void); +int mv_ddr_post_training_fixup(void); +int mv_ddr_manual_cal_do(void); +int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size); + +#endif /* _MV_DDR_PLAT_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_regs.h b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_regs.h new file mode 100644 index 000000000..cf2a6c92e --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_regs.h @@ -0,0 +1,465 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef _MV_DDR_REGS_H +#define _MV_DDR_REGS_H + +#define GLOB_CTRL_STATUS_REG 0x1030 +#define TRAINING_TRIGGER_OFFS 0 +#define TRAINING_TRIGGER_MASK 0x1 +#define TRAINING_TRIGGER_ENA 1 +#define TRAINING_DONE_OFFS 1 +#define TRAINING_DONE_MASK 0x1 +#define TRAINING_DONE_DONE 1 +#define TRAINING_DONE_NOT_DONE 0 +#define TRAINING_RESULT_OFFS 2 +#define TRAINING_RESULT_MASK 0x1 +#define TRAINING_RESULT_PASS 0 +#define TRAINING_RESULT_FAIL 1 + +#define GENERAL_TRAINING_OPCODE_REG 0x1034 + +#define OPCODE_REG0_BASE 0x1038 +#define OPCODE_REG0_REG(obj) (OPCODE_REG0_BASE + (obj) * 0x4) + +#define OPCODE_REG1_BASE 0x10b0 +#define OPCODE_REG1_REG(obj) (OPCODE_REG1_BASE + (obj) * 0x4) + +#define CAL_PHY_BASE 0x10c0 +#define CAL_PHY_REG(obj) (CAL_PHY_BASE + (obj) * 0x4) + +#define WL_DONE_CNTR_REF_REG 0x10f8 +#define ODPG_WR_RD_MODE_ENA_REG 0x10fc + +#define SDRAM_CFG_REG 0x1400 +#define REFRESH_OFFS 0 +#define REFRESH_MASK 0x3fff +#define DRAM_TYPE_OFFS 14 +#define DRAM_TYPE_MASK 0x1 +#define BUS_IN_USE_OFFS 15 +#define BUS_IN_USE_MASK 0x1 +#define CPU_2DRAM_WR_BUFF_CUT_TH_OFFS 16 +#define CPU_2DRAM_WR_BUFF_CUT_TH_MASK 0x1 +#define REG_DIMM_OFFS 17 +#define REG_DIMM_MASK 0x1 +#define ECC_OFFS 18 +#define ECC_MASK 0x1 +#define IGNORE_ERRORS_OFFS 19 +#define IGNORE_ERRORS_MASK 0x1 +#define DRAM_TYPE_HIGH_OFFS 20 +#define DRAM_TYPE_HIGH_MASK 0x1 +#define SELF_REFRESH_MODE_OFFS 24 +#define SELF_REFRESH_MODE_MASK 0x1 +#define CPU_RD_PER_PROP_OFFS 25 +#define CPU_RD_PER_PROP_MASK 0x1 +#define DDR4_EMULATION_OFFS 26 +#define DDR4_EMULATION_MASK 0x1 +#define PHY_RF_RST_OFFS 27 +#define PHY_RF_RST_MASK 0x1 +#define PUP_RST_DIVIDER_OFFS 28 +#define PUP_RST_DIVIDER_MASK 0x1 +#define DATA_PUP_WR_RESET_OFFS 29 +#define DATA_PUP_WR_RESET_MASK 0x1 +#define DATA_PUP_RD_RESET_OFFS 30 +#define DATA_PUP_RD_RESET_MASK 0x1 +#define DATA_PUP_RD_RESET_ENA 0x0 +#define DATA_PUP_RD_RESET_DIS 0x1 +#define IO_BIST_OFFS 31 +#define DATA_PUP_RD_RESET_MASK 0x1 + +#define DUNIT_CTRL_LOW_REG 0x1404 + +#define SDRAM_TIMING_LOW_REG 0x1408 +#define SDRAM_TIMING_LOW_TRAS_OFFS 0 +#define SDRAM_TIMING_LOW_TRAS_MASK 0xf +#define SDRAM_TIMING_LOW_TRCD_OFFS 4 +#define SDRAM_TIMING_LOW_TRCD_MASK 0xf +#define SDRAM_TIMING_HIGH_TRCD_OFFS 22 +#define SDRAM_TIMING_HIGH_TRCD_MASK 0x1 +#define SDRAM_TIMING_LOW_TRP_OFFS 8 +#define SDRAM_TIMING_LOW_TRP_MASK 0xf +#define SDRAM_TIMING_HIGH_TRP_OFFS 23 +#define SDRAM_TIMING_HIGH_TRP_MASK 0x1 +#define SDRAM_TIMING_LOW_TWR_OFFS 12 +#define SDRAM_TIMING_LOW_TWR_MASK 0xf +#define SDRAM_TIMING_LOW_TWTR_OFFS 16 +#define SDRAM_TIMING_LOW_TWTR_MASK 0xf +#define SDRAM_TIMING_LOW_TRAS_HIGH_OFFS 20 +#define SDRAM_TIMING_LOW_TRAS_HIGH_MASK 0x3 +#define SDRAM_TIMING_LOW_TRRD_OFFS 24 +#define SDRAM_TIMING_LOW_TRRD_MASK 0xf +#define SDRAM_TIMING_LOW_TRTP_OFFS 28 +#define SDRAM_TIMING_LOW_TRTP_MASK 0xf + +#define SDRAM_TIMING_HIGH_REG 0x140c +#define SDRAM_TIMING_HIGH_TRFC_OFFS 0 +#define SDRAM_TIMING_HIGH_TRFC_MASK 0x7f +#define SDRAM_TIMING_HIGH_TR2R_OFFS 7 +#define SDRAM_TIMING_HIGH_TR2R_MASK 0x3 +#define SDRAM_TIMING_HIGH_TR2W_W2R_OFFS 9 +#define SDRAM_TIMING_HIGH_TR2W_W2R_MASK 0x3 +#define SDRAM_TIMING_HIGH_TW2W_OFFS 11 +#define SDRAM_TIMING_HIGH_TW2W_MASK 0x1f +#define SDRAM_TIMING_HIGH_TRFC_HIGH_OFFS 16 +#define SDRAM_TIMING_HIGH_TRFC_HIGH_MASK 0x7 +#define SDRAM_TIMING_HIGH_TR2R_HIGH_OFFS 19 +#define SDRAM_TIMING_HIGH_TR2R_HIGH_MASK 0x7 +#define SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_OFFS 22 +#define SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_MASK 0x7 +#define SDRAM_TIMING_HIGH_TMOD_OFFS 25 +#define 
SDRAM_TIMING_HIGH_TMOD_MASK 0xf +#define SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS 30 +#define SDRAM_TIMING_HIGH_TMOD_HIGH_MASK 0x3 + +#define SDRAM_ADDR_CTRL_REG 0x1410 +#define CS_STRUCT_BASE 0 +#define CS_STRUCT_OFFS(cs) (CS_STRUCT_BASE + (cs) * 4) +#define CS_STRUCT_MASK 0x3 +#define CS_SIZE_BASE 2 +#define CS_SIZE_OFFS(cs) (CS_SIZE_BASE + (cs) * 4) +#define CS_SIZE_MASK 0x3 +#define CS_SIZE_HIGH_BASE 20 +#define CS_SIZE_HIGH_OFFS(cs) (CS_SIZE_HIGH_BASE + (cs)) +#define CS_SIZE_HIGH_MASK 0x1 +#define T_FAW_OFFS 24 +#define T_FAW_MASK 0x7f + +#define SDRAM_OPEN_PAGES_CTRL_REG 0x1414 + +#define SDRAM_OP_REG 0x1418 +#define SDRAM_OP_CMD_OFFS 0 +#define SDRAM_OP_CMD_MASK 0x1f +#define SDRAM_OP_CMD_CS_BASE 8 +#define SDRAM_OP_CMD_CS_OFFS(cs) (SDRAM_OP_CMD_CS_BASE + (cs)) +#define SDRAM_OP_CMD_CS_MASK 0x1 +#define SDRAM_OP_CMD_ALL_CS_MASK 0xf +enum { + CMD_NORMAL, + CMD_PRECHARGE, + CMD_REFRESH, + CMD_DDR3_DDR4_MR0, + CMD_DDR3_DDR4_MR1, + CMD_NOP, + CMD_RES_0X6, + CMD_SELFREFRESH, + CMD_DDR3_DDR4_MR2, + CMD_DDR3_DDR4_MR3, + CMD_ACT_PDE, + CMD_PRE_PDE, + CMD_ZQCL, + CMD_ZQCS, + CMD_CWA, + CMD_RES_0XF, + CMD_DDR4_MR4, + CMD_DDR4_MR5, + CMD_DDR4_MR6, + DDR4_MPR_WR +}; + +#define DUNIT_CTRL_HIGH_REG 0x1424 +#define CPU_INTERJECTION_ENA_OFFS 3 +#define CPU_INTERJECTION_ENA_MASK 0x1 +#define CPU_INTERJECTION_ENA_SPLIT_ENA 0 +#define CPU_INTERJECTION_ENA_SPLIT_DIS 1 + +#define DDR_ODT_TIMING_LOW_REG 0x1428 + +#define DDR_TIMING_REG 0x142c +#define DDR_TIMING_TCCD_OFFS 18 +#define DDR_TIMING_TCCD_MASK 0x7 +#define DDR_TIMING_TPD_OFFS 0 +#define DDR_TIMING_TPD_MASK 0xf +#define DDR_TIMING_TXPDLL_OFFS 4 +#define DDR_TIMING_TXPDLL_MASK 0x1f + +#define DDR_ODT_TIMING_HIGH_REG 0x147c + +#define SDRAM_INIT_CTRL_REG 0x1480 +#define DRAM_RESET_MASK_OFFS 1 +#define DRAM_RESET_MASK_MASK 0x1 +#define DRAM_RESET_MASK_NORMAL 0 +#define DRAM_RESET_MASK_MASKED 1 + +#define SDRAM_ODT_CTRL_HIGH_REG 0x1498 +#define DUNIT_ODT_CTRL_REG 0x149c +#define RD_BUFFER_SEL_REG 0x14a4 +#define AXI_CTRL_REG 0x14a8 +#define DUNIT_MMASK_REG 0x14b0 + +#define HORZ_SSTL_CAL_MACH_CTRL_REG 0x14c8 +#define HORZ_POD_CAL_MACH_CTRL_REG 0x17c8 +#define VERT_SSTL_CAL_MACH_CTRL_REG 0x1dc8 +#define VERT_POD_CAL_MACH_CTRL_REG 0x1ec8 + +#define MAIN_PADS_CAL_MACH_CTRL_REG 0x14cc +#define DYN_PADS_CAL_ENABLE_OFFS 0 +#define DYN_PADS_CAL_ENABLE_MASK 0x1 +#define DYN_PADS_CAL_ENABLE_DIS 0 +#define DYN_PADS_CAL_ENABLE_ENA 1 +#define PADS_RECAL_OFFS 1 +#define PADS_RECAL_MASK 0x1 +#define DYN_PADS_CAL_BLOCK_OFFS 2 +#define DYN_PADS_CAL_BLOCK_MASK 0x1 +#define CAL_UPDATE_CTRL_OFFS 3 +#define CAL_UPDATE_CTRL_MASK 0x3 +#define CAL_UPDATE_CTRL_INT 1 +#define CAL_UPDATE_CTRL_EXT 2 +#define DYN_PADS_CAL_CNTR_OFFS 13 +#define DYN_PADS_CAL_CNTR_MASK 0x3ffff +#define CAL_MACH_STATUS_OFFS 31 +#define CAL_MACH_STATUS_MASK 0x1 +#define CAL_MACH_BUSY 0 +#define CAL_MACH_RDY 1 + +#define DRAM_DLL_TIMING_REG 0x14e0 +#define DRAM_ZQ_INIT_TIMIMG_REG 0x14e4 +#define DRAM_ZQ_TIMING_REG 0x14e8 + +#define DRAM_LONG_TIMING_REG 0x14ec +#define DDR4_TRRD_L_OFFS 0 +#define DDR4_TRRD_L_MASK 0xf +#define DDR4_TWTR_L_OFFS 4 +#define DDR4_TWTR_L_MASK 0xf + +#define DDR_IO_REG 0x1524 +#define DFS_REG 0x1528 + +#define RD_DATA_SMPL_DLYS_REG 0x1538 +#define RD_SMPL_DLY_CS_BASE 0 +#define RD_SMPL_DLY_CS_OFFS(cs) (RD_SMPL_DLY_CS_BASE + (cs) * 8) +#define RD_SMPL_DLY_CS_MASK 0x1f + +#define RD_DATA_RDY_DLYS_REG 0x153c +#define RD_RDY_DLY_CS_BASE 0 +#define RD_RDY_DLY_CS_OFFS(cs) (RD_RDY_DLY_CS_BASE + (cs) * 8) +#define RD_RDY_DLY_CS_MASK 0x1f + +#define TRAINING_REG 0x15b0 +#define 
TRN_START_OFFS 31 +#define TRN_START_MASK 0x1 +#define TRN_START_ENA 1 +#define TRN_START_DIS 0 + +#define TRAINING_SW_1_REG 0x15b4 + +#define TRAINING_SW_2_REG 0x15b8 +#define TRAINING_ECC_MUX_OFFS 1 +#define TRAINING_ECC_MUX_MASK 0x1 +#define TRAINING_ECC_MUX_DIS 0 +#define TRAINING_ECC_MUX_ENA 1 +#define TRAINING_SW_OVRD_OFFS 0 +#define TRAINING_SW_OVRD_MASK 0x1 +#define TRAINING_SW_OVRD_DIS 0 +#define TRAINING_SW_OVRD_ENA 1 + +#define TRAINING_PATTERN_BASE_ADDR_REG 0x15bc +#define TRAINING_DBG_1_REG 0x15c0 +#define TRAINING_DBG_2_REG 0x15c4 + +#define TRAINING_DBG_3_REG 0x15c8 +#define TRN_DBG_RDY_INC_PH_2TO1_BASE 0 +#define TRN_DBG_RDY_INC_PH_2TO1_OFFS(phase) (TRN_DBG_RDY_INC_PH_2TO1_BASE + (phase) * 3) +#define TRN_DBG_RDY_INC_PH_2TO1_MASK 0x7 + +#define DDR3_RANK_CTRL_REG 0x15e0 +#define CS_EXIST_BASE 0 +#define CS_EXIST_OFFS(cs) (CS_EXIST_BASE + (cs)) +#define CS_EXIST_MASK 0x1 + +#define ZQC_CFG_REG 0x15e4 +#define DRAM_PHY_CFG_REG 0x15ec +#define ODPG_CTRL_CTRL_REG 0x1600 +#define ODPG_CTRL_AUTO_REFRESH_OFFS 21 +#define ODPG_CTRL_AUTO_REFRESH_MASK 0x1 +#define ODPG_CTRL_AUTO_REFRESH_DIS 1 +#define ODPG_CTRL_AUTO_REFRESH_ENA 0 + +#define ODPG_DATA_CTRL_REG 0x1630 +#define ODPG_WRBUF_WR_CTRL_OFFS 0 +#define ODPG_WRBUF_WR_CTRL_MASK 0x1 +#define ODPG_WRBUF_WR_CTRL_DIS 0 +#define ODPG_WRBUF_WR_CTRL_ENA 1 +#define ODPG_WRBUF_RD_CTRL_OFFS 1 +#define ODPG_WRBUF_RD_CTRL_MASK 0x1 +#define ODPG_WRBUF_RD_CTRL_DIS 0 +#define ODPG_WRBUF_RD_CTRL_ENA 1 +#define ODPG_DATA_CBDEL_OFFS 15 +#define ODPG_DATA_CBDEL_MASK 0x3f +#define ODPG_MODE_OFFS 25 +#define ODPG_MODE_MASK 0x1 +#define ODPG_MODE_RX 0 +#define ODPG_MODE_TX 1 +#define ODPG_DATA_CS_OFFS 26 +#define ODPG_DATA_CS_MASK 0x3 +#define ODPG_DISABLE_OFFS 30 +#define ODPG_DISABLE_MASK 0x1 +#define ODPG_DISABLE_DIS 1 +#define ODPG_ENABLE_OFFS 31 +#define ODPG_ENABLE_MASK 0x1 +#define ODPG_ENABLE_ENA 1 + +#define ODPG_DATA_BUFFER_OFFS_REG 0x1638 +#define ODPG_DATA_BUFFER_SIZE_REG 0x163c +#define PHY_LOCK_STATUS_REG 0x1674 + +#define PHY_REG_FILE_ACCESS_REG 0x16a0 +#define PRFA_DATA_OFFS 0 +#define PRFA_DATA_MASK 0xffff +#define PRFA_REG_NUM_OFFS 16 +#define PRFA_REG_NUM_MASK 0x3f +#define PRFA_PUP_NUM_OFFS 22 +#define PRFA_PUP_NUM_MASK 0xf +#define PRFA_PUP_CTRL_DATA_OFFS 26 +#define PRFA_PUP_CTRL_DATA_MASK 0x1 +#define PRFA_PUP_BCAST_WR_ENA_OFFS 27 +#define PRFA_PUP_BCAST_WR_ENA_MASK 0x1 +#define PRFA_REG_NUM_HI_OFFS 28 +#define PRFA_REG_NUM_HI_MASK 0x3 +#define PRFA_TYPE_OFFS 30 +#define PRFA_TYPE_MASK 0x1 +#define PRFA_REQ_OFFS 31 +#define PRFA_REQ_MASK 0x1 +#define PRFA_REQ_DIS 0x0 +#define PRFA_REQ_ENA 0x1 + +#define TRAINING_WL_REG 0x16ac + +#define ODPG_DATA_WR_ADDR_REG 0x16b0 +#define ODPG_DATA_WR_ACK_OFFS 0 +#define ODPG_DATA_WR_ACK_MASK 0x7f +#define ODPG_DATA_WR_DATA_OFFS 8 +#define ODPG_DATA_WR_DATA_MASK 0xff + +#define ODPG_DATA_WR_DATA_HIGH_REG 0x16b4 +#define ODPG_DATA_WR_DATA_LOW_REG 0x16b8 +#define ODPG_DATA_RX_WORD_ERR_ADDR_REG 0x16bc +#define ODPG_DATA_RX_WORD_ERR_CNTR_REG 0x16c0 +#define ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG 0x16c4 +#define ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG 0x16c8 +#define ODPG_DATA_WR_DATA_ERR_REG 0x16cc + +#define DUAL_DUNIT_CFG_REG 0x16d8 +#define FC_SAMPLE_STAGES_OFFS 0 +#define FC_SAMPLE_STAGES_MASK 0x7 +#define SINGLE_CS_PIN_OFFS 3 +#define SINGLE_CS_PIN_MASK 0x1 +#define SINGLE_CS_ENA 1 +#define TUNING_ACTIVE_SEL_OFFS 6 +#define TUNING_ACTIVE_SEL_MASK 0x1 +#define TUNING_ACTIVE_SEL_MC 0 +#define TUNING_ACTIVE_SEL_TIP 1 + +#define WL_DQS_PATTERN_REG 0x16dc +#define ODPG_DONE_STATUS_REG 0x16fc +#define 
ODPG_DONE_STATUS_BIT_OFFS 0 +#define ODPG_DONE_STATUS_BIT_MASK 0x1 +#define ODPG_DONE_STATUS_BIT_CLR 0 +#define ODPG_DONE_STATUS_BIT_SET 1 + +#define RESULT_CTRL_BASE 0x1830 +#define BLOCK_STATUS_OFFS 25 +#define BLOCK_STATUS_MASK 0x1 +#define BLOCK_STATUS_LOCK 1 +#define BLOCK_STATUS_NOT_LOCKED 0 + +#define MR0_REG 0x15d0 +#define MR1_REG 0x15d4 +#define MR2_REG 0x15d8 +#define MR3_REG 0x15dc +#define MRS0_CMD 0x3 +#define MRS1_CMD 0x4 +#define MRS2_CMD 0x8 +#define MRS3_CMD 0x9 + + +#define DRAM_PINS_MUX_REG 0x19d4 +#define CTRL_PINS_MUX_OFFS 0 +#define CTRL_PINS_MUX_MASK 0x3 +enum { + DUNIT_DDR3_ON_BOARD, + DUNIT_DDR3_DIMM, + DUNIT_DDR4_ON_BOARD, + DUNIT_DDR4_DIMM +}; + +/* ddr phy registers */ +#define WL_PHY_BASE 0x0 +#define WL_PHY_REG(cs) (WL_PHY_BASE + (cs) * 0x4) +#define WR_LVL_PH_SEL_OFFS 6 +#define WR_LVL_PH_SEL_MASK 0x7 +#define WR_LVL_PH_SEL_PHASE1 1 +#define WR_LVL_REF_DLY_OFFS 0 +#define WR_LVL_REF_DLY_MASK 0x1f +#define CTRL_CENTER_DLY_OFFS 10 +#define CTRL_CENTER_DLY_MASK 0x1f +#define CTRL_CENTER_DLY_INV_OFFS 15 +#define CTRL_CENTER_DLY_INV_MASK 0x1 + +#define CTX_PHY_BASE 0x1 +#define CTX_PHY_REG(cs) (CTX_PHY_BASE + (cs) * 0x4) + +#define RL_PHY_BASE 0x2 +#define RL_PHY_REG(cs) (RL_PHY_BASE + (cs) * 0x4) +#define RL_REF_DLY_OFFS 0 +#define RL_REF_DLY_MASK 0x1f +#define RL_PH_SEL_OFFS 6 +#define RL_PH_SEL_MASK 0x7 + +#define CRX_PHY_BASE 0x3 +#define CRX_PHY_REG(cs) (CRX_PHY_BASE + (cs) * 0x4) + +#define PHY_CTRL_PHY_REG 0x90 +#define INV_PAD0_OFFS 2 +#define INV_PAD1_OFFS 3 +#define INV_PAD2_OFFS 4 +#define INV_PAD3_OFFS 5 +#define INV_PAD4_OFFS 6 +#define INV_PAD5_OFFS 7 +#define INV_PAD6_OFFS 8 +#define INV_PAD7_OFFS 9 +#define INV_PAD8_OFFS 10 +#define INV_PAD9_OFFS 11 +#define INV_PAD10_OFFS 12 +#define INV_PAD_MASK 0x1 +#define INVERT_PAD 1 + +#define ADLL_CFG0_PHY_REG 0x92 +#define ADLL_CFG1_PHY_REG 0x93 +#define ADLL_CFG2_PHY_REG 0x94 +#define CMOS_CONFIG_PHY_REG 0xa2 +#define PAD_ZRI_CAL_PHY_REG 0xa4 +#define PAD_ODT_CAL_PHY_REG 0xa6 +#define PAD_CFG_PHY_REG 0xa8 +#define PAD_PRE_DISABLE_PHY_REG 0xa9 +#define TEST_ADLL_PHY_REG 0xbf + +#define VREF_PHY_BASE 0xd0 +#define VREF_PHY_REG(cs, bit) (VREF_PHY_BASE + (cs) * 12 + bit) +enum { + DQSP_PAD = 4, + DQSN_PAD +}; + +#define VREF_BCAST_PHY_BASE 0xdb +#define VREF_BCAST_PHY_REG(cs) (VREF_BCAST_PHY_BASE + (cs) * 12) + +#define PBS_TX_PHY_BASE 0x10 +#define PBS_TX_PHY_REG(cs, bit) (PBS_TX_PHY_BASE + (cs) * 0x10 + (bit)) + +#define PBS_TX_BCAST_PHY_BASE 0x1f +#define PBS_TX_BCAST_PHY_REG(cs) (PBS_TX_BCAST_PHY_BASE + (cs) * 0x10) + +#define PBS_RX_PHY_BASE 0x50 +#define PBS_RX_PHY_REG(cs, bit) (PBS_RX_PHY_BASE + (cs) * 0x10 + (bit)) + +#define PBS_RX_BCAST_PHY_BASE 0x5f +#define PBS_RX_BCAST_PHY_REG(cs) (PBS_RX_BCAST_PHY_BASE + (cs) * 0x10) + +#define RESULT_PHY_REG 0xc0 +#define RESULT_PHY_RX_OFFS 5 +#define RESULT_PHY_TX_OFFS 0 + + +#endif /* _MV_DDR_REGS_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_spd.c b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_spd.c new file mode 100644 index 000000000..04dbfe94d --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_spd.c @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include "mv_ddr_spd.h" + +#define MV_DDR_SPD_DATA_MTB 125 /* medium timebase, ps */ +#define MV_DDR_SPD_DATA_FTB 1 /* fine timebase, ps */ +#define MV_DDR_SPD_MSB_OFFS 8 /* most significant byte offset, bits */ + +#define MV_DDR_SPD_SUPPORTED_CLS_NUM 30 + +static unsigned int mv_ddr_spd_supported_cls[MV_DDR_SPD_SUPPORTED_CLS_NUM]; + +int mv_ddr_spd_supported_cls_calc(union mv_ddr_spd_data *spd_data) +{ + unsigned int byte, bit, start_cl; + + start_cl = (spd_data->all_bytes[23] & 0x8) ? 23 : 7; + + for (byte = 20; byte < 23; byte++) { + for (bit = 0; bit < 8; bit++) { + if (spd_data->all_bytes[byte] & (1 << bit)) + mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = start_cl + (byte - 20) * 8 + bit; + else + mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = 0; + } + } + + for (byte = 23, bit = 0; bit < 6; bit++) { + if (spd_data->all_bytes[byte] & (1 << bit)) + mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = start_cl + (byte - 20) * 8 + bit; + else + mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = 0; + } + + return 0; +} + +unsigned int mv_ddr_spd_supported_cl_get(unsigned int cl) +{ + unsigned int supported_cl; + int i = 0; + + while (i < MV_DDR_SPD_SUPPORTED_CLS_NUM && + mv_ddr_spd_supported_cls[i] < cl) + i++; + + if (i < MV_DDR_SPD_SUPPORTED_CLS_NUM) + supported_cl = mv_ddr_spd_supported_cls[i]; + else + supported_cl = 0; + + return supported_cl; +} + +int mv_ddr_spd_timing_calc(union mv_ddr_spd_data *spd_data, unsigned int timing_data[]) +{ + int calc_val; + + /* t ck avg min, ps */ + calc_val = spd_data->byte_fields.byte_18 * MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_125 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + timing_data[MV_DDR_TCK_AVG_MIN] = calc_val; + + /* t aa min, ps */ + calc_val = spd_data->byte_fields.byte_24 * MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_123 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + timing_data[MV_DDR_TAA_MIN] = calc_val; + + /* t rfc1 min, ps */ + timing_data[MV_DDR_TRFC1_MIN] = (spd_data->byte_fields.byte_30 + + (spd_data->byte_fields.byte_31 << MV_DDR_SPD_MSB_OFFS)) * MV_DDR_SPD_DATA_MTB; + + /* t wr min, ps */ + timing_data[MV_DDR_TWR_MIN] = (spd_data->byte_fields.byte_42 + + (spd_data->byte_fields.byte_41.bit_fields.t_wr_min_msn << MV_DDR_SPD_MSB_OFFS)) * + MV_DDR_SPD_DATA_MTB; + + /* t rcd min, ps */ + calc_val = spd_data->byte_fields.byte_25 * MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_122 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + timing_data[MV_DDR_TRCD_MIN] = calc_val; + + /* t rp min, ps */ + calc_val = spd_data->byte_fields.byte_26 * MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_121 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + timing_data[MV_DDR_TRP_MIN] = calc_val; + + /* t rc min, ps */ + calc_val = (spd_data->byte_fields.byte_29 + + (spd_data->byte_fields.byte_27.bit_fields.t_rc_min_msn << MV_DDR_SPD_MSB_OFFS)) * + MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_120 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + timing_data[MV_DDR_TRC_MIN] = calc_val; + + /* t ras min, ps */ + timing_data[MV_DDR_TRAS_MIN] = (spd_data->byte_fields.byte_28 + + (spd_data->byte_fields.byte_27.bit_fields.t_ras_min_msn << MV_DDR_SPD_MSB_OFFS)) * + MV_DDR_SPD_DATA_MTB; + + /* t rrd s min, ps */ + calc_val = spd_data->byte_fields.byte_38 * MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_119 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + 
timing_data[MV_DDR_TRRD_S_MIN] = calc_val; + + /* t rrd l min, ps */ + calc_val = spd_data->byte_fields.byte_39 * MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_118 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + timing_data[MV_DDR_TRRD_L_MIN] = calc_val; + + /* t ccd l min, ps */ + calc_val = spd_data->byte_fields.byte_40 * MV_DDR_SPD_DATA_MTB + + (signed char)spd_data->byte_fields.byte_117 * MV_DDR_SPD_DATA_FTB; + if (calc_val < 0) + return 1; + timing_data[MV_DDR_TCCD_L_MIN] = calc_val; + + /* t faw min, ps */ + timing_data[MV_DDR_TFAW_MIN] = (spd_data->byte_fields.byte_37 + + (spd_data->byte_fields.byte_36.bit_fields.t_faw_min_msn << MV_DDR_SPD_MSB_OFFS)) * + MV_DDR_SPD_DATA_MTB; + + /* t wtr s min, ps */ + timing_data[MV_DDR_TWTR_S_MIN] = (spd_data->byte_fields.byte_44 + + (spd_data->byte_fields.byte_43.bit_fields.t_wtr_s_min_msn << MV_DDR_SPD_MSB_OFFS)) * + MV_DDR_SPD_DATA_MTB; + + /* t wtr l min, ps */ + timing_data[MV_DDR_TWTR_L_MIN] = (spd_data->byte_fields.byte_45 + + (spd_data->byte_fields.byte_43.bit_fields.t_wtr_l_min_msn << MV_DDR_SPD_MSB_OFFS)) * + MV_DDR_SPD_DATA_MTB; + + return 0; +} + +enum mv_ddr_dev_width mv_ddr_spd_dev_width_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char dev_width = spd_data->byte_fields.byte_12.bit_fields.device_width; + enum mv_ddr_dev_width ret_val; + + switch (dev_width) { + case 0x00: + ret_val = MV_DDR_DEV_WIDTH_4BIT; + break; + case 0x01: + ret_val = MV_DDR_DEV_WIDTH_8BIT; + break; + case 0x02: + ret_val = MV_DDR_DEV_WIDTH_16BIT; + break; + case 0x03: + ret_val = MV_DDR_DEV_WIDTH_32BIT; + break; + default: + ret_val = MV_DDR_DEV_WIDTH_LAST; + } + + return ret_val; +} + +enum mv_ddr_die_capacity mv_ddr_spd_die_capacity_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char die_cap = spd_data->byte_fields.byte_4.bit_fields.die_capacity; + enum mv_ddr_die_capacity ret_val; + + switch (die_cap) { + case 0x00: + ret_val = MV_DDR_DIE_CAP_256MBIT; + break; + case 0x01: + ret_val = MV_DDR_DIE_CAP_512MBIT; + break; + case 0x02: + ret_val = MV_DDR_DIE_CAP_1GBIT; + break; + case 0x03: + ret_val = MV_DDR_DIE_CAP_2GBIT; + break; + case 0x04: + ret_val = MV_DDR_DIE_CAP_4GBIT; + break; + case 0x05: + ret_val = MV_DDR_DIE_CAP_8GBIT; + break; + case 0x06: + ret_val = MV_DDR_DIE_CAP_16GBIT; + break; + case 0x07: + ret_val = MV_DDR_DIE_CAP_32GBIT; + break; + case 0x08: + ret_val = MV_DDR_DIE_CAP_12GBIT; + break; + case 0x09: + ret_val = MV_DDR_DIE_CAP_24GBIT; + break; + default: + ret_val = MV_DDR_DIE_CAP_LAST; + } + + return ret_val; +} + +unsigned char mv_ddr_spd_mem_mirror_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char mem_mirror = spd_data->byte_fields.byte_131.bit_fields.rank_1_mapping; + + return mem_mirror; +} + +enum mv_ddr_pkg_rank mv_ddr_spd_pri_bus_width_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char pri_bus_width = spd_data->byte_fields.byte_13.bit_fields.primary_bus_width; + enum mv_ddr_pri_bus_width ret_val; + + switch (pri_bus_width) { + case 0x00: + ret_val = MV_DDR_PRI_BUS_WIDTH_8; + break; + case 0x01: + ret_val = MV_DDR_PRI_BUS_WIDTH_16; + break; + case 0x02: + ret_val = MV_DDR_PRI_BUS_WIDTH_32; + break; + case 0x03: + ret_val = MV_DDR_PRI_BUS_WIDTH_64; + break; + default: + ret_val = MV_DDR_PRI_BUS_WIDTH_LAST; + } + + return ret_val; +} + +enum mv_ddr_pkg_rank mv_ddr_spd_bus_width_ext_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char bus_width_ext = spd_data->byte_fields.byte_13.bit_fields.bus_width_ext; + enum mv_ddr_bus_width_ext ret_val; + + switch (bus_width_ext) { + case 0x00: + 
ret_val = MV_DDR_BUS_WIDTH_EXT_0; + break; + case 0x01: + ret_val = MV_DDR_BUS_WIDTH_EXT_8; + break; + default: + ret_val = MV_DDR_BUS_WIDTH_EXT_LAST; + } + + return ret_val; +} + +static enum mv_ddr_pkg_rank mv_ddr_spd_pkg_rank_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char pkg_rank = spd_data->byte_fields.byte_12.bit_fields.dimm_pkg_ranks_num; + enum mv_ddr_pkg_rank ret_val; + + switch (pkg_rank) { + case 0x00: + ret_val = MV_DDR_PKG_RANK_1; + break; + case 0x01: + ret_val = MV_DDR_PKG_RANK_2; + break; + case 0x02: + ret_val = MV_DDR_PKG_RANK_3; + break; + case 0x03: + ret_val = MV_DDR_PKG_RANK_4; + break; + case 0x04: + ret_val = MV_DDR_PKG_RANK_5; + break; + case 0x05: + ret_val = MV_DDR_PKG_RANK_6; + break; + case 0x06: + ret_val = MV_DDR_PKG_RANK_7; + break; + case 0x07: + ret_val = MV_DDR_PKG_RANK_8; + break; + default: + ret_val = MV_DDR_PKG_RANK_LAST; + } + + return ret_val; +} + +static enum mv_ddr_die_count mv_ddr_spd_die_count_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char die_count = spd_data->byte_fields.byte_6.bit_fields.die_count; + enum mv_ddr_die_count ret_val; + + switch (die_count) { + case 0x00: + ret_val = MV_DDR_DIE_CNT_1; + break; + case 0x01: + ret_val = MV_DDR_DIE_CNT_2; + break; + case 0x02: + ret_val = MV_DDR_DIE_CNT_3; + break; + case 0x03: + ret_val = MV_DDR_DIE_CNT_4; + break; + case 0x04: + ret_val = MV_DDR_DIE_CNT_5; + break; + case 0x05: + ret_val = MV_DDR_DIE_CNT_6; + break; + case 0x06: + ret_val = MV_DDR_DIE_CNT_7; + break; + case 0x07: + ret_val = MV_DDR_DIE_CNT_8; + break; + default: + ret_val = MV_DDR_DIE_CNT_LAST; + } + + return ret_val; +} + +unsigned char mv_ddr_spd_cs_bit_mask_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char cs_bit_mask = 0x0; + enum mv_ddr_pkg_rank pkg_rank = mv_ddr_spd_pkg_rank_get(spd_data); + enum mv_ddr_die_count die_cnt = mv_ddr_spd_die_count_get(spd_data); + + if (pkg_rank == MV_DDR_PKG_RANK_1 && die_cnt == MV_DDR_DIE_CNT_1) + cs_bit_mask = 0x1; + else if (pkg_rank == MV_DDR_PKG_RANK_1 && die_cnt == MV_DDR_DIE_CNT_2) + cs_bit_mask = 0x3; + else if (pkg_rank == MV_DDR_PKG_RANK_2 && die_cnt == MV_DDR_DIE_CNT_1) + cs_bit_mask = 0x3; + else if (pkg_rank == MV_DDR_PKG_RANK_2 && die_cnt == MV_DDR_DIE_CNT_2) + cs_bit_mask = 0xf; + + return cs_bit_mask; +} + +unsigned char mv_ddr_spd_dev_type_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char dev_type = spd_data->byte_fields.byte_2; + + return dev_type; +} + +unsigned char mv_ddr_spd_module_type_get(union mv_ddr_spd_data *spd_data) +{ + unsigned char module_type = spd_data->byte_fields.byte_3.bit_fields.module_type; + + return module_type; +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_spd.h b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_spd.h new file mode 100644 index 000000000..6043f11b2 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_spd.h @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _MV_DDR_SPD_H +#define _MV_DDR_SPD_H + +#include "mv_ddr_topology.h" + +/* + * Based on JEDEC Standard No. 
21-C, 4.1.2.L-4: + * Serial Presence Detect (SPD) for DDR4 SDRAM Modules + */ + +/* block 0: base configuration and dram parameters */ +#define MV_DDR_SPD_DATA_BLOCK0_SIZE 128 +/* block 1: module specific parameters sub-block */ +#define MV_DDR_SPD_DATA_BLOCK1M_SIZE 64 +/* block 1: hybrid memory parameters sub-block */ +#define MV_DDR_SPD_DATA_BLOCK1H_SIZE 64 +/* block 2: extended function parameter block */ +#define MV_DDR_SPD_DATA_BLOCK2E_SIZE 64 +/* block 2: manufacturing information */ +#define MV_DDR_SPD_DATA_BLOCK2M_SIZE 64 +/* block 3: end user programmable */ +#define MV_DDR_SPD_DATA_BLOCK3_SIZE 128 + +#define MV_DDR_SPD_DEV_TYPE_DDR4 0xc +#define MV_DDR_SPD_MODULE_TYPE_UDIMM 0x2 +#define MV_DDR_SPD_MODULE_TYPE_SO_DIMM 0x3 +#define MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM 0x6 +#define MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM 0x9 +#define MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM 0xc +#define MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM 0xd + +/* + * TODO: For now, the struct contains block 0 & block 1 with module specific + * parameters for unbuffered memory module types only. + */ +union mv_ddr_spd_data { + unsigned char all_bytes[MV_DDR_SPD_DATA_BLOCK0_SIZE + + MV_DDR_SPD_DATA_BLOCK1M_SIZE + + MV_DDR_SPD_DATA_BLOCK1H_SIZE + + MV_DDR_SPD_DATA_BLOCK2E_SIZE + + MV_DDR_SPD_DATA_BLOCK2M_SIZE]; + struct { + /* block 0 */ + union { /* num of bytes used/num of bytes in spd device/crc coverage */ + unsigned char all_bits; + struct { + unsigned char spd_bytes_used:4, + spd_bytes_total:3, + reserved:1; + } bit_fields; + } byte_0; + union { /* spd revision */ + unsigned char all_bits; + struct { + unsigned char addtions_level:4, + encoding_level:4; + } bit_fields; + } byte_1; + unsigned char byte_2; /* key_byte/dram device type */ + union { /* key byte/module type */ + unsigned char all_bits; + struct { + unsigned char module_type:4, + hybrid_media:3, + hybrid:1; + } bit_fields; + } byte_3; + union { /* sdram density & banks */ + unsigned char all_bits; + struct { + unsigned char die_capacity:4, + bank_address:2, + bank_group:2; + } bit_fields; + } byte_4; + union { /* sdram addressing */ + unsigned char all_bits; + struct { + unsigned char col_address:3, + row_address:3, + reserved:2; + } bit_fields; + } byte_5; + union { /* sdram package type */ + unsigned char all_bits; + struct { + unsigned char signal_loading:2, + reserved:2, + die_count:3, + sdram_package_type:1; + } bit_fields; + } byte_6; + union { /* sdram optional features */ + unsigned char all_bits; + struct { + unsigned char mac:4, /* max activate count */ + t_maw:2, /* max activate window */ + reserved:2; /* all 0s */ + } bit_fields; + } byte_7; + unsigned char byte_8; /* sdram thermal & refresh options; reserved; 0x00 */ + union { /* other sdram optional features */ + unsigned char all_bits; + struct { + unsigned char reserved:5, /* all 0s */ + soft_ppr:1, + ppr:2; /* post package repair */ + } bit_fields; + } byte_9; + union { /* secondary sdram package type */ + unsigned char all_bits; + struct { + unsigned char signal_loading:2, + density_ratio:2, /* dram density ratio */ + die_count:3, + sdram_package_type:1; + } bit_fields; + } byte_10; + union { /* module nominal voltage, vdd */ + unsigned char all_bits; + struct { + unsigned char operable:1, + endurant:1, + reserved:5; /* all 0s */ + } bit_fields; + } byte_11; + union { /* module organization*/ + unsigned char all_bits; + struct { + unsigned char device_width:3, + dimm_pkg_ranks_num:3, /* package ranks per dimm number */ + rank_mix:1, + reserved:1; /* 0 */ + } bit_fields; + } byte_12; + union 
{ /* module memory bus width */ + unsigned char all_bits; + struct { + unsigned char primary_bus_width:3, /* in bits */ + bus_width_ext:2, /* in bits */ + reserved:3; /* all 0s */ + } bit_fields; + } byte_13; + union { /* module thernal sensor */ + unsigned char all_bits; + struct { + unsigned char reserved:7, + thermal_sensor:1; + } bit_fields; + } byte_14; + union { /* extended module type */ + unsigned char all_bits; + struct { + unsigned char ext_base_module_type:4, + reserved:4; /* all 0s */ + } bit_fields; + } byte_15; + unsigned char byte_16; /* reserved; 0x00 */ + union { /* timebases */ + unsigned char all_bits; + struct { + unsigned char ftb:2, /* fine timebase */ + mtb:2, /* medium timebase */ + reserved:4; /* all 0s */ + } bit_fields; + } byte_17; + unsigned char byte_18; /* sdram min cycle time (t ck avg min), mtb */ + unsigned char byte_19; /* sdram max cycle time (t ck avg max), mtb */ + unsigned char byte_20; /* cas latencies supported, first byte */ + unsigned char byte_21; /* cas latencies supported, second byte */ + unsigned char byte_22; /* cas latencies supported, third byte */ + unsigned char byte_23; /* cas latencies supported, fourth byte */ + unsigned char byte_24; /* min cas latency time (t aa min), mtb */ + unsigned char byte_25; /* min ras to cas delay time (t rcd min), mtb */ + unsigned char byte_26; /* min row precharge delay time (t rp min), mtb */ + union { /* upper nibbles for t ras min & t rc min */ + unsigned char all_bits; + struct { + unsigned char t_ras_min_msn:4, /* t ras min most significant nibble */ + t_rc_min_msn:4; /* t rc min most significant nibble */ + } bit_fields; + } byte_27; + unsigned char byte_28; /* min active to precharge delay time (t ras min), l-s-byte, mtb */ + unsigned char byte_29; /* min active to active/refresh delay time (t rc min), l-s-byte, mtb */ + unsigned char byte_30; /* min refresh recovery delay time (t rfc1 min), l-s-byte, mtb */ + unsigned char byte_31; /* min refresh recovery delay time (t rfc1 min), m-s-byte, mtb */ + unsigned char byte_32; /* min refresh recovery delay time (t rfc2 min), l-s-byte, mtb */ + unsigned char byte_33; /* min refresh recovery delay time (t rfc2 min), m-s-byte, mtb */ + unsigned char byte_34; /* min refresh recovery delay time (t rfc4 min), l-s-byte, mtb */ + unsigned char byte_35; /* min refresh recovery delay time (t rfc4 min), m-s-byte, mtb */ + union { /* upper nibble for t faw */ + unsigned char all_bits; + struct { + unsigned char t_faw_min_msn:4, /* t faw min most significant nibble */ + reserved:4; + } bit_fields; + } byte_36; + unsigned char byte_37; /* min four activate window delay time (t faw min), l-s-byte, mtb */ + /* byte 38: min activate to activate delay time (t rrd_s min), diff bank group, mtb */ + unsigned char byte_38; + /* byte 39: min activate to activate delay time (t rrd_l min), same bank group, mtb */ + unsigned char byte_39; + unsigned char byte_40; /* min cas to cas delay time (t ccd_l min), same bank group, mtb */ + union { /* upper nibble for t wr min */ + unsigned char all_bits; + struct { + unsigned char t_wr_min_msn:4, /* t wr min most significant nibble */ + reserved:4; + } bit_fields; + } byte_41; + unsigned char byte_42; /* min write recovery time (t wr min) */ + union { /* upper nibbles for t wtr min */ + unsigned char all_bits; + struct { + unsigned char t_wtr_s_min_msn:4, /* t wtr s min most significant nibble */ + t_wtr_l_min_msn:4; /* t wtr l min most significant nibble */ + } bit_fields; + } byte_43; + unsigned char byte_44; /* min write to read 
time (t wtr s min), diff bank group, mtb */ + unsigned char byte_45; /* min write to read time (t wtr l min), same bank group, mtb */ + unsigned char bytes_46_59[14]; /* reserved; all 0s */ + unsigned char bytes_60_77[18]; /* TODO: connector to sdram bit mapping */ + unsigned char bytes_78_116[39]; /* reserved; all 0s */ + /* fine offset for min cas to cas delay time (t ccd_l min), same bank group, ftb */ + unsigned char byte_117; + /* fine offset for min activate to activate delay time (t rrd_l min), same bank group, ftb */ + unsigned char byte_118; + /* fine offset for min activate to activate delay time (t rrd_s min), diff bank group, ftb */ + unsigned char byte_119; + /* fine offset for min active to active/refresh delay time (t rc min), ftb */ + unsigned char byte_120; + unsigned char byte_121; /* fine offset for min row precharge delay time (t rp min), ftb */ + unsigned char byte_122; /* fine offset for min ras to cas delay time (t rcd min), ftb */ + unsigned char byte_123; /* fine offset for min cas latency time (t aa min), ftb */ + unsigned char byte_124; /* fine offset for sdram max cycle time (t ck avg max), ftb */ + unsigned char byte_125; /* fine offset for sdram min cycle time (t ck avg min), ftb */ + unsigned char byte_126; /* crc for base configuration section, l-s-byte */ + unsigned char byte_127; /* crc for base configuration section, m-s-byte */ + /* + * block 1: module specific parameters for unbuffered memory module types only + */ + union { /* (unbuffered) raw card extension, module nominal height */ + unsigned char all_bits; + struct { + unsigned char nom_height_max:5, /* in mm */ + raw_cad_ext:3; + } bit_fields; + } byte_128; + union { /* (unbuffered) module maximum thickness */ + unsigned char all_bits; + struct { + unsigned char front_thickness_max:4, /* in mm */ + back_thickness_max:4; /* in mm */ + } bit_fields; + } byte_129; + union { /* (unbuffered) reference raw card used */ + unsigned char all_bits; + struct { + unsigned char ref_raw_card:5, + ref_raw_card_rev:2, + ref_raw_card_ext:1; + } bit_fields; + } byte_130; + union { /* (unbuffered) address mapping from edge connector to dram */ + unsigned char all_bits; + struct { + unsigned char rank_1_mapping:1, + reserved:7; + } bit_fields; + } byte_131; + unsigned char bytes_132_191[60]; /* reserved; all 0s */ + unsigned char bytes_192_255[MV_DDR_SPD_DATA_BLOCK1H_SIZE]; + unsigned char bytes_256_319[MV_DDR_SPD_DATA_BLOCK2E_SIZE]; + unsigned char bytes_320_383[MV_DDR_SPD_DATA_BLOCK2M_SIZE]; + } byte_fields; +}; + +int mv_ddr_spd_timing_calc(union mv_ddr_spd_data *spd_data, unsigned int timing_data[]); +enum mv_ddr_dev_width mv_ddr_spd_dev_width_get(union mv_ddr_spd_data *spd_data); +enum mv_ddr_die_capacity mv_ddr_spd_die_capacity_get(union mv_ddr_spd_data *spd_data); +unsigned char mv_ddr_spd_mem_mirror_get(union mv_ddr_spd_data *spd_data); +unsigned char mv_ddr_spd_cs_bit_mask_get(union mv_ddr_spd_data *spd_data); +unsigned char mv_ddr_spd_dev_type_get(union mv_ddr_spd_data *spd_data); +unsigned char mv_ddr_spd_module_type_get(union mv_ddr_spd_data *spd_data); +int mv_ddr_spd_supported_cls_calc(union mv_ddr_spd_data *spd_data); +unsigned int mv_ddr_spd_supported_cl_get(unsigned int cl); +enum mv_ddr_pkg_rank mv_ddr_spd_pri_bus_width_get(union mv_ddr_spd_data *spd_data); +enum mv_ddr_pkg_rank mv_ddr_spd_bus_width_ext_get(union mv_ddr_spd_data *spd_data); + +#endif /* _MV_DDR_SPD_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_sys_env_lib.c b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_sys_env_lib.c 
new file mode 100644 index 000000000..c9c6899e7 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_sys_env_lib.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include "mv_ddr_regs.h" +#include "mv_ddr_sys_env_lib.h" + +static u32 mv_ddr_board_id_get(void) +{ +#if defined(CONFIG_TARGET_DB_88F6820_GP) + return DB_GP_68XX_ID; +#else + /* + * Return 0 here for custom board as this should not be used + * for custom boards. + */ + return 0; +#endif +} + +static u32 mv_ddr_board_id_index_get(u32 board_id) +{ + /* + * Marvell Boards use 0x10 as base for Board ID: + * mask MSB to receive index for board ID + */ + return board_id & (MARVELL_BOARD_ID_MASK - 1); +} + +/* + * read gpio input for suspend-wakeup indication + * return indicating suspend wakeup status: + * 0 - not supported, + * 1 - supported: read magic word detect wakeup, + * 2 - detected wakeup from gpio + */ +enum suspend_wakeup_status mv_ddr_sys_env_suspend_wakeup_check(void) +{ + u32 reg, board_id_index, gpio; + struct board_wakeup_gpio board_gpio[] = MV_BOARD_WAKEUP_GPIO_INFO; + + board_id_index = mv_ddr_board_id_index_get(mv_ddr_board_id_get()); + if (!(sizeof(board_gpio) / sizeof(struct board_wakeup_gpio) > + board_id_index)) { + printf("\n_failed loading Suspend-Wakeup information (invalid board ID)\n"); + return SUSPEND_WAKEUP_DISABLED; + } + + /* + * - Detect if Suspend-Wakeup is supported on current board + * - Fetch the GPIO number for wakeup status input indication + */ + if (board_gpio[board_id_index].gpio_num == -1) { + /* Suspend to RAM is not supported */ + return SUSPEND_WAKEUP_DISABLED; + } else if (board_gpio[board_id_index].gpio_num == -2) { + /* + * Suspend to RAM is supported but GPIO indication is + * not implemented - Skip + */ + return SUSPEND_WAKEUP_ENABLED; + } else { + gpio = board_gpio[board_id_index].gpio_num; + } + + /* Initialize MPP for GPIO (set MPP = 0x0) */ + reg = reg_read(MPP_CONTROL_REG(MPP_REG_NUM(gpio))); + /* reset MPP21 to 0x0, keep rest of MPP settings*/ + reg &= ~MPP_MASK(gpio); + reg_write(MPP_CONTROL_REG(MPP_REG_NUM(gpio)), reg); + + /* Initialize GPIO as input */ + reg = reg_read(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio))); + reg |= GPP_MASK(gpio); + reg_write(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio)), reg); + + /* + * Check GPP for input status from PIC: 0 - regular init, + * 1 - suspend wakeup + */ + reg = reg_read(GPP_DATA_IN_REG(GPP_REG_NUM(gpio))); + + /* if GPIO is ON: wakeup from S2RAM indication detected */ + return (reg & GPP_MASK(gpio)) ? SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED : + SUSPEND_WAKEUP_DISABLED; +} + +/* + * get bit mask of enabled cs + * return bit mask of enabled cs: + * 1 - only cs0 enabled, + * 3 - both cs0 and cs1 enabled + */ +u32 mv_ddr_sys_env_get_cs_ena_from_reg(void) +{ + return reg_read(DDR3_RANK_CTRL_REG) & + ((CS_EXIST_MASK << CS_EXIST_OFFS(0)) | + (CS_EXIST_MASK << CS_EXIST_OFFS(1)) | + (CS_EXIST_MASK << CS_EXIST_OFFS(2)) | + (CS_EXIST_MASK << CS_EXIST_OFFS(3))); +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_sys_env_lib.h b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_sys_env_lib.h new file mode 100644 index 000000000..10b0d45b3 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_sys_env_lib.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef _MV_DDR_SYS_ENV_LIB_H +#define _MV_DDR_SYS_ENV_LIB_H + +#include "ddr_ml_wrapper.h" + +/* device revision */ +#define DEV_ID_REG 0x18238 +#define DEV_VERSION_ID_REG 0x1823c +#define REVISON_ID_OFFS 8 +#define REVISON_ID_MASK 0xf00 + +#define MPP_CONTROL_REG(id) (0x18000 + (id * 4)) +#define GPP_DATA_OUT_REG(grp) (MV_GPP_REGS_BASE(grp) + 0x00) +#define GPP_DATA_OUT_EN_REG(grp) (MV_GPP_REGS_BASE(grp) + 0x04) +#define GPP_DATA_IN_REG(grp) (MV_GPP_REGS_BASE(grp) + 0x10) +#define MV_GPP_REGS_BASE(unit) (0x18100 + ((unit) * 0x40)) + +#define MPP_REG_NUM(GPIO_NUM) (GPIO_NUM / 8) +#define MPP_MASK(GPIO_NUM) (0xf << 4 * (GPIO_NUM - \ + (MPP_REG_NUM(GPIO_NUM) * 8))); +#define GPP_REG_NUM(GPIO_NUM) (GPIO_NUM / 32) +#define GPP_MASK(GPIO_NUM) (1 << GPIO_NUM % 32) + +/* device ID */ +/* Board ID numbers */ +#define MARVELL_BOARD_ID_MASK 0x10 + +/* Customer boards for A38x */ +#define A38X_CUSTOMER_BOARD_ID_BASE 0x0 +#define A38X_CUSTOMER_BOARD_ID0 (A38X_CUSTOMER_BOARD_ID_BASE + 0) +#define A38X_CUSTOMER_BOARD_ID1 (A38X_CUSTOMER_BOARD_ID_BASE + 1) +#define A38X_MV_MAX_CUSTOMER_BOARD_ID (A38X_CUSTOMER_BOARD_ID_BASE + 2) +#define A38X_MV_CUSTOMER_BOARD_NUM (A38X_MV_MAX_CUSTOMER_BOARD_ID - \ + A38X_CUSTOMER_BOARD_ID_BASE) + +/* Marvell boards for A38x */ +#define A38X_MARVELL_BOARD_ID_BASE 0x10 +#define RD_NAS_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 0) +#define DB_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 1) +#define RD_AP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 2) +#define DB_AP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 3) +#define DB_GP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 4) +#define DB_BP_6821_ID (A38X_MARVELL_BOARD_ID_BASE + 5) +#define DB_AMC_6820_ID (A38X_MARVELL_BOARD_ID_BASE + 6) +#define A38X_MV_MAX_MARVELL_BOARD_ID (A38X_MARVELL_BOARD_ID_BASE + 7) +#define A38X_MV_MARVELL_BOARD_NUM (A38X_MV_MAX_MARVELL_BOARD_ID - \ + A38X_MARVELL_BOARD_ID_BASE) + +/* Marvell boards for A39x */ +#define A39X_MARVELL_BOARD_ID_BASE 0x30 +#define A39X_DB_69XX_ID (A39X_MARVELL_BOARD_ID_BASE + 0) +#define A39X_RD_69XX_ID (A39X_MARVELL_BOARD_ID_BASE + 1) +#define A39X_MV_MAX_MARVELL_BOARD_ID (A39X_MARVELL_BOARD_ID_BASE + 2) +#define A39X_MV_MARVELL_BOARD_NUM (A39X_MV_MAX_MARVELL_BOARD_ID - \ + A39X_MARVELL_BOARD_ID_BASE) + +struct board_wakeup_gpio { + u32 board_id; + int gpio_num; +}; + +enum suspend_wakeup_status { + SUSPEND_WAKEUP_DISABLED, + SUSPEND_WAKEUP_ENABLED, + SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED, +}; + +/* + * GPIO status indication for Suspend Wakeup: + * If suspend to RAM is supported and GPIO inidcation is implemented, + * set the gpio number + * If suspend to RAM is supported but GPIO indication is not implemented + * set '-2' + * If suspend to RAM is not supported set '-1' + */ + +#define MV_BOARD_WAKEUP_GPIO_INFO { \ + {RD_NAS_68XX_ID, -2 }, \ + {DB_68XX_ID, -1 }, \ + {RD_AP_68XX_ID, -2 }, \ + {DB_AP_68XX_ID, -2 }, \ + {DB_GP_68XX_ID, -2 }, \ + {DB_BP_6821_ID, -2 }, \ + {DB_AMC_6820_ID, -2 }, \ +}; + +enum suspend_wakeup_status mv_ddr_sys_env_suspend_wakeup_check(void); +u32 mv_ddr_sys_env_get_cs_ena_from_reg(void); + +#endif /* _MV_DDR_SYS_ENV_LIB_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_topology.c b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_topology.c new file mode 100644 index 000000000..2db6283c2 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_topology.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ +#include "ddr_ml_wrapper.h" +#include "mv_ddr_plat.h" + +#include "mv_ddr_topology.h" +#include "mv_ddr_common.h" +#include "mv_ddr_spd.h" +#include "ddr_topology_def.h" +#include "ddr3_training_ip_db.h" +#include "ddr3_training_ip.h" +#include "mv_ddr_training_db.h" + +unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk) +{ + unsigned int cl = ceil_div(taa_min, tclk); + + return mv_ddr_spd_supported_cl_get(cl); + +} + +unsigned int mv_ddr_cwl_calc(unsigned int tclk) +{ + unsigned int cwl; + + if (tclk >= 1250) + cwl = 9; + else if (tclk >= 1071) + cwl = 10; + else if (tclk >= 938) + cwl = 11; + else if (tclk >= 833) + cwl = 12; + else if (tclk >= 750) + cwl = 14; + else if (tclk >= 625) + cwl = 16; + else + cwl = 0; + + return cwl; +} + +int mv_ddr_topology_map_update(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + struct if_params *iface_params = &(tm->interface_params[0]); + unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); + enum mv_ddr_speed_bin speed_bin_index; + enum mv_ddr_freq freq = MV_DDR_FREQ_LAST; + unsigned int tclk; + unsigned char val = 0; + int i; + + if (iface_params->memory_freq == MV_DDR_FREQ_SAR) + iface_params->memory_freq = mv_ddr_init_freq_get(); + + if (tm->cfg_src == MV_DDR_CFG_SPD) { + /* check dram device type */ + val = mv_ddr_spd_dev_type_get(&tm->spd_data); + if (val != MV_DDR_SPD_DEV_TYPE_DDR4) { + printf("mv_ddr: unsupported dram device type found\n"); + return -1; + } + + /* update topology map with timing data */ + if (mv_ddr_spd_timing_calc(&tm->spd_data, tm->timing_data) > 0) { + printf("mv_ddr: negative timing data found\n"); + return -1; + } + + /* update device width in topology map */ + iface_params->bus_width = mv_ddr_spd_dev_width_get(&tm->spd_data); + + /* update die capacity in topology map */ + iface_params->memory_size = mv_ddr_spd_die_capacity_get(&tm->spd_data); + + /* update bus bit mask in topology map */ + tm->bus_act_mask = mv_ddr_bus_bit_mask_get(); + + /* update cs bit mask in topology map */ + val = mv_ddr_spd_cs_bit_mask_get(&tm->spd_data); + for (i = 0; i < octets_per_if_num; i++) + iface_params->as_bus_params[i].cs_bitmask = val; + + /* check dram module type */ + val = mv_ddr_spd_module_type_get(&tm->spd_data); + switch (val) { + case MV_DDR_SPD_MODULE_TYPE_UDIMM: + case MV_DDR_SPD_MODULE_TYPE_SO_DIMM: + case MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM: + case MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM: + case MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM: + case MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM: + break; + default: + printf("mv_ddr: unsupported dram module type found\n"); + return -1; + } + + /* update mirror bit mask in topology map */ + val = mv_ddr_spd_mem_mirror_get(&tm->spd_data); + for (i = 0; i < octets_per_if_num; i++) + iface_params->as_bus_params[i].mirror_enable_bitmask = val << 1; + + tclk = 1000000 / mv_ddr_freq_get(iface_params->memory_freq); + /* update cas write latency (cwl) */ + val = mv_ddr_cwl_calc(tclk); + if (val == 0) { + printf("mv_ddr: unsupported cas write latency value found\n"); + return -1; + } + iface_params->cas_wl = val; + + /* update cas latency (cl) */ + mv_ddr_spd_supported_cls_calc(&tm->spd_data); + val = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk); + if (val == 0) { + printf("mv_ddr: unsupported cas latency value found\n"); + return -1; + } + iface_params->cas_l = val; + } else if (tm->cfg_src == MV_DDR_CFG_DEFAULT) { + /* set cas and cas-write latencies per speed bin, if they unset */ + 
+		speed_bin_index = iface_params->speed_bin_index;
+		freq = iface_params->memory_freq;
+
+		if (tm->twin_die_combined == COMBINED) {
+			iface_params->bus_width = MV_DDR_DEV_WIDTH_8BIT;
+			iface_params->memory_size -= 1;
+		}
+
+		if (iface_params->cas_l == 0)
+			iface_params->cas_l = mv_ddr_cl_val_get(speed_bin_index, freq);
+
+		if (iface_params->cas_wl == 0)
+			iface_params->cas_wl = mv_ddr_cwl_val_get(speed_bin_index, freq);
+	}
+
+	return 0;
+}
+
+unsigned short mv_ddr_bus_bit_mask_get(void)
+{
+	unsigned short pri_and_ext_bus_width = 0x0;
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
+
+	if (tm->cfg_src == MV_DDR_CFG_SPD) {
+		if (tm->bus_act_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)
+			tm->spd_data.byte_fields.byte_13.bit_fields.primary_bus_width = MV_DDR_PRI_BUS_WIDTH_32;
+
+		enum mv_ddr_pri_bus_width pri_bus_width = mv_ddr_spd_pri_bus_width_get(&tm->spd_data);
+		enum mv_ddr_bus_width_ext bus_width_ext = mv_ddr_spd_bus_width_ext_get(&tm->spd_data);
+
+		switch (pri_bus_width) {
+		case MV_DDR_PRI_BUS_WIDTH_16:
+			pri_and_ext_bus_width = BUS_MASK_16BIT;
+			break;
+		case MV_DDR_PRI_BUS_WIDTH_32: /* each mask bit represents a byte, so 0xf means 4 bytes = 32 bits */
+			pri_and_ext_bus_width = BUS_MASK_32BIT;
+			break;
+		case MV_DDR_PRI_BUS_WIDTH_64:
+			pri_and_ext_bus_width = MV_DDR_64BIT_BUS_MASK;
+			break;
+		default:
+			pri_and_ext_bus_width = 0x0;
+		}
+
+		if (bus_width_ext == MV_DDR_BUS_WIDTH_EXT_8)
+			pri_and_ext_bus_width |= 1 << (octets_per_if_num - 1);
+	}
+
+	return pri_and_ext_bus_width;
+}
+
+unsigned int mv_ddr_if_bus_width_get(void)
+{
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+	unsigned int bus_width;
+
+	switch (tm->bus_act_mask) {
+	case BUS_MASK_16BIT:
+	case BUS_MASK_16BIT_ECC:
+	case BUS_MASK_16BIT_ECC_PUP3:
+		bus_width = 16;
+		break;
+	case BUS_MASK_32BIT:
+	case BUS_MASK_32BIT_ECC:
+	case MV_DDR_32BIT_ECC_PUP8_BUS_MASK:
+		bus_width = 32;
+		break;
+	case MV_DDR_64BIT_BUS_MASK:
+	case MV_DDR_64BIT_ECC_PUP8_BUS_MASK:
+		bus_width = 64;
+		break;
+	default:
+		printf("mv_ddr: unsupported bus active mask parameter found\n");
+		bus_width = 0;
+	}
+
+	return bus_width;
+}
+
+unsigned int mv_ddr_cs_num_get(void)
+{
+	unsigned int cs_num = 0;
+	unsigned int cs, sphy;
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+	struct if_params *iface_params = &(tm->interface_params[0]);
+	unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
+
+	/* find the first active subphy, then count the cs bits set for it */
+	for (sphy = 0; sphy < sphy_max; sphy++) {
+		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sphy);
+		break;
+	}
+
+	for (cs = 0; cs < MAX_CS_NUM; cs++) {
+		VALIDATE_ACTIVE(iface_params->as_bus_params[sphy].cs_bitmask, cs);
+		cs_num++;
+	}
+
+	return cs_num;
+}
+
+int mv_ddr_is_ecc_ena(void)
+{
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
+	    DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask) ||
+	    DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))
+		return 1;
+	else
+		return 0;
+}
+
+int mv_ddr_ck_delay_get(void)
+{
+	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
+
+	if (tm->ck_delay)
+		return tm->ck_delay;
+
+	return -1;
+}
+
+/* translate topology map definition to real memory size in bits */
+static unsigned int mem_size[] = {
+	ADDR_SIZE_512MB,
+	ADDR_SIZE_1GB,
+	ADDR_SIZE_2GB,
+	ADDR_SIZE_4GB,
+	ADDR_SIZE_8GB,
+	ADDR_SIZE_16GB
+	/* TODO: add capacity up to 256GB */
+};
+
+unsigned long long mv_ddr_mem_sz_per_cs_get(void)
+{
+	unsigned long long mem_sz_per_cs;
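+	/*
+	 * Illustrative note (added comment, not in the original sources):
+	 * the value computed below is
+	 *     per-die capacity * active data subphys / subphys per device,
+	 * e.g. with four active subphys of x8 devices (one subphy per
+	 * device) each chip-select spans four dies' worth of capacity.
+	 */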
unsigned int i, sphys, sphys_per_dunit; + unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE); + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + struct if_params *iface_params = &(tm->interface_params[0]); + + /* calc number of active subphys excl. ecc one */ + for (i = 0, sphys = 0; i < sphy_max - 1; i++) { + VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i); + sphys++; + } + + /* calc number of subphys per ddr unit */ + if (iface_params->bus_width == MV_DDR_DEV_WIDTH_8BIT) + sphys_per_dunit = MV_DDR_ONE_SPHY_PER_DUNIT; + else if (iface_params->bus_width == MV_DDR_DEV_WIDTH_16BIT) + sphys_per_dunit = MV_DDR_TWO_SPHY_PER_DUNIT; + else { + printf("mv_ddr: unsupported bus width type found\n"); + return 0; + } + + /* calc dram size per cs */ + mem_sz_per_cs = (unsigned long long)mem_size[iface_params->memory_size] * + (unsigned long long)sphys / + (unsigned long long)sphys_per_dunit; + return mem_sz_per_cs; +} + +unsigned long long mv_ddr_mem_sz_get(void) +{ + unsigned long long tot_mem_sz = 0; + unsigned long long mem_sz_per_cs = 0; + unsigned long long max_cs = mv_ddr_cs_num_get(); + + mem_sz_per_cs = mv_ddr_mem_sz_per_cs_get(); + tot_mem_sz = max_cs * mem_sz_per_cs; + + return tot_mem_sz; +} + +unsigned int mv_ddr_rtt_nom_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int rtt_nom = tm->edata.mem_edata.rtt_nom; + + if (rtt_nom >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) { + printf("error: %s: unsupported rtt_nom parameter found\n", __func__); + rtt_nom = PARAM_UNDEFINED; + } + + return rtt_nom; +} + +unsigned int mv_ddr_rtt_park_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int cs_num = mv_ddr_cs_num_get(); + unsigned int rtt_park = MV_DDR_RTT_NOM_PARK_RZQ_LAST; + + if (cs_num > 0 && cs_num <= MAX_CS_NUM) + rtt_park = tm->edata.mem_edata.rtt_park[cs_num - 1]; + + if (rtt_park >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) { + printf("error: %s: unsupported rtt_park parameter found\n", __func__); + rtt_park = PARAM_UNDEFINED; + } + + return rtt_park; +} + +unsigned int mv_ddr_rtt_wr_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int cs_num = mv_ddr_cs_num_get(); + unsigned int rtt_wr = MV_DDR_RTT_WR_RZQ_LAST; + + if (cs_num > 0 && cs_num <= MAX_CS_NUM) + rtt_wr = tm->edata.mem_edata.rtt_wr[cs_num - 1]; + + if (rtt_wr >= MV_DDR_RTT_WR_RZQ_LAST) { + printf("error: %s: unsupported rtt_wr parameter found\n", __func__); + rtt_wr = PARAM_UNDEFINED; + } + + return rtt_wr; +} + +unsigned int mv_ddr_dic_get(void) +{ + struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get(); + unsigned int dic = tm->edata.mem_edata.dic; + + if (dic >= MV_DDR_DIC_RZQ_LAST) { + printf("error: %s: unsupported dic parameter found\n", __func__); + dic = PARAM_UNDEFINED; + } + + return dic; +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_topology.h b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_topology.h new file mode 100644 index 000000000..1cb69ad08 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_topology.h @@ -0,0 +1,332 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef _MV_DDR_TOPOLOGY_H +#define _MV_DDR_TOPOLOGY_H + +#define MAX_CS_NUM 4 + +enum mv_ddr_speed_bin { + SPEED_BIN_DDR_800D, + SPEED_BIN_DDR_800E, + SPEED_BIN_DDR_1066E, + SPEED_BIN_DDR_1066F, + SPEED_BIN_DDR_1066G, + SPEED_BIN_DDR_1333F, + SPEED_BIN_DDR_1333G, + SPEED_BIN_DDR_1333H, + SPEED_BIN_DDR_1333J, + SPEED_BIN_DDR_1600G, + SPEED_BIN_DDR_1600H, + SPEED_BIN_DDR_1600J, + SPEED_BIN_DDR_1600K, + SPEED_BIN_DDR_1866J, + SPEED_BIN_DDR_1866K, + SPEED_BIN_DDR_1866L, + SPEED_BIN_DDR_1866M, + SPEED_BIN_DDR_2133K, + SPEED_BIN_DDR_2133L, + SPEED_BIN_DDR_2133M, + SPEED_BIN_DDR_2133N, + + SPEED_BIN_DDR_1333H_EXT, + SPEED_BIN_DDR_1600K_EXT, + SPEED_BIN_DDR_1866M_EXT +}; + +enum mv_ddr_freq { + MV_DDR_FREQ_LOW_FREQ, + MV_DDR_FREQ_400, + MV_DDR_FREQ_533, + MV_DDR_FREQ_667, + MV_DDR_FREQ_800, + MV_DDR_FREQ_933, + MV_DDR_FREQ_1066, + MV_DDR_FREQ_311, + MV_DDR_FREQ_333, + MV_DDR_FREQ_467, + MV_DDR_FREQ_850, + MV_DDR_FREQ_600, + MV_DDR_FREQ_300, + MV_DDR_FREQ_900, + MV_DDR_FREQ_360, + MV_DDR_FREQ_1000, + MV_DDR_FREQ_LAST, + MV_DDR_FREQ_SAR +}; + +enum mv_ddr_speed_bin_timing { + SPEED_BIN_TRCD, + SPEED_BIN_TRP, + SPEED_BIN_TRAS, + SPEED_BIN_TRC, + SPEED_BIN_TRRD1K, + SPEED_BIN_TRRD2K, + SPEED_BIN_TPD, + SPEED_BIN_TFAW1K, + SPEED_BIN_TFAW2K, + SPEED_BIN_TWTR, + SPEED_BIN_TRTP, + SPEED_BIN_TWR, + SPEED_BIN_TMOD, + SPEED_BIN_TXPDLL, + SPEED_BIN_TXSDLL +}; + +/* ddr bus masks */ +#define BUS_MASK_32BIT 0xf +#define BUS_MASK_32BIT_ECC 0x1f +#define BUS_MASK_16BIT 0x3 +#define BUS_MASK_16BIT_ECC 0x13 +#define BUS_MASK_16BIT_ECC_PUP3 0xb +#define MV_DDR_64BIT_BUS_MASK 0xff +#define MV_DDR_64BIT_ECC_PUP8_BUS_MASK 0x1ff +#define MV_DDR_32BIT_ECC_PUP8_BUS_MASK 0x10f + +#define MV_DDR_CS_BITMASK_1CS 0x1 +#define MV_DDR_CS_BITMASK_2CS 0x3 + +#define MV_DDR_ONE_SPHY_PER_DUNIT 1 +#define MV_DDR_TWO_SPHY_PER_DUNIT 2 + +/* source of ddr configuration data */ +enum mv_ddr_cfg_src { + MV_DDR_CFG_DEFAULT, /* based on data in mv_ddr_topology_map structure */ + MV_DDR_CFG_SPD, /* based on data in spd */ + MV_DDR_CFG_USER, /* based on data from user */ + MV_DDR_CFG_STATIC, /* based on data from user in register-value format */ + MV_DDR_CFG_LAST +}; + +enum mv_ddr_temperature { + MV_DDR_TEMP_LOW, + MV_DDR_TEMP_NORMAL, + MV_DDR_TEMP_HIGH +}; + +enum mv_ddr_timing { + MV_DDR_TIM_DEFAULT, + MV_DDR_TIM_1T, + MV_DDR_TIM_2T +}; + +enum mv_ddr_timing_data { + MV_DDR_TCK_AVG_MIN, /* sdram min cycle time (t ck avg min) */ + MV_DDR_TAA_MIN, /* min cas latency time (t aa min) */ + MV_DDR_TRFC1_MIN, /* min refresh recovery delay time (t rfc1 min) */ + MV_DDR_TWR_MIN, /* min write recovery time (t wr min) */ + MV_DDR_TRCD_MIN, /* min ras to cas delay time (t rcd min) */ + MV_DDR_TRP_MIN, /* min row precharge delay time (t rp min) */ + MV_DDR_TRC_MIN, /* min active to active/refresh delay time (t rc min) */ + MV_DDR_TRAS_MIN, /* min active to precharge delay time (t ras min) */ + MV_DDR_TRRD_S_MIN, /* min activate to activate delay time (t rrd_s min), diff bank group */ + MV_DDR_TRRD_L_MIN, /* min activate to activate delay time (t rrd_l min), same bank group */ + MV_DDR_TCCD_L_MIN, /* min cas to cas delay time (t ccd_l min), same bank group */ + MV_DDR_TFAW_MIN, /* min four activate window delay time (t faw min) */ + MV_DDR_TWTR_S_MIN, /* min write to read time (t wtr s min), diff bank group */ + MV_DDR_TWTR_L_MIN, /* min write to read time (t wtr l min), same bank group */ + MV_DDR_TDATA_LAST +}; + +enum mv_ddr_electrical_data { + MV_DDR_CK_DLY, + MV_DDR_PHY_REG3, + MV_DDR_ZPRI_DATA, + MV_DDR_ZNRI_DATA, + 
MV_DDR_ZPRI_CTRL, + MV_DDR_ZNRI_CTRL, + MV_DDR_ZPODT_DATA, + MV_DDR_ZNODT_DATA, + MV_DDR_ZPODT_CTRL, + MV_DDR_ZNODT_CTRL, + MV_DDR_DIC, + MV_DDR_ODT_CFG, + MV_DDR_RTT_NOM, + MV_DDR_RTT_WR, + MV_DDR_RTT_PARK, + MV_DDR_EDATA_LAST +}; + +/* memory electrical configuration values */ +enum mv_ddr_rtt_nom_park_evalue { + MV_DDR_RTT_NOM_PARK_RZQ_DISABLE, + MV_DDR_RTT_NOM_PARK_RZQ_DIV4, /* 60-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_NOM_PARK_RZQ_DIV2, /* 120-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_NOM_PARK_RZQ_DIV6, /* 40-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_NOM_PARK_RZQ_DIV1, /* 240-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_NOM_PARK_RZQ_DIV5, /* 48-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_NOM_PARK_RZQ_DIV3, /* 80-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_NOM_PARK_RZQ_DIV7, /* 34-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_NOM_PARK_RZQ_LAST +}; + +enum mv_ddr_rtt_wr_evalue { + MV_DDR_RTT_WR_DYN_ODT_OFF, + MV_DDR_RTT_WR_RZQ_DIV2, /* 120-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_WR_RZQ_DIV1, /* 240-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_WR_HIZ, + MV_DDR_RTT_WR_RZQ_DIV3, /* 80-Ohm; RZQ = 240-Ohm */ + MV_DDR_RTT_WR_RZQ_LAST +}; + +enum mv_ddr_dic_evalue { + MV_DDR_DIC_RZQ_DIV7, /* 34-Ohm; RZQ = 240-Ohm */ + MV_DDR_DIC_RZQ_DIV5, /* 48-Ohm; RZQ = 240-Ohm */ + MV_DDR_DIC_RZQ_LAST +}; + +/* phy electrical configuration values */ +enum mv_ddr_ohm_evalue { + MV_DDR_OHM_20 = 20,/*relevant for Synopsys C/A Drive strength only*/ + MV_DDR_OHM_30 = 30, + MV_DDR_OHM_40 = 40,/*relevant for Synopsys C/A Drive strength only*/ + MV_DDR_OHM_48 = 48, + MV_DDR_OHM_60 = 60, + MV_DDR_OHM_80 = 80, + MV_DDR_OHM_120 = 120, + MV_DDR_OHM_240 = 240, + MV_DDR_OHM_LAST +}; + +/* mac electrical configuration values */ +enum mv_ddr_odt_cfg_evalue { + MV_DDR_ODT_CFG_NORMAL, + MV_DDR_ODT_CFG_ALWAYS_ON, + MV_DDR_ODT_CFG_LAST +}; + +enum mv_ddr_dev_width { /* sdram device width */ + MV_DDR_DEV_WIDTH_4BIT, + MV_DDR_DEV_WIDTH_8BIT, + MV_DDR_DEV_WIDTH_16BIT, + MV_DDR_DEV_WIDTH_32BIT, + MV_DDR_DEV_WIDTH_LAST +}; + +enum mv_ddr_die_capacity { /* total sdram capacity per die, megabits */ + MV_DDR_DIE_CAP_256MBIT, + MV_DDR_DIE_CAP_512MBIT = 0, + MV_DDR_DIE_CAP_1GBIT, + MV_DDR_DIE_CAP_2GBIT, + MV_DDR_DIE_CAP_4GBIT, + MV_DDR_DIE_CAP_8GBIT, + MV_DDR_DIE_CAP_16GBIT, + MV_DDR_DIE_CAP_32GBIT, + MV_DDR_DIE_CAP_12GBIT, + MV_DDR_DIE_CAP_24GBIT, + MV_DDR_DIE_CAP_LAST +}; + +enum mv_ddr_pkg_rank { /* number of package ranks per dimm */ + MV_DDR_PKG_RANK_1, + MV_DDR_PKG_RANK_2, + MV_DDR_PKG_RANK_3, + MV_DDR_PKG_RANK_4, + MV_DDR_PKG_RANK_5, + MV_DDR_PKG_RANK_6, + MV_DDR_PKG_RANK_7, + MV_DDR_PKG_RANK_8, + MV_DDR_PKG_RANK_LAST +}; + +enum mv_ddr_pri_bus_width { /* number of primary bus width bits */ + MV_DDR_PRI_BUS_WIDTH_8, + MV_DDR_PRI_BUS_WIDTH_16, + MV_DDR_PRI_BUS_WIDTH_32, + MV_DDR_PRI_BUS_WIDTH_64, + MV_DDR_PRI_BUS_WIDTH_LAST +}; + +enum mv_ddr_bus_width_ext { /* number of extension bus width bits */ + MV_DDR_BUS_WIDTH_EXT_0, + MV_DDR_BUS_WIDTH_EXT_8, + MV_DDR_BUS_WIDTH_EXT_LAST +}; + +enum mv_ddr_die_count { + MV_DDR_DIE_CNT_1, + MV_DDR_DIE_CNT_2, + MV_DDR_DIE_CNT_3, + MV_DDR_DIE_CNT_4, + MV_DDR_DIE_CNT_5, + MV_DDR_DIE_CNT_6, + MV_DDR_DIE_CNT_7, + MV_DDR_DIE_CNT_8, + MV_DDR_DIE_CNT_LAST +}; + +#define IS_ACTIVE(mask, id) \ + ((mask) & (1 << (id))) + +#define VALIDATE_ACTIVE(mask, id) \ + { \ + if (IS_ACTIVE(mask, id) == 0) \ + continue; \ + } + +#define IS_IF_ACTIVE(if_mask, if_id) \ + ((if_mask) & (1 << (if_id))) + +#define VALIDATE_IF_ACTIVE(mask, id) \ + { \ + if (IS_IF_ACTIVE(mask, id) == 0) \ + continue; \ + } + +#define IS_BUS_ACTIVE(if_mask , if_id) \ + (((if_mask) >> (if_id)) & 1) + +#define 
VALIDATE_BUS_ACTIVE(mask, id) \ + { \ + if (IS_BUS_ACTIVE(mask, id) == 0) \ + continue; \ + } + +#define DDR3_IS_ECC_PUP3_MODE(if_mask) \ + (((if_mask) == BUS_MASK_16BIT_ECC_PUP3) ? 1 : 0) + +#define DDR3_IS_ECC_PUP4_MODE(if_mask) \ + (((if_mask) == BUS_MASK_32BIT_ECC || \ + (if_mask) == BUS_MASK_16BIT_ECC) ? 1 : 0) + +#define DDR3_IS_16BIT_DRAM_MODE(mask) \ + (((mask) == BUS_MASK_16BIT || \ + (mask) == BUS_MASK_16BIT_ECC || \ + (mask) == BUS_MASK_16BIT_ECC_PUP3) ? 1 : 0) + +#define DDR3_IS_ECC_PUP8_MODE(if_mask) \ + (((if_mask) == MV_DDR_32BIT_ECC_PUP8_BUS_MASK || \ + (if_mask) == MV_DDR_64BIT_ECC_PUP8_BUS_MASK) ? 1 : 0) + +#define MV_DDR_IS_64BIT_DRAM_MODE(mask) \ + ((((mask) & MV_DDR_64BIT_BUS_MASK) == MV_DDR_64BIT_BUS_MASK) || \ + (((mask) & MV_DDR_64BIT_ECC_PUP8_BUS_MASK) == MV_DDR_64BIT_ECC_PUP8_BUS_MASK) ? 1 : 0) + +#define MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(mask, sphys) \ + (((sphys) == 9) && \ + (((mask) == BUS_MASK_32BIT) || \ + ((mask) == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)) ? 1 : 0) + +#define MV_DDR_IS_HALF_BUS_DRAM_MODE(mask, sphys) \ + (MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(mask, sphys) || \ + DDR3_IS_16BIT_DRAM_MODE(mask)) + +struct mv_ddr_topology_map *mv_ddr_topology_map_get(void); +unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk); +unsigned int mv_ddr_cwl_calc(unsigned int tclk); +int mv_ddr_topology_map_update(void); +unsigned short mv_ddr_bus_bit_mask_get(void); +unsigned int mv_ddr_if_bus_width_get(void); +unsigned int mv_ddr_cs_num_get(void); +int mv_ddr_is_ecc_ena(void); +int mv_ddr_ck_delay_get(void); +unsigned long long mv_ddr_mem_sz_per_cs_get(void); +unsigned long long mv_ddr_mem_sz_get(void); +unsigned int mv_ddr_rtt_nom_get(void); +unsigned int mv_ddr_rtt_park_get(void); +unsigned int mv_ddr_rtt_wr_get(void); +unsigned int mv_ddr_dic_get(void); + +#endif /* _MV_DDR_TOPOLOGY_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_training_db.h b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_training_db.h new file mode 100644 index 000000000..838be4574 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/mv_ddr_training_db.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Marvell International Ltd. 
+ */ + +#ifndef _MV_DDR_TRAINING_DB_H +#define _MV_DDR_TRAINING_DB_H + +#include "mv_ddr_topology.h" + +/* in ns */ +#define TREFI_LOW 7800 +#define TREFI_HIGH 3900 + +enum mv_ddr_page_size { + MV_DDR_PAGE_SIZE_1K = 1, + MV_DDR_PAGE_SIZE_2K +}; + +struct mv_ddr_page_element { + /* 8-bit bus width page size */ + enum mv_ddr_page_size page_size_8bit; + /* 16-bit bus width page size */ + enum mv_ddr_page_size page_size_16bit; +}; + +/* cas latency value per frequency */ +struct mv_ddr_cl_val_per_freq { + unsigned int cl_val[MV_DDR_FREQ_LAST]; +}; + +u32 mv_ddr_rfc_get(u32 mem); +unsigned int *mv_ddr_freq_tbl_get(void); +u32 mv_ddr_freq_get(enum mv_ddr_freq freq); +u32 mv_ddr_page_size_get(enum mv_ddr_dev_width bus_width, enum mv_ddr_die_capacity mem_size); +unsigned int mv_ddr_speed_bin_timing_get(enum mv_ddr_speed_bin index, enum mv_ddr_speed_bin_timing element); +u32 mv_ddr_cl_val_get(u32 index, u32 freq); +u32 mv_ddr_cwl_val_get(u32 index, u32 freq); + +#endif /* _MV_DDR_TRAINING_DB_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/seq_exec.h b/roms/u-boot/drivers/ddr/marvell/a38x/seq_exec.h new file mode 100644 index 000000000..fe0cb8f75 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/seq_exec.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _SEQ_EXEC_H +#define _SEQ_EXEC_H + +#define NA 0xff +#define DEFAULT_PARAM 0 +#define MV_BOARD_TCLK_ERROR 0xffffffff + +#define NO_DATA 0xffffffff +#define MAX_DATA_ARRAY 5 +#define FIRST_CELL 0 + +/* Operation types */ +enum mv_op { + WRITE_OP, + DELAY_OP, + POLL_OP, +}; + +/* Operation parameters */ +struct op_params { + u32 unit_base_reg; + u32 unit_offset; + u32 mask; + u32 data[MAX_DATA_ARRAY]; /* data array */ + u8 wait_time; /* msec */ + u16 num_of_loops; /* for polling only */ +}; + +/* + * Sequence parameters. Each sequence contains: + * 1. Sequence id. + * 2. Sequence size (total amount of operations during the sequence) + * 3. a series of operations. operations can be write, poll or delay + * 4. index in the data array (the entry where the relevant data sits) + */ +struct cfg_seq { + struct op_params *op_params_ptr; + u8 cfg_seq_size; + u8 data_arr_idx; +}; + +extern struct cfg_seq serdes_seq_db[]; + +/* + * A generic function type for executing an operation (write, poll or delay) + */ +typedef int (*op_execute_func_ptr)(u32 serdes_num, struct op_params *params, + u32 data_arr_idx); + +/* Specific functions for executing each operation */ +int write_op_execute(u32 serdes_num, struct op_params *params, + u32 data_arr_idx); +int delay_op_execute(u32 serdes_num, struct op_params *params, + u32 data_arr_idx); +int poll_op_execute(u32 serdes_num, struct op_params *params, u32 data_arr_idx); +enum mv_op get_cfg_seq_op(struct op_params *params); +int mv_seq_exec(u32 serdes_num, u32 seq_id); + +#endif /*_SEQ_EXEC_H*/ diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/xor.c b/roms/u-boot/drivers/ddr/marvell/a38x/xor.c new file mode 100644 index 000000000..7bc626829 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/xor.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates
+ */
+
+#include "ddr3_init.h"
+#include "mv_ddr_common.h"
+#include "xor_regs.h"
+
+/* defines */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static u32 ui_xor_regs_ctrl_backup;
+static u32 ui_xor_regs_base_backup[MAX_CS_NUM + 1];
+static u32 ui_xor_regs_mask_backup[MAX_CS_NUM + 1];
+
+void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, uint64_t cs_size, u32 base_delta)
+{
+	u32 reg, ui, cs_count;
+	uint64_t base, size_mask;
+
+	ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
+	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
+		ui_xor_regs_base_backup[ui] =
+			reg_read(XOR_BASE_ADDR_REG(0, ui));
+	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
+		ui_xor_regs_mask_backup[ui] =
+			reg_read(XOR_SIZE_MASK_REG(0, ui));
+
+	reg = 0;
+	for (ui = 0, cs_count = 0;
+	     (cs_count < num_of_cs) && (ui < 8);
+	     ui++, cs_count++) {
+		if (cs_ena & (1 << ui)) {
+			/* Enable window x for each CS */
+			reg |= (0x1 << (ui));
+			/* Set window x access for each CS to read/write */
+			reg |= (0x3 << ((ui * 2) + 16));
+		}
+	}
+
+	reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);
+
+	cs_count = 0;
+	for (ui = 0, cs_count = 0;
+	     (cs_count < num_of_cs) && (ui < 8);
+	     ui++, cs_count++) {
+		if (cs_ena & (1 << ui)) {
+			/*
+			 * window x - Base - 0x00000000,
+			 * Attribute 0x0e - DRAM
+			 */
+			base = cs_size * ui + base_delta;
+			/* fixed size 2GB for each CS */
+			size_mask = 0x7FFF0000;
+			switch (ui) {
+			case 0:
+				base |= 0xe00;
+				break;
+			case 1:
+				base |= 0xd00;
+				break;
+			case 2:
+				base |= 0xb00;
+				break;
+			case 3:
+				base |= 0x700;
+				break;
+			case 4: /* SRAM */
+				base = 0x40000000;
+				/* configure as shared transaction */
+				base |= 0x1F00;
+				size_mask = 0xF0000;
+				break;
+			}
+
+			reg_write(XOR_BASE_ADDR_REG(0, ui), (u32)base);
+			/* the size mask is recomputed here from cs_size */
+			size_mask = (cs_size / _64K) - 1;
+			size_mask = (size_mask << XESMRX_SIZE_MASK_OFFS) & XESMRX_SIZE_MASK_MASK;
+			/* window x - Size */
+			reg_write(XOR_SIZE_MASK_REG(0, ui), (u32)size_mask);
+		}
+	}
+
+	mv_xor_hal_init(1);
+}
+
+void mv_sys_xor_finish(void)
+{
+	u32 ui;
+
+	reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
+	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
+		reg_write(XOR_BASE_ADDR_REG(0, ui),
+			  ui_xor_regs_base_backup[ui]);
+	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
+		reg_write(XOR_SIZE_MASK_REG(0, ui),
+			  ui_xor_regs_mask_backup[ui]);
+
+	reg_write(XOR_ADDR_OVRD_REG(0, 0), 0);
+}
+
+/*
+ * mv_xor_hal_init - Initialize XOR engine
+ *
+ * DESCRIPTION:
+ *	This function initializes the XOR unit.
+ * INPUT:
+ *	xor_chan_num - number of XOR channels to initialize.
+ *
+ * OUTPUT:
+ *	None.
+ *
+ * RETURN:
+ *	None.
+ */
+void mv_xor_hal_init(u32 xor_chan_num)
+{
+	u32 i;
+
+	/* Abort any XOR activity & set default configuration */
+	for (i = 0; i < xor_chan_num; i++) {
+		mv_xor_command_set(i, MV_STOP);
+		mv_xor_ctrl_set(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
+				   (4 << XEXCR_DST_BURST_LIMIT_OFFS) |
+				   (4 << XEXCR_SRC_BURST_LIMIT_OFFS));
+	}
+}
+
+/*
+ * mv_xor_ctrl_set - Set XOR channel control registers
+ *
+ * DESCRIPTION:
+ *	Update the configuration register of the given XOR channel.
+ *
+ * INPUT:
+ *	chan	 - XOR channel number.
+ *	xor_ctrl - new control register value.
+ *
+ * OUTPUT:
+ *	None.
+ *
+ * RETURN:
+ *	MV_OK.
+ * NOTE:
+ *	This function does not modify the Operation_mode field of the control register.
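+ *	(Added explanatory note: mv_xor_hal_init() above relies on this
+ *	behavior, since it writes only the burst-limit and register-access
+ *	protection fields while the current operation mode is read back
+ *	and preserved.)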
+ */
+int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
+{
+	u32 old_value;
+
+	/* update the XOR Engine [0..1] Configuration Registers (XEx_c_r) */
+	old_value = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan))) &
+		XEXCR_OPERATION_MODE_MASK;
+	xor_ctrl &= ~XEXCR_OPERATION_MODE_MASK;
+	xor_ctrl |= old_value;
+	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), xor_ctrl);
+
+	return MV_OK;
+}
+
+int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
+		    u32 init_val_high, u32 init_val_low)
+{
+	u32 temp;
+
+	if (block_size == _4G)
+		block_size -= 1;
+
+	/* Parameter checking */
+	if (chan >= MV_XOR_MAX_CHAN)
+		return MV_BAD_PARAM;
+
+	if (MV_ACTIVE == mv_xor_state_get(chan))
+		return MV_BUSY;
+
+	if ((block_size < XEXBSR_BLOCK_SIZE_MIN_VALUE) ||
+	    (block_size > XEXBSR_BLOCK_SIZE_MAX_VALUE))
+		return MV_BAD_PARAM;
+
+	/* set the operation mode to Memory Init */
+	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
+	temp &= ~XEXCR_OPERATION_MODE_MASK;
+	temp |= XEXCR_OPERATION_MODE_MEM_INIT;
+	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
+
+	/*
+	 * update the start_ptr field in XOR Engine [0..1] Destination Pointer
+	 * Register
+	 */
+	reg_write(XOR_DST_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), start_ptr);
+
+	/*
+	 * update the Block_size field in the XOR Engine[0..1] Block Size
+	 * Registers
+	 */
+	reg_write(XOR_BLOCK_SIZE_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+		  block_size);
+
+	/*
+	 * update the field Init_val_l in the XOR Engine Initial Value Register
+	 * Low (XEIVRL)
+	 */
+	reg_write(XOR_INIT_VAL_LOW_REG(XOR_UNIT(chan)), init_val_low);
+
+	/*
+	 * update the field Init_val_h in the XOR Engine Initial Value Register
+	 * High (XEIVRH)
+	 */
+	reg_write(XOR_INIT_VAL_HIGH_REG(XOR_UNIT(chan)), init_val_high);
+
+	/* start transfer */
+	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+		    XEXACTR_XESTART_MASK);
+
+	return MV_OK;
+}
+
+/*
+ * mv_xor_state_get - Get XOR channel state.
+ *
+ * DESCRIPTION:
+ *	XOR channel activity state can be active, idle or paused.
+ *	This function returns the channel activity state.
+ *
+ * INPUT:
+ *	chan - the channel number
+ *
+ * OUTPUT:
+ *	None.
+ *
+ * RETURN:
+ *	MV_IDLE            - If the engine is idle.
+ *	MV_ACTIVE          - If the engine is busy.
+ *	MV_PAUSED          - If the engine is paused.
+ *	MV_UNDEFINED_STATE - If the engine state is undefined or there is no
+ *			     such engine
+ */
+enum mv_state mv_xor_state_get(u32 chan)
+{
+	u32 state;
+
+	/* Parameter checking */
+	if (chan >= MV_XOR_MAX_CHAN) {
+		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
+		return MV_UNDEFINED_STATE;
+	}
+
+	/* read the current state */
+	state = reg_read(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
+	state &= XEXACTR_XESTATUS_MASK;
+
+	/* return the state */
+	switch (state) {
+	case XEXACTR_XESTATUS_IDLE:
+		return MV_IDLE;
+	case XEXACTR_XESTATUS_ACTIVE:
+		return MV_ACTIVE;
+	case XEXACTR_XESTATUS_PAUSED:
+		return MV_PAUSED;
+	}
+
+	return MV_UNDEFINED_STATE;
+}
+
+/*
+ * mv_xor_command_set - Set command of XOR channel
+ *
+ * DESCRIPTION:
+ *	XOR channel can be started, idle, paused and restarted.
+ *	Pause can be set only if the channel is active.
+ *	Start can be set only if the channel is idle or paused.
+ *	Restart can be set only if the channel is paused.
+ *	Stop can be set only if the channel is active.
+ *
+ * INPUT:
+ *	chan    - The channel number
+ *	command - The command type (start, stop, restart, pause)
+ *
+ * OUTPUT:
+ *	None.
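+ *	(Added usage note, for illustration only: a typical stop/start
+ *	cycle would be mv_xor_command_set(chan, MV_STOP), polling
+ *	mv_xor_state_get(chan) until it returns MV_IDLE, and then
+ *	mv_xor_command_set(chan, MV_START).)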
+ *
+ * RETURN:
+ *	MV_OK on success, MV_BAD_PARAM on erroneous parameter, MV_ERROR on
+ *	undefined XOR engine mode
+ */
+int mv_xor_command_set(u32 chan, enum mv_command command)
+{
+	enum mv_state state;
+
+	/* Parameter checking */
+	if (chan >= MV_XOR_MAX_CHAN) {
+		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
+		return MV_BAD_PARAM;
+	}
+
+	/* get the current state */
+	state = mv_xor_state_get(chan);
+
+	if ((command == MV_START) && (state == MV_IDLE)) {
+		/* command is start and current state is idle */
+		reg_bit_set(XOR_ACTIVATION_REG
+			    (XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XESTART_MASK);
+		return MV_OK;
+	} else if ((command == MV_STOP) && (state == MV_ACTIVE)) {
+		/* command is stop and current state is active */
+		reg_bit_set(XOR_ACTIVATION_REG
+			    (XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XESTOP_MASK);
+		return MV_OK;
+	} else if (((enum mv_state)command == MV_PAUSED) &&
+		   (state == MV_ACTIVE)) {
+		/*
+		 * command is pause and current state is active
+		 * (MV_PAUSE and MV_PAUSED share the same numeric value,
+		 * hence the cast)
+		 */
+		reg_bit_set(XOR_ACTIVATION_REG
+			    (XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XEPAUSE_MASK);
+		return MV_OK;
+	} else if ((command == MV_RESTART) && (state == MV_PAUSED)) {
+		/* command is restart and current state is paused */
+		reg_bit_set(XOR_ACTIVATION_REG
+			    (XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XERESTART_MASK);
+		return MV_OK;
+	} else if ((command == MV_STOP) && (state == MV_IDLE)) {
+		/* command is stop and current state is idle: nothing to do */
+		return MV_OK;
+	}
+
+	/* illegal command */
+	DB(printf("%s: ERR. Illegal command\n", __func__));
+
+	return MV_BAD_PARAM;
+}
+
+void ddr3_new_tip_ecc_scrub(void)
+{
+	u32 cs_c, max_cs;
+	u32 cs_ena = 0;
+	uint64_t total_mem_size, cs_mem_size_mb = 0, cs_mem_size = 0;
+
+	printf("DDR3 Training Sequence - Start scrubbing\n");
+	max_cs = mv_ddr_cs_num_get();
+	for (cs_c = 0; cs_c < max_cs; cs_c++)
+		cs_ena |= 1 << cs_c;
+
+	/* all chip-selects are of same size */
+	ddr3_calc_mem_cs_size(0, &cs_mem_size_mb);
+	cs_mem_size = cs_mem_size_mb * _1M;
+	mv_sys_xor_init(max_cs, cs_ena, cs_mem_size, 0);
+	total_mem_size = max_cs * cs_mem_size;
+	mv_xor_mem_init(0, 0, total_mem_size, 0xdeadbeef, 0xdeadbeef);
+	/* wait for previous transfer completion */
+	while (mv_xor_state_get(0) != MV_IDLE)
+		;
+	/* restore the XOR engine state */
+	mv_sys_xor_finish();
+
+	printf("DDR3 Training Sequence - End scrubbing\n");
+}
+
+/*
+* mv_xor_transfer - Transfer data from source to destination in one of
+* three modes: XOR, CRC32 or DMA
+*
+* DESCRIPTION:
+* This function initiates an XOR channel, according to function parameters,
+* in order to perform an XOR, CRC32 or DMA transaction.
+* To gain maximum performance the user is asked to keep the following
+* restrictions:
+* 1) Selected engine is available (not busy).
+* 2) This module does not take into consideration CPU MMU issues.
+* In order for the XOR engine to access the appropriate source
+* and destination, address parameters must be given in system
+* physical mode.
+* 3) This API does not take care of cache coherency issues. The source,
+* destination and, in case of chain, the descriptor list are assumed
+* to be cache coherent.
+* 4) Parameters validity.
+*
+* INPUT:
+* chan - XOR channel number.
+* type - One of three: XOR, CRC32 and DMA operations.
+* xor_chain_ptr - address of chain pointer
+*
+* OUTPUT:
+* None.
+*
+* RETURN:
+* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
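+*
+* EXAMPLE (added illustrative sketch, not part of the original comment;
+* desc_phys is a hypothetical physical address of a prepared descriptor
+* chain, and the busy-wait mirrors the polling idiom used by
+* ddr3_new_tip_ecc_scrub() above):
+*	if (mv_xor_transfer(0, MV_DMA, desc_phys) == MV_OK)
+*		while (mv_xor_state_get(0) != MV_IDLE)
+*			;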
+* +*******************************************************************************/ +int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr) +{ + u32 temp; + + /* Parameter checking */ + if (chan >= MV_XOR_MAX_CHAN) { + DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan)); + return MV_BAD_PARAM; + } + if (mv_xor_state_get(chan) == MV_ACTIVE) { + DB(printf("%s: ERR. Channel is already active\n", __func__)); + return MV_BUSY; + } + if (xor_chain_ptr == 0x0) { + DB(printf("%s: ERR. xor_chain_ptr is NULL pointer\n", __func__)); + return MV_BAD_PARAM; + } + + /* read configuration register and mask the operation mode field */ + temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan))); + temp &= ~XEXCR_OPERATION_MODE_MASK; + + switch (type) { + case MV_XOR: + if ((xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK) != 0) { + DB(printf("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n", + __func__)); + return MV_BAD_PARAM; + } + /* set the operation mode to XOR */ + temp |= XEXCR_OPERATION_MODE_XOR; + break; + case MV_DMA: + if ((xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK) != 0) { + DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n", + __func__)); + return MV_BAD_PARAM; + } + /* set the operation mode to DMA */ + temp |= XEXCR_OPERATION_MODE_DMA; + break; + case MV_CRC32: + if ((xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK) != 0) { + DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n", + __func__)); + return MV_BAD_PARAM; + } + /* set the operation mode to CRC32 */ + temp |= XEXCR_OPERATION_MODE_CRC; + break; + default: + return MV_BAD_PARAM; + } + + /* write the operation mode to the register */ + reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp); + /* + * update the NextDescPtr field in the XOR Engine [0..1] Next Descriptor + * Pointer Register (XExNDPR) + */ + reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), + xor_chain_ptr); + + /* start transfer */ + reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)), + XEXACTR_XESTART_MASK); + + return MV_OK; +} diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/xor.h b/roms/u-boot/drivers/ddr/marvell/a38x/xor.h new file mode 100644 index 000000000..1e0265036 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/a38x/xor.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef _XOR_H +#define _XOR_H + +#define SRAM_BASE 0x40000000 + +#define MV_XOR_MAX_UNIT 2 /* XOR unit == XOR engine */ +#define MV_XOR_MAX_CHAN 4 /* total channels for all units */ +#define MV_XOR_MAX_CHAN_PER_UNIT 2 /* channels for units */ + +#define MV_IS_POWER_OF_2(num) (((num) != 0) && (((num) & ((num) - 1)) == 0)) + +/* + * This structure describes address space window. Window base can be + * 64 bit, window size up to 4GB + */ +struct addr_win { + u32 base_low; /* 32bit base low */ + u32 base_high; /* 32bit base high */ + u32 size; /* 32bit size */ +}; + +/* This structure describes SoC units address decode window */ +struct unit_win_info { + struct addr_win addr_win; /* An address window */ + int enable; /* Address decode window is enabled/disabled */ + u8 attrib; /* chip select attributes */ + u8 target_id; /* Target Id of this MV_TARGET */ +}; + +/* + * This enumerator describes the type of functionality the XOR channel + * can have while using the same data structures. 
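+ * (Added note: mv_xor_transfer() maps these onto the corresponding
+ * XEXCR_OPERATION_MODE_XOR/_DMA/_CRC values of the channel
+ * configuration register.)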
+ */
+enum xor_type {
+	MV_XOR,			/* XOR channel functions as XOR accelerator */
+	MV_DMA,			/* XOR channel functions as IDMA channel */
+	MV_CRC32		/* XOR channel functions as CRC 32 calculator */
+};
+
+enum mv_state {
+	MV_IDLE,
+	MV_ACTIVE,
+	MV_PAUSED,
+	MV_UNDEFINED_STATE
+};
+
+/*
+ * This enumerator describes the set of commands that can be applied on
+ * an engine (e.g. IDMA, XOR). Applying a command depends on the current
+ * status (see the mv_state enumerator).
+ *
+ * Start can be applied only when status is IDLE
+ * Stop can be applied only when status is IDLE, ACTIVE or PAUSED
+ * Pause can be applied only when status is ACTIVE
+ * Restart can be applied only when status is PAUSED
+ */
+enum mv_command {
+	MV_START,		/* Start */
+	MV_STOP,		/* Stop */
+	MV_PAUSE,		/* Pause */
+	MV_RESTART		/* Restart */
+};
+
+enum xor_override_target {
+	SRC_ADDR0,		/* Source Address #0 Control */
+	SRC_ADDR1,		/* Source Address #1 Control */
+	SRC_ADDR2,		/* Source Address #2 Control */
+	SRC_ADDR3,		/* Source Address #3 Control */
+	SRC_ADDR4,		/* Source Address #4 Control */
+	SRC_ADDR5,		/* Source Address #5 Control */
+	SRC_ADDR6,		/* Source Address #6 Control */
+	SRC_ADDR7,		/* Source Address #7 Control */
+	XOR_DST_ADDR,		/* Destination Address Control */
+	XOR_NEXT_DESC		/* Next Descriptor Address Control */
+};
+
+enum mv_state mv_xor_state_get(u32 chan);
+void mv_xor_hal_init(u32 xor_chan_num);
+int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl);
+int mv_xor_command_set(u32 chan, enum mv_command command);
+int mv_xor_override_set(u32 chan, enum xor_override_target target, u32 win_num,
+			int enable);
+int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr);
+
+#endif
diff --git a/roms/u-boot/drivers/ddr/marvell/a38x/xor_regs.h b/roms/u-boot/drivers/ddr/marvell/a38x/xor_regs.h
new file mode 100644
index 000000000..fdfc0f234
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/a38x/xor_regs.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd.
and its affiliates
+ */
+
+#ifndef _XOR_REGS_h
+#define _XOR_REGS_h
+
+/*
+ * For controllers that have two XOR units, then chans 2 & 3 will be
+ * mapped to channels 0 & 1 of unit 1
+ */
+#define XOR_UNIT(chan)			((chan) >> 1)
+#define XOR_CHAN(chan)			((chan) & 1)
+
+#define MV_XOR_REGS_OFFSET(unit)	(0x60900)
+#define MV_XOR_REGS_BASE(unit)		(MV_XOR_REGS_OFFSET(unit))
+
+/* XOR Engine Control Register Map */
+#define XOR_CHANNEL_ARBITER_REG(unit)	(MV_XOR_REGS_BASE(unit))
+#define XOR_CONFIG_REG(unit, chan)	(MV_XOR_REGS_BASE(unit) + \
+					(0x10 + ((chan) * 4)))
+#define XOR_ACTIVATION_REG(unit, chan)	(MV_XOR_REGS_BASE(unit) + \
+					(0x20 + ((chan) * 4)))
+
+/* XOR Engine Interrupt Register Map */
+#define XOR_CAUSE_REG(unit)		(MV_XOR_REGS_BASE(unit)+(0x30))
+#define XOR_MASK_REG(unit)		(MV_XOR_REGS_BASE(unit)+(0x40))
+#define XOR_ERROR_CAUSE_REG(unit)	(MV_XOR_REGS_BASE(unit)+(0x50))
+#define XOR_ERROR_ADDR_REG(unit)	(MV_XOR_REGS_BASE(unit)+(0x60))
+
+/* XOR Engine Descriptor Register Map */
+#define XOR_NEXT_DESC_PTR_REG(unit, chan)	(MV_XOR_REGS_BASE(unit) + \
+						(0x200 + ((chan) * 4)))
+#define XOR_CURR_DESC_PTR_REG(unit, chan)	(MV_XOR_REGS_BASE(unit) + \
+						(0x210 + ((chan) * 4)))
+#define XOR_BYTE_COUNT_REG(unit, chan)		(MV_XOR_REGS_BASE(unit) + \
+						(0x220 + ((chan) * 4)))
+
+/* XOR Engine ECC/Mem_init Register Map */
+#define XOR_DST_PTR_REG(unit, chan)	(MV_XOR_REGS_BASE(unit) + \
+					(0x2b0 + ((chan) * 4)))
+#define XOR_BLOCK_SIZE_REG(unit, chan)	(MV_XOR_REGS_BASE(unit) + \
+					(0x2c0 + ((chan) * 4)))
+#define XOR_TIMER_MODE_CTRL_REG(unit)		(MV_XOR_REGS_BASE(unit) + (0x2d0))
+#define XOR_TIMER_MODE_INIT_VAL_REG(unit)	(MV_XOR_REGS_BASE(unit) + (0x2d4))
+#define XOR_TIMER_MODE_CURR_VAL_REG(unit)	(MV_XOR_REGS_BASE(unit) + (0x2d8))
+#define XOR_INIT_VAL_LOW_REG(unit)		(MV_XOR_REGS_BASE(unit) + (0x2e0))
+#define XOR_INIT_VAL_HIGH_REG(unit)		(MV_XOR_REGS_BASE(unit) + (0x2e4))
+
+/* XOR Engine Debug Register Map */
+#define XOR_DEBUG_REG(unit)		(MV_XOR_REGS_BASE(unit) + (0x70))
+
+/* XOR register fields */
+
+/* XOR Engine Channel Arbiter Register */
+#define XECAR_SLICE_OFFS(slice_num)	(slice_num)
+#define XECAR_SLICE_MASK(slice_num)	(1 << (XECAR_SLICE_OFFS(slice_num)))
+
+/* XOR Engine [0..1] Configuration Registers */
+#define XEXCR_OPERATION_MODE_OFFS	(0)
+#define XEXCR_OPERATION_MODE_MASK	(7 << XEXCR_OPERATION_MODE_OFFS)
+#define XEXCR_OPERATION_MODE_XOR	(0 << XEXCR_OPERATION_MODE_OFFS)
+#define XEXCR_OPERATION_MODE_CRC	(1 << XEXCR_OPERATION_MODE_OFFS)
+#define XEXCR_OPERATION_MODE_DMA	(2 << XEXCR_OPERATION_MODE_OFFS)
+#define XEXCR_OPERATION_MODE_ECC	(3 << XEXCR_OPERATION_MODE_OFFS)
+#define XEXCR_OPERATION_MODE_MEM_INIT	(4 << XEXCR_OPERATION_MODE_OFFS)
+
+#define XEXCR_SRC_BURST_LIMIT_OFFS	(4)
+#define XEXCR_SRC_BURST_LIMIT_MASK	(7 << XEXCR_SRC_BURST_LIMIT_OFFS)
+#define XEXCR_DST_BURST_LIMIT_OFFS	(8)
+#define XEXCR_DST_BURST_LIMIT_MASK	(7 << XEXCR_DST_BURST_LIMIT_OFFS)
+#define XEXCR_DRD_RES_SWP_OFFS		(12)
+#define XEXCR_DRD_RES_SWP_MASK		(1 << XEXCR_DRD_RES_SWP_OFFS)
+#define XEXCR_DWR_REQ_SWP_OFFS		(13)
+#define XEXCR_DWR_REQ_SWP_MASK		(1 << XEXCR_DWR_REQ_SWP_OFFS)
+#define XEXCR_DES_SWP_OFFS		(14)
+#define XEXCR_DES_SWP_MASK		(1 << XEXCR_DES_SWP_OFFS)
+#define XEXCR_REG_ACC_PROTECT_OFFS	(15)
+#define XEXCR_REG_ACC_PROTECT_MASK	(1 << XEXCR_REG_ACC_PROTECT_OFFS)
+
+/* XOR Engine [0..1] Activation Registers */
+#define XEXACTR_XESTART_OFFS		(0)
+#define XEXACTR_XESTART_MASK		(1 << XEXACTR_XESTART_OFFS)
+#define XEXACTR_XESTOP_OFFS		(1)
+#define XEXACTR_XESTOP_MASK		(1 << XEXACTR_XESTOP_OFFS)
+#define
XEXACTR_XEPAUSE_OFFS (2) +#define XEXACTR_XEPAUSE_MASK (1 << XEXACTR_XEPAUSE_OFFS) +#define XEXACTR_XERESTART_OFFS (3) +#define XEXACTR_XERESTART_MASK (1 << XEXACTR_XERESTART_OFFS) +#define XEXACTR_XESTATUS_OFFS (4) +#define XEXACTR_XESTATUS_MASK (3 << XEXACTR_XESTATUS_OFFS) +#define XEXACTR_XESTATUS_IDLE (0 << XEXACTR_XESTATUS_OFFS) +#define XEXACTR_XESTATUS_ACTIVE (1 << XEXACTR_XESTATUS_OFFS) +#define XEXACTR_XESTATUS_PAUSED (2 << XEXACTR_XESTATUS_OFFS) + +/* XOR Engine Interrupt Cause Register (XEICR) */ +#define XEICR_CHAN_OFFS 16 +#define XEICR_CAUSE_OFFS(chan) (chan * XEICR_CHAN_OFFS) +#define XEICR_CAUSE_MASK(chan, cause) (1 << (cause + XEICR_CAUSE_OFFS(chan))) +#define XEICR_COMP_MASK_ALL 0x000f000f +#define XEICR_COMP_MASK(chan) (0x000f << XEICR_CAUSE_OFFS(chan)) +#define XEICR_ERR_MASK 0x03800380 + +/* XOR Engine Error Cause Register (XEECR) */ +#define XEECR_ERR_TYPE_OFFS 0 +#define XEECR_ERR_TYPE_MASK (0x1f << XEECR_ERR_TYPE_OFFS) + +/* XOR Engine Error Address Register (XEEAR) */ +#define XEEAR_ERR_ADDR_OFFS (0) +#define XEEAR_ERR_ADDR_MASK (0xffffffff << XEEAR_ERR_ADDR_OFFS) + +/* XOR Engine [0..1] Next Descriptor Pointer Register */ +#define XEXNDPR_NEXT_DESC_PTR_OFFS (0) +#define XEXNDPR_NEXT_DESC_PTR_MASK (0xffffffff << \ + XEXNDPR_NEXT_DESC_PTR_OFFS) + +/* XOR Engine [0..1] Current Descriptor Pointer Register */ +#define XEXCDPR_CURRENT_DESC_PTR_OFFS (0) +#define XEXCDPR_CURRENT_DESC_PTR_MASK (0xffffffff << \ + XEXCDPR_CURRENT_DESC_PTR_OFFS) + +/* XOR Engine [0..1] Byte Count Register */ +#define XEXBCR_BYTE_CNT_OFFS (0) +#define XEXBCR_BYTE_CNT_MASK (0xffffffff << XEXBCR_BYTE_CNT_OFFS) + +/* XOR Engine [0..1] Destination Pointer Register */ +#define XEXDPR_DST_PTR_OFFS (0) +#define XEXDPR_DST_PTR_MASK (0xffffffff << XEXDPR_DST_PTR_OFFS) +#define XEXDPR_DST_PTR_XOR_MASK (0x3f) +#define XEXDPR_DST_PTR_DMA_MASK (0x1f) +#define XEXDPR_DST_PTR_CRC_MASK (0x1f) + +/* XOR Engine[0..1] Block Size Registers */ +#define XEXBSR_BLOCK_SIZE_OFFS (0) +#define XEXBSR_BLOCK_SIZE_MASK (0xffffffff << XEXBSR_BLOCK_SIZE_OFFS) +#define XEXBSR_BLOCK_SIZE_MIN_VALUE (128) +#define XEXBSR_BLOCK_SIZE_MAX_VALUE (0xffffffff) + +/* XOR Engine Timer Mode Control Register (XETMCR) */ +#define XETMCR_TIMER_EN_OFFS (0) +#define XETMCR_TIMER_EN_MASK (1 << XETMCR_TIMER_EN_OFFS) +#define XETMCR_TIMER_EN_ENABLE (1 << XETMCR_TIMER_EN_OFFS) +#define XETMCR_TIMER_EN_DISABLE (0 << XETMCR_TIMER_EN_OFFS) +#define XETMCR_SECTION_SIZE_CTRL_OFFS (8) +#define XETMCR_SECTION_SIZE_CTRL_MASK (0x1f << XETMCR_SECTION_SIZE_CTRL_OFFS) +#define XETMCR_SECTION_SIZE_MIN_VALUE (7) +#define XETMCR_SECTION_SIZE_MAX_VALUE (31) + +/* XOR Engine Timer Mode Initial Value Register (XETMIVR) */ +#define XETMIVR_TIMER_INIT_VAL_OFFS (0) +#define XETMIVR_TIMER_INIT_VAL_MASK (0xffffffff << \ + XETMIVR_TIMER_INIT_VAL_OFFS) + +/* XOR Engine Timer Mode Current Value Register (XETMCVR) */ +#define XETMCVR_TIMER_CRNT_VAL_OFFS (0) +#define XETMCVR_TIMER_CRNT_VAL_MASK (0xffffffff << \ + XETMCVR_TIMER_CRNT_VAL_OFFS) + +/* XOR Engine Initial Value Register Low (XEIVRL) */ +#define XEIVRL_INIT_VAL_L_OFFS (0) +#define XEIVRL_INIT_VAL_L_MASK (0xffffffff << XEIVRL_INIT_VAL_L_OFFS) + +/* XOR Engine Initial Value Register High (XEIVRH) */ +#define XEIVRH_INIT_VAL_H_OFFS (0) +#define XEIVRH_INIT_VAL_H_MASK (0xffffffff << XEIVRH_INIT_VAL_H_OFFS) + +/* XOR Engine Debug Register (XEDBR) */ +#define XEDBR_PARITY_ERR_INSR_OFFS (0) +#define XEDBR_PARITY_ERR_INSR_MASK (1 << XEDBR_PARITY_ERR_INSR_OFFS) +#define XEDBR_XBAR_ERR_INSR_OFFS (1) +#define 
XEDBR_XBAR_ERR_INSR_MASK (1 << XEDBR_XBAR_ERR_INSR_OFFS) + +/* XOR Engine address decode registers. */ +/* Maximum address decode windows */ +#define XOR_MAX_ADDR_DEC_WIN 8 +/* Maximum address arbiter windows */ +#define XOR_MAX_REMAP_WIN 4 + +/* XOR Engine Address Decoding Register Map */ +#define XOR_WINDOW_CTRL_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \ + (0x240 + ((chan) * 4))) +#define XOR_BASE_ADDR_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \ + (0x250 + ((win_num) * 4))) +#define XOR_SIZE_MASK_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \ + (0x270 + ((win_num) * 4))) +#define XOR_HIGH_ADDR_REMAP_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \ + (0x290 + ((win_num) * 4))) +#define XOR_ADDR_OVRD_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \ + (0x2a0 + ((win_num) * 4))) + +/* XOR Engine [0..1] Window Control Registers */ +#define XEXWCR_WIN_EN_OFFS(win_num) (win_num) +#define XEXWCR_WIN_EN_MASK(win_num) (1 << (XEXWCR_WIN_EN_OFFS(win_num))) +#define XEXWCR_WIN_EN_ENABLE(win_num) (1 << (XEXWCR_WIN_EN_OFFS(win_num))) +#define XEXWCR_WIN_EN_DISABLE(win_num) (0 << (XEXWCR_WIN_EN_OFFS(win_num))) + +#define XEXWCR_WIN_ACC_OFFS(win_num) ((2 * win_num) + 16) +#define XEXWCR_WIN_ACC_MASK(win_num) (3 << (XEXWCR_WIN_ACC_OFFS(win_num))) +#define XEXWCR_WIN_ACC_NO_ACC(win_num) (0 << (XEXWCR_WIN_ACC_OFFS(win_num))) +#define XEXWCR_WIN_ACC_RO(win_num) (1 << (XEXWCR_WIN_ACC_OFFS(win_num))) +#define XEXWCR_WIN_ACC_RW(win_num) (3 << (XEXWCR_WIN_ACC_OFFS(win_num))) + +/* XOR Engine Base Address Registers (XEBARx) */ +#define XEBARX_TARGET_OFFS (0) +#define XEBARX_TARGET_MASK (0xf << XEBARX_TARGET_OFFS) +#define XEBARX_ATTR_OFFS (8) +#define XEBARX_ATTR_MASK (0xff << XEBARX_ATTR_OFFS) +#define XEBARX_BASE_OFFS (16) +#define XEBARX_BASE_MASK (0xffff << XEBARX_BASE_OFFS) + +/* XOR Engine Size Mask Registers (XESMRx) */ +#define XESMRX_SIZE_MASK_OFFS (16) +#define XESMRX_SIZE_MASK_MASK (0xffff << XESMRX_SIZE_MASK_OFFS) +#define XOR_WIN_SIZE_ALIGN _64K + +/* XOR Engine High Address Remap Register (XEHARRx1) */ +#define XEHARRX_REMAP_OFFS (0) +#define XEHARRX_REMAP_MASK (0xffffffff << XEHARRX_REMAP_OFFS) + +#define XOR_OVERRIDE_CTRL_REG(chan) (MV_XOR_REGS_BASE(XOR_UNIT(chan)) + \ + (0x2a0 + ((XOR_CHAN(chan)) * 4))) + +/* XOR Engine [0..1] Address Override Control Register */ +#define XEXAOCR_OVR_EN_OFFS(target) (3 * target) +#define XEXAOCR_OVR_EN_MASK(target) (1 << (XEXAOCR_OVR_EN_OFFS(target))) +#define XEXAOCR_OVR_PTR_OFFS(target) ((3 * target) + 1) +#define XEXAOCR_OVR_PTR_MASK(target) (3 << (XEXAOCR_OVR_PTR_OFFS(target))) +#define XEXAOCR_OVR_BAR(win_num, target) (win_num << \ + (XEXAOCR_OVR_PTR_OFFS(target))) + +/* Maximum address override windows */ +#define XOR_MAX_OVERRIDE_WIN 4 + +#endif /* _XOR_REGS_h */ diff --git a/roms/u-boot/drivers/ddr/marvell/axp/Makefile b/roms/u-boot/drivers/ddr/marvell/axp/Makefile new file mode 100644 index 000000000..d04d9a21d --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0+ + +obj-$(CONFIG_SPL_BUILD) += ddr3_dfs.o +obj-$(CONFIG_SPL_BUILD) += ddr3_dqs.o +obj-$(CONFIG_SPL_BUILD) += ddr3_hw_training.o +obj-$(CONFIG_SPL_BUILD) += ddr3_init.o +obj-$(CONFIG_SPL_BUILD) += ddr3_pbs.o +obj-$(CONFIG_SPL_BUILD) += ddr3_read_leveling.o +obj-$(CONFIG_SPL_BUILD) += ddr3_sdram.o +obj-$(CONFIG_SPL_BUILD) += ddr3_spd.o +obj-$(CONFIG_SPL_BUILD) += ddr3_write_leveling.o +obj-$(CONFIG_SPL_BUILD) += xor.o diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp.h new file 
mode 100644 index 000000000..270691e9b --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp.h @@ -0,0 +1,512 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef __DDR3_AXP_H +#define __DDR3_AXP_H + +#define MV_78XX0_Z1_REV 0x0 +#define MV_78XX0_A0_REV 0x1 +#define MV_78XX0_B0_REV 0x2 + +#define SAR_DDR3_FREQ_MASK 0xFE00000 +#define SAR_CPU_FAB_GET(cpu, fab) (((cpu & 0x7) << 21) | ((fab & 0xF) << 24)) + +#define MAX_CS 4 + +#define MIN_DIMM_ADDR 0x50 +#define FAR_END_DIMM_ADDR 0x50 +#define MAX_DIMM_ADDR 0x60 + +#ifndef CONFIG_DDR_FIXED_SIZE +#define SDRAM_CS_SIZE 0xFFFFFFF +#else +#define SDRAM_CS_SIZE (CONFIG_DDR_FIXED_SIZE - 1) +#endif +#define SDRAM_CS_BASE 0x0 +#define SDRAM_DIMM_SIZE 0x80000000 + +#define CPU_CONFIGURATION_REG(id) (0x21800 + (id * 0x100)) +#define CPU_MRVL_ID_OFFSET 0x10 +#define SAR1_CPU_CORE_MASK 0x00000018 +#define SAR1_CPU_CORE_OFFSET 3 + +/* Only enable ECC if the board selects it */ +#ifdef CONFIG_BOARD_ECC_SUPPORT +#define ECC_SUPPORT +#endif +#define NEW_FABRIC_TWSI_ADDR 0x4E +#ifdef CONFIG_DB_784MP_GP +#define BUS_WIDTH_ECC_TWSI_ADDR 0x4E +#else +#define BUS_WIDTH_ECC_TWSI_ADDR 0x4F +#endif +#define MV_MAX_DDR3_STATIC_SIZE 50 +#define MV_DDR3_MODES_NUMBER 30 + +#define RESUME_RL_PATTERNS_ADDR (0xFE0000) +#define RESUME_RL_PATTERNS_SIZE (0x100) +#define RESUME_TRAINING_VALUES_ADDR (RESUME_RL_PATTERNS_ADDR + RESUME_RL_PATTERNS_SIZE) +#define RESUME_TRAINING_VALUES_MAX (0xCD0) +#define BOOT_INFO_ADDR (RESUME_RL_PATTERNS_ADDR + 0x1000) +#define CHECKSUM_RESULT_ADDR (BOOT_INFO_ADDR + 0x1000) +#define NUM_OF_REGISTER_ADDR (CHECKSUM_RESULT_ADDR + 4) +#define SUSPEND_MAGIC_WORD (0xDEADB002) +#define REGISTER_LIST_END (0xFFFFFFFF) + +/* + * Registers offset + */ + +#define REG_SAMPLE_RESET_LOW_ADDR 0x18230 +#define REG_SAMPLE_RESET_HIGH_ADDR 0x18234 +#define REG_SAMPLE_RESET_CPU_FREQ_OFFS 21 +#define REG_SAMPLE_RESET_CPU_FREQ_MASK 0x00E00000 +#define REG_SAMPLE_RESET_FAB_OFFS 24 +#define REG_SAMPLE_RESET_FAB_MASK 0xF000000 +#define REG_SAMPLE_RESET_TCLK_OFFS 28 +#define REG_SAMPLE_RESET_CPU_ARCH_OFFS 31 +#define REG_SAMPLE_RESET_HIGH_CPU_FREQ_OFFS 20 + +/* MISC */ +/* + * In mainline U-Boot we're re-configuring the mvebu base address + * register to 0xf1000000. So need to use this value for the DDR + * training code as well. 
+ */ +#define INTER_REGS_BASE SOC_REGS_PHY_BASE + +/* DDR */ +#define REG_SDRAM_CONFIG_ADDR 0x1400 +#define REG_SDRAM_CONFIG_MASK 0x9FFFFFFF +#define REG_SDRAM_CONFIG_RFRS_MASK 0x3FFF +#define REG_SDRAM_CONFIG_WIDTH_OFFS 15 +#define REG_SDRAM_CONFIG_REGDIMM_OFFS 17 +#define REG_SDRAM_CONFIG_ECC_OFFS 18 +#define REG_SDRAM_CONFIG_IERR_OFFS 19 +#define REG_SDRAM_CONFIG_PUPRSTDIV_OFFS 28 +#define REG_SDRAM_CONFIG_RSTRD_OFFS 30 + +#define REG_DUNIT_CTRL_LOW_ADDR 0x1404 +#define REG_DUNIT_CTRL_LOW_2T_OFFS 3 +#define REG_DUNIT_CTRL_LOW_2T_MASK 0x3 +#define REG_DUNIT_CTRL_LOW_DPDE_OFFS 14 + +#define REG_SDRAM_TIMING_LOW_ADDR 0x1408 + +#define REG_SDRAM_TIMING_HIGH_ADDR 0x140C +#define REG_SDRAM_TIMING_H_R2R_OFFS 7 +#define REG_SDRAM_TIMING_H_R2R_MASK 0x3 +#define REG_SDRAM_TIMING_H_R2W_W2R_OFFS 9 +#define REG_SDRAM_TIMING_H_R2W_W2R_MASK 0x3 +#define REG_SDRAM_TIMING_H_W2W_OFFS 11 +#define REG_SDRAM_TIMING_H_W2W_MASK 0x1F +#define REG_SDRAM_TIMING_H_R2R_H_OFFS 19 +#define REG_SDRAM_TIMING_H_R2R_H_MASK 0x7 +#define REG_SDRAM_TIMING_H_R2W_W2R_H_OFFS 22 +#define REG_SDRAM_TIMING_H_R2W_W2R_H_MASK 0x7 + +#define REG_SDRAM_ADDRESS_CTRL_ADDR 0x1410 +#define REG_SDRAM_ADDRESS_SIZE_OFFS 2 +#define REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS 18 +#define REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS 4 + +#define REG_SDRAM_OPEN_PAGES_ADDR 0x1414 +#define REG_SDRAM_OPERATION_CS_OFFS 8 + +#define REG_SDRAM_OPERATION_ADDR 0x1418 +#define REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS 24 +#define REG_SDRAM_OPERATION_CWA_DATA_OFFS 20 +#define REG_SDRAM_OPERATION_CWA_DATA_MASK 0xF +#define REG_SDRAM_OPERATION_CWA_RC_OFFS 16 +#define REG_SDRAM_OPERATION_CWA_RC_MASK 0xF +#define REG_SDRAM_OPERATION_CMD_MR0 0xF03 +#define REG_SDRAM_OPERATION_CMD_MR1 0xF04 +#define REG_SDRAM_OPERATION_CMD_MR2 0xF08 +#define REG_SDRAM_OPERATION_CMD_MR3 0xF09 +#define REG_SDRAM_OPERATION_CMD_RFRS 0xF02 +#define REG_SDRAM_OPERATION_CMD_CWA 0xF0E +#define REG_SDRAM_OPERATION_CMD_RFRS_DONE 0xF +#define REG_SDRAM_OPERATION_CMD_MASK 0xF +#define REG_SDRAM_OPERATION_CS_OFFS 8 + +#define REG_OUDDR3_TIMING_ADDR 0x142C + +#define REG_SDRAM_MODE_ADDR 0x141C + +#define REG_SDRAM_EXT_MODE_ADDR 0x1420 + +#define REG_DDR_CONT_HIGH_ADDR 0x1424 + +#define REG_ODT_TIME_LOW_ADDR 0x1428 +#define REG_ODT_ON_CTL_RD_OFFS 12 +#define REG_ODT_OFF_CTL_RD_OFFS 16 +#define REG_SDRAM_ERROR_ADDR 0x1454 +#define REG_SDRAM_AUTO_PWR_SAVE_ADDR 0x1474 +#define REG_ODT_TIME_HIGH_ADDR 0x147C + +#define REG_SDRAM_INIT_CTRL_ADDR 0x1480 +#define REG_SDRAM_INIT_CTRL_OFFS 0 +#define REG_SDRAM_INIT_CKE_ASSERT_OFFS 2 +#define REG_SDRAM_INIT_RESET_DEASSERT_OFFS 3 + +#define REG_SDRAM_ODT_CTRL_LOW_ADDR 0x1494 + +#define REG_SDRAM_ODT_CTRL_HIGH_ADDR 0x1498 +/*#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK 0xFFFFFF55 */ +#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK 0x0 +#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_ENA 0x3 + +#define REG_DUNIT_ODT_CTRL_ADDR 0x149C +#define REG_DUNIT_ODT_CTRL_OVRD_OFFS 8 +#define REG_DUNIT_ODT_CTRL_OVRD_VAL_OFFS 9 + +#define REG_DRAM_FIFO_CTRL_ADDR 0x14A0 + +#define REG_DRAM_AXI_CTRL_ADDR 0x14A8 +#define REG_DRAM_AXI_CTRL_AXIDATABUSWIDTH_OFFS 0 + +#define REG_METAL_MASK_ADDR 0x14B0 +#define REG_METAL_MASK_MASK 0xDFFFFFFF +#define REG_METAL_MASK_RETRY_OFFS 0 + +#define REG_DRAM_ADDR_CTRL_DRIVE_STRENGTH_ADDR 0x14C0 + +#define REG_DRAM_DATA_DQS_DRIVE_STRENGTH_ADDR 0x14C4 +#define REG_DRAM_VER_CAL_MACHINE_CTRL_ADDR 0x14c8 +#define REG_DRAM_MAIN_PADS_CAL_ADDR 0x14CC + +#define REG_DRAM_HOR_CAL_MACHINE_CTRL_ADDR 0x17c8 + +#define REG_CS_SIZE_SCRATCH_ADDR 0x1504 +#define REG_DYNAMIC_POWER_SAVE_ADDR 0x1520 
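+
+/*
+ * Usage sketch (added for illustration, not in the original sources;
+ * reg_read()/reg_write() are the register accessors used by this
+ * training code): many of the *_OFFS/*_MASK pairs in this file, such
+ * as the REG_DFS_* fields below, keep the mask unshifted, so a field
+ * update is a read-modify-write of the form:
+ *
+ *	reg = reg_read(REG_DFS_ADDR);
+ *	reg &= ~(REG_DFS_CL_NEXT_STATE_MASK << REG_DFS_CL_NEXT_STATE_OFFS);
+ *	reg |= (cl & REG_DFS_CL_NEXT_STATE_MASK) << REG_DFS_CL_NEXT_STATE_OFFS;
+ *	reg_write(REG_DFS_ADDR, reg);
+ */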
+#define REG_DDR_IO_ADDR 0x1524 +#define REG_DDR_IO_CLK_RATIO_OFFS 15 + +#define REG_DFS_ADDR 0x1528 +#define REG_DFS_DLLNEXTSTATE_OFFS 0 +#define REG_DFS_BLOCK_OFFS 1 +#define REG_DFS_SR_OFFS 2 +#define REG_DFS_ATSR_OFFS 3 +#define REG_DFS_RECONF_OFFS 4 +#define REG_DFS_CL_NEXT_STATE_OFFS 8 +#define REG_DFS_CL_NEXT_STATE_MASK 0xF +#define REG_DFS_CWL_NEXT_STATE_OFFS 12 +#define REG_DFS_CWL_NEXT_STATE_MASK 0x7 + +#define REG_READ_DATA_SAMPLE_DELAYS_ADDR 0x1538 +#define REG_READ_DATA_SAMPLE_DELAYS_MASK 0x1F +#define REG_READ_DATA_SAMPLE_DELAYS_OFFS 8 + +#define REG_READ_DATA_READY_DELAYS_ADDR 0x153C +#define REG_READ_DATA_READY_DELAYS_MASK 0x1F +#define REG_READ_DATA_READY_DELAYS_OFFS 8 + +#define START_BURST_IN_ADDR 1 + +#define REG_DRAM_TRAINING_SHADOW_ADDR 0x18488 +#define REG_DRAM_TRAINING_ADDR 0x15B0 +#define REG_DRAM_TRAINING_LOW_FREQ_OFFS 0 +#define REG_DRAM_TRAINING_PATTERNS_OFFS 4 +#define REG_DRAM_TRAINING_MED_FREQ_OFFS 2 +#define REG_DRAM_TRAINING_WL_OFFS 3 +#define REG_DRAM_TRAINING_RL_OFFS 6 +#define REG_DRAM_TRAINING_DQS_RX_OFFS 15 +#define REG_DRAM_TRAINING_DQS_TX_OFFS 16 +#define REG_DRAM_TRAINING_CS_OFFS 20 +#define REG_DRAM_TRAINING_RETEST_OFFS 24 +#define REG_DRAM_TRAINING_DFS_FREQ_OFFS 27 +#define REG_DRAM_TRAINING_DFS_REQ_OFFS 29 +#define REG_DRAM_TRAINING_ERROR_OFFS 30 +#define REG_DRAM_TRAINING_AUTO_OFFS 31 +#define REG_DRAM_TRAINING_RETEST_PAR 0x3 +#define REG_DRAM_TRAINING_RETEST_MASK 0xF8FFFFFF +#define REG_DRAM_TRAINING_CS_MASK 0xFF0FFFFF +#define REG_DRAM_TRAINING_PATTERNS_MASK 0xFF0F0000 + +#define REG_DRAM_TRAINING_1_ADDR 0x15B4 +#define REG_DRAM_TRAINING_1_TRNBPOINT_OFFS 16 + +#define REG_DRAM_TRAINING_2_ADDR 0x15B8 +#define REG_DRAM_TRAINING_2_OVERRUN_OFFS 17 +#define REG_DRAM_TRAINING_2_FIFO_RST_OFFS 4 +#define REG_DRAM_TRAINING_2_RL_MODE_OFFS 3 +#define REG_DRAM_TRAINING_2_WL_MODE_OFFS 2 +#define REG_DRAM_TRAINING_2_ECC_MUX_OFFS 1 +#define REG_DRAM_TRAINING_2_SW_OVRD_OFFS 0 + +#define REG_DRAM_TRAINING_PATTERN_BASE_ADDR 0x15BC +#define REG_DRAM_TRAINING_PATTERN_BASE_OFFS 3 + +#define REG_TRAINING_DEBUG_2_ADDR 0x15C4 +#define REG_TRAINING_DEBUG_2_OFFS 16 +#define REG_TRAINING_DEBUG_2_MASK 0x3 + +#define REG_TRAINING_DEBUG_3_ADDR 0x15C8 +#define REG_TRAINING_DEBUG_3_OFFS 3 +#define REG_TRAINING_DEBUG_3_MASK 0x7 + +#define MR_CS_ADDR_OFFS 4 + +#define REG_DDR3_MR0_ADDR 0x15D0 +#define REG_DDR3_MR0_CS_ADDR 0x1870 +#define REG_DDR3_MR0_CL_MASK 0x74 +#define REG_DDR3_MR0_CL_OFFS 2 +#define REG_DDR3_MR0_CL_HIGH_OFFS 3 +#define CL_MASK 0xF + +#define REG_DDR3_MR1_ADDR 0x15D4 +#define REG_DDR3_MR1_CS_ADDR 0x1874 +#define REG_DDR3_MR1_RTT_MASK 0xFFFFFDBB +#define REG_DDR3_MR1_DLL_ENA_OFFS 0 +#define REG_DDR3_MR1_RTT_DISABLED 0x0 +#define REG_DDR3_MR1_RTT_RZQ2 0x40 +#define REG_DDR3_MR1_RTT_RZQ4 0x2 +#define REG_DDR3_MR1_RTT_RZQ6 0x42 +#define REG_DDR3_MR1_RTT_RZQ8 0x202 +#define REG_DDR3_MR1_RTT_RZQ12 0x4 +#define REG_DDR3_MR1_OUTBUF_WL_MASK 0xFFFFEF7F /* WL-disabled,OB-enabled */ +#define REG_DDR3_MR1_OUTBUF_DIS_OFFS 12 /* Output Buffer Disabled */ +#define REG_DDR3_MR1_WL_ENA_OFFS 7 +#define REG_DDR3_MR1_WL_ENA 0x80 /* WL Enabled */ +#define REG_DDR3_MR1_ODT_MASK 0xFFFFFDBB + +#define REG_DDR3_MR2_ADDR 0x15D8 +#define REG_DDR3_MR2_CS_ADDR 0x1878 +#define REG_DDR3_MR2_CWL_OFFS 3 +#define REG_DDR3_MR2_CWL_MASK 0x7 +#define REG_DDR3_MR2_ODT_MASK 0xFFFFF9FF +#define REG_DDR3_MR3_ADDR 0x15DC +#define REG_DDR3_MR3_CS_ADDR 0x187C + +#define REG_DDR3_RANK_CTRL_ADDR 0x15E0 +#define REG_DDR3_RANK_CTRL_CS_ENA_MASK 0xF +#define REG_DDR3_RANK_CTRL_MIRROR_OFFS 4 + +#define 
REG_ZQC_CONF_ADDR 0x15E4 + +#define REG_DRAM_PHY_CONFIG_ADDR 0x15EC +#define REG_DRAM_PHY_CONFIG_MASK 0x3FFFFFFF + +#define REG_ODPG_CNTRL_ADDR 0x1600 +#define REG_ODPG_CNTRL_OFFS 21 + +#define REG_PHY_LOCK_MASK_ADDR 0x1670 +#define REG_PHY_LOCK_MASK_MASK 0xFFFFF000 + +#define REG_PHY_LOCK_STATUS_ADDR 0x1674 +#define REG_PHY_LOCK_STATUS_LOCK_OFFS 9 +#define REG_PHY_LOCK_STATUS_LOCK_MASK 0xFFF +#define REG_PHY_LOCK_APLL_ADLL_STATUS_MASK 0x7FF + +#define REG_PHY_REGISTRY_FILE_ACCESS_ADDR 0x16A0 +#define REG_PHY_REGISTRY_FILE_ACCESS_OP_WR 0xC0000000 +#define REG_PHY_REGISTRY_FILE_ACCESS_OP_RD 0x80000000 +#define REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE 0x80000000 +#define REG_PHY_BC_OFFS 27 +#define REG_PHY_CNTRL_OFFS 26 +#define REG_PHY_CS_OFFS 16 +#define REG_PHY_DQS_REF_DLY_OFFS 10 +#define REG_PHY_PHASE_OFFS 8 +#define REG_PHY_PUP_OFFS 22 + +#define REG_TRAINING_WL_ADDR 0x16AC +#define REG_TRAINING_WL_CS_MASK 0xFFFFFFFC +#define REG_TRAINING_WL_UPD_OFFS 2 +#define REG_TRAINING_WL_CS_DONE_OFFS 3 +#define REG_TRAINING_WL_RATIO_MASK 0xFFFFFF0F +#define REG_TRAINING_WL_1TO1 0x50 +#define REG_TRAINING_WL_2TO1 0x10 +#define REG_TRAINING_WL_DELAYEXP_MASK 0x20000000 +#define REG_TRAINING_WL_RESULTS_MASK 0x000001FF +#define REG_TRAINING_WL_RESULTS_OFFS 20 + +#define REG_REGISTERED_DRAM_CTRL_ADDR 0x16D0 +#define REG_REGISTERED_DRAM_CTRL_SR_FLOAT_OFFS 15 +#define REG_REGISTERED_DRAM_CTRL_PARITY_MASK 0x3F +/* DLB*/ +#define REG_STATIC_DRAM_DLB_CONTROL 0x1700 +#define DLB_BUS_OPTIMIZATION_WEIGHTS_REG 0x1704 +#define DLB_AGING_REGISTER 0x1708 +#define DLB_EVICTION_CONTROL_REG 0x170c +#define DLB_EVICTION_TIMERS_REGISTER_REG 0x1710 + +#define DLB_ENABLE 0x1 +#define DLB_WRITE_COALESING (0x1 << 2) +#define DLB_AXI_PREFETCH_EN (0x1 << 3) +#define DLB_MBUS_PREFETCH_EN (0x1 << 4) +#define PREFETCH_NLNSZTR (0x1 << 6) + +/* CPU */ +#define REG_BOOTROM_ROUTINE_ADDR 0x182D0 +#define REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS 12 + +#define REG_DRAM_INIT_CTRL_STATUS_ADDR 0x18488 +#define REG_DRAM_INIT_CTRL_TRN_CLK_OFFS 16 +#define REG_CPU_DIV_CLK_CTRL_0_NEW_RATIO 0x000200FF +#define REG_DRAM_INIT_CTRL_STATUS_2_ADDR 0x1488 + +#define REG_CPU_DIV_CLK_CTRL_0_ADDR 0x18700 + +#define REG_CPU_DIV_CLK_CTRL_1_ADDR 0x18704 +#define REG_CPU_DIV_CLK_CTRL_2_ADDR 0x18708 + +#define REG_CPU_DIV_CLK_CTRL_3_ADDR 0x1870C +#define REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK 0xFFFFC0FF +#define REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS 8 + +#define REG_CPU_DIV_CLK_CTRL_4_ADDR 0x18710 + +#define REG_CPU_DIV_CLK_STATUS_0_ADDR 0x18718 +#define REG_CPU_DIV_CLK_ALL_STABLE_OFFS 8 + +#define REG_CPU_PLL_CTRL_0_ADDR 0x1871C +#define REG_CPU_PLL_STATUS_0_ADDR 0x18724 +#define REG_CORE_DIV_CLK_CTRL_ADDR 0x18740 +#define REG_CORE_DIV_CLK_STATUS_ADDR 0x18744 +#define REG_DDRPHY_APLL_CTRL_ADDR 0x18780 + +#define REG_DDRPHY_APLL_CTRL_2_ADDR 0x18784 + +#define REG_SFABRIC_CLK_CTRL_ADDR 0x20858 +#define REG_SFABRIC_CLK_CTRL_SMPL_OFFS 8 + +/* DRAM Windows */ +#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8 +#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040 +#define REG_XBAR_WIN_4_BASE_ADDR 0x20044 +#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048 +#define REG_FASTPATH_WIN_0_CTRL_ADDR 0x20184 +#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078 + +/* SRAM */ +#define REG_CDI_CONFIG_ADDR 0x20220 +#define REG_SRAM_WINDOW_0_ADDR 0x20240 +#define REG_SRAM_WINDOW_0_ENA_OFFS 0 +#define REG_SRAM_WINDOW_1_ADDR 0x20244 +#define REG_SRAM_L2_ENA_ADDR 0x8500 +#define REG_SRAM_CLEAN_BY_WAY_ADDR 0x87BC + +/* PMU */ +#define REG_PMU_I_F_CTRL_ADDR 0x1C090 +#define REG_PMU_DUNIT_BLK_OFFS 16 +#define REG_PMU_DUNIT_RFRS_OFFS 20 
+#define REG_PMU_DUNIT_ACK_OFFS 24 + +/* MBUS*/ +#define MBUS_UNITS_PRIORITY_CONTROL_REG (MV_MBUS_REGS_OFFSET + 0x420) +#define FABRIC_UNITS_PRIORITY_CONTROL_REG (MV_MBUS_REGS_OFFSET + 0x424) +#define MBUS_UNITS_PREFETCH_CONTROL_REG (MV_MBUS_REGS_OFFSET + 0x428) +#define FABRIC_UNITS_PREFETCH_CONTROL_REG (MV_MBUS_REGS_OFFSET + 0x42c) + +#define REG_PM_STAT_MASK_ADDR 0x2210C +#define REG_PM_STAT_MASK_CPU0_IDLE_MASK_OFFS 16 + +#define REG_PM_EVENT_STAT_MASK_ADDR 0x22120 +#define REG_PM_EVENT_STAT_MASK_DFS_DONE_OFFS 17 + +#define REG_PM_CTRL_CONFIG_ADDR 0x22104 +#define REG_PM_CTRL_CONFIG_DFS_REQ_OFFS 18 + +#define REG_FABRIC_LOCAL_IRQ_MASK_ADDR 0x218C4 +#define REG_FABRIC_LOCAL_IRQ_PMU_MASK_OFFS 18 + +/* Controller revision info */ +#define PCI_CLASS_CODE_AND_REVISION_ID 0x008 +#define PCCRIR_REVID_OFFS 0 /* Revision ID */ +#define PCCRIR_REVID_MASK (0xff << PCCRIR_REVID_OFFS) + +/* Power Management Clock Gating Control Register */ +#define MV_PEX_IF_REGS_OFFSET(if) \ + (if < 8 ? (0x40000 + ((if) / 4) * 0x40000 + ((if) % 4) * 0x4000) \ + : (0x42000 + ((if) % 8) * 0x40000)) +#define MV_PEX_IF_REGS_BASE(unit) (MV_PEX_IF_REGS_OFFSET(unit)) +#define POWER_MNG_CTRL_REG 0x18220 +#define PEX_DEVICE_AND_VENDOR_ID 0x000 +#define PEX_CFG_DIRECT_ACCESS(if, reg) (MV_PEX_IF_REGS_BASE(if) + (reg)) +#define PMC_PEXSTOPCLOCK_OFFS(port) ((port) < 8 ? (5 + (port)) : (18 + (port))) +#define PMC_PEXSTOPCLOCK_MASK(port) (1 << PMC_PEXSTOPCLOCK_OFFS(port)) +#define PMC_PEXSTOPCLOCK_EN(port) (1 << PMC_PEXSTOPCLOCK_OFFS(port)) +#define PMC_PEXSTOPCLOCK_STOP(port) (0 << PMC_PEXSTOPCLOCK_OFFS(port)) + +/* TWSI */ +#define TWSI_DATA_ADDR_MASK 0x7 +#define TWSI_DATA_ADDR_OFFS 1 + +/* General */ +#define MAX_CS 4 + +/* Frequencies */ +#define FAB_OPT 21 +#define CLK_CPU 12 +#define CLK_VCO (2 * CLK_CPU) +#define CLK_DDR 12 + +/* Cpu Frequencies: */ +#define CLK_CPU_1000 0 +#define CLK_CPU_1066 1 +#define CLK_CPU_1200 2 +#define CLK_CPU_1333 3 +#define CLK_CPU_1500 4 +#define CLK_CPU_1666 5 +#define CLK_CPU_1800 6 +#define CLK_CPU_2000 7 +#define CLK_CPU_600 8 +#define CLK_CPU_667 9 +#define CLK_CPU_800 0xa + +/* Extra Cpu Frequencies: */ +#define CLK_CPU_1600 11 +#define CLK_CPU_2133 12 +#define CLK_CPU_2200 13 +#define CLK_CPU_2400 14 + +/* DDR3 Frequencies: */ +#define DDR_100 0 +#define DDR_300 1 +#define DDR_333 1 +#define DDR_360 2 +#define DDR_400 3 +#define DDR_444 4 +#define DDR_500 5 +#define DDR_533 6 +#define DDR_600 7 +#define DDR_640 8 +#define DDR_666 8 +#define DDR_720 9 +#define DDR_750 9 +#define DDR_800 10 +#define DDR_833 11 +#define DDR_HCLK 20 +#define DDR_S 12 +#define DDR_S_1TO1 13 +#define MARGIN_FREQ DDR_400 +#define DFS_MARGIN DDR_100 + +#define ODT_OPT 16 +#define ODT20 0x200 +#define ODT30 0x204 +#define ODT40 0x44 +#define ODT120 0x40 +#define ODT120D 0x400 + +#define MRS_DELAY 100 + +#define SDRAM_WL_SW_OFFS 0x100 +#define SDRAM_RL_OFFS 0x0 +#define SDRAM_PBS_I_OFFS 0x140 +#define SDRAM_PBS_II_OFFS 0x180 +#define SDRAM_PBS_NEXT_OFFS (SDRAM_PBS_II_OFFS - SDRAM_PBS_I_OFFS) +#define SDRAM_PBS_TX_OFFS 0x180 +#define SDRAM_PBS_TX_DM_OFFS 576 +#define SDRAM_DQS_RX_OFFS 1024 +#define SDRAM_DQS_TX_OFFS 2048 +#define SDRAM_DQS_RX_SPECIAL_OFFS 5120 + +#define LEN_STD_PATTERN 16 +#define LEN_KILLER_PATTERN 128 +#define LEN_SPECIAL_PATTERN 128 +#define LEN_PBS_PATTERN 16 + +#endif /* __DDR3_AXP_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_config.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_config.h new file mode 100644 index 000000000..10d064d0a --- /dev/null +++ 
b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_config.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef __DDR3_AXP_CONFIG_H
+#define __DDR3_AXP_CONFIG_H
+
+/*
+ * DDR3_LOG_LEVEL Information
+ *
+ * Level 0: Provides an error code in case of failure, RL, WL errors
+ * and other algorithm failures
+ * Level 1: Provides the D-Unit setup (SPD/Static configuration)
+ * Level 2: Provides the window margins as a result of DQS centralization
+ * Level 3: Provides the window margins of each DQ as a result of DQS
+ * centralization
+ */
+#ifdef CONFIG_DDR_LOG_LEVEL
+#define DDR3_LOG_LEVEL CONFIG_DDR_LOG_LEVEL
+#else
+#define DDR3_LOG_LEVEL 0
+#endif
+
+#define DDR3_PBS 1
+
+/* This flag allows the execution of SW WL/RL upon HW failure */
+#define DDR3_RUN_SW_WHEN_HW_FAIL 1
+
+/*
+ * General Configurations
+ *
+ * The following parameters are required for proper setup:
+ *
+ * DDR_TARGET_FABRIC - Set desired fabric configuration
+ * (for sample@Reset fabfreq parameter)
+ * DRAM_ECC - Set ECC support 1/0
+ * BUS_WIDTH - 64/32 bit
+ * CONFIG_SPD_EEPROM - Enables auto detection of DIMMs and their timing values
+ * DQS_CLK_ALIGNED - Set this if CLK and DQS signals are aligned on board
+ * MIXED_DIMM_STATIC - Mixed DIMM + On board devices support (ODT registers
+ * values are taken statically)
+ * DDR3_TRAINING_DEBUG - Debug prints of internal code
+ */
+#define DDR_TARGET_FABRIC 5
+/* Only enable ECC if the board selects it */
+#ifdef CONFIG_BOARD_ECC_SUPPORT
+#define DRAM_ECC 1
+#else
+#define DRAM_ECC 0
+#endif
+
+#ifdef CONFIG_DDR_32BIT
+#define BUS_WIDTH 32
+#else
+#define BUS_WIDTH 64
+#endif
+
+#undef DQS_CLK_ALIGNED
+#undef MIXED_DIMM_STATIC
+#define DDR3_TRAINING_DEBUG 0
+#define REG_DIMM_SKIP_WL 0
+
+/* Marvell board-specific configurations */
+#if defined(DB_78X60_PCAC)
+#undef CONFIG_SPD_EEPROM
+#define STATIC_TRAINING
+#endif
+
+#if defined(DB_78X60_AMC)
+#undef CONFIG_SPD_EEPROM
+#undef DRAM_ECC
+#define DRAM_ECC 1
+#endif
+
+#ifdef CONFIG_SPD_EEPROM
+/*
+ * DIMM support parameters:
+ * DRAM_2T - Set Desired 2T Mode - 0 - 1T, 0x1 - 2T, 0x2 - 3T
+ * DIMM_CS_BITMAP - bitmap representing the optional CS in DIMMs
+ * (0xF=CS0+CS1+CS2+CS3, 0xC=CS2+CS3...)
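+ *
+ * Worked example (hypothetical population, not a shipped default): one
+ * dual-rank DIMM occupies CS0+CS1, giving DIMM_CS_BITMAP = 0x3; two
+ * dual-rank DIMMs occupy all four chip selects, giving 0xF.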
+ */
+#define DRAM_2T 0x0
+#define DIMM_CS_BITMAP 0xF
+#define DUNIT_SPD
+#endif
+
+#ifdef DRAM_ECC
+/*
+ * ECC support parameters:
+ *
+ * U_BOOT_START_ADDR, U_BOOT_SCRUB_SIZE - relevant when using ECC, when
+ * the scrubbing area needs to be configured
+ */
+#define TRAINING_SIZE 0x20000
+#define U_BOOT_START_ADDR 0
+#define U_BOOT_SCRUB_SIZE 0x1000000 /* TRAINING_SIZE */
+#endif
+
+/*
+ * Registered DIMM Support - In case registered DIMM is attached,
+ * please supply the following values:
+ * (see JEDEC - JESD82-29A "Definition of the SSTE32882 Registering Clock
+ * Driver with Parity and Quad Chip
+ * Selects for DDR3/DDR3L/DDR3U RDIMM 1.5 V/1.35 V/1.25 V Applications")
+ * RC0: Global Features Control Word
+ * RC1: Clock Driver Enable Control Word
+ * RC2: Timing Control Word
+ * RC3-RC5 - taken from SPD
+ * RC8: Additional IBT Setting Control Word
+ * RC9: Power Saving Settings Control Word
+ * RC10: Encoding for RDIMM Operating Speed
+ * RC11: Operating Voltage VDD and VREFCA Control Word
+ */
+#define RDIMM_RC0 0
+#define RDIMM_RC1 0
+#define RDIMM_RC2 0
+#define RDIMM_RC8 0
+#define RDIMM_RC9 0
+#define RDIMM_RC10 0x2
+#define RDIMM_RC11 0x0
+
+#if defined(MIXED_DIMM_STATIC) || !defined(CONFIG_SPD_EEPROM)
+#define DUNIT_STATIC
+#endif
+
+#if defined(MIXED_DIMM_STATIC) || defined(CONFIG_SPD_EEPROM)
+/*
+ * This flag allows the user to change the DRAM refresh cycle in ps,
+ * only in case of SPD or mixed DIMM topology
+ */
+#define TREFI_USER_EN
+
+#ifdef TREFI_USER_EN
+#define TREFI_USER 3900000
+#endif
+#endif
+
+#ifdef CONFIG_SPD_EEPROM
+/*
+ * AUTO_DETECTION_SUPPORT - relevant ONLY for Marvell DB boards.
+ * Enables the different I2C auto-detection options
+ */
+#if defined(CONFIG_DB_88F78X60) || defined(CONFIG_DB_88F78X60_REV2) || \
+ defined(CONFIG_DB_784MP_GP)
+#define AUTO_DETECTION_SUPPORT
+#endif
+#endif
+
+#endif /* __DDR3_AXP_CONFIG_H */
diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_mc_static.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_mc_static.h
new file mode 100644
index 000000000..5d43ae596
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_mc_static.h
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd.
and its affiliates
+ */
+
+#ifndef __AXP_MC_STATIC_H
+#define __AXP_MC_STATIC_H
+
+MV_DRAM_MC_INIT ddr3_A0_db_667[MV_MAX_DDR3_STATIC_SIZE] = {
+#ifdef CONFIG_DDR_32BIT
+ {0x00001400, 0x7301c924}, /*DDR SDRAM Configuration Register */
+#else /*CONFIG_DDR_64BIT */
+ {0x00001400, 0x7301CA28}, /*DDR SDRAM Configuration Register */
+#endif
+ {0x00001404, 0x3630b800}, /*Dunit Control Low Register */
+ {0x00001408, 0x43149775}, /*DDR SDRAM Timing (Low) Register */
+ /* {0x0000140C, 0x38000C6A}, *//*DDR SDRAM Timing (High) Register */
+ {0x0000140C, 0x38d83fe0}, /*DDR SDRAM Timing (High) Register */
+
+#ifdef DB_78X60_PCAC
+ {0x00001410, 0x040F0001}, /*DDR SDRAM Address Control Register */
+#else
+ {0x00001410, 0x040F0000}, /*DDR SDRAM Address Control Register */
+#endif
+
+ {0x00001414, 0x00000000}, /*DDR SDRAM Open Pages Control Register */
+ {0x00001418, 0x00000e00}, /*DDR SDRAM Operation Register */
+ {0x00001420, 0x00000004}, /*DDR SDRAM Extended Mode Register */
+ {0x00001424, 0x0000D3FF}, /*Dunit Control High Register */
+ {0x00001428, 0x000F8830}, /*Dunit Control High Register */
+ {0x0000142C, 0x214C2F38}, /*Dunit Control High Register */
+ {0x0000147C, 0x0000c671},
+
+ {0x000014a0, 0x000002A9},
+ {0x000014a8, 0x00000101}, /*2:1 */
+ {0x00020220, 0x00000007},
+
+ {0x00001494, 0x00010000}, /*DDR SDRAM ODT Control (Low) Register */
+ {0x00001498, 0x00000000}, /*DDR SDRAM ODT Control (High) Register */
+ {0x0000149C, 0x00000301}, /*DDR Dunit ODT Control Register */
+
+ {0x000014C0, 0x192434e9}, /* DRAM address and Control Driving Strength */
+ {0x000014C4, 0x092434e9}, /* DRAM Data and DQS Driving Strength */
+
+ {0x000200e8, 0x3FFF0E01}, /* DO NOT Modify - Open Mbus Window - 2G - Mbus is required for the training sequence */
+ {0x00020184, 0x3FFFFFE0}, /* DO NOT Modify - Close fast path Window to - 2G */
+
+ {0x0001504, 0x7FFFFFF1}, /* CS0 Size */
+ {0x000150C, 0x00000000}, /* CS1 Size */
+ {0x0001514, 0x00000000}, /* CS2 Size */
+ {0x000151C, 0x00000000}, /* CS3 Size */
+
+ /* {0x00001524, 0x0000C800}, */
+ {0x00001538, 0x0000000b}, /*Read Data Sample Delays Register */
+ {0x0000153C, 0x0000000d}, /*Read Data Ready Delay Register */
+
+ {0x000015D0, 0x00000640}, /*MR0 */
+ {0x000015D4, 0x00000046}, /*MR1 */
+ {0x000015D8, 0x00000010}, /*MR2 */
+ {0x000015DC, 0x00000000}, /*MR3 */
+
+ {0x000015E4, 0x00203c18}, /*ZQC Configuration Register */
+ {0x000015EC, 0xd800aa25}, /*DDR PHY */
+ {0x0, 0x0}
+};
+
+MV_DRAM_MC_INIT ddr3_A0_AMC_667[MV_MAX_DDR3_STATIC_SIZE] = {
+#ifdef CONFIG_DDR_32BIT
+ {0x00001400, 0x7301c924}, /*DDR SDRAM Configuration Register */
+#else /*CONFIG_DDR_64BIT */
+ {0x00001400, 0x7301CA28}, /*DDR SDRAM Configuration Register */
+#endif
+ {0x00001404, 0x3630b800}, /*Dunit Control Low Register */
+ {0x00001408, 0x43149775}, /*DDR SDRAM Timing (Low) Register */
+ /* {0x0000140C, 0x38000C6A}, *//*DDR SDRAM Timing (High) Register */
+ {0x0000140C, 0x38d83fe0}, /*DDR SDRAM Timing (High) Register */
+
+#ifdef DB_78X60_PCAC
+ {0x00001410, 0x040F0001}, /*DDR SDRAM Address Control Register */
+#else
+ {0x00001410, 0x040F000C}, /*DDR SDRAM Address Control Register */
+#endif
+
+ {0x00001414, 0x00000000}, /*DDR SDRAM Open Pages Control Register */
+ {0x00001418, 0x00000e00}, /*DDR SDRAM Operation Register */
+ {0x00001420, 0x00000004}, /*DDR SDRAM Extended Mode Register */
+ {0x00001424, 0x0000D3FF}, /*Dunit Control High Register */
+ {0x00001428, 0x000F8830}, /*Dunit Control High Register */
+ {0x0000142C, 0x214C2F38}, /*Dunit Control High Register */
+ {0x0000147C, 0x0000c671},
+
+ {0x000014a0, 0x000002A9},
+ {0x000014a8, 0x00000101}, /*2:1 */
+ {0x00020220, 0x00000007},
+
+ {0x00001494, 0x00010000}, /*DDR SDRAM ODT Control (Low) Register */
+ {0x00001498, 0x00000000}, /*DDR SDRAM ODT Control (High) Register */
+ {0x0000149C, 0x00000301}, /*DDR Dunit ODT Control Register */
+
+ {0x000014C0, 0x192434e9}, /* DRAM address and Control Driving Strength */
+ {0x000014C4, 0x092434e9}, /* DRAM Data and DQS Driving Strength */
+
+ {0x000200e8, 0x3FFF0E01}, /* DO NOT Modify - Open Mbus Window - 2G - Mbus is required for the training sequence */
+ {0x00020184, 0x3FFFFFE0}, /* DO NOT Modify - Close fast path Window to - 2G */
+
+ {0x0001504, 0x3FFFFFF1}, /* CS0 Size */
+ {0x000150C, 0x00000000}, /* CS1 Size */
+ {0x0001514, 0x00000000}, /* CS2 Size */
+ {0x000151C, 0x00000000}, /* CS3 Size */
+
+ /* {0x00001524, 0x0000C800}, */
+ {0x00001538, 0x0000000b}, /*Read Data Sample Delays Register */
+ {0x0000153C, 0x0000000d}, /*Read Data Ready Delay Register */
+
+ {0x000015D0, 0x00000640}, /*MR0 */
+ {0x000015D4, 0x00000046}, /*MR1 */
+ {0x000015D8, 0x00000010}, /*MR2 */
+ {0x000015DC, 0x00000000}, /*MR3 */
+
+ {0x000015E4, 0x00203c18}, /*ZQC Configuration Register */
+ {0x000015EC, 0xd800aa25}, /*DDR PHY */
+ {0x0, 0x0}
+};
+
+MV_DRAM_MC_INIT ddr3_A0_db_400[MV_MAX_DDR3_STATIC_SIZE] = {
+#ifdef CONFIG_DDR_32BIT
+ {0x00001400, 0x73004C30}, /*DDR SDRAM Configuration Register */
+#else /* CONFIG_DDR_64BIT */
+ {0x00001400, 0x7300CC30}, /*DDR SDRAM Configuration Register */
+#endif
+ {0x00001404, 0x3630B840}, /*Dunit Control Low Register */
+ {0x00001408, 0x33137663}, /*DDR SDRAM Timing (Low) Register */
+ {0x0000140C, 0x38000C55}, /*DDR SDRAM Timing (High) Register */
+ {0x00001410, 0x040F0000}, /*DDR SDRAM Address Control Register */
+ {0x00001414, 0x00000000}, /*DDR SDRAM Open Pages Control Register */
+ {0x00001418, 0x00000e00}, /*DDR SDRAM Operation Register */
+ {0x0000141C, 0x00000672}, /*DDR SDRAM Mode Register */
+ {0x00001420, 0x00000004}, /*DDR SDRAM Extended Mode Register */
+ {0x00001424, 0x0100D3FF}, /*Dunit Control High Register */
+ {0x00001428, 0x000D6720}, /*Dunit Control High Register */
+ {0x0000142C, 0x014C2F38}, /*Dunit Control High Register */
+ {0x0000147C, 0x00006571},
+
+ {0x00001494, 0x00010000}, /*DDR SDRAM ODT Control (Low) Register */
+ {0x00001498, 0x00000000}, /*DDR SDRAM ODT Control (High) Register */
+ {0x0000149C, 0x00000301}, /*DDR Dunit ODT Control Register */
+
+ {0x000014a0, 0x000002A9},
+ {0x000014a8, 0x00000101}, /*2:1 */
+ {0x00020220, 0x00000007},
+
+ {0x000014C0, 0x192424C8}, /* DRAM address and Control Driving Strength */
+ {0x000014C4, 0xEFB24C8}, /* DRAM Data and DQS Driving Strength */
+
+ {0x000200e8, 0x3FFF0E01}, /* DO NOT Modify - Open Mbus Window - 2G - Mbus is required for the training sequence */
+ {0x00020184, 0x3FFFFFE0}, /* DO NOT Modify - Close fast path Window to - 2G */
+
+ {0x0001504, 0x7FFFFFF1}, /* CS0 Size */
+ {0x000150C, 0x00000000}, /* CS1 Size */
+ {0x0001514, 0x00000000}, /* CS2 Size */
+ {0x000151C, 0x00000000}, /* CS3 Size */
+
+ {0x00001538, 0x00000008}, /*Read Data Sample Delays Register */
+ {0x0000153C, 0x0000000A}, /*Read Data Ready Delay Register */
+
+ {0x000015D0, 0x00000630}, /*MR0 */
+ {0x000015D4, 0x00000046}, /*MR1 */
+ {0x000015D8, 0x00000008}, /*MR2 */
+ {0x000015DC, 0x00000000}, /*MR3 */
+
+ {0x000015E4, 0x00203c18}, /*ZQDS Configuration Register */
+ /* {0x000015EC, 0xDE000025}, *//*DDR PHY */
+ {0x000015EC, 0xF800AA25}, /*DDR PHY */
+ {0x0, 0x0}
+};
+
+MV_DRAM_MC_INIT
ddr3_Z1_db_600[MV_MAX_DDR3_STATIC_SIZE] = {
+#ifdef CONFIG_DDR_32BIT
+ {0x00001400, 0x73014A28}, /*DDR SDRAM Configuration Register */
+#else /*CONFIG_DDR_64BIT */
+ {0x00001400, 0x7301CA28}, /*DDR SDRAM Configuration Register */
+#endif
+ {0x00001404, 0x3630B040}, /*Dunit Control Low Register */
+ {0x00001408, 0x44149887}, /*DDR SDRAM Timing (Low) Register */
+ /* {0x0000140C, 0x38000C6A}, *//*DDR SDRAM Timing (High) Register */
+ {0x0000140C, 0x38D83FE0}, /*DDR SDRAM Timing (High) Register */
+
+#ifdef DB_78X60_PCAC
+ {0x00001410, 0x040F0001}, /*DDR SDRAM Address Control Register */
+#else
+ {0x00001410, 0x040F0000}, /*DDR SDRAM Address Control Register */
+#endif
+
+ {0x00001414, 0x00000000}, /*DDR SDRAM Open Pages Control Register */
+ {0x00001418, 0x00000e00}, /*DDR SDRAM Operation Register */
+ {0x00001420, 0x00000004}, /*DDR SDRAM Extended Mode Register */
+ {0x00001424, 0x0100D1FF}, /*Dunit Control High Register */
+ {0x00001428, 0x000F8830}, /*Dunit Control High Register */
+ {0x0000142C, 0x214C2F38}, /*Dunit Control High Register */
+ {0x0000147C, 0x0000c671},
+
+ {0x000014a8, 0x00000101}, /*2:1 */
+ {0x00020220, 0x00000007},
+
+ {0x00001494, 0x00010000}, /*DDR SDRAM ODT Control (Low) Register */
+ {0x00001498, 0x00000000}, /*DDR SDRAM ODT Control (High) Register */
+ {0x0000149C, 0x00000301}, /*DDR Dunit ODT Control Register */
+
+ {0x000014C0, 0x192424C8}, /* DRAM address and Control Driving Strength */
+ {0x000014C4, 0xEFB24C8}, /* DRAM Data and DQS Driving Strength */
+
+ {0x000200e8, 0x3FFF0E01}, /* DO NOT Modify - Open Mbus Window - 2G - Mbus is required for the training sequence */
+ {0x00020184, 0x3FFFFFE0}, /* DO NOT Modify - Close fast path Window to - 2G */
+
+ {0x0001504, 0x7FFFFFF1}, /* CS0 Size */
+ {0x000150C, 0x00000000}, /* CS1 Size */
+ {0x0001514, 0x00000000}, /* CS2 Size */
+ {0x000151C, 0x00000000}, /* CS3 Size */
+
+ /* {0x00001524, 0x0000C800}, */
+ {0x00001538, 0x0000000b}, /*Read Data Sample Delays Register */
+ {0x0000153C, 0x0000000d}, /*Read Data Ready Delay Register */
+
+ {0x000015D0, 0x00000650}, /*MR0 */
+ {0x000015D4, 0x00000046}, /*MR1 */
+ {0x000015D8, 0x00000010}, /*MR2 */
+ {0x000015DC, 0x00000000}, /*MR3 */
+
+ {0x000015E4, 0x00203c18}, /*ZQC Configuration Register */
+ {0x000015EC, 0xDE000025}, /*DDR PHY */
+ {0x0, 0x0}
+};
+
+MV_DRAM_MC_INIT ddr3_Z1_db_300[MV_MAX_DDR3_STATIC_SIZE] = {
+#ifdef CONFIG_DDR_32BIT
+ {0x00001400, 0x73004C30}, /*DDR SDRAM Configuration Register */
+#else /*CONFIG_DDR_64BIT */
+ {0x00001400, 0x7300CC30}, /*DDR SDRAM Configuration Register */
+ /*{0x00001400, 0x7304CC30}, *//*DDR SDRAM Configuration Register */
+#endif
+ {0x00001404, 0x3630B840}, /*Dunit Control Low Register */
+ {0x00001408, 0x33137663}, /*DDR SDRAM Timing (Low) Register */
+ {0x0000140C, 0x38000C55}, /*DDR SDRAM Timing (High) Register */
+ {0x00001410, 0x040F0000}, /*DDR SDRAM Address Control Register */
+ {0x00001414, 0x00000000}, /*DDR SDRAM Open Pages Control Register */
+ {0x00001418, 0x00000e00}, /*DDR SDRAM Operation Register */
+ {0x0000141C, 0x00000672}, /*DDR SDRAM Mode Register */
+ {0x00001420, 0x00000004}, /*DDR SDRAM Extended Mode Register */
+ {0x00001424, 0x0100F1FF}, /*Dunit Control High Register */
+ {0x00001428, 0x000D6720}, /*Dunit Control High Register */
+ {0x0000142C, 0x014C2F38}, /*Dunit Control High Register */
+ {0x0000147C, 0x00006571},
+
+ {0x00001494, 0x00010000}, /*DDR SDRAM ODT Control (Low) Register */
+ {0x00001498, 0x00000000}, /*DDR SDRAM ODT Control (High) Register */
+ {0x0000149C, 0x00000301}, /*DDR Dunit
ODT Control Register */
+
+ {0x000014C0, 0x192424C8}, /* DRAM address and Control Driving Strength */
+ {0x000014C4, 0xEFB24C8}, /* DRAM Data and DQS Driving Strength */
+
+ {0x000200e8, 0x3FFF0E01}, /* DO NOT Modify - Open Mbus Window - 2G - Mbus is required for the training sequence */
+ {0x00020184, 0x3FFFFFE0}, /* DO NOT Modify - Close fast path Window to - 2G */
+
+ {0x0001504, 0x7FFFFFF1}, /* CS0 Size */
+ {0x000150C, 0x00000000}, /* CS1 Size */
+ {0x0001514, 0x00000000}, /* CS2 Size */
+ {0x000151C, 0x00000000}, /* CS3 Size */
+
+ {0x00001538, 0x00000008}, /*Read Data Sample Delays Register */
+ {0x0000153C, 0x0000000A}, /*Read Data Ready Delay Register */
+
+ {0x000015D0, 0x00000630}, /*MR0 */
+ {0x000015D4, 0x00000046}, /*MR1 */
+ {0x000015D8, 0x00000008}, /*MR2 */
+ {0x000015DC, 0x00000000}, /*MR3 */
+
+ {0x000015E4, 0x00203c18}, /*ZQDS Configuration Register */
+ {0x000015EC, 0xDE000025}, /*DDR PHY */
+
+ {0x0, 0x0}
+};
+
+#endif /* __AXP_MC_STATIC_H */
diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_training_static.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_training_static.h
new file mode 100644
index 000000000..253c11da3
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_training_static.h
@@ -0,0 +1,769 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef __AXP_TRAINING_STATIC_H
+#define __AXP_TRAINING_STATIC_H
+
+/*
+ * STATIC_TRAINING - Set only if static parameters for training are set and
+ * required
+ */
+
+MV_DRAM_TRAINING_INIT ddr3_db_rev2_667[MV_MAX_DDR3_STATIC_SIZE] = {
+ /* Read Leveling */
+ /*PUP RdSampleDly (+CL) Phase RL ADLL value */
+ /*0 */
+ {0x000016A0, 0xC002011A},
+ /*1 */
+ {0x000016A0, 0xC0420100},
+ /*2 */
+ {0x000016A0, 0xC082020A},
+ /*3 */
+ {0x000016A0, 0xC0C20017},
+ /*4 */
+ {0x000016A0, 0xC1020113},
+ /*5 */
+ {0x000016A0, 0xC1420107},
+ /*6 */
+ {0x000016A0, 0xC182011F},
+ /*7 */
+ {0x000016A0, 0xC1C2001C},
+ /*8 */
+ {0x000016A0, 0xC202010D},
+
+ /* Write Leveling */
+ /*0 */
+ {0x000016A0, 0xC0004A06},
+ /*1 */
+ {0x000016A0, 0xC040690D},
+ /*2 */
+ {0x000016A0, 0xC0806A0D},
+ /*3 */
+ {0x000016A0, 0xC0C0A01B},
+ /*4 */
+ {0x000016A0, 0xC1003A01},
+ /*5 */
+ {0x000016A0, 0xC1408113},
+ /*6 */
+ {0x000016A0, 0xC1805609},
+ /*7 */
+ {0x000016A0, 0xC1C04504},
+ /*8 */
+ {0x000016A0, 0xC2009518},
+
+ /*center DQS on read cycle */
+ {0x000016A0, 0xC803000F},
+
+ {0x00001538, 0x0000000B}, /*Read Data Sample Delays Register */
+ {0x0000153C, 0x0000000F}, /*Read Data Ready Delay Register */
+
+ /*init DRAM */
+ {0x00001480, 0x00000001},
+ {0x0, 0x0}
+};
+
+MV_DRAM_TRAINING_INIT ddr3_db_rev2_800[MV_MAX_DDR3_STATIC_SIZE] = {
+ /* Read Leveling */
+ /*PUP RdSampleDly (+CL) Phase RL ADLL value */
+ /*0 */
+ {0x000016A0, 0xC0020301},
+ /*1 */
+ {0x000016A0, 0xC0420202},
+ /*2 */
+ {0x000016A0, 0xC0820314},
+ /*3 */
+ {0x000016A0, 0xC0C20117},
+ /*4 */
+ {0x000016A0, 0xC1020219},
+ /*5 */
+ {0x000016A0, 0xC142020B},
+ /*6 */
+ {0x000016A0, 0xC182030A},
+ /*7 */
+ {0x000016A0, 0xC1C2011D},
+ /*8 */
+ {0x000016A0, 0xC2020212},
+
+ /* Write Leveling */
+ /*0 */
+ {0x000016A0, 0xC0007A12},
+ /*1 */
+
{0x000016A0, 0xC0408D16}, + /*2 */ + {0x000016A0, 0xC0809E1B}, + /*3 */ + {0x000016A0, 0xC0C0AC1F}, + /*4 */ + {0x000016A0, 0xC1005E0A}, + /*5 */ + {0x000016A0, 0xC140A91D}, + /*6 */ + {0x000016A0, 0xC1808E17}, + /*7 */ + {0x000016A0, 0xC1C05509}, + /*8 */ + {0x000016A0, 0xC2003A01}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000B}, + + {0x00001538, 0x0000000D}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x00000011}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_db_400[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 2 4 15 */ + {0x000016A0, 0xC002010C}, + /*1 2 4 2 */ + {0x000016A0, 0xC042001C}, + /*2 2 4 27 */ + {0x000016A0, 0xC0820115}, + /*3 2 4 0 */ + {0x000016A0, 0xC0C20019}, + /*4 2 4 13 */ + {0x000016A0, 0xC1020108}, + /*5 2 4 5 */ + {0x000016A0, 0xC1420100}, + /*6 2 4 19 */ + {0x000016A0, 0xC1820111}, + /*7 2 4 0 */ + {0x000016A0, 0xC1C2001B}, + /*8 2 4 10 */ + /*{0x000016A0, 0xC2020117}, */ + {0x000016A0, 0xC202010C}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC0005508}, + /*1 */ + {0x000016A0, 0xC0409819}, + /*2 */ + {0x000016A0, 0xC080650C}, + /*3 */ + {0x000016A0, 0xC0C0700F}, + /*4 */ + {0x000016A0, 0xC1004103}, + /*5 */ + {0x000016A0, 0xC140A81D}, + /*6 */ + {0x000016A0, 0xC180650C}, + /*7 */ + {0x000016A0, 0xC1C08013}, + /*8 */ + {0x000016A0, 0xC2005508}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x00000008}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000A}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_db_533[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 2 4 15 */ + {0x000016A0, 0xC002040C}, + /*1 2 4 2 */ + {0x000016A0, 0xC0420117}, + /*2 2 4 27 */ + {0x000016A0, 0xC082041B}, + /*3 2 4 0 */ + {0x000016A0, 0xC0C20117}, + /*4 2 4 13 */ + {0x000016A0, 0xC102040A}, + /*5 2 4 5 */ + {0x000016A0, 0xC1420117}, + /*6 2 4 19 */ + {0x000016A0, 0xC1820419}, + /*7 2 4 0 */ + {0x000016A0, 0xC1C20117}, + /*8 2 4 10 */ + {0x000016A0, 0xC2020117}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC0008113}, + /*1 */ + {0x000016A0, 0xC0404504}, + /*2 */ + {0x000016A0, 0xC0808514}, + /*3 */ + {0x000016A0, 0xC0C09418}, + /*4 */ + {0x000016A0, 0xC1006D0E}, + /*5 */ + {0x000016A0, 0xC1405508}, + /*6 */ + {0x000016A0, 0xC1807D12}, + /*7 */ + {0x000016A0, 0xC1C0b01F}, + /*8 */ + {0x000016A0, 0xC2005D0A}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x00000008}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000A}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_db_600[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 2 3 1 */ + {0x000016A0, 0xC0020104}, + /*1 2 2 6 */ + {0x000016A0, 0xC0420010}, + /*2 2 3 16 */ + {0x000016A0, 0xC0820112}, + /*3 2 1 26 */ + {0x000016A0, 0xC0C20009}, + /*4 2 2 29 */ + {0x000016A0, 0xC102001F}, + /*5 2 2 13 */ + {0x000016A0, 0xC1420014}, + /*6 2 3 6 */ + {0x000016A0, 0xC1820109}, + /*7 2 1 31 */ + {0x000016A0, 0xC1C2000C}, + /*8 2 2 22 */ + {0x000016A0, 0xC2020112}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC0009919}, + /*1 */ + {0x000016A0, 0xC0405508}, + /*2 */ + {0x000016A0, 0xC0809919}, + /*3 */ + 
{0x000016A0, 0xC0C09C1A}, + /*4 */ + {0x000016A0, 0xC1008113}, + /*5 */ + {0x000016A0, 0xC140650C}, + /*6 */ + {0x000016A0, 0xC1809518}, + /*7 */ + {0x000016A0, 0xC1C04103}, + /*8 */ + {0x000016A0, 0xC2006D0E}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x0000000B}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000F}, /*Read Data Ready Delay Register */ + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_db_667[MV_MAX_DDR3_STATIC_SIZE] = { + + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 2 3 1 */ + {0x000016A0, 0xC0020103}, + /*1 2 2 6 */ + {0x000016A0, 0xC0420012}, + /*2 2 3 16 */ + {0x000016A0, 0xC0820113}, + /*3 2 1 26 */ + {0x000016A0, 0xC0C20012}, + /*4 2 2 29 */ + {0x000016A0, 0xC1020100}, + /*5 2 2 13 */ + {0x000016A0, 0xC1420016}, + /*6 2 3 6 */ + {0x000016A0, 0xC1820109}, + /*7 2 1 31 */ + {0x000016A0, 0xC1C20010}, + /*8 2 2 22 */ + {0x000016A0, 0xC2020112}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC000b11F}, + /*1 */ + {0x000016A0, 0xC040690D}, + /*2 */ + {0x000016A0, 0xC0803600}, + /*3 */ + {0x000016A0, 0xC0C0a81D}, + /*4 */ + {0x000016A0, 0xC1009919}, + /*5 */ + {0x000016A0, 0xC1407911}, + /*6 */ + {0x000016A0, 0xC180ad1e}, + /*7 */ + {0x000016A0, 0xC1C04d06}, + /*8 */ + {0x000016A0, 0xC2008514}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x0000000B}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000F}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_db_800[MV_MAX_DDR3_STATIC_SIZE] = { + + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 2 3 1 */ + {0x000016A0, 0xC0020213}, + /*1 2 2 6 */ + {0x000016A0, 0xC0420108}, + /*2 2 3 16 */ + {0x000016A0, 0xC0820210}, + /*3 2 1 26 */ + {0x000016A0, 0xC0C20108}, + /*4 2 2 29 */ + {0x000016A0, 0xC102011A}, + /*5 2 2 13 */ + {0x000016A0, 0xC1420300}, + /*6 2 3 6 */ + {0x000016A0, 0xC1820204}, + /*7 2 1 31 */ + {0x000016A0, 0xC1C20106}, + /*8 2 2 22 */ + {0x000016A0, 0xC2020112}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC000620B}, + /*1 */ + {0x000016A0, 0xC0408D16}, + /*2 */ + {0x000016A0, 0xC0806A0D}, + /*3 */ + {0x000016A0, 0xC0C03D02}, + /*4 */ + {0x000016A0, 0xC1004a05}, + /*5 */ + {0x000016A0, 0xC140A11B}, + /*6 */ + {0x000016A0, 0xC1805E0A}, + /*7 */ + {0x000016A0, 0xC1C06D0E}, + /*8 */ + {0x000016A0, 0xC200AD1E}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x0000000C}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000E}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_rd_667_0[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 */ + {0x000016A0, 0xC002010E}, + /*1 */ + {0x000016A0, 0xC042001E}, + /*2 */ + {0x000016A0, 0xC0820118}, + /*3 */ + {0x000016A0, 0xC0C2001E}, + /*4 */ + {0x000016A0, 0xC102010C}, + /*5 */ + {0x000016A0, 0xC1420102}, + /*6 */ + {0x000016A0, 0xC1820111}, + /*7 */ + {0x000016A0, 0xC1C2001C}, + /*8 */ + {0x000016A0, 0xC2020109}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC0003600}, + /*1 */ + {0x000016A0, 0xC040690D}, + /*2 */ + {0x000016A0, 0xC0805207}, + /*3 */ + {0x000016A0, 0xC0C0A81D}, + /*4 */ + {0x000016A0, 0xC1009919}, + /*5 */ + {0x000016A0, 0xC1407911}, + /*6 */ + {0x000016A0, 0xC1803E02}, + /*7 */ + {0x000016A0, 
0xC1C05107}, + /*8 */ + {0x000016A0, 0xC2008113}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x0000000B}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000F}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_rd_667_1[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 */ + {0x000016A0, 0xC0020106}, + /*1 */ + {0x000016A0, 0xC0420016}, + /*2 */ + {0x000016A0, 0xC0820117}, + /*3 */ + {0x000016A0, 0xC0C2000F}, + /*4 */ + {0x000016A0, 0xC1020105}, + /*5 */ + {0x000016A0, 0xC142001B}, + /*6 */ + {0x000016A0, 0xC182010C}, + /*7 */ + {0x000016A0, 0xC1C20011}, + /*8 */ + {0x000016A0, 0xC2020101}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC0003600}, + /*1 */ + {0x000016A0, 0xC0406D0E}, + /*2 */ + {0x000016A0, 0xC0803600}, + /*3 */ + {0x000016A0, 0xC0C04504}, + /*4 */ + {0x000016A0, 0xC1009919}, + /*5 */ + {0x000016A0, 0xC1407911}, + /*6 */ + {0x000016A0, 0xC1803600}, + /*7 */ + {0x000016A0, 0xC1C0610B}, + /*8 */ + {0x000016A0, 0xC2008113}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x0000000B}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000F}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_rd_667_2[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 */ + {0x000016A0, 0xC002010C}, + /*1 */ + {0x000016A0, 0xC042001B}, + /*2 */ + {0x000016A0, 0xC082011D}, + /*3 */ + {0x000016A0, 0xC0C20015}, + /*4 */ + {0x000016A0, 0xC102010B}, + /*5 */ + {0x000016A0, 0xC1420101}, + /*6 */ + {0x000016A0, 0xC1820113}, + /*7 */ + {0x000016A0, 0xC1C20017}, + /*8 */ + {0x000016A0, 0xC2020107}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC0003600}, + /*1 */ + {0x000016A0, 0xC0406D0E}, + /*2 */ + {0x000016A0, 0xC0803600}, + /*3 */ + {0x000016A0, 0xC0C04504}, + /*4 */ + {0x000016A0, 0xC1009919}, + /*5 */ + {0x000016A0, 0xC1407911}, + /*6 */ + {0x000016A0, 0xC180B11F}, + /*7 */ + {0x000016A0, 0xC1C0610B}, + /*8 */ + {0x000016A0, 0xC2008113}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x0000000B}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000F}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_db_667_M[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /* CS 0 */ + /*0 2 3 1 */ + {0x000016A0, 0xC0020103}, + /*1 2 2 6 */ + {0x000016A0, 0xC0420012}, + /*2 2 3 16 */ + {0x000016A0, 0xC0820113}, + /*3 2 1 26 */ + {0x000016A0, 0xC0C20012}, + /*4 2 2 29 */ + {0x000016A0, 0xC1020100}, + /*5 2 2 13 */ + {0x000016A0, 0xC1420016}, + /*6 2 3 6 */ + {0x000016A0, 0xC1820109}, + /*7 2 1 31 */ + {0x000016A0, 0xC1C20010}, + /*8 2 2 22 */ + {0x000016A0, 0xC2020112}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC000b11F}, + /*1 */ + {0x000016A0, 0xC040690D}, + /*2 */ + {0x000016A0, 0xC0803600}, + /*3 */ + {0x000016A0, 0xC0C0a81D}, + /*4 */ + {0x000016A0, 0xC1009919}, + /*5 */ + {0x000016A0, 0xC1407911}, + /*6 */ + {0x000016A0, 0xC180ad1e}, + /*7 */ + {0x000016A0, 0xC1C04d06}, + /*8 */ + {0x000016A0, 0xC2008514}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + /* CS 1 */ + + {0x000016A0, 0xC0060103}, + /*1 2 2 6 */ + {0x000016A0, 0xC0460012}, + /*2 2 3 16 */ + 
{0x000016A0, 0xC0860113}, + /*3 2 1 26 */ + {0x000016A0, 0xC0C60012}, + /*4 2 2 29 */ + {0x000016A0, 0xC1060100}, + /*5 2 2 13 */ + {0x000016A0, 0xC1460016}, + /*6 2 3 6 */ + {0x000016A0, 0xC1860109}, + /*7 2 1 31 */ + {0x000016A0, 0xC1C60010}, + /*8 2 2 22 */ + {0x000016A0, 0xC2060112}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC004b11F}, + /*1 */ + {0x000016A0, 0xC044690D}, + /*2 */ + {0x000016A0, 0xC0843600}, + /*3 */ + {0x000016A0, 0xC0C4a81D}, + /*4 */ + {0x000016A0, 0xC1049919}, + /*5 */ + {0x000016A0, 0xC1447911}, + /*6 */ + {0x000016A0, 0xC184ad1e}, + /*7 */ + {0x000016A0, 0xC1C44d06}, + /*8 */ + {0x000016A0, 0xC2048514}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC807000F}, + + /* Both CS */ + + {0x00001538, 0x00000B0B}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x00000F0F}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_rd_667_3[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 */ + {0x000016A0, 0xC0020118}, + /*1 */ + {0x000016A0, 0xC0420108}, + /*2 */ + {0x000016A0, 0xC0820202}, + /*3 */ + {0x000016A0, 0xC0C20108}, + /*4 */ + {0x000016A0, 0xC1020117}, + /*5 */ + {0x000016A0, 0xC142010C}, + /*6 */ + {0x000016A0, 0xC182011B}, + /*7 */ + {0x000016A0, 0xC1C20107}, + /*8 */ + {0x000016A0, 0xC2020113}, + + /* Write Leveling */ + /*0 */ + {0x000016A0, 0xC0003600}, + /*1 */ + {0x000016A0, 0xC0406D0E}, + /*2 */ + {0x000016A0, 0xC0805207}, + /*3 */ + {0x000016A0, 0xC0C0A81D}, + /*4 */ + {0x000016A0, 0xC1009919}, + /*5 */ + {0x000016A0, 0xC1407911}, + /*6 */ + {0x000016A0, 0xC1803E02}, + /*7 */ + {0x000016A0, 0xC1C04D06}, + /*8 */ + {0x000016A0, 0xC2008113}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + + {0x00001538, 0x0000000B}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000F}, /*Read Data Ready Delay Register */ + + /*init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +MV_DRAM_TRAINING_INIT ddr3_pcac_600[MV_MAX_DDR3_STATIC_SIZE] = { + /* Read Leveling */ + /*PUP RdSampleDly (+CL) Phase RL ADLL value */ + /*0 */ + {0x000016A0, 0xC0020404}, + /* 1 2 2 6 */ + {0x000016A0, 0xC042031E}, + /* 2 2 3 16 */ + {0x000016A0, 0xC0820411}, + /* 3 2 1 26 */ + {0x000016A0, 0xC0C20400}, + /* 4 2 2 29 */ + {0x000016A0, 0xC1020404}, + /* 5 2 2 13 */ + {0x000016A0, 0xC142031D}, + /* 6 2 3 6 */ + {0x000016A0, 0xC182040C}, + /* 7 2 1 31 */ + {0x000016A0, 0xC1C2031B}, + /* 8 2 2 22 */ + {0x000016A0, 0xC2020112}, + + /* Write Leveling */ + /* 0 */ + {0x000016A0, 0xC0004905}, + /* 1 */ + {0x000016A0, 0xC040A81D}, + /* 2 */ + {0x000016A0, 0xC0804504}, + /* 3 */ + {0x000016A0, 0xC0C08013}, + /* 4 */ + {0x000016A0, 0xC1004504}, + /* 5 */ + {0x000016A0, 0xC140A81D}, + /* 6 */ + {0x000016A0, 0xC1805909}, + /* 7 */ + {0x000016A0, 0xC1C09418}, + /* 8 */ + {0x000016A0, 0xC2006D0E}, + + /*center DQS on read cycle */ + {0x000016A0, 0xC803000F}, + {0x00001538, 0x00000009}, /*Read Data Sample Delays Register */ + {0x0000153C, 0x0000000D}, /*Read Data Ready Delay Register */ + /* init DRAM */ + {0x00001480, 0x00000001}, + {0x0, 0x0} +}; + +#endif /* __AXP_TRAINING_STATIC_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_vars.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_vars.h new file mode 100644 index 000000000..f00e7e6a5 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_axp_vars.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#ifndef __AXP_VARS_H +#define __AXP_VARS_H + +#include "ddr3_axp_config.h" +#include "ddr3_axp_mc_static.h" +#include "ddr3_axp_training_static.h" + +MV_DRAM_MODES ddr_modes[MV_DDR3_MODES_NUMBER] = { + /* Conf name CPUFreq FabFreq Chip ID Chip/Board MC regs Training Values */ + /* db board values: */ + {"db_800-400", 0xA, 0x5, 0x0, A0, ddr3_A0_db_400, NULL}, + {"db_1200-300", 0x2, 0xC, 0x0, A0, ddr3_A0_db_400, NULL}, + {"db_1200-600", 0x2, 0x5, 0x0, A0, NULL, NULL}, + {"db_1333-667", 0x3, 0x5, 0x0, A0, ddr3_A0_db_667, ddr3_db_rev2_667}, + {"db_1600-800", 0xB, 0x5, 0x0, A0, ddr3_A0_db_667, ddr3_db_rev2_800}, + {"amc_1333-667", 0x3, 0x5, 0x0, A0_AMC, ddr3_A0_AMC_667, NULL}, + {"db_667-667", 0x9, 0x13, 0x0, Z1, ddr3_Z1_db_600, ddr3_db_667}, + {"db_800-400", 0xA, 0x1, 0x0, Z1, ddr3_Z1_db_300, ddr3_db_400}, + {"db_1066-533", 0x1, 0x1, 0x0, Z1, ddr3_Z1_db_300, ddr3_db_533}, + {"db_1200-300", 0x2, 0xC, 0x0, Z1, ddr3_Z1_db_300, ddr3_db_667}, + {"db_1200-600", 0x2, 0x5, 0x0, Z1, ddr3_Z1_db_600, NULL}, + {"db_1333-333", 0x3, 0xC, 0x0, Z1, ddr3_Z1_db_300, ddr3_db_400}, + {"db_1333-667", 0x3, 0x5, 0x0, Z1, ddr3_Z1_db_600, ddr3_db_667}, + /* pcac board values (Z1 device): */ + {"pcac_1200-600", 0x2, 0x5, 0x0, Z1_PCAC, ddr3_Z1_db_600, + ddr3_pcac_600}, + /* rd board values (Z1 device): */ + {"rd_667_0", 0x3, 0x5, 0x0, Z1_RD_SLED, ddr3_Z1_db_600, ddr3_rd_667_0}, + {"rd_667_1", 0x3, 0x5, 0x1, Z1_RD_SLED, ddr3_Z1_db_600, ddr3_rd_667_1}, + {"rd_667_2", 0x3, 0x5, 0x2, Z1_RD_SLED, ddr3_Z1_db_600, ddr3_rd_667_2}, + {"rd_667_3", 0x3, 0x5, 0x3, Z1_RD_SLED, ddr3_Z1_db_600, ddr3_rd_667_3} +}; + +/* ODT settings - if needed update the following tables: (ODT_OPT - represents the CS configuration bitmap) */ + +u16 odt_static[ODT_OPT][MAX_CS] = { /* NearEnd/FarEnd */ + {0, 0, 0, 0}, /* 0000 0/0 - Not supported */ + {ODT40, 0, 0, 0}, /* 0001 0/1 */ + {0, 0, 0, 0}, /* 0010 0/0 - Not supported */ + {ODT40, ODT40, 0, 0}, /* 0011 0/2 */ + {0, 0, ODT40, 0}, /* 0100 1/0 */ + {ODT30, 0, ODT30, 0}, /* 0101 1/1 */ + {0, 0, 0, 0}, /* 0110 0/0 - Not supported */ + {ODT120, ODT20, ODT20, 0}, /* 0111 1/2 */ + {0, 0, 0, 0}, /* 1000 0/0 - Not supported */ + {0, 0, 0, 0}, /* 1001 0/0 - Not supported */ + {0, 0, 0, 0}, /* 1010 0/0 - Not supported */ + {0, 0, 0, 0}, /* 1011 0/0 - Not supported */ + {0, 0, ODT40, 0}, /* 1100 2/0 */ + {ODT20, 0, ODT120, ODT20}, /* 1101 2/1 */ + {0, 0, 0, 0}, /* 1110 0/0 - Not supported */ + {ODT120, ODT30, ODT120, ODT30} /* 1111 2/2 */ +}; + +u16 odt_dynamic[ODT_OPT][MAX_CS] = { /* NearEnd/FarEnd */ + {0, 0, 0, 0}, /* 0000 0/0 */ + {0, 0, 0, 0}, /* 0001 0/1 */ + {0, 0, 0, 0}, /* 0010 0/0 - Not supported */ + {0, 0, 0, 0}, /* 0011 0/2 */ + {0, 0, 0, 0}, /* 0100 1/0 */ + {ODT120D, 0, ODT120D, 0}, /* 0101 1/1 */ + {0, 0, 0, 0}, /* 0110 0/0 - Not supported */ + {0, 0, ODT120D, 0}, /* 0111 1/2 */ + {0, 0, 0, 0}, /* 1000 0/0 - Not supported */ + {0, 0, 0, 0}, /* 1001 0/0 - Not supported */ + {0, 0, 0, 0}, /* 1010 0/0 - Not supported */ + {0, 0, 0, 0}, /* 1011 0/0 - Not supported */ + {0, 0, 0, 0}, /* 1100 2/0 */ + {ODT120D, 0, 0, 0}, /* 1101 2/1 */ + {0, 0, 0, 0}, /* 1110 0/0 - Not supported */ + {0, 0, 0, 0} /* 1111 2/2 */ +}; + +u32 odt_config[ODT_OPT] = { + 0, 0x00010000, 0, 0x00030000, 0x04000000, 0x05050104, 0, 0x07430340, 0, + 0, 0, 0, + 0x30000, 0x1C0D100C, 0, 0x3CC330C0 +}; + +/* + * User can manually set SPD values (in case SPD is not available on + * DIMM/System). 
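+ * Note: the byte layout of spd_data[] follows the JEDEC DDR3 SPD
+ * definition; e.g. byte 2 (0x0B) is the key byte identifying DDR3 SDRAM.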
+ * SPD Values can simplify calculating the DUNIT registers values + */ +u8 spd_data[SPD_SIZE] = { + /* AXP DB Board DIMM SPD Values - manually set */ + 0x92, 0x10, 0x0B, 0x2, 0x3, 0x19, 0x0, 0x9, 0x09, 0x52, 0x1, 0x8, 0x0C, + 0x0, 0x7E, 0x0, 0x69, 0x78, + 0x69, 0x30, 0x69, 0x11, 0x20, 0x89, 0x0, 0x5, 0x3C, 0x3C, 0x0, 0xF0, + 0x82, 0x5, 0x80, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0F, 0x1, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x80, 0x2C, 0x1, 0x10, 0x23, 0x35, 0x28, 0xEB, 0xCA, 0x19, 0x8F +}; + +/* + * Controller Specific configurations Starts Here - DO NOT MODIFY + */ + +/* Frequency - values are 1/HCLK in ps */ +u32 cpu_fab_clk_to_hclk[FAB_OPT][CLK_CPU] = +/* CPU Frequency: + 1000 1066 1200 1333 1500 1666 1800 2000 600 667 800 1600 Fabric */ +{ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 3000, 2500, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 4500, 3750, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 2500, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {4000, 3750, 3333, 3000, 2666, 2400, 0, 0, 0, 0, 5000, 2500}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 3000, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {2500, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 5000, 0, 4000, 0, 0, 0, 0, 0, 0, 3750}, + {5000, 0, 0, 3750, 3333, 0, 0, 0, 0, 0, 0, 3125}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 3330, 3000, 0, 0, 0, 0, 0, 0, 0, 2500}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3750}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 3000, 2500, 0}, + {3000, 0, 2500, 0, 0, 0, 0, 0, 0, 0, 3750, 0} +}; + +u32 cpu_ddr_ratios[FAB_OPT][CLK_CPU] = +/* CPU Frequency: + 1000 1066 1200 1333 1500 1666 1800 2000 600 667 800 1600 Fabric */ +{ + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, DDR_333, DDR_400, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, DDR_444, DDR_533, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, DDR_400, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {DDR_500, DDR_533, DDR_600, DDR_666, DDR_750, DDR_833, 0, 0, 0, 0, + DDR_400, DDR_800}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, DDR_333, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {DDR_400, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, DDR_400, 0, DDR_500, 0, 0, 0, 0, 0, 0, DDR_533}, + {DDR_400, 0, 0, DDR_533, DDR_600, 0, 0, 0, 0, 0, 0, DDR_640}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, DDR_300, DDR_333, 0, 0, 0, 0, 0, 0, 0, DDR_400}, + {0, 0, 0, 0, 0, 0, DDR_600, DDR_666, 0, 0, 0, DDR_533}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0, DDR_666, DDR_800, 0}, + {DDR_666, 0, DDR_800, 0, 0, 0, 0, 0, 0, 0, DDR_533, 0} +}; + +u8 div_ratio1to1[CLK_VCO][CLK_DDR] = +/* DDR Frequency: + 100 300 360 400 444 500 533 600 666 750 800 833 */ +{ {0xA, 3, 0, 3, 0, 2, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1000 */ +{0xB, 3, 0, 3, 0, 0, 2, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1066 */ +{0xC, 4, 0, 3, 0, 0, 0, 2, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1200 */ +{0xD, 4, 0, 4, 0, 0, 0, 
0, 2, 0, 0, 0}, /* 1:1 CLK_CPU_1333 */ +{0xF, 5, 0, 4, 0, 3, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1500 */ +{0x11, 5, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1666 */ +{0x12, 6, 5, 4, 0, 0, 0, 3, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1800 */ +{0x14, 7, 0, 5, 0, 4, 0, 0, 3, 0, 0, 0}, /* 1:1 CLK_CPU_2000 */ +{0x6, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_600 */ +{0x6, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_667 */ +{0x8, 2, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_800 */ +{0x10, 5, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1600 */ +{0x14, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1000 VCO_2000 */ +{0x15, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1066 VCO_2133 */ +{0x18, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1200 VCO_2400 */ +{0x1A, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1333 VCO_2666 */ +{0x1E, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1500 VCO_3000 */ +{0x21, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1666 VCO_3333 */ +{0x24, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_1800 VCO_3600 */ +{0x28, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_2000 VCO_4000 */ +{0xC, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_600 VCO_1200 */ +{0xD, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_667 VCO_1333 */ +{0x10, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0}, /* 1:1 CLK_CPU_800 VCO_1600 */ +{0x20, 10, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0} /* 1:1 CLK_CPU_1600 VCO_3200 */ +}; + +u8 div_ratio2to1[CLK_VCO][CLK_DDR] = +/* DDR Frequency: + 100 300 360 400 444 500 533 600 666 750 800 833 */ +{ {0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0}, /* 2:1 CLK_CPU_1000 */ +{0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_1066 */ +{0, 0, 0, 3, 5, 0, 0, 2, 0, 0, 3, 3}, /* 2:1 CLK_CPU_1200 */ +{0, 0, 0, 0, 0, 0, 5, 0, 2, 0, 3, 0}, /* 2:1 CLK_CPU_1333 */ +{0, 0, 0, 0, 0, 3, 0, 5, 0, 2, 0, 0}, /* 2:1 CLK_CPU_1500 */ +{0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 2}, /* 2:1 CLK_CPU_1666 */ +{0, 0, 0, 0, 0, 0, 0, 3, 0, 5, 0, 0}, /* 2:1 CLK_CPU_1800 */ +{0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 5}, /* 2:1 CLK_CPU_2000 */ +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_600 */ +{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0}, /* 2:1 CLK_CPU_667 */ +{0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0}, /* 2:1 CLK_CPU_800 */ +{0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 2, 0}, /* 2:1 CLK_CPU_1600 */ +{0, 0, 0, 5, 0, 0, 0, 0, 3, 0, 0, 0}, /* 2:1 CLK_CPU_1000 VCO_2000 */ +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_1066 VCO_2133 */ +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0}, /* 2:1 CLK_CPU_1200 VCO_2400 */ +{0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_1333 VCO_2666 */ +{0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0}, /* 2:1 CLK_CPU_1500 VCO_3000 */ +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_1666 VCO_3333 */ +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_1800 VCO_3600 */ +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_2000 VCO_4000 */ +{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_600 VCO_1200 */ +{0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_667 VCO_1333 */ +{0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0}, /* 2:1 CLK_CPU_800 VCO_1600 */ +{0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0} /* 2:1 CLK_CPU_1600 VCO_3200 */ +}; + +#endif /* __AXP_VARS_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_dfs.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_dfs.c new file mode 100644 index 000000000..2a4596680 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_dfs.c @@ -0,0 +1,1552 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> +#include <linux/delay.h> + +#include "ddr3_hw_training.h" + +/* + * Debug + */ +#define DEBUG_DFS_C(s, d, l) \ + DEBUG_DFS_S(s); DEBUG_DFS_D(d, l); DEBUG_DFS_S("\n") +#define DEBUG_DFS_FULL_C(s, d, l) \ + DEBUG_DFS_FULL_S(s); DEBUG_DFS_FULL_D(d, l); DEBUG_DFS_FULL_S("\n") + +#ifdef MV_DEBUG_DFS +#define DEBUG_DFS_S(s) puts(s) +#define DEBUG_DFS_D(d, l) printf("%x", d) +#else +#define DEBUG_DFS_S(s) +#define DEBUG_DFS_D(d, l) +#endif + +#ifdef MV_DEBUG_DFS_FULL +#define DEBUG_DFS_FULL_S(s) puts(s) +#define DEBUG_DFS_FULL_D(d, l) printf("%x", d) +#else +#define DEBUG_DFS_FULL_S(s) +#define DEBUG_DFS_FULL_D(d, l) +#endif + +#if defined(MV88F672X) +extern u8 div_ratio[CLK_VCO][CLK_DDR]; +extern void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps); +#else +extern u16 odt_dynamic[ODT_OPT][MAX_CS]; +extern u8 div_ratio1to1[CLK_VCO][CLK_DDR]; +extern u8 div_ratio2to1[CLK_VCO][CLK_DDR]; +#endif +extern u16 odt_static[ODT_OPT][MAX_CS]; + +extern u32 cpu_fab_clk_to_hclk[FAB_OPT][CLK_CPU]; + +extern u32 ddr3_get_vco_freq(void); + +u32 ddr3_get_freq_parameter(u32 target_freq, int ratio_2to1); + +#ifdef MV_DEBUG_DFS +static inline void dfs_reg_write(u32 addr, u32 val) +{ + printf("\n write reg 0x%08x = 0x%08x", addr, val); + writel(val, INTER_REGS_BASE + addr); +} +#else +static inline void dfs_reg_write(u32 addr, u32 val) +{ + writel(val, INTER_REGS_BASE + addr); +} +#endif + +static void wait_refresh_op_complete(void) +{ + u32 reg; + + /* Poll - Wait for Refresh operation completion */ + do { + reg = reg_read(REG_SDRAM_OPERATION_ADDR) & + REG_SDRAM_OPERATION_CMD_RFRS_DONE; + } while (reg); /* Wait for '0' */ +} + +/* + * Name: ddr3_get_freq_parameter + * Desc: Finds CPU/DDR frequency ratio according to Sample@reset and table. 
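+ * (Illustrative: with the Dunit and PHY in 2:1 mode the divider is
+ * looked up as div_ratio2to1[vco_freq][target_freq]; in 1:1 mode
+ * div_ratio1to1[][] is used instead, as the function body below shows.)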
+ * Args: target_freq - target frequency + * Notes: + * Returns: freq_par - the ratio parameter + */ +u32 ddr3_get_freq_parameter(u32 target_freq, int ratio_2to1) +{ + u32 ui_vco_freq, freq_par; + + ui_vco_freq = ddr3_get_vco_freq(); + +#if defined(MV88F672X) + freq_par = div_ratio[ui_vco_freq][target_freq]; +#else + /* Find the ratio between PLL frequency and ddr-clk */ + if (ratio_2to1) + freq_par = div_ratio2to1[ui_vco_freq][target_freq]; + else + freq_par = div_ratio1to1[ui_vco_freq][target_freq]; +#endif + + return freq_par; +} + +/* + * Name: ddr3_dfs_high_2_low + * Desc: + * Args: freq - target frequency + * Notes: + * Returns: MV_OK - success, MV_FAIL - fail + */ +int ddr3_dfs_high_2_low(u32 freq, MV_DRAM_INFO *dram_info) +{ +#if defined(MV88F78X60) || defined(MV88F672X) + /* This Flow is relevant for ArmadaXP A0 */ + u32 reg, freq_par, tmp; + u32 cs = 0; + + DEBUG_DFS_C("DDR3 - DFS - High To Low - Starting DFS procedure to Frequency - ", + freq, 1); + + /* target frequency - 100MHz */ + freq_par = ddr3_get_freq_parameter(freq, 0); + +#if defined(MV88F672X) + u32 hclk; + u32 cpu_freq = ddr3_get_cpu_freq(); + get_target_freq(cpu_freq, &tmp, &hclk); +#endif + + /* Configure - DRAM DLL final state after DFS is complete - Enable */ + reg = reg_read(REG_DFS_ADDR); + /* [0] - DfsDllNextState - Disable */ + reg |= (1 << REG_DFS_DLLNEXTSTATE_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Configure - XBAR Retry response during Block to enable internal + * access - Disable + */ + reg = reg_read(REG_METAL_MASK_ADDR); + /* [0] - RetryMask - Disable */ + reg &= ~(1 << REG_METAL_MASK_RETRY_OFFS); + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + /* Configure - Block new external transactions - Enable */ + reg = reg_read(REG_DFS_ADDR); + reg |= (1 << REG_DFS_BLOCK_OFFS); /* [1] - DfsBlock - Enable */ + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Registered DIMM support */ + if (dram_info->reg_dimm) { + /* + * Configure - Disable Register DIMM CKE Power + * Down mode - CWA_RC + */ + reg = (0x9 & REG_SDRAM_OPERATION_CWA_RC_MASK) << + REG_SDRAM_OPERATION_CWA_RC_OFFS; + /* + * Configure - Disable Register DIMM CKE Power + * Down mode - CWA_DATA + */ + reg |= ((0 & REG_SDRAM_OPERATION_CWA_DATA_MASK) << + REG_SDRAM_OPERATION_CWA_DATA_OFFS); + + /* + * Configure - Disable Register DIMM CKE Power + * Down mode - Set Delay - tMRD + */ + reg |= (0 << REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS); + + /* Configure - Issue CWA command with the above parameters */ + reg |= (REG_SDRAM_OPERATION_CMD_CWA & + ~(0xF << REG_SDRAM_OPERATION_CS_OFFS)); + + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for CWA operation completion */ + do { + reg = reg_read(REG_SDRAM_OPERATION_ADDR) & + (REG_SDRAM_OPERATION_CMD_MASK); + } while (reg); + + /* Configure - Disable outputs floating during Self Refresh */ + reg = reg_read(REG_REGISTERED_DRAM_CTRL_ADDR); + /* [15] - SRFloatEn - Disable */ + reg &= ~(1 << REG_REGISTERED_DRAM_CTRL_SR_FLOAT_OFFS); + /* 0x16D0 - DDR3 Registered DRAM Control */ + dfs_reg_write(REG_REGISTERED_DRAM_CTRL_ADDR, reg); + } + + /* Optional - Configure - DDR3_Rtt_nom_CS# */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + reg = reg_read(REG_DDR3_MR1_CS_ADDR + + (cs << MR_CS_ADDR_OFFS)); + reg &= REG_DDR3_MR1_RTT_MASK; + dfs_reg_write(REG_DDR3_MR1_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + /* Configure - Move DRAM 
into Self Refresh */
+ reg = reg_read(REG_DFS_ADDR);
+ reg |= (1 << REG_DFS_SR_OFFS); /* [2] - DfsSR - Enable */
+ dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */
+
+ /* Poll - Wait for Self Refresh indication */
+ do {
+ reg = ((reg_read(REG_DFS_ADDR)) & (1 << REG_DFS_ATSR_OFFS));
+ } while (reg == 0x0); /* 0x1528 [3] - DfsAtSR - Wait for '1' */
+
+ /* Start of clock change procedure (PLL) */
+#if defined(MV88F672X)
+ /* avantaLP */
+ /* Configure cpupll_clkdiv_reset_mask */
+ reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL0);
+ reg &= CPU_PLL_CLOCK_DIVIDER_CNTRL0_MASK;
+ /* 0xE8264[7:0] 0xff CPU Clock Dividers Reset mask */
+ dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL0, (reg + 0xFF));
+
+ /* Configure cpu_clkdiv_reload_smooth */
+ reg = reg_read(CPU_PLL_CNTRL0);
+ reg &= CPU_PLL_CNTRL0_RELOAD_SMOOTH_MASK;
+ /* 0xE8260 [15:8] 0x2 CPU Clock Dividers Reload Smooth enable */
+ dfs_reg_write(CPU_PLL_CNTRL0,
+ (reg + (2 << CPU_PLL_CNTRL0_RELOAD_SMOOTH_OFFS)));
+
+ /* Configure cpupll_clkdiv_relax_en */
+ reg = reg_read(CPU_PLL_CNTRL0);
+ reg &= CPU_PLL_CNTRL0_RELAX_EN_MASK;
+ /* 0xE8260 [31:24] 0x2 Relax Enable */
+ dfs_reg_write(CPU_PLL_CNTRL0,
+ (reg + (2 << CPU_PLL_CNTRL0_RELAX_EN_OFFS)));
+
+ /* Configure cpupll_clkdiv_ddr_clk_ratio */
+ reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL1);
+ /*
+ * 0xE8268 [13:8] N Set Training clock:
+ * APLL Out Clock (VCO freq) / N = 100 MHz
+ */
+ reg &= CPU_PLL_CLOCK_DIVIDER_CNTRL1_MASK;
+ reg |= (freq_par << 8); /* full Integer ratio from PLL-out to ddr-clk */
+ dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL1, reg);
+
+ /* Configure cpupll_clkdiv_reload_ratio */
+ reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL0);
+ reg &= CPU_PLL_CLOCK_RELOAD_RATIO_MASK;
+ /* 0xE8264 [8]=0x1 CPU Clock Dividers Reload Ratio trigger set */
+ dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL0,
+ (reg + (1 << CPU_PLL_CLOCK_RELOAD_RATIO_OFFS)));
+
+ udelay(1);
+
+ /* Configure cpupll_clkdiv_reload_ratio */
+ reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL0);
+ reg &= CPU_PLL_CLOCK_RELOAD_RATIO_MASK;
+ /* 0xE8264 [8]=0x0 CPU Clock Dividers Reload Ratio trigger clear */
+ dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL0, reg);
+
+ udelay(5);
+
+#else
+ /*
+ * Initial Setup - ensure that the "load new ratio" is clear (bit 24)
+ * and, at the same time, block reassertions of reset [15:8] and
+ * force reserved bits [7:0].
+ */
+ reg = 0x0000FDFF;
+ /* 0x18700 - CPU Div CLK control 0 */
+ dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg);
+
+ /*
+ * RelaX whenever reset is asserted to that channel
+ * (good for any case)
+ */
+ reg = 0x0000FF00;
+ /* 0x18704 - CPU Div CLK control 1 */
+ dfs_reg_write(REG_CPU_DIV_CLK_CTRL_1_ADDR, reg);
+
+ reg = reg_read(REG_CPU_DIV_CLK_CTRL_2_ADDR) &
+ REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK;
+
+ /* full Integer ratio from PLL-out to ddr-clk */
+ reg |= (freq_par << REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS);
+ /* 0x1870C - CPU Div CLK control 3 register */
+ dfs_reg_write(REG_CPU_DIV_CLK_CTRL_2_ADDR, reg);
+
+ /*
+ * Shut off clock enable to the DDRPHY clock channel (this is the "D").
+ * All the rest are kept as is (forced, but could be read-modify-write).
+ * This is done now by RMW above.
+ */
+
+ /* Clock is not shut off gracefully - keep it running */
+ reg = 0x000FFF02;
+ dfs_reg_write(REG_CPU_DIV_CLK_CTRL_4_ADDR, reg);
+
+ /* Wait before replacing the clock on the DDR Phy Channel. */
+ udelay(1);
+
+ /*
+ * This is for triggering the frequency update.
Bit[24] is the + * central control + * bits [23:16] == which channels to change ==2 ==> + * only DDR Phy (smooth transition) + * bits [15:8] == mask reset reassertion due to clock modification + * to these channels. + * bits [7:0] == not in use + */ + reg = 0x0102FDFF; + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + udelay(1); /* Wait 1usec */ + + /* + * Poll Div CLK status 0 register - indication that the clocks + * are active - 0x18718 [8] + */ + do { + reg = (reg_read(REG_CPU_DIV_CLK_STATUS_0_ADDR)) & + (1 << REG_CPU_DIV_CLK_ALL_STABLE_OFFS); + } while (reg == 0); + + /* + * Clean the CTRL0, to be ready for next resets and next requests + * of ratio modifications. + */ + reg = 0x000000FF; + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + udelay(5); +#endif + /* End of clock change procedure (PLL) */ + + /* Configure - Select normal clock for the DDR PHY - Enable */ + reg = reg_read(REG_DRAM_INIT_CTRL_STATUS_ADDR); + /* [16] - ddr_phy_trn_clk_sel - Enable */ + reg |= (1 << REG_DRAM_INIT_CTRL_TRN_CLK_OFFS); + /* 0x18488 - DRAM Init control status register */ + dfs_reg_write(REG_DRAM_INIT_CTRL_STATUS_ADDR, reg); + + /* Configure - Set Correct Ratio - 1:1 */ + /* [15] - Phy2UnitClkRatio = 0 - Set 1:1 Ratio between Dunit and Phy */ + + reg = reg_read(REG_DDR_IO_ADDR) & ~(1 << REG_DDR_IO_CLK_RATIO_OFFS); + dfs_reg_write(REG_DDR_IO_ADDR, reg); /* 0x1524 - DDR IO Register */ + + /* Configure - 2T Mode - Restore original configuration */ + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR); + /* [3:4] 2T - 1T Mode - low freq */ + reg &= ~(REG_DUNIT_CTRL_LOW_2T_MASK << REG_DUNIT_CTRL_LOW_2T_OFFS); + /* 0x1404 - DDR Controller Control Low Register */ + dfs_reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + + /* Configure - Restore CL and CWL - MRS Commands */ + reg = reg_read(REG_DFS_ADDR); + reg &= ~(REG_DFS_CL_NEXT_STATE_MASK << REG_DFS_CL_NEXT_STATE_OFFS); + reg &= ~(REG_DFS_CWL_NEXT_STATE_MASK << REG_DFS_CWL_NEXT_STATE_OFFS); + /* [8] - DfsCLNextState - MRS CL=6 after DFS (due to DLL-off mode) */ + reg |= (0x4 << REG_DFS_CL_NEXT_STATE_OFFS); + /* [12] - DfsCWLNextState - MRS CWL=6 after DFS (due to DLL-off mode) */ + reg |= (0x1 << REG_DFS_CWL_NEXT_STATE_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Poll - Wait for APLL + ADLLs lock on new frequency */ + do { + reg = (reg_read(REG_PHY_LOCK_STATUS_ADDR)) & + REG_PHY_LOCK_APLL_ADLL_STATUS_MASK; + /* 0x1674 [10:0] - Phy lock status Register */ + } while (reg != REG_PHY_LOCK_APLL_ADLL_STATUS_MASK); + + /* Configure - Reset the PHY Read FIFO and Write channels - Set Reset */ + reg = (reg_read(REG_SDRAM_CONFIG_ADDR) & REG_SDRAM_CONFIG_MASK); + /* [30:29] = 0 - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* + * Configure - DRAM Data PHY Read [30], Write [29] path + * reset - Release Reset + */ + reg = (reg_read(REG_SDRAM_CONFIG_ADDR) | ~REG_SDRAM_CONFIG_MASK); + /* [30:29] = '11' - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* Registered DIMM support */ + if (dram_info->reg_dimm) { + /* + * Configure - Change register DRAM operating speed + * (below 400MHz) - CWA_RC + */ + reg = (0xA & REG_SDRAM_OPERATION_CWA_RC_MASK) << + REG_SDRAM_OPERATION_CWA_RC_OFFS; + + /* + * Configure - Change register DRAM operating speed + * (below 400MHz) - CWA_DATA + */ + reg |= ((0x0 & 
REG_SDRAM_OPERATION_CWA_DATA_MASK) << + REG_SDRAM_OPERATION_CWA_DATA_OFFS); + + /* Configure - Set Delay - tSTAB */ + reg |= (0x1 << REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS); + + /* Configure - Issue CWA command with the above parameters */ + reg |= (REG_SDRAM_OPERATION_CMD_CWA & + ~(0xF << REG_SDRAM_OPERATION_CS_OFFS)); + + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for CWA operation completion */ + do { + reg = reg_read(REG_SDRAM_OPERATION_ADDR) & + (REG_SDRAM_OPERATION_CMD_MASK); + } while (reg); + } + + /* Configure - Exit Self Refresh */ + /* [2] - DfsSR */ + reg = (reg_read(REG_DFS_ADDR) & ~(1 << REG_DFS_SR_OFFS)); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Poll - DFS Register - 0x1528 [3] - DfsAtSR - All DRAM devices + * on all ranks are NOT in self refresh mode + */ + do { + reg = ((reg_read(REG_DFS_ADDR)) & (1 << REG_DFS_ATSR_OFFS)); + } while (reg); /* Wait for '0' */ + + /* Configure - Issue Refresh command */ + /* [3-0] = 0x2 - Refresh Command, [11-8] - enabled Cs */ + reg = REG_SDRAM_OPERATION_CMD_RFRS; + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) + reg &= ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + } + + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + /* Configure - Block new external transactions - Disable */ + reg = reg_read(REG_DFS_ADDR); + reg &= ~(1 << REG_DFS_BLOCK_OFFS); /* [1] - DfsBlock - Disable */ + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Configure - XBAR Retry response during Block to enable + * internal access - Disable + */ + reg = reg_read(REG_METAL_MASK_ADDR); + /* [0] - RetryMask - Enable */ + reg |= (1 << REG_METAL_MASK_RETRY_OFFS); + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + /* Configure - Set CL */ + reg = reg_read(REG_DDR3_MR0_CS_ADDR + + (cs << MR_CS_ADDR_OFFS)) & + ~REG_DDR3_MR0_CL_MASK; + tmp = 0x4; /* CL=6 - 0x4 */ + reg |= ((tmp & 0x1) << REG_DDR3_MR0_CL_OFFS); + reg |= ((tmp & 0xE) << REG_DDR3_MR0_CL_HIGH_OFFS); + dfs_reg_write(REG_DDR3_MR0_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + + /* Configure - Set CWL */ + reg = reg_read(REG_DDR3_MR2_CS_ADDR + + (cs << MR_CS_ADDR_OFFS)) + & ~(REG_DDR3_MR2_CWL_MASK << REG_DDR3_MR2_CWL_OFFS); + /* CWL=6 - 0x1 */ + reg |= ((0x1) << REG_DDR3_MR2_CWL_OFFS); + dfs_reg_write(REG_DDR3_MR2_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + DEBUG_DFS_C("DDR3 - DFS - High To Low - Ended successfully - new Frequency - ", + freq, 1); + + return MV_OK;
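/*
 * A minimal sketch (helper name hypothetical, not part of this patch) of
 * the DDR3 MR0 CAS-latency split performed in the loop above: the encoded
 * CL value from ddr3_cl_to_valid_cl() lands in MR0 bit [2] (LSB) and in
 * bits [6:4].
 */
static u32 ddr3_mr0_with_cl(u32 mr0, u32 cl_enc)
{
	mr0 &= ~REG_DDR3_MR0_CL_MASK;
	/* encoded bit 0 -> MR0[2] */
	mr0 |= (cl_enc & 0x1) << REG_DDR3_MR0_CL_OFFS;
	/* encoded bits [3:1] -> MR0[6:4] */
	mr0 |= (cl_enc & 0xE) << REG_DDR3_MR0_CL_HIGH_OFFS;
	return mr0;
}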
+#else + /* This Flow is relevant for Armada370 A0 and ArmadaXP Z1 */ + + u32 reg, freq_par; + u32 cs = 0; + + DEBUG_DFS_C("DDR3 - DFS - High To Low - Starting DFS procedure to Frequency - ", + freq, 1); + + /* target frequency - 100MHz */ + freq_par = ddr3_get_freq_parameter(freq, 0); + + reg = 0x0000FF00; + /* 0x18704 - CPU Div CLK control 1 */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_1_ADDR, reg); + + /* 0x1600 - ODPG Control register */ + reg = reg_read(REG_ODPG_CNTRL_ADDR); + /* [21] = 1 - auto refresh disable */ + reg |= (1 << REG_ODPG_CNTRL_OFFS); + dfs_reg_write(REG_ODPG_CNTRL_ADDR, reg); + + /* 0x1670 - PHY lock mask register */ + reg = reg_read(REG_PHY_LOCK_MASK_ADDR); + reg &= REG_PHY_LOCK_MASK_MASK; /* [11:0] = 0 */ + dfs_reg_write(REG_PHY_LOCK_MASK_ADDR, reg); + + reg = reg_read(REG_DFS_ADDR); /* 0x1528 - DFS register */ + + /* Disable reconfig */ + reg &= ~0x10; /* [4] - Enable reconfig MR registers after DFS_ERG */ + reg |= 0x1; /* [0] - DRAM DLL disabled after DFS */ + + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + reg = reg_read(REG_METAL_MASK_ADDR) & ~(1 << 0); /* [0] - disable */ + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + /* [1] - DFS Block enable */ + reg = reg_read(REG_DFS_ADDR) | (1 << REG_DFS_BLOCK_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* [2] - DFS Self refresh enable */ + reg = reg_read(REG_DFS_ADDR) | (1 << REG_DFS_SR_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Poll DFS Register - 0x1528 [3] - DfsAtSR - + * All DRAM devices on all ranks are in self refresh mode - + * DFS can be executed afterwards + */ + do { + reg = reg_read(REG_DFS_ADDR) & (1 << REG_DFS_ATSR_OFFS); + } while (reg == 0x0); /* Wait for '1' */ + + /* Disable ODT on DLL-off mode */ + dfs_reg_write(REG_SDRAM_ODT_CTRL_HIGH_ADDR, + REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK); + + /* [11:0] = 0 */ + reg = (reg_read(REG_PHY_LOCK_MASK_ADDR) & REG_PHY_LOCK_MASK_MASK); + /* 0x1670 - PHY lock mask register */ + dfs_reg_write(REG_PHY_LOCK_MASK_ADDR, reg); + + /* Add delay between entering SR and start ratio modification */ + udelay(1); + + /* + * Initial Setup - assure that the "load new ratio" is clear (bit 24) + * and at the same time block reassertions of reset [15:8] and + * force reserved bits[7:0]. + */ + reg = 0x0000FDFF; + /* 0x18700 - CPU Div CLK control 0 */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + /* + * RelaX whenever reset is asserted to that channel (good for any case) + */ + reg = 0x0000FF00; + /* 0x18704 - CPU Div CLK control 1 */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_1_ADDR, reg); + + reg = reg_read(REG_CPU_DIV_CLK_CTRL_3_ADDR) & + REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK; + /* Full Integer ratio from PLL-out to ddr-clk */ + reg |= (freq_par << REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS); + /* 0x1870C - CPU Div CLK control 3 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_3_ADDR, reg); + + /* + * Shut off clock enable to the DDRPHY clock channel (this is the "D"). + * All the rest are kept as is (forced, but could be read-modify-write). + * This is done now by RMW above. + */ + + /* Clock is not shut off gracefully - keep it running */ + reg = 0x000FFF02; + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_4_ADDR, reg); + + /* Wait before replacing the clock on the DDR Phy Channel. */ + udelay(1); + + /* + * This is for triggering the frequency update. Bit[24] is the + * central control + * bits [23:16] == which channels to change ==2 ==> + * only DDR Phy (smooth transition) + * bits [15:8] == mask reset reassertion due to clock modification + * to these channels. + * bits [7:0] == not in use + */ + reg = 0x0102FDFF; + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + udelay(1); /* Wait 1usec */ + + /* + * Poll Div CLK status 0 register - indication that the clocks + * are active - 0x18718 [8] + */ + do { + reg = (reg_read(REG_CPU_DIV_CLK_STATUS_0_ADDR)) & + (1 << REG_CPU_DIV_CLK_ALL_STABLE_OFFS); + } while (reg == 0);
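/*
 * A minimal sketch (macro name hypothetical, not part of this driver) of
 * how the 0x0102FDFF trigger word written above packs the fields that the
 * comment describes: load-new-ratio [24], channel select [23:16], reset
 * reassertion mask [15:8] and the reserved byte [7:0].
 */
#define DIV_CLK_TRIGGER(load, chans, rst_mask, resv) \
	(((load) << 24) | ((chans) << 16) | ((rst_mask) << 8) | (resv))
/* DIV_CLK_TRIGGER(1, 0x02, 0xFD, 0xFF) == 0x0102FDFF */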
+ + /* + * Clean the CTRL0, to be ready for next resets and next requests of + * ratio modifications. + */ + reg = 0x000000FF; + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + udelay(5); + + /* Switch HCLK Mux to training clk (100MHz), keep DFS request bit */ + reg = 0x20050000; + /* 0x18488 - DRAM Init control status register */ + dfs_reg_write(REG_DRAM_INIT_CTRL_STATUS_ADDR, reg); + + reg = reg_read(REG_DDR_IO_ADDR) & ~(1 << REG_DDR_IO_CLK_RATIO_OFFS); + /* [15] = 0 - Set 1:1 Ratio between Dunit and Phy */ + dfs_reg_write(REG_DDR_IO_ADDR, reg); /* 0x1524 - DDR IO Register */ + + reg = reg_read(REG_DRAM_PHY_CONFIG_ADDR) & REG_DRAM_PHY_CONFIG_MASK; + /* [31:30] - reset pup data ctrl ADLL */ + /* 0x15EC - DRAM PHY Config register */ + dfs_reg_write(REG_DRAM_PHY_CONFIG_ADDR, reg); + + reg = (reg_read(REG_DRAM_PHY_CONFIG_ADDR) | ~REG_DRAM_PHY_CONFIG_MASK); + /* [31:30] - normal pup data ctrl ADLL */ + /* 0x15EC - DRAM PHY Config register */ + dfs_reg_write(REG_DRAM_PHY_CONFIG_ADDR, reg); + + udelay(1); /* Wait 1usec */ + + /* 0x1404 */ + reg = (reg_read(REG_DUNIT_CTRL_LOW_ADDR) & 0xFFFFFFE7); + dfs_reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + + /* Poll Phy lock status register - APLL lock indication - 0x1674 */ + do { + reg = (reg_read(REG_PHY_LOCK_STATUS_ADDR)) & + REG_PHY_LOCK_STATUS_LOCK_MASK; + } while (reg != REG_PHY_LOCK_STATUS_LOCK_MASK); /* Wait for '0xFFF' */ + + reg = (reg_read(REG_SDRAM_CONFIG_ADDR) & REG_SDRAM_CONFIG_MASK); + /* [30:29] = 0 - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + reg = reg_read(REG_SDRAM_CONFIG_ADDR) | ~REG_SDRAM_CONFIG_MASK; + /* [30:29] = '11' - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + udelay(1000); /* Wait 1msec */ + + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + /* Config CL and CWL with MR0 and MR2 registers */ + reg = reg_read(REG_DDR3_MR0_ADDR); + reg &= ~0x74; /* CL [3:0]; [6:4],[2] */ + reg |= (1 << 5); /* CL = 4, CAS is 6 */ + dfs_reg_write(REG_DDR3_MR0_ADDR, reg); + reg = REG_SDRAM_OPERATION_CMD_MR0 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + reg = reg_read(REG_DDR3_MR2_ADDR); + reg &= ~0x38; /* CWL [5:3] */ + reg |= (1 << 3); /* CWL = 1, CWL is 6 */ + dfs_reg_write(REG_DDR3_MR2_ADDR, reg); + + reg = REG_SDRAM_OPERATION_CMD_MR2 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + /* Set current rd_sample_delay */ + reg = reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR); + reg &= ~(REG_READ_DATA_SAMPLE_DELAYS_MASK << + (REG_READ_DATA_SAMPLE_DELAYS_OFFS * cs)); + reg |= (5 << (REG_READ_DATA_SAMPLE_DELAYS_OFFS * cs)); + dfs_reg_write(REG_READ_DATA_SAMPLE_DELAYS_ADDR, reg); + + /* Set current rd_ready_delay */ + reg = reg_read(REG_READ_DATA_READY_DELAYS_ADDR); + reg &= ~(REG_READ_DATA_READY_DELAYS_MASK << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg |= ((6) << (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + dfs_reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg); + } + } + + /* [2] - DFS Self refresh disable */ + reg = reg_read(REG_DFS_ADDR) & ~(1 << REG_DFS_SR_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */
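/*
 * A minimal sketch (helper name hypothetical) of a bounded version of the
 * DfsAtSR polls used throughout this file; the driver as written spins
 * forever if the self-refresh handshake never completes.
 */
static int dfs_wait_atsr(int set, u32 timeout_us)
{
	u32 reg;

	while (timeout_us--) {
		reg = reg_read(REG_DFS_ADDR) & (1 << REG_DFS_ATSR_OFFS);
		if ((set && reg) || (!set && !reg))
			return MV_OK;	/* DfsAtSR reached the expected state */
		udelay(1);
	}
	return MV_FAIL;	/* timed out */
}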
+ + /* [1] - DFS Block disable */ + reg = reg_read(REG_DFS_ADDR) & ~(1 << REG_DFS_BLOCK_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Poll DFS Register - 0x1528 [3] - DfsAtSR - + * All DRAM devices on all ranks are NOT in self refresh mode - + * the DFS procedure is complete + */ + do { + reg = reg_read(REG_DFS_ADDR) & (1 << REG_DFS_ATSR_OFFS); + } while (reg); /* Wait for '0' */ + + reg = (reg_read(REG_METAL_MASK_ADDR) | (1 << 0)); + /* [0] - Enable Dunit to crossbar retry */ + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + /* 0x1600 - ODPG Control register */ + reg = reg_read(REG_ODPG_CNTRL_ADDR); + reg &= ~(1 << REG_ODPG_CNTRL_OFFS); /* [21] = 0 */ + dfs_reg_write(REG_ODPG_CNTRL_ADDR, reg); + + /* 0x1670 - PHY lock mask register */ + reg = reg_read(REG_PHY_LOCK_MASK_ADDR); + reg |= ~REG_PHY_LOCK_MASK_MASK; /* [11:0] = FFF */ + dfs_reg_write(REG_PHY_LOCK_MASK_ADDR, reg); + + DEBUG_DFS_C("DDR3 - DFS - High To Low - Ended successfully - new Frequency - ", + freq, 1); + + return MV_OK; +#endif +}
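/*
 * A minimal sketch of the relationship assumed for the freq_par value
 * used by both DFS flows (function name hypothetical): the parameter
 * behaves as an integer post-divider N chosen so that APLL-out / N
 * gives the target DDR clock, per the "VCO freq / N" comments above.
 */
static u32 dfs_divider_for(u32 apll_out_khz, u32 target_khz)
{
	return apll_out_khz / target_khz;	/* e.g. 2000000 / 100000 = 20 */
}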
+ +/* + * Name: ddr3_dfs_low_2_high + * Desc: Execute the DFS procedure from a low to a high frequency. + * Args: freq - target frequency + * Notes: + * Returns: MV_OK - success, MV_FAIL - fail + */ +int ddr3_dfs_low_2_high(u32 freq, int ratio_2to1, MV_DRAM_INFO *dram_info) +{ +#if defined(MV88F78X60) || defined(MV88F672X) + /* This Flow is relevant for ArmadaXP A0 */ + u32 reg, freq_par, tmp; + u32 cs = 0; + + DEBUG_DFS_C("DDR3 - DFS - Low To High - Starting DFS procedure to Frequency - ", + freq, 1); + + /* target frequency - freq */ + freq_par = ddr3_get_freq_parameter(freq, ratio_2to1); + +#if defined(MV88F672X) + u32 hclk; + u32 cpu_freq = ddr3_get_cpu_freq(); + get_target_freq(cpu_freq, &tmp, &hclk); +#endif + + /* Configure - DRAM DLL final state after DFS is complete - Enable */ + reg = reg_read(REG_DFS_ADDR); + /* [0] - DfsDllNextState - Enable */ + reg &= ~(1 << REG_DFS_DLLNEXTSTATE_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Configure - XBAR Retry response during Block to enable + * internal access - Disable + */ + reg = reg_read(REG_METAL_MASK_ADDR); + /* [0] - RetryMask - Disable */ + reg &= ~(1 << REG_METAL_MASK_RETRY_OFFS); + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + /* Configure - Block new external transactions - Enable */ + reg = reg_read(REG_DFS_ADDR); + reg |= (1 << REG_DFS_BLOCK_OFFS); /* [1] - DfsBlock - Enable */ + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Configure - Move DRAM into Self Refresh */ + reg = reg_read(REG_DFS_ADDR); + reg |= (1 << REG_DFS_SR_OFFS); /* [2] - DfsSR - Enable */ + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Poll - Wait for Self Refresh indication */ + do { + reg = ((reg_read(REG_DFS_ADDR)) & (1 << REG_DFS_ATSR_OFFS)); + } while (reg == 0x0); /* 0x1528 [3] - DfsAtSR - Wait for '1' */ + + /* Start of clock change procedure (PLL) */ +#if defined(MV88F672X) + /* avantaLP */ + /* Configure cpupll_clkdiv_reset_mask */ + reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL0); + reg &= CPU_PLL_CLOCK_DIVIDER_CNTRL0_MASK; + /* 0xE8264[7:0] 0xff CPU Clock Dividers Reset mask */ + dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL0, (reg + 0xFF)); + + /* Configure cpu_clkdiv_reload_smooth */ + reg = reg_read(CPU_PLL_CNTRL0); + reg &= CPU_PLL_CNTRL0_RELOAD_SMOOTH_MASK; + /* 0xE8260 [15:8] 0x2 CPU Clock Dividers Reload Smooth enable */ + dfs_reg_write(CPU_PLL_CNTRL0, + reg + (2 << CPU_PLL_CNTRL0_RELOAD_SMOOTH_OFFS)); + + /* Configure cpupll_clkdiv_relax_en */ + reg = reg_read(CPU_PLL_CNTRL0); + reg &= CPU_PLL_CNTRL0_RELAX_EN_MASK; + /* 0xE8260 [31:24] 0x2 Relax Enable */ + dfs_reg_write(CPU_PLL_CNTRL0, + reg + (2 << CPU_PLL_CNTRL0_RELAX_EN_OFFS)); + + /* Configure cpupll_clkdiv_ddr_clk_ratio */ + reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL1); + /* + * 0xE8268 [13:8] N Set Training clock: + * APLL Out Clock (VCO freq) / N = 100 MHz + */ + reg &= CPU_PLL_CLOCK_DIVIDER_CNTRL1_MASK; + reg |= (freq_par << 8); /* full Integer ratio from PLL-out to ddr-clk */ + dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL1, reg); + /* Configure cpupll_clkdiv_reload_ratio */ + reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL0); + reg &= CPU_PLL_CLOCK_RELOAD_RATIO_MASK; + /* 0xE8264 [8]=0x1 CPU Clock Dividers Reload Ratio trigger set */ + dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL0, + reg + (1 << CPU_PLL_CLOCK_RELOAD_RATIO_OFFS)); + + udelay(1); + + /* Configure cpupll_clkdiv_reload_ratio */ + reg = reg_read(CPU_PLL_CLOCK_DIVIDER_CNTRL0); + reg &= CPU_PLL_CLOCK_RELOAD_RATIO_MASK; + /* 0xE8264 [8]=0x0 CPU Clock Dividers Reload Ratio trigger clear */ + dfs_reg_write(CPU_PLL_CLOCK_DIVIDER_CNTRL0, reg); + + udelay(5); + +#else + /* + * Initial Setup - assure that the "load new ratio" is clear (bit 24) + * and at the same time block reassertions of reset [15:8] + * and force reserved bits[7:0]. + */ + reg = 0x0000FFFF; + + /* 0x18700 - CPU Div CLK control 0 */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + /* + * RelaX whenever reset is asserted to that channel (good for any case) + */ + reg = 0x0000FF00; + /* 0x18704 - CPU Div CLK control 1 */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_1_ADDR, reg); + + reg = reg_read(REG_CPU_DIV_CLK_CTRL_2_ADDR) & + REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK; + reg |= (freq_par << REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS); + /* full Integer ratio from PLL-out to ddr-clk */ + /* 0x1870C - CPU Div CLK control 3 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_2_ADDR, reg); + + /* + * Shut off clock enable to the DDRPHY clock channel (this is the "D"). + * All the rest are kept as is (forced, but could be read-modify-write). + * This is done now by RMW above. + */ + reg = 0x000FFF02; + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_4_ADDR, reg); + + /* Wait before replacing the clock on the DDR Phy Channel. */ + udelay(1); + + reg = 0x0102FDFF; + /* + * This is for triggering the frequency update. Bit[24] is the + * central control + * bits [23:16] == which channels to change ==2 ==> only DDR Phy + * (smooth transition) + * bits [15:8] == mask reset reassertion due to clock modification + * to these channels. + * bits [7:0] == not in use + */ + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + udelay(1); + + /* + * Poll Div CLK status 0 register - indication that the clocks + * are active - 0x18718 [8] + */ + do { + reg = reg_read(REG_CPU_DIV_CLK_STATUS_0_ADDR) & + (1 << REG_CPU_DIV_CLK_ALL_STABLE_OFFS); + } while (reg == 0); + + reg = 0x000000FF; + /* + * Clean the CTRL0, to be ready for next resets and next requests + * of ratio modifications. 
+ */ + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); +#endif + /* End of clock change procedure (PLL) */ + + if (ratio_2to1) { + /* Configure - Select normal clock for the DDR PHY - Disable */ + reg = reg_read(REG_DRAM_INIT_CTRL_STATUS_ADDR); + /* [16] - ddr_phy_trn_clk_sel - Disable */ + reg &= ~(1 << REG_DRAM_INIT_CTRL_TRN_CLK_OFFS); + /* 0x18488 - DRAM Init control status register */ + dfs_reg_write(REG_DRAM_INIT_CTRL_STATUS_ADDR, reg); + } + + /* + * Configure - Set Correct Ratio - according to target ratio + * parameter - 2:1/1:1 + */ + if (ratio_2to1) { + /* + * [15] - Phy2UnitClkRatio = 1 - Set 2:1 Ratio between + * Dunit and Phy + */ + reg = reg_read(REG_DDR_IO_ADDR) | + (1 << REG_DDR_IO_CLK_RATIO_OFFS); + } else { + /* + * [15] - Phy2UnitClkRatio = 0 - Set 1:1 Ratio between + * Dunit and Phy + */ + reg = reg_read(REG_DDR_IO_ADDR) & + ~(1 << REG_DDR_IO_CLK_RATIO_OFFS); + } + dfs_reg_write(REG_DDR_IO_ADDR, reg); /* 0x1524 - DDR IO Register */ + + /* Configure - 2T Mode - Restore original configuration */ + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR); + /* [3:4] 2T - Restore value */ + reg &= ~(REG_DUNIT_CTRL_LOW_2T_MASK << REG_DUNIT_CTRL_LOW_2T_OFFS); + reg |= ((dram_info->mode_2t & REG_DUNIT_CTRL_LOW_2T_MASK) << + REG_DUNIT_CTRL_LOW_2T_OFFS); + /* 0x1404 - DDR Controller Control Low Register */ + dfs_reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + + /* Configure - Restore CL and CWL - MRS Commands */ + reg = reg_read(REG_DFS_ADDR); + reg &= ~(REG_DFS_CL_NEXT_STATE_MASK << REG_DFS_CL_NEXT_STATE_OFFS); + reg &= ~(REG_DFS_CWL_NEXT_STATE_MASK << REG_DFS_CWL_NEXT_STATE_OFFS); + + if (freq == DDR_400) { + if (dram_info->target_frequency == 0x8) + tmp = ddr3_cl_to_valid_cl(5); + else + tmp = ddr3_cl_to_valid_cl(6); + } else { + tmp = ddr3_cl_to_valid_cl(dram_info->cl); + } + + /* [8] - DfsCLNextState */ + reg |= ((tmp & REG_DFS_CL_NEXT_STATE_MASK) << REG_DFS_CL_NEXT_STATE_OFFS); + if (freq == DDR_400) { + /* [12] - DfsCWLNextState */ + reg |= (((0) & REG_DFS_CWL_NEXT_STATE_MASK) << + REG_DFS_CWL_NEXT_STATE_OFFS); + } else { + /* [12] - DfsCWLNextState */ + reg |= (((dram_info->cwl) & REG_DFS_CWL_NEXT_STATE_MASK) << + REG_DFS_CWL_NEXT_STATE_OFFS); + } + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Optional - Configure - DDR3_Rtt_nom_CS# */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + reg = reg_read(REG_DDR3_MR1_CS_ADDR + + (cs << MR_CS_ADDR_OFFS)); + reg &= REG_DDR3_MR1_RTT_MASK; + reg |= odt_static[dram_info->cs_ena][cs]; + dfs_reg_write(REG_DDR3_MR1_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + /* Configure - Reset ADLLs - Set Reset */ + reg = reg_read(REG_DRAM_PHY_CONFIG_ADDR) & REG_DRAM_PHY_CONFIG_MASK; + /* [31:30] - reset pup data ctrl ADLL */ + /* 0x15EC - DRAM PHY Config Register */ + dfs_reg_write(REG_DRAM_PHY_CONFIG_ADDR, reg); + + /* Configure - Reset ADLLs - Release Reset */ + reg = reg_read(REG_DRAM_PHY_CONFIG_ADDR) | ~REG_DRAM_PHY_CONFIG_MASK; + /* [31:30] - normal pup data ctrl ADLL */ + /* 0x15EC - DRAM PHY Config register */ + dfs_reg_write(REG_DRAM_PHY_CONFIG_ADDR, reg); + + /* Poll - Wait for APLL + ADLLs lock on new frequency */ + do { + reg = reg_read(REG_PHY_LOCK_STATUS_ADDR) & + REG_PHY_LOCK_APLL_ADLL_STATUS_MASK; + /* 0x1674 [10:0] - Phy lock status Register */ + } while (reg != REG_PHY_LOCK_APLL_ADLL_STATUS_MASK); + + /* Configure - Reset the PHY SDR clock divider */ + if (ratio_2to1) { + /* Pup Reset Divider B - Set Reset */ + /* [28] - DataPupRdRST = 0 */ 
+ reg = reg_read(REG_SDRAM_CONFIG_ADDR) & + ~(1 << REG_SDRAM_CONFIG_PUPRSTDIV_OFFS); + /* [28] - DataPupRdRST = 1 */ + tmp = reg_read(REG_SDRAM_CONFIG_ADDR) | + (1 << REG_SDRAM_CONFIG_PUPRSTDIV_OFFS); + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* Pup Reset Divider B - Release Reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, tmp); + } + + /* Configure - Reset the PHY Read FIFO and Write channels - Set Reset */ + reg = reg_read(REG_SDRAM_CONFIG_ADDR) & REG_SDRAM_CONFIG_MASK; + /* [30:29] = 0 - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* + * Configure - DRAM Data PHY Read [30], Write [29] path reset - + * Release Reset + */ + reg = reg_read(REG_SDRAM_CONFIG_ADDR) | ~REG_SDRAM_CONFIG_MASK; + /* [30:29] = '11' - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* Registered DIMM support */ + if (dram_info->reg_dimm) { + /* + * Configure - Change register DRAM operating speed + * (DDR3-1333 / DDR3-1600) - CWA_RC + */ + reg = (0xA & REG_SDRAM_OPERATION_CWA_RC_MASK) << + REG_SDRAM_OPERATION_CWA_RC_OFFS; + if (freq <= DDR_400) { + /* + * Configure - Change register DRAM operating speed + * (DDR3-800) - CWA_DATA + */ + reg |= ((0x0 & REG_SDRAM_OPERATION_CWA_DATA_MASK) << + REG_SDRAM_OPERATION_CWA_DATA_OFFS); + } else if ((freq > DDR_400) && (freq <= DDR_533)) { + /* + * Configure - Change register DRAM operating speed + * (DDR3-1066) - CWA_DATA + */ + reg |= ((0x1 & REG_SDRAM_OPERATION_CWA_DATA_MASK) << + REG_SDRAM_OPERATION_CWA_DATA_OFFS); + } else if ((freq > DDR_533) && (freq <= DDR_666)) { + /* + * Configure - Change register DRAM operating speed + * (DDR3-1333) - CWA_DATA + */ + reg |= ((0x2 & REG_SDRAM_OPERATION_CWA_DATA_MASK) << + REG_SDRAM_OPERATION_CWA_DATA_OFFS); + } else { + /* + * Configure - Change register DRAM operating speed + * (DDR3-1600) - CWA_DATA + */ + reg |= ((0x3 & REG_SDRAM_OPERATION_CWA_DATA_MASK) << + REG_SDRAM_OPERATION_CWA_DATA_OFFS); + } + + /* Configure - Set Delay - tSTAB */ + reg |= (0x1 << REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS); + /* Configure - Issue CWA command with the above parameters */ + reg |= (REG_SDRAM_OPERATION_CMD_CWA & + ~(0xF << REG_SDRAM_OPERATION_CS_OFFS)); + + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for CWA operation completion */ + do { + reg = reg_read(REG_SDRAM_OPERATION_ADDR) & + REG_SDRAM_OPERATION_CMD_MASK; + } while (reg); + } + + /* Configure - Exit Self Refresh */ + /* [2] - DfsSR */ + reg = reg_read(REG_DFS_ADDR) & ~(1 << REG_DFS_SR_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Poll - DFS Register - 0x1528 [3] - DfsAtSR - All DRAM + * devices on all ranks are NOT in self refresh mode + */ + do { + reg = reg_read(REG_DFS_ADDR) & (1 << REG_DFS_ATSR_OFFS); + } while (reg); /* Wait for '0' */ + + /* Configure - Issue Refresh command */ + /* [3-0] = 0x2 - Refresh Command, [11-8] - enabled Cs */ + reg = REG_SDRAM_OPERATION_CMD_RFRS; + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) + reg &= ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + } + + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + /* Configure - Block new external transactions - Disable */ + 
reg = reg_read(REG_DFS_ADDR); + reg &= ~(1 << REG_DFS_BLOCK_OFFS); /* [1] - DfsBlock - Disable */ + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Configure - XBAR Retry response during Block to enable + * internal access - Disable + */ + reg = reg_read(REG_METAL_MASK_ADDR); + /* [0] - RetryMask - Enable */ + reg |= (1 << REG_METAL_MASK_RETRY_OFFS); + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + /* Configure - Set CL */ + reg = reg_read(REG_DDR3_MR0_CS_ADDR + + (cs << MR_CS_ADDR_OFFS)) & + ~REG_DDR3_MR0_CL_MASK; + if (freq == DDR_400) + tmp = ddr3_cl_to_valid_cl(6); + else + tmp = ddr3_cl_to_valid_cl(dram_info->cl); + reg |= ((tmp & 0x1) << REG_DDR3_MR0_CL_OFFS); + reg |= ((tmp & 0xE) << REG_DDR3_MR0_CL_HIGH_OFFS); + dfs_reg_write(REG_DDR3_MR0_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + + /* Configure - Set CWL */ + reg = reg_read(REG_DDR3_MR2_CS_ADDR + + (cs << MR_CS_ADDR_OFFS)) & + ~(REG_DDR3_MR2_CWL_MASK << REG_DDR3_MR2_CWL_OFFS); + if (freq == DDR_400) + reg |= ((0) << REG_DDR3_MR2_CWL_OFFS); + else + reg |= ((dram_info->cwl) << REG_DDR3_MR2_CWL_OFFS); + dfs_reg_write(REG_DDR3_MR2_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + DEBUG_DFS_C("DDR3 - DFS - Low To High - Ended successfully - new Frequency - ", + freq, 1); + + return MV_OK; + +#else + + /* This Flow is relevant for Armada370 A0 and ArmadaXP Z1 */ + + u32 reg, freq_par, tmp; + u32 cs = 0; + + DEBUG_DFS_C("DDR3 - DFS - Low To High - Starting DFS procedure to Frequency - ", + freq, 1); + + /* target frequency - freq */ + freq_par = ddr3_get_freq_parameter(freq, ratio_2to1); + + reg = 0x0000FF00; + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_1_ADDR, reg); + + /* 0x1600 - ODPG Control register */ + reg = reg_read(REG_ODPG_CNTRL_ADDR); + reg |= (1 << REG_ODPG_CNTRL_OFFS); /* [21] = 1 */ + dfs_reg_write(REG_ODPG_CNTRL_ADDR, reg); + + /* 0x1670 - PHY lock mask register */ + reg = reg_read(REG_PHY_LOCK_MASK_ADDR); + reg &= REG_PHY_LOCK_MASK_MASK; /* [11:0] = 0 */ + dfs_reg_write(REG_PHY_LOCK_MASK_ADDR, reg); + + /* Enable reconfig MR Registers after DFS */ + reg = reg_read(REG_DFS_ADDR); /* 0x1528 - DFS register */ + /* [4] - Disable - reconfig MR registers after DFS_ERG */ + reg &= ~0x11; + /* [0] - Enable - DRAM DLL after DFS */ + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Disable DRAM Controller to crossbar retry */ + /* [0] - disable */ + reg = reg_read(REG_METAL_MASK_ADDR) & ~(1 << 0); + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + /* Enable DRAM Blocking */ + /* [1] - DFS Block enable */ + reg = reg_read(REG_DFS_ADDR) | (1 << REG_DFS_BLOCK_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Enable Self refresh */ + /* [2] - DFS Self refresh enable */ + reg = reg_read(REG_DFS_ADDR) | (1 << REG_DFS_SR_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Poll DFS Register - All DRAM devices on all ranks are in + * self refresh mode - DFS can be executed afterwards + */ + /* 0x1528 [3] - DfsAtSR */ + do { + reg = reg_read(REG_DFS_ADDR) & (1 << REG_DFS_ATSR_OFFS); + } while (reg == 0x0); /* Wait for '1' */ + + /* + * Set Correct Ratio - if freq>MARGIN_FREQ use 2:1 ratio + * else use 1:1 ratio + */ + if (ratio_2to1) { + /* [15] = 1 - Set 2:1 Ratio between Dunit and Phy */ + reg = reg_read(REG_DDR_IO_ADDR) | + (1 << REG_DDR_IO_CLK_RATIO_OFFS); + } else { + /* [15] = 0 - Set 1:1 
Ratio between Dunit and Phy */ + reg = reg_read(REG_DDR_IO_ADDR) & + ~(1 << REG_DDR_IO_CLK_RATIO_OFFS); + } + dfs_reg_write(REG_DDR_IO_ADDR, reg); /* 0x1524 - DDR IO Register */ + + /* Switch HCLK Mux from training clk (100MHz), [16] = 0, keep DFS request bit */ + reg = 0x20040000; + /* + * [29] - training logic request DFS, [28:27] - + * preload patterns frequency [18] + */ + + /* 0x18488 - DRAM Init control status register */ + dfs_reg_write(REG_DRAM_INIT_CTRL_STATUS_ADDR, reg); + + /* Add delay between entering SR and start ratio modification */ + udelay(1); + + /* + * Initial Setup - assure that the "load new ratio" is clear (bit 24) + * and at the same time block reassertions of reset [15:8] and + * force reserved bits[7:0]. + */ + reg = 0x0000FFFF; + /* 0x18700 - CPU Div CLK control 0 */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + /* + * RelaX whenever reset is asserted to that channel (good for any case) + */ + reg = 0x0000FF00; + /* 0x18704 - CPU Div CLK control 1 */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_1_ADDR, reg); + + reg = reg_read(REG_CPU_DIV_CLK_CTRL_3_ADDR) & + REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK; + reg |= (freq_par << REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS); + /* Full Integer ratio from PLL-out to ddr-clk */ + /* 0x1870C - CPU Div CLK control 3 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_3_ADDR, reg); + + /* + * Shut off clock enable to the DDRPHY clock channel (this is the "D"). + * All the rest are kept as is (forced, but could be read-modify-write). + * This is done now by RMW above. + */ + + reg = 0x000FFF02; + + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_4_ADDR, reg); + + /* Wait before replacing the clock on the DDR Phy Channel. */ + udelay(1); + + reg = 0x0102FDFF; + /* + * This is for triggering the frequency update. Bit[24] is the + * central control + * bits [23:16] == which channels to change ==2 ==> only DDR Phy + * (smooth transition) + * bits [15:8] == mask reset reassertion due to clock modification + * to these channels. + * bits [7:0] == not in use + */ + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + udelay(1); + + /* + * Poll Div CLK status 0 register - indication that the clocks are + * active - 0x18718 [8] + */ + do { + reg = reg_read(REG_CPU_DIV_CLK_STATUS_0_ADDR) & + (1 << REG_CPU_DIV_CLK_ALL_STABLE_OFFS); + } while (reg == 0); + + reg = 0x000000FF; + /* + * Clean the CTRL0, to be ready for next resets and next requests of + * ratio modifications. 
+ */ + /* 0x18700 - CPU Div CLK control 0 register */ + dfs_reg_write(REG_CPU_DIV_CLK_CTRL_0_ADDR, reg); + + udelay(5); + + if (ratio_2to1) { + /* Pup Reset Divider B - Set Reset */ + /* [28] = 0 - Pup Reset Divider B */ + reg = reg_read(REG_SDRAM_CONFIG_ADDR) & ~(1 << 28); + /* [28] = 1 - Pup Reset Divider B */ + tmp = reg_read(REG_SDRAM_CONFIG_ADDR) | (1 << 28); + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* Pup Reset Divider B - Release Reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, tmp); + } + + /* DRAM Data PHYs ADLL Reset - Set Reset */ + reg = (reg_read(REG_DRAM_PHY_CONFIG_ADDR) & REG_DRAM_PHY_CONFIG_MASK); + /* [31:30] - reset pup data ctrl ADLL */ + /* 0x15EC - DRAM PHY Config Register */ + dfs_reg_write(REG_DRAM_PHY_CONFIG_ADDR, reg); + + udelay(25); + + /* APLL lock indication - Poll Phy lock status Register - 0x1674 [9] */ + do { + reg = reg_read(REG_PHY_LOCK_STATUS_ADDR) & + (1 << REG_PHY_LOCK_STATUS_LOCK_OFFS); + } while (reg == 0); + + /* DRAM Data PHYs ADLL Reset - Release Reset */ + reg = reg_read(REG_DRAM_PHY_CONFIG_ADDR) | ~REG_DRAM_PHY_CONFIG_MASK; + /* [31:30] - normal pup data ctrl ADLL */ + /* 0x15EC - DRAM PHY Config register */ + dfs_reg_write(REG_DRAM_PHY_CONFIG_ADDR, reg); + + udelay(10000); /* Wait 10msec */ + + /* + * APLL lock indication - Poll Phy lock status Register - 0x1674 [11:0] + */ + do { + reg = reg_read(REG_PHY_LOCK_STATUS_ADDR) & + REG_PHY_LOCK_STATUS_LOCK_MASK; + } while (reg != REG_PHY_LOCK_STATUS_LOCK_MASK); + + /* DRAM Data PHY Read [30], Write [29] path reset - Set Reset */ + reg = reg_read(REG_SDRAM_CONFIG_ADDR) & REG_SDRAM_CONFIG_MASK; + /* [30:29] = 0 - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* DRAM Data PHY Read [30], Write [29] path reset - Release Reset */ + reg = reg_read(REG_SDRAM_CONFIG_ADDR) | ~REG_SDRAM_CONFIG_MASK; + /* [30:29] = '11' - Data Pup R/W path reset */ + /* 0x1400 - SDRAM Configuration register */ + dfs_reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /* Disable DFS Reconfig */ + reg = reg_read(REG_DFS_ADDR) & ~(1 << 4); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* [2] - DFS Self refresh disable */ + reg = reg_read(REG_DFS_ADDR) & ~(1 << REG_DFS_SR_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* + * Poll DFS Register - 0x1528 [3] - DfsAtSR - All DRAM devices on + * all ranks are NOT in self refresh mode + */ + do { + reg = reg_read(REG_DFS_ADDR) & (1 << REG_DFS_ATSR_OFFS); + } while (reg); /* Wait for '0' */ + + /* 0x1404 */ + reg = (reg_read(REG_DUNIT_CTRL_LOW_ADDR) & 0xFFFFFFE7) | 0x2; + + /* Configure - 2T Mode - Restore original configuration */ + /* [3:4] 2T - Restore value */ + reg &= ~(REG_DUNIT_CTRL_LOW_2T_MASK << REG_DUNIT_CTRL_LOW_2T_OFFS); + reg |= ((dram_info->mode_2t & REG_DUNIT_CTRL_LOW_2T_MASK) << + REG_DUNIT_CTRL_LOW_2T_OFFS); + dfs_reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + + udelay(1); /* Wait 1us */ + + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + reg = (reg_read(REG_DDR3_MR1_ADDR)); + /* DLL Enable */ + reg &= ~(1 << REG_DDR3_MR1_DLL_ENA_OFFS); + dfs_reg_write(REG_DDR3_MR1_ADDR, reg); + + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg);
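/*
 * A minimal sketch (helper name hypothetical) of the SDRAM Operation
 * register encoding used for the MRS commands in this loop: the opcode
 * sits in bits [3:0] and bits [11:8] hold one active-low enable per
 * chip-select, so the CS to target is the one whose bit is cleared.
 */
static u32 sdram_op_for_cs(u32 cmd, u32 cs)
{
	return cmd & ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs));
}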
+ + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + /* DLL Reset - MR0 */ + reg = reg_read(REG_DDR3_MR0_ADDR); + dfs_reg_write(REG_DDR3_MR0_ADDR, reg); + + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR0 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] - MR0 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + reg = reg_read(REG_DDR3_MR0_ADDR); + reg &= ~0x74; /* CL [3:0]; [6:4],[2] */ + + if (freq == DDR_400) + tmp = ddr3_cl_to_valid_cl(6) & 0xF; + else + tmp = ddr3_cl_to_valid_cl(dram_info->cl) & 0xF; + + reg |= ((tmp & 0x1) << 2); + reg |= ((tmp >> 1) << 4); /* to bit 4 */ + dfs_reg_write(REG_DDR3_MR0_ADDR, reg); + + reg = REG_SDRAM_OPERATION_CMD_MR0 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + reg = reg_read(REG_DDR3_MR2_ADDR); + reg &= ~0x38; /* CWL [5:3] */ + /* CWL = 0; for 400 MHz CWL is 5 */ + if (freq != DDR_400) + reg |= dram_info->cwl << REG_DDR3_MR2_CWL_OFFS; + dfs_reg_write(REG_DDR3_MR2_ADDR, reg); + reg = REG_SDRAM_OPERATION_CMD_MR2 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* 0x1418 - SDRAM Operation Register */ + dfs_reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /* Poll - Wait for Refresh operation completion */ + wait_refresh_op_complete(); + + /* Set current rd_sample_delay */ + reg = reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR); + reg &= ~(REG_READ_DATA_SAMPLE_DELAYS_MASK << + (REG_READ_DATA_SAMPLE_DELAYS_OFFS * cs)); + reg |= (dram_info->cl << + (REG_READ_DATA_SAMPLE_DELAYS_OFFS * cs)); + dfs_reg_write(REG_READ_DATA_SAMPLE_DELAYS_ADDR, reg); + + /* Set current rd_ready_delay */ + reg = reg_read(REG_READ_DATA_READY_DELAYS_ADDR); + reg &= ~(REG_READ_DATA_READY_DELAYS_MASK << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg |= ((dram_info->cl + 1) << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + dfs_reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg); + } + } + + /* Enable ODT on DLL-on mode */ + dfs_reg_write(REG_SDRAM_ODT_CTRL_HIGH_ADDR, 0); + + /* [1] - DFS Block disable */ + reg = reg_read(REG_DFS_ADDR) & ~(1 << REG_DFS_BLOCK_OFFS); + dfs_reg_write(REG_DFS_ADDR, reg); /* 0x1528 - DFS register */ + + /* Change DDR frequency to 100MHz procedure: */ + /* 0x1600 - ODPG Control register */ + reg = reg_read(REG_ODPG_CNTRL_ADDR); + reg &= ~(1 << REG_ODPG_CNTRL_OFFS); /* [21] = 0 */ + dfs_reg_write(REG_ODPG_CNTRL_ADDR, reg); + + /* Change DDR frequency to 100MHz procedure: */ + /* 0x1670 - PHY lock mask register */ + reg = reg_read(REG_PHY_LOCK_MASK_ADDR); + reg |= ~REG_PHY_LOCK_MASK_MASK; /* [11:0] = FFF */ + dfs_reg_write(REG_PHY_LOCK_MASK_ADDR, reg); + + reg = reg_read(REG_METAL_MASK_ADDR) | (1 << 0); /* [0] - enable */ + /* 0x14B0 - Dunit MMask Register */ + dfs_reg_write(REG_METAL_MASK_ADDR, reg); + + DEBUG_DFS_C("DDR3 - DFS - Low To High - Ended successfully - new Frequency - ", + freq, 1); + return MV_OK; +#endif +} diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_dqs.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_dqs.c new file mode 100644 index 000000000..0db94212b --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_dqs.c @@ -0,0 +1,1374 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + 
* Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <log.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> + +#include "ddr3_hw_training.h" + +/* + * Debug + */ +#define DEBUG_DQS_C(s, d, l) \ + DEBUG_DQS_S(s); DEBUG_DQS_D(d, l); DEBUG_DQS_S("\n") +#define DEBUG_DQS_FULL_C(s, d, l) \ + DEBUG_DQS_FULL_S(s); DEBUG_DQS_FULL_D(d, l); DEBUG_DQS_FULL_S("\n") +#define DEBUG_DQS_RESULTS_C(s, d, l) \ + DEBUG_DQS_RESULTS_S(s); DEBUG_DQS_RESULTS_D(d, l); DEBUG_DQS_RESULTS_S("\n") +#define DEBUG_PER_DQ_C(s, d, l) \ + puts(s); printf("%x", d); puts("\n") + +#define DEBUG_DQS_RESULTS_S(s) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_2, "%s", s) +#define DEBUG_DQS_RESULTS_D(d, l) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_2, "%x", d) + +#define DEBUG_PER_DQ_S(s) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_3, "%s", s) +#define DEBUG_PER_DQ_D(d, l) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_3, "%x", d) +#define DEBUG_PER_DQ_DD(d, l) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_3, "%d", d) + +#ifdef MV_DEBUG_DQS +#define DEBUG_DQS_S(s) puts(s) +#define DEBUG_DQS_D(d, l) printf("%x", d) +#else +#define DEBUG_DQS_S(s) +#define DEBUG_DQS_D(d, l) +#endif + +#ifdef MV_DEBUG_DQS_FULL +#define DEBUG_DQS_FULL_S(s) puts(s) +#define DEBUG_DQS_FULL_D(d, l) printf("%x", d) +#else +#define DEBUG_DQS_FULL_S(s) +#define DEBUG_DQS_FULL_D(d, l) +#endif + +/* State machine for centralization - find low & high limit */ +enum { + PUP_ADLL_LIMITS_STATE_FAIL, + PUP_ADLL_LIMITS_STATE_PASS, + PUP_ADLL_LIMITS_STATE_FAIL_AFTER_PASS, +}; + +/* Hold centralization low results */ +static int centralization_low_limit[MAX_PUP_NUM] = { 0 }; +/* Hold centralization high results */ +static int centralization_high_limit[MAX_PUP_NUM] = { 0 }; + +int ddr3_find_adll_limits(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, int is_tx); +int ddr3_check_window_limits(u32 pup, int high_limit, int low_limit, int is_tx, + int *size_valid); +static int ddr3_center_calc(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, + int is_tx); +int ddr3_special_pattern_i_search(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, + int is_tx, u32 special_pattern_pup); +int ddr3_special_pattern_ii_search(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, + int is_tx, u32 special_pattern_pup); +int ddr3_set_dqs_centralization_results(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, + int is_tx); + +#ifdef MV88F78X60 +extern u32 killer_pattern_32b[DQ_NUM][LEN_SPECIAL_PATTERN]; +extern u32 killer_pattern_64b[DQ_NUM][LEN_SPECIAL_PATTERN]; +extern int per_bit_data[MAX_PUP_NUM][DQ_NUM]; +#else +extern u32 killer_pattern[DQ_NUM][LEN_16BIT_KILLER_PATTERN]; +extern u32 killer_pattern_32b[DQ_NUM][LEN_SPECIAL_PATTERN]; +#if defined(MV88F672X) +extern int per_bit_data[MAX_PUP_NUM][DQ_NUM]; +#endif +#endif +extern u32 special_pattern[DQ_NUM][LEN_SPECIAL_PATTERN]; + +static u32 *ddr3_dqs_choose_pattern(MV_DRAM_INFO *dram_info, u32 victim_dq) +{ + u32 *pattern_ptr; + + /* Choose pattern */ + switch (dram_info->ddr_width) { +#if defined(MV88F672X) + case 16: + pattern_ptr = (u32 *)&killer_pattern[victim_dq]; + break; +#endif + case 32: + pattern_ptr = (u32 *)&killer_pattern_32b[victim_dq]; + break; +#if defined(MV88F78X60) + case 64: + pattern_ptr = (u32 *)&killer_pattern_64b[victim_dq]; + break; +#endif + default: +#if defined(MV88F78X60) + pattern_ptr = (u32 *)&killer_pattern_32b[victim_dq]; +#else + pattern_ptr = (u32 *)&killer_pattern[victim_dq]; +#endif + break; + } + 
+ return pattern_ptr; +} + +/* + * Name: ddr3_dqs_centralization_rx + * Desc: Execute the DQS centralization RX phase. + * Args: dram_info + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_dqs_centralization_rx(MV_DRAM_INFO *dram_info) +{ + u32 cs, ecc, reg; + int status; + + DEBUG_DQS_S("DDR3 - DQS Centralization RX - Starting procedure\n"); + + /* Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + DEBUG_DQS_S("DDR3 - DQS Centralization RX - SW Override Enabled\n"); + + reg = (1 << REG_DRAM_TRAINING_AUTO_OFFS); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + /* Loop for each CS */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + DEBUG_DQS_FULL_C("DDR3 - DQS Centralization RX - CS - ", + (u32) cs, 1); + + for (ecc = 0; ecc < (dram_info->ecc_ena + 1); ecc++) { + + /* ECC Support - Switch ECC Mux on ecc=1 */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg |= (dram_info->ecc_ena * + ecc << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + if (ecc) + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization RX - ECC Mux Enabled\n"); + else + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization RX - ECC Mux Disabled\n"); + + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization RX - Find all limits\n"); + + status = ddr3_find_adll_limits(dram_info, cs, + ecc, 0); + if (MV_OK != status) + return status; + + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization RX - Start calculating center\n"); + + status = ddr3_center_calc(dram_info, cs, ecc, + 0); + if (MV_OK != status) + return status; + } + } + } + + /* ECC Support - Disable ECC MUX */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + /* Disable SW override - Must be in a different stage */ + /* [0] = 0 - Disable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_1_ADDR) | + (1 << REG_DRAM_TRAINING_1_TRNBPOINT_OFFS); + reg_write(REG_DRAM_TRAINING_1_ADDR, reg); + + return MV_OK; +}
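/*
 * A minimal sketch (helper name hypothetical) of the ECC-mux flip done
 * inside both centralization loops: when ECC is enabled, the ecc == 1
 * pass routes the ECC byte through the data path so the same search
 * machinery can train it.
 */
static void ddr3_ecc_mux_set(int enable)
{
	u32 reg = reg_read(REG_DRAM_TRAINING_2_ADDR) &
		~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS);

	if (enable)
		reg |= (1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS);
	reg_write(REG_DRAM_TRAINING_2_ADDR, reg);
}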
+ +/* + * Name: ddr3_dqs_centralization_tx + * Desc: Execute the DQS centralization TX phase. + * Args: dram_info + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_dqs_centralization_tx(MV_DRAM_INFO *dram_info) +{ + u32 cs, ecc, reg; + int status; + + DEBUG_DQS_S("DDR3 - DQS Centralization TX - Starting procedure\n"); + + /* Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + DEBUG_DQS_S("DDR3 - DQS Centralization TX - SW Override Enabled\n"); + + reg = (1 << REG_DRAM_TRAINING_AUTO_OFFS); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + /* Loop for each CS */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + DEBUG_DQS_FULL_C("DDR3 - DQS Centralization TX - CS - ", + (u32) cs, 1); + for (ecc = 0; ecc < (dram_info->ecc_ena + 1); ecc++) { + /* ECC Support - Switch ECC Mux on ecc=1 */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg |= (dram_info->ecc_ena * + ecc << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + if (ecc) + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization TX - ECC Mux Enabled\n"); + else + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization TX - ECC Mux Disabled\n"); + + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization TX - Find all limits\n"); + + status = ddr3_find_adll_limits(dram_info, cs, + ecc, 1); + if (MV_OK != status) + return status; + + DEBUG_DQS_FULL_S("DDR3 - DQS Centralization TX - Start calculating center\n"); + + status = ddr3_center_calc(dram_info, cs, ecc, + 1); + if (MV_OK != status) + return status; + } + } + } + + /* ECC Support - Disable ECC MUX */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + /* Disable SW override - Must be in a different stage */ + /* [0] = 0 - Disable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_1_ADDR) | + (1 << REG_DRAM_TRAINING_1_TRNBPOINT_OFFS); + reg_write(REG_DRAM_TRAINING_1_ADDR, reg); + + return MV_OK; +} + +/* + * Name: ddr3_find_adll_limits + * Desc: Execute the Find ADLL limits phase. + * Args: dram_info + * cs + * ecc + * is_tx Indicate whether Rx or Tx + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_find_adll_limits(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, int is_tx) +{ + u32 victim_dq, pup, tmp; + u32 adll_addr; + u32 max_pup; /* maximal pup index */ + u32 pup_mask = 0; + u32 unlock_pup; /* bit array of unlocked pups */ + u32 new_unlock_pup; /* bit array of compare failed pups */ + u32 curr_adll; + u32 adll_start_val; /* adll start loop value - for rx or tx limit */ + u32 high_limit; /* holds found High Limit */ + u32 low_limit; /* holds found Low Limit */ + int win_valid; + int update_win; + u32 sdram_offset; + u32 uj, cs_count, cs_tmp, ii; + u32 *pattern_ptr; + u32 dq; + u32 adll_end_val; /* adll end of loop val - for rx or tx limit */ + u8 analog_pbs[DQ_NUM][MAX_PUP_NUM][DQ_NUM][2]; + u8 analog_pbs_sum[MAX_PUP_NUM][DQ_NUM][2]; + int pup_adll_limit_state[MAX_PUP_NUM]; /* hold state of each pup */ + + adll_addr = ((is_tx == 1) ? PUP_DQS_WR : PUP_DQS_RD); + adll_end_val = ((is_tx == 1) ? ADLL_MIN : ADLL_MAX); + adll_start_val = ((is_tx == 1) ? 
ADLL_MAX : ADLL_MIN); + max_pup = (ecc + (1 - ecc) * dram_info->num_of_std_pups); + + DEBUG_DQS_FULL_S("DDR3 - DQS Find Limits - Starting Find ADLL Limits\n"); + + /* init the array */ + for (pup = 0; pup < max_pup; pup++) { + centralization_low_limit[pup] = ADLL_MIN; + centralization_high_limit[pup] = ADLL_MAX; + } + + /* Killer Pattern */ + cs_count = 0; + for (cs_tmp = 0; cs_tmp < cs; cs_tmp++) { + if (dram_info->cs_ena & (1 << cs_tmp)) + cs_count++; + } + sdram_offset = cs_count * (SDRAM_CS_SIZE + 1); + sdram_offset += ((is_tx == 1) ? + SDRAM_DQS_TX_OFFS : SDRAM_DQS_RX_OFFS); + + /* Prepare pup masks */ + for (pup = 0; pup < max_pup; pup++) + pup_mask |= (1 << pup); + + for (pup = 0; pup < max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) { + analog_pbs_sum[pup][dq][0] = adll_start_val; + analog_pbs_sum[pup][dq][1] = adll_end_val; + } + } + + /* Loop - use different pattern for each victim_dq */ + for (victim_dq = 0; victim_dq < DQ_NUM; victim_dq++) { + DEBUG_DQS_FULL_C("DDR3 - DQS Find Limits - Victim DQ - ", + (u32)victim_dq, 1); + /* + * The per-pup arrays implement a state machine with + * three stages for each pup: + * 1. fail, with no pass seen in earlier compares. + * 2. pass compare. + * 3. fail after pass - end state. + * The window limits are the ADLL values for which the + * pup was in the pass stage. + */ + + /* Set all states to Fail (1st state) */ + for (pup = 0; pup < max_pup; pup++) + pup_adll_limit_state[pup] = PUP_ADLL_LIMITS_STATE_FAIL; + + /* Set current valid pups */ + unlock_pup = pup_mask; + + /* Set ADLL to start value */ + curr_adll = adll_start_val; + +#if defined(MV88F78X60) + for (pup = 0; pup < max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) { + analog_pbs[victim_dq][pup][dq][0] = + adll_start_val; + analog_pbs[victim_dq][pup][dq][1] = + adll_end_val; + per_bit_data[pup][dq] = 0; + } + } +#endif + + for (uj = 0; uj < ADLL_MAX; uj++) { + DEBUG_DQS_FULL_C("DDR3 - DQS Find Limits - Setting ADLL to ", + curr_adll, 2); + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(unlock_pup, pup) == 1) { + tmp = ((is_tx == 1) ? 
curr_adll + dram_info->wl_val[cs][pup * (1 - ecc) + ecc * ECC_PUP][D] : curr_adll); + ddr3_write_pup_reg(adll_addr, cs, pup + (ecc * ECC_PUP), 0, tmp); + } + } + + /* Choose pattern */ + pattern_ptr = ddr3_dqs_choose_pattern(dram_info, victim_dq); + + /* '1' - means pup failed, '0' - means pup pass */ + new_unlock_pup = 0; + + /* Read and compare results for Victim_DQ# */ + for (ii = 0; ii < 3; ii++) { + u32 tmp = 0; + if (MV_OK != ddr3_sdram_dqs_compare(dram_info, unlock_pup, &tmp, pattern_ptr, LEN_KILLER_PATTERN, sdram_offset + LEN_KILLER_PATTERN * 4 * victim_dq, is_tx, 0, NULL, 0)) + return MV_DDR3_TRAINING_ERR_DRAM_COMPARE; + + new_unlock_pup |= tmp; + } + + pup = 0; + DEBUG_DQS_FULL_C("DDR3 - DQS Find Limits - UnlockPup: ", unlock_pup, 2); + DEBUG_DQS_FULL_C("DDR3 - DQS Find Limits - NewUnlockPup: ", new_unlock_pup, 2); + + /* Update pup state */ + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(unlock_pup, pup) == 0) { + DEBUG_DQS_FULL_C("DDR3 - DQS Find Limits - Skipping pup ", pup, 1); + continue; + } + + /* Still didn't find the window limit of the pup */ + if (IS_PUP_ACTIVE(new_unlock_pup, pup) == 1) { + /* Current compare result == fail */ + if (pup_adll_limit_state[pup] == PUP_ADLL_LIMITS_STATE_PASS) { + /* If now it failed but passed earlier */ + DEBUG_DQS_S("DDR3 - DQS Find Limits - PASS to FAIL: CS - "); + DEBUG_DQS_D(cs, 1); + DEBUG_DQS_S(", DQ - "); + DEBUG_DQS_D(victim_dq, 1); + DEBUG_DQS_S(", Pup - "); + DEBUG_DQS_D(pup, 1); + DEBUG_DQS_S(", ADLL - "); + DEBUG_DQS_D(curr_adll, 2); + DEBUG_DQS_S("\n"); + +#if defined(MV88F78X60) + for (dq = 0; dq < DQ_NUM; dq++) { + if ((analog_pbs[victim_dq][pup][dq][0] != adll_start_val) && (analog_pbs[victim_dq][pup][dq][1] == adll_end_val)) + analog_pbs[victim_dq][pup][dq][1] = curr_adll; + } +#endif + win_valid = 1; + update_win = 0; + + /* Keep min / max limit value */ + if (is_tx == 0) { + /* RX - found upper limit */ + if (centralization_high_limit[pup] > (curr_adll - 1)) { + high_limit = curr_adll - 1; + low_limit = centralization_low_limit[pup]; + update_win = 1; + } + } else { + /* TX - found lower limit */ + if (centralization_low_limit[pup] < (curr_adll + 1)) { + high_limit = centralization_high_limit[pup]; + low_limit = curr_adll + 1; + update_win = 1; + } + } + + if (update_win == 1) { + /* + * Before updating window limits we need + * to check that the limits are valid + */ + if (MV_OK != ddr3_check_window_limits(pup, high_limit, low_limit, is_tx, &win_valid)) + return MV_DDR3_TRAINING_ERR_WIN_LIMITS; + + if (win_valid == 1) { + /* Window limits should be updated */ + centralization_low_limit[pup] = low_limit; + centralization_high_limit[pup] = high_limit; + } + } + + if (win_valid == 1) { + /* Found end of window - lock the pup */ + pup_adll_limit_state[pup] = PUP_ADLL_LIMITS_STATE_FAIL_AFTER_PASS; + unlock_pup &= ~(1 << pup); + } else { + /* Probably false pass - reset status */ + pup_adll_limit_state[pup] = PUP_ADLL_LIMITS_STATE_FAIL; + +#if defined(MV88F78X60) + /* Clear logging array of win size (per Dq) */ + for (dq = 0; dq < DQ_NUM; dq++) { + analog_pbs[victim_dq][pup][dq][0] = adll_start_val; + analog_pbs[victim_dq][pup][dq][1] = adll_end_val; + per_bit_data[pup][dq] = 0; + } +#endif + } + } + } else { + /* Current compare result == pass */ + if (pup_adll_limit_state[pup] == PUP_ADLL_LIMITS_STATE_FAIL) { + /* If now it passed but failed earlier */ 
+ DEBUG_DQS_S("DDR3 - DQS Find Limits - FAIL to PASS: CS - "); + DEBUG_DQS_D(cs, 1); + DEBUG_DQS_S(", DQ - "); + DEBUG_DQS_D(victim_dq, 1); + DEBUG_DQS_S(", Pup - "); + DEBUG_DQS_D(pup, 1); + DEBUG_DQS_S(", ADLL - "); + DEBUG_DQS_D(curr_adll, 2); + DEBUG_DQS_S("\n"); + +#if defined(MV88F78X60) + for (dq = 0; dq < DQ_NUM; + dq++) { + if (analog_pbs[victim_dq][pup][dq][0] == adll_start_val) + analog_pbs + [victim_dq] + [pup][dq] + [0] = + curr_adll; + } +#endif + /* Found start of window */ + pup_adll_limit_state[pup] = + PUP_ADLL_LIMITS_STATE_PASS; + + /* Keep min / max limit value */ + if (is_tx == 0) { + /* RX - found low limit */ + if (centralization_low_limit[pup] <= curr_adll) + centralization_low_limit + [pup] = + curr_adll; + } else { + /* TX - found high limit */ + if (centralization_high_limit[pup] >= curr_adll) + centralization_high_limit + [pup] = + curr_adll; + } + } + } + } + + if (unlock_pup == 0) { + /* Found limit to all pups */ + DEBUG_DQS_FULL_S("DDR3 - DQS Find Limits - found PUP limit\n"); + break; + } + + /* + * Increment / decrement (Move to right / left + * one phase - ADLL) dqs RX / TX delay (for all un + * lock pups + */ + if (is_tx == 0) + curr_adll++; + else + curr_adll--; + } + + if (unlock_pup != 0) { + /* + * Found pups that didn't reach to the end of the + * state machine + */ + DEBUG_DQS_C("DDR3 - DQS Find Limits - Pups that didn't reached end of the state machine: ", + unlock_pup, 1); + + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(unlock_pup, pup) == 1) { + if (pup_adll_limit_state[pup] == + PUP_ADLL_LIMITS_STATE_FAIL) { + /* ERROR - found fail for all window size */ + DEBUG_DQS_S("DDR3 - DQS Find Limits - Got FAIL for the complete range on pup - "); + DEBUG_DQS_D(pup, 1); + DEBUG_DQS_C(" victim DQ ", + victim_dq, 1); + + /* For debug - set min limit to illegal limit */ + centralization_low_limit[pup] + = ADLL_ERROR; + /* + * In case the pup is in mode + * PASS - the limit is the min + * / max adll, no need to + * update because of the results + * array default value + */ + return MV_DDR3_TRAINING_ERR_PUP_RANGE; + } + } + } + } + } + + DEBUG_DQS_S("DDR3 - DQS Find Limits - DQ values per victim results:\n"); + for (victim_dq = 0; victim_dq < DQ_NUM; victim_dq++) { + for (pup = 0; pup < max_pup; pup++) { + DEBUG_DQS_S("Victim DQ-"); + DEBUG_DQS_D(victim_dq, 1); + DEBUG_DQS_S(", PUP-"); + DEBUG_DQS_D(pup, 1); + for (dq = 0; dq < DQ_NUM; dq++) { + DEBUG_DQS_S(", DQ-"); + DEBUG_DQS_D(dq, 1); + DEBUG_DQS_S(",S-"); + DEBUG_DQS_D(analog_pbs[victim_dq][pup][dq] + [0], 2); + DEBUG_DQS_S(",E-"); + DEBUG_DQS_D(analog_pbs[victim_dq][pup][dq] + [1], 2); + + if (is_tx == 0) { + if (analog_pbs[victim_dq][pup][dq][0] + > analog_pbs_sum[pup][dq][0]) + analog_pbs_sum[pup][dq][0] = + analog_pbs[victim_dq][pup] + [dq][0]; + if (analog_pbs[victim_dq][pup][dq][1] + < analog_pbs_sum[pup][dq][1]) + analog_pbs_sum[pup][dq][1] = + analog_pbs[victim_dq][pup] + [dq][1]; + } else { + if (analog_pbs[victim_dq][pup][dq][0] + < analog_pbs_sum[pup][dq][0]) + analog_pbs_sum[pup][dq][0] = + analog_pbs[victim_dq][pup] + [dq][0]; + if (analog_pbs[victim_dq][pup][dq][1] + > analog_pbs_sum[pup][dq][1]) + analog_pbs_sum[pup][dq][1] = + analog_pbs[victim_dq][pup] + [dq][1]; + } + } + DEBUG_DQS_S("\n"); + } + } + + if (ddr3_get_log_level() >= MV_LOG_LEVEL_3) { + u32 dq; + + DEBUG_PER_DQ_S("\n########## LOG LEVEL 3(Windows margins per-DQ) ##########\n"); + if (is_tx) { + DEBUG_PER_DQ_C("DDR3 - TX CS: ", cs, 1); + } else { + DEBUG_PER_DQ_C("DDR3 - RX CS: ", cs, 1); + } + + if (ecc == 0) 
{ + DEBUG_PER_DQ_S("\n DATA RESULTS:\n"); + } else { + DEBUG_PER_DQ_S("\n ECC RESULTS:\n"); + } + + /* Since all dqs have the same value we take 0 as representative */ + dq = 0; + for (pup = 0; pup < max_pup; pup++) { + if (ecc == 0) { + DEBUG_PER_DQ_S("\nBYTE:"); + DEBUG_PER_DQ_D(pup, 1); + DEBUG_PER_DQ_S("\n"); + } else { + DEBUG_PER_DQ_S("\nECC BYTE:\n"); + } + DEBUG_PER_DQ_S(" DQ's LOW HIGH WIN-SIZE\n"); + DEBUG_PER_DQ_S("============================================\n"); + for (victim_dq = 0; victim_dq < DQ_NUM; victim_dq++) { + if (ecc == 0) { + DEBUG_PER_DQ_S("DQ["); + DEBUG_PER_DQ_DD((victim_dq + DQ_NUM * pup), 2); + DEBUG_PER_DQ_S("]"); + } else { + DEBUG_PER_DQ_S("CB["); + DEBUG_PER_DQ_DD(victim_dq, 2); + DEBUG_PER_DQ_S("]"); + } + if (is_tx) { + DEBUG_PER_DQ_S(" 0x"); + DEBUG_PER_DQ_D(analog_pbs[victim_dq][pup][dq][1], 2); /* low value */ + DEBUG_PER_DQ_S(" 0x"); + DEBUG_PER_DQ_D(analog_pbs[victim_dq][pup][dq][0], 2); /* high value */ + DEBUG_PER_DQ_S(" 0x"); + DEBUG_PER_DQ_D(analog_pbs[victim_dq][pup][dq][0] - analog_pbs[victim_dq][pup][dq][1], 2); /* win-size */ + } else { + DEBUG_PER_DQ_S(" 0x"); + DEBUG_PER_DQ_D(analog_pbs[victim_dq][pup][dq][0], 2); /* low value */ + DEBUG_PER_DQ_S(" 0x"); + DEBUG_PER_DQ_D((analog_pbs[victim_dq][pup][dq][1] - 1), 2); /* high value */ + DEBUG_PER_DQ_S(" 0x"); + DEBUG_PER_DQ_D(analog_pbs[victim_dq][pup][dq][1] - analog_pbs[victim_dq][pup][dq][0], 2); /* win-size */ + } + DEBUG_PER_DQ_S("\n"); + } + } + DEBUG_PER_DQ_S("\n"); + } + + if (is_tx) { + DEBUG_DQS_S("DDR3 - DQS TX - Find Limits - DQ values Summary:\n"); + } else { + DEBUG_DQS_S("DDR3 - DQS RX - Find Limits - DQ values Summary:\n"); + } + + for (pup = 0; pup < max_pup; pup++) { + DEBUG_DQS_S("PUP-"); + DEBUG_DQS_D(pup, 1); + for (dq = 0; dq < DQ_NUM; dq++) { + DEBUG_DQS_S(", DQ-"); + DEBUG_DQS_D(dq, 1); + DEBUG_DQS_S(",S-"); + DEBUG_DQS_D(analog_pbs_sum[pup][dq][0], 2); + DEBUG_DQS_S(",E-"); + DEBUG_DQS_D(analog_pbs_sum[pup][dq][1], 2); + } + DEBUG_DQS_S("\n"); + } + + if (is_tx) { + DEBUG_DQS_S("DDR3 - DQS TX - Find Limits - DQ values Summary:\n"); + } else { + DEBUG_DQS_S("DDR3 - DQS RX - Find Limits - DQ values Summary:\n"); + } + + for (pup = 0; pup < max_pup; pup++) { + if (max_pup == 1) { + /* For ECC PUP */ + DEBUG_DQS_S("DDR3 - DQS8"); + } else { + DEBUG_DQS_S("DDR3 - DQS"); + DEBUG_DQS_D(pup, 1); + } + + for (dq = 0; dq < DQ_NUM; dq++) { + DEBUG_DQS_S(", DQ-"); + DEBUG_DQS_D(dq, 1); + DEBUG_DQS_S("::S-"); + DEBUG_DQS_D(analog_pbs_sum[pup][dq][0], 2); + DEBUG_DQS_S(",E-"); + DEBUG_DQS_D(analog_pbs_sum[pup][dq][1], 2); + } + DEBUG_DQS_S("\n"); + } + + DEBUG_DQS_S("DDR3 - DQS Find Limits - Ended\n"); + + return MV_OK; +}
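/*
 * A minimal sketch (not part of the driver) of how one pup's window
 * limits evolve over a fail -> pass -> fail ADLL sweep in the RX case,
 * using the three states declared at the top of this file. The pretend
 * compare result and tap range starting at 0 are assumptions for
 * illustration only.
 */
static void adll_limits_demo(void)
{
	int state = PUP_ADLL_LIMITS_STATE_FAIL;
	int low = 0, high = ADLL_MAX;
	int adll;

	for (adll = 0; adll <= ADLL_MAX; adll++) {
		int pass = (adll >= 9 && adll <= 20);	/* pretend compare */

		if (pass && state == PUP_ADLL_LIMITS_STATE_FAIL) {
			state = PUP_ADLL_LIMITS_STATE_PASS;
			low = adll;		/* first passing tap */
		} else if (!pass && state == PUP_ADLL_LIMITS_STATE_PASS) {
			state = PUP_ADLL_LIMITS_STATE_FAIL_AFTER_PASS;
			high = adll - 1;	/* last passing tap */
			break;
		}
	}
	/* here: low == 9, high == 20 - the pass window for this pup */
}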
+ */
+int ddr3_check_window_limits(u32 pup, int high_limit, int low_limit, int is_tx,
+			     int *size_valid)
+{
+	DEBUG_DQS_FULL_S("DDR3 - DQS Check Win Limits - Starting\n");
+
+	if (low_limit > high_limit) {
+		DEBUG_DQS_S("DDR3 - DQS Check Win Limits - Pup ");
+		DEBUG_DQS_D(pup, 1);
+		DEBUG_DQS_S(" Low Limit greater than High Limit\n");
+		*size_valid = 0;
+		return MV_OK;
+	}
+
+	/*
+	 * Check that the window size is valid; if it is not, there was
+	 * probably a false pass earlier
+	 */
+	if ((high_limit - low_limit) < MIN_WIN_SIZE) {
+		/*
+		 * Since the window size is too small, there was probably a
+		 * false pass
+		 */
+		*size_valid = 0;
+
+		DEBUG_DQS_S("DDR3 - DQS Check Win Limits - Pup ");
+		DEBUG_DQS_D(pup, 1);
+		DEBUG_DQS_S(" Window size is smaller than MIN_WIN_SIZE\n");
+
+	} else if ((high_limit - low_limit) > ADLL_MAX) {
+		*size_valid = 0;
+
+		DEBUG_DQS_S("DDR3 - DQS Check Win Limits - Pup ");
+		DEBUG_DQS_D(pup, 1);
+		DEBUG_DQS_S
+		    (" Window size is bigger than max ADLL taps (31). Exiting.\n");
+
+		return MV_FAIL;
+
+	} else {
+		*size_valid = 1;
+
+		DEBUG_DQS_FULL_S("DDR3 - DQS Check Win Limits - Pup ");
+		DEBUG_DQS_FULL_D(pup, 1);
+		DEBUG_DQS_FULL_C(" window size is ", (high_limit - low_limit),
+				 2);
+	}
+
+	return MV_OK;
+}
+
+/*
+ * Name:     ddr3_center_calc
+ * Desc:     Execute the calculation of the center of the windows phase.
+ * Args:     dram_info
+ *           is_tx      Indicates whether Rx or Tx
+ * Notes:
+ * Returns:  MV_OK if success, other error code if fail.
+ */
+static int ddr3_center_calc(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc,
+			    int is_tx)
+{
+	/* Bit array of pups that need a special search */
+	u32 special_pattern_i_pup = 0;
+	u32 special_pattern_ii_pup = 0;
+	u32 pup;
+	u32 max_pup;
+
+	max_pup = (ecc + (1 - ecc) * dram_info->num_of_std_pups);
+
+	for (pup = 0; pup < max_pup; pup++) {
+		if (is_tx == 0) {
+			/* Check special pattern I */
+			/*
+			 * Special pattern Low limit search - relevant only
+			 * for Rx, win size < threshold and low limit = 0
+			 */
+			if (((centralization_high_limit[pup] -
+			      centralization_low_limit[pup]) < VALID_WIN_THRS)
+			    && (centralization_low_limit[pup] == MIN_DELAY))
+				special_pattern_i_pup |= (1 << pup);
+
+			/* Check special pattern II */
+			/*
+			 * Special pattern High limit search - relevant only
+			 * for Rx, win size < threshold and high limit = 31
+			 */
+			if (((centralization_high_limit[pup] -
+			      centralization_low_limit[pup]) < VALID_WIN_THRS)
+			    && (centralization_high_limit[pup] == MAX_DELAY))
+				special_pattern_ii_pup |= (1 << pup);
+		}
+	}
+
+	/* Run special pattern Low limit search - for the relevant pups */
+	if (special_pattern_i_pup != 0) {
+		DEBUG_DQS_S("DDR3 - DQS Center Calc - Entering special pattern I for Low limit search\n");
+		if (MV_OK !=
+		    ddr3_special_pattern_i_search(dram_info, cs, ecc, is_tx,
+						  special_pattern_i_pup))
+			return MV_DDR3_TRAINING_ERR_DQS_LOW_LIMIT_SEARCH;
+	}
+
+	/* Run special pattern High limit search - for the relevant pups */
+	if (special_pattern_ii_pup != 0) {
+		DEBUG_DQS_S("DDR3 - DQS Center Calc - Entering special pattern II for High limit search\n");
+		if (MV_OK !=
+		    ddr3_special_pattern_ii_search(dram_info, cs, ecc, is_tx,
+						   special_pattern_ii_pup))
+			return MV_DDR3_TRAINING_ERR_DQS_HIGH_LIMIT_SEARCH;
+	}
+
+	/* Set adll to center = (General_High_limit + General_Low_limit)/2 */
+	return ddr3_set_dqs_centralization_results(dram_info, cs, ecc, is_tx);
+}
+
+/*
+ * Name:     ddr3_special_pattern_i_search
+ * Desc:     Execute special pattern low limit search.
+ * Args: + * special_pattern_pup The pups that need the special search + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_special_pattern_i_search(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, + int is_tx, u32 special_pattern_pup) +{ + u32 victim_dq; /* loop index - victim DQ */ + u32 adll_idx; + u32 pup; + u32 unlock_pup; /* bit array of the unlock pups */ + u32 first_fail; /* bit array - of pups that get first fail */ + u32 new_lockup_pup; /* bit array of compare failed pups */ + u32 pass_pup; /* bit array of compare pass pup */ + u32 sdram_offset; + u32 max_pup; + u32 comp_val; + u32 special_res[MAX_PUP_NUM]; /* hold tmp results */ + + DEBUG_DQS_S("DDR3 - DQS - Special Pattern I Search - Starting\n"); + + max_pup = ecc + (1 - ecc) * dram_info->num_of_std_pups; + + /* Init the temporary results to max ADLL value */ + for (pup = 0; pup < max_pup; pup++) + special_res[pup] = ADLL_MAX; + + /* Run special pattern for all DQ - use the same pattern */ + for (victim_dq = 0; victim_dq < DQ_NUM; victim_dq++) { + unlock_pup = special_pattern_pup; + first_fail = 0; + + sdram_offset = cs * SDRAM_CS_SIZE + SDRAM_DQS_RX_OFFS + + LEN_KILLER_PATTERN * 4 * victim_dq; + + for (pup = 0; pup < max_pup; pup++) { + /* Set adll value per PUP. adll = high limit per pup */ + if (IS_PUP_ACTIVE(unlock_pup, pup)) { + /* only for pups that need special search */ + ddr3_write_pup_reg(PUP_DQS_RD, cs, + pup + (ecc * ECC_PUP), 0, + centralization_high_limit + [pup]); + } + } + + adll_idx = 0; + do { + /* + * Perform read and compare simultaneously for all + * un-locked MC use the special pattern mask + */ + new_lockup_pup = 0; + + if (MV_OK != + ddr3_sdram_dqs_compare(dram_info, unlock_pup, + &new_lockup_pup, + special_pattern + [victim_dq], + LEN_SPECIAL_PATTERN, + sdram_offset, 0, + 0, NULL, 1)) + return MV_FAIL; + + DEBUG_DQS_S("DDR3 - DQS - Special I - ADLL value is: "); + DEBUG_DQS_D(adll_idx, 2); + DEBUG_DQS_S(", UnlockPup: "); + DEBUG_DQS_D(unlock_pup, 2); + DEBUG_DQS_S(", NewLockPup: "); + DEBUG_DQS_D(new_lockup_pup, 2); + DEBUG_DQS_S("\n"); + + if (unlock_pup != new_lockup_pup) + DEBUG_DQS_S("DDR3 - DQS - Special I - Some Pup passed!\n"); + + /* Search for pups with passed compare & already fail */ + pass_pup = first_fail & ~new_lockup_pup & unlock_pup; + first_fail |= new_lockup_pup; + unlock_pup &= ~pass_pup; + + /* Get pass pups */ + if (pass_pup != 0) { + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(pass_pup, pup) == + 1) { + /* If pup passed and has first fail = 1 */ + /* keep min value of ADLL max value - current adll */ + /* (centralization_high_limit[pup] + adll_idx) = current adll !!! */ + comp_val = + (ADLL_MAX - + (centralization_high_limit + [pup] + adll_idx)); + + DEBUG_DQS_C + ("DDR3 - DQS - Special I - Pup - ", + pup, 1); + DEBUG_DQS_C + (" comp_val = ", + comp_val, 2); + + if (comp_val < + special_res[pup]) { + special_res[pup] = + comp_val; + centralization_low_limit + [pup] = + (-1) * + comp_val; + + DEBUG_DQS_C + ("DDR3 - DQS - Special I - Pup - ", + pup, 1); + DEBUG_DQS_C + (" Changed Low limit to ", + centralization_low_limit + [pup], 2); + } + } + } + } + + /* + * Did all PUP found missing window? 
+ * Check for each pup if adll (different for each pup) + * reach maximum if reach max value - lock the pup + * if not - increment (Move to right one phase - ADLL) + * dqs RX delay + */ + adll_idx++; + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(unlock_pup, pup) == 1) { + /* Check only unlocked pups */ + if ((centralization_high_limit[pup] + + adll_idx) >= ADLL_MAX) { + /* reach maximum - lock the pup */ + DEBUG_DQS_C("DDR3 - DQS - Special I - reach maximum - lock pup ", + pup, 1); + unlock_pup &= ~(1 << pup); + } else { + /* Didn't reach maximum - increment ADLL */ + ddr3_write_pup_reg(PUP_DQS_RD, + cs, + pup + + (ecc * + ECC_PUP), 0, + (centralization_high_limit + [pup] + + adll_idx)); + } + } + } + } while (unlock_pup != 0); + } + + return MV_OK; +} + +/* + * Name: ddr3_special_pattern_ii_search + * Desc: Execute special pattern high limit search. + * Args: + * special_pattern_pup The pups that need the special search + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_special_pattern_ii_search(MV_DRAM_INFO *dram_info, u32 cs, u32 ecc, + int is_tx, u32 special_pattern_pup) +{ + u32 victim_dq; /* loop index - victim DQ */ + u32 adll_idx; + u32 pup; + u32 unlock_pup; /* bit array of the unlock pups */ + u32 first_fail; /* bit array - of pups that get first fail */ + u32 new_lockup_pup; /* bit array of compare failed pups */ + u32 pass_pup; /* bit array of compare pass pup */ + u32 sdram_offset; + u32 max_pup; + u32 comp_val; + u32 special_res[MAX_PUP_NUM]; /* hold tmp results */ + + DEBUG_DQS_S("DDR3 - DQS - Special Pattern II Search - Starting\n"); + + max_pup = (ecc + (1 - ecc) * dram_info->num_of_std_pups); + + /* init the tmporary results to max ADLL value */ + for (pup = 0; pup < max_pup; pup++) + special_res[pup] = ADLL_MAX; + + sdram_offset = cs * SDRAM_CS_SIZE + SDRAM_DQS_RX_OFFS; + + /* run special pattern for all DQ - use the same pattern */ + for (victim_dq = 0; victim_dq < DQ_NUM; victim_dq++) { + unlock_pup = special_pattern_pup; + first_fail = 0; + + for (pup = 0; pup < max_pup; pup++) { + /* Set adll value per PUP. adll = 0 */ + if (IS_PUP_ACTIVE(unlock_pup, pup)) { + /* Only for pups that need special search */ + ddr3_write_pup_reg(PUP_DQS_RD, cs, + pup + (ecc * ECC_PUP), 0, + ADLL_MIN); + } + } + + adll_idx = 0; + do { + /* + * Perform read and compare simultaneously for all + * un-locked MC use the special pattern mask + */ + new_lockup_pup = 0; + + if (MV_OK != ddr3_sdram_dqs_compare( + dram_info, unlock_pup, &new_lockup_pup, + special_pattern[victim_dq], + LEN_SPECIAL_PATTERN, + sdram_offset, 0, 0, NULL, 0)) + return MV_FAIL; + + DEBUG_DQS_S("DDR3 - DQS - Special II - ADLL value is "); + DEBUG_DQS_D(adll_idx, 2); + DEBUG_DQS_S("unlock_pup "); + DEBUG_DQS_D(unlock_pup, 1); + DEBUG_DQS_S("new_lockup_pup "); + DEBUG_DQS_D(new_lockup_pup, 1); + DEBUG_DQS_S("\n"); + + if (unlock_pup != new_lockup_pup) { + DEBUG_DQS_S("DDR3 - DQS - Special II - Some Pup passed!\n"); + } + + /* Search for pups with passed compare & already fail */ + pass_pup = first_fail & ~new_lockup_pup & unlock_pup; + first_fail |= new_lockup_pup; + unlock_pup &= ~pass_pup; + + /* Get pass pups */ + if (pass_pup != 0) { + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(pass_pup, pup) == + 1) { + /* If pup passed and has first fail = 1 */ + /* keep min value of ADLL max value - current adll */ + /* (adll_idx) = current adll !!! 
*/ + comp_val = adll_idx; + + DEBUG_DQS_C("DDR3 - DQS - Special II - Pup - ", + pup, 1); + DEBUG_DQS_C(" comp_val = ", + comp_val, 1); + + if (comp_val < + special_res[pup]) { + special_res[pup] = + comp_val; + centralization_high_limit + [pup] = + ADLL_MAX + + comp_val; + + DEBUG_DQS_C + ("DDR3 - DQS - Special II - Pup - ", + pup, 1); + DEBUG_DQS_C + (" Changed High limit to ", + centralization_high_limit + [pup], 2); + } + } + } + } + + /* + * Did all PUP found missing window? + * Check for each pup if adll (different for each pup) + * reach maximum if reach max value - lock the pup + * if not - increment (Move to right one phase - ADLL) + * dqs RX delay + */ + adll_idx++; + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(unlock_pup, pup) == 1) { + /* Check only unlocked pups */ + if ((adll_idx) >= ADLL_MAX) { + /* Reach maximum - lock the pup */ + DEBUG_DQS_C("DDR3 - DQS - Special II - reach maximum - lock pup ", + pup, 1); + unlock_pup &= ~(1 << pup); + } else { + /* Didn't reach maximum - increment ADLL */ + ddr3_write_pup_reg(PUP_DQS_RD, + cs, + pup + + (ecc * + ECC_PUP), 0, + (adll_idx)); + } + } + } + } while (unlock_pup != 0); + } + + return MV_OK; +} + +/* + * Name: ddr3_set_dqs_centralization_results + * Desc: Set to HW the DQS centralization phase results. + * Args: + * is_tx Indicates whether to set Tx or RX results + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_set_dqs_centralization_results(MV_DRAM_INFO *dram_info, u32 cs, + u32 ecc, int is_tx) +{ + u32 pup, pup_num; + int addl_val; + u32 max_pup; + + max_pup = (ecc + (1 - ecc) * dram_info->num_of_std_pups); + + DEBUG_DQS_RESULTS_S("\n############ LOG LEVEL 2(Windows margins) ############\n"); + + if (is_tx) { + DEBUG_DQS_RESULTS_C("DDR3 - DQS TX - Set Dqs Centralization Results - CS: ", + cs, 1); + } else { + DEBUG_DQS_RESULTS_C("DDR3 - DQS RX - Set Dqs Centralization Results - CS: ", + cs, 1); + } + + /* Set adll to center = (General_High_limit + General_Low_limit)/2 */ + DEBUG_DQS_RESULTS_S("\nDQS LOW HIGH WIN-SIZE Set\n"); + DEBUG_DQS_RESULTS_S("==============================================\n"); + for (pup = 0; pup < max_pup; pup++) { + addl_val = (centralization_high_limit[pup] + + centralization_low_limit[pup]) / 2; + + pup_num = pup * (1 - ecc) + ecc * ECC_PUP; + + DEBUG_DQS_RESULTS_D(pup_num, 1); + DEBUG_DQS_RESULTS_S(" 0x"); + DEBUG_DQS_RESULTS_D(centralization_low_limit[pup], 2); + DEBUG_DQS_RESULTS_S(" 0x"); + DEBUG_DQS_RESULTS_D(centralization_high_limit[pup], 2); + DEBUG_DQS_RESULTS_S(" 0x"); + DEBUG_DQS_RESULTS_D(centralization_high_limit[pup] - + centralization_low_limit[pup], 2); + DEBUG_DQS_RESULTS_S(" 0x"); + DEBUG_DQS_RESULTS_D(addl_val, 2); + DEBUG_DQS_RESULTS_S("\n"); + + if (addl_val < ADLL_MIN) { + addl_val = ADLL_MIN; + DEBUG_DQS_RESULTS_S("DDR3 - DQS - Setting ADLL value for Pup to MIN (since it was lower than 0)\n"); + } + + if (addl_val > ADLL_MAX) { + addl_val = ADLL_MAX; + DEBUG_DQS_RESULTS_S("DDR3 - DQS - Setting ADLL value for Pup to MAX (since it was higher than 31)\n"); + } + + if (is_tx) { + ddr3_write_pup_reg(PUP_DQS_WR, cs, pup_num, 0, + addl_val + + dram_info->wl_val[cs][pup_num][D]); + } else { + ddr3_write_pup_reg(PUP_DQS_RD, cs, pup_num, 0, + addl_val); + } + } + + return MV_OK; +} + +/* + * Set training patterns + */ +int ddr3_load_dqs_patterns(MV_DRAM_INFO *dram_info) +{ + u32 cs, cs_count, cs_tmp, victim_dq; + u32 sdram_addr; + u32 *pattern_ptr; + + /* Loop for each CS */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 
<< cs)) { + cs_count = 0; + for (cs_tmp = 0; cs_tmp < cs; cs_tmp++) { + if (dram_info->cs_ena & (1 << cs_tmp)) + cs_count++; + } + + /* Init killer pattern */ + sdram_addr = (cs_count * (SDRAM_CS_SIZE + 1) + + SDRAM_DQS_RX_OFFS); + for (victim_dq = 0; victim_dq < DQ_NUM; victim_dq++) { + pattern_ptr = ddr3_dqs_choose_pattern(dram_info, + victim_dq); + if (MV_OK != ddr3_sdram_dqs_compare( + dram_info, (u32)NULL, NULL, + pattern_ptr, LEN_KILLER_PATTERN, + sdram_addr + LEN_KILLER_PATTERN * + 4 * victim_dq, 1, 0, NULL, + 0)) + return MV_DDR3_TRAINING_ERR_DQS_PATTERN; + } + + /* Init special-killer pattern */ + sdram_addr = (cs_count * (SDRAM_CS_SIZE + 1) + + SDRAM_DQS_RX_SPECIAL_OFFS); + for (victim_dq = 0; victim_dq < DQ_NUM; victim_dq++) { + if (MV_OK != ddr3_sdram_dqs_compare( + dram_info, (u32)NULL, NULL, + special_pattern[victim_dq], + LEN_KILLER_PATTERN, sdram_addr + + LEN_KILLER_PATTERN * 4 * victim_dq, + 1, 0, NULL, 0)) + return MV_DDR3_TRAINING_ERR_DQS_PATTERN; + } + } + } + + return MV_OK; +} diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_hw_training.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_hw_training.c new file mode 100644 index 000000000..35d98faf5 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_hw_training.c @@ -0,0 +1,1116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <log.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> +#include <linux/delay.h> + +#include "ddr3_init.h" +#include "ddr3_hw_training.h" +#include "xor.h" + +#ifdef MV88F78X60 +#include "ddr3_patterns_64bit.h" +#else +#include "ddr3_patterns_16bit.h" +#if defined(MV88F672X) +#include "ddr3_patterns_16bit.h" +#endif +#endif + +/* + * Debug + */ + +#define DEBUG_MAIN_C(s, d, l) \ + DEBUG_MAIN_S(s); DEBUG_MAIN_D(d, l); DEBUG_MAIN_S("\n") +#define DEBUG_MAIN_FULL_C(s, d, l) \ + DEBUG_MAIN_FULL_S(s); DEBUG_MAIN_FULL_D(d, l); DEBUG_MAIN_FULL_S("\n") + +#ifdef MV_DEBUG_MAIN +#define DEBUG_MAIN_S(s) puts(s) +#define DEBUG_MAIN_D(d, l) printf("%x", d) +#else +#define DEBUG_MAIN_S(s) +#define DEBUG_MAIN_D(d, l) +#endif + +#ifdef MV_DEBUG_MAIN_FULL +#define DEBUG_MAIN_FULL_S(s) puts(s) +#define DEBUG_MAIN_FULL_D(d, l) printf("%x", d) +#else +#define DEBUG_MAIN_FULL_S(s) +#define DEBUG_MAIN_FULL_D(d, l) +#endif + +#ifdef MV_DEBUG_SUSPEND_RESUME +#define DEBUG_SUSPEND_RESUME_S(s) puts(s) +#define DEBUG_SUSPEND_RESUME_D(d, l) printf("%x", d) +#else +#define DEBUG_SUSPEND_RESUME_S(s) +#define DEBUG_SUSPEND_RESUME_D(d, l) +#endif + +static u32 ddr3_sw_wl_rl_debug; +static u32 ddr3_run_pbs = 1; + +void ddr3_print_version(void) +{ + puts("DDR3 Training Sequence - Ver 5.7."); +} + +void ddr3_set_sw_wl_rl_debug(u32 val) +{ + ddr3_sw_wl_rl_debug = val; +} + +void ddr3_set_pbs(u32 val) +{ + ddr3_run_pbs = val; +} + +int ddr3_hw_training(u32 target_freq, u32 ddr_width, int xor_bypass, + u32 scrub_offs, u32 scrub_size, int dqs_clk_aligned, + int debug_mode, int reg_dimm_skip_wl) +{ + /* A370 has no PBS mechanism */ + __maybe_unused u32 first_loop_flag = 0; + u32 freq, reg; + MV_DRAM_INFO dram_info; + int ratio_2to1 = 0; + int tmp_ratio = 1; + int status; + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 1\n"); + + memset(&dram_info, 0, sizeof(dram_info)); + dram_info.num_cs = ddr3_get_cs_num_from_reg(); + dram_info.cs_ena = ddr3_get_cs_ena_from_reg(); + dram_info.target_frequency = target_freq; + dram_info.ddr_width = ddr_width; + 
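The two pup-count fields assigned next drive every per-pup loop in this driver: the data bus is split into 8-bit PHY units ("pups"), and ECC contributes one extra unit. A minimal standalone sketch of that arithmetic (helper names are illustrative, not from the patch):

	#include <stdio.h>

	#define PUP_SIZE	8	/* one PHY unit (pup) per data byte */

	/* Standard pups: data bus width divided into 8-bit lanes */
	static unsigned int std_pups(unsigned int ddr_width)
	{
		return ddr_width / PUP_SIZE;
	}

	/* Total pups: standard pups plus one more when ECC is enabled */
	static unsigned int total_pups(unsigned int ddr_width,
				       unsigned int ecc_ena)
	{
		return std_pups(ddr_width) + ecc_ena;
	}

	int main(void)
	{
		/* 64-bit bus with ECC: 8 standard pups + 1 ECC pup = 9 */
		printf("%u\n", total_pups(64, 1));
		return 0;
	}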
dram_info.num_of_std_pups = ddr_width / PUP_SIZE; + dram_info.rl400_bug = 0; + dram_info.multi_cs_mr_support = 0; +#ifdef MV88F67XX + dram_info.rl400_bug = 1; +#endif + + /* Ignore ECC errors - if ECC is enabled */ + reg = reg_read(REG_SDRAM_CONFIG_ADDR); + if (reg & (1 << REG_SDRAM_CONFIG_ECC_OFFS)) { + dram_info.ecc_ena = 1; + reg |= (1 << REG_SDRAM_CONFIG_IERR_OFFS); + reg_write(REG_SDRAM_CONFIG_ADDR, reg); + } else { + dram_info.ecc_ena = 0; + } + + reg = reg_read(REG_SDRAM_CONFIG_ADDR); + if (reg & (1 << REG_SDRAM_CONFIG_REGDIMM_OFFS)) + dram_info.reg_dimm = 1; + else + dram_info.reg_dimm = 0; + + dram_info.num_of_total_pups = ddr_width / PUP_SIZE + dram_info.ecc_ena; + + /* Get target 2T value */ + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR); + dram_info.mode_2t = (reg >> REG_DUNIT_CTRL_LOW_2T_OFFS) & + REG_DUNIT_CTRL_LOW_2T_MASK; + + /* Get target CL value */ +#ifdef MV88F67XX + reg = reg_read(REG_DDR3_MR0_ADDR) >> 2; +#else + reg = reg_read(REG_DDR3_MR0_CS_ADDR) >> 2; +#endif + + reg = (((reg >> 1) & 0xE) | (reg & 0x1)) & 0xF; + dram_info.cl = ddr3_valid_cl_to_cl(reg); + + /* Get target CWL value */ +#ifdef MV88F67XX + reg = reg_read(REG_DDR3_MR2_ADDR) >> REG_DDR3_MR2_CWL_OFFS; +#else + reg = reg_read(REG_DDR3_MR2_CS_ADDR) >> REG_DDR3_MR2_CWL_OFFS; +#endif + + reg &= REG_DDR3_MR2_CWL_MASK; + dram_info.cwl = reg; +#if !defined(MV88F67XX) + /* A370 has no PBS mechanism */ +#if defined(MV88F78X60) + if ((dram_info.target_frequency > DDR_400) && (ddr3_run_pbs)) + first_loop_flag = 1; +#else + /* first_loop_flag = 1; skip mid freq at ALP/A375 */ + if ((dram_info.target_frequency > DDR_400) && (ddr3_run_pbs) && + (mv_ctrl_revision_get() >= UMC_A0)) + first_loop_flag = 1; + else + first_loop_flag = 0; +#endif +#endif + + freq = dram_info.target_frequency; + + /* Set ODT to always on */ + ddr3_odt_activate(1); + + /* Init XOR */ + mv_sys_xor_init(&dram_info); + + /* Get DRAM/HCLK ratio */ + if (reg_read(REG_DDR_IO_ADDR) & (1 << REG_DDR_IO_CLK_RATIO_OFFS)) + ratio_2to1 = 1; + + /* + * Xor Bypass - ECC support in AXP is currently available for 1:1 + * modes frequency modes. + * Not all frequency modes support the ddr3 training sequence + * (Only 1200/300). + * Xor Bypass allows using the Xor initializations and scrubbing + * inside the ddr3 training sequence without running the training + * itself. 
+ */ + if (xor_bypass == 0) { + if (ddr3_run_pbs) { + DEBUG_MAIN_S("DDR3 Training Sequence - Run with PBS.\n"); + } else { + DEBUG_MAIN_S("DDR3 Training Sequence - Run without PBS.\n"); + } + + if (dram_info.target_frequency > DFS_MARGIN) { + tmp_ratio = 0; + freq = DDR_100; + + if (dram_info.reg_dimm == 1) + freq = DDR_300; + + if (MV_OK != ddr3_dfs_high_2_low(freq, &dram_info)) { + /* Set low - 100Mhz DDR Frequency by HW */ + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Dfs High2Low)\n"); + return MV_DDR3_TRAINING_ERR_DFS_H2L; + } + + if ((dram_info.reg_dimm == 1) && + (reg_dimm_skip_wl == 0)) { + if (MV_OK != + ddr3_write_leveling_hw_reg_dimm(freq, + &dram_info)) + DEBUG_MAIN_S("DDR3 Training Sequence - Registered DIMM Low WL - SKIP\n"); + } + + if (ddr3_get_log_level() >= MV_LOG_LEVEL_1) + ddr3_print_freq(freq); + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 2\n"); + } else { + if (!dqs_clk_aligned) { +#ifdef MV88F67XX + /* + * If running training sequence without DFS, + * we must run Write leveling before writing + * the patterns + */ + + /* + * ODT - Multi CS system use SW WL, + * Single CS System use HW WL + */ + if (dram_info.cs_ena > 1) { + if (MV_OK != + ddr3_write_leveling_sw( + freq, tmp_ratio, + &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Sw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_SW; + } + } else { + if (MV_OK != + ddr3_write_leveling_hw(freq, + &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Hw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_HW; + } + } +#else + if (MV_OK != ddr3_write_leveling_hw( + freq, &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Hw)\n"); + if (ddr3_sw_wl_rl_debug) { + if (MV_OK != + ddr3_write_leveling_sw( + freq, tmp_ratio, + &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Sw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_SW; + } + } else { + return MV_DDR3_TRAINING_ERR_WR_LVL_HW; + } + } +#endif + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 3\n"); + } + + if (MV_OK != ddr3_load_patterns(&dram_info, 0)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Loading Patterns)\n"); + return MV_DDR3_TRAINING_ERR_LOAD_PATTERNS; + } + + /* + * TODO: + * The mainline U-Boot port of the bin_hdr DDR training code + * needs a delay of minimum 20ms here (10ms is a bit too short + * and the CPU hangs). The bin_hdr code doesn't have this delay. + * To be save here, lets add a delay of 50ms here. 
+ * + * Tested on the Marvell DB-MV784MP-GP board + */ + mdelay(50); + + do { + freq = dram_info.target_frequency; + tmp_ratio = ratio_2to1; + DEBUG_MAIN_FULL_S("DDR3 Training Sequence - DEBUG - 4\n"); + +#if defined(MV88F78X60) + /* + * There is a difference on the DFS frequency at the + * first iteration of this loop + */ + if (first_loop_flag) { + freq = DDR_400; + tmp_ratio = 0; + } +#endif + + if (MV_OK != ddr3_dfs_low_2_high(freq, tmp_ratio, + &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Dfs Low2High)\n"); + return MV_DDR3_TRAINING_ERR_DFS_H2L; + } + + if (ddr3_get_log_level() >= MV_LOG_LEVEL_1) { + ddr3_print_freq(freq); + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 5\n"); + + /* Write leveling */ + if (!dqs_clk_aligned) { +#ifdef MV88F67XX + /* + * ODT - Multi CS system that not support Multi + * CS MRS commands must use SW WL + */ + if (dram_info.cs_ena > 1) { + if (MV_OK != ddr3_write_leveling_sw( + freq, tmp_ratio, &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Sw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_SW; + } + } else { + if (MV_OK != ddr3_write_leveling_hw( + freq, &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Hw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_HW; + } + } +#else + if ((dram_info.reg_dimm == 1) && + (freq == DDR_400)) { + if (reg_dimm_skip_wl == 0) { + if (MV_OK != ddr3_write_leveling_hw_reg_dimm( + freq, &dram_info)) + DEBUG_MAIN_S("DDR3 Training Sequence - Registered DIMM WL - SKIP\n"); + } + } else { + if (MV_OK != ddr3_write_leveling_hw( + freq, &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Hw)\n"); + if (ddr3_sw_wl_rl_debug) { + if (MV_OK != ddr3_write_leveling_sw( + freq, tmp_ratio, &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Sw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_SW; + } + } else { + return MV_DDR3_TRAINING_ERR_WR_LVL_HW; + } + } + } +#endif + if (debug_mode) + DEBUG_MAIN_S + ("DDR3 Training Sequence - DEBUG - 6\n"); + } + + /* Read Leveling */ + /* + * Armada 370 - Support for HCLK @ 400MHZ - must use + * SW read leveling + */ + if (freq == DDR_400 && dram_info.rl400_bug) { + status = ddr3_read_leveling_sw(freq, tmp_ratio, + &dram_info); + if (MV_OK != status) { + DEBUG_MAIN_S + ("DDR3 Training Sequence - FAILED (Read Leveling Sw)\n"); + return status; + } + } else { + if (MV_OK != ddr3_read_leveling_hw( + freq, &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Read Leveling Hw)\n"); + if (ddr3_sw_wl_rl_debug) { + if (MV_OK != ddr3_read_leveling_sw( + freq, tmp_ratio, + &dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Read Leveling Sw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_SW; + } + } else { + return MV_DDR3_TRAINING_ERR_WR_LVL_HW; + } + } + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 7\n"); + + if (MV_OK != ddr3_wl_supplement(&dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Hi-Freq Sup)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_HI_FREQ; + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 8\n"); +#if !defined(MV88F67XX) + /* A370 has no PBS mechanism */ +#if defined(MV88F78X60) || defined(MV88F672X) + if (first_loop_flag == 1) { + first_loop_flag = 0; + + status = MV_OK; + status = ddr3_pbs_rx(&dram_info); + if (MV_OK != status) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (PBS RX)\n"); + return status; + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training 
Sequence - DEBUG - 9\n"); + + status = ddr3_pbs_tx(&dram_info); + if (MV_OK != status) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (PBS TX)\n"); + return status; + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 10\n"); + } +#endif +#endif + } while (freq != dram_info.target_frequency); + + status = ddr3_dqs_centralization_rx(&dram_info); + if (MV_OK != status) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (DQS Centralization RX)\n"); + return status; + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 11\n"); + + status = ddr3_dqs_centralization_tx(&dram_info); + if (MV_OK != status) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (DQS Centralization TX)\n"); + return status; + } + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 12\n"); + } + + ddr3_set_performance_params(&dram_info); + + if (dram_info.ecc_ena) { + /* Need to SCRUB the DRAM memory area to load U-Boot */ + mv_sys_xor_finish(); + dram_info.num_cs = 1; + dram_info.cs_ena = 1; + mv_sys_xor_init(&dram_info); + mv_xor_mem_init(0, scrub_offs, scrub_size, 0xdeadbeef, + 0xdeadbeef); + + /* Wait for previous transfer completion */ + while (mv_xor_state_get(0) != MV_IDLE) + ; + + if (debug_mode) + DEBUG_MAIN_S("DDR3 Training Sequence - DEBUG - 13\n"); + } + + /* Return XOR State */ + mv_sys_xor_finish(); + +#if defined(MV88F78X60) + /* Save training results in memeory for resume state */ + ddr3_save_training(&dram_info); +#endif + /* Clear ODT always on */ + ddr3_odt_activate(0); + + /* Configure Dynamic read ODT */ + ddr3_odt_read_dynamic_config(&dram_info); + + return MV_OK; +} + +void ddr3_set_performance_params(MV_DRAM_INFO *dram_info) +{ + u32 twr2wr, trd2rd, trd2wr_wr2rd; + u32 tmp1, tmp2, reg; + + DEBUG_MAIN_FULL_C("Max WL Phase: ", dram_info->wl_max_phase, 2); + DEBUG_MAIN_FULL_C("Min WL Phase: ", dram_info->wl_min_phase, 2); + DEBUG_MAIN_FULL_C("Max RL Phase: ", dram_info->rl_max_phase, 2); + DEBUG_MAIN_FULL_C("Min RL Phase: ", dram_info->rl_min_phase, 2); + + if (dram_info->wl_max_phase < 2) + twr2wr = 0x2; + else + twr2wr = 0x3; + + trd2rd = 0x1 + (dram_info->rl_max_phase + 1) / 2 + + (dram_info->rl_max_phase + 1) % 2; + + tmp1 = (dram_info->rl_max_phase - dram_info->wl_min_phase) / 2 + + (((dram_info->rl_max_phase - dram_info->wl_min_phase) % 2) > + 0 ? 1 : 0); + tmp2 = (dram_info->wl_max_phase - dram_info->rl_min_phase) / 2 + + ((dram_info->wl_max_phase - dram_info->rl_min_phase) % 2 > + 0 ? 1 : 0); + trd2wr_wr2rd = (tmp1 >= tmp2) ? 
tmp1 : tmp2; + + trd2wr_wr2rd += 2; + trd2rd += 2; + twr2wr += 2; + + DEBUG_MAIN_FULL_C("WR 2 WR: ", twr2wr, 2); + DEBUG_MAIN_FULL_C("RD 2 RD: ", trd2rd, 2); + DEBUG_MAIN_FULL_C("RD 2 WR / WR 2 RD: ", trd2wr_wr2rd, 2); + + reg = reg_read(REG_SDRAM_TIMING_HIGH_ADDR); + + reg &= ~(REG_SDRAM_TIMING_H_W2W_MASK << REG_SDRAM_TIMING_H_W2W_OFFS); + reg |= ((twr2wr & REG_SDRAM_TIMING_H_W2W_MASK) << + REG_SDRAM_TIMING_H_W2W_OFFS); + + reg &= ~(REG_SDRAM_TIMING_H_R2R_MASK << REG_SDRAM_TIMING_H_R2R_OFFS); + reg &= ~(REG_SDRAM_TIMING_H_R2R_H_MASK << + REG_SDRAM_TIMING_H_R2R_H_OFFS); + reg |= ((trd2rd & REG_SDRAM_TIMING_H_R2R_MASK) << + REG_SDRAM_TIMING_H_R2R_OFFS); + reg |= (((trd2rd >> 2) & REG_SDRAM_TIMING_H_R2R_H_MASK) << + REG_SDRAM_TIMING_H_R2R_H_OFFS); + + reg &= ~(REG_SDRAM_TIMING_H_R2W_W2R_MASK << + REG_SDRAM_TIMING_H_R2W_W2R_OFFS); + reg &= ~(REG_SDRAM_TIMING_H_R2W_W2R_H_MASK << + REG_SDRAM_TIMING_H_R2W_W2R_H_OFFS); + reg |= ((trd2wr_wr2rd & REG_SDRAM_TIMING_H_R2W_W2R_MASK) << + REG_SDRAM_TIMING_H_R2W_W2R_OFFS); + reg |= (((trd2wr_wr2rd >> 2) & REG_SDRAM_TIMING_H_R2W_W2R_H_MASK) << + REG_SDRAM_TIMING_H_R2W_W2R_H_OFFS); + + reg_write(REG_SDRAM_TIMING_HIGH_ADDR, reg); +} + +/* + * Perform DDR3 PUP Indirect Write + */ +void ddr3_write_pup_reg(u32 mode, u32 cs, u32 pup, u32 phase, u32 delay) +{ + u32 reg = 0; + + if (pup == PUP_BC) + reg |= (1 << REG_PHY_BC_OFFS); + else + reg |= (pup << REG_PHY_PUP_OFFS); + + reg |= ((0x4 * cs + mode) << REG_PHY_CS_OFFS); + reg |= (phase << REG_PHY_PHASE_OFFS) | delay; + + if (mode == PUP_WL_MODE) + reg |= ((INIT_WL_DELAY + delay) << REG_PHY_DQS_REF_DLY_OFFS); + + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + reg |= REG_PHY_REGISTRY_FILE_ACCESS_OP_WR; + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + + do { + reg = reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR) & + REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE; + } while (reg); /* Wait for '0' to mark the end of the transaction */ + + /* If read Leveling mode - need to write to register 3 separetly */ + if (mode == PUP_RL_MODE) { + reg = 0; + + if (pup == PUP_BC) + reg |= (1 << REG_PHY_BC_OFFS); + else + reg |= (pup << REG_PHY_PUP_OFFS); + + reg |= ((0x4 * cs + mode + 1) << REG_PHY_CS_OFFS); + reg |= (INIT_RL_DELAY); + + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + reg |= REG_PHY_REGISTRY_FILE_ACCESS_OP_WR; + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + + do { + reg = reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR) & + REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE; + } while (reg); + } +} + +/* + * Perform DDR3 PUP Indirect Read + */ +u32 ddr3_read_pup_reg(u32 mode, u32 cs, u32 pup) +{ + u32 reg; + + reg = (pup << REG_PHY_PUP_OFFS) | + ((0x4 * cs + mode) << REG_PHY_CS_OFFS); + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + + reg |= REG_PHY_REGISTRY_FILE_ACCESS_OP_RD; + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + + do { + reg = reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR) & + REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE; + } while (reg); /* Wait for '0' to mark the end of the transaction */ + + return reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR); /* 0x16A0 */ +} + +/* + * Set training patterns + */ +int ddr3_load_patterns(MV_DRAM_INFO *dram_info, int resume) +{ + u32 reg; + + /* Enable SW override - Required for the ECC Pup */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, 
reg); + + reg = (1 << REG_DRAM_TRAINING_AUTO_OFFS); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + if (resume == 0) { +#if defined(MV88F78X60) || defined(MV88F672X) + ddr3_load_pbs_patterns(dram_info); +#endif + ddr3_load_dqs_patterns(dram_info); + } + + /* Disable SW override - Must be in a different stage */ + /* [0]=0 - Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_1_ADDR) | + (1 << REG_DRAM_TRAINING_1_TRNBPOINT_OFFS); + reg_write(REG_DRAM_TRAINING_1_ADDR, reg); + + /* Set Base Addr */ +#if defined(MV88F67XX) + reg_write(REG_DRAM_TRAINING_PATTERN_BASE_ADDR, 0); +#else + if (resume == 0) + reg_write(REG_DRAM_TRAINING_PATTERN_BASE_ADDR, 0); + else + reg_write(REG_DRAM_TRAINING_PATTERN_BASE_ADDR, + RESUME_RL_PATTERNS_ADDR); +#endif + + /* Set Patterns */ + if (resume == 0) { + reg = (dram_info->cs_ena << REG_DRAM_TRAINING_CS_OFFS) | + (1 << REG_DRAM_TRAINING_PATTERNS_OFFS); + } else { + reg = (0x1 << REG_DRAM_TRAINING_CS_OFFS) | + (1 << REG_DRAM_TRAINING_PATTERNS_OFFS); + } + + reg |= (1 << REG_DRAM_TRAINING_AUTO_OFFS); + + reg_write(REG_DRAM_TRAINING_ADDR, reg); + + udelay(100); + + /* Check if Successful */ + if (reg_read(REG_DRAM_TRAINING_ADDR) & + (1 << REG_DRAM_TRAINING_ERROR_OFFS)) + return MV_OK; + else + return MV_FAIL; +} + +#if !defined(MV88F67XX) +/* + * Name: ddr3_save_training(MV_DRAM_INFO *dram_info) + * Desc: saves the training results to memeory (RL,WL,PBS,Rx/Tx + * Centeralization) + * Args: MV_DRAM_INFO *dram_info + * Notes: + * Returns: None. + */ +void ddr3_save_training(MV_DRAM_INFO *dram_info) +{ + u32 val, pup, tmp_cs, cs, i, dq; + u32 crc = 0; + u32 regs = 0; + u32 *sdram_offset = (u32 *)RESUME_TRAINING_VALUES_ADDR; + u32 mode_config[MAX_TRAINING_MODE]; + + mode_config[DQS_WR_MODE] = PUP_DQS_WR; + mode_config[WL_MODE_] = PUP_WL_MODE; + mode_config[RL_MODE_] = PUP_RL_MODE; + mode_config[DQS_RD_MODE] = PUP_DQS_RD; + mode_config[PBS_TX_DM_MODE] = PUP_PBS_TX_DM; + mode_config[PBS_TX_MODE] = PUP_PBS_TX; + mode_config[PBS_RX_MODE] = PUP_PBS_RX; + + /* num of training modes */ + for (i = 0; i < MAX_TRAINING_MODE; i++) { + tmp_cs = dram_info->cs_ena; + /* num of CS */ + for (cs = 0; cs < MAX_CS; cs++) { + if (tmp_cs & (1 << cs)) { + /* num of PUPs */ + for (pup = 0; pup < dram_info->num_of_total_pups; + pup++) { + if (pup == dram_info->num_of_std_pups && + dram_info->ecc_ena) + pup = ECC_PUP; + if (i == PBS_TX_DM_MODE) { + /* + * Change CS bitmask because + * PBS works only with CS0 + */ + tmp_cs = 0x1; + val = ddr3_read_pup_reg( + mode_config[i], CS0, pup); + } else if (i == PBS_TX_MODE || + i == PBS_RX_MODE) { + /* + * Change CS bitmask because + * PBS works only with CS0 + */ + tmp_cs = 0x1; + for (dq = 0; dq <= DQ_NUM; + dq++) { + val = ddr3_read_pup_reg( + mode_config[i] + dq, + CS0, + pup); + (*sdram_offset) = val; + crc += *sdram_offset; + sdram_offset++; + regs++; + } + continue; + } else { + val = ddr3_read_pup_reg( + mode_config[i], cs, pup); + } + + *sdram_offset = val; + crc += *sdram_offset; + sdram_offset++; + regs++; + } + } + } + } + + *sdram_offset = reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR); + crc += *sdram_offset; + sdram_offset++; + regs++; + *sdram_offset = reg_read(REG_READ_DATA_READY_DELAYS_ADDR); + crc += *sdram_offset; + sdram_offset++; + regs++; + sdram_offset = (u32 *)NUM_OF_REGISTER_ADDR; + *sdram_offset = regs; + 
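Worth noting: the "CRC" accumulated above is a plain 32-bit additive sum of the saved words, not a true CRC. The restore path recomputes the same sum and compares it against the word stored at CHECKSUM_RESULT_ADDR. A standalone sketch of that check (function names are illustrative, not from the patch):

	#include <stdint.h>

	/* Additive checksum over the saved training words (not a true CRC) */
	static uint32_t training_checksum(const uint32_t *vals, uint32_t count)
	{
		uint32_t sum = 0;

		while (count--)
			sum += *vals++;

		return sum;
	}

	/* Restore-side validation, mirroring ddr3_read_training_results() */
	static int training_results_valid(const uint32_t *vals, uint32_t count,
					  uint32_t stored_sum)
	{
		return training_checksum(vals, count) == stored_sum;
	}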
DEBUG_SUSPEND_RESUME_S("Training Results CheckSum write= ");
+	DEBUG_SUSPEND_RESUME_D(crc, 8);
+	DEBUG_SUSPEND_RESUME_S("\n");
+	sdram_offset = (u32 *)CHECKSUM_RESULT_ADDR;
+	*sdram_offset = crc;
+}
+
+/*
+ * Name:     ddr3_read_training_results()
+ * Desc:     Reads the training results from memory (RL, WL, PBS, Rx/Tx
+ *           centralization) and writes them to the relevant registers
+ * Args:     none
+ * Notes:
+ * Returns:  MV_OK if success, MV_FAIL on checksum mismatch.
+ */
+int ddr3_read_training_results(void)
+{
+	u32 val, reg, idx, dqs_wr_idx = 0, crc = 0;
+	u32 *sdram_offset = (u32 *)RESUME_TRAINING_VALUES_ADDR;
+	u32 training_val[RESUME_TRAINING_VALUES_MAX] = { 0 };
+	u32 regs = *((u32 *)NUM_OF_REGISTER_ADDR);
+
+	/*
+	 * Read training results & Dunit registers from memory and write
+	 * them to an array
+	 */
+	for (idx = 0; idx < regs; idx++) {
+		training_val[idx] = *sdram_offset;
+		crc += *sdram_offset;
+		sdram_offset++;
+	}
+
+	sdram_offset = (u32 *)CHECKSUM_RESULT_ADDR;
+
+	if ((*sdram_offset) == crc) {
+		DEBUG_SUSPEND_RESUME_S("Training Results CheckSum read PASS= ");
+		DEBUG_SUSPEND_RESUME_D(crc, 8);
+		DEBUG_SUSPEND_RESUME_S("\n");
+	} else {
+		DEBUG_MAIN_S("Wrong Training Results CheckSum\n");
+		return MV_FAIL;
+	}
+
+	/*
+	 * We iterate through all the registers except for the last 2 since
+	 * they are Dunit registers (and not PHY registers)
+	 */
+	for (idx = 0; idx < (regs - 2); idx++) {
+		val = training_val[idx];
+		reg = (val >> REG_PHY_CS_OFFS) & 0x3F; /* read the phy address */
+
+		/* Check if the values belong to the DQS WR */
+		if (reg == PUP_WL_MODE) {
+			/* bit[5:0] in DQS_WR are delay */
+			val = (training_val[dqs_wr_idx++] & 0x3F);
+			/*
+			 * bit[15:10] are DQS_WR delay & bit[9:0] are
+			 * WL phase & delay
+			 */
+			val = (val << REG_PHY_DQS_REF_DLY_OFFS) |
+				(training_val[idx] & 0x3C003FF);
+			/* Add request pending and write operation bits */
+			val |= REG_PHY_REGISTRY_FILE_ACCESS_OP_WR;
+		} else if (reg == PUP_DQS_WR) {
+			/*
+			 * Do nothing since DQS_WR will be done in PUP_WL_MODE
+			 */
+			continue;
+		}
+
+		val |= REG_PHY_REGISTRY_FILE_ACCESS_OP_WR;
+		reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, val);
+		do {
+			val = (reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR)) &
+				REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE;
+		} while (val);	/* Wait for '0' to mark the end of the transaction */
+	}
+
+	/* Write the last 2 Dunit configurations */
+	val = training_val[idx];
+	reg_write(REG_READ_DATA_SAMPLE_DELAYS_ADDR, val);	/* reg 0x1538 */
+	val = training_val[idx + 1];
+	reg_write(REG_READ_DATA_READY_DELAYS_ADDR, val);	/* reg 0x153c */
+
+	return MV_OK;
+}
+
+/*
+ * Name:     ddr3_check_if_resume_mode()
+ * Desc:     Reads the address (0x3000) of the Resume Magic word (0xDEADB002)
+ * Args:     MV_DRAM_INFO *dram_info
+ * Notes:
+ * Returns:  return (magic_word == SUSPEND_MAGIC_WORD)
+ */
+int ddr3_check_if_resume_mode(MV_DRAM_INFO *dram_info, u32 freq)
+{
+	u32 magic_word;
+	u32 *sdram_offset = (u32 *)BOOT_INFO_ADDR;
+
+	if (dram_info->reg_dimm != 1) {
+		/*
+		 * Perform write leveling in order to initiate the PHY with
+		 * low frequency
+		 */
+		if (MV_OK != ddr3_write_leveling_hw(freq, dram_info)) {
+			DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Write Leveling Hw)\n");
+			return MV_DDR3_TRAINING_ERR_WR_LVL_HW;
+		}
+	}
+
+	if (MV_OK != ddr3_load_patterns(dram_info, 1)) {
+		DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Loading Patterns)\n");
+		return MV_DDR3_TRAINING_ERR_LOAD_PATTERNS;
+	}
+
+	/* Enable CS0 only for RL */
+	dram_info->cs_ena = 0x1;
+
+	/* Perform read leveling in order to get stable memory */
+	if (MV_OK != ddr3_read_leveling_hw(freq,
dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Read Leveling Hw)\n"); + return MV_DDR3_TRAINING_ERR_WR_LVL_HW; + } + + /* Back to relevant CS */ + dram_info->cs_ena = ddr3_get_cs_ena_from_reg(); + + magic_word = *sdram_offset; + return magic_word == SUSPEND_MAGIC_WORD; +} + +/* + * Name: ddr3_training_suspend_resume() + * Desc: Execute the Resume state + * Args: MV_DRAM_INFO *dram_info + * Notes: + * Returns: return (magic_word == SUSPEND_MAGIC_WORD) + */ +int ddr3_training_suspend_resume(MV_DRAM_INFO *dram_info) +{ + u32 freq, reg; + int tmp_ratio; + + /* Configure DDR */ + if (MV_OK != ddr3_read_training_results()) + return MV_FAIL; + + /* Reset read FIFO */ + reg = reg_read(REG_DRAM_TRAINING_ADDR); + + /* Start Auto Read Leveling procedure */ + reg |= (1 << REG_DRAM_TRAINING_RL_OFFS); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg |= ((1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS) + + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS)); + + /* [0] = 1 - Enable SW override, [4] = 1 - FIFO reset */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + udelay(2); + + reg = reg_read(REG_DRAM_TRAINING_ADDR); + /* Clear Auto Read Leveling procedure */ + reg &= ~(1 << REG_DRAM_TRAINING_RL_OFFS); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + /* Return to target frequency */ + freq = dram_info->target_frequency; + tmp_ratio = 1; + if (MV_OK != ddr3_dfs_low_2_high(freq, tmp_ratio, dram_info)) { + DEBUG_MAIN_S("DDR3 Training Sequence - FAILED (Dfs Low2High)\n"); + return MV_DDR3_TRAINING_ERR_DFS_H2L; + } + + if (dram_info->ecc_ena) { + /* Scabbling the RL area pattern and the training area */ + mv_sys_xor_finish(); + dram_info->num_cs = 1; + dram_info->cs_ena = 1; + mv_sys_xor_init(dram_info); + mv_xor_mem_init(0, RESUME_RL_PATTERNS_ADDR, + RESUME_RL_PATTERNS_SIZE, 0xFFFFFFFF, 0xFFFFFFFF); + + /* Wait for previous transfer completion */ + + while (mv_xor_state_get(0) != MV_IDLE) + ; + + /* Return XOR State */ + mv_sys_xor_finish(); + } + + return MV_OK; +} +#endif + +void ddr3_print_freq(u32 freq) +{ + u32 tmp_freq; + + switch (freq) { + case 0: + tmp_freq = 100; + break; + case 1: + tmp_freq = 300; + break; + case 2: + tmp_freq = 360; + break; + case 3: + tmp_freq = 400; + break; + case 4: + tmp_freq = 444; + break; + case 5: + tmp_freq = 500; + break; + case 6: + tmp_freq = 533; + break; + case 7: + tmp_freq = 600; + break; + case 8: + tmp_freq = 666; + break; + case 9: + tmp_freq = 720; + break; + case 10: + tmp_freq = 800; + break; + default: + tmp_freq = 100; + } + + printf("Current frequency is: %dMHz\n", tmp_freq); +} + +int ddr3_get_min_max_read_sample_delay(u32 cs_enable, u32 reg, u32 *min, + u32 *max, u32 *cs_max) +{ + u32 cs, delay; + + *min = 0xFFFFFFFF; + *max = 0x0; + + for (cs = 0; cs < MAX_CS; cs++) { + if ((cs_enable & (1 << cs)) == 0) + continue; + + delay = ((reg >> (cs * 8)) & 0x1F); + + if (delay < *min) + *min = delay; + + if (delay > *max) { + *max = delay; + *cs_max = cs; + } + } + + return MV_OK; +} + +int ddr3_get_min_max_rl_phase(MV_DRAM_INFO *dram_info, u32 *min, u32 *max, + u32 cs) +{ + u32 pup, reg, phase; + + *min = 0xFFFFFFFF; + *max = 0x0; + + for (pup = 0; pup < dram_info->num_of_total_pups; pup++) { + reg = ddr3_read_pup_reg(PUP_RL_MODE, cs, pup); + phase = ((reg >> 8) & 0x7); + + if (phase < *min) + *min = phase; + + if (phase > *max) + *max = phase; + } + + return MV_OK; +} + +int ddr3_odt_activate(int activate) +{ + 
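For context, this helper brackets the entire training run: ddr3_hw_training() forces the override on before XOR init and training, clears it at the end, and only then configures dynamic read ODT. The calling pattern, as it appears in this file (error handling omitted):

	ddr3_odt_activate(1);	/* force ODT always-on for training */
	/* ... write leveling, read leveling, DQS centralization ... */
	ddr3_odt_activate(0);	/* clear the always-on override */
	ddr3_odt_read_dynamic_config(&dram_info);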
u32 reg, mask; + + mask = (1 << REG_DUNIT_ODT_CTRL_OVRD_OFFS) | + (1 << REG_DUNIT_ODT_CTRL_OVRD_VAL_OFFS); + /* {0x0000149C} - DDR Dunit ODT Control Register */ + reg = reg_read(REG_DUNIT_ODT_CTRL_ADDR); + if (activate) + reg |= mask; + else + reg &= ~mask; + + reg_write(REG_DUNIT_ODT_CTRL_ADDR, reg); + + return MV_OK; +} + +int ddr3_odt_read_dynamic_config(MV_DRAM_INFO *dram_info) +{ + u32 min_read_sample_delay, max_read_sample_delay, max_rl_phase; + u32 min, max, cs_max; + u32 cs_ena, reg; + + reg = reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR); + cs_ena = ddr3_get_cs_ena_from_reg(); + + /* Get minimum and maximum of read sample delay of all CS */ + ddr3_get_min_max_read_sample_delay(cs_ena, reg, &min_read_sample_delay, + &max_read_sample_delay, &cs_max); + + /* + * Get minimum and maximum read leveling phase which belongs to the + * maximal read sample delay + */ + ddr3_get_min_max_rl_phase(dram_info, &min, &max, cs_max); + max_rl_phase = max; + + /* DDR ODT Timing (Low) Register calculation */ + reg = reg_read(REG_ODT_TIME_LOW_ADDR); + reg &= ~(0x1FF << REG_ODT_ON_CTL_RD_OFFS); + reg |= (((min_read_sample_delay - 1) & 0xF) << REG_ODT_ON_CTL_RD_OFFS); + reg |= (((max_read_sample_delay + 4 + (((max_rl_phase + 1) / 2) + 1)) & + 0x1F) << REG_ODT_OFF_CTL_RD_OFFS); + reg_write(REG_ODT_TIME_LOW_ADDR, reg); + + return MV_OK; +} diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_hw_training.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_hw_training.h new file mode 100644 index 000000000..30daaa983 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_hw_training.h @@ -0,0 +1,391 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef __DDR3_TRAINING_H +#define __DDR3_TRAINING_H + +#include "ddr3_init.h" + +#ifdef MV88F78X60 +#include "ddr3_axp.h" +#elif defined(MV88F67XX) +#include "ddr3_a370.h" +#elif defined(MV88F672X) +#include "ddr3_a375.h" +#endif + +/* The following is a list of Marvell status */ +#define MV_ERROR (-1) +#define MV_OK (0x00) /* Operation succeeded */ +#define MV_FAIL (0x01) /* Operation failed */ +#define MV_BAD_VALUE (0x02) /* Illegal value (general) */ +#define MV_OUT_OF_RANGE (0x03) /* The value is out of range */ +#define MV_BAD_PARAM (0x04) /* Illegal parameter in function called */ +#define MV_BAD_PTR (0x05) /* Illegal pointer value */ +#define MV_BAD_SIZE (0x06) /* Illegal size */ +#define MV_BAD_STATE (0x07) /* Illegal state of state machine */ +#define MV_SET_ERROR (0x08) /* Set operation failed */ +#define MV_GET_ERROR (0x09) /* Get operation failed */ +#define MV_CREATE_ERROR (0x0A) /* Fail while creating an item */ +#define MV_NOT_FOUND (0x0B) /* Item not found */ +#define MV_NO_MORE (0x0C) /* No more items found */ +#define MV_NO_SUCH (0x0D) /* No such item */ +#define MV_TIMEOUT (0x0E) /* Time Out */ +#define MV_NO_CHANGE (0x0F) /* Parameter(s) is already in this value */ +#define MV_NOT_SUPPORTED (0x10) /* This request is not support */ +#define MV_NOT_IMPLEMENTED (0x11) /* Request supported but not implemented*/ +#define MV_NOT_INITIALIZED (0x12) /* The item is not initialized */ +#define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */ +#define MV_FULL (0x14) /* Item is full (Queue or table etc...) */ +#define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) 
*/
+#define MV_INIT_ERROR		(0x16)	/* Error occurred during INIT process */
+#define MV_HW_ERROR		(0x17)	/* Hardware error */
+#define MV_TX_ERROR		(0x18)	/* Transmit operation not succeeded */
+#define MV_RX_ERROR		(0x19)	/* Receive operation not succeeded */
+#define MV_NOT_READY		(0x1A)	/* The other side is not ready yet */
+#define MV_ALREADY_EXIST	(0x1B)	/* Tried to create existing item */
+#define MV_OUT_OF_CPU_MEM	(0x1C)	/* CPU memory allocation failed. */
+#define MV_NOT_STARTED		(0x1D)	/* Not started yet */
+#define MV_BUSY			(0x1E)	/* Item is busy. */
+#define MV_TERMINATE		(0x1F)	/* Item terminates its work. */
+#define MV_NOT_ALIGNED		(0x20)	/* Wrong alignment */
+#define MV_NOT_ALLOWED		(0x21)	/* Operation NOT allowed */
+#define MV_WRITE_PROTECT	(0x22)	/* Write protected */
+
+#define MV_INVALID		(int)(-1)
+
+/*
+ * Debug (Enable/Disable modules) and Error report
+ */
+
+#ifdef BASIC_DEBUG
+#define MV_DEBUG_WL
+#define MV_DEBUG_RL
+#define MV_DEBUG_DQS_RESULTS
+#endif
+
+#ifdef FULL_DEBUG
+#define MV_DEBUG_WL
+#define MV_DEBUG_RL
+#define MV_DEBUG_DQS
+
+#define MV_DEBUG_PBS
+#define MV_DEBUG_DFS
+#define MV_DEBUG_MAIN_FULL
+#define MV_DEBUG_DFS_FULL
+#define MV_DEBUG_DQS_FULL
+#define MV_DEBUG_RL_FULL
+#define MV_DEBUG_WL_FULL
+#endif
+
+/*
+ * General Consts
+ */
+
+#define SDRAM_READ_WRITE_LEN_IN_WORDS		16
+#define SDRAM_READ_WRITE_LEN_IN_DOUBLE_WORDS	8
+#define CACHE_LINE_SIZE				0x20
+
+#define SDRAM_CS_BASE				0x0
+
+#define SRAM_BASE				0x40000000
+#define SRAM_SIZE				0xFFF
+
+#define LEN_64BIT_STD_PATTERN			16
+#define LEN_64BIT_KILLER_PATTERN		128
+#define LEN_64BIT_SPECIAL_PATTERN		128
+#define LEN_64BIT_PBS_PATTERN			16
+#define LEN_WL_SUP_PATTERN			32
+
+#define LEN_16BIT_STD_PATTERN			4
+#define LEN_16BIT_KILLER_PATTERN		128
+#define LEN_16BIT_SPECIAL_PATTERN		128
+#define LEN_16BIT_PBS_PATTERN			4
+
+#define CMP_BYTE_SHIFT				8
+#define CMP_BYTE_MASK				0xFF
+#define PUP_SIZE				8
+
+#define S	0
+#define C	1
+#define P	2
+#define D	3
+#define DQS	6
+#define PS	2
+#define DS	3
+#define PE	4
+#define DE	5
+
+#define CS0					0
+#define MAX_DIMM_NUM				2
+#define MAX_DELAY				0x1F
+
+/*
+ * Inversion limit and phase1 limit are workarounds (WA) for the RL @ 1:1
+ * design bug - Armada 370 & AXP Z1
+ */
+#define MAX_DELAY_INV_LIMIT			0x5
+#define MIN_DELAY_PHASE_1_LIMIT			0x10
+
+#define MAX_DELAY_INV				(0x3F - MAX_DELAY_INV_LIMIT)
+#define MIN_DELAY				0
+#define MAX_PUP_NUM				9
+#define ECC_PUP					8
+#define DQ_NUM					8
+#define DQS_DQ_NUM				8
+#define INIT_WL_DELAY				13
+#define INIT_RL_DELAY				15
+#define TWLMRD_DELAY				20
+#define TCLK_3_DELAY				3
+#define ECC_BIT					8
+#define DMA_SIZE				64
+#define MV_DMA_0				0
+#define MAX_TRAINING_RETRY			10
+
+#define PUP_RL_MODE				0x2
+#define PUP_WL_MODE				0
+#define PUP_PBS_TX				0x10
+#define PUP_PBS_TX_DM				0x1A
+#define PUP_PBS_RX				0x30
+#define PUP_DQS_WR				0x1
+#define PUP_DQS_RD				0x3
+#define PUP_BC					10
+#define PUP_DELAY_MASK				0x1F
+#define PUP_PHASE_MASK				0x7
+#define PUP_NUM_64BIT				8
+#define PUP_NUM_32BIT				4
+#define PUP_NUM_16BIT				2
+
+/* control PHY registers */
+#define CNTRL_PUP_DESKEW			0x10
+
+/* WL */
+#define COUNT_WL_HI_FREQ			2
+#define COUNT_WL				2
+#define COUNT_WL_RFRS				9
+#define WL_HI_FREQ_SHIFT			2
+#define WL_HI_FREQ_STATE			1
+#define COUNT_HW_WL				2
+
+/* RL */
+/*
+ * RL_MODE - this define uses the RL mode SW RL instead of the functional
+ * window SW RL
+ */
+#define RL_MODE
+#define RL_WINDOW_WA
+#define MAX_PHASE_1TO1				2
+#define MAX_PHASE_2TO1				4
+
+#define MAX_PHASE_RL_UL_1TO1			0
+#define MAX_PHASE_RL_L_1TO1			4
+#define MAX_PHASE_RL_UL_2TO1			3
+#define MAX_PHASE_RL_L_2TO1			7
+
+#define RL_UNLOCK_STATE				0
+#define RL_WINDOW_STATE				1
+#define RL_FINAL_STATE 2 +#define RL_RETRY_COUNT 2 +#define COUNT_HW_RL 2 + +/* PBS */ +#define MAX_PBS 31 +#define MIN_PBS 0 +#define COUNT_PBS_PATTERN 2 +#define COUNT_PBS_STARTOVER 2 +#define COUNT_PBS_REPEAT 3 +#define COUNT_PBS_COMP_RETRY_NUM 2 +#define PBS_DIFF_LIMIT 31 +#define PATTERN_PBS_TX_A 0x55555555 +#define PATTERN_PBS_TX_B 0xAAAAAAAA + +/* DQS */ +#define ADLL_ERROR 0x55 +#define ADLL_MAX 31 +#define ADLL_MIN 0 +#define MIN_WIN_SIZE 4 +#define VALID_WIN_THRS MIN_WIN_SIZE + +#define MODE_2TO1 1 +#define MODE_1TO1 0 + +/* + * Macros + */ +#define IS_PUP_ACTIVE(_data_, _pup_) (((_data_) >> (_pup_)) & 0x1) + +/* + * Internal ERROR codes + */ +#define MV_DDR3_TRAINING_ERR_WR_LVL_HW 0xDD302001 +#define MV_DDR3_TRAINING_ERR_LOAD_PATTERNS 0xDD302002 +#define MV_DDR3_TRAINING_ERR_WR_LVL_HI_FREQ 0xDD302003 +#define MV_DDR3_TRAINING_ERR_DFS_H2L 0xDD302004 +#define MV_DDR3_TRAINING_ERR_DRAM_COMPARE 0xDD302005 +#define MV_DDR3_TRAINING_ERR_WIN_LIMITS 0xDD302006 +#define MV_DDR3_TRAINING_ERR_PUP_RANGE 0xDD302025 +#define MV_DDR3_TRAINING_ERR_DQS_LOW_LIMIT_SEARCH 0xDD302007 +#define MV_DDR3_TRAINING_ERR_DQS_HIGH_LIMIT_SEARCH 0xDD302008 +#define MV_DDR3_TRAINING_ERR_DQS_PATTERN 0xDD302009 +#define MV_DDR3_TRAINING_ERR_PBS_ADLL_SHR_1PHASE 0xDD302010 +#define MV_DDR3_TRAINING_ERR_PBS_TX_MAX_VAL 0xDD302011 +#define MV_DDR3_TRAINING_ERR_PBS_RX_PER_BIT 0xDD302012 +#define MV_DDR3_TRAINING_ERR_PBS_TX_PER_BIT 0xDD302013 +#define MV_DDR3_TRAINING_ERR_PBS_RX_MAX_VAL 0xDD302014 +#define MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_SRAM_CMP 0xDD302015 +#define MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_MAX_VAL 0xDD302016 +#define MV_DDR3_TRAINING_ERR_RD_LVL_RL_PATTERN 0xDD302017 +#define MV_DDR3_TRAINING_ERR_RD_LVL_RL_PUP_UNLOCK 0xDD302018 +#define MV_DDR3_TRAINING_ERR_RD_LVL_PUP_UNLOCK 0xDD302019 +#define MV_DDR3_TRAINING_ERR_WR_LVL_SW 0xDD302020 +#define MV_DDR3_TRAINING_ERR_PRBS_RX 0xDD302021 +#define MV_DDR3_TRAINING_ERR_DQS_RX 0xDD302022 +#define MV_DDR3_TRAINING_ERR_PRBS_TX 0xDD302023 +#define MV_DDR3_TRAINING_ERR_DQS_TX 0xDD302024 + +/* + * DRAM information structure + */ +typedef struct dram_info { + u32 num_cs; + u32 cs_ena; + u32 num_of_std_pups; /* Q value = ddrWidth/8 - Without ECC!! 
*/ + u32 num_of_total_pups; /* numOfStdPups + eccEna */ + u32 target_frequency; /* DDR Frequency */ + u32 ddr_width; /* 32/64 Bit or 16/32 Bit */ + u32 ecc_ena; /* 0/1 */ + u32 wl_val[MAX_CS][MAX_PUP_NUM][7]; + u32 rl_val[MAX_CS][MAX_PUP_NUM][7]; + u32 rl_max_phase; + u32 rl_min_phase; + u32 wl_max_phase; + u32 wl_min_phase; + u32 rd_smpl_dly; + u32 rd_rdy_dly; + u32 cl; + u32 cwl; + u32 mode_2t; + int rl400_bug; + int multi_cs_mr_support; + int reg_dimm; +} MV_DRAM_INFO; + +enum training_modes { + DQS_WR_MODE, + WL_MODE_, + RL_MODE_, + DQS_RD_MODE, + PBS_TX_DM_MODE, + PBS_TX_MODE, + PBS_RX_MODE, + MAX_TRAINING_MODE, +}; + +typedef struct dram_training_init { + u32 reg_addr; + u32 reg_value; +} MV_DRAM_TRAINING_INIT; + +typedef struct dram_mv_init { + u32 reg_addr; + u32 reg_value; +} MV_DRAM_MC_INIT; + +/* Board/Soc revisions define */ +enum board_rev { + Z1, + Z1_PCAC, + Z1_RD_SLED, + A0, + A0_AMC +}; + +typedef struct dram_modes { + char *mode_name; + u8 cpu_freq; + u8 fab_freq; + u8 chip_id; + int chip_board_rev; + MV_DRAM_MC_INIT *regs; + MV_DRAM_TRAINING_INIT *vals; +} MV_DRAM_MODES; + +/* + * Function Declarations + */ + +u32 cache_inv(u32 addr); +void flush_l1_v7(u32 line); +void flush_l1_v6(u32 line); + +u32 ddr3_cl_to_valid_cl(u32 cl); +u32 ddr3_valid_cl_to_cl(u32 ui_valid_cl); + +void ddr3_write_pup_reg(u32 mode, u32 cs, u32 pup, u32 phase, u32 delay); +u32 ddr3_read_pup_reg(u32 mode, u32 cs, u32 pup); + +int ddr3_sdram_pbs_compare(MV_DRAM_INFO *dram_info, u32 pup_locked, int is_tx, + u32 pbs_pattern_idx, u32 pbs_curr_val, + u32 pbs_lock_val, u32 *skew_array, + u8 *unlock_pup_dq_array, u32 ecc); + +int ddr3_sdram_dqs_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, + u32 pattern_len, u32 sdram_offset, int write, + int mask, u32 *mask_pattern, int b_special_compare); + +int ddr3_sdram_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, u32 pattern_len, + u32 sdram_offset, int write, int mask, + u32 *mask_pattern, int b_special_compare); + +int ddr3_sdram_direct_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, + u32 pattern_len, u32 sdram_offset, int write, + int mask, u32 *mask_pattern); + +int ddr3_sdram_dm_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, + u32 sdram_offset); +int ddr3_dram_sram_read(u32 src, u32 dst, u32 len); +int ddr3_load_patterns(MV_DRAM_INFO *dram_info, int resume); + +int ddr3_read_leveling_hw(u32 freq, MV_DRAM_INFO *dram_info); +int ddr3_read_leveling_sw(u32 freq, int ratio_2to1, MV_DRAM_INFO *dram_info); + +int ddr3_write_leveling_hw(u32 freq, MV_DRAM_INFO *dram_info); +int ddr3_write_leveling_sw(u32 freq, int ratio_2to1, MV_DRAM_INFO *dram_info); +int ddr3_write_leveling_hw_reg_dimm(u32 freq, MV_DRAM_INFO *dram_info); +int ddr3_wl_supplement(MV_DRAM_INFO *dram_info); + +int ddr3_dfs_high_2_low(u32 freq, MV_DRAM_INFO *dram_info); +int ddr3_dfs_low_2_high(u32 freq, int ratio_2to1, MV_DRAM_INFO *dram_info); + +int ddr3_pbs_tx(MV_DRAM_INFO *dram_info); +int ddr3_pbs_rx(MV_DRAM_INFO *dram_info); +int ddr3_load_pbs_patterns(MV_DRAM_INFO *dram_info); + +int ddr3_dqs_centralization_rx(MV_DRAM_INFO *dram_info); +int ddr3_dqs_centralization_tx(MV_DRAM_INFO *dram_info); +int ddr3_load_dqs_patterns(MV_DRAM_INFO *dram_info); + +void ddr3_static_training_init(void); + +u8 ddr3_get_eprom_fabric(void); +void ddr3_set_performance_params(MV_DRAM_INFO *dram_info); +int ddr3_dram_sram_burst(u32 src, u32 dst, u32 len); +void 
ddr3_save_training(MV_DRAM_INFO *dram_info);
+int ddr3_read_training_results(void);
+int ddr3_training_suspend_resume(MV_DRAM_INFO *dram_info);
+int ddr3_get_min_max_read_sample_delay(u32 cs_enable, u32 reg, u32 *min,
+				       u32 *max, u32 *cs_max);
+int ddr3_get_min_max_rl_phase(MV_DRAM_INFO *dram_info, u32 *min, u32 *max,
+			      u32 cs);
+int ddr3_odt_activate(int activate);
+int ddr3_odt_read_dynamic_config(MV_DRAM_INFO *dram_info);
+void ddr3_print_freq(u32 freq);
+void ddr3_reset_phy_read_fifo(void);
+
+#endif /* __DDR3_TRAINING_H */
diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_init.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_init.c
new file mode 100644
index 000000000..607f3e12c
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_init.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#include <common.h>
+#include <i2c.h>
+#include <log.h>
+#include <spl.h>
+#include <asm/io.h>
+#include <asm/arch/cpu.h>
+#include <asm/arch/soc.h>
+#include <linux/delay.h>
+
+#include "ddr3_init.h"
+
+#if defined(MV88F78X60)
+#include "ddr3_axp_vars.h"
+#elif defined(MV88F67XX)
+#include "ddr3_a370_vars.h"
+#elif defined(MV88F672X)
+#include "ddr3_a375_vars.h"
+#endif
+
+#ifdef STATIC_TRAINING
+static void ddr3_static_training_init(void);
+#endif
+#ifdef DUNIT_STATIC
+static void ddr3_static_mc_init(void);
+#endif
+#if defined(DUNIT_STATIC) || defined(STATIC_TRAINING)
+MV_DRAM_MODES *ddr3_get_static_ddr_mode(void);
+#endif
+#if defined(MV88F672X)
+void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps);
+#endif
+u32 mv_board_id_get(void);
+extern void ddr3_set_sw_wl_rl_debug(u32);
+extern void ddr3_set_pbs(u32);
+extern void ddr3_set_log_level(u32 val);
+
+static u32 log_level = DDR3_LOG_LEVEL;
+
+static u32 ddr3_init_main(void);
+
+/*
+ * Name:     ddr3_set_log_level
+ * Desc:     This routine initializes log_level according to the nLogLevel
+ *           value provided by the user
+ * Args:     nLogLevel
+ * Notes:
+ * Returns:  None.
+ */
+void ddr3_set_log_level(u32 val)
+{
+	log_level = val;
+}
+
+/*
+ * Name:     ddr3_get_log_level
+ * Desc:     This routine returns the log level
+ * Args:     none
+ * Notes:
+ * Returns:  log level.
+ */ +u32 ddr3_get_log_level(void) +{ + return log_level; +} + +static void debug_print_reg(u32 reg) +{ + printf("0x%08x = 0x%08x\n", reg, reg_read(reg)); +} + +static void print_dunit_setup(void) +{ + puts("\n########### LOG LEVEL 1 (D-UNIT SETUP)###########\n"); + +#ifdef DUNIT_STATIC + puts("\nStatic D-UNIT Setup:\n"); +#endif +#ifdef DUNIT_SPD + puts("\nDynamic(using SPD) D-UNIT Setup:\n"); +#endif + debug_print_reg(REG_SDRAM_CONFIG_ADDR); + debug_print_reg(REG_DUNIT_CTRL_LOW_ADDR); + debug_print_reg(REG_SDRAM_TIMING_LOW_ADDR); + debug_print_reg(REG_SDRAM_TIMING_HIGH_ADDR); + debug_print_reg(REG_SDRAM_ADDRESS_CTRL_ADDR); + debug_print_reg(REG_SDRAM_OPEN_PAGES_ADDR); + debug_print_reg(REG_SDRAM_OPERATION_ADDR); + debug_print_reg(REG_SDRAM_MODE_ADDR); + debug_print_reg(REG_SDRAM_EXT_MODE_ADDR); + debug_print_reg(REG_DDR_CONT_HIGH_ADDR); + debug_print_reg(REG_ODT_TIME_LOW_ADDR); + debug_print_reg(REG_SDRAM_ERROR_ADDR); + debug_print_reg(REG_SDRAM_AUTO_PWR_SAVE_ADDR); + debug_print_reg(REG_OUDDR3_TIMING_ADDR); + debug_print_reg(REG_ODT_TIME_HIGH_ADDR); + debug_print_reg(REG_SDRAM_ODT_CTRL_LOW_ADDR); + debug_print_reg(REG_SDRAM_ODT_CTRL_HIGH_ADDR); + debug_print_reg(REG_DUNIT_ODT_CTRL_ADDR); +#ifndef MV88F67XX + debug_print_reg(REG_DRAM_FIFO_CTRL_ADDR); + debug_print_reg(REG_DRAM_AXI_CTRL_ADDR); + debug_print_reg(REG_DRAM_ADDR_CTRL_DRIVE_STRENGTH_ADDR); + debug_print_reg(REG_DRAM_DATA_DQS_DRIVE_STRENGTH_ADDR); + debug_print_reg(REG_DRAM_VER_CAL_MACHINE_CTRL_ADDR); + debug_print_reg(REG_DRAM_MAIN_PADS_CAL_ADDR); + debug_print_reg(REG_DRAM_HOR_CAL_MACHINE_CTRL_ADDR); + debug_print_reg(REG_CS_SIZE_SCRATCH_ADDR); + debug_print_reg(REG_DYNAMIC_POWER_SAVE_ADDR); + debug_print_reg(REG_READ_DATA_SAMPLE_DELAYS_ADDR); + debug_print_reg(REG_READ_DATA_READY_DELAYS_ADDR); + debug_print_reg(REG_DDR3_MR0_ADDR); + debug_print_reg(REG_DDR3_MR1_ADDR); + debug_print_reg(REG_DDR3_MR2_ADDR); + debug_print_reg(REG_DDR3_MR3_ADDR); + debug_print_reg(REG_DDR3_RANK_CTRL_ADDR); + debug_print_reg(REG_DRAM_PHY_CONFIG_ADDR); + debug_print_reg(REG_STATIC_DRAM_DLB_CONTROL); + debug_print_reg(DLB_BUS_OPTIMIZATION_WEIGHTS_REG); + debug_print_reg(DLB_AGING_REGISTER); + debug_print_reg(DLB_EVICTION_CONTROL_REG); + debug_print_reg(DLB_EVICTION_TIMERS_REGISTER_REG); +#if defined(MV88F672X) + debug_print_reg(REG_FASTPATH_WIN_CTRL_ADDR(0)); + debug_print_reg(REG_FASTPATH_WIN_BASE_ADDR(0)); + debug_print_reg(REG_FASTPATH_WIN_CTRL_ADDR(1)); + debug_print_reg(REG_FASTPATH_WIN_BASE_ADDR(1)); +#else + debug_print_reg(REG_FASTPATH_WIN_0_CTRL_ADDR); +#endif + debug_print_reg(REG_CDI_CONFIG_ADDR); +#endif +} + +#if !defined(STATIC_TRAINING) +static void ddr3_restore_and_set_final_windows(u32 *win_backup) +{ + u32 ui, reg, cs; + u32 win_ctrl_reg, num_of_win_regs; + u32 cs_ena = ddr3_get_cs_ena_from_reg(); + +#if defined(MV88F672X) + if (DDR3_FAST_PATH_EN == 0) + return; +#endif + +#if defined(MV88F672X) + win_ctrl_reg = REG_XBAR_WIN_16_CTRL_ADDR; + num_of_win_regs = 8; +#else + win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR; + num_of_win_regs = 16; +#endif + + /* Return XBAR windows 4-7 or 16-19 init configuration */ + for (ui = 0; ui < num_of_win_regs; ui++) + reg_write((win_ctrl_reg + 0x4 * ui), win_backup[ui]); + + DEBUG_INIT_FULL_S("DDR3 Training Sequence - Switching XBAR Window to FastPath Window\n"); + +#if defined(MV88F672X) + /* Set L2 filtering to 1G */ + reg_write(0x8c04, 0x40000000); + + /* Open fast path windows */ + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + /* set fast path window control for the cs */ + reg = 
0x1FFFFFE1; + reg |= (cs << 2); + reg |= (SDRAM_CS_SIZE & 0xFFFF0000); + /* Open fast path Window */ + reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg); + /* set fast path window base address for the cs */ + reg = (((SDRAM_CS_SIZE + 1) * cs) & 0xFFFF0000); + /* Set base address */ + reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg); + } + } +#else + reg = 0x1FFFFFE1; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + reg |= (cs << 2); + break; + } + } + + /* Open fast path Window to - 0.5G */ + reg_write(REG_FASTPATH_WIN_0_CTRL_ADDR, reg); +#endif +} + +static void ddr3_save_and_set_training_windows(u32 *win_backup) +{ + u32 cs_ena = ddr3_get_cs_ena_from_reg(); + u32 reg, tmp_count, cs, ui; + u32 win_ctrl_reg, win_base_reg, win_remap_reg; + u32 num_of_win_regs, win_jump_index; + +#if defined(MV88F672X) + /* Disable L2 filtering */ + reg_write(0x8c04, 0); + + win_ctrl_reg = REG_XBAR_WIN_16_CTRL_ADDR; + win_base_reg = REG_XBAR_WIN_16_BASE_ADDR; + win_remap_reg = REG_XBAR_WIN_16_REMAP_ADDR; + win_jump_index = 0x8; + num_of_win_regs = 8; +#else + win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR; + win_base_reg = REG_XBAR_WIN_4_BASE_ADDR; + win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR; + win_jump_index = 0x10; + num_of_win_regs = 16; +#endif + + /* Close XBAR Window 19 - Not needed */ + /* {0x000200e8} - Open Mbus Window - 2G */ + reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0); + + /* Save XBAR Windows 4-19 init configurations */ + for (ui = 0; ui < num_of_win_regs; ui++) + win_backup[ui] = reg_read(win_ctrl_reg + 0x4 * ui); + + /* Open XBAR Windows 4-7 or 16-19 for other CS */ + reg = 0; + tmp_count = 0; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + switch (cs) { + case 0: + reg = 0x0E00; + break; + case 1: + reg = 0x0D00; + break; + case 2: + reg = 0x0B00; + break; + case 3: + reg = 0x0700; + break; + } + reg |= (1 << 0); + reg |= (SDRAM_CS_SIZE & 0xFFFF0000); + + reg_write(win_ctrl_reg + win_jump_index * tmp_count, + reg); + reg = ((SDRAM_CS_SIZE + 1) * (tmp_count)) & 0xFFFF0000; + reg_write(win_base_reg + win_jump_index * tmp_count, + reg); + + if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR) { + reg_write(win_remap_reg + + win_jump_index * tmp_count, 0); + } + + tmp_count++; + } + } +} +#endif /* !defined(STATIC_TRAINING) */ + +/* + * Name: ddr3_init - Main DDR3 Init function + * Desc: This routine initialize the DDR3 MC and runs HW training. + * Args: None. + * Notes: + * Returns: None. 
+ */
+int ddr3_init(void)
+{
+	unsigned int status;
+
+	ddr3_set_pbs(DDR3_PBS);
+	ddr3_set_sw_wl_rl_debug(DDR3_RUN_SW_WHEN_HW_FAIL);
+
+	status = ddr3_init_main();
+	if (status == MV_DDR3_TRAINING_ERR_BAD_SAR)
+		DEBUG_INIT_S("DDR3 Training Error: Bad sample at reset");
+	if (status == MV_DDR3_TRAINING_ERR_BAD_DIMM_SETUP)
+		DEBUG_INIT_S("DDR3 Training Error: Bad DIMM setup");
+	if (status == MV_DDR3_TRAINING_ERR_MAX_CS_LIMIT)
+		DEBUG_INIT_S("DDR3 Training Error: Max CS limit");
+	if (status == MV_DDR3_TRAINING_ERR_MAX_ENA_CS_LIMIT)
+		DEBUG_INIT_S("DDR3 Training Error: Max enable CS limit");
+	if (status == MV_DDR3_TRAINING_ERR_BAD_R_DIMM_SETUP)
+		DEBUG_INIT_S("DDR3 Training Error: Bad R-DIMM setup");
+	if (status == MV_DDR3_TRAINING_ERR_TWSI_FAIL)
+		DEBUG_INIT_S("DDR3 Training Error: TWSI failure");
+	if (status == MV_DDR3_TRAINING_ERR_DIMM_TYPE_NO_MATCH)
+		DEBUG_INIT_S("DDR3 Training Error: DIMM type no match");
+	if (status == MV_DDR3_TRAINING_ERR_TWSI_BAD_TYPE)
+		DEBUG_INIT_S("DDR3 Training Error: TWSI bad type");
+	if (status == MV_DDR3_TRAINING_ERR_BUS_WIDTH_NOT_MATCH)
+		DEBUG_INIT_S("DDR3 Training Error: bus width no match");
+	if (status > MV_DDR3_TRAINING_ERR_HW_FAIL_BASE)
+		DEBUG_INIT_C("DDR3 Training Error: HW Failure 0x", status, 8);
+
+	return status;
+}
+
+static void print_ddr_target_freq(u32 cpu_freq, u32 fab_opt)
+{
+	puts("\nDDR3 Training Sequence - Run DDR3 at ");
+
+	switch (cpu_freq) {
+#if defined(MV88F672X)
+	case 21:
+		puts("533 MHz\n");
+		break;
+#else
+	case 1:
+		puts("533 MHz\n");
+		break;
+	case 2:
+		if (fab_opt == 5)
+			puts("600 MHz\n");
+		if (fab_opt == 9)
+			puts("400 MHz\n");
+		break;
+	case 3:
+		puts("667 MHz\n");
+		break;
+	case 4:
+		if (fab_opt == 5)
+			puts("750 MHz\n");
+		if (fab_opt == 9)
+			puts("500 MHz\n");
+		break;
+	case 0xa:
+		puts("400 MHz\n");
+		break;
+	case 0xb:
+		if (fab_opt == 5)
+			puts("800 MHz\n");
+		if (fab_opt == 9)
+			puts("553 MHz\n");
+		if (fab_opt == 0xA)
+			puts("640 MHz\n");
+		break;
+#endif
+	default:
+		puts("NOT DEFINED FREQ\n");
+	}
+}
+
+static u32 ddr3_init_main(void)
+{
+	u32 target_freq;
+	u32 reg = 0;
+	u32 cpu_freq, fab_opt, hclk_time_ps, soc_num;
+	__maybe_unused u32 ecc = DRAM_ECC;
+	__maybe_unused int dqs_clk_aligned = 0;
+	__maybe_unused u32 scrub_offs, scrub_size;
+	__maybe_unused u32 ddr_width = BUS_WIDTH;
+	__maybe_unused int status;
+	__maybe_unused u32 win_backup[16];
+
+	/* SoC/Board special initializations */
+	fab_opt = ddr3_get_fab_opt();
+
+#ifdef CONFIG_SPD_EEPROM
+	i2c_init(CONFIG_SYS_I2C_SPEED, CONFIG_SYS_I2C_SLAVE);
+#endif
+
+	ddr3_print_version();
+	DEBUG_INIT_S("4\n");
+	/* Lib version 5.5.4 */
+
+	fab_opt = ddr3_get_fab_opt();
+
+	/* Switching CPU to MRVL ID */
+	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
+		SAR1_CPU_CORE_OFFSET;
+	switch (soc_num) {
+	case 0x3:
+		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
+		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
+		/* fall through */
+	case 0x1:
+		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
+		/* fall through */
+	case 0x0:
+		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
+		/* fall through */
+	default:
+		break;
+	}
+
+	/* Power down deskew PLL */
+#if !defined(MV88F672X)
+	/* 0x18780 [25] */
+	reg = (reg_read(REG_DDRPHY_APLL_CTRL_ADDR) & ~(1 << 25));
+	reg_write(REG_DDRPHY_APLL_CTRL_ADDR, reg);
+#endif
+
+	/*
+	 * Stage 0 - Set board configuration
+	 */
+	cpu_freq = ddr3_get_cpu_freq();
+	if (fab_opt > FAB_OPT)
+		fab_opt = FAB_OPT - 1;
+
+	if (ddr3_get_log_level() > 0)
+		print_ddr_target_freq(cpu_freq, fab_opt);
+
+#if defined(MV88F672X)
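+	/*
+	 * Worked example for this branch (values from get_target_freq()
+	 * at the end of this file): S@R mode CPU_800MHz_DDR_400MHz_L2_400MHz
+	 * yields ddr_freq = DDR_400 and hclk = 200 MHz, hence
+	 * hclk_time_ps = 1000000 / 200 = 5000 ps. On the AXP/A370 path in
+	 * the #else branch, the same two values come from the
+	 * cpu_ddr_ratios[][] and cpu_fab_clk_to_hclk[][] lookup tables,
+	 * indexed by [fab_opt][cpu_freq].
+	 */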
get_target_freq(cpu_freq, &target_freq, &hclk_time_ps); +#else + target_freq = cpu_ddr_ratios[fab_opt][cpu_freq]; + hclk_time_ps = cpu_fab_clk_to_hclk[fab_opt][cpu_freq]; +#endif + if ((target_freq == 0) || (hclk_time_ps == 0)) { + DEBUG_INIT_S("DDR3 Training Sequence - FAILED - Wrong Sample at Reset Configurations\n"); + if (target_freq == 0) { + DEBUG_INIT_C("target_freq", target_freq, 2); + DEBUG_INIT_C("fab_opt", fab_opt, 2); + DEBUG_INIT_C("cpu_freq", cpu_freq, 2); + } else if (hclk_time_ps == 0) { + DEBUG_INIT_C("hclk_time_ps", hclk_time_ps, 2); + DEBUG_INIT_C("fab_opt", fab_opt, 2); + DEBUG_INIT_C("cpu_freq", cpu_freq, 2); + } + + return MV_DDR3_TRAINING_ERR_BAD_SAR; + } + +#if defined(ECC_SUPPORT) + scrub_offs = U_BOOT_START_ADDR; + scrub_size = U_BOOT_SCRUB_SIZE; +#else + scrub_offs = 0; + scrub_size = 0; +#endif + +#if defined(ECC_SUPPORT) && defined(AUTO_DETECTION_SUPPORT) + ecc = 0; + if (ddr3_check_config(BUS_WIDTH_ECC_TWSI_ADDR, CONFIG_ECC)) + ecc = 1; +#endif + +#ifdef DQS_CLK_ALIGNED + dqs_clk_aligned = 1; +#endif + + /* Check if DRAM is already initialized */ + if (reg_read(REG_BOOTROM_ROUTINE_ADDR) & + (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) { + DEBUG_INIT_S("DDR3 Training Sequence - 2nd boot - Skip\n"); + return MV_OK; + } + + /* + * Stage 1 - Dunit Setup + */ + +#ifdef DUNIT_STATIC + /* + * For Static D-Unit Setup use must set the correct static values + * at the ddr3_*soc*_vars.h file + */ + DEBUG_INIT_FULL_S("DDR3 Training Sequence - Static MC Init\n"); + ddr3_static_mc_init(); + +#ifdef ECC_SUPPORT + ecc = DRAM_ECC; + if (ecc) { + reg = reg_read(REG_SDRAM_CONFIG_ADDR); + reg |= (1 << REG_SDRAM_CONFIG_ECC_OFFS); + reg_write(REG_SDRAM_CONFIG_ADDR, reg); + } +#endif +#endif + +#if defined(MV88F78X60) || defined(MV88F672X) +#if defined(AUTO_DETECTION_SUPPORT) + /* + * Configurations for both static and dynamic MC setups + * + * Dynamically Set 32Bit and ECC for AXP (Relevant only for + * Marvell DB boards) + */ + if (ddr3_check_config(BUS_WIDTH_ECC_TWSI_ADDR, CONFIG_BUS_WIDTH)) { + ddr_width = 32; + DEBUG_INIT_S("DDR3 Training Sequence - DRAM bus width 32Bit\n"); + } +#endif + +#if defined(MV88F672X) + reg = reg_read(REG_SDRAM_CONFIG_ADDR); + if ((reg >> 15) & 1) + ddr_width = 32; + else + ddr_width = 16; +#endif +#endif + +#ifdef DUNIT_SPD + status = ddr3_dunit_setup(ecc, hclk_time_ps, &ddr_width); + if (MV_OK != status) { + DEBUG_INIT_S("DDR3 Training Sequence - FAILED (ddr3 Dunit Setup)\n"); + return status; + } +#endif + + /* Fix read ready phases for all SOC in reg 0x15C8 */ + reg = reg_read(REG_TRAINING_DEBUG_3_ADDR); + reg &= ~(REG_TRAINING_DEBUG_3_MASK); + reg |= 0x4; /* Phase 0 */ + reg &= ~(REG_TRAINING_DEBUG_3_MASK << REG_TRAINING_DEBUG_3_OFFS); + reg |= (0x4 << (1 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 1 */ + reg &= ~(REG_TRAINING_DEBUG_3_MASK << (3 * REG_TRAINING_DEBUG_3_OFFS)); + reg |= (0x6 << (3 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 3 */ + reg &= ~(REG_TRAINING_DEBUG_3_MASK << (4 * REG_TRAINING_DEBUG_3_OFFS)); + reg |= (0x6 << (4 * REG_TRAINING_DEBUG_3_OFFS)); + reg &= ~(REG_TRAINING_DEBUG_3_MASK << (5 * REG_TRAINING_DEBUG_3_OFFS)); + reg |= (0x6 << (5 * REG_TRAINING_DEBUG_3_OFFS)); + reg_write(REG_TRAINING_DEBUG_3_ADDR, reg); + +#if defined(MV88F672X) + /* + * AxiBrespMode[8] = Compliant, + * AxiAddrDecodeCntrl[11] = Internal, + * AxiDataBusWidth[0] = 128bit + */ + /* 0x14A8 - AXI Control Register */ + reg_write(REG_DRAM_AXI_CTRL_ADDR, 0); +#else + /* 0x14A8 - AXI Control Register */ + reg_write(REG_DRAM_AXI_CTRL_ADDR, 0x00000100); + 
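+	/*
+	 * The read-ready phase fix above (reg 0x15c8) is a series of
+	 * identical field updates: phase n occupies a
+	 * REG_TRAINING_DEBUG_3_OFFS-wide field that is cleared and then
+	 * OR-ed with its new value. A hypothetical helper (a sketch only,
+	 * not defined in these sources) makes the idiom explicit:
+	 *
+	 *	static u32 set_rd_phase(u32 reg, u32 n, u32 val)
+	 *	{
+	 *		reg &= ~(REG_TRAINING_DEBUG_3_MASK <<
+	 *			 (n * REG_TRAINING_DEBUG_3_OFFS));
+	 *		return reg | (val << (n * REG_TRAINING_DEBUG_3_OFFS));
+	 *	}
+	 *
+	 * so phases 0 and 1 are set to 0x4, and phases 3, 4 and 5 to 0x6.
+	 * The AXI/CDI register pair written here is widened just below when
+	 * the DDR bus is 64-bit and the DDR clock-ratio bit is set.
+	 */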
reg_write(REG_CDI_CONFIG_ADDR, 0x00000006); + + if ((ddr_width == 64) && (reg_read(REG_DDR_IO_ADDR) & + (1 << REG_DDR_IO_CLK_RATIO_OFFS))) { + /* 0x14A8 - AXI Control Register */ + reg_write(REG_DRAM_AXI_CTRL_ADDR, 0x00000101); + reg_write(REG_CDI_CONFIG_ADDR, 0x00000007); + } +#endif + +#if !defined(MV88F67XX) + /* + * ARMADA-370 activate DLB later at the u-boot, + * Armada38x - No DLB activation at this time + */ + reg_write(DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0x18C01E); + +#if defined(MV88F78X60) + /* WA according to eratta GL-8672902*/ + if (mv_ctrl_rev_get() == MV_78XX0_B0_REV) + reg_write(DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0xc19e); +#endif + + reg_write(DLB_AGING_REGISTER, 0x0f7f007f); + reg_write(DLB_EVICTION_CONTROL_REG, 0x0); + reg_write(DLB_EVICTION_TIMERS_REGISTER_REG, 0x00FF3C1F); + + reg_write(MBUS_UNITS_PRIORITY_CONTROL_REG, 0x55555555); + reg_write(FABRIC_UNITS_PRIORITY_CONTROL_REG, 0xAA); + reg_write(MBUS_UNITS_PREFETCH_CONTROL_REG, 0xffff); + reg_write(FABRIC_UNITS_PREFETCH_CONTROL_REG, 0xf0f); + +#if defined(MV88F78X60) + /* WA according to eratta GL-8672902 */ + if (mv_ctrl_rev_get() == MV_78XX0_B0_REV) { + reg = reg_read(REG_STATIC_DRAM_DLB_CONTROL); + reg |= DLB_ENABLE; + reg_write(REG_STATIC_DRAM_DLB_CONTROL, reg); + } +#endif /* end defined(MV88F78X60) */ +#endif /* end !defined(MV88F67XX) */ + + if (ddr3_get_log_level() >= MV_LOG_LEVEL_1) + print_dunit_setup(); + + /* + * Stage 2 - Training Values Setup + */ +#ifdef STATIC_TRAINING + /* + * DRAM Init - After all the D-unit values are set, its time to init + * the D-unit + */ + /* Wait for '0' */ + reg_write(REG_SDRAM_INIT_CTRL_ADDR, 0x1); + do { + reg = (reg_read(REG_SDRAM_INIT_CTRL_ADDR)) & + (1 << REG_SDRAM_INIT_CTRL_OFFS); + } while (reg); + + /* ddr3 init using static parameters - HW training is disabled */ + DEBUG_INIT_FULL_S("DDR3 Training Sequence - Static Training Parameters\n"); + ddr3_static_training_init(); + +#if defined(MV88F78X60) + /* + * If ECC is enabled, need to scrub the U-Boot area memory region - + * Run training function with Xor bypass just to scrub the memory + */ + status = ddr3_hw_training(target_freq, ddr_width, + 1, scrub_offs, scrub_size, + dqs_clk_aligned, DDR3_TRAINING_DEBUG, + REG_DIMM_SKIP_WL); + if (MV_OK != status) { + DEBUG_INIT_FULL_S("DDR3 Training Sequence - FAILED\n"); + return status; + } +#endif +#else + /* Set X-BAR windows for the training sequence */ + ddr3_save_and_set_training_windows(win_backup); + + /* Run DDR3 Training Sequence */ + /* DRAM Init */ + reg_write(REG_SDRAM_INIT_CTRL_ADDR, 0x1); + do { + reg = (reg_read(REG_SDRAM_INIT_CTRL_ADDR)) & + (1 << REG_SDRAM_INIT_CTRL_OFFS); + } while (reg); /* Wait for '0' */ + + /* ddr3 init using DDR3 HW training procedure */ + DEBUG_INIT_FULL_S("DDR3 Training Sequence - HW Training Procedure\n"); + status = ddr3_hw_training(target_freq, ddr_width, + 0, scrub_offs, scrub_size, + dqs_clk_aligned, DDR3_TRAINING_DEBUG, + REG_DIMM_SKIP_WL); + if (MV_OK != status) { + DEBUG_INIT_FULL_S("DDR3 Training Sequence - FAILED\n"); + return status; + } +#endif + + /* + * Stage 3 - Finish + */ +#if defined(MV88F78X60) || defined(MV88F672X) + /* Disable ECC Ignore bit */ + reg = reg_read(REG_SDRAM_CONFIG_ADDR) & + ~(1 << REG_SDRAM_CONFIG_IERR_OFFS); + reg_write(REG_SDRAM_CONFIG_ADDR, reg); +#endif + +#if !defined(STATIC_TRAINING) + /* Restore and set windows */ + ddr3_restore_and_set_final_windows(win_backup); +#endif + + /* Update DRAM init indication in bootROM register */ + reg = reg_read(REG_BOOTROM_ROUTINE_ADDR); + 
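+	/*
+	 * This read-modify-write completes the bootROM handshake: the same
+	 * flag is tested near the top of ddr3_init_main(), where
+	 *
+	 *	if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
+	 *	    (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS))
+	 *
+	 * returns MV_OK early ("2nd boot - Skip"), so the full init and
+	 * training sequence runs only once until the register is reset.
+	 */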
reg_write(REG_BOOTROM_ROUTINE_ADDR,
+		  reg | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
+
+#if !defined(MV88F67XX)
+#if defined(MV88F78X60)
+	if (mv_ctrl_rev_get() == MV_78XX0_B0_REV) {
+		reg = reg_read(REG_SDRAM_CONFIG_ADDR);
+		if (ecc == 0)
+			reg_write(REG_SDRAM_CONFIG_ADDR, reg | (1 << 19));
+	}
+#endif /* end defined(MV88F78X60) */
+
+	reg_write(DLB_EVICTION_CONTROL_REG, 0x9);
+
+	reg = reg_read(REG_STATIC_DRAM_DLB_CONTROL);
+	reg |= (DLB_ENABLE | DLB_WRITE_COALESING | DLB_AXI_PREFETCH_EN |
+		DLB_MBUS_PREFETCH_EN | PREFETCH_NLNSZTR);
+	reg_write(REG_STATIC_DRAM_DLB_CONTROL, reg);
+#endif /* end !defined(MV88F67XX) */
+
+#ifdef STATIC_TRAINING
+	DEBUG_INIT_S("DDR3 Training Sequence - Ended Successfully (S)\n");
+#else
+	DEBUG_INIT_S("DDR3 Training Sequence - Ended Successfully\n");
+#endif
+
+	return MV_OK;
+}
+
+/*
+ * Name:     ddr3_get_cpu_freq
+ * Desc:     read S@R and return CPU frequency
+ * Args:
+ * Notes:
+ * Returns:  required value
+ */
+u32 ddr3_get_cpu_freq(void)
+{
+	u32 reg, cpu_freq;
+
+#if defined(MV88F672X)
+	/* Read sample at reset setting */
+	reg = reg_read(REG_SAMPLE_RESET_HIGH_ADDR);	/* 0xE8200 */
+	cpu_freq = (reg & REG_SAMPLE_RESET_CPU_FREQ_MASK) >>
+		REG_SAMPLE_RESET_CPU_FREQ_OFFS;
+#else
+	/* Read sample at reset setting */
+	reg = reg_read(REG_SAMPLE_RESET_LOW_ADDR);	/* 0x18230 [23:21] */
+#if defined(MV88F78X60)
+	cpu_freq = (reg & REG_SAMPLE_RESET_CPU_FREQ_MASK) >>
+		REG_SAMPLE_RESET_CPU_FREQ_OFFS;
+	reg = reg_read(REG_SAMPLE_RESET_HIGH_ADDR);	/* 0x18234 [20] */
+	cpu_freq |= (((reg >> REG_SAMPLE_RESET_HIGH_CPU_FREQ_OFFS) & 0x1) << 3);
+#elif defined(MV88F67XX)
+	cpu_freq = (reg & REG_SAMPLE_RESET_CPU_FREQ_MASK) >>
+		REG_SAMPLE_RESET_CPU_FREQ_OFFS;
+#endif
+#endif
+
+	return cpu_freq;
+}
+
+/*
+ * Name:     ddr3_get_fab_opt
+ * Desc:     read S@R and return the fabric option
+ * Args:
+ * Notes:
+ * Returns:  required value
+ */
+u32 ddr3_get_fab_opt(void)
+{
+	__maybe_unused u32 reg, fab_opt;
+
+#if defined(MV88F672X)
+	return 0;	/* No fabric */
+#else
+	/* Read sample at reset setting */
+	reg = reg_read(REG_SAMPLE_RESET_LOW_ADDR);
+	fab_opt = (reg & REG_SAMPLE_RESET_FAB_MASK) >>
+		REG_SAMPLE_RESET_FAB_OFFS;
+
+#if defined(MV88F78X60)
+	reg = reg_read(REG_SAMPLE_RESET_HIGH_ADDR);
+	fab_opt |= (((reg >> 19) & 0x1) << 4);
+#endif
+
+	return fab_opt;
+#endif
+}
+
+/*
+ * Name:     ddr3_get_vco_freq
+ * Desc:     read S@R and return VCO frequency
+ * Args:
+ * Notes:
+ * Returns:  required value
+ */
+u32 ddr3_get_vco_freq(void)
+{
+	u32 fab, cpu_freq, ui_vco_freq;
+
+	fab = ddr3_get_fab_opt();
+	cpu_freq = ddr3_get_cpu_freq();
+
+	if (fab == 2 || fab == 3 || fab == 7 || fab == 8 || fab == 10 ||
+	    fab == 15 || fab == 17 || fab == 20)
+		ui_vco_freq = cpu_freq + CLK_CPU;
+	else
+		ui_vco_freq = cpu_freq;
+
+	return ui_vco_freq;
+}
+
+#ifdef STATIC_TRAINING
+/*
+ * Name:     ddr3_static_training_init - Init DDR3 Training with
+ *           static parameters
+ * Desc:     Use this routine to init the controller without the HW training
+ *           procedure.
+ *           The user must provide a compatible header file with register data.
+ * Args:     None.
+ * Notes:
+ * Returns:  None.
+ */
+void ddr3_static_training_init(void)
+{
+	MV_DRAM_MODES *ddr_mode;
+	u32 reg;
+	int j;
+
+	ddr_mode = ddr3_get_static_ddr_mode();
+
+	j = 0;
+	while (ddr_mode->vals[j].reg_addr != 0) {
+		udelay(10);	/* delay between consecutive register writes */
+		reg_write(ddr_mode->vals[j].reg_addr,
+			  ddr_mode->vals[j].reg_value);
+
+		if (ddr_mode->vals[j].reg_addr ==
+		    REG_PHY_REGISTRY_FILE_ACCESS_ADDR)
+			do {
+				reg = reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR) &
+					REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE;
+			} while (reg);
+		j++;
+	}
+}
+#endif
+
+/*
+ * Name:     ddr3_get_static_mc_value - Extract bit field(s) from a register
+ * Desc:     Reads reg_addr and returns the field selected by offset1/mask1,
+ *           OR-ed with the field selected by offset2/mask2 when mask2 is
+ *           non-zero.
+ * Args:     reg_addr, offset1, mask1, offset2, mask2
+ * Notes:
+ * Returns:  the extracted field value.
+ */
+u32 ddr3_get_static_mc_value(u32 reg_addr, u32 offset1, u32 mask1, u32 offset2,
+			     u32 mask2)
+{
+	u32 reg, tmp;
+
+	reg = reg_read(reg_addr);
+
+	tmp = (reg >> offset1) & mask1;
+	if (mask2)
+		tmp |= (reg >> offset2) & mask2;
+
+	return tmp;
+}
+
+/*
+ * Name:     ddr3_get_static_ddr_mode - Select the static DDR mode entry
+ * Desc:     Scans the ddr_modes[] table for the entry matching the current
+ *           CPU frequency, fabric option and board revision, for use when
+ *           the controller is initialized without the HW training procedure.
+ * Args:     None.
+ * Notes:
+ * Returns:  pointer to the matching MV_DRAM_MODES entry, or to the first
+ *           entry if no match is found.
+ */
+__weak MV_DRAM_MODES *ddr3_get_static_ddr_mode(void)
+{
+	u32 chip_board_rev, i;
+	u32 size;
+
+	/* Do not modify this code. Relevant only for Marvell boards */
+#if defined(DB_78X60_PCAC)
+	chip_board_rev = Z1_PCAC;
+#elif defined(DB_78X60_AMC)
+	chip_board_rev = A0_AMC;
+#elif defined(DB_88F6710_PCAC)
+	chip_board_rev = A0_PCAC;
+#elif defined(RD_88F6710)
+	chip_board_rev = A0_RD;
+#elif defined(MV88F672X)
+	chip_board_rev = mv_board_id_get();
+#else
+	chip_board_rev = A0;
+#endif
+
+	size = sizeof(ddr_modes) / sizeof(MV_DRAM_MODES);
+	for (i = 0; i < size; i++) {
+		if ((ddr3_get_cpu_freq() == ddr_modes[i].cpu_freq) &&
+		    (ddr3_get_fab_opt() == ddr_modes[i].fab_freq) &&
+		    (chip_board_rev == ddr_modes[i].chip_board_rev))
+			return &ddr_modes[i];
+	}
+
+	return &ddr_modes[0];
+}
+
+#ifdef DUNIT_STATIC
+/*
+ * Name:     ddr3_static_mc_init - Init Memory controller with static
+ *           parameters
+ * Desc:     Use this routine to init the controller without the HW training
+ *           procedure.
+ *           The user must provide a compatible header file with register data.
+ * Args:     None.
+ * Notes:
+ * Returns:  None.
+ */
+void ddr3_static_mc_init(void)
+{
+	MV_DRAM_MODES *ddr_mode;
+	u32 reg;
+	int j;
+
+	ddr_mode = ddr3_get_static_ddr_mode();
+	j = 0;
+	while (ddr_mode->regs[j].reg_addr != 0) {
+		reg_write(ddr_mode->regs[j].reg_addr,
+			  ddr_mode->regs[j].reg_value);
+		if (ddr_mode->regs[j].reg_addr ==
+		    REG_PHY_REGISTRY_FILE_ACCESS_ADDR)
+			do {
+				reg = reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR) &
+					REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE;
+			} while (reg);
+		j++;
+	}
+}
+#endif
+
+/*
+ * Name:     ddr3_check_config - Check a user configuration option over TWSI
+ * Desc:     Reads the board configuration byte from the TWSI EEPROM and
+ *           tests the bit corresponding to the requested option
+ *           (ECC / multi-CS / bus width).
+ * Args:     twsi_addr - TWSI address of the configuration EEPROM
+ *           config_type - the option to query
+ * Notes:    Only available for ArmadaXP/Armada 370 DB boards
+ * Returns:  1 if the option is enabled, 0 otherwise.
+ */ +int ddr3_check_config(u32 twsi_addr, MV_CONFIG_TYPE config_type) +{ +#ifdef AUTO_DETECTION_SUPPORT + u8 data = 0; + int ret; + int offset; + + if ((config_type == CONFIG_ECC) || (config_type == CONFIG_BUS_WIDTH)) + offset = 1; + else + offset = 0; + + ret = i2c_read(twsi_addr, offset, 1, (u8 *)&data, 1); + if (!ret) { + switch (config_type) { + case CONFIG_ECC: + if (data & 0x2) + return 1; + break; + case CONFIG_BUS_WIDTH: + if (data & 0x1) + return 1; + break; +#ifdef DB_88F6710 + case CONFIG_MULTI_CS: + if (CFG_MULTI_CS_MODE(data)) + return 1; + break; +#else + case CONFIG_MULTI_CS: + break; +#endif + } + } +#endif + + return 0; +} + +#if defined(DB_88F78X60_REV2) +/* + * Name: ddr3_get_eprom_fabric - Get Fabric configuration from EPROM + * Desc: + * Args: twsi Address + * Notes: Only Available for ArmadaXP DB Rev2 boards + * Returns: None. + */ +u8 ddr3_get_eprom_fabric(void) +{ +#ifdef AUTO_DETECTION_SUPPORT + u8 data = 0; + int ret; + + ret = i2c_read(NEW_FABRIC_TWSI_ADDR, 1, 1, (u8 *)&data, 1); + if (!ret) + return data & 0x1F; +#endif + + return 0; +} + +#endif + +/* + * Name: ddr3_cl_to_valid_cl - this return register matching CL value + * Desc: + * Args: clValue - the value + + * Notes: + * Returns: required CL value + */ +u32 ddr3_cl_to_valid_cl(u32 cl) +{ + switch (cl) { + case 5: + return 2; + break; + case 6: + return 4; + break; + case 7: + return 6; + break; + case 8: + return 8; + break; + case 9: + return 10; + break; + case 10: + return 12; + break; + case 11: + return 14; + break; + case 12: + return 1; + break; + case 13: + return 3; + break; + case 14: + return 5; + break; + default: + return 2; + } +} + +/* + * Name: ddr3_cl_to_valid_cl - this return register matching CL value + * Desc: + * Args: clValue - the value + * Notes: + * Returns: required CL value + */ +u32 ddr3_valid_cl_to_cl(u32 ui_valid_cl) +{ + switch (ui_valid_cl) { + case 1: + return 12; + break; + case 2: + return 5; + break; + case 3: + return 13; + break; + case 4: + return 6; + break; + case 5: + return 14; + break; + case 6: + return 7; + break; + case 8: + return 8; + break; + case 10: + return 9; + break; + case 12: + return 10; + break; + case 14: + return 11; + break; + default: + return 0; + } +} + +/* + * Name: ddr3_get_cs_num_from_reg + * Desc: + * Args: + * Notes: + * Returns: + */ +u32 ddr3_get_cs_num_from_reg(void) +{ + u32 cs_ena = ddr3_get_cs_ena_from_reg(); + u32 cs_count = 0; + u32 cs; + + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) + cs_count++; + } + + return cs_count; +} + +/* + * Name: ddr3_get_cs_ena_from_reg + * Desc: + * Args: + * Notes: + * Returns: + */ +u32 ddr3_get_cs_ena_from_reg(void) +{ + return reg_read(REG_DDR3_RANK_CTRL_ADDR) & + REG_DDR3_RANK_CTRL_CS_ENA_MASK; +} + +/* + * mv_ctrl_rev_get - Get Marvell controller device revision number + * + * DESCRIPTION: + * This function returns 8bit describing the device revision as defined + * in PCI Express Class Code and Revision ID Register. + * + * INPUT: + * None. + * + * OUTPUT: + * None. 
+ * + * RETURN: + * 8bit desscribing Marvell controller revision number + * + */ +#if !defined(MV88F672X) +u8 mv_ctrl_rev_get(void) +{ + u8 rev_num; + +#if defined(MV_INCLUDE_CLK_PWR_CNTRL) + /* Check pex power state */ + u32 pex_power; + pex_power = mv_ctrl_pwr_clck_get(PEX_UNIT_ID, 0); + if (pex_power == 0) + mv_ctrl_pwr_clck_set(PEX_UNIT_ID, 0, 1); +#endif + rev_num = (u8)reg_read(PEX_CFG_DIRECT_ACCESS(0, + PCI_CLASS_CODE_AND_REVISION_ID)); + +#if defined(MV_INCLUDE_CLK_PWR_CNTRL) + /* Return to power off state */ + if (pex_power == 0) + mv_ctrl_pwr_clck_set(PEX_UNIT_ID, 0, 0); +#endif + + return (rev_num & PCCRIR_REVID_MASK) >> PCCRIR_REVID_OFFS; +} + +#endif + +#if defined(MV88F672X) +void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps) +{ + u32 tmp, hclk; + + switch (freq_mode) { + case CPU_333MHz_DDR_167MHz_L2_167MHz: + hclk = 84; + tmp = DDR_100; + break; + case CPU_266MHz_DDR_266MHz_L2_133MHz: + case CPU_333MHz_DDR_222MHz_L2_167MHz: + case CPU_400MHz_DDR_200MHz_L2_200MHz: + case CPU_400MHz_DDR_267MHz_L2_200MHz: + case CPU_533MHz_DDR_267MHz_L2_267MHz: + case CPU_500MHz_DDR_250MHz_L2_250MHz: + case CPU_600MHz_DDR_300MHz_L2_300MHz: + case CPU_800MHz_DDR_267MHz_L2_400MHz: + case CPU_900MHz_DDR_300MHz_L2_450MHz: + tmp = DDR_300; + hclk = 150; + break; + case CPU_333MHz_DDR_333MHz_L2_167MHz: + case CPU_500MHz_DDR_334MHz_L2_250MHz: + case CPU_666MHz_DDR_333MHz_L2_333MHz: + tmp = DDR_333; + hclk = 165; + break; + case CPU_533MHz_DDR_356MHz_L2_267MHz: + tmp = DDR_360; + hclk = 180; + break; + case CPU_400MHz_DDR_400MHz_L2_200MHz: + case CPU_600MHz_DDR_400MHz_L2_300MHz: + case CPU_800MHz_DDR_400MHz_L2_400MHz: + case CPU_400MHz_DDR_400MHz_L2_400MHz: + tmp = DDR_400; + hclk = 200; + break; + case CPU_666MHz_DDR_444MHz_L2_333MHz: + case CPU_900MHz_DDR_450MHz_L2_450MHz: + tmp = DDR_444; + hclk = 222; + break; + case CPU_500MHz_DDR_500MHz_L2_250MHz: + case CPU_1000MHz_DDR_500MHz_L2_500MHz: + case CPU_1000MHz_DDR_500MHz_L2_333MHz: + tmp = DDR_500; + hclk = 250; + break; + case CPU_533MHz_DDR_533MHz_L2_267MHz: + case CPU_800MHz_DDR_534MHz_L2_400MHz: + case CPU_1100MHz_DDR_550MHz_L2_550MHz: + tmp = DDR_533; + hclk = 267; + break; + case CPU_600MHz_DDR_600MHz_L2_300MHz: + case CPU_900MHz_DDR_600MHz_L2_450MHz: + case CPU_1200MHz_DDR_600MHz_L2_600MHz: + tmp = DDR_600; + hclk = 300; + break; + case CPU_666MHz_DDR_666MHz_L2_333MHz: + case CPU_1000MHz_DDR_667MHz_L2_500MHz: + tmp = DDR_666; + hclk = 333; + break; + default: + *ddr_freq = 0; + *hclk_ps = 0; + break; + } + + *ddr_freq = tmp; /* DDR freq define */ + *hclk_ps = 1000000 / hclk; /* values are 1/HCLK in ps */ + + return; +} +#endif diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_init.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_init.h new file mode 100644 index 000000000..569a14b71 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_init.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates
+ */
+
+#ifndef __DDR3_INIT_H
+#define __DDR3_INIT_H
+
+/*
+ * Debug
+ */
+
+/*
+ * MV_DEBUG_INIT needs to be defined, otherwise the output of the
+ * DDR3 training code is incomplete and misleading
+ */
+#define MV_DEBUG_INIT
+
+#ifdef MV_DEBUG_INIT
+#define DEBUG_INIT_S(s)			puts(s)
+#define DEBUG_INIT_D(d, l)		printf("%x", d)
+#define DEBUG_INIT_D_10(d, l)		printf("%d", d)
+#else
+#define DEBUG_INIT_S(s)
+#define DEBUG_INIT_D(d, l)
+#define DEBUG_INIT_D_10(d, l)
+#endif
+
+#ifdef MV_DEBUG_INIT_FULL
+#define DEBUG_INIT_FULL_S(s)		puts(s)
+#define DEBUG_INIT_FULL_D(d, l)		printf("%x", d)
+#define DEBUG_INIT_FULL_D_10(d, l)	printf("%d", d)
+#define DEBUG_WR_REG(reg, val) \
+	{ DEBUG_INIT_S("Write Reg: 0x"); DEBUG_INIT_D((reg), 8); \
+	  DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
+#define DEBUG_RD_REG(reg, val) \
+	{ DEBUG_INIT_S("Read Reg: 0x"); DEBUG_INIT_D((reg), 8); \
+	  DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
+#else
+#define DEBUG_INIT_FULL_S(s)
+#define DEBUG_INIT_FULL_D(d, l)
+#define DEBUG_INIT_FULL_D_10(d, l)
+#define DEBUG_WR_REG(reg, val)
+#define DEBUG_RD_REG(reg, val)
+#endif
+
+#define DEBUG_INIT_FULL_C(s, d, l) \
+	{ DEBUG_INIT_FULL_S(s); DEBUG_INIT_FULL_D(d, l); DEBUG_INIT_FULL_S("\n"); }
+#define DEBUG_INIT_C(s, d, l) \
+	{ DEBUG_INIT_S(s); DEBUG_INIT_D(d, l); DEBUG_INIT_S("\n"); }
+
+#define MV_MBUS_REGS_OFFSET			(0x20000)
+
+#include "ddr3_hw_training.h"
+
+#define MAX_DIMM_NUM				2
+#define SPD_SIZE				128
+
+#ifdef MV88F78X60
+#include "ddr3_axp.h"
+#elif defined(MV88F67XX)
+#include "ddr3_a370.h"
+#elif defined(MV88F672X)
+#include "ddr3_a375.h"
+#endif
+
+/* DDR3 training error codes */
+/* Stage 0 errors */
+#define MV_DDR3_TRAINING_ERR_BAD_SAR			0xDD300001
+/* Stage 1 errors */
+#define MV_DDR3_TRAINING_ERR_TWSI_FAIL			0xDD301001
+#define MV_DDR3_TRAINING_ERR_DIMM_TYPE_NO_MATCH		0xDD301001
+#define MV_DDR3_TRAINING_ERR_TWSI_BAD_TYPE		0xDD301003
+#define MV_DDR3_TRAINING_ERR_BUS_WIDTH_NOT_MATCH	0xDD301004
+#define MV_DDR3_TRAINING_ERR_BAD_DIMM_SETUP		0xDD301005
+#define MV_DDR3_TRAINING_ERR_MAX_CS_LIMIT		0xDD301006
+#define MV_DDR3_TRAINING_ERR_MAX_ENA_CS_LIMIT		0xDD301007
+#define MV_DDR3_TRAINING_ERR_BAD_R_DIMM_SETUP		0xDD301008
+/* Stage 2 errors */
+#define MV_DDR3_TRAINING_ERR_HW_FAIL_BASE		0xDD302000
+
+typedef enum config_type {
+	CONFIG_ECC,
+	CONFIG_MULTI_CS,
+	CONFIG_BUS_WIDTH
+} MV_CONFIG_TYPE;
+
+enum log_level {
+	MV_LOG_LEVEL_0,
+	MV_LOG_LEVEL_1,
+	MV_LOG_LEVEL_2,
+	MV_LOG_LEVEL_3
+};
+
+int ddr3_hw_training(u32 target_freq, u32 ddr_width,
+		     int xor_bypass, u32 scrub_offs, u32 scrub_size,
+		     int dqs_clk_aligned, int debug_mode, int reg_dimm_skip_wl);
+
+void ddr3_print_version(void);
+void fix_pll_val(u8 target_fab);
+u8 ddr3_get_eprom_fabric(void);
+u32 ddr3_get_fab_opt(void);
+u32 ddr3_get_cpu_freq(void);
+u32 ddr3_get_vco_freq(void);
+int ddr3_check_config(u32 addr, MV_CONFIG_TYPE config_type);
+u32 ddr3_get_static_mc_value(u32 reg_addr, u32 offset1, u32 mask1, u32 offset2,
+			     u32 mask2);
+u32 ddr3_cl_to_valid_cl(u32 cl);
+u32 ddr3_valid_cl_to_cl(u32 ui_valid_cl);
+u32 ddr3_get_cs_num_from_reg(void);
+u32 ddr3_get_cs_ena_from_reg(void);
+u8 mv_ctrl_rev_get(void);
+
+u32 ddr3_get_log_level(void);
+
+/* SPD */
+int ddr3_dunit_setup(u32 ecc_ena, u32 hclk_time, u32 *ddr_width);
+
+/*
+ * Accessor functions for the registers
+ */
+static inline void reg_write(u32 addr, u32 val)
+{
+	writel(val, INTER_REGS_BASE + addr);
+}
+
+static inline u32 reg_read(u32 addr)
+{
+	return readl(INTER_REGS_BASE + addr);
+}
+
+static
inline void reg_bit_set(u32 addr, u32 mask) +{ + setbits_le32(INTER_REGS_BASE + addr, mask); +} + +static inline void reg_bit_clr(u32 addr, u32 mask) +{ + clrbits_le32(INTER_REGS_BASE + addr, mask); +} + +#endif /* __DDR3_INIT_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_patterns_64bit.h b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_patterns_64bit.h new file mode 100644 index 000000000..00dc9e355 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_patterns_64bit.h @@ -0,0 +1,923 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#ifndef __DDR3_PATTERNS_64_H +#define __DDR3_PATTERNS_64_H + +/* + * Patterns Declerations + */ + +u32 wl_sup_pattern[LEN_WL_SUP_PATTERN] __aligned(32) = { + 0x04030201, 0x08070605, 0x0c0b0a09, 0x100f0e0d, + 0x14131211, 0x18171615, 0x1c1b1a19, 0x201f1e1d, + 0x24232221, 0x28272625, 0x2c2b2a29, 0x302f2e2d, + 0x34333231, 0x38373635, 0x3c3b3a39, 0x403f3e3d, + 0x44434241, 0x48474645, 0x4c4b4a49, 0x504f4e4d, + 0x54535251, 0x58575655, 0x5c5b5a59, 0x605f5e5d, + 0x64636261, 0x68676665, 0x6c6b6a69, 0x706f6e6d, + 0x74737271, 0x78777675, 0x7c7b7a79, 0x807f7e7d +}; + +u32 pbs_pattern_32b[2][LEN_PBS_PATTERN] __aligned(32) = { + { + 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, 0x55555555, + 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, 0x55555555, + 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, 0x55555555, + 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, 0x55555555 + }, + { + 0x55555555, 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, + 0x55555555, 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, + 0x55555555, 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA, + 0x55555555, 0xAAAAAAAA, 0x55555555, 0xAAAAAAAA + } +}; + +u32 pbs_pattern_64b[2][LEN_PBS_PATTERN] __aligned(32) = { + { + 0xAAAAAAAA, 0xAAAAAAAA, 0x55555555, 0x55555555, + 0xAAAAAAAA, 0xAAAAAAAA, 0x55555555, 0x55555555, + 0xAAAAAAAA, 0xAAAAAAAA, 0x55555555, 0x55555555, + 0xAAAAAAAA, 0xAAAAAAAA, 0x55555555, 0x55555555 + }, + { + 0x55555555, 0x55555555, 0xAAAAAAAA, 0xAAAAAAAA, + 0x55555555, 0x55555555, 0xAAAAAAAA, 0xAAAAAAAA, + 0x55555555, 0x55555555, 0xAAAAAAAA, 0xAAAAAAAA, + 0x55555555, 0x55555555, 0xAAAAAAAA, 0xAAAAAAAA + } +}; + +u32 rl_pattern[LEN_STD_PATTERN] __aligned(32) = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x01010101, 0x01010101, 0x01010101, 0x01010101 +}; + +u32 killer_pattern_32b[DQ_NUM][LEN_KILLER_PATTERN] __aligned(32) = { + { + 0x01010101, 0x00000000, 0x01010101, 0xFFFFFFFF, + 0x01010101, 0x00000000, 0x01010101, 0xFFFFFFFF, + 0xFEFEFEFE, 0xFEFEFEFE, 0x01010101, 0xFEFEFEFE, + 0xFEFEFEFE, 0xFEFEFEFE, 0x01010101, 0xFEFEFEFE, + 0x01010101, 0xFEFEFEFE, 0x01010101, 0x01010101, + 0x01010101, 0xFEFEFEFE, 0x01010101, 0x01010101, + 0xFEFEFEFE, 0x01010101, 0xFEFEFEFE, 0x00000000, + 0xFEFEFEFE, 0x01010101, 0xFEFEFEFE, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x01010101, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x01010101, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xFEFEFEFE, + 0x00000000, 0x00000000, 0x00000000, 0xFEFEFEFE, + 0xFEFEFEFE, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFEFEFEFE, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 
0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFEFEFEFE, 0x00000000, 0xFEFEFEFE, 0x00000000, + 0xFEFEFEFE, 0x00000000, 0xFEFEFEFE, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x01010101, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x01010101, + 0xFFFFFFFF, 0xFFFFFFFF, 0x01010101, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x01010101, 0x00000000, + 0x01010101, 0xFFFFFFFF, 0xFEFEFEFE, 0xFEFEFEFE, + 0x01010101, 0xFFFFFFFF, 0xFEFEFEFE, 0xFEFEFEFE + }, + { + 0x02020202, 0x00000000, 0x02020202, 0xFFFFFFFF, + 0x02020202, 0x00000000, 0x02020202, 0xFFFFFFFF, + 0xFDFDFDFD, 0xFDFDFDFD, 0x02020202, 0xFDFDFDFD, + 0xFDFDFDFD, 0xFDFDFDFD, 0x02020202, 0xFDFDFDFD, + 0x02020202, 0xFDFDFDFD, 0x02020202, 0x02020202, + 0x02020202, 0xFDFDFDFD, 0x02020202, 0x02020202, + 0xFDFDFDFD, 0x02020202, 0xFDFDFDFD, 0x00000000, + 0xFDFDFDFD, 0x02020202, 0xFDFDFDFD, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x02020202, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x02020202, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xFDFDFDFD, + 0x00000000, 0x00000000, 0x00000000, 0xFDFDFDFD, + 0xFDFDFDFD, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFDFDFDFD, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFDFDFDFD, 0x00000000, 0xFDFDFDFD, 0x00000000, + 0xFDFDFDFD, 0x00000000, 0xFDFDFDFD, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x02020202, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x02020202, + 0xFFFFFFFF, 0xFFFFFFFF, 0x02020202, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x02020202, 0x00000000, + 0x02020202, 0xFFFFFFFF, 0xFDFDFDFD, 0xFDFDFDFD, + 0x02020202, 0xFFFFFFFF, 0xFDFDFDFD, 0xFDFDFDFD + }, + { + 0x04040404, 0x00000000, 0x04040404, 0xFFFFFFFF, + 0x04040404, 0x00000000, 0x04040404, 0xFFFFFFFF, + 0xFBFBFBFB, 0xFBFBFBFB, 0x04040404, 0xFBFBFBFB, + 0xFBFBFBFB, 0xFBFBFBFB, 0x04040404, 0xFBFBFBFB, + 0x04040404, 0xFBFBFBFB, 0x04040404, 0x04040404, + 0x04040404, 0xFBFBFBFB, 0x04040404, 0x04040404, + 0xFBFBFBFB, 0x04040404, 0xFBFBFBFB, 0x00000000, + 0xFBFBFBFB, 0x04040404, 0xFBFBFBFB, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x04040404, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x04040404, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xFBFBFBFB, + 0x00000000, 0x00000000, 0x00000000, 0xFBFBFBFB, + 0xFBFBFBFB, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFBFBFBFB, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFBFBFBFB, 0x00000000, 0xFBFBFBFB, 0x00000000, + 0xFBFBFBFB, 0x00000000, 0xFBFBFBFB, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x04040404, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x04040404, + 0xFFFFFFFF, 
0xFFFFFFFF, 0x04040404, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x04040404, 0x00000000, + 0x04040404, 0xFFFFFFFF, 0xFBFBFBFB, 0xFBFBFBFB, + 0x04040404, 0xFFFFFFFF, 0xFBFBFBFB, 0xFBFBFBFB + }, + { + 0x08080808, 0x00000000, 0x08080808, 0xFFFFFFFF, + 0x08080808, 0x00000000, 0x08080808, 0xFFFFFFFF, + 0xF7F7F7F7, 0xF7F7F7F7, 0x08080808, 0xF7F7F7F7, + 0xF7F7F7F7, 0xF7F7F7F7, 0x08080808, 0xF7F7F7F7, + 0x08080808, 0xF7F7F7F7, 0x08080808, 0x08080808, + 0x08080808, 0xF7F7F7F7, 0x08080808, 0x08080808, + 0xF7F7F7F7, 0x08080808, 0xF7F7F7F7, 0x00000000, + 0xF7F7F7F7, 0x08080808, 0xF7F7F7F7, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x08080808, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x08080808, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xF7F7F7F7, + 0x00000000, 0x00000000, 0x00000000, 0xF7F7F7F7, + 0xF7F7F7F7, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xF7F7F7F7, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xF7F7F7F7, 0x00000000, 0xF7F7F7F7, 0x00000000, + 0xF7F7F7F7, 0x00000000, 0xF7F7F7F7, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x08080808, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x08080808, + 0xFFFFFFFF, 0xFFFFFFFF, 0x08080808, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x08080808, 0x00000000, + 0x08080808, 0xFFFFFFFF, 0xF7F7F7F7, 0xF7F7F7F7, + 0x08080808, 0xFFFFFFFF, 0xF7F7F7F7, 0xF7F7F7F7 + }, + { + 0x10101010, 0x00000000, 0x10101010, 0xFFFFFFFF, + 0x10101010, 0x00000000, 0x10101010, 0xFFFFFFFF, + 0xEFEFEFEF, 0xEFEFEFEF, 0x10101010, 0xEFEFEFEF, + 0xEFEFEFEF, 0xEFEFEFEF, 0x10101010, 0xEFEFEFEF, + 0x10101010, 0xEFEFEFEF, 0x10101010, 0x10101010, + 0x10101010, 0xEFEFEFEF, 0x10101010, 0x10101010, + 0xEFEFEFEF, 0x10101010, 0xEFEFEFEF, 0x00000000, + 0xEFEFEFEF, 0x10101010, 0xEFEFEFEF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x10101010, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x10101010, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xEFEFEFEF, + 0x00000000, 0x00000000, 0x00000000, 0xEFEFEFEF, + 0xEFEFEFEF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xEFEFEFEF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xEFEFEFEF, 0x00000000, 0xEFEFEFEF, 0x00000000, + 0xEFEFEFEF, 0x00000000, 0xEFEFEFEF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x10101010, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x10101010, + 0xFFFFFFFF, 0xFFFFFFFF, 0x10101010, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x10101010, 0x00000000, + 0x10101010, 0xFFFFFFFF, 0xEFEFEFEF, 0xEFEFEFEF, + 0x10101010, 0xFFFFFFFF, 0xEFEFEFEF, 0xEFEFEFEF + }, + { + 0x20202020, 0x00000000, 0x20202020, 0xFFFFFFFF, + 0x20202020, 0x00000000, 0x20202020, 0xFFFFFFFF, + 0xDFDFDFDF, 0xDFDFDFDF, 0x20202020, 
0xDFDFDFDF, + 0xDFDFDFDF, 0xDFDFDFDF, 0x20202020, 0xDFDFDFDF, + 0x20202020, 0xDFDFDFDF, 0x20202020, 0x20202020, + 0x20202020, 0xDFDFDFDF, 0x20202020, 0x20202020, + 0xDFDFDFDF, 0x20202020, 0xDFDFDFDF, 0x00000000, + 0xDFDFDFDF, 0x20202020, 0xDFDFDFDF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x20202020, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x20202020, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xDFDFDFDF, + 0x00000000, 0x00000000, 0x00000000, 0xDFDFDFDF, + 0xDFDFDFDF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xDFDFDFDF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xDFDFDFDF, 0x00000000, 0xDFDFDFDF, 0x00000000, + 0xDFDFDFDF, 0x00000000, 0xDFDFDFDF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x20202020, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x20202020, + 0xFFFFFFFF, 0xFFFFFFFF, 0x20202020, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x20202020, 0x00000000, + 0x20202020, 0xFFFFFFFF, 0xDFDFDFDF, 0xDFDFDFDF, + 0x20202020, 0xFFFFFFFF, 0xDFDFDFDF, 0xDFDFDFDF + }, + { + 0x40404040, 0x00000000, 0x40404040, 0xFFFFFFFF, + 0x40404040, 0x00000000, 0x40404040, 0xFFFFFFFF, + 0xBFBFBFBF, 0xBFBFBFBF, 0x40404040, 0xBFBFBFBF, + 0xBFBFBFBF, 0xBFBFBFBF, 0x40404040, 0xBFBFBFBF, + 0x40404040, 0xBFBFBFBF, 0x40404040, 0x40404040, + 0x40404040, 0xBFBFBFBF, 0x40404040, 0x40404040, + 0xBFBFBFBF, 0x40404040, 0xBFBFBFBF, 0x00000000, + 0xBFBFBFBF, 0x40404040, 0xBFBFBFBF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x40404040, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x40404040, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xBFBFBFBF, + 0x00000000, 0x00000000, 0x00000000, 0xBFBFBFBF, + 0xBFBFBFBF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xBFBFBFBF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xBFBFBFBF, 0x00000000, 0xBFBFBFBF, 0x00000000, + 0xBFBFBFBF, 0x00000000, 0xBFBFBFBF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x40404040, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x40404040, + 0xFFFFFFFF, 0xFFFFFFFF, 0x40404040, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x40404040, 0x00000000, + 0x40404040, 0xFFFFFFFF, 0xBFBFBFBF, 0xBFBFBFBF, + 0x40404040, 0xFFFFFFFF, 0xBFBFBFBF, 0xBFBFBFBF + }, + { + 0x80808080, 0x00000000, 0x80808080, 0xFFFFFFFF, + 0x80808080, 0x00000000, 0x80808080, 0xFFFFFFFF, + 0x7F7F7F7F, 0x7F7F7F7F, 0x80808080, 0x7F7F7F7F, + 0x7F7F7F7F, 0x7F7F7F7F, 0x80808080, 0x7F7F7F7F, + 0x80808080, 0x7F7F7F7F, 0x80808080, 0x80808080, + 0x80808080, 0x7F7F7F7F, 0x80808080, 0x80808080, + 0x7F7F7F7F, 0x80808080, 0x7F7F7F7F, 0x00000000, + 0x7F7F7F7F, 0x80808080, 0x7F7F7F7F, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 
0xFFFFFFFF, 0x00000000, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x80808080, + 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x80808080, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x7F7F7F7F, + 0x00000000, 0x00000000, 0x00000000, 0x7F7F7F7F, + 0x7F7F7F7F, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x7F7F7F7F, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, + 0x7F7F7F7F, 0x00000000, 0x7F7F7F7F, 0x00000000, + 0x7F7F7F7F, 0x00000000, 0x7F7F7F7F, 0x00000000, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x80808080, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0x80808080, + 0xFFFFFFFF, 0xFFFFFFFF, 0x80808080, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x80808080, 0x00000000, + 0x80808080, 0xFFFFFFFF, 0x7F7F7F7F, 0x7F7F7F7F, + 0x80808080, 0xFFFFFFFF, 0x7F7F7F7F, 0x7F7F7F7F + } +}; + +u32 killer_pattern_64b[DQ_NUM][LEN_KILLER_PATTERN] __aligned(32) = { + { + 0x01010101, 0x01010101, 0x00000000, 0x00000000, + 0x01010101, 0x01010101, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFEFEFEFE, 0xFEFEFEFE, 0xFEFEFEFE, 0xFEFEFEFE, + 0x01010101, 0x01010101, 0xFEFEFEFE, 0xFEFEFEFE, + 0x01010101, 0x01010101, 0xFEFEFEFE, 0xFEFEFEFE, + 0x01010101, 0x01010101, 0x01010101, 0x01010101, + 0xFEFEFEFE, 0xFEFEFEFE, 0x01010101, 0x01010101, + 0xFEFEFEFE, 0xFEFEFEFE, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x01010101, 0x01010101, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFEFEFEFE, 0xFEFEFEFE, + 0xFEFEFEFE, 0xFEFEFEFE, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFEFEFEFE, 0xFEFEFEFE, 0x00000000, 0x00000000, + 0xFEFEFEFE, 0xFEFEFEFE, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x01010101, 0x01010101, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x01010101, 0x01010101, 0x00000000, 0x00000000, + 0x01010101, 0x01010101, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFEFEFEFE, 0xFEFEFEFE, 0xFEFEFEFE, 0xFEFEFEFE + }, + { + 0x02020202, 0x02020202, 0x00000000, 0x00000000, + 0x02020202, 0x02020202, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFDFDFDFD, 0xFDFDFDFD, 0xFDFDFDFD, 0xFDFDFDFD, + 0x02020202, 0x02020202, 0xFDFDFDFD, 0xFDFDFDFD, + 0x02020202, 0x02020202, 0xFDFDFDFD, 0xFDFDFDFD, + 0x02020202, 0x02020202, 0x02020202, 0x02020202, + 0xFDFDFDFD, 0xFDFDFDFD, 0x02020202, 0x02020202, + 0xFDFDFDFD, 0xFDFDFDFD, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x02020202, 0x02020202, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0xFDFDFDFD, 0xFDFDFDFD, + 0xFDFDFDFD, 0xFDFDFDFD, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFDFDFDFD, 0xFDFDFDFD, 0x00000000, 0x00000000, + 0xFDFDFDFD, 0xFDFDFDFD, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x02020202, 0x02020202, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x02020202, 0x02020202, 0x00000000, 0x00000000, + 0x02020202, 0x02020202, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFDFDFDFD, 0xFDFDFDFD, 0xFDFDFDFD, 0xFDFDFDFD + }, + { + 0x04040404, 0x04040404, 0x00000000, 0x00000000, + 0x04040404, 0x04040404, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, + 0x04040404, 0x04040404, 0xFBFBFBFB, 0xFBFBFBFB, + 0x04040404, 0x04040404, 0xFBFBFBFB, 0xFBFBFBFB, + 0x04040404, 0x04040404, 0x04040404, 0x04040404, + 0xFBFBFBFB, 0xFBFBFBFB, 0x04040404, 0x04040404, + 0xFBFBFBFB, 0xFBFBFBFB, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x04040404, 0x04040404, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFBFBFBFB, 0xFBFBFBFB, + 0xFBFBFBFB, 0xFBFBFBFB, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFBFBFBFB, 0xFBFBFBFB, 0x00000000, 0x00000000, + 0xFBFBFBFB, 0xFBFBFBFB, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x04040404, 0x04040404, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x04040404, 0x04040404, 0x00000000, 0x00000000, + 0x04040404, 0x04040404, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB + }, + { + 0x08080808, 0x08080808, 0x00000000, 0x00000000, + 0x08080808, 0x08080808, 0xFFFFFFFF, 0xFFFFFFFF, + 0xF7F7F7F7, 0xF7F7F7F7, 0xF7F7F7F7, 0xF7F7F7F7, + 0x08080808, 0x08080808, 0xF7F7F7F7, 0xF7F7F7F7, + 0x08080808, 0x08080808, 0xF7F7F7F7, 0xF7F7F7F7, + 0x08080808, 0x08080808, 0x08080808, 0x08080808, + 0xF7F7F7F7, 0xF7F7F7F7, 0x08080808, 0x08080808, + 0xF7F7F7F7, 0xF7F7F7F7, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x08080808, 0x08080808, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xF7F7F7F7, 0xF7F7F7F7, + 0xF7F7F7F7, 0xF7F7F7F7, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xF7F7F7F7, 0xF7F7F7F7, 0x00000000, 0x00000000, + 0xF7F7F7F7, 0xF7F7F7F7, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x08080808, 0x08080808, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x08080808, 0x08080808, 0x00000000, 0x00000000, + 0x08080808, 0x08080808, 0xFFFFFFFF, 0xFFFFFFFF, + 0xF7F7F7F7, 0xF7F7F7F7, 0xF7F7F7F7, 0xF7F7F7F7 + }, + { + 0x10101010, 0x10101010, 0x00000000, 0x00000000, + 0x10101010, 0x10101010, 0xFFFFFFFF, 0xFFFFFFFF, + 0xEFEFEFEF, 0xEFEFEFEF, 0xEFEFEFEF, 0xEFEFEFEF, + 0x10101010, 0x10101010, 0xEFEFEFEF, 0xEFEFEFEF, + 0x10101010, 0x10101010, 0xEFEFEFEF, 0xEFEFEFEF, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0xEFEFEFEF, 0xEFEFEFEF, 0x10101010, 0x10101010, + 0xEFEFEFEF, 0xEFEFEFEF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x10101010, 0x10101010, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xEFEFEFEF, 0xEFEFEFEF, + 0xEFEFEFEF, 0xEFEFEFEF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xEFEFEFEF, 0xEFEFEFEF, 0x00000000, 0x00000000, + 0xEFEFEFEF, 0xEFEFEFEF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x10101010, 0x10101010, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x10101010, 0x10101010, 0x00000000, 0x00000000, + 0x10101010, 0x10101010, 0xFFFFFFFF, 0xFFFFFFFF, + 0xEFEFEFEF, 0xEFEFEFEF, 0xEFEFEFEF, 0xEFEFEFEF + }, + { + 0x20202020, 0x20202020, 0x00000000, 0x00000000, + 0x20202020, 0x20202020, 0xFFFFFFFF, 0xFFFFFFFF, + 0xDFDFDFDF, 0xDFDFDFDF, 0xDFDFDFDF, 0xDFDFDFDF, + 0x20202020, 0x20202020, 0xDFDFDFDF, 0xDFDFDFDF, + 0x20202020, 0x20202020, 0xDFDFDFDF, 0xDFDFDFDF, + 0x20202020, 0x20202020, 0x20202020, 0x20202020, + 0xDFDFDFDF, 0xDFDFDFDF, 0x20202020, 0x20202020, + 0xDFDFDFDF, 0xDFDFDFDF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x20202020, 0x20202020, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xDFDFDFDF, 0xDFDFDFDF, + 0xDFDFDFDF, 0xDFDFDFDF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xDFDFDFDF, 0xDFDFDFDF, 0x00000000, 0x00000000, + 0xDFDFDFDF, 0xDFDFDFDF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x20202020, 0x20202020, + 0xFFFFFFFF, 
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x20202020, 0x20202020, 0x00000000, 0x00000000, + 0x20202020, 0x20202020, 0xFFFFFFFF, 0xFFFFFFFF, + 0xDFDFDFDF, 0xDFDFDFDF, 0xDFDFDFDF, 0xDFDFDFDF + }, + { + 0x40404040, 0x40404040, 0x00000000, 0x00000000, + 0x40404040, 0x40404040, 0xFFFFFFFF, 0xFFFFFFFF, + 0xBFBFBFBF, 0xBFBFBFBF, 0xBFBFBFBF, 0xBFBFBFBF, + 0x40404040, 0x40404040, 0xBFBFBFBF, 0xBFBFBFBF, + 0x40404040, 0x40404040, 0xBFBFBFBF, 0xBFBFBFBF, + 0x40404040, 0x40404040, 0x40404040, 0x40404040, + 0xBFBFBFBF, 0xBFBFBFBF, 0x40404040, 0x40404040, + 0xBFBFBFBF, 0xBFBFBFBF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x40404040, 0x40404040, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xBFBFBFBF, 0xBFBFBFBF, + 0xBFBFBFBF, 0xBFBFBFBF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xBFBFBFBF, 0xBFBFBFBF, 0x00000000, 0x00000000, + 0xBFBFBFBF, 0xBFBFBFBF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x40404040, 0x40404040, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x40404040, 0x40404040, 0x00000000, 0x00000000, + 0x40404040, 0x40404040, 0xFFFFFFFF, 0xFFFFFFFF, + 0xBFBFBFBF, 0xBFBFBFBF, 0xBFBFBFBF, 0xBFBFBFBF + }, + { + 0x80808080, 0x80808080, 0x00000000, 0x00000000, + 0x80808080, 0x80808080, 0xFFFFFFFF, 0xFFFFFFFF, + 0x7F7F7F7F, 0x7F7F7F7F, 0x7F7F7F7F, 0x7F7F7F7F, + 0x80808080, 0x80808080, 0x7F7F7F7F, 0x7F7F7F7F, + 0x80808080, 0x80808080, 0x7F7F7F7F, 0x7F7F7F7F, + 0x80808080, 0x80808080, 0x80808080, 0x80808080, + 0x7F7F7F7F, 0x7F7F7F7F, 0x80808080, 0x80808080, + 0x7F7F7F7F, 0x7F7F7F7F, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x80808080, 0x80808080, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x7F7F7F7F, 0x7F7F7F7F, + 0x7F7F7F7F, 0x7F7F7F7F, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x7F7F7F7F, 0x7F7F7F7F, 0x00000000, 0x00000000, + 0x7F7F7F7F, 0x7F7F7F7F, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x80808080, 0x80808080, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x80808080, 0x80808080, 0x00000000, 0x00000000, + 0x80808080, 0x80808080, 0xFFFFFFFF, 0xFFFFFFFF, + 0x7F7F7F7F, 0x7F7F7F7F, 0x7F7F7F7F, 0x7F7F7F7F + } +}; + +u32 special_pattern[DQ_NUM][LEN_SPECIAL_PATTERN] __aligned(32) = { + { + 0x00000000, 0x00000000, 0x01010101, 0x01010101, + 0xFFFFFFFF, 
0xFFFFFFFF, 0xFEFEFEFE, 0xFEFEFEFE, + 0xFEFEFEFE, 0xFEFEFEFE, 0x01010101, 0x01010101, + 0xFEFEFEFE, 0xFEFEFEFE, 0x01010101, 0x01010101, + 0xFEFEFEFE, 0xFEFEFEFE, 0x01010101, 0x01010101, + 0x01010101, 0x01010101, 0xFEFEFEFE, 0xFEFEFEFE, + 0x01010101, 0x01010101, 0xFEFEFEFE, 0xFEFEFEFE, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x01010101, 0x01010101, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFEFEFEFE, 0xFEFEFEFE, 0xFEFEFEFE, 0xFEFEFEFE, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFEFEFEFE, 0xFEFEFEFE, + 0x00000000, 0x00000000, 0xFEFEFEFE, 0xFEFEFEFE, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x01010101, 0x01010101, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x01010101, 0x01010101, + 0x00000000, 0x00000000, 0x01010101, 0x01010101, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFEFEFEFE, 0xFEFEFEFE, + 0xFEFEFEFE, 0xFEFEFEFE, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x02020202, 0x02020202, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFDFDFDFD, 0xFDFDFDFD, + 0xFDFDFDFD, 0xFDFDFDFD, 0x02020202, 0x02020202, + 0xFDFDFDFD, 0xFDFDFDFD, 0x02020202, 0x02020202, + 0xFDFDFDFD, 0xFDFDFDFD, 0x02020202, 0x02020202, + 0x02020202, 0x02020202, 0xFDFDFDFD, 0xFDFDFDFD, + 0x02020202, 0x02020202, 0xFDFDFDFD, 0xFDFDFDFD, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x02020202, 0x02020202, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFDFDFDFD, 0xFDFDFDFD, 0xFDFDFDFD, 0xFDFDFDFD, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFDFDFDFD, 0xFDFDFDFD, + 0x00000000, 0x00000000, 0xFDFDFDFD, 0xFDFDFDFD, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x02020202, 0x02020202, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x02020202, 0x02020202, + 0x00000000, 0x00000000, 0x02020202, 0x02020202, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFDFDFDFD, 0xFDFDFDFD, + 0xFDFDFDFD, 0xFDFDFDFD, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x04040404, 0x04040404, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFBFBFBFB, 0xFBFBFBFB, + 0xFBFBFBFB, 0xFBFBFBFB, 0x04040404, 0x04040404, + 0xFBFBFBFB, 0xFBFBFBFB, 0x04040404, 0x04040404, + 0xFBFBFBFB, 0xFBFBFBFB, 0x04040404, 0x04040404, + 0x04040404, 0x04040404, 0xFBFBFBFB, 0xFBFBFBFB, + 0x04040404, 0x04040404, 0xFBFBFBFB, 0xFBFBFBFB, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 
0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x04040404, 0x04040404, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFBFBFBFB, 0xFBFBFBFB, + 0x00000000, 0x00000000, 0xFBFBFBFB, 0xFBFBFBFB, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x04040404, 0x04040404, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x04040404, 0x04040404, + 0x00000000, 0x00000000, 0x04040404, 0x04040404, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFBFBFBFB, 0xFBFBFBFB, + 0xFBFBFBFB, 0xFBFBFBFB, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x08080808, 0x08080808, + 0xFFFFFFFF, 0xFFFFFFFF, 0xF7F7F7F7, 0xF7F7F7F7, + 0xF7F7F7F7, 0xF7F7F7F7, 0x08080808, 0x08080808, + 0xF7F7F7F7, 0xF7F7F7F7, 0x08080808, 0x08080808, + 0xF7F7F7F7, 0xF7F7F7F7, 0x08080808, 0x08080808, + 0x08080808, 0x08080808, 0xF7F7F7F7, 0xF7F7F7F7, + 0x08080808, 0x08080808, 0xF7F7F7F7, 0xF7F7F7F7, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x08080808, 0x08080808, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xF7F7F7F7, 0xF7F7F7F7, 0xF7F7F7F7, 0xF7F7F7F7, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xF7F7F7F7, 0xF7F7F7F7, + 0x00000000, 0x00000000, 0xF7F7F7F7, 0xF7F7F7F7, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x08080808, 0x08080808, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x08080808, 0x08080808, + 0x00000000, 0x00000000, 0x08080808, 0x08080808, + 0xFFFFFFFF, 0xFFFFFFFF, 0xF7F7F7F7, 0xF7F7F7F7, + 0xF7F7F7F7, 0xF7F7F7F7, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x10101010, 0x10101010, + 0xFFFFFFFF, 0xFFFFFFFF, 0xEFEFEFEF, 0xEFEFEFEF, + 0xEFEFEFEF, 0xEFEFEFEF, 0x10101010, 0x10101010, + 0xEFEFEFEF, 0xEFEFEFEF, 0x10101010, 0x10101010, + 0xEFEFEFEF, 0xEFEFEFEF, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0xEFEFEFEF, 0xEFEFEFEF, + 0x10101010, 0x10101010, 0xEFEFEFEF, 0xEFEFEFEF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x10101010, 0x10101010, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0xEFEFEFEF, 0xEFEFEFEF, 0xEFEFEFEF, 0xEFEFEFEF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xEFEFEFEF, 0xEFEFEFEF, + 0x00000000, 0x00000000, 0xEFEFEFEF, 0xEFEFEFEF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x10101010, 0x10101010, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x10101010, 0x10101010, + 0x00000000, 0x00000000, 0x10101010, 0x10101010, + 0xFFFFFFFF, 0xFFFFFFFF, 0xEFEFEFEF, 0xEFEFEFEF, + 0xEFEFEFEF, 0xEFEFEFEF, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x20202020, 0x20202020, + 0xFFFFFFFF, 0xFFFFFFFF, 0xDFDFDFDF, 0xDFDFDFDF, + 0xDFDFDFDF, 0xDFDFDFDF, 0x20202020, 0x20202020, + 0xDFDFDFDF, 0xDFDFDFDF, 0x20202020, 0x20202020, + 0xDFDFDFDF, 0xDFDFDFDF, 0x20202020, 0x20202020, + 0x20202020, 0x20202020, 0xDFDFDFDF, 0xDFDFDFDF, + 0x20202020, 0x20202020, 0xDFDFDFDF, 0xDFDFDFDF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x20202020, 0x20202020, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xDFDFDFDF, 0xDFDFDFDF, 0xDFDFDFDF, 0xDFDFDFDF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xDFDFDFDF, 0xDFDFDFDF, + 0x00000000, 0x00000000, 0xDFDFDFDF, 0xDFDFDFDF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x20202020, 0x20202020, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x20202020, 0x20202020, + 0x00000000, 0x00000000, 0x20202020, 0x20202020, + 0xFFFFFFFF, 0xFFFFFFFF, 0xDFDFDFDF, 0xDFDFDFDF, + 0xDFDFDFDF, 0xDFDFDFDF, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x40404040, 0x40404040, + 0xFFFFFFFF, 0xFFFFFFFF, 0xBFBFBFBF, 0xBFBFBFBF, + 0xBFBFBFBF, 0xBFBFBFBF, 0x40404040, 0x40404040, + 0xBFBFBFBF, 0xBFBFBFBF, 0x40404040, 0x40404040, + 0xBFBFBFBF, 0xBFBFBFBF, 0x40404040, 0x40404040, + 0x40404040, 0x40404040, 0xBFBFBFBF, 0xBFBFBFBF, + 0x40404040, 0x40404040, 0xBFBFBFBF, 0xBFBFBFBF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x40404040, 0x40404040, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xBFBFBFBF, 0xBFBFBFBF, 0xBFBFBFBF, 0xBFBFBFBF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xBFBFBFBF, 0xBFBFBFBF, + 0x00000000, 0x00000000, 0xBFBFBFBF, 0xBFBFBFBF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x40404040, 0x40404040, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x40404040, 0x40404040, + 0x00000000, 0x00000000, 0x40404040, 0x40404040, + 0xFFFFFFFF, 0xFFFFFFFF, 0xBFBFBFBF, 0xBFBFBFBF, + 0xBFBFBFBF, 0xBFBFBFBF, 0x00000000, 0x00000000 + }, + { + 0x00000000, 0x00000000, 0x80808080, 0x80808080, + 0xFFFFFFFF, 0xFFFFFFFF, 0x7F7F7F7F, 0x7F7F7F7F, + 0x7F7F7F7F, 0x7F7F7F7F, 0x80808080, 0x80808080, + 0x7F7F7F7F, 0x7F7F7F7F, 0x80808080, 0x80808080, + 0x7F7F7F7F, 0x7F7F7F7F, 0x80808080, 0x80808080, + 0x80808080, 0x80808080, 0x7F7F7F7F, 0x7F7F7F7F, + 0x80808080, 0x80808080, 0x7F7F7F7F, 0x7F7F7F7F, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x80808080, 0x80808080, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x7F7F7F7F, 0x7F7F7F7F, 0x7F7F7F7F, 0x7F7F7F7F, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0x7F7F7F7F, 0x7F7F7F7F, + 0x00000000, 0x00000000, 0x7F7F7F7F, 0x7F7F7F7F, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x80808080, 0x80808080, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0x80808080, 0x80808080, + 0x00000000, 0x00000000, 0x80808080, 0x80808080, + 0xFFFFFFFF, 0xFFFFFFFF, 0x7F7F7F7F, 0x7F7F7F7F, + 0x7F7F7F7F, 0x7F7F7F7F, 0x00000000, 0x00000000 + } +}; + +/* Fabric ratios table */ +u32 fabric_ratio[FAB_OPT] = { + 0x04010204, + 0x04020202, + 0x08020306, + 0x08020303, + 0x04020303, + 0x04020204, + 0x04010202, + 0x08030606, + 0x08030505, + 0x04020306, + 0x0804050A, + 0x04030606, + 0x04020404, + 0x04030306, + 0x04020505, + 0x08020505, + 0x04010303, + 0x08050A0A, + 0x04030408, + 0x04010102, + 0x08030306 +}; + +u32 pbs_dq_mapping[PUP_NUM_64BIT + 1][DQ_NUM] = { + {3, 2, 5, 7, 1, 0, 6, 4}, + {2, 3, 6, 7, 1, 0, 4, 5}, + {1, 3, 5, 6, 0, 2, 4, 7}, + {0, 2, 4, 7, 1, 3, 5, 6}, + {3, 0, 4, 6, 1, 2, 5, 7}, + {0, 3, 5, 7, 1, 2, 4, 6}, + {2, 3, 5, 7, 1, 0, 4, 6}, + {0, 2, 5, 4, 1, 3, 6, 7}, + {2, 3, 4, 7, 0, 1, 5, 6} +}; + +#endif /* __DDR3_PATTERNS_64_H */ diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_pbs.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_pbs.c new file mode 100644 index 000000000..069a42fbf --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_pbs.c @@ -0,0 +1,1592 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> +#include <linux/delay.h> + +#include "ddr3_hw_training.h" + +/* + * Debug + */ +#define DEBUG_PBS_FULL_C(s, d, l) \ + DEBUG_PBS_FULL_S(s); DEBUG_PBS_FULL_D(d, l); DEBUG_PBS_FULL_S("\n") +#define DEBUG_PBS_C(s, d, l) \ + DEBUG_PBS_S(s); DEBUG_PBS_D(d, l); DEBUG_PBS_S("\n") + +#ifdef MV_DEBUG_PBS +#define DEBUG_PBS_S(s) puts(s) +#define DEBUG_PBS_D(d, l) printf("%x", d) +#else +#define DEBUG_PBS_S(s) +#define DEBUG_PBS_D(d, l) +#endif + +#ifdef MV_DEBUG_FULL_PBS +#define DEBUG_PBS_FULL_S(s) puts(s) +#define DEBUG_PBS_FULL_D(d, l) printf("%x", d) +#else +#define DEBUG_PBS_FULL_S(s) +#define DEBUG_PBS_FULL_D(d, l) +#endif + +#if defined(MV88F78X60) || defined(MV88F672X) + +/* Temp array for skew data storage */ +static u32 skew_array[(MAX_PUP_NUM) * DQ_NUM] = { 0 }; + +/* PBS locked dq (per pup) */ +extern u32 pbs_locked_dq[MAX_PUP_NUM][DQ_NUM]; +extern u32 pbs_locked_dm[MAX_PUP_NUM]; +extern u32 pbs_locked_value[MAX_PUP_NUM][DQ_NUM]; + +#if defined(MV88F672X) +extern u32 pbs_pattern[2][LEN_16BIT_PBS_PATTERN]; +extern u32 pbs_pattern_32b[2][LEN_PBS_PATTERN]; +#else +extern u32 pbs_pattern_32b[2][LEN_PBS_PATTERN]; +extern u32 pbs_pattern_64b[2][LEN_PBS_PATTERN]; +#endif + +extern u32 pbs_dq_mapping[PUP_NUM_64BIT + 1][DQ_NUM]; + +static int ddr3_tx_shift_dqs_adll_step_before_fail(MV_DRAM_INFO *dram_info, + u32 cur_pup, u32 pbs_pattern_idx, u32 ecc); +static int ddr3_rx_shift_dqs_to_first_fail(MV_DRAM_INFO *dram_info, u32 cur_pup, + u32 pbs_pattern_idx, u32 ecc); +static int ddr3_pbs_per_bit(MV_DRAM_INFO *dram_info, int *start_over, int is_tx, + u32 *pcur_pup, u32 pbs_pattern_idx, u32 ecc); +static int ddr3_set_pbs_results(MV_DRAM_INFO *dram_info, int is_tx); +static void ddr3_pbs_write_pup_dqs_reg(u32 cs, u32 pup, u32 dqs_delay); + +/* + * Name: ddr3_pbs_tx + * Desc: Execute the PBS TX phase. + * Args: dram_info ddr3 training information struct + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_pbs_tx(MV_DRAM_INFO *dram_info) +{ + /* Array of Deskew results */ + + /* + * Array to hold the total sum of skew from all iterations + * (for average purpose) + */ + u32 skew_sum_array[MAX_PUP_NUM][DQ_NUM] = { {0} }; + + /* + * Array to hold the total average skew from both patterns + * (for average purpose) + */ + u32 pattern_skew_array[MAX_PUP_NUM][DQ_NUM] = { {0} }; + + u32 pbs_rep_time = 0; /* counts number of loop in case of fail */ + /* bit array for unlock pups - used to repeat on the RX operation */ + u32 cur_pup; + u32 max_pup; + u32 pbs_retry; + u32 pup, dq, pups, cur_max_pup, valid_pup, reg; + u32 pattern_idx; + u32 ecc; + /* indicates whether we need to start the loop again */ + int start_over; + + DEBUG_PBS_S("DDR3 - PBS TX - Starting PBS TX procedure\n"); + + pups = dram_info->num_of_total_pups; + max_pup = dram_info->num_of_total_pups; + + /* Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + DEBUG_PBS_S("DDR3 - PBS RX - SW Override Enabled\n"); + + reg = 1 << REG_DRAM_TRAINING_AUTO_OFFS; + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + /* Running twice for 2 different patterns. 
each patterns - 3 times */ + for (pattern_idx = 0; pattern_idx < COUNT_PBS_PATTERN; pattern_idx++) { + DEBUG_PBS_C("DDR3 - PBS TX - Working with pattern - ", + pattern_idx, 1); + + /* Reset sum array */ + for (pup = 0; pup < pups; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) + skew_sum_array[pup][dq] = 0; + } + + /* + * Perform PBS several of times (3 for each pattern). + * At the end, we'll use the average + */ + /* If there is ECC, do each PBS again with mux change */ + for (pbs_retry = 0; pbs_retry < COUNT_PBS_REPEAT; pbs_retry++) { + for (ecc = 0; ecc < (dram_info->ecc_ena + 1); ecc++) { + + /* + * This parameter stores the current PUP + * num - ecc mode dependent - 4-8 / 1 pups + */ + cur_max_pup = (1 - ecc) * + dram_info->num_of_std_pups + ecc; + + if (ecc) { + /* Only 1 pup in this case */ + valid_pup = 0x1; + } else if (cur_max_pup > 4) { + /* 64 bit - 8 pups */ + valid_pup = 0xFF; + } else if (cur_max_pup == 4) { + /* 32 bit - 4 pups */ + valid_pup = 0xF; + } else { + /* 16 bit - 2 pups */ + valid_pup = 0x3; + } + + /* ECC Support - Switch ECC Mux on ecc=1 */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg |= (dram_info->ecc_ena * ecc << + REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + if (ecc) + DEBUG_PBS_S("DDR3 - PBS Tx - ECC Mux Enabled\n"); + else + DEBUG_PBS_S("DDR3 - PBS Tx - ECC Mux Disabled\n"); + + /* Init iteration values */ + /* Clear the locked DQs */ + for (pup = 0; pup < cur_max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) { + pbs_locked_dq[ + pup + ecc * + (max_pup - 1)][dq] = + 0; + } + } + + pbs_rep_time = 0; + cur_pup = valid_pup; + start_over = 0; + + /* + * Run loop On current Pattern and current + * pattern iteration (just to cover the false + * fail problem) + */ + do { + DEBUG_PBS_S("DDR3 - PBS Tx - Pbs Rep Loop is "); + DEBUG_PBS_D(pbs_rep_time, 1); + DEBUG_PBS_S(", for Retry No."); + DEBUG_PBS_D(pbs_retry, 1); + DEBUG_PBS_S("\n"); + + /* Set all PBS values to MIN (0) */ + DEBUG_PBS_S("DDR3 - PBS Tx - Set all PBS values to MIN\n"); + + for (dq = 0; dq < DQ_NUM; dq++) { + ddr3_write_pup_reg( + PUP_PBS_TX + + pbs_dq_mapping[pup * + (1 - ecc) + + ecc * ECC_PUP] + [dq], CS0, (1 - ecc) * + PUP_BC + ecc * ECC_PUP, 0, + 0); + } + + /* + * Shift DQ ADLL right, One step before + * fail + */ + DEBUG_PBS_S("DDR3 - PBS Tx - ADLL shift right one phase before fail\n"); + + if (MV_OK != ddr3_tx_shift_dqs_adll_step_before_fail + (dram_info, cur_pup, pattern_idx, + ecc)) + return MV_DDR3_TRAINING_ERR_PBS_ADLL_SHR_1PHASE; + + /* PBS For each bit */ + DEBUG_PBS_S("DDR3 - PBS Tx - perform PBS for each bit\n"); + + /* + * In this stage - start_over = 0 + */ + if (MV_OK != ddr3_pbs_per_bit( + dram_info, &start_over, 1, + &cur_pup, pattern_idx, ecc)) + return MV_DDR3_TRAINING_ERR_PBS_TX_PER_BIT; + + } while ((start_over == 1) && + (++pbs_rep_time < COUNT_PBS_STARTOVER)); + + if (pbs_rep_time == COUNT_PBS_STARTOVER && + start_over == 1) { + DEBUG_PBS_S("DDR3 - PBS Tx - FAIL - Adll reach max value\n"); + return MV_DDR3_TRAINING_ERR_PBS_TX_MAX_VAL; + } + + DEBUG_PBS_FULL_C("DDR3 - PBS TX - values for iteration - ", + pbs_retry, 1); + for (pup = 0; pup < cur_max_pup; pup++) { + /* + * To minimize delay elements, inc + * from pbs value the min pbs val + */ + DEBUG_PBS_S("DDR3 - PBS - PUP"); + DEBUG_PBS_D((pup + (ecc * ECC_PUP)), 1); + DEBUG_PBS_S(": "); + + for (dq = 0; dq < DQ_NUM; dq++) { + /* Set skew value for all dq */ + /* + * Bit# Deskew <- Bit# Deskew - + * last / first failing bit + * Deskew 
For all bits (per PUP) + * (minimize delay elements) + */ + DEBUG_PBS_S("DQ"); + DEBUG_PBS_D(dq, 1); + DEBUG_PBS_S("-"); + DEBUG_PBS_D(skew_array + [((pup) * DQ_NUM) + + dq], 2); + DEBUG_PBS_S(", "); + } + DEBUG_PBS_S("\n"); + } + + /* + * Collect the results we got on this trial + * of PBS + */ + for (pup = 0; pup < cur_max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) { + skew_sum_array[pup + (ecc * (max_pup - 1))] + [dq] += skew_array + [((pup) * DQ_NUM) + dq]; + } + } + + /* ECC Support - Disable ECC MUX */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + } + } + + DEBUG_PBS_C("DDR3 - PBS TX - values for current pattern - ", + pattern_idx, 1); + for (pup = 0; pup < max_pup; pup++) { + /* + * To minimize delay elements, inc from pbs value the + * min pbs val + */ + DEBUG_PBS_S("DDR3 - PBS - PUP"); + DEBUG_PBS_D(pup, 1); + DEBUG_PBS_S(": "); + + for (dq = 0; dq < DQ_NUM; dq++) { + /* set skew value for all dq */ + /* Bit# Deskew <- Bit# Deskew - last / first failing bit Deskew For all bits (per PUP) (minimize delay elements) */ + DEBUG_PBS_S("DQ"); + DEBUG_PBS_D(dq, 1); + DEBUG_PBS_S("-"); + DEBUG_PBS_D(skew_sum_array[pup][dq] / + COUNT_PBS_REPEAT, 2); + DEBUG_PBS_S(", "); + } + DEBUG_PBS_S("\n"); + } + + /* + * Calculate the average skew for current pattern for each + * pup and each bit + */ + DEBUG_PBS_C("DDR3 - PBS TX - Average for pattern - ", + pattern_idx, 1); + + for (pup = 0; pup < max_pup; pup++) { + /* + * FOR ECC only :: found min and max value for current + * pattern skew array + */ + /* Loop for all dqs */ + for (dq = 0; dq < DQ_NUM; dq++) { + pattern_skew_array[pup][dq] += + (skew_sum_array[pup][dq] / + COUNT_PBS_REPEAT); + } + } + } + + /* Calculate the average skew */ + for (pup = 0; pup < max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) + skew_array[((pup) * DQ_NUM) + dq] = + pattern_skew_array[pup][dq] / COUNT_PBS_PATTERN; + } + + DEBUG_PBS_S("DDR3 - PBS TX - Average for all patterns:\n"); + for (pup = 0; pup < max_pup; pup++) { + /* + * To minimize delay elements, inc from pbs value the min + * pbs val + */ + DEBUG_PBS_S("DDR3 - PBS - PUP"); + DEBUG_PBS_D(pup, 1); + DEBUG_PBS_S(": "); + + for (dq = 0; dq < DQ_NUM; dq++) { + /* Set skew value for all dq */ + /* + * Bit# Deskew <- Bit# Deskew - last / first + * failing bit Deskew For all bits (per PUP) + * (minimize delay elements) + */ + DEBUG_PBS_S("DQ"); + DEBUG_PBS_D(dq, 1); + DEBUG_PBS_S("-"); + DEBUG_PBS_D(skew_array[(pup * DQ_NUM) + dq], 2); + DEBUG_PBS_S(", "); + } + DEBUG_PBS_S("\n"); + } + + /* Return ADLL to default value */ + for (pup = 0; pup < max_pup; pup++) { + if (pup == (max_pup - 1) && dram_info->ecc_ena) + pup = ECC_PUP; + ddr3_pbs_write_pup_dqs_reg(CS0, pup, INIT_WL_DELAY); + } + + /* Set averaged PBS results */ + ddr3_set_pbs_results(dram_info, 1); + + /* Disable SW override - Must be in a different stage */ + /* [0]=0 - Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_1_ADDR) | + (1 << REG_DRAM_TRAINING_1_TRNBPOINT_OFFS); + reg_write(REG_DRAM_TRAINING_1_ADDR, reg); + + DEBUG_PBS_S("DDR3 - PBS Tx - PBS TX ended successfuly\n"); + + return MV_OK; +} + +/* + * Name: ddr3_tx_shift_dqs_adll_step_before_fail + * Desc: Execute the Tx shift DQ phase. 
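+ * Editor's sketch of the search implemented below; every name is
+ * taken from this function, none invented. The DQ TX delay is
+ * swept right one ADLL step at a time until the SDRAM compare
+ * first fails, then each failing pup is backed off one step:
+ *
+ *   adll_val = 4;                          (INIT_WL_DELAY)
+ *   do {
+ *       adll_val++;
+ *       ddr3_pbs_write_pup_dqs_reg(CS0, pup, adll_val);
+ *       ddr3_sdram_compare(...);           (sets new_lockup_pup)
+ *       (on a pup's first fail: dqs_dly_set[pup] = adll_val - 1)
+ *   } while (unlock_pup != 0 && adll_val != ADLL_MAX);
+ *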
+ * Args: dram_info ddr3 training information struct + * cur_pup bit array of the function active pups. + * pbs_pattern_idx Index of PBS pattern + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +static int ddr3_tx_shift_dqs_adll_step_before_fail(MV_DRAM_INFO *dram_info, + u32 cur_pup, + u32 pbs_pattern_idx, u32 ecc) +{ + u32 unlock_pup; /* bit array of unlock pups */ + u32 new_lockup_pup; /* bit array of compare failed pups */ + u32 adll_val = 4; /* INIT_WL_DELAY */ + u32 cur_max_pup, pup; + u32 dqs_dly_set[MAX_PUP_NUM] = { 0 }; + u32 *pattern_ptr; + + /* Choose pattern */ + switch (dram_info->ddr_width) { +#if defined(MV88F672X) + case 16: + pattern_ptr = (u32 *)&pbs_pattern[pbs_pattern_idx]; + break; +#endif + case 32: + pattern_ptr = (u32 *)&pbs_pattern_32b[pbs_pattern_idx]; + break; +#if defined(MV88F78X60) + case 64: + pattern_ptr = (u32 *)&pbs_pattern_64b[pbs_pattern_idx]; + break; +#endif + default: + return MV_FAIL; + } + + /* Set current pup number */ + if (cur_pup == 0x1) /* Ecc mode */ + cur_max_pup = 1; + else + cur_max_pup = dram_info->num_of_std_pups; + + unlock_pup = cur_pup; /* '1' for each unlocked pup */ + + /* Loop on all ADLL Vaules */ + do { + /* Loop until found first fail */ + adll_val++; + + /* + * Increment (Move to right - ADLL) DQ TX delay + * (broadcast to all Data PUPs) + */ + for (pup = 0; pup < cur_max_pup; pup++) + ddr3_pbs_write_pup_dqs_reg(CS0, + pup * (1 - ecc) + + ECC_PUP * ecc, adll_val); + + /* + * Write and Read, compare results (read was already verified) + */ + /* 0 - all locked */ + new_lockup_pup = 0; + + if (MV_OK != ddr3_sdram_compare(dram_info, unlock_pup, + &new_lockup_pup, + pattern_ptr, LEN_PBS_PATTERN, + SDRAM_PBS_TX_OFFS, 1, 0, + NULL, + 0)) + return MV_FAIL; + + unlock_pup &= ~new_lockup_pup; + + DEBUG_PBS_FULL_S("Shift DQS by 2 steps for PUPs: "); + DEBUG_PBS_FULL_D(unlock_pup, 2); + DEBUG_PBS_FULL_C(", Set ADLL value = ", adll_val, 2); + + /* If any PUP failed there is '1' to mark the PUP */ + if (new_lockup_pup != 0) { + /* + * Decrement (Move Back to Left two steps - ADLL) + * DQ TX delay for current failed pups and save + */ + for (pup = 0; pup < cur_max_pup; pup++) { + if (((new_lockup_pup >> pup) & 0x1) && + dqs_dly_set[pup] == 0) + dqs_dly_set[pup] = adll_val - 1; + } + } + } while ((unlock_pup != 0) && (adll_val != ADLL_MAX)); + + if (unlock_pup != 0) { + DEBUG_PBS_FULL_S("DDR3 - PBS Tx - Shift DQ - Adll value reached maximum\n"); + + for (pup = 0; pup < cur_max_pup; pup++) { + if (((unlock_pup >> pup) & 0x1) && + dqs_dly_set[pup] == 0) + dqs_dly_set[pup] = adll_val - 1; + } + } + + DEBUG_PBS_FULL_C("PBS TX one step before fail last pups locked Adll ", + adll_val - 2, 2); + + /* Set the PUP DQS DLY Values */ + for (pup = 0; pup < cur_max_pup; pup++) + ddr3_pbs_write_pup_dqs_reg(CS0, pup * (1 - ecc) + ECC_PUP * ecc, + dqs_dly_set[pup]); + + /* Found one phase before fail */ + return MV_OK; +} + +/* + * Name: ddr3_pbs_rx + * Desc: Execute the PBS RX phase. + * Args: dram_info ddr3 training information struct + * Notes: + * Returns: MV_OK if success, other error code if fail. 
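+ *
+ * Editor's note - outline of the loop nest below (all identifiers
+ * are the ones used in this file):
+ *
+ *   for each pattern_idx in [0, COUNT_PBS_PATTERN)
+ *     for each pbs_retry in [0, COUNT_PBS_REPEAT)
+ *       for each ecc in [0, dram_info->ecc_ena]
+ *         shift RX DQS to the first fail, then run the
+ *         per-bit PBS (ddr3_pbs_per_bit)
+ *
+ * Skew results are accumulated per (pup, dq) in skew_sum_array,
+ * averaged over COUNT_PBS_REPEAT and COUNT_PBS_PATTERN, and then
+ * written to the PHY by ddr3_set_pbs_results().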
+ */ +int ddr3_pbs_rx(MV_DRAM_INFO *dram_info) +{ + /* + * Array to hold the total sum of skew from all iterations + * (for average purpose) + */ + u32 skew_sum_array[MAX_PUP_NUM][DQ_NUM] = { {0} }; + + /* + * Array to hold the total average skew from both patterns + * (for average purpose) + */ + u32 pattern_skew_array[MAX_PUP_NUM][DQ_NUM] = { {0} }; + + u32 pbs_rep_time = 0; /* counts number of loop in case of fail */ + /* bit array for unlock pups - used to repeat on the RX operation */ + u32 cur_pup; + u32 max_pup; + u32 pbs_retry; + u32 pup, dq, pups, cur_max_pup, valid_pup, reg; + u32 pattern_idx; + u32 ecc; + /* indicates whether we need to start the loop again */ + int start_over; + int status; + + DEBUG_PBS_S("DDR3 - PBS RX - Starting PBS RX procedure\n"); + + pups = dram_info->num_of_total_pups; + max_pup = dram_info->num_of_total_pups; + + /* Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + DEBUG_PBS_FULL_S("DDR3 - PBS RX - SW Override Enabled\n"); + + reg = 1 << REG_DRAM_TRAINING_AUTO_OFFS; + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + /* Running twice for 2 different patterns. each patterns - 3 times */ + for (pattern_idx = 0; pattern_idx < COUNT_PBS_PATTERN; pattern_idx++) { + DEBUG_PBS_FULL_C("DDR3 - PBS RX - Working with pattern - ", + pattern_idx, 1); + + /* Reset sum array */ + for (pup = 0; pup < pups; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) + skew_sum_array[pup][dq] = 0; + } + + /* + * Perform PBS several of times (3 for each pattern). + * At the end, we'll use the average + */ + /* If there is ECC, do each PBS again with mux change */ + for (pbs_retry = 0; pbs_retry < COUNT_PBS_REPEAT; pbs_retry++) { + for (ecc = 0; ecc < (dram_info->ecc_ena + 1); ecc++) { + /* + * This parameter stores the current PUP + * num - ecc mode dependent - 4-8 / 1 pups + */ + cur_max_pup = (1 - ecc) * + dram_info->num_of_std_pups + ecc; + + if (ecc) { + /* Only 1 pup in this case */ + valid_pup = 0x1; + } else if (cur_max_pup > 4) { + /* 64 bit - 8 pups */ + valid_pup = 0xFF; + } else if (cur_max_pup == 4) { + /* 32 bit - 4 pups */ + valid_pup = 0xF; + } else { + /* 16 bit - 2 pups */ + valid_pup = 0x3; + } + + /* ECC Support - Switch ECC Mux on ecc=1 */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg |= (dram_info->ecc_ena * ecc << + REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + if (ecc) + DEBUG_PBS_FULL_S("DDR3 - PBS Rx - ECC Mux Enabled\n"); + else + DEBUG_PBS_FULL_S("DDR3 - PBS Rx - ECC Mux Disabled\n"); + + /* Init iteration values */ + /* Clear the locked DQs */ + for (pup = 0; pup < cur_max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) { + pbs_locked_dq[ + pup + ecc * (max_pup - 1)][dq] = + 0; + } + } + + pbs_rep_time = 0; + cur_pup = valid_pup; + start_over = 0; + + /* + * Run loop On current Pattern and current + * pattern iteration (just to cover the false + * fail problem + */ + do { + DEBUG_PBS_FULL_S("DDR3 - PBS Rx - Pbs Rep Loop is "); + DEBUG_PBS_FULL_D(pbs_rep_time, 1); + DEBUG_PBS_FULL_S(", for Retry No."); + DEBUG_PBS_FULL_D(pbs_retry, 1); + DEBUG_PBS_FULL_S("\n"); + + /* Set all PBS values to MAX (31) */ + for (pup = 0; pup < cur_max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) + ddr3_write_pup_reg( + PUP_PBS_RX + + pbs_dq_mapping[ + pup * (1 - ecc) + + ecc * ECC_PUP] + 
[dq], CS0, + pup + ecc * ECC_PUP, + 0, MAX_PBS); + } + + /* Set all DQS PBS values to MIN (0) */ + for (pup = 0; pup < cur_max_pup; pup++) { + ddr3_write_pup_reg(PUP_PBS_RX + + DQ_NUM, CS0, + pup + + ecc * + ECC_PUP, 0, + 0); + } + + /* Shift DQS, To first Fail */ + DEBUG_PBS_FULL_S("DDR3 - PBS Rx - Shift RX DQS to first fail\n"); + + status = ddr3_rx_shift_dqs_to_first_fail + (dram_info, cur_pup, + pattern_idx, ecc); + if (MV_OK != status) { + DEBUG_PBS_S("DDR3 - PBS Rx - ddr3_rx_shift_dqs_to_first_fail failed.\n"); + DEBUG_PBS_D(status, 8); + DEBUG_PBS_S("\nDDR3 - PBS Rx - SKIP.\n"); + + /* Reset read FIFO */ + reg = reg_read(REG_DRAM_TRAINING_ADDR); + /* Start Auto Read Leveling procedure */ + reg |= (1 << REG_DRAM_TRAINING_RL_OFFS); + /* 0x15B0 - Training Register */ + reg_write(REG_DRAM_TRAINING_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg |= ((1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS) + + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS)); + /* [0] = 1 - Enable SW override, [4] = 1 - FIFO reset */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + do { + reg = (reg_read(REG_DRAM_TRAINING_2_ADDR)) + & (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + } while (reg); /* Wait for '0' */ + + reg = reg_read(REG_DRAM_TRAINING_ADDR); + /* Clear Auto Read Leveling procedure */ + reg &= ~(1 << REG_DRAM_TRAINING_RL_OFFS); + /* 0x15B0 - Training Register */ + reg_write(REG_DRAM_TRAINING_ADDR, reg); + + /* Set ADLL to 15 */ + for (pup = 0; pup < max_pup; + pup++) { + ddr3_write_pup_reg + (PUP_DQS_RD, CS0, + pup + + (ecc * ECC_PUP), 0, + 15); + } + + /* Set all PBS values to MIN (0) */ + for (pup = 0; pup < cur_max_pup; + pup++) { + for (dq = 0; + dq < DQ_NUM; dq++) + ddr3_write_pup_reg + (PUP_PBS_RX + + pbs_dq_mapping + [pup * (1 - ecc) + + ecc * ECC_PUP] + [dq], CS0, + pup + ecc * ECC_PUP, + 0, MIN_PBS); + } + + return MV_OK; + } + + /* PBS For each bit */ + DEBUG_PBS_FULL_S("DDR3 - PBS Rx - perform PBS for each bit\n"); + /* in this stage - start_over = 0; */ + if (MV_OK != ddr3_pbs_per_bit( + dram_info, &start_over, + 0, &cur_pup, + pattern_idx, ecc)) { + DEBUG_PBS_S("DDR3 - PBS Rx - ddr3_pbs_per_bit failed."); + return MV_DDR3_TRAINING_ERR_PBS_RX_PER_BIT; + } + + } while ((start_over == 1) && + (++pbs_rep_time < COUNT_PBS_STARTOVER)); + + if (pbs_rep_time == COUNT_PBS_STARTOVER && + start_over == 1) { + DEBUG_PBS_FULL_S("DDR3 - PBS Rx - FAIL - Algorithm failed doing RX PBS\n"); + return MV_DDR3_TRAINING_ERR_PBS_RX_MAX_VAL; + } + + /* Return DQS ADLL to default value - 15 */ + /* Set all DQS PBS values to MIN (0) */ + for (pup = 0; pup < cur_max_pup; pup++) + ddr3_write_pup_reg(PUP_DQS_RD, CS0, + pup + ecc * ECC_PUP, + 0, INIT_RL_DELAY); + + DEBUG_PBS_FULL_C("DDR3 - PBS RX - values for iteration - ", + pbs_retry, 1); + for (pup = 0; pup < cur_max_pup; pup++) { + /* + * To minimize delay elements, inc from + * pbs value the min pbs val + */ + DEBUG_PBS_FULL_S("DDR3 - PBS - PUP"); + DEBUG_PBS_FULL_D((pup + + (ecc * ECC_PUP)), 1); + DEBUG_PBS_FULL_S(": "); + + for (dq = 0; dq < DQ_NUM; dq++) { + /* Set skew value for all dq */ + /* + * Bit# Deskew <- Bit# Deskew - + * last / first failing bit + * Deskew For all bits (per PUP) + * (minimize delay elements) + */ + DEBUG_PBS_FULL_S("DQ"); + DEBUG_PBS_FULL_D(dq, 1); + DEBUG_PBS_FULL_S("-"); + DEBUG_PBS_FULL_D(skew_array + [((pup) * + DQ_NUM) + + dq], 2); + DEBUG_PBS_FULL_S(", "); + } + DEBUG_PBS_FULL_S("\n"); + } + + /* + * Collect the results we got on this trial + * of PBS + */ + for (pup = 0; pup < 
cur_max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) { + skew_sum_array + [pup + (ecc * (max_pup - 1))] + [dq] += + skew_array[((pup) * DQ_NUM) + dq]; + } + } + + /* ECC Support - Disable ECC MUX */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + } + } + + /* + * Calculate the average skew for current pattern for each + * pup and each bit + */ + DEBUG_PBS_FULL_C("DDR3 - PBS RX - Average for pattern - ", + pattern_idx, 1); + for (pup = 0; pup < max_pup; pup++) { + /* + * FOR ECC only :: found min and max value for + * current pattern skew array + */ + /* Loop for all dqs */ + for (dq = 0; dq < DQ_NUM; dq++) { + pattern_skew_array[pup][dq] += + (skew_sum_array[pup][dq] / + COUNT_PBS_REPEAT); + } + } + + DEBUG_PBS_C("DDR3 - PBS RX - values for current pattern - ", + pattern_idx, 1); + for (pup = 0; pup < max_pup; pup++) { + /* + * To minimize delay elements, inc from pbs value the + * min pbs val + */ + DEBUG_PBS_S("DDR3 - PBS RX - PUP"); + DEBUG_PBS_D(pup, 1); + DEBUG_PBS_S(": "); + + for (dq = 0; dq < DQ_NUM; dq++) { + /* Set skew value for all dq */ + /* + * Bit# Deskew <- Bit# Deskew - last / first + * failing bit Deskew For all bits (per PUP) + * (minimize delay elements) + */ + DEBUG_PBS_S("DQ"); + DEBUG_PBS_D(dq, 1); + DEBUG_PBS_S("-"); + DEBUG_PBS_D(skew_sum_array[pup][dq] / + COUNT_PBS_REPEAT, 2); + DEBUG_PBS_S(", "); + } + DEBUG_PBS_S("\n"); + } + } + + /* Calculate the average skew */ + for (pup = 0; pup < max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) + skew_array[((pup) * DQ_NUM) + dq] = + pattern_skew_array[pup][dq] / COUNT_PBS_PATTERN; + } + + DEBUG_PBS_S("DDR3 - PBS RX - Average for all patterns:\n"); + for (pup = 0; pup < max_pup; pup++) { + /* + * To minimize delay elements, inc from pbs value the + * min pbs val + */ + DEBUG_PBS_S("DDR3 - PBS - PUP"); + DEBUG_PBS_D(pup, 1); + DEBUG_PBS_S(": "); + + for (dq = 0; dq < DQ_NUM; dq++) { + /* Set skew value for all dq */ + /* + * Bit# Deskew <- Bit# Deskew - last / first + * failing bit Deskew For all bits (per PUP) + * (minimize delay elements) + */ + DEBUG_PBS_S("DQ"); + DEBUG_PBS_D(dq, 1); + DEBUG_PBS_S("-"); + DEBUG_PBS_D(skew_array[(pup * DQ_NUM) + dq], 2); + DEBUG_PBS_S(", "); + } + DEBUG_PBS_S("\n"); + } + + /* Return ADLL to default value */ + ddr3_write_pup_reg(PUP_DQS_RD, CS0, PUP_BC, 0, INIT_RL_DELAY); + + /* Set averaged PBS results */ + ddr3_set_pbs_results(dram_info, 0); + + /* Disable SW override - Must be in a different stage */ + /* [0]=0 - Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_1_ADDR) | + (1 << REG_DRAM_TRAINING_1_TRNBPOINT_OFFS); + reg_write(REG_DRAM_TRAINING_1_ADDR, reg); + + DEBUG_PBS_FULL_S("DDR3 - PBS RX - ended successfuly\n"); + + return MV_OK; +} + +/* + * Name: ddr3_rx_shift_dqs_to_first_fail + * Desc: Execute the Rx shift DQ phase. + * Args: dram_info ddr3 training information struct + * cur_pup bit array of the function active pups. + * pbs_pattern_idx Index of PBS pattern + * Notes: + * Returns: MV_OK if success, other error code if fail. 
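+ *
+ * Editor's summary of the two passes below: the read DQS ADLL is
+ * first parked at MAX_DELAY and the DQS deskew (dqs_deskew_val)
+ * is incremented until the compare fails for every pup (or
+ * MAX_PBS is reached); the ADLL is then decremented until the
+ * compare passes again, each passing pup being latched at
+ * (adll_val - 2).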
+ */ +static int ddr3_rx_shift_dqs_to_first_fail(MV_DRAM_INFO *dram_info, u32 cur_pup, + u32 pbs_pattern_idx, u32 ecc) +{ + u32 unlock_pup; /* bit array of unlock pups */ + u32 new_lockup_pup; /* bit array of compare failed pups */ + u32 adll_val = MAX_DELAY; + u32 dqs_deskew_val = 0; /* current value of DQS PBS deskew */ + u32 cur_max_pup, pup, pass_pup; + u32 *pattern_ptr; + + /* Choose pattern */ + switch (dram_info->ddr_width) { +#if defined(MV88F672X) + case 16: + pattern_ptr = (u32 *)&pbs_pattern[pbs_pattern_idx]; + break; +#endif + case 32: + pattern_ptr = (u32 *)&pbs_pattern_32b[pbs_pattern_idx]; + break; +#if defined(MV88F78X60) + case 64: + pattern_ptr = (u32 *)&pbs_pattern_64b[pbs_pattern_idx]; + break; +#endif + default: + return MV_FAIL; + } + + /* Set current pup number */ + if (cur_pup == 0x1) /* Ecc mode */ + cur_max_pup = 1; + else + cur_max_pup = dram_info->num_of_std_pups; + + unlock_pup = cur_pup; /* '1' for each unlocked pup */ + + DEBUG_PBS_FULL_S("DDR3 - PBS RX - Shift DQS - Starting...\n"); + + /* Set DQS ADLL to MAX */ + DEBUG_PBS_FULL_S("DDR3 - PBS RX - Shift DQS - Set DQS ADLL to Max for all PUPs\n"); + for (pup = 0; pup < cur_max_pup; pup++) + ddr3_write_pup_reg(PUP_DQS_RD, CS0, pup + ecc * ECC_PUP, 0, + MAX_DELAY); + + /* Loop on all ADLL Vaules */ + do { + /* Loop until found fail for all pups */ + new_lockup_pup = 0; + if (MV_OK != ddr3_sdram_compare(dram_info, unlock_pup, + &new_lockup_pup, + pattern_ptr, LEN_PBS_PATTERN, + SDRAM_PBS_I_OFFS + + pbs_pattern_idx * SDRAM_PBS_NEXT_OFFS, + 0, 0, NULL, 0)) { + DEBUG_PBS_S("DDR3 - PBS Rx - Shift DQS - MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_SRAM_CMP(ddr3_sdram_compare)\n"); + return MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_SRAM_CMP; + } + + if ((new_lockup_pup != 0) && (dqs_deskew_val <= 1)) { + /* Fail on start with first deskew value */ + /* Decrement DQS ADLL */ + --adll_val; + if (adll_val == ADLL_MIN) { + DEBUG_PBS_S("DDR3 - PBS Rx - Shift DQS - fail on start with first deskew value\n"); + return MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_SRAM_CMP; + } + ddr3_write_pup_reg(PUP_DQS_RD, CS0, pup + ecc * ECC_PUP, + 0, adll_val); + continue; + } + + /* Update all new locked pups */ + unlock_pup &= ~new_lockup_pup; + + if ((unlock_pup == 0) || (dqs_deskew_val == MAX_PBS)) { + if (dqs_deskew_val == MAX_PBS) { + /* + * Reach max value of dqs deskew or get fail + * for all pups + */ + DEBUG_PBS_FULL_S("DDR3 - PBS RX - Shift DQS - DQS deskew reached maximum value\n"); + } + break; + } + + DEBUG_PBS_FULL_S("DDR3 - PBS RX - Shift DQS - Inc DQS deskew for PUPs: "); + DEBUG_PBS_FULL_D(unlock_pup, 2); + DEBUG_PBS_FULL_C(", deskew = ", dqs_deskew_val, 2); + + /* Increment DQS deskew elements - Only for unlocked pups */ + dqs_deskew_val++; + for (pup = 0; pup < cur_max_pup; pup++) { + if (IS_PUP_ACTIVE(unlock_pup, pup) == 1) { + ddr3_write_pup_reg(PUP_PBS_RX + DQS_DQ_NUM, CS0, + pup + ecc * ECC_PUP, 0, + dqs_deskew_val); + } + } + } while (1); + + DEBUG_PBS_FULL_S("DDR3 - PBS RX - Shift DQS - ADLL shift one step before fail\n"); + /* Continue to ADLL shift one step before fail */ + unlock_pup = cur_pup; + do { + /* Loop until pass compare for all pups */ + new_lockup_pup = 0; + /* Read and compare results */ + if (MV_OK != ddr3_sdram_compare(dram_info, unlock_pup, &new_lockup_pup, + pattern_ptr, LEN_PBS_PATTERN, + SDRAM_PBS_I_OFFS + + pbs_pattern_idx * SDRAM_PBS_NEXT_OFFS, + 1, 0, NULL, 0)) { + DEBUG_PBS_S("DDR3 - PBS Rx - Shift DQS - MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_SRAM_CMP(ddr3_sdram_compare)\n"); + return 
MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_SRAM_CMP; + } + + /* + * Get mask for pup which passed so their adll will be + * changed to 2 steps before fails + */ + pass_pup = unlock_pup & ~new_lockup_pup; + + DEBUG_PBS_FULL_S("Shift DQS by 2 steps for PUPs: "); + DEBUG_PBS_FULL_D(pass_pup, 2); + DEBUG_PBS_FULL_C(", Set ADLL value = ", (adll_val - 2), 2); + + /* Only for pass pups */ + for (pup = 0; pup < cur_max_pup; pup++) { + if (IS_PUP_ACTIVE(pass_pup, pup) == 1) { + ddr3_write_pup_reg(PUP_DQS_RD, CS0, + pup + ecc * ECC_PUP, 0, + (adll_val - 2)); + } + } + + /* Locked pups that compare success */ + unlock_pup &= new_lockup_pup; + + if (unlock_pup == 0) { + /* All pups locked */ + break; + } + + /* Found error */ + if (adll_val == 0) { + DEBUG_PBS_FULL_S("DDR3 - PBS Rx - Shift DQS - Adll reach min value\n"); + return MV_DDR3_TRAINING_ERR_PBS_SHIFT_QDS_MAX_VAL; + } + + /* + * Decrement (Move Back to Left one phase - ADLL) dqs RX delay + */ + adll_val--; + for (pup = 0; pup < cur_max_pup; pup++) { + if (IS_PUP_ACTIVE(unlock_pup, pup) == 1) { + ddr3_write_pup_reg(PUP_DQS_RD, CS0, + pup + ecc * ECC_PUP, 0, + adll_val); + } + } + } while (1); + + return MV_OK; +} + +/* + * lock_pups() extracted from ddr3_pbs_per_bit(). This just got too + * much indented making it hard to read / edit. + */ +static void lock_pups(u32 pup, u32 *pup_locked, u8 *unlock_pup_dq_array, + u32 pbs_curr_val, u32 start_pbs, u32 ecc, int is_tx) +{ + u32 dq; + int idx; + + /* Lock PBS value for all remaining PUPs bits */ + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - Lock PBS value for all remaining PUPs bits, pup "); + DEBUG_PBS_FULL_D(pup, 1); + DEBUG_PBS_FULL_C(" pbs value ", pbs_curr_val, 2); + + idx = pup * (1 - ecc) + ecc * ECC_PUP; + *pup_locked &= ~(1 << pup); + + for (dq = 0; dq < DQ_NUM; dq++) { + if (IS_PUP_ACTIVE(unlock_pup_dq_array[dq], pup) == 1) { + int offs; + + /* Lock current dq */ + unlock_pup_dq_array[dq] &= ~(1 << pup); + skew_array[(pup * DQ_NUM) + dq] = pbs_curr_val; + + if (is_tx == 1) + offs = PUP_PBS_TX; + else + offs = PUP_PBS_RX; + + ddr3_write_pup_reg(offs + + pbs_dq_mapping[idx][dq], CS0, + idx, 0, start_pbs); + } + } +} + +/* + * Name: ddr3_pbs_per_bit + * Desc: Execute the Per Bit Skew phase. + * Args: start_over Return whether need to start over the algorithm + * is_tx Indicate whether Rx or Tx + * pcur_pup bit array of the function active pups. return the + * pups that need to repeat on the PBS + * pbs_pattern_idx Index of PBS pattern + * + * Notes: Current implementation supports double activation of this function. + * i.e. in order to activate this function (using start_over) more than + * twice, the implementation should change. + * imlementation limitation are marked using + * ' CHIP-ONLY! - Implementation Limitation ' + * Returns: MV_OK if success, other error code if fail. 
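+ *
+ * Editor's sketch of the lock rule applied below (names as used
+ * in this function): Tx sweeps the per-bit deskew up from
+ * MIN_PBS, Rx sweeps it down from MAX_PBS; every compare is
+ * repeated COUNT_PBS_COMP_RETRY_NUM times to filter false fails,
+ * and a bit is frozen once it has moved PBS_DIFF_LIMIT steps past
+ * its first failing value:
+ *
+ *   if (is_tx)
+ *       calc_pbs_diff = pbs_curr_val - first_fail[pup];
+ *   else
+ *       calc_pbs_diff = first_fail[pup] - pbs_curr_val;
+ *   if (calc_pbs_diff >= PBS_DIFF_LIMIT)
+ *       lock_pups(...);    (records pbs_curr_val in skew_array)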
+ */ +static int ddr3_pbs_per_bit(MV_DRAM_INFO *dram_info, int *start_over, int is_tx, + u32 *pcur_pup, u32 pbs_pattern_idx, u32 ecc) +{ + /* + * Bit array to indicate if we already get fail on bit per pup & dq bit + */ + u8 unlock_pup_dq_array[DQ_NUM] = { + *pcur_pup, *pcur_pup, *pcur_pup, *pcur_pup, *pcur_pup, + *pcur_pup, *pcur_pup, *pcur_pup + }; + + u8 cmp_unlock_pup_dq_array[COUNT_PBS_COMP_RETRY_NUM][DQ_NUM]; + u32 pup, dq; + /* value of pbs is according to RX or TX */ + u32 start_pbs, last_pbs; + u32 pbs_curr_val; + /* bit array that indicates all dq of the pup locked */ + u32 pup_locked; + u32 first_fail[MAX_PUP_NUM] = { 0 }; /* count first fail per pup */ + /* indicates whether we get first fail per pup */ + int first_failed[MAX_PUP_NUM] = { 0 }; + /* bit array that indicates pup already get fail */ + u32 sum_pup_fail; + /* use to calculate diff between curr pbs to first fail pbs */ + u32 calc_pbs_diff; + u32 pbs_cmp_retry; + u32 max_pup; + + /* Set init values for retry array - 8 retry */ + for (pbs_cmp_retry = 0; pbs_cmp_retry < COUNT_PBS_COMP_RETRY_NUM; + pbs_cmp_retry++) { + for (dq = 0; dq < DQ_NUM; dq++) + cmp_unlock_pup_dq_array[pbs_cmp_retry][dq] = *pcur_pup; + } + + memset(&skew_array, 0, MAX_PUP_NUM * DQ_NUM * sizeof(u32)); + + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - Started\n"); + + /* The pbs value depends if rx or tx */ + if (is_tx == 1) { + start_pbs = MIN_PBS; + last_pbs = MAX_PBS; + } else { + start_pbs = MAX_PBS; + last_pbs = MIN_PBS; + } + + pbs_curr_val = start_pbs; + pup_locked = *pcur_pup; + + /* Set current pup number */ + if (pup_locked == 0x1) /* Ecc mode */ + max_pup = 1; + else + max_pup = dram_info->num_of_std_pups; + + do { + /* Increment/ decrement PBS for un-lock bits only */ + if (is_tx == 1) + pbs_curr_val++; + else + pbs_curr_val--; + + /* Set Current PBS delay */ + for (dq = 0; dq < DQ_NUM; dq++) { + /* Check DQ bits to see if locked in all pups */ + if (unlock_pup_dq_array[dq] == 0) { + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - All pups are locked for DQ "); + DEBUG_PBS_FULL_D(dq, 1); + DEBUG_PBS_FULL_S("\n"); + continue; + } + + for (pup = 0; pup < max_pup; pup++) { + int idx; + + idx = pup * (1 - ecc) + ecc * ECC_PUP; + + if (IS_PUP_ACTIVE(unlock_pup_dq_array[dq], pup) + == 0) + continue; + + if (is_tx == 1) + ddr3_write_pup_reg( + PUP_PBS_TX + pbs_dq_mapping[idx][dq], + CS0, idx, 0, pbs_curr_val); + else + ddr3_write_pup_reg( + PUP_PBS_RX + pbs_dq_mapping[idx][dq], + CS0, idx, 0, pbs_curr_val); + } + } + + /* + * Write Read and compare results - run the test + * DDR_PBS_COMP_RETRY_NUM times + */ + /* Run number of read and write to verify */ + for (pbs_cmp_retry = 0; + pbs_cmp_retry < COUNT_PBS_COMP_RETRY_NUM; + pbs_cmp_retry++) { + + if (MV_OK != + ddr3_sdram_pbs_compare(dram_info, pup_locked, is_tx, + pbs_pattern_idx, + pbs_curr_val, start_pbs, + skew_array, + cmp_unlock_pup_dq_array + [pbs_cmp_retry], ecc)) + return MV_FAIL; + + for (pup = 0; pup < max_pup; pup++) { + for (dq = 0; dq < DQ_NUM; dq++) { + if ((IS_PUP_ACTIVE(unlock_pup_dq_array[dq], + pup) == 1) + && (IS_PUP_ACTIVE(cmp_unlock_pup_dq_array + [pbs_cmp_retry][dq], + pup) == 0)) { + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - PbsCurrVal: "); + DEBUG_PBS_FULL_D(pbs_curr_val, 2); + DEBUG_PBS_FULL_S(" PUP: "); + DEBUG_PBS_FULL_D(pup, 1); + DEBUG_PBS_FULL_S(" DQ: "); + DEBUG_PBS_FULL_D(dq, 1); + DEBUG_PBS_FULL_S(" - failed\n"); + } + } + } + + for (dq = 0; dq < DQ_NUM; dq++) { + unlock_pup_dq_array[dq] &= + cmp_unlock_pup_dq_array[pbs_cmp_retry][dq]; + } + } + + pup_locked = 0; + sum_pup_fail 
= *pcur_pup; + + /* Check which DQ is failed */ + for (dq = 0; dq < DQ_NUM; dq++) { + /* Summarize the locked pup */ + pup_locked |= unlock_pup_dq_array[dq]; + + /* Check if get fail */ + sum_pup_fail &= unlock_pup_dq_array[dq]; + } + + /* If all PUPS are locked in all DQ - Break */ + if (pup_locked == 0) { + /* All pups are locked */ + *start_over = 0; + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - All bit in all pups are successfully locked\n"); + break; + } + + /* PBS deskew elements reach max ? */ + if (pbs_curr_val == last_pbs) { + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - PBS deskew elements reach max\n"); + /* CHIP-ONLY! - Implementation Limitation */ + *start_over = (sum_pup_fail != 0) && (!(*start_over)); + *pcur_pup = pup_locked; + + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - StartOver: "); + DEBUG_PBS_FULL_D(*start_over, 1); + DEBUG_PBS_FULL_S(" pup_locked: "); + DEBUG_PBS_FULL_D(pup_locked, 2); + DEBUG_PBS_FULL_S(" sum_pup_fail: "); + DEBUG_PBS_FULL_D(sum_pup_fail, 2); + DEBUG_PBS_FULL_S("\n"); + + /* Lock PBS value for all remaining bits */ + for (pup = 0; pup < max_pup; pup++) { + /* Check if current pup already received error */ + if (IS_PUP_ACTIVE(pup_locked, pup) == 1) { + /* Valid pup for current function */ + if (IS_PUP_ACTIVE(sum_pup_fail, pup) == + 1 && (*start_over == 1)) { + DEBUG_PBS_FULL_C("DDR3 - PBS Per bit - skipping lock of pup (first loop of pbs)", + pup, 1); + continue; + } else + if (IS_PUP_ACTIVE(sum_pup_fail, pup) + == 1) { + DEBUG_PBS_FULL_C("DDR3 - PBS Per bit - Locking pup %d (even though it wasn't supposed to be locked)", + pup, 1); + } + + /* Already got fail on the PUP */ + /* Lock PBS value for all remaining bits */ + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - Locking remaning DQs for pup - "); + DEBUG_PBS_FULL_D(pup, 1); + DEBUG_PBS_FULL_S(": "); + + for (dq = 0; dq < DQ_NUM; dq++) { + if (IS_PUP_ACTIVE + (unlock_pup_dq_array[dq], + pup) == 1) { + DEBUG_PBS_FULL_D(dq, 1); + DEBUG_PBS_FULL_S(","); + /* set current PBS */ + skew_array[((pup) * + DQ_NUM) + + dq] = + pbs_curr_val; + } + } + + if (*start_over == 1) { + /* + * Reset this pup bit - when + * restart the PBS, ignore this + * pup + */ + *pcur_pup &= ~(1 << pup); + } + DEBUG_PBS_FULL_S("\n"); + } else { + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - Pup "); + DEBUG_PBS_FULL_D(pup, 1); + DEBUG_PBS_FULL_C(" is not set in puplocked - ", + pup_locked, 1); + } + } + + /* Need to start the PBS again */ + if (*start_over == 1) { + DEBUG_PBS_FULL_S("DDR3 - PBS Per bit - false fail - returning to start\n"); + return MV_OK; + } + break; + } + + /* Diff Check */ + for (pup = 0; pup < max_pup; pup++) { + if (IS_PUP_ACTIVE(pup_locked, pup) == 1) { + /* pup is not locked */ + if (first_failed[pup] == 0) { + /* No first fail until now */ + if (IS_PUP_ACTIVE(sum_pup_fail, pup) == + 0) { + /* Get first fail */ + DEBUG_PBS_FULL_C("DDR3 - PBS Per bit - First fail in pup ", + pup, 1); + first_failed[pup] = 1; + first_fail[pup] = pbs_curr_val; + } + } else { + /* Already got first fail */ + if (is_tx == 1) { + /* TX - inc pbs */ + calc_pbs_diff = pbs_curr_val - + first_fail[pup]; + } else { + /* RX - dec pbs */ + calc_pbs_diff = first_fail[pup] - + pbs_curr_val; + } + + if (calc_pbs_diff >= PBS_DIFF_LIMIT) { + lock_pups(pup, &pup_locked, + unlock_pup_dq_array, + pbs_curr_val, + start_pbs, ecc, is_tx); + } + } + } + } + } while (1); + + return MV_OK; +} + +/* + * Name: ddr3_set_pbs_results + * Desc: Set to HW the PBS phase results. 
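+ * (Editor's note: per pup the minimum skew is subtracted from
+ * every bit so that the fewest delay elements are used - each dq
+ * deskew becomes skew_array[pup * DQ_NUM + dq] - pbs_min, the DQS
+ * deskew is set to (pbs_max - pbs_min) / 2, and on Tx an average
+ * term, val[pup] / 8, is programmed as well.)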
+ * Args: is_tx Indicates whether to set Tx or RX results + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +static int ddr3_set_pbs_results(MV_DRAM_INFO *dram_info, int is_tx) +{ + u32 pup, phys_pup, dq; + u32 max_pup; /* number of valid pups */ + u32 pbs_min; /* minimal pbs val per pup */ + u32 pbs_max; /* maximum pbs val per pup */ + u32 val[9]; + + max_pup = dram_info->num_of_total_pups; + DEBUG_PBS_FULL_S("DDR3 - PBS - ddr3_set_pbs_results:\n"); + + /* Loop for all dqs & pups */ + for (pup = 0; pup < max_pup; pup++) { + if (pup == (max_pup - 1) && dram_info->ecc_ena) + phys_pup = ECC_PUP; + else + phys_pup = pup; + + /* + * To minimize delay elements, inc from pbs value the min + * pbs val + */ + pbs_min = MAX_PBS; + pbs_max = 0; + for (dq = 0; dq < DQ_NUM; dq++) { + if (pbs_min > skew_array[(pup * DQ_NUM) + dq]) + pbs_min = skew_array[(pup * DQ_NUM) + dq]; + + if (pbs_max < skew_array[(pup * DQ_NUM) + dq]) + pbs_max = skew_array[(pup * DQ_NUM) + dq]; + } + + pbs_max -= pbs_min; + + DEBUG_PBS_FULL_S("DDR3 - PBS - PUP"); + DEBUG_PBS_FULL_D(phys_pup, 1); + DEBUG_PBS_FULL_S(": Min Val = "); + DEBUG_PBS_FULL_D(pbs_min, 2); + DEBUG_PBS_FULL_C(", Max Val = ", pbs_max, 2); + + val[pup] = 0; + + for (dq = 0; dq < DQ_NUM; dq++) { + int idx; + int offs; + + /* Set skew value for all dq */ + /* + * Bit# Deskew <- Bit# Deskew - last / first + * failing bit Deskew For all bits (per PUP) + * (minimize delay elements) + */ + + DEBUG_PBS_FULL_S("DQ"); + DEBUG_PBS_FULL_D(dq, 1); + DEBUG_PBS_FULL_S("-"); + DEBUG_PBS_FULL_D((skew_array[(pup * DQ_NUM) + dq] - + pbs_min), 2); + DEBUG_PBS_FULL_S(", "); + + idx = (pup * DQ_NUM) + dq; + + if (is_tx == 1) + offs = PUP_PBS_TX; + else + offs = PUP_PBS_RX; + + ddr3_write_pup_reg(offs + pbs_dq_mapping[phys_pup][dq], + CS0, phys_pup, 0, + skew_array[idx] - pbs_min); + + if (is_tx == 1) + val[pup] += skew_array[idx] - pbs_min; + } + + DEBUG_PBS_FULL_S("\n"); + + /* Set the DQS the half of the Max PBS of the DQs */ + if (is_tx == 1) { + ddr3_write_pup_reg(PUP_PBS_TX + 8, CS0, phys_pup, 0, + pbs_max / 2); + ddr3_write_pup_reg(PUP_PBS_TX + 0xa, CS0, phys_pup, 0, + val[pup] / 8); + } else + ddr3_write_pup_reg(PUP_PBS_RX + 8, CS0, phys_pup, 0, + pbs_max / 2); + } + + return MV_OK; +} + +static void ddr3_pbs_write_pup_dqs_reg(u32 cs, u32 pup, u32 dqs_delay) +{ + u32 reg, delay; + + reg = (ddr3_read_pup_reg(PUP_WL_MODE, cs, pup) & 0x3FF); + delay = reg & PUP_DELAY_MASK; + reg |= ((dqs_delay + delay) << REG_PHY_DQS_REF_DLY_OFFS); + reg |= REG_PHY_REGISTRY_FILE_ACCESS_OP_WR; + reg |= (pup << REG_PHY_PUP_OFFS); + reg |= ((0x4 * cs + PUP_WL_MODE) << REG_PHY_CS_OFFS); + + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + do { + reg = reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR) & + REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE; + } while (reg); /* Wait for '0' to mark the end of the transaction */ + + udelay(10); +} + +/* + * Set training patterns + */ +int ddr3_load_pbs_patterns(MV_DRAM_INFO *dram_info) +{ + u32 cs, cs_count, cs_tmp; + u32 sdram_addr; + u32 *pattern_ptr0, *pattern_ptr1; + + /* Choose pattern */ + switch (dram_info->ddr_width) { +#if defined(MV88F672X) + case 16: + pattern_ptr0 = (u32 *)&pbs_pattern[0]; + pattern_ptr1 = (u32 *)&pbs_pattern[1]; + break; +#endif + case 32: + pattern_ptr0 = (u32 *)&pbs_pattern_32b[0]; + pattern_ptr1 = (u32 *)&pbs_pattern_32b[1]; + break; +#if defined(MV88F78X60) + case 64: + pattern_ptr0 = (u32 *)&pbs_pattern_64b[0]; + pattern_ptr1 = (u32 *)&pbs_pattern_64b[1]; + break; +#endif + default: + return 
MV_FAIL; + } + + /* Loop for each CS */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + cs_count = 0; + for (cs_tmp = 0; cs_tmp < cs; cs_tmp++) { + if (dram_info->cs_ena & (1 << cs_tmp)) + cs_count++; + } + + /* Init PBS I pattern */ + sdram_addr = (cs_count * (SDRAM_CS_SIZE + 1) + + SDRAM_PBS_I_OFFS); + if (MV_OK != + ddr3_sdram_compare(dram_info, (u32) NULL, NULL, + pattern_ptr0, LEN_STD_PATTERN, + sdram_addr, 1, 0, NULL, + 0)) + return MV_FAIL; + + /* Init PBS II pattern */ + sdram_addr = (cs_count * (SDRAM_CS_SIZE + 1) + + SDRAM_PBS_II_OFFS); + if (MV_OK != + ddr3_sdram_compare(dram_info, (u32) NULL, NULL, + pattern_ptr1, LEN_STD_PATTERN, + sdram_addr, 1, 0, NULL, + 0)) + return MV_FAIL; + } + } + + return MV_OK; +} +#endif diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_read_leveling.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_read_leveling.c new file mode 100644 index 000000000..30a5c3548 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_read_leveling.c @@ -0,0 +1,1214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <log.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> + +#include "ddr3_hw_training.h" + +/* + * Debug + */ +#define DEBUG_RL_C(s, d, l) \ + DEBUG_RL_S(s); DEBUG_RL_D(d, l); DEBUG_RL_S("\n") +#define DEBUG_RL_FULL_C(s, d, l) \ + DEBUG_RL_FULL_S(s); DEBUG_RL_FULL_D(d, l); DEBUG_RL_FULL_S("\n") + +#ifdef MV_DEBUG_RL +#define DEBUG_RL_S(s) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_2, "%s", s) +#define DEBUG_RL_D(d, l) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_2, "%x", d) +#else +#define DEBUG_RL_S(s) +#define DEBUG_RL_D(d, l) +#endif + +#ifdef MV_DEBUG_RL_FULL +#define DEBUG_RL_FULL_S(s) puts(s) +#define DEBUG_RL_FULL_D(d, l) printf("%x", d) +#else +#define DEBUG_RL_FULL_S(s) +#define DEBUG_RL_FULL_D(d, l) +#endif + +extern u32 rl_pattern[LEN_STD_PATTERN]; + +#ifdef RL_MODE +static int ddr3_read_leveling_single_cs_rl_mode(u32 cs, u32 freq, + int ratio_2to1, u32 ecc, + MV_DRAM_INFO *dram_info); +#else +static int ddr3_read_leveling_single_cs_window_mode(u32 cs, u32 freq, + int ratio_2to1, u32 ecc, + MV_DRAM_INFO *dram_info); +#endif + +/* + * Name: ddr3_read_leveling_hw + * Desc: Execute the Read leveling phase by HW + * Args: dram_info - main struct + * freq - current sequence frequency + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. 
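+ *
+ *       Flow in outline (illustrative pseudo-code; the exact
+ *       register layout is in the function body below):
+ *
+ *           reg_write(0x15B0, RL_EN | retest_cnt | cs_mask);
+ *           shadow |= AUTO;           /* kick auto-training */
+ *           while (reg_read(shadow) & AUTO)
+ *               ;                     /* HW clears bit when done */
+ *           /* then harvest per-pup results from PUP_RL_MODE */
+ *
+ *       The per-pup result word packs the DQS phase and the delay
+ *       line value, which are split into rl_val[cs][pup][P]/[D].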
+ */ +int ddr3_read_leveling_hw(u32 freq, MV_DRAM_INFO *dram_info) +{ + u32 reg; + + /* Debug message - Start Read leveling procedure */ + DEBUG_RL_S("DDR3 - Read Leveling - Starting HW RL procedure\n"); + + /* Start Auto Read Leveling procedure */ + reg = 1 << REG_DRAM_TRAINING_RL_OFFS; + /* Config the retest number */ + reg |= (COUNT_HW_RL << REG_DRAM_TRAINING_RETEST_OFFS); + + /* Enable CS in the automatic process */ + reg |= (dram_info->cs_ena << REG_DRAM_TRAINING_CS_OFFS); + + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + reg = reg_read(REG_DRAM_TRAINING_SHADOW_ADDR) | + (1 << REG_DRAM_TRAINING_AUTO_OFFS); + reg_write(REG_DRAM_TRAINING_SHADOW_ADDR, reg); + + /* Wait */ + do { + reg = reg_read(REG_DRAM_TRAINING_SHADOW_ADDR) & + (1 << REG_DRAM_TRAINING_AUTO_OFFS); + } while (reg); /* Wait for '0' */ + + /* Check if Successful */ + if (reg_read(REG_DRAM_TRAINING_SHADOW_ADDR) & + (1 << REG_DRAM_TRAINING_ERROR_OFFS)) { + u32 delay, phase, pup, cs; + + dram_info->rl_max_phase = 0; + dram_info->rl_min_phase = 10; + + /* Read results to arrays */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + for (pup = 0; + pup < dram_info->num_of_total_pups; + pup++) { + if (pup == dram_info->num_of_std_pups + && dram_info->ecc_ena) + pup = ECC_PUP; + reg = + ddr3_read_pup_reg(PUP_RL_MODE, cs, + pup); + phase = (reg >> REG_PHY_PHASE_OFFS) & + PUP_PHASE_MASK; + delay = reg & PUP_DELAY_MASK; + dram_info->rl_val[cs][pup][P] = phase; + if (phase > dram_info->rl_max_phase) + dram_info->rl_max_phase = phase; + if (phase < dram_info->rl_min_phase) + dram_info->rl_min_phase = phase; + dram_info->rl_val[cs][pup][D] = delay; + dram_info->rl_val[cs][pup][S] = + RL_FINAL_STATE; + reg = + ddr3_read_pup_reg(PUP_RL_MODE + 0x1, + cs, pup); + dram_info->rl_val[cs][pup][DQS] = + (reg & 0x3F); + } +#ifdef MV_DEBUG_RL + /* Print results */ + DEBUG_RL_C("DDR3 - Read Leveling - Results for CS - ", + (u32) cs, 1); + + for (pup = 0; + pup < (dram_info->num_of_total_pups); + pup++) { + if (pup == dram_info->num_of_std_pups + && dram_info->ecc_ena) + pup = ECC_PUP; + DEBUG_RL_S("DDR3 - Read Leveling - PUP: "); + DEBUG_RL_D((u32) pup, 1); + DEBUG_RL_S(", Phase: "); + DEBUG_RL_D((u32) dram_info-> + rl_val[cs][pup][P], 1); + DEBUG_RL_S(", Delay: "); + DEBUG_RL_D((u32) dram_info-> + rl_val[cs][pup][D], 2); + DEBUG_RL_S("\n"); + } +#endif + } + } + + dram_info->rd_rdy_dly = + reg_read(REG_READ_DATA_READY_DELAYS_ADDR) & + REG_READ_DATA_SAMPLE_DELAYS_MASK; + dram_info->rd_smpl_dly = + reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR) & + REG_READ_DATA_READY_DELAYS_MASK; +#ifdef MV_DEBUG_RL + DEBUG_RL_C("DDR3 - Read Leveling - Read Sample Delay: ", + dram_info->rd_smpl_dly, 2); + DEBUG_RL_C("DDR3 - Read Leveling - Read Ready Delay: ", + dram_info->rd_rdy_dly, 2); + DEBUG_RL_S("DDR3 - Read Leveling - HW RL Ended Successfully\n"); +#endif + return MV_OK; + + } else { + DEBUG_RL_S("DDR3 - Read Leveling - HW RL Error\n"); + return MV_FAIL; + } +} + +/* + * Name: ddr3_read_leveling_sw + * Desc: Execute the Read leveling phase by SW + * Args: dram_info - main struct + * freq - current sequence frequency + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. 
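+ *
+ *       The scan is seeded from the CAS latency: the read-sample
+ *       delay starts at CL and the read-ready delay at CL + 1 in
+ *       1:1 mode or CL + 2 in 2:1 mode (e.g. CL = 7 in 2:1 mode
+ *       starts from the pair (7, 9)).  When ECC is enabled, each
+ *       chip-select is levelled twice - once with the ECC mux off
+ *       and once with it on - so the ECC pup is trained as well.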
+ */
+int ddr3_read_leveling_sw(u32 freq, int ratio_2to1, MV_DRAM_INFO *dram_info)
+{
+	u32 reg, cs, ecc, pup_num, phase, delay, pup;
+	int status;
+
+	/* Debug message - Start Read leveling procedure */
+	DEBUG_RL_S("DDR3 - Read Leveling - Starting SW RL procedure\n");
+
+	/* Enable SW Read Leveling */
+	reg = reg_read(REG_DRAM_TRAINING_2_ADDR) |
+		(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS);
+	reg &= ~(1 << REG_DRAM_TRAINING_2_RL_MODE_OFFS);
+	/* [0]=1 - Enable SW override */
+	/* 0x15B8 - Training SW 2 Register */
+	reg_write(REG_DRAM_TRAINING_2_ADDR, reg);
+
+#ifdef RL_MODE
+	reg = (dram_info->cs_ena << REG_DRAM_TRAINING_CS_OFFS) |
+		(1 << REG_DRAM_TRAINING_AUTO_OFFS);
+	reg_write(REG_DRAM_TRAINING_ADDR, reg);	/* 0x15B0 - Training Register */
+#endif
+
+	/* Loop for each CS */
+	for (cs = 0; cs < dram_info->num_cs; cs++) {
+		DEBUG_RL_C("DDR3 - Read Leveling - CS - ", (u32) cs, 1);
+
+		for (ecc = 0; ecc <= (dram_info->ecc_ena); ecc++) {
+			/* ECC Support - Switch ECC Mux on ecc=1 */
+			reg = reg_read(REG_DRAM_TRAINING_2_ADDR) &
+				~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS);
+			reg |= (dram_info->ecc_ena *
+				ecc << REG_DRAM_TRAINING_2_ECC_MUX_OFFS);
+			reg_write(REG_DRAM_TRAINING_2_ADDR, reg);
+
+			if (ecc)
+				DEBUG_RL_S("DDR3 - Read Leveling - ECC Mux Enabled\n");
+			else
+				DEBUG_RL_S("DDR3 - Read Leveling - ECC Mux Disabled\n");
+
+			/* Set current sample delays */
+			reg = reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR);
+			reg &= ~(REG_READ_DATA_SAMPLE_DELAYS_MASK <<
+				 (REG_READ_DATA_SAMPLE_DELAYS_OFFS * cs));
+			reg |= (dram_info->cl <<
+				(REG_READ_DATA_SAMPLE_DELAYS_OFFS * cs));
+			reg_write(REG_READ_DATA_SAMPLE_DELAYS_ADDR, reg);
+
+			/* Set current Ready delay */
+			reg = reg_read(REG_READ_DATA_READY_DELAYS_ADDR);
+			reg &= ~(REG_READ_DATA_READY_DELAYS_MASK <<
+				 (REG_READ_DATA_READY_DELAYS_OFFS * cs));
+			if (!ratio_2to1) {
+				/* 1:1 mode */
+				reg |= ((dram_info->cl + 1) <<
+					(REG_READ_DATA_READY_DELAYS_OFFS * cs));
+			} else {
+				/* 2:1 mode */
+				reg |= ((dram_info->cl + 2) <<
+					(REG_READ_DATA_READY_DELAYS_OFFS * cs));
+			}
+			reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg);
+
+			/* Read leveling Single CS[cs] */
+#ifdef RL_MODE
+			status =
+			    ddr3_read_leveling_single_cs_rl_mode(cs, freq,
+								 ratio_2to1,
+								 ecc,
+								 dram_info);
+			if (MV_OK != status)
+				return status;
+#else
+			status =
+			    ddr3_read_leveling_single_cs_window_mode(cs, freq,
+								     ratio_2to1,
+								     ecc,
+								     dram_info);
+			if (MV_OK != status)
+				return status;
+#endif
+		}
+
+		/* Print results */
+		DEBUG_RL_C("DDR3 - Read Leveling - Results for CS - ", (u32) cs,
+			   1);
+
+		for (pup = 0;
+		     pup < (dram_info->num_of_std_pups + dram_info->ecc_ena);
+		     pup++) {
+			DEBUG_RL_S("DDR3 - Read Leveling - PUP: ");
+			DEBUG_RL_D((u32) pup, 1);
+			DEBUG_RL_S(", Phase: ");
+			DEBUG_RL_D((u32) dram_info->rl_val[cs][pup][P], 1);
+			DEBUG_RL_S(", Delay: ");
+			DEBUG_RL_D((u32) dram_info->rl_val[cs][pup][D], 2);
+			DEBUG_RL_S("\n");
+		}
+
+		DEBUG_RL_C("DDR3 - Read Leveling - Read Sample Delay: ",
+			   dram_info->rd_smpl_dly, 2);
+		DEBUG_RL_C("DDR3 - Read Leveling - Read Ready Delay: ",
+			   dram_info->rd_rdy_dly, 2);
+
+		/* Configure PHY with the locked leveling settings */
+		for (pup = 0;
+		     pup < (dram_info->num_of_std_pups + dram_info->ecc_ena);
+		     pup++) {
+			/* ECC support - bit 8 */
+			pup_num = (pup == dram_info->num_of_std_pups) ?
ECC_BIT : pup; + + /* For now, set last cnt result */ + phase = dram_info->rl_val[cs][pup][P]; + delay = dram_info->rl_val[cs][pup][D]; + ddr3_write_pup_reg(PUP_RL_MODE, cs, pup_num, phase, + delay); + } + } + + /* Reset PHY read FIFO */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + do { + reg = (reg_read(REG_DRAM_TRAINING_2_ADDR)) & + (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + } while (reg); /* Wait for '0' */ + + /* ECC Support - Switch ECC Mux off ecc=0 */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + +#ifdef RL_MODE + reg_write(REG_DRAM_TRAINING_ADDR, 0); /* 0x15B0 - Training Register */ +#endif + + /* Disable SW Read Leveling */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* [0] = 0 - Disable SW override */ + reg = (reg | (0x1 << REG_DRAM_TRAINING_2_RL_MODE_OFFS)); + /* [3] = 1 - Disable RL MODE */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + DEBUG_RL_S("DDR3 - Read Leveling - Finished RL procedure for all CS\n"); + return MV_OK; +} + +#ifdef RL_MODE +/* + * overrun() extracted from ddr3_read_leveling_single_cs_rl_mode(). + * This just got too much indented making it hard to read / edit. + */ +static void overrun(u32 cs, MV_DRAM_INFO *info, u32 pup, u32 locked_pups, + u32 *locked_sum, u32 ecc, int *first_octet_locked, + int *counter_in_progress, int final_delay, u32 delay, + u32 phase) +{ + /* If no OverRun */ + if (((~locked_pups >> pup) & 0x1) && (final_delay == 0)) { + int idx; + + idx = pup + ecc * ECC_BIT; + + /* PUP passed, start examining */ + if (info->rl_val[cs][idx][S] == RL_UNLOCK_STATE) { + /* Must be RL_UNLOCK_STATE */ + /* Match expected value ? - Update State Machine */ + if (info->rl_val[cs][idx][C] < RL_RETRY_COUNT) { + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We have no overrun and a match on pup: ", + (u32)pup, 1); + info->rl_val[cs][idx][C]++; + + /* If pup got to last state - lock the delays */ + if (info->rl_val[cs][idx][C] == RL_RETRY_COUNT) { + info->rl_val[cs][idx][C] = 0; + info->rl_val[cs][idx][DS] = delay; + info->rl_val[cs][idx][PS] = phase; + + /* Go to Final State */ + info->rl_val[cs][idx][S] = RL_FINAL_STATE; + *locked_sum = *locked_sum + 1; + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We have locked pup: ", + (u32)pup, 1); + + /* + * If first lock - need to lock delays + */ + if (*first_octet_locked == 0) { + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We got first lock on pup: ", + (u32)pup, 1); + *first_octet_locked = 1; + } + + /* + * If pup is in not in final state but + * there was match - dont increment + * counter + */ + } else { + *counter_in_progress = 1; + } + } + } + } +} + +/* + * Name: ddr3_read_leveling_single_cs_rl_mode + * Desc: Execute Read leveling for single Chip select + * Args: cs - current chip select + * freq - current sequence frequency + * ecc - ecc iteration indication + * dram_info - main struct + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. 
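+ *
+ *       The search broadcasts a trial (phase, delay) pair to all
+ *       pups, resets the PHY read FIFO, reads a known pattern back
+ *       and counts consecutive clean reads per pup; a pup locks
+ *       after RL_RETRY_COUNT matches.  The sweep order is roughly
+ *       equivalent to (illustrative sketch, try_pair() standing in
+ *       for the broadcast-and-compare step below):
+ *
+ *           for (rd_sample_delay = CL; ; rd_sample_delay++)
+ *               for (phase = 0; phase <= max_phase; phase++)
+ *                   for (delay = 0; delay < ui_max_delay; delay++)
+ *                       try_pair(phase, delay);
+ *
+ *       with the walk aborted once every pup has locked.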
+ */ +static int ddr3_read_leveling_single_cs_rl_mode(u32 cs, u32 freq, + int ratio_2to1, u32 ecc, + MV_DRAM_INFO *dram_info) +{ + u32 reg, delay, phase, pup, rd_sample_delay, add, locked_pups, + repeat_max_cnt, sdram_offset, locked_sum; + u32 phase_min, ui_max_delay; + int all_locked, first_octet_locked, counter_in_progress; + int final_delay = 0; + + DEBUG_RL_FULL_C("DDR3 - Read Leveling - Single CS - ", (u32) cs, 1); + + /* Init values */ + phase = 0; + delay = 0; + rd_sample_delay = dram_info->cl; + all_locked = 0; + first_octet_locked = 0; + repeat_max_cnt = 0; + locked_sum = 0; + + for (pup = 0; pup < (dram_info->num_of_std_pups * (1 - ecc) + ecc); + pup++) + dram_info->rl_val[cs][pup + ecc * ECC_BIT][S] = 0; + + /* Main loop */ + while (!all_locked) { + counter_in_progress = 0; + + DEBUG_RL_FULL_S("DDR3 - Read Leveling - RdSmplDly = "); + DEBUG_RL_FULL_D(rd_sample_delay, 2); + DEBUG_RL_FULL_S(", RdRdyDly = "); + DEBUG_RL_FULL_D(dram_info->rd_rdy_dly, 2); + DEBUG_RL_FULL_S(", Phase = "); + DEBUG_RL_FULL_D(phase, 1); + DEBUG_RL_FULL_S(", Delay = "); + DEBUG_RL_FULL_D(delay, 2); + DEBUG_RL_FULL_S("\n"); + + /* + * Broadcast to all PUPs current RL delays: DQS phase, + * leveling delay + */ + ddr3_write_pup_reg(PUP_RL_MODE, cs, PUP_BC, phase, delay); + + /* Reset PHY read FIFO */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + do { + reg = (reg_read(REG_DRAM_TRAINING_2_ADDR)) & + (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + } while (reg); /* Wait for '0' */ + + /* Read pattern from SDRAM */ + sdram_offset = cs * (SDRAM_CS_SIZE + 1) + SDRAM_RL_OFFS; + locked_pups = 0; + if (MV_OK != + ddr3_sdram_compare(dram_info, 0xFF, &locked_pups, + rl_pattern, LEN_STD_PATTERN, + sdram_offset, 0, 0, NULL, 0)) + return MV_DDR3_TRAINING_ERR_RD_LVL_RL_PATTERN; + + /* Octet evaluation */ + /* pup_num = Q or 1 for ECC */ + for (pup = 0; pup < (dram_info->num_of_std_pups * (1 - ecc) + ecc); pup++) { + /* Check Overrun */ + if (!((reg_read(REG_DRAM_TRAINING_2_ADDR) >> + (REG_DRAM_TRAINING_2_OVERRUN_OFFS + pup)) & 0x1)) { + overrun(cs, dram_info, pup, locked_pups, + &locked_sum, ecc, &first_octet_locked, + &counter_in_progress, final_delay, + delay, phase); + } else { + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We got overrun on pup: ", + (u32)pup, 1); + } + } + + if (locked_sum == (dram_info->num_of_std_pups * + (1 - ecc) + ecc)) { + all_locked = 1; + DEBUG_RL_FULL_S("DDR3 - Read Leveling - Single Cs - All pups locked\n"); + } + + /* + * This is a fix for unstable condition where pups are + * toggling between match and no match + */ + /* + * If some of the pups is >1 <3, check if we did it too + * many times + */ + if (counter_in_progress == 1) { + /* Notify at least one Counter is >=1 and < 3 */ + if (repeat_max_cnt < RL_RETRY_COUNT) { + repeat_max_cnt++; + counter_in_progress = 1; + DEBUG_RL_FULL_S("DDR3 - Read Leveling - Counter is >=1 and <3\n"); + DEBUG_RL_FULL_S("DDR3 - Read Leveling - So we will not increment the delay to see if locked again\n"); + } else { + DEBUG_RL_FULL_S("DDR3 - Read Leveling - repeat_max_cnt reached max so now we will increment the delay\n"); + counter_in_progress = 0; + } + } + + /* + * Check some of the pups are in the middle of state machine + * and don't increment the delays + */ + if (!counter_in_progress && !all_locked) { + int idx; + + idx = pup + ecc * ECC_BIT; + + repeat_max_cnt = 0; + /* if 1:1 mode */ + if ((!ratio_2to1) && ((phase == 0) || 
(phase == 4)))
+				ui_max_delay = MAX_DELAY_INV;
+			else
+				ui_max_delay = MAX_DELAY;
+
+			/* Increment Delay */
+			if (delay < ui_max_delay) {
+				delay++;
+				/*
+				 * Mark the last delay/phase place for
+				 * window final place
+				 */
+				if (delay == ui_max_delay) {
+					if ((!ratio_2to1 && phase ==
+					     MAX_PHASE_RL_L_1TO1)
+					    || (ratio_2to1 && phase ==
+						MAX_PHASE_RL_L_2TO1))
+						final_delay = 1;
+				}
+			} else {
+				/* Phase+CL Incrementation */
+				delay = 0;
+
+				if (!ratio_2to1) {
+					/* 1:1 mode */
+					if (first_octet_locked) {
+						/* some Pup was Locked */
+						if (phase < MAX_PHASE_RL_L_1TO1) {
+							if (phase == 1) {
+								phase = 4;
+							} else {
+								phase++;
+								delay = MIN_DELAY_PHASE_1_LIMIT;
+							}
+						} else {
+							DEBUG_RL_FULL_S("DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n");
+							DEBUG_RL_S("1)DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n");
+							return MV_DDR3_TRAINING_ERR_RD_LVL_RL_PUP_UNLOCK;
+						}
+					} else {
+						/* NO Pup was Locked */
+						if (phase < MAX_PHASE_RL_UL_1TO1) {
+							phase++;
+							delay =
+							    MIN_DELAY_PHASE_1_LIMIT;
+						} else {
+							phase = 0;
+						}
+					}
+				} else {
+					/* 2:1 mode */
+					if (first_octet_locked) {
+						/* some Pup was Locked */
+						if (phase < MAX_PHASE_RL_L_2TO1) {
+							phase++;
+						} else {
+							DEBUG_RL_FULL_S("DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n");
+							DEBUG_RL_S("2)DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n");
+							for (pup = 0; pup < (dram_info->num_of_std_pups * (1 - ecc) + ecc); pup++) {
+								/* pup_num = Q or 1 for ECC */
+								if (dram_info->rl_val[cs][pup + ecc * ECC_BIT][S]
+								    == 0) {
+									DEBUG_RL_C("Failed byte is = ",
+										   pup, 1);
+								}
+							}
+							return MV_DDR3_TRAINING_ERR_RD_LVL_RL_PUP_UNLOCK;
+						}
+					} else {
+						/* No Pup was Locked */
+						if (phase < MAX_PHASE_RL_UL_2TO1)
+							phase++;
+						else
+							phase = 0;
+					}
+				}
+
+				/*
+				 * If we finished a full Phases cycle (so now
+				 * phase = 0), need to increment rd_sample_dly
+				 */
+				if (phase == 0 && first_octet_locked == 0) {
+					rd_sample_delay++;
+					if (rd_sample_delay == 0x10) {
+						DEBUG_RL_FULL_S("DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n");
+						DEBUG_RL_S("3)DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n");
+						for (pup = 0; pup < (dram_info->num_of_std_pups * (1 - ecc) + ecc); pup++) {
+							/* pup_num = Q or 1 for ECC */
+							if (dram_info->
+							    rl_val[cs][pup + ecc * ECC_BIT][S] == 0) {
+								DEBUG_RL_C("Failed byte is = ",
+									   pup, 1);
+							}
+						}
+						return MV_DDR3_TRAINING_ERR_RD_LVL_PUP_UNLOCK;
+					}
+
+					/* Set current rd_sample_delay */
+					reg = reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR);
+					reg &= ~(REG_READ_DATA_SAMPLE_DELAYS_MASK
+						 << (REG_READ_DATA_SAMPLE_DELAYS_OFFS
+						     * cs));
+					reg |= (rd_sample_delay <<
+						(REG_READ_DATA_SAMPLE_DELAYS_OFFS *
+						 cs));
+					reg_write(REG_READ_DATA_SAMPLE_DELAYS_ADDR,
+						  reg);
+				}
+
+				/*
+				 * Set current rdReadyDelay according to the
+				 * hash table (Need to do this in every phase
+				 * change)
+				 */
+				if (!ratio_2to1) {
+					/* 1:1 mode */
+					add = reg_read(REG_TRAINING_DEBUG_2_ADDR);
+					switch (phase) {
+					case 0:
+						add = (add >>
+						       REG_TRAINING_DEBUG_2_OFFS);
+						break;
+					case 1:
+						add = (add >>
+						       (REG_TRAINING_DEBUG_2_OFFS
+							+ 3));
+						break;
+					case 4:
+						add = (add >>
+						       (REG_TRAINING_DEBUG_2_OFFS
+							+ 6));
+						break;
+					case 5:
+						add = (add >>
+						       (REG_TRAINING_DEBUG_2_OFFS
+							+ 9));
+						break;
+					}
+					add &= REG_TRAINING_DEBUG_2_MASK;
+				} else {
+					/* 2:1 mode */
+					add = reg_read(REG_TRAINING_DEBUG_3_ADDR);
+					add = (add >>
+					       (phase *
+						REG_TRAINING_DEBUG_3_OFFS));
+					add &= REG_TRAINING_DEBUG_3_MASK;
+				}
+
+				reg = reg_read(REG_READ_DATA_READY_DELAYS_ADDR);
+				reg &= ~(REG_READ_DATA_READY_DELAYS_MASK <<
+					 (REG_READ_DATA_READY_DELAYS_OFFS * cs));
+				reg |= ((rd_sample_delay + add) <<
+
(REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg); + dram_info->rd_smpl_dly = rd_sample_delay; + dram_info->rd_rdy_dly = rd_sample_delay + add; + } + + /* Reset counters for pups with states<RD_STATE_COUNT */ + for (pup = 0; pup < + (dram_info->num_of_std_pups * (1 - ecc) + ecc); + pup++) { + if (dram_info->rl_val[cs][idx][C] < RL_RETRY_COUNT) + dram_info->rl_val[cs][idx][C] = 0; + } + } + } + + phase_min = 10; + + for (pup = 0; pup < (dram_info->num_of_std_pups); pup++) { + if (dram_info->rl_val[cs][pup][PS] < phase_min) + phase_min = dram_info->rl_val[cs][pup][PS]; + } + + /* + * Set current rdReadyDelay according to the hash table (Need to + * do this in every phase change) + */ + if (!ratio_2to1) { + /* 1:1 mode */ + add = reg_read(REG_TRAINING_DEBUG_2_ADDR); + switch (phase_min) { + case 0: + add = (add >> REG_TRAINING_DEBUG_2_OFFS); + break; + case 1: + add = (add >> (REG_TRAINING_DEBUG_2_OFFS + 3)); + break; + case 4: + add = (add >> (REG_TRAINING_DEBUG_2_OFFS + 6)); + break; + case 5: + add = (add >> (REG_TRAINING_DEBUG_2_OFFS + 9)); + break; + } + add &= REG_TRAINING_DEBUG_2_MASK; + } else { + /* 2:1 mode */ + add = reg_read(REG_TRAINING_DEBUG_3_ADDR); + add = (add >> (phase_min * REG_TRAINING_DEBUG_3_OFFS)); + add &= REG_TRAINING_DEBUG_3_MASK; + } + + reg = reg_read(REG_READ_DATA_READY_DELAYS_ADDR); + reg &= ~(REG_READ_DATA_READY_DELAYS_MASK << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg |= ((rd_sample_delay + add) << (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg); + dram_info->rd_rdy_dly = rd_sample_delay + add; + + for (cs = 0; cs < dram_info->num_cs; cs++) { + for (pup = 0; pup < dram_info->num_of_total_pups; pup++) { + reg = ddr3_read_pup_reg(PUP_RL_MODE + 0x1, cs, pup); + dram_info->rl_val[cs][pup][DQS] = (reg & 0x3F); + } + } + + return MV_OK; +} + +#else + +/* + * Name: ddr3_read_leveling_single_cs_window_mode + * Desc: Execute Read leveling for single Chip select + * Args: cs - current chip select + * freq - current sequence frequency + * ecc - ecc iteration indication + * dram_info - main struct + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. 
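+ *
+ *       Unlike RL_MODE, window mode records both edges of the
+ *       passing region per pup - (PS, DS) at the first stable
+ *       match and (PE, DE) at the first failure after it - and
+ *       then programs the centre.  With positions linearized as
+ *       phase * MAX_DELAY + delay (MAX_DELAY_INV in the 1:1
+ *       workaround path), a window of, say, [0x12, 0x2a] centres
+ *       at 0x12 + (0x2a - 0x12) / 2 = 0x1e, which is split back
+ *       into a phase (0x1e / MAX_DELAY) and a residual delay
+ *       (0x1e % MAX_DELAY).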
+ */ +static int ddr3_read_leveling_single_cs_window_mode(u32 cs, u32 freq, + int ratio_2to1, u32 ecc, + MV_DRAM_INFO *dram_info) +{ + u32 reg, delay, phase, sum, pup, rd_sample_delay, add, locked_pups, + repeat_max_cnt, sdram_offset, final_sum, locked_sum; + u32 delay_s, delay_e, tmp, phase_min, ui_max_delay; + int all_locked, first_octet_locked, counter_in_progress; + int final_delay = 0; + + DEBUG_RL_FULL_C("DDR3 - Read Leveling - Single CS - ", (u32) cs, 1); + + /* Init values */ + phase = 0; + delay = 0; + rd_sample_delay = dram_info->cl; + all_locked = 0; + first_octet_locked = 0; + repeat_max_cnt = 0; + sum = 0; + final_sum = 0; + locked_sum = 0; + + for (pup = 0; pup < (dram_info->num_of_std_pups * (1 - ecc) + ecc); + pup++) + dram_info->rl_val[cs][pup + ecc * ECC_BIT][S] = 0; + + /* Main loop */ + while (!all_locked) { + counter_in_progress = 0; + + DEBUG_RL_FULL_S("DDR3 - Read Leveling - RdSmplDly = "); + DEBUG_RL_FULL_D(rd_sample_delay, 2); + DEBUG_RL_FULL_S(", RdRdyDly = "); + DEBUG_RL_FULL_D(dram_info->rd_rdy_dly, 2); + DEBUG_RL_FULL_S(", Phase = "); + DEBUG_RL_FULL_D(phase, 1); + DEBUG_RL_FULL_S(", Delay = "); + DEBUG_RL_FULL_D(delay, 2); + DEBUG_RL_FULL_S("\n"); + + /* + * Broadcast to all PUPs current RL delays: DQS phase,leveling + * delay + */ + ddr3_write_pup_reg(PUP_RL_MODE, cs, PUP_BC, phase, delay); + + /* Reset PHY read FIFO */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + do { + reg = (reg_read(REG_DRAM_TRAINING_2_ADDR)) & + (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + } while (reg); /* Wait for '0' */ + + /* Read pattern from SDRAM */ + sdram_offset = cs * (SDRAM_CS_SIZE + 1) + SDRAM_RL_OFFS; + locked_pups = 0; + if (MV_OK != + ddr3_sdram_compare(dram_info, 0xFF, &locked_pups, + rl_pattern, LEN_STD_PATTERN, + sdram_offset, 0, 0, NULL, 0)) + return MV_DDR3_TRAINING_ERR_RD_LVL_WIN_PATTERN; + + /* Octet evaluation */ + for (pup = 0; pup < (dram_info->num_of_std_pups * + (1 - ecc) + ecc); pup++) { + /* pup_num = Q or 1 for ECC */ + int idx; + + idx = pup + ecc * ECC_BIT; + + /* Check Overrun */ + if (!((reg_read(REG_DRAM_TRAINING_2_ADDR) >> + (REG_DRAM_TRAINING_2_OVERRUN_OFFS + + pup)) & 0x1)) { + /* If no OverRun */ + + /* Inside the window */ + if (dram_info->rl_val[cs][idx][S] == RL_WINDOW_STATE) { + /* + * Match expected value ? - Update + * State Machine + */ + if (((~locked_pups >> pup) & 0x1) + && (final_delay == 0)) { + /* Match - Still inside the Window */ + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We got another match inside the window for pup: ", + (u32)pup, 1); + + } else { + /* We got fail -> this is the end of the window */ + dram_info->rl_val[cs][idx][DE] = delay; + dram_info->rl_val[cs][idx][PE] = phase; + /* Go to Final State */ + dram_info->rl_val[cs][idx][S]++; + final_sum++; + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We finished the window for pup: ", + (u32)pup, 1); + } + + /* Before the start of the window */ + } else if (dram_info->rl_val[cs][idx][S] == + RL_UNLOCK_STATE) { + /* Must be RL_UNLOCK_STATE */ + /* + * Match expected value ? 
- Update + * State Machine + */ + if (dram_info->rl_val[cs][idx][C] < + RL_RETRY_COUNT) { + if (((~locked_pups >> pup) & 0x1)) { + /* Match */ + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We have no overrun and a match on pup: ", + (u32)pup, 1); + dram_info->rl_val[cs][idx][C]++; + + /* If pup got to last state - lock the delays */ + if (dram_info->rl_val[cs][idx][C] == + RL_RETRY_COUNT) { + dram_info->rl_val[cs][idx][C] = 0; + dram_info->rl_val[cs][idx][DS] = + delay; + dram_info->rl_val[cs][idx][PS] = + phase; + dram_info->rl_val[cs][idx][S]++; /* Go to Window State */ + locked_sum++; + /* Will count the pups that got locked */ + + /* IF First lock - need to lock delays */ + if (first_octet_locked == 0) { + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We got first lock on pup: ", + (u32)pup, 1); + first_octet_locked + = + 1; + } + } + + /* if pup is in not in final state but there was match - dont increment counter */ + else { + counter_in_progress + = 1; + } + } + } + } + } else { + DEBUG_RL_FULL_C("DDR3 - Read Leveling - We got overrun on pup: ", + (u32)pup, 1); + counter_in_progress = 1; + } + } + + if (final_sum == (dram_info->num_of_std_pups * (1 - ecc) + ecc)) { + all_locked = 1; + DEBUG_RL_FULL_S("DDR3 - Read Leveling - Single Cs - All pups locked\n"); + } + + /* + * This is a fix for unstable condition where pups are + * toggling between match and no match + */ + /* + * If some of the pups is >1 <3, check if we did it too many + * times + */ + if (counter_in_progress == 1) { + if (repeat_max_cnt < RL_RETRY_COUNT) { + /* Notify at least one Counter is >=1 and < 3 */ + repeat_max_cnt++; + counter_in_progress = 1; + DEBUG_RL_FULL_S("DDR3 - Read Leveling - Counter is >=1 and <3\n"); + DEBUG_RL_FULL_S("DDR3 - Read Leveling - So we will not increment the delay to see if locked again\n"); + } else { + DEBUG_RL_FULL_S("DDR3 - Read Leveling - repeat_max_cnt reached max so now we will increment the delay\n"); + counter_in_progress = 0; + } + } + + /* + * Check some of the pups are in the middle of state machine + * and don't increment the delays + */ + if (!counter_in_progress && !all_locked) { + repeat_max_cnt = 0; + if (!ratio_2to1) + ui_max_delay = MAX_DELAY_INV; + else + ui_max_delay = MAX_DELAY; + + /* Increment Delay */ + if (delay < ui_max_delay) { + /* Delay Incrementation */ + delay++; + if (delay == ui_max_delay) { + /* + * Mark the last delay/pahse place + * for window final place + */ + if ((!ratio_2to1 + && phase == MAX_PHASE_RL_L_1TO1) + || (ratio_2to1 + && phase == + MAX_PHASE_RL_L_2TO1)) + final_delay = 1; + } + } else { + /* Phase+CL Incrementation */ + delay = 0; + if (!ratio_2to1) { + /* 1:1 mode */ + if (first_octet_locked) { + /* some pupet was Locked */ + if (phase < MAX_PHASE_RL_L_1TO1) { +#ifdef RL_WINDOW_WA + if (phase == 0) +#else + if (phase == 1) +#endif + phase = 4; + else + phase++; + } else { + DEBUG_RL_FULL_S("DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n"); + return MV_DDR3_TRAINING_ERR_RD_LVL_WIN_PUP_UNLOCK; + } + } else { + /* No Pup was Locked */ + if (phase < MAX_PHASE_RL_UL_1TO1) { +#ifdef RL_WINDOW_WA + if (phase == 0) + phase = 4; +#else + phase++; +#endif + } else + phase = 0; + } + } else { + /* 2:1 mode */ + if (first_octet_locked) { + /* Some Pup was Locked */ + if (phase < MAX_PHASE_RL_L_2TO1) { + phase++; + } else { + DEBUG_RL_FULL_S("DDR3 - Read Leveling - ERROR - NOT all PUPs Locked\n"); + return MV_DDR3_TRAINING_ERR_RD_LVL_WIN_PUP_UNLOCK; + } + } else { + /* No Pup was Locked */ + if (phase < MAX_PHASE_RL_UL_2TO1) + phase++; + else + phase = 
0; + } + } + + /* + * If we finished a full Phases cycle (so + * now phase = 0, need to increment + * rd_sample_dly + */ + if (phase == 0 && first_octet_locked == 0) { + rd_sample_delay++; + + /* Set current rd_sample_delay */ + reg = reg_read(REG_READ_DATA_SAMPLE_DELAYS_ADDR); + reg &= ~(REG_READ_DATA_SAMPLE_DELAYS_MASK << + (REG_READ_DATA_SAMPLE_DELAYS_OFFS + * cs)); + reg |= (rd_sample_delay << + (REG_READ_DATA_SAMPLE_DELAYS_OFFS * + cs)); + reg_write(REG_READ_DATA_SAMPLE_DELAYS_ADDR, + reg); + } + + /* + * Set current rdReadyDelay according to the + * hash table (Need to do this in every phase + * change) + */ + if (!ratio_2to1) { + /* 1:1 mode */ + add = reg_read(REG_TRAINING_DEBUG_2_ADDR); + switch (phase) { + case 0: + add = add >> + REG_TRAINING_DEBUG_2_OFFS; + break; + case 1: + add = add >> + (REG_TRAINING_DEBUG_2_OFFS + + 3); + break; + case 4: + add = add >> + (REG_TRAINING_DEBUG_2_OFFS + + 6); + break; + case 5: + add = add >> + (REG_TRAINING_DEBUG_2_OFFS + + 9); + break; + } + } else { + /* 2:1 mode */ + add = reg_read(REG_TRAINING_DEBUG_3_ADDR); + add = (add >> phase * + REG_TRAINING_DEBUG_3_OFFS); + } + add &= REG_TRAINING_DEBUG_2_MASK; + reg = reg_read(REG_READ_DATA_READY_DELAYS_ADDR); + reg &= ~(REG_READ_DATA_READY_DELAYS_MASK << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg |= ((rd_sample_delay + add) << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg); + dram_info->rd_smpl_dly = rd_sample_delay; + dram_info->rd_rdy_dly = rd_sample_delay + add; + } + + /* Reset counters for pups with states<RD_STATE_COUNT */ + for (pup = 0; + pup < + (dram_info->num_of_std_pups * (1 - ecc) + ecc); + pup++) { + if (dram_info->rl_val[cs][idx][C] < RL_RETRY_COUNT) + dram_info->rl_val[cs][idx][C] = 0; + } + } + } + + phase_min = 10; + + for (pup = 0; pup < (dram_info->num_of_std_pups); pup++) { + DEBUG_RL_S("DDR3 - Read Leveling - Window info - PUP: "); + DEBUG_RL_D((u32) pup, 1); + DEBUG_RL_S(", PS: "); + DEBUG_RL_D((u32) dram_info->rl_val[cs][pup][PS], 1); + DEBUG_RL_S(", DS: "); + DEBUG_RL_D((u32) dram_info->rl_val[cs][pup][DS], 2); + DEBUG_RL_S(", PE: "); + DEBUG_RL_D((u32) dram_info->rl_val[cs][pup][PE], 1); + DEBUG_RL_S(", DE: "); + DEBUG_RL_D((u32) dram_info->rl_val[cs][pup][DE], 2); + DEBUG_RL_S("\n"); + } + + /* Find center of the window procedure */ + for (pup = 0; pup < (dram_info->num_of_std_pups * (1 - ecc) + ecc); + pup++) { +#ifdef RL_WINDOW_WA + if (!ratio_2to1) { /* 1:1 mode */ + if (dram_info->rl_val[cs][idx][PS] == 4) + dram_info->rl_val[cs][idx][PS] = 1; + if (dram_info->rl_val[cs][idx][PE] == 4) + dram_info->rl_val[cs][idx][PE] = 1; + + delay_s = dram_info->rl_val[cs][idx][PS] * + MAX_DELAY_INV + dram_info->rl_val[cs][idx][DS]; + delay_e = dram_info->rl_val[cs][idx][PE] * + MAX_DELAY_INV + dram_info->rl_val[cs][idx][DE]; + + tmp = (delay_e - delay_s) / 2 + delay_s; + phase = tmp / MAX_DELAY_INV; + if (phase == 1) /* 1:1 mode */ + phase = 4; + + if (phase < phase_min) /* for the read ready delay */ + phase_min = phase; + + dram_info->rl_val[cs][idx][P] = phase; + dram_info->rl_val[cs][idx][D] = tmp % MAX_DELAY_INV; + + } else { + delay_s = dram_info->rl_val[cs][idx][PS] * + MAX_DELAY + dram_info->rl_val[cs][idx][DS]; + delay_e = dram_info->rl_val[cs][idx][PE] * + MAX_DELAY + dram_info->rl_val[cs][idx][DE]; + + tmp = (delay_e - delay_s) / 2 + delay_s; + phase = tmp / MAX_DELAY; + + if (phase < phase_min) /* for the read ready delay */ + phase_min = phase; + + dram_info->rl_val[cs][idx][P] = phase; + 
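+
+			/*
+			 * tmp is the linearized centre of the window;
+			 * after the whole phases are stripped out
+			 * (tmp / MAX_DELAY above), the remainder
+			 * tmp % MAX_DELAY is the residual delay-line
+			 * setting within the chosen phase.
+			 */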
dram_info->rl_val[cs][idx][D] = tmp % MAX_DELAY; + } +#else + if (!ratio_2to1) { /* 1:1 mode */ + if (dram_info->rl_val[cs][idx][PS] > 1) + dram_info->rl_val[cs][idx][PS] -= 2; + if (dram_info->rl_val[cs][idx][PE] > 1) + dram_info->rl_val[cs][idx][PE] -= 2; + } + + delay_s = dram_info->rl_val[cs][idx][PS] * MAX_DELAY + + dram_info->rl_val[cs][idx][DS]; + delay_e = dram_info->rl_val[cs][idx][PE] * MAX_DELAY + + dram_info->rl_val[cs][idx][DE]; + + tmp = (delay_e - delay_s) / 2 + delay_s; + phase = tmp / MAX_DELAY; + if (!ratio_2to1 && phase > 1) /* 1:1 mode */ + phase += 2; + + if (phase < phase_min) /* for the read ready delay */ + phase_min = phase; + + dram_info->rl_val[cs][idx][P] = phase; + dram_info->rl_val[cs][idx][D] = tmp % MAX_DELAY; +#endif + } + + /* Set current rdReadyDelay according to the hash table (Need to do this in every phase change) */ + if (!ratio_2to1) { /* 1:1 mode */ + add = reg_read(REG_TRAINING_DEBUG_2_ADDR); + switch (phase_min) { + case 0: + add = (add >> REG_TRAINING_DEBUG_2_OFFS); + break; + case 1: + add = (add >> (REG_TRAINING_DEBUG_2_OFFS + 3)); + break; + case 4: + add = (add >> (REG_TRAINING_DEBUG_2_OFFS + 6)); + break; + case 5: + add = (add >> (REG_TRAINING_DEBUG_2_OFFS + 9)); + break; + } + } else { /* 2:1 mode */ + add = reg_read(REG_TRAINING_DEBUG_3_ADDR); + add = (add >> phase_min * REG_TRAINING_DEBUG_3_OFFS); + } + + add &= REG_TRAINING_DEBUG_2_MASK; + reg = reg_read(REG_READ_DATA_READY_DELAYS_ADDR); + reg &= + ~(REG_READ_DATA_READY_DELAYS_MASK << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg |= + ((rd_sample_delay + add) << (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg); + dram_info->rd_rdy_dly = rd_sample_delay + add; + + for (cs = 0; cs < dram_info->num_cs; cs++) { + for (pup = 0; pup < dram_info->num_of_total_pups; pup++) { + reg = ddr3_read_pup_reg(PUP_RL_MODE + 0x1, cs, pup); + dram_info->rl_val[cs][pup][DQS] = (reg & 0x3F); + } + } + + return MV_OK; +} +#endif diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_sdram.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_sdram.c new file mode 100644 index 000000000..0b150b20f --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_sdram.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. 
and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> + +#include "ddr3_hw_training.h" +#include "xor.h" +#include "xor_regs.h" + +static void ddr3_flush_l1_line(u32 line); + +extern u32 pbs_pattern[2][LEN_16BIT_PBS_PATTERN]; +extern u32 pbs_pattern_32b[2][LEN_PBS_PATTERN]; +#if defined(MV88F78X60) +extern u32 pbs_pattern_64b[2][LEN_PBS_PATTERN]; +#endif +extern u32 pbs_dq_mapping[PUP_NUM_64BIT + 1][DQ_NUM]; + +#if defined(MV88F78X60) || defined(MV88F672X) +/* PBS locked dq (per pup) */ +u32 pbs_locked_dq[MAX_PUP_NUM][DQ_NUM] = { { 0 } }; +u32 pbs_locked_dm[MAX_PUP_NUM] = { 0 }; +u32 pbs_locked_value[MAX_PUP_NUM][DQ_NUM] = { { 0 } }; + +int per_bit_data[MAX_PUP_NUM][DQ_NUM]; +#endif + +static u32 sdram_data[LEN_KILLER_PATTERN] __aligned(32) = { 0 }; + +static struct crc_dma_desc dma_desc __aligned(32) = { 0 }; + +#define XOR_TIMEOUT 0x8000000 + +struct xor_channel_t { + struct crc_dma_desc *desc; + unsigned long desc_phys_addr; +}; + +#define XOR_CAUSE_DONE_MASK(chan) ((0x1 | 0x2) << (chan * 16)) + +void xor_waiton_eng(int chan) +{ + int timeout; + + timeout = 0; + while (!(reg_read(XOR_CAUSE_REG(XOR_UNIT(chan))) & + XOR_CAUSE_DONE_MASK(XOR_CHAN(chan)))) { + if (timeout > XOR_TIMEOUT) + goto timeout; + + timeout++; + } + + timeout = 0; + while (mv_xor_state_get(chan) != MV_IDLE) { + if (timeout > XOR_TIMEOUT) + goto timeout; + + timeout++; + } + + /* Clear int */ + reg_write(XOR_CAUSE_REG(XOR_UNIT(chan)), + ~(XOR_CAUSE_DONE_MASK(XOR_CHAN(chan)))); + +timeout: + return; +} + +static int special_compare_pattern(u32 uj) +{ + if ((uj == 30) || (uj == 31) || (uj == 61) || (uj == 62) || + (uj == 93) || (uj == 94) || (uj == 126) || (uj == 127)) + return 1; + + return 0; +} + +/* + * Compare code extracted as its used by multiple functions. This + * reduces code-size and makes it easier to maintain it. Additionally + * the code is not indented that much and therefore easier to read. 
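+ *
+ * Each 32-bit word of the burst holds one byte per byte lane
+ * (pup), so the compare walks the word a byte at a time and sets
+ * the failing pup's bit.  Conceptually (sketch only, with lane()
+ * standing in for the pup-group index maths in the code below):
+ *
+ *     for (uk = 0; uk < PUP_NUM_32BIT; uk++)
+ *         if (((sdram_data[uj] ^ pattern[uj]) >> (8 * uk)) & 0xff)
+ *             *pup |= 1 << lane(uk, uj);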
+ */ +static void compare_pattern_v1(u32 uj, u32 *pup, u32 *pattern, + u32 pup_groups, int debug_dqs) +{ + u32 val; + u32 uk; + u32 var1; + u32 var2; + __maybe_unused u32 dq; + + if (((sdram_data[uj]) != (pattern[uj])) && (*pup != 0xFF)) { + for (uk = 0; uk < PUP_NUM_32BIT; uk++) { + val = CMP_BYTE_SHIFT * uk; + var1 = ((sdram_data[uj] >> val) & CMP_BYTE_MASK); + var2 = ((pattern[uj] >> val) & CMP_BYTE_MASK); + + if (var1 != var2) { + *pup |= (1 << (uk + (PUP_NUM_32BIT * + (uj % pup_groups)))); + +#ifdef MV_DEBUG_DQS + if (!debug_dqs) + continue; + + for (dq = 0; dq < DQ_NUM; dq++) { + val = uk + (PUP_NUM_32BIT * + (uj % pup_groups)); + if (((var1 >> dq) & 0x1) != + ((var2 >> dq) & 0x1)) + per_bit_data[val][dq] = 1; + else + per_bit_data[val][dq] = 0; + } +#endif + } + } + } +} + +static void compare_pattern_v2(u32 uj, u32 *pup, u32 *pattern) +{ + u32 val; + u32 uk; + u32 var1; + u32 var2; + + if (((sdram_data[uj]) != (pattern[uj])) && (*pup != 0x3)) { + /* Found error */ + for (uk = 0; uk < PUP_NUM_32BIT; uk++) { + val = CMP_BYTE_SHIFT * uk; + var1 = (sdram_data[uj] >> val) & CMP_BYTE_MASK; + var2 = (pattern[uj] >> val) & CMP_BYTE_MASK; + if (var1 != var2) + *pup |= (1 << (uk % PUP_NUM_16BIT)); + } + } +} + +/* + * Name: ddr3_sdram_compare + * Desc: Execute compare per PUP + * Args: unlock_pup Bit array of the unlock pups + * new_locked_pup Output bit array of the pups with failed compare + * pattern Pattern to compare + * pattern_len Length of pattern (in bytes) + * sdram_offset offset address to the SDRAM + * write write to the SDRAM before read + * mask compare pattern with mask; + * mask_pattern Mask to compare pattern + * + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_sdram_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, + u32 pattern_len, u32 sdram_offset, int write, + int mask, u32 *mask_pattern, + int special_compare) +{ + u32 uj; + __maybe_unused u32 pup_groups; + __maybe_unused u32 dq; + +#if !defined(MV88F67XX) + if (dram_info->num_of_std_pups == PUP_NUM_64BIT) + pup_groups = 2; + else + pup_groups = 1; +#endif + + ddr3_reset_phy_read_fifo(); + + /* Check if need to write to sdram before read */ + if (write == 1) + ddr3_dram_sram_burst((u32)pattern, sdram_offset, pattern_len); + + ddr3_dram_sram_burst(sdram_offset, (u32)sdram_data, pattern_len); + + /* Compare read result to write */ + for (uj = 0; uj < pattern_len; uj++) { + if (special_compare && special_compare_pattern(uj)) + continue; + +#if defined(MV88F78X60) || defined(MV88F672X) + compare_pattern_v1(uj, new_locked_pup, pattern, pup_groups, 1); +#elif defined(MV88F67XX) + compare_pattern_v2(uj, new_locked_pup, pattern); +#endif + } + + return MV_OK; +} + +#if defined(MV88F78X60) || defined(MV88F672X) +/* + * Name: ddr3_sdram_dm_compare + * Desc: Execute compare per PUP + * Args: unlock_pup Bit array of the unlock pups + * new_locked_pup Output bit array of the pups with failed compare + * pattern Pattern to compare + * pattern_len Length of pattern (in bytes) + * sdram_offset offset address to the SDRAM + * write write to the SDRAM before read + * mask compare pattern with mask; + * mask_pattern Mask to compare pattern + * + * Notes: + * Returns: MV_OK if success, other error code if fail. 
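+ *
+ *       After the burst compare validates the data path, two words
+ *       are written directly (non-burst) at offsets 0x10/0x14 and
+ *       read back; the idea is that such narrow CPU stores force
+ *       masked (DM-gated) writes, so a byte lane that only fails
+ *       here points at DM skew rather than at a DQ problem.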
+ */ +int ddr3_sdram_dm_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, + u32 sdram_offset) +{ + u32 uj, uk, var1, var2, pup_groups; + u32 val; + u32 pup = 0; + + if (dram_info->num_of_std_pups == PUP_NUM_64BIT) + pup_groups = 2; + else + pup_groups = 1; + + ddr3_dram_sram_burst((u32)pattern, SDRAM_PBS_TX_OFFS, + LEN_PBS_PATTERN); + ddr3_dram_sram_burst(SDRAM_PBS_TX_OFFS, (u32)sdram_data, + LEN_PBS_PATTERN); + + /* Validate the correctness of the results */ + for (uj = 0; uj < LEN_PBS_PATTERN; uj++) + compare_pattern_v1(uj, &pup, pattern, pup_groups, 0); + + /* Test the DM Signals */ + *(u32 *)(SDRAM_PBS_TX_OFFS + 0x10) = 0x12345678; + *(u32 *)(SDRAM_PBS_TX_OFFS + 0x14) = 0x12345678; + + sdram_data[0] = *(u32 *)(SDRAM_PBS_TX_OFFS + 0x10); + sdram_data[1] = *(u32 *)(SDRAM_PBS_TX_OFFS + 0x14); + + for (uj = 0; uj < 2; uj++) { + if (((sdram_data[uj]) != (pattern[uj])) && + (*new_locked_pup != 0xFF)) { + for (uk = 0; uk < PUP_NUM_32BIT; uk++) { + val = CMP_BYTE_SHIFT * uk; + var1 = ((sdram_data[uj] >> val) & CMP_BYTE_MASK); + var2 = ((pattern[uj] >> val) & CMP_BYTE_MASK); + if (var1 != var2) { + *new_locked_pup |= (1 << (uk + + (PUP_NUM_32BIT * (uj % pup_groups)))); + *new_locked_pup |= pup; + } + } + } + } + + return MV_OK; +} + +/* + * Name: ddr3_sdram_pbs_compare + * Desc: Execute SRAM compare per PUP and DQ. + * Args: pup_locked bit array of locked pups + * is_tx Indicate whether Rx or Tx + * pbs_pattern_idx Index of PBS pattern + * pbs_curr_val The PBS value + * pbs_lock_val The value to set to locked PBS + * skew_array Global array to update with the compare results + * ai_unlock_pup_dq_array bit array of the locked / unlocked pups per dq. + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_sdram_pbs_compare(MV_DRAM_INFO *dram_info, u32 pup_locked, + int is_tx, u32 pbs_pattern_idx, + u32 pbs_curr_val, u32 pbs_lock_val, + u32 *skew_array, u8 *unlock_pup_dq_array, + u32 ecc) +{ + /* bit array failed dq per pup for current compare */ + u32 pbs_write_pup[DQ_NUM] = { 0 }; + u32 update_pup; /* pup as HW convention */ + u32 max_pup; /* maximal pup index */ + u32 pup_addr; + u32 ui, dq, pup; + int var1, var2; + u32 sdram_offset, pup_groups, tmp_pup; + u32 *pattern_ptr; + u32 val; + + /* Choose pattern */ + switch (dram_info->ddr_width) { +#if defined(MV88F672X) + case 16: + pattern_ptr = (u32 *)&pbs_pattern[pbs_pattern_idx]; + break; +#endif + case 32: + pattern_ptr = (u32 *)&pbs_pattern_32b[pbs_pattern_idx]; + break; +#if defined(MV88F78X60) + case 64: + pattern_ptr = (u32 *)&pbs_pattern_64b[pbs_pattern_idx]; + break; +#endif + default: + return MV_FAIL; + } + + max_pup = dram_info->num_of_std_pups; + + sdram_offset = SDRAM_PBS_I_OFFS + pbs_pattern_idx * SDRAM_PBS_NEXT_OFFS; + + if (dram_info->num_of_std_pups == PUP_NUM_64BIT) + pup_groups = 2; + else + pup_groups = 1; + + ddr3_reset_phy_read_fifo(); + + /* Check if need to write to sdram before read */ + if (is_tx == 1) { + ddr3_dram_sram_burst((u32)pattern_ptr, sdram_offset, + LEN_PBS_PATTERN); + } + + ddr3_dram_sram_read(sdram_offset, (u32)sdram_data, LEN_PBS_PATTERN); + + /* Compare read result to write */ + for (ui = 0; ui < LEN_PBS_PATTERN; ui++) { + if ((sdram_data[ui]) != (pattern_ptr[ui])) { + /* found error */ + /* error in low pup group */ + for (pup = 0; pup < PUP_NUM_32BIT; pup++) { + val = CMP_BYTE_SHIFT * pup; + var1 = ((sdram_data[ui] >> val) & + CMP_BYTE_MASK); + var2 = ((pattern_ptr[ui] >> val) & + CMP_BYTE_MASK); + + if (var1 != var2) { + if 
(dram_info->ddr_width > 16) { + tmp_pup = (pup + PUP_NUM_32BIT * + (ui % pup_groups)); + } else { + tmp_pup = (pup % PUP_NUM_16BIT); + } + + update_pup = (1 << tmp_pup); + if (ecc && (update_pup != 0x1)) + continue; + + /* + * Pup is failed - Go over all DQs and + * look for failures + */ + for (dq = 0; dq < DQ_NUM; dq++) { + val = tmp_pup * (1 - ecc) + + ecc * ECC_PUP; + if (((var1 >> dq) & 0x1) != + ((var2 >> dq) & 0x1)) { + if (pbs_locked_dq[val][dq] == 1 && + pbs_locked_value[val][dq] != pbs_curr_val) + continue; + + /* + * Activate write to + * update PBS to + * pbs_lock_val + */ + pbs_write_pup[dq] |= + update_pup; + + /* + * Update the + * unlock_pup_dq_array + */ + unlock_pup_dq_array[dq] &= + ~update_pup; + + /* + * Lock PBS value for + * failed bits in + * compare operation + */ + skew_array[tmp_pup * DQ_NUM + dq] = + pbs_curr_val; + } + } + } + } + } + } + + pup_addr = (is_tx == 1) ? PUP_PBS_TX : PUP_PBS_RX; + + /* Set last failed bits PBS to min / max pbs value */ + for (dq = 0; dq < DQ_NUM; dq++) { + for (pup = 0; pup < max_pup; pup++) { + if (pbs_write_pup[dq] & (1 << pup)) { + val = pup * (1 - ecc) + ecc * ECC_PUP; + if (pbs_locked_dq[val][dq] == 1 && + pbs_locked_value[val][dq] != pbs_curr_val) + continue; + + /* Mark the dq as locked */ + pbs_locked_dq[val][dq] = 1; + pbs_locked_value[val][dq] = pbs_curr_val; + ddr3_write_pup_reg(pup_addr + + pbs_dq_mapping[val][dq], + CS0, val, 0, pbs_lock_val); + } + } + } + + return MV_OK; +} +#endif + +/* + * Name: ddr3_sdram_direct_compare + * Desc: Execute compare per PUP without DMA (no burst mode) + * Args: unlock_pup Bit array of the unlock pups + * new_locked_pup Output bit array of the pups with failed compare + * pattern Pattern to compare + * pattern_len Length of pattern (in bytes) + * sdram_offset offset address to the SDRAM + * write write to the SDRAM before read + * mask compare pattern with mask; + * auiMaskPatter Mask to compare pattern + * + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +int ddr3_sdram_direct_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, + u32 pattern_len, u32 sdram_offset, + int write, int mask, u32 *mask_pattern) +{ + u32 uj, uk, pup_groups; + u32 *sdram_addr; /* used to read from SDRAM */ + + sdram_addr = (u32 *)sdram_offset; + + if (dram_info->num_of_std_pups == PUP_NUM_64BIT) + pup_groups = 2; + else + pup_groups = 1; + + /* Check if need to write before read */ + if (write == 1) { + for (uk = 0; uk < pattern_len; uk++) { + *sdram_addr = pattern[uk]; + sdram_addr++; + } + } + + sdram_addr = (u32 *)sdram_offset; + + for (uk = 0; uk < pattern_len; uk++) { + sdram_data[uk] = *sdram_addr; + sdram_addr++; + } + + /* Compare read result to write */ + for (uj = 0; uj < pattern_len; uj++) { + if (dram_info->ddr_width > 16) { + compare_pattern_v1(uj, new_locked_pup, pattern, + pup_groups, 0); + } else { + compare_pattern_v2(uj, new_locked_pup, pattern); + } + } + + return MV_OK; +} + +/* + * Name: ddr3_dram_sram_burst + * Desc: Read from the SDRAM in burst of 64 bytes + * Args: src + * dst + * Notes: Using the XOR mechanism + * Returns: MV_OK if success, other error code if fail. 
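+ *
+ *       len is given in 32-bit words, so byte_cnt = len * 4.  The
+ *       XOR engine is steered at the right chip-select through the
+ *       address-override window; for a DRAM source, roughly:
+ *
+ *           cs_num   = src / (SDRAM_CS_SIZE + 1);
+ *           override = (cs_num << 1) | 1;	/* enable + CS */
+ *           src_addr = src % (SDRAM_CS_SIZE + 1);
+ *
+ *       For a DRAM destination the same enable/CS pair is placed
+ *       at bits [24] and [25..] of the override register instead.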
+ */ +int ddr3_dram_sram_burst(u32 src, u32 dst, u32 len) +{ + u32 chan, byte_count, cs_num, byte; + struct xor_channel_t channel; + + chan = 0; + byte_count = len * 4; + + /* Wait for previous transfer completion */ + while (mv_xor_state_get(chan) != MV_IDLE) + ; + + /* Build the channel descriptor */ + channel.desc = &dma_desc; + + /* Enable Address Override and set correct src and dst */ + if (src < SRAM_BASE) { + /* src is DRAM CS, dst is SRAM */ + cs_num = (src / (1 + SDRAM_CS_SIZE)); + reg_write(XOR_ADDR_OVRD_REG(0, 0), + ((cs_num << 1) | (1 << 0))); + channel.desc->src_addr0 = (src % (1 + SDRAM_CS_SIZE)); + channel.desc->dst_addr = dst; + } else { + /* src is SRAM, dst is DRAM CS */ + cs_num = (dst / (1 + SDRAM_CS_SIZE)); + reg_write(XOR_ADDR_OVRD_REG(0, 0), + ((cs_num << 25) | (1 << 24))); + channel.desc->src_addr0 = (src); + channel.desc->dst_addr = (dst % (1 + SDRAM_CS_SIZE)); + channel.desc->src_addr0 = src; + channel.desc->dst_addr = (dst % (1 + SDRAM_CS_SIZE)); + } + + channel.desc->src_addr1 = 0; + channel.desc->byte_cnt = byte_count; + channel.desc->next_desc_ptr = 0; + channel.desc->status = 1 << 31; + channel.desc->desc_cmd = 0x0; + channel.desc_phys_addr = (unsigned long)&dma_desc; + + ddr3_flush_l1_line((u32)&dma_desc); + + /* Issue the transfer */ + if (mv_xor_transfer(chan, MV_DMA, channel.desc_phys_addr) != MV_OK) + return MV_FAIL; + + /* Wait for completion */ + xor_waiton_eng(chan); + + if (dst > SRAM_BASE) { + for (byte = 0; byte < byte_count; byte += 0x20) + cache_inv(dst + byte); + } + + return MV_OK; +} + +/* + * Name: ddr3_flush_l1_line + * Desc: + * Args: + * Notes: + * Returns: MV_OK if success, other error code if fail. + */ +static void ddr3_flush_l1_line(u32 line) +{ + u32 reg; + +#if defined(MV88F672X) + reg = 1; +#else + reg = reg_read(REG_SAMPLE_RESET_LOW_ADDR) & + (1 << REG_SAMPLE_RESET_CPU_ARCH_OFFS); +#ifdef MV88F67XX + reg = ~reg & (1 << REG_SAMPLE_RESET_CPU_ARCH_OFFS); +#endif +#endif + + if (reg) { + /* V7 Arch mode */ + flush_l1_v7(line); + flush_l1_v7(line + CACHE_LINE_SIZE); + } else { + /* V6 Arch mode */ + flush_l1_v6(line); + flush_l1_v6(line + CACHE_LINE_SIZE); + } +} + +int ddr3_dram_sram_read(u32 src, u32 dst, u32 len) +{ + u32 ui; + u32 *dst_ptr, *src_ptr; + + dst_ptr = (u32 *)dst; + src_ptr = (u32 *)src; + + for (ui = 0; ui < len; ui++) { + *dst_ptr = *src_ptr; + dst_ptr++; + src_ptr++; + } + + return MV_OK; +} + +int ddr3_sdram_dqs_compare(MV_DRAM_INFO *dram_info, u32 unlock_pup, + u32 *new_locked_pup, u32 *pattern, + u32 pattern_len, u32 sdram_offset, int write, + int mask, u32 *mask_pattern, + int special_compare) +{ + u32 uj, pup_groups; + + if (dram_info->num_of_std_pups == PUP_NUM_64BIT) + pup_groups = 2; + else + pup_groups = 1; + + ddr3_reset_phy_read_fifo(); + + /* Check if need to write to sdram before read */ + if (write == 1) + ddr3_dram_sram_burst((u32)pattern, sdram_offset, pattern_len); + + ddr3_dram_sram_burst(sdram_offset, (u32)sdram_data, pattern_len); + + /* Compare read result to write */ + for (uj = 0; uj < pattern_len; uj++) { + if (special_compare && special_compare_pattern(uj)) + continue; + + if (dram_info->ddr_width > 16) { + compare_pattern_v1(uj, new_locked_pup, pattern, + pup_groups, 1); + } else { + compare_pattern_v2(uj, new_locked_pup, pattern); + } + } + + return MV_OK; +} + +void ddr3_reset_phy_read_fifo(void) +{ + u32 reg; + + /* reset read FIFO */ + reg = reg_read(REG_DRAM_TRAINING_ADDR); + /* Start Auto Read Leveling procedure */ + reg |= (1 << REG_DRAM_TRAINING_RL_OFFS); + + /* 0x15B0 - Training 
Register */ + reg_write(REG_DRAM_TRAINING_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg |= ((1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS) + + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS)); + + /* [0] = 1 - Enable SW override, [4] = 1 - FIFO reset */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + do { + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + (1 << REG_DRAM_TRAINING_2_FIFO_RST_OFFS); + } while (reg); /* Wait for '0' */ + + reg = reg_read(REG_DRAM_TRAINING_ADDR); + + /* Clear Auto Read Leveling procedure */ + reg &= ~(1 << REG_DRAM_TRAINING_RL_OFFS); + + /* 0x15B0 - Training Register */ + reg_write(REG_DRAM_TRAINING_ADDR, reg); +} diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_spd.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_spd.c new file mode 100644 index 000000000..e2305d881 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_spd.c @@ -0,0 +1,1299 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> + +#include "ddr3_init.h" + +#if defined(MV88F78X60) +#include "ddr3_axp_config.h" +#elif defined(MV88F67XX) +#include "ddr3_a370_config.h" +#endif + +#if defined(MV88F672X) +#include "ddr3_a375_config.h" +#endif + +#ifdef DUNIT_SPD + +/* DIMM SPD offsets */ +#define SPD_DEV_TYPE_BYTE 2 + +#define SPD_MODULE_TYPE_BYTE 3 +#define SPD_MODULE_MASK 0xf +#define SPD_MODULE_TYPE_RDIMM 1 +#define SPD_MODULE_TYPE_UDIMM 2 + +#define SPD_DEV_DENSITY_BYTE 4 +#define SPD_DEV_DENSITY_MASK 0xf + +#define SPD_ROW_NUM_BYTE 5 +#define SPD_ROW_NUM_MIN 12 +#define SPD_ROW_NUM_OFF 3 +#define SPD_ROW_NUM_MASK (7 << SPD_ROW_NUM_OFF) + +#define SPD_COL_NUM_BYTE 5 +#define SPD_COL_NUM_MIN 9 +#define SPD_COL_NUM_OFF 0 +#define SPD_COL_NUM_MASK (7 << SPD_COL_NUM_OFF) + +#define SPD_MODULE_ORG_BYTE 7 +#define SPD_MODULE_SDRAM_DEV_WIDTH_OFF 0 +#define SPD_MODULE_SDRAM_DEV_WIDTH_MASK (7 << SPD_MODULE_SDRAM_DEV_WIDTH_OFF) +#define SPD_MODULE_BANK_NUM_MIN 1 +#define SPD_MODULE_BANK_NUM_OFF 3 +#define SPD_MODULE_BANK_NUM_MASK (7 << SPD_MODULE_BANK_NUM_OFF) + +#define SPD_BUS_WIDTH_BYTE 8 +#define SPD_BUS_WIDTH_OFF 0 +#define SPD_BUS_WIDTH_MASK (7 << SPD_BUS_WIDTH_OFF) +#define SPD_BUS_ECC_OFF 3 +#define SPD_BUS_ECC_MASK (3 << SPD_BUS_ECC_OFF) + +#define SPD_MTB_DIVIDEND_BYTE 10 +#define SPD_MTB_DIVISOR_BYTE 11 +#define SPD_TCK_BYTE 12 +#define SPD_SUP_CAS_LAT_LSB_BYTE 14 +#define SPD_SUP_CAS_LAT_MSB_BYTE 15 +#define SPD_TAA_BYTE 16 +#define SPD_TWR_BYTE 17 +#define SPD_TRCD_BYTE 18 +#define SPD_TRRD_BYTE 19 +#define SPD_TRP_BYTE 20 + +#define SPD_TRAS_MSB_BYTE 21 +#define SPD_TRAS_MSB_MASK 0xf + +#define SPD_TRC_MSB_BYTE 21 +#define SPD_TRC_MSB_MASK 0xf0 + +#define SPD_TRAS_LSB_BYTE 22 +#define SPD_TRC_LSB_BYTE 23 +#define SPD_TRFC_LSB_BYTE 24 +#define SPD_TRFC_MSB_BYTE 25 +#define SPD_TWTR_BYTE 26 +#define SPD_TRTP_BYTE 27 + +#define SPD_TFAW_MSB_BYTE 28 +#define SPD_TFAW_MSB_MASK 0xf + +#define SPD_TFAW_LSB_BYTE 29 +#define SPD_OPT_FEATURES_BYTE 30 +#define SPD_THERMAL_REFRESH_OPT_BYTE 31 + +#define SPD_ADDR_MAP_BYTE 63 +#define SPD_ADDR_MAP_MIRROR_OFFS 0 + +#define SPD_RDIMM_RC_BYTE 69 +#define SPD_RDIMM_RC_NIBBLE_MASK 0xF +#define SPD_RDIMM_RC_NUM 16 + +/* Dimm Memory Type values */ +#define SPD_MEM_TYPE_SDRAM 0x4 +#define SPD_MEM_TYPE_DDR1 0x7 +#define SPD_MEM_TYPE_DDR2 0x8 +#define SPD_MEM_TYPE_DDR3 0xB + +#define DIMM_MODULE_MANU_OFFS 64 +#define 
DIMM_MODULE_MANU_SIZE 8 +#define DIMM_MODULE_VEN_OFFS 73 +#define DIMM_MODULE_VEN_SIZE 25 +#define DIMM_MODULE_ID_OFFS 99 +#define DIMM_MODULE_ID_SIZE 18 + +/* enumeration for voltage levels. */ +enum dimm_volt_if { + TTL_5V_TOLERANT, + LVTTL, + HSTL_1_5V, + SSTL_3_3V, + SSTL_2_5V, + VOLTAGE_UNKNOWN, +}; + +/* enumaration for SDRAM CAS Latencies. */ +enum dimm_sdram_cas { + SD_CL_1 = 1, + SD_CL_2, + SD_CL_3, + SD_CL_4, + SD_CL_5, + SD_CL_6, + SD_CL_7, + SD_FAULT +}; + +/* enumeration for memory types */ +enum memory_type { + MEM_TYPE_SDRAM, + MEM_TYPE_DDR1, + MEM_TYPE_DDR2, + MEM_TYPE_DDR3 +}; + +/* DIMM information structure */ +typedef struct dimm_info { + /* DIMM dimensions */ + u32 num_of_module_ranks; + u32 data_width; + u32 rank_capacity; + u32 num_of_devices; + + u32 sdram_width; + u32 num_of_banks_on_each_device; + u32 sdram_capacity; + + u32 num_of_row_addr; + u32 num_of_col_addr; + + u32 addr_mirroring; + + u32 err_check_type; /* ECC , PARITY.. */ + u32 type_info; /* DDR2 only */ + + /* DIMM timing parameters */ + u32 supported_cas_latencies; + u32 refresh_interval; + u32 min_cycle_time; + u32 min_row_precharge_time; + u32 min_row_active_to_row_active; + u32 min_ras_to_cas_delay; + u32 min_write_recovery_time; /* DDR3/2 only */ + u32 min_write_to_read_cmd_delay; /* DDR3/2 only */ + u32 min_read_to_prech_cmd_delay; /* DDR3/2 only */ + u32 min_active_to_precharge; + u32 min_refresh_recovery; /* DDR3/2 only */ + u32 min_cas_lat_time; + u32 min_four_active_win_delay; + u8 dimm_rc[SPD_RDIMM_RC_NUM]; + + /* DIMM vendor ID */ + u32 vendor; +} MV_DIMM_INFO; + +static int ddr3_spd_sum_init(MV_DIMM_INFO *info, MV_DIMM_INFO *sum_info, + u32 dimm); +static u32 ddr3_get_max_val(u32 spd_val, u32 dimm_num, u32 static_val); +static u32 ddr3_get_min_val(u32 spd_val, u32 dimm_num, u32 static_val); +static int ddr3_spd_init(MV_DIMM_INFO *info, u32 dimm_addr, u32 dimm_width); +static u32 ddr3_div(u32 val, u32 divider, u32 sub); + +extern u8 spd_data[SPD_SIZE]; +extern u32 odt_config[ODT_OPT]; +extern u16 odt_static[ODT_OPT][MAX_CS]; +extern u16 odt_dynamic[ODT_OPT][MAX_CS]; + +#if !(defined(DB_88F6710) || defined(DB_88F6710_PCAC) || defined(RD_88F6710)) +/* + * Name: ddr3_get_dimm_num - Find number of dimms and their addresses + * Desc: + * Args: dimm_addr - array of dimm addresses + * Notes: + * Returns: None. + */ +static u32 ddr3_get_dimm_num(u32 *dimm_addr) +{ + u32 dimm_cur_addr; + u8 data[3]; + u32 dimm_num = 0; + int ret; + + /* Read the dimm eeprom */ + for (dimm_cur_addr = MAX_DIMM_ADDR; dimm_cur_addr > MIN_DIMM_ADDR; + dimm_cur_addr--) { + data[SPD_DEV_TYPE_BYTE] = 0; + + /* Far-End DIMM must be connected */ + if ((dimm_num == 0) && (dimm_cur_addr < FAR_END_DIMM_ADDR)) + return 0; + + ret = i2c_read(dimm_cur_addr, 0, 1, (uchar *)data, 3); + if (!ret) { + if (data[SPD_DEV_TYPE_BYTE] == SPD_MEM_TYPE_DDR3) { + dimm_addr[dimm_num] = dimm_cur_addr; + dimm_num++; + } + } + } + + return dimm_num; +} +#endif + +/* + * Name: dimmSpdInit - Get the SPD parameters. + * Desc: Read the DIMM SPD parameters into given struct parameter. + * Args: dimmNum - DIMM number. See MV_BOARD_DIMM_NUM enumerator. + * info - DIMM information structure. + * Notes: + * Returns: MV_OK if function could read DIMM parameters, 0 otherwise. 
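+ *
+ *       Most geometry fields are a small bit-field plus a JEDEC
+ *       base value.  Worked example for SPD byte 5, which packs
+ *       the address widths:
+ *
+ *           rows = ((spd[5] & 0x38) >> 3) + 12;
+ *           cols =  (spd[5] & 0x07)       + 9;
+ *
+ *       so spd[5] = 0x19 decodes to 15 row and 10 column address
+ *       bits.  Timing bytes are multiples of the medium timebase
+ *       MTB = spd[10] / spd[11] ns (computed in ps below); e.g.
+ *       MTB = 1/8 ns and spd[12] = 10 give tCKmin = 1250 ps,
+ *       i.e. DDR3-1600.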
+ */ +int ddr3_spd_init(MV_DIMM_INFO *info, u32 dimm_addr, u32 dimm_width) +{ + u32 tmp; + u32 time_base; + int ret; + __maybe_unused u32 rc; + __maybe_unused u8 vendor_high, vendor_low; + + if (dimm_addr != 0) { + memset(spd_data, 0, SPD_SIZE * sizeof(u8)); + + ret = i2c_read(dimm_addr, 0, 1, (uchar *)spd_data, SPD_SIZE); + if (ret) + return MV_DDR3_TRAINING_ERR_TWSI_FAIL; + } + + /* Check if DDR3 */ + if (spd_data[SPD_DEV_TYPE_BYTE] != SPD_MEM_TYPE_DDR3) + return MV_DDR3_TRAINING_ERR_TWSI_BAD_TYPE; + + /* Error Check Type */ + /* No byte for error check in DDR3 SPD, use DDR2 convention */ + info->err_check_type = 0; + + /* Check if ECC */ + if ((spd_data[SPD_BUS_WIDTH_BYTE] & 0x18) >> 3) + info->err_check_type = 1; + + DEBUG_INIT_FULL_C("DRAM err_check_type ", info->err_check_type, 1); + switch (spd_data[SPD_MODULE_TYPE_BYTE]) { + case 1: + /* support RDIMM */ + info->type_info = SPD_MODULE_TYPE_RDIMM; + break; + case 2: + /* support UDIMM */ + info->type_info = SPD_MODULE_TYPE_UDIMM; + break; + case 11: /* LRDIMM current not supported */ + default: + info->type_info = (spd_data[SPD_MODULE_TYPE_BYTE]); + break; + } + + /* Size Calculations: */ + + /* Number Of Row Addresses - 12/13/14/15/16 */ + info->num_of_row_addr = + (spd_data[SPD_ROW_NUM_BYTE] & SPD_ROW_NUM_MASK) >> + SPD_ROW_NUM_OFF; + info->num_of_row_addr += SPD_ROW_NUM_MIN; + DEBUG_INIT_FULL_C("DRAM num_of_row_addr ", info->num_of_row_addr, 2); + + /* Number Of Column Addresses - 9/10/11/12 */ + info->num_of_col_addr = + (spd_data[SPD_COL_NUM_BYTE] & SPD_COL_NUM_MASK) >> + SPD_COL_NUM_OFF; + info->num_of_col_addr += SPD_COL_NUM_MIN; + DEBUG_INIT_FULL_C("DRAM num_of_col_addr ", info->num_of_col_addr, 1); + + /* Number Of Ranks = number of CS on Dimm - 1/2/3/4 Ranks */ + info->num_of_module_ranks = + (spd_data[SPD_MODULE_ORG_BYTE] & SPD_MODULE_BANK_NUM_MASK) >> + SPD_MODULE_BANK_NUM_OFF; + info->num_of_module_ranks += SPD_MODULE_BANK_NUM_MIN; + DEBUG_INIT_FULL_C("DRAM numOfModuleBanks ", info->num_of_module_ranks, + 1); + + /* Data Width - 8/16/32/64 bits */ + info->data_width = + 1 << (3 + (spd_data[SPD_BUS_WIDTH_BYTE] & SPD_BUS_WIDTH_MASK)); + DEBUG_INIT_FULL_C("DRAM data_width ", info->data_width, 1); + + /* Number Of Banks On Each Device - 8/16/32/64 banks */ + info->num_of_banks_on_each_device = + 1 << (3 + ((spd_data[SPD_DEV_DENSITY_BYTE] >> 4) & 0x7)); + DEBUG_INIT_FULL_C("DRAM num_of_banks_on_each_device ", + info->num_of_banks_on_each_device, 1); + + /* Total SDRAM capacity - 256Mb/512Mb/1Gb/2Gb/4Gb/8Gb/16Gb - MegaBits */ + info->sdram_capacity = + spd_data[SPD_DEV_DENSITY_BYTE] & SPD_DEV_DENSITY_MASK; + + /* Sdram Width - 4/8/16/32 bits */ + info->sdram_width = 1 << (2 + (spd_data[SPD_MODULE_ORG_BYTE] & + SPD_MODULE_SDRAM_DEV_WIDTH_MASK)); + DEBUG_INIT_FULL_C("DRAM sdram_width ", info->sdram_width, 1); + + /* CS (Rank) Capacity - MB */ + /* + * DDR3 device uiDensity val are: (device capacity/8) * + * (Module_width/Device_width) + */ + /* Jedec SPD DDR3 - page 7, Save spd_data in Mb - 2048=2GB */ + if (dimm_width == 32) { + info->rank_capacity = + ((1 << info->sdram_capacity) * 256 * + (info->data_width / info->sdram_width)) << 16; + /* CS size = CS size / 2 */ + } else { + info->rank_capacity = + ((1 << info->sdram_capacity) * 256 * + (info->data_width / info->sdram_width) * 0x2) << 16; + /* 0x2 => 0x100000-1Mbit / 8-bit->byte / 0x10000 */ + } + DEBUG_INIT_FULL_C("DRAM rank_capacity[31] ", info->rank_capacity, 1); + + /* Number of devices includeing Error correction */ + info->num_of_devices = + ((info->data_width / 
info->sdram_width) * + info->num_of_module_ranks) + info->err_check_type; + DEBUG_INIT_FULL_C("DRAM num_of_devices ", info->num_of_devices, 1); + + /* Address Mapping from Edge connector to DRAM - mirroring option */ + info->addr_mirroring = + spd_data[SPD_ADDR_MAP_BYTE] & (1 << SPD_ADDR_MAP_MIRROR_OFFS); + + /* Timings - All in ps */ + + time_base = (1000 * spd_data[SPD_MTB_DIVIDEND_BYTE]) / + spd_data[SPD_MTB_DIVISOR_BYTE]; + + /* Minimum Cycle Time At Max CasLatancy */ + info->min_cycle_time = spd_data[SPD_TCK_BYTE] * time_base; + DEBUG_INIT_FULL_C("DRAM tCKmin ", info->min_cycle_time, 1); + + /* Refresh Interval */ + /* No byte for refresh interval in DDR3 SPD, use DDR2 convention */ + /* + * JEDEC param are 0 <= Tcase <= 85: 7.8uSec, 85 <= Tcase + * <= 95: 3.9uSec + */ + info->refresh_interval = 7800000; /* Set to 7.8uSec */ + DEBUG_INIT_FULL_C("DRAM refresh_interval ", info->refresh_interval, 1); + + /* Suported Cas Latencies - DDR 3: */ + + /* + * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 * + *******-******-******-******-******-******-******-*******-******* + CAS = 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 * + *********************************************************-******* + *******-******-******-******-******-******-******-*******-******* + * bit15 |bit14 |bit13 |bit12 |bit11 |bit10 | bit9 | bit8 * + *******-******-******-******-******-******-******-*******-******* + CAS = TBD | 18 | 17 | 16 | 15 | 14 | 13 | 12 * + */ + + /* DDR3 include 2 byte of CAS support */ + info->supported_cas_latencies = + (spd_data[SPD_SUP_CAS_LAT_MSB_BYTE] << 8) | + spd_data[SPD_SUP_CAS_LAT_LSB_BYTE]; + DEBUG_INIT_FULL_C("DRAM supported_cas_latencies ", + info->supported_cas_latencies, 1); + + /* Minimum Cycle Time At Max CasLatancy */ + info->min_cas_lat_time = (spd_data[SPD_TAA_BYTE] * time_base); + /* + * This field divided by the cycleTime will give us the CAS latency + * to config + */ + + /* + * For DDR3 and DDR2 includes Write Recovery Time field. + * Other SDRAM ignore + */ + info->min_write_recovery_time = spd_data[SPD_TWR_BYTE] * time_base; + DEBUG_INIT_FULL_C("DRAM min_write_recovery_time ", + info->min_write_recovery_time, 1); + + /* Mininmum Ras to Cas Delay */ + info->min_ras_to_cas_delay = spd_data[SPD_TRCD_BYTE] * time_base; + DEBUG_INIT_FULL_C("DRAM min_ras_to_cas_delay ", + info->min_ras_to_cas_delay, 1); + + /* Minimum Row Active to Row Active Time */ + info->min_row_active_to_row_active = + spd_data[SPD_TRRD_BYTE] * time_base; + DEBUG_INIT_FULL_C("DRAM min_row_active_to_row_active ", + info->min_row_active_to_row_active, 1); + + /* Minimum Row Precharge Delay Time */ + info->min_row_precharge_time = spd_data[SPD_TRP_BYTE] * time_base; + DEBUG_INIT_FULL_C("DRAM min_row_precharge_time ", + info->min_row_precharge_time, 1); + + /* Minimum Active to Precharge Delay Time - tRAS ps */ + info->min_active_to_precharge = + (spd_data[SPD_TRAS_MSB_BYTE] & SPD_TRAS_MSB_MASK) << 8; + info->min_active_to_precharge |= spd_data[SPD_TRAS_LSB_BYTE]; + info->min_active_to_precharge *= time_base; + DEBUG_INIT_FULL_C("DRAM min_active_to_precharge ", + info->min_active_to_precharge, 1); + + /* Minimum Refresh Recovery Delay Time - tRFC ps */ + info->min_refresh_recovery = spd_data[SPD_TRFC_MSB_BYTE] << 8; + info->min_refresh_recovery |= spd_data[SPD_TRFC_LSB_BYTE]; + info->min_refresh_recovery *= time_base; + DEBUG_INIT_FULL_C("DRAM min_refresh_recovery ", + info->min_refresh_recovery, 1); + + /* + * For DDR3 and DDR2 includes Internal Write To Read Command Delay + * field. 
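+	 * Worked example (assuming the common DDR3 medium time base of
+	 * 1/8 ns, i.e. SPD dividend = 1, divisor = 8): time_base =
+	 * (1000 * 1) / 8 = 125 ps, so a tWTR SPD byte of 0x3C (60)
+	 * gives 60 * 125 = 7500 ps, the JEDEC 7.5 ns minimum.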
+ */
+	info->min_write_to_read_cmd_delay = spd_data[SPD_TWTR_BYTE] * time_base;
+	DEBUG_INIT_FULL_C("DRAM min_write_to_read_cmd_delay ",
+			  info->min_write_to_read_cmd_delay, 1);
+
+	/*
+	 * For DDR3 and DDR2 includes Internal Read To Precharge Command Delay
+	 * field.
+	 */
+	info->min_read_to_prech_cmd_delay = spd_data[SPD_TRTP_BYTE] * time_base;
+	DEBUG_INIT_FULL_C("DRAM min_read_to_prech_cmd_delay ",
+			  info->min_read_to_prech_cmd_delay, 1);
+
+	/*
+	 * For DDR3 includes Minimum Activate to Activate/Refresh Command
+	 * field
+	 */
+	tmp = ((spd_data[SPD_TFAW_MSB_BYTE] & SPD_TFAW_MSB_MASK) << 8) |
+		spd_data[SPD_TFAW_LSB_BYTE];
+	info->min_four_active_win_delay = tmp * time_base;
+	DEBUG_INIT_FULL_C("DRAM min_four_active_win_delay ",
+			  info->min_four_active_win_delay, 1);
+
+#if defined(MV88F78X60) || defined(MV88F672X)
+	/* Registered DIMM support */
+	if (info->type_info == SPD_MODULE_TYPE_RDIMM) {
+		for (rc = 2; rc < 6; rc += 2) {
+			tmp = spd_data[SPD_RDIMM_RC_BYTE + rc / 2];
+			info->dimm_rc[rc] =
+				spd_data[SPD_RDIMM_RC_BYTE + rc / 2] &
+				SPD_RDIMM_RC_NIBBLE_MASK;
+			info->dimm_rc[rc + 1] =
+				(spd_data[SPD_RDIMM_RC_BYTE + rc / 2] >> 4) &
+				SPD_RDIMM_RC_NIBBLE_MASK;
+		}
+
+		vendor_low = spd_data[66];
+		vendor_high = spd_data[65];
+		info->vendor = (vendor_high << 8) + vendor_low;
+		DEBUG_INIT_C("DDR3 Training Sequence - Registered DIMM vendor ID 0x",
+			     info->vendor, 4);
+
+		info->dimm_rc[0] = RDIMM_RC0;
+		info->dimm_rc[1] = RDIMM_RC1;
+		info->dimm_rc[2] = RDIMM_RC2;
+		info->dimm_rc[8] = RDIMM_RC8;
+		info->dimm_rc[9] = RDIMM_RC9;
+		info->dimm_rc[10] = RDIMM_RC10;
+		info->dimm_rc[11] = RDIMM_RC11;
+	}
+#endif
+
+	return MV_OK;
+}
+
+/*
+ * Name:    ddr3_spd_sum_init - Merge SPD parameters.
+ * Desc:    Merge the given DIMM's SPD parameters into the summary
+ *          structure, keeping the strictest timing value of the two.
+ * Args:    info - current DIMM information structure.
+ *          sum_info - merged (summary) DIMM information structure.
+ *          dimm - index of the current DIMM.
+ * Notes:
+ * Returns: MV_OK on success, relevant MV_DDR3_TRAINING_ERR_* code if
+ *          the DIMMs are incompatible.
+ */
+int ddr3_spd_sum_init(MV_DIMM_INFO *info, MV_DIMM_INFO *sum_info, u32 dimm)
+{
+	if (dimm == 0) {
+		memcpy(sum_info, info, sizeof(MV_DIMM_INFO));
+		return MV_OK;
+	}
+	if (sum_info->type_info != info->type_info) {
+		DEBUG_INIT_S("DDR3 Dimm Compare - DIMM type does not match - FAIL\n");
+		return MV_DDR3_TRAINING_ERR_DIMM_TYPE_NO_MATCH;
+	}
+	if (sum_info->err_check_type > info->err_check_type) {
+		sum_info->err_check_type = info->err_check_type;
+		DEBUG_INIT_S("DDR3 Dimm Compare - ECC does not match. 
ECC is disabled\n"); + } + if (sum_info->data_width != info->data_width) { + DEBUG_INIT_S("DDR3 Dimm Compare - DRAM bus width does not match - FAIL\n"); + return MV_DDR3_TRAINING_ERR_BUS_WIDTH_NOT_MATCH; + } + if (sum_info->min_cycle_time < info->min_cycle_time) + sum_info->min_cycle_time = info->min_cycle_time; + if (sum_info->refresh_interval < info->refresh_interval) + sum_info->refresh_interval = info->refresh_interval; + sum_info->supported_cas_latencies &= info->supported_cas_latencies; + if (sum_info->min_cas_lat_time < info->min_cas_lat_time) + sum_info->min_cas_lat_time = info->min_cas_lat_time; + if (sum_info->min_write_recovery_time < info->min_write_recovery_time) + sum_info->min_write_recovery_time = + info->min_write_recovery_time; + if (sum_info->min_ras_to_cas_delay < info->min_ras_to_cas_delay) + sum_info->min_ras_to_cas_delay = info->min_ras_to_cas_delay; + if (sum_info->min_row_active_to_row_active < + info->min_row_active_to_row_active) + sum_info->min_row_active_to_row_active = + info->min_row_active_to_row_active; + if (sum_info->min_row_precharge_time < info->min_row_precharge_time) + sum_info->min_row_precharge_time = info->min_row_precharge_time; + if (sum_info->min_active_to_precharge < info->min_active_to_precharge) + sum_info->min_active_to_precharge = + info->min_active_to_precharge; + if (sum_info->min_refresh_recovery < info->min_refresh_recovery) + sum_info->min_refresh_recovery = info->min_refresh_recovery; + if (sum_info->min_write_to_read_cmd_delay < + info->min_write_to_read_cmd_delay) + sum_info->min_write_to_read_cmd_delay = + info->min_write_to_read_cmd_delay; + if (sum_info->min_read_to_prech_cmd_delay < + info->min_read_to_prech_cmd_delay) + sum_info->min_read_to_prech_cmd_delay = + info->min_read_to_prech_cmd_delay; + if (sum_info->min_four_active_win_delay < + info->min_four_active_win_delay) + sum_info->min_four_active_win_delay = + info->min_four_active_win_delay; + if (sum_info->min_write_to_read_cmd_delay < + info->min_write_to_read_cmd_delay) + sum_info->min_write_to_read_cmd_delay = + info->min_write_to_read_cmd_delay; + + return MV_OK; +} + +/* + * Name: ddr3_dunit_setup + * Desc: Set the controller with the timing values. 
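+ *          Reads the per-DIMM SPD (or static) parameters, merges them
+ *          via ddr3_spd_sum_init(), derives CL/CWL and then programs
+ *          the D-Unit timing, ODT and mode registers.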
+ * Args: ecc_ena - User ECC setup + * Notes: + * Returns: + */ +int ddr3_dunit_setup(u32 ecc_ena, u32 hclk_time, u32 *ddr_width) +{ + u32 reg, tmp, cwl; + u32 ddr_clk_time; + MV_DIMM_INFO dimm_info[2]; + MV_DIMM_INFO sum_info; + u32 stat_val, spd_val; + u32 cs, cl, cs_num, cs_ena; + u32 dimm_num = 0; + int status; + u32 rc; + __maybe_unused u32 dimm_cnt, cs_count, dimm; + __maybe_unused u32 dimm_addr[2] = { 0, 0 }; + +#if defined(DB_88F6710) || defined(DB_88F6710_PCAC) || defined(RD_88F6710) + /* Armada 370 - SPD is not available on DIMM */ + /* + * Set MC registers according to Static SPD values Values - + * must be set manually + */ + /* + * We only have one optional DIMM for the DB and we already got the + * SPD matching values + */ + status = ddr3_spd_init(&dimm_info[0], 0, *ddr_width); + if (MV_OK != status) + return status; + + dimm_num = 1; + /* Use JP8 to enable multiCS support for Armada 370 DB */ + if (!ddr3_check_config(EEPROM_MODULE_ADDR, CONFIG_MULTI_CS)) + dimm_info[0].num_of_module_ranks = 1; + status = ddr3_spd_sum_init(&dimm_info[0], &sum_info, 0); + if (MV_OK != status) + return status; +#else + /* Dynamic D-Unit Setup - Read SPD values */ +#ifdef DUNIT_SPD + dimm_num = ddr3_get_dimm_num(dimm_addr); + if (dimm_num == 0) { +#ifdef MIXED_DIMM_STATIC + DEBUG_INIT_S("DDR3 Training Sequence - No DIMMs detected\n"); +#else + DEBUG_INIT_S("DDR3 Training Sequence - FAILED (Wrong DIMMs Setup)\n"); + return MV_DDR3_TRAINING_ERR_BAD_DIMM_SETUP; +#endif + } else { + DEBUG_INIT_C("DDR3 Training Sequence - Number of DIMMs detected: ", + dimm_num, 1); + } + + for (dimm = 0; dimm < dimm_num; dimm++) { + status = ddr3_spd_init(&dimm_info[dimm], dimm_addr[dimm], + *ddr_width); + if (MV_OK != status) + return status; + status = ddr3_spd_sum_init(&dimm_info[dimm], &sum_info, dimm); + if (MV_OK != status) + return status; + } +#endif +#endif + + /* Set number of enabled CS */ + cs_num = 0; +#ifdef DUNIT_STATIC + cs_num = ddr3_get_cs_num_from_reg(); +#endif +#ifdef DUNIT_SPD + for (dimm = 0; dimm < dimm_num; dimm++) + cs_num += dimm_info[dimm].num_of_module_ranks; +#endif + if (cs_num > MAX_CS) { + DEBUG_INIT_C("DDR3 Training Sequence - Number of CS exceed limit - ", + MAX_CS, 1); + return MV_DDR3_TRAINING_ERR_MAX_CS_LIMIT; + } + + /* Set bitmap of enabled CS */ + cs_ena = 0; +#ifdef DUNIT_STATIC + cs_ena = ddr3_get_cs_ena_from_reg(); +#endif +#ifdef DUNIT_SPD + dimm = 0; + + if (dimm_num) { + for (cs = 0; cs < MAX_CS; cs += 2) { + if (((1 << cs) & DIMM_CS_BITMAP) && + !(cs_ena & (1 << cs))) { + if (dimm_info[dimm].num_of_module_ranks == 1) + cs_ena |= (0x1 << cs); + else if (dimm_info[dimm].num_of_module_ranks == 2) + cs_ena |= (0x3 << cs); + else if (dimm_info[dimm].num_of_module_ranks == 3) + cs_ena |= (0x7 << cs); + else if (dimm_info[dimm].num_of_module_ranks == 4) + cs_ena |= (0xF << cs); + + dimm++; + if (dimm == dimm_num) + break; + } + } + } +#endif + + if (cs_ena > 0xF) { + DEBUG_INIT_C("DDR3 Training Sequence - Number of enabled CS exceed limit - ", + MAX_CS, 1); + return MV_DDR3_TRAINING_ERR_MAX_ENA_CS_LIMIT; + } + + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - Number of CS = ", cs_num, 1); + + /* Check Ratio - '1' - 2:1, '0' - 1:1 */ + if (reg_read(REG_DDR_IO_ADDR) & (1 << REG_DDR_IO_CLK_RATIO_OFFS)) + ddr_clk_time = hclk_time / 2; + else + ddr_clk_time = hclk_time; + +#ifdef DUNIT_STATIC + /* Get target CL value from set register */ + reg = (reg_read(REG_DDR3_MR0_ADDR) >> 2); + reg = ((((reg >> 1) & 0xE)) | (reg & 0x1)) & 0xF; + + cl = 
ddr3_get_max_val(ddr3_div(sum_info.min_cas_lat_time, + ddr_clk_time, 0), + dimm_num, ddr3_valid_cl_to_cl(reg)); +#else + cl = ddr3_div(sum_info.min_cas_lat_time, ddr_clk_time, 0); +#endif + if (cl < 5) + cl = 5; + + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - Cas Latency = ", cl, 1); + + /* {0x00001400} - DDR SDRAM Configuration Register */ + reg = 0x73004000; + stat_val = ddr3_get_static_mc_value( + REG_SDRAM_CONFIG_ADDR, REG_SDRAM_CONFIG_ECC_OFFS, 0x1, 0, 0); + if (ecc_ena && ddr3_get_min_val(sum_info.err_check_type, dimm_num, + stat_val)) { + reg |= (1 << REG_SDRAM_CONFIG_ECC_OFFS); + reg |= (1 << REG_SDRAM_CONFIG_IERR_OFFS); + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - ECC Enabled\n"); + } else { + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - ECC Disabled\n"); + } + + if (sum_info.type_info == SPD_MODULE_TYPE_RDIMM) { +#ifdef DUNIT_STATIC + DEBUG_INIT_S("DDR3 Training Sequence - FAIL - Illegal R-DIMM setup\n"); + return MV_DDR3_TRAINING_ERR_BAD_R_DIMM_SETUP; +#endif + reg |= (1 << REG_SDRAM_CONFIG_REGDIMM_OFFS); + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - R-DIMM\n"); + } else { + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - U-DIMM\n"); + } + +#ifndef MV88F67XX +#ifdef DUNIT_STATIC + if (ddr3_get_min_val(sum_info.data_width, dimm_num, BUS_WIDTH) == 64) { +#else + if (*ddr_width == 64) { +#endif + reg |= (1 << REG_SDRAM_CONFIG_WIDTH_OFFS); + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - Datawidth - 64Bits\n"); + } else { + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - Datawidth - 32Bits\n"); + } +#else + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - Datawidth - 16Bits\n"); +#endif + +#if defined(MV88F672X) + if (*ddr_width == 32) { + reg |= (1 << REG_SDRAM_CONFIG_WIDTH_OFFS); + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - Datawidth - 32Bits\n"); + } else { + DEBUG_INIT_FULL_S("DDR3 - DUNIT-SET - Datawidth - 16Bits\n"); + } +#endif + stat_val = ddr3_get_static_mc_value(REG_SDRAM_CONFIG_ADDR, 0, + REG_SDRAM_CONFIG_RFRS_MASK, 0, 0); + tmp = ddr3_get_min_val(sum_info.refresh_interval / hclk_time, + dimm_num, stat_val); + +#ifdef TREFI_USER_EN + tmp = min(TREFI_USER / hclk_time, tmp); +#endif + + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - RefreshInterval/Hclk = ", tmp, 4); + reg |= tmp; + + if (cl != 3) + reg |= (1 << 16); /* If 2:1 need to set P2DWr */ + +#if defined(MV88F672X) + reg |= (1 << 27); /* PhyRfRST = Disable */ +#endif + reg_write(REG_SDRAM_CONFIG_ADDR, reg); + + /*{0x00001404} - DDR SDRAM Configuration Register */ + reg = 0x3630B800; +#ifdef DUNIT_SPD + reg |= (DRAM_2T << REG_DUNIT_CTRL_LOW_2T_OFFS); +#endif + reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + + /* {0x00001408} - DDR SDRAM Timing (Low) Register */ + reg = 0x0; + + /* tRAS - (0:3,20) */ + spd_val = ddr3_div(sum_info.min_active_to_precharge, + ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_LOW_ADDR, + 0, 0xF, 16, 0x10); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tRAS-1 = ", tmp, 1); + reg |= (tmp & 0xF); + reg |= ((tmp & 0x10) << 16); /* to bit 20 */ + + /* tRCD - (4:7) */ + spd_val = ddr3_div(sum_info.min_ras_to_cas_delay, ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_LOW_ADDR, + 4, 0xF, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tRCD-1 = ", tmp, 1); + reg |= ((tmp & 0xF) << 4); + + /* tRP - (8:11) */ + spd_val = ddr3_div(sum_info.min_row_precharge_time, ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_LOW_ADDR, + 8, 0xF, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + 
DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tRP-1 = ", tmp, 1); + reg |= ((tmp & 0xF) << 8); + + /* tWR - (12:15) */ + spd_val = ddr3_div(sum_info.min_write_recovery_time, ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_LOW_ADDR, + 12, 0xF, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tWR-1 = ", tmp, 1); + reg |= ((tmp & 0xF) << 12); + + /* tWTR - (16:19) */ + spd_val = ddr3_div(sum_info.min_write_to_read_cmd_delay, ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_LOW_ADDR, + 16, 0xF, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tWTR-1 = ", tmp, 1); + reg |= ((tmp & 0xF) << 16); + + /* tRRD - (24:27) */ + spd_val = ddr3_div(sum_info.min_row_active_to_row_active, ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_LOW_ADDR, + 24, 0xF, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tRRD-1 = ", tmp, 1); + reg |= ((tmp & 0xF) << 24); + + /* tRTP - (28:31) */ + spd_val = ddr3_div(sum_info.min_read_to_prech_cmd_delay, ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_LOW_ADDR, + 28, 0xF, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tRTP-1 = ", tmp, 1); + reg |= ((tmp & 0xF) << 28); + + if (cl < 7) + reg = 0x33137663; + + reg_write(REG_SDRAM_TIMING_LOW_ADDR, reg); + + /*{0x0000140C} - DDR SDRAM Timing (High) Register */ + /* Add cycles to R2R W2W */ + reg = 0x39F8FF80; + + /* tRFC - (0:6,16:18) */ + spd_val = ddr3_div(sum_info.min_refresh_recovery, ddr_clk_time, 1); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_TIMING_HIGH_ADDR, + 0, 0x7F, 9, 0x380); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tRFC-1 = ", tmp, 1); + reg |= (tmp & 0x7F); + reg |= ((tmp & 0x380) << 9); /* to bit 16 */ + reg_write(REG_SDRAM_TIMING_HIGH_ADDR, reg); + + /*{0x00001410} - DDR SDRAM Address Control Register */ + reg = 0x000F0000; + + /* tFAW - (24:28) */ +#if (defined(MV88F78X60) || defined(MV88F672X)) + tmp = sum_info.min_four_active_win_delay; + spd_val = ddr3_div(tmp, ddr_clk_time, 0); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_ADDRESS_CTRL_ADDR, + 24, 0x3F, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tFAW = ", tmp, 1); + reg |= ((tmp & 0x3F) << 24); +#else + tmp = sum_info.min_four_active_win_delay - + 4 * (sum_info.min_row_active_to_row_active); + spd_val = ddr3_div(tmp, ddr_clk_time, 0); + stat_val = ddr3_get_static_mc_value(REG_SDRAM_ADDRESS_CTRL_ADDR, + 24, 0x1F, 0, 0); + tmp = ddr3_get_max_val(spd_val, dimm_num, stat_val); + DEBUG_INIT_FULL_C("DDR3 - DUNIT-SET - tFAW-4*tRRD = ", tmp, 1); + reg |= ((tmp & 0x1F) << 24); +#endif + + /* SDRAM device capacity */ +#ifdef DUNIT_STATIC + reg |= (reg_read(REG_SDRAM_ADDRESS_CTRL_ADDR) & 0xF0FFFF); +#endif + +#ifdef DUNIT_SPD + cs_count = 0; + dimm_cnt = 0; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs) & DIMM_CS_BITMAP) { + if (dimm_info[dimm_cnt].num_of_module_ranks == cs_count) { + dimm_cnt++; + cs_count = 0; + } + cs_count++; + if (dimm_info[dimm_cnt].sdram_capacity < 0x3) { + reg |= ((dimm_info[dimm_cnt].sdram_capacity + 1) << + (REG_SDRAM_ADDRESS_SIZE_OFFS + + (REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS * cs))); + } else if (dimm_info[dimm_cnt].sdram_capacity > 0x3) { + reg |= ((dimm_info[dimm_cnt].sdram_capacity & 0x3) << + 
(REG_SDRAM_ADDRESS_SIZE_OFFS + + (REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS * cs))); + reg |= ((dimm_info[dimm_cnt].sdram_capacity & 0x4) << + (REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS + cs)); + } + } + } + + /* SDRAM device structure */ + cs_count = 0; + dimm_cnt = 0; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs) & DIMM_CS_BITMAP) { + if (dimm_info[dimm_cnt].num_of_module_ranks == cs_count) { + dimm_cnt++; + cs_count = 0; + } + cs_count++; + if (dimm_info[dimm_cnt].sdram_width == 16) + reg |= (1 << (REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS * cs)); + } + } +#endif + reg_write(REG_SDRAM_ADDRESS_CTRL_ADDR, reg); + + /*{0x00001418} - DDR SDRAM Operation Register */ + reg = 0xF00; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) + reg &= ~(1 << (cs + REG_SDRAM_OPERATION_CS_OFFS)); + } + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + /*{0x00001420} - DDR SDRAM Extended Mode Register */ + reg = 0x00000004; + reg_write(REG_SDRAM_EXT_MODE_ADDR, reg); + + /*{0x00001424} - DDR Controller Control (High) Register */ +#if (defined(MV88F78X60) || defined(MV88F672X)) + reg = 0x0000D3FF; +#else + reg = 0x0100D1FF; +#endif + reg_write(REG_DDR_CONT_HIGH_ADDR, reg); + + /*{0x0000142C} - DDR3 Timing Register */ + reg = 0x014C2F38; +#if defined(MV88F78X60) || defined(MV88F672X) + reg = 0x1FEC2F38; +#endif + reg_write(0x142C, reg); + + /*{0x00001484} - MBus CPU Block Register */ +#ifdef MV88F67XX + if (reg_read(REG_DDR_IO_ADDR) & (1 << REG_DDR_IO_CLK_RATIO_OFFS)) + reg_write(REG_MBUS_CPU_BLOCK_ADDR, 0x0000E907); +#endif + + /* + * In case of mixed dimm and on-board devices setup paramters will + * be taken statically + */ + /*{0x00001494} - DDR SDRAM ODT Control (Low) Register */ + reg = odt_config[cs_ena]; + reg_write(REG_SDRAM_ODT_CTRL_LOW_ADDR, reg); + + /*{0x00001498} - DDR SDRAM ODT Control (High) Register */ + reg = 0x00000000; + reg_write(REG_SDRAM_ODT_CTRL_HIGH_ADDR, reg); + + /*{0x0000149C} - DDR Dunit ODT Control Register */ + reg = cs_ena; + reg_write(REG_DUNIT_ODT_CTRL_ADDR, reg); + + /*{0x000014A0} - DDR Dunit ODT Control Register */ +#if defined(MV88F672X) + reg = 0x000006A9; + reg_write(REG_DRAM_FIFO_CTRL_ADDR, reg); +#endif + + /*{0x000014C0} - DRAM address and Control Driving Strenght */ + reg_write(REG_DRAM_ADDR_CTRL_DRIVE_STRENGTH_ADDR, 0x192435e9); + + /*{0x000014C4} - DRAM Data and DQS Driving Strenght */ + reg_write(REG_DRAM_DATA_DQS_DRIVE_STRENGTH_ADDR, 0xB2C35E9); + +#if (defined(MV88F78X60) || defined(MV88F672X)) + /*{0x000014CC} - DRAM Main Pads Calibration Machine Control Register */ + reg = reg_read(REG_DRAM_MAIN_PADS_CAL_ADDR); + reg_write(REG_DRAM_MAIN_PADS_CAL_ADDR, reg | (1 << 0)); +#endif + +#if defined(MV88F672X) + /* DRAM Main Pads Calibration Machine Control Register */ + /* 0x14CC[4:3] - CalUpdateControl = IntOnly */ + reg = reg_read(REG_DRAM_MAIN_PADS_CAL_ADDR); + reg &= 0xFFFFFFE7; + reg |= (1 << 3); + reg_write(REG_DRAM_MAIN_PADS_CAL_ADDR, reg); +#endif + +#ifdef DUNIT_SPD + cs_count = 0; + dimm_cnt = 0; + for (cs = 0; cs < MAX_CS; cs++) { + if ((1 << cs) & DIMM_CS_BITMAP) { + if ((1 << cs) & cs_ena) { + if (dimm_info[dimm_cnt].num_of_module_ranks == + cs_count) { + dimm_cnt++; + cs_count = 0; + } + cs_count++; + reg_write(REG_CS_SIZE_SCRATCH_ADDR + (cs * 0x8), + dimm_info[dimm_cnt].rank_capacity - 1); + } else { + reg_write(REG_CS_SIZE_SCRATCH_ADDR + (cs * 0x8), 0); + } + } + } +#endif + + /*{0x00020184} - Close FastPath - 2G */ + reg_write(REG_FASTPATH_WIN_0_CTRL_ADDR, 0); + + /*{0x00001538} - Read Data Sample Delays Register */ + reg = 0; + for (cs = 0; cs < 
MAX_CS; cs++) { + if (cs_ena & (1 << cs)) + reg |= (cl << (REG_READ_DATA_SAMPLE_DELAYS_OFFS * cs)); + } + + reg_write(REG_READ_DATA_SAMPLE_DELAYS_ADDR, reg); + DEBUG_INIT_FULL_C("DDR3 - SPD-SET - Read Data Sample Delays = ", reg, + 1); + + /*{0x0000153C} - Read Data Ready Delay Register */ + reg = 0; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + reg |= ((cl + 2) << + (REG_READ_DATA_READY_DELAYS_OFFS * cs)); + } + } + reg_write(REG_READ_DATA_READY_DELAYS_ADDR, reg); + DEBUG_INIT_FULL_C("DDR3 - SPD-SET - Read Data Ready Delays = ", reg, 1); + + /* Set MR registers */ + /* MR0 */ + reg = 0x00000600; + tmp = ddr3_cl_to_valid_cl(cl); + reg |= ((tmp & 0x1) << 2); + reg |= ((tmp & 0xE) << 3); /* to bit 4 */ + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + reg_write(REG_DDR3_MR0_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + /* MR1 */ + reg = 0x00000044 & REG_DDR3_MR1_ODT_MASK; + if (cs_num > 1) + reg = 0x00000046 & REG_DDR3_MR1_ODT_MASK; + + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + reg |= odt_static[cs_ena][cs]; + reg_write(REG_DDR3_MR1_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + /* MR2 */ + if (reg_read(REG_DDR_IO_ADDR) & (1 << REG_DDR_IO_CLK_RATIO_OFFS)) + tmp = hclk_time / 2; + else + tmp = hclk_time; + + if (tmp >= 2500) + cwl = 5; /* CWL = 5 */ + else if (tmp >= 1875 && tmp < 2500) + cwl = 6; /* CWL = 6 */ + else if (tmp >= 1500 && tmp < 1875) + cwl = 7; /* CWL = 7 */ + else if (tmp >= 1250 && tmp < 1500) + cwl = 8; /* CWL = 8 */ + else if (tmp >= 1070 && tmp < 1250) + cwl = 9; /* CWL = 9 */ + else if (tmp >= 935 && tmp < 1070) + cwl = 10; /* CWL = 10 */ + else if (tmp >= 833 && tmp < 935) + cwl = 11; /* CWL = 11 */ + else if (tmp >= 750 && tmp < 833) + cwl = 12; /* CWL = 12 */ + else { + cwl = 12; /* CWL = 12 */ + printf("Unsupported hclk %d MHz\n", tmp); + } + + reg = ((cwl - 5) << REG_DDR3_MR2_CWL_OFFS); + + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + reg &= REG_DDR3_MR2_ODT_MASK; + reg |= odt_dynamic[cs_ena][cs]; + reg_write(REG_DDR3_MR2_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + /* MR3 */ + reg = 0x00000000; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs)) { + reg_write(REG_DDR3_MR3_CS_ADDR + + (cs << MR_CS_ADDR_OFFS), reg); + } + } + + /* {0x00001428} - DDR ODT Timing (Low) Register */ + reg = 0; + reg |= (((cl - cwl + 1) & 0xF) << 4); + reg |= (((cl - cwl + 6) & 0xF) << 8); + reg |= ((((cl - cwl + 6) >> 4) & 0x1) << 21); + reg |= (((cl - 1) & 0xF) << 12); + reg |= (((cl + 6) & 0x1F) << 16); + reg_write(REG_ODT_TIME_LOW_ADDR, reg); + + /* {0x0000147C} - DDR ODT Timing (High) Register */ + reg = 0x00000071; + reg |= ((cwl - 1) << 8); + reg |= ((cwl + 5) << 12); + reg_write(REG_ODT_TIME_HIGH_ADDR, reg); + +#ifdef DUNIT_SPD + /*{0x000015E0} - DDR3 Rank Control Register */ + reg = cs_ena; + cs_count = 0; + dimm_cnt = 0; + for (cs = 0; cs < MAX_CS; cs++) { + if (cs_ena & (1 << cs) & DIMM_CS_BITMAP) { + if (dimm_info[dimm_cnt].num_of_module_ranks == cs_count) { + dimm_cnt++; + cs_count = 0; + } + cs_count++; + + if (dimm_info[dimm_cnt].addr_mirroring && + (cs == 1 || cs == 3) && + (sum_info.type_info != SPD_MODULE_TYPE_RDIMM)) { + reg |= (1 << (REG_DDR3_RANK_CTRL_MIRROR_OFFS + cs)); + DEBUG_INIT_FULL_C("DDR3 - SPD-SET - Setting Address Mirroring for CS = ", + cs, 1); + } + } + } + reg_write(REG_DDR3_RANK_CTRL_ADDR, reg); +#endif + + /*{0xD00015E4} - ZQDS Configuration Register */ + reg = 0x00203c18; + reg_write(REG_ZQC_CONF_ADDR, reg); + + /* {0x00015EC} - DDR PHY */ 
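+	/*
+	 * Worked example for the CWL selection and ODT timing programming
+	 * above (illustrative numbers only, not taken from a specific
+	 * board): with a 1:1 clock ratio and hclk_time = 1250 ps
+	 * (DDR3-1600) the table above selects CWL = 8; assuming CL = 11,
+	 * the ODT timing (low) fields become (cl - cwl + 1) = 4 in bits
+	 * [7:4], (cl - cwl + 6) = 9 in bits [11:8], (cl - 1) = 10 in bits
+	 * [15:12] and (cl + 6) = 17 in bits [20:16], i.e. reg = 0x0011A940.
+	 */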
+#if defined(MV88F78X60)
+	reg = 0xF800AAA5;
+	if (mv_ctrl_rev_get() == MV_78XX0_B0_REV)
+		reg = 0xF800A225;
+#else
+	reg = 0xDE000025;
+#if defined(MV88F672X)
+	reg = 0xF800A225;
+#endif
+#endif
+	reg_write(REG_DRAM_PHY_CONFIG_ADDR, reg);
+
+#if (defined(MV88F78X60) || defined(MV88F672X))
+	/* Registered DIMM support - supported only in AXP A0 devices */
+	/* Currently supported for SPD detection only */
+	/*
+	 * Flow is according to the Registered DIMM chapter in the
+	 * Functional Spec
+	 */
+	if (sum_info.type_info == SPD_MODULE_TYPE_RDIMM) {
+		DEBUG_INIT_S("DDR3 Training Sequence - Registered DIMM detected\n");
+
+		/* Set commands parity completion */
+		reg = reg_read(REG_REGISTERED_DRAM_CTRL_ADDR);
+		reg &= ~REG_REGISTERED_DRAM_CTRL_PARITY_MASK;
+		reg |= 0x8;
+		reg_write(REG_REGISTERED_DRAM_CTRL_ADDR, reg);
+
+		/* De-assert M_RESETn and assert M_CKE */
+		reg_write(REG_SDRAM_INIT_CTRL_ADDR,
+			  1 << REG_SDRAM_INIT_CKE_ASSERT_OFFS);
+		do {
+			reg = (reg_read(REG_SDRAM_INIT_CTRL_ADDR)) &
+				(1 << REG_SDRAM_INIT_CKE_ASSERT_OFFS);
+		} while (reg);
+
+		for (rc = 0; rc < SPD_RDIMM_RC_NUM; rc++) {
+			if (rc != 6 && rc != 7) {
+				/* Set CWA Command */
+				reg = (REG_SDRAM_OPERATION_CMD_CWA &
+				       ~(0xF << REG_SDRAM_OPERATION_CS_OFFS));
+				reg |= ((dimm_info[0].dimm_rc[rc] &
+					 REG_SDRAM_OPERATION_CWA_DATA_MASK) <<
+					REG_SDRAM_OPERATION_CWA_DATA_OFFS);
+				reg |= rc << REG_SDRAM_OPERATION_CWA_RC_OFFS;
+				/* Configure - Set Delay - tSTAB/tMRD */
+				if (rc == 2 || rc == 10)
+					reg |= (0x1 << REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS);
+				/* 0x1418 - SDRAM Operation Register */
+				reg_write(REG_SDRAM_OPERATION_ADDR, reg);
+
+				/*
+				 * Poll the "cmd" field in the SDRAM OP
+				 * register for 0x0
+				 */
+				do {
+					reg = reg_read(REG_SDRAM_OPERATION_ADDR) &
+						(REG_SDRAM_OPERATION_CMD_MASK);
+				} while (reg);
+			}
+		}
+	}
+#endif
+
+	return MV_OK;
+}
+
+/*
+ * Name:    ddr3_div - this function divides integers
+ * Desc:    Divide val by divider, rounding up, then subtract sub.
+ * Args:    val - the value
+ *          divider - the divider
+ *          sub - subtraction value
+ * Notes:   e.g. ddr3_div(7500, 938, 1) = 8 - 1 = 7
+ * Returns: ceil(val / divider) - sub
+ */
+u32 ddr3_div(u32 val, u32 divider, u32 sub)
+{
+	return val / divider + (val % divider > 0 ? 1 : 0) - sub;
+}
+
+/*
+ * Name:    ddr3_get_max_val
+ * Desc:    Return the stricter (larger) of the SPD-derived and static
+ *          values.
+ * Args:    spd_val - value computed from SPD
+ *          dimm_num - number of detected DIMMs
+ *          static_val - statically configured value
+ * Notes:   On DUNIT_STATIC builds, falls back to static_val when no
+ *          DIMM is present.
+ * Returns: max(spd_val, static_val) on DUNIT_STATIC builds with DIMMs
+ *          present, otherwise spd_val.
+ */
+u32 ddr3_get_max_val(u32 spd_val, u32 dimm_num, u32 static_val)
+{
+#ifdef DUNIT_STATIC
+	if (dimm_num > 0) {
+		if (spd_val >= static_val)
+			return spd_val;
+		else
+			return static_val;
+	} else {
+		return static_val;
+	}
+#else
+	return spd_val;
+#endif
+}
+
+/*
+ * Name:    ddr3_get_min_val
+ * Desc:    Return the smaller of the SPD-derived and static values.
+ * Args:    spd_val - value computed from SPD
+ *          dimm_num - number of detected DIMMs
+ *          static_val - statically configured value
+ * Notes:   On DUNIT_STATIC builds, falls back to static_val when no
+ *          DIMM is present.
+ * Returns: min(spd_val, static_val) on DUNIT_STATIC builds with DIMMs
+ *          present, otherwise spd_val.
+ */
+u32 ddr3_get_min_val(u32 spd_val, u32 dimm_num, u32 static_val)
+{
+#ifdef DUNIT_STATIC
+	if (dimm_num > 0) {
+		if (spd_val <= static_val)
+			return spd_val;
+		else
+			return static_val;
+	} else
+		return static_val;
+#else
+	return spd_val;
+#endif
+}
+#endif
diff --git a/roms/u-boot/drivers/ddr/marvell/axp/ddr3_write_leveling.c b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_write_leveling.c
new file mode 100644
index 000000000..d4add4477
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/axp/ddr3_write_leveling.c
@@ -0,0 +1,1367 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Marvell International Ltd.
and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <log.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> +#include <linux/delay.h> + +#include "ddr3_hw_training.h" + +/* + * Debug + */ +#define DEBUG_WL_C(s, d, l) \ + DEBUG_WL_S(s); DEBUG_WL_D(d, l); DEBUG_WL_S("\n") +#define DEBUG_WL_FULL_C(s, d, l) \ + DEBUG_WL_FULL_S(s); DEBUG_WL_FULL_D(d, l); DEBUG_WL_FULL_S("\n") + +#ifdef MV_DEBUG_WL +#define DEBUG_WL_S(s) puts(s) +#define DEBUG_WL_D(d, l) printf("%x", d) +#define DEBUG_RL_S(s) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_2, "%s", s) +#define DEBUG_RL_D(d, l) \ + debug_cond(ddr3_get_log_level() >= MV_LOG_LEVEL_2, "%x", d) +#else +#define DEBUG_WL_S(s) +#define DEBUG_WL_D(d, l) +#endif + +#ifdef MV_DEBUG_WL_FULL +#define DEBUG_WL_FULL_S(s) puts(s) +#define DEBUG_WL_FULL_D(d, l) printf("%x", d) +#else +#define DEBUG_WL_FULL_S(s) +#define DEBUG_WL_FULL_D(d, l) +#endif + +#define WL_SUP_EXPECTED_DATA 0x21 +#define WL_SUP_READ_DRAM_ENTRY 0x8 + +static int ddr3_write_leveling_single_cs(u32 cs, u32 freq, int ratio_2to1, + u32 *result, + MV_DRAM_INFO *dram_info); +static void ddr3_write_ctrl_pup_reg(int bc_acc, u32 pup, u32 reg_addr, + u32 data); + +extern u16 odt_static[ODT_OPT][MAX_CS]; +extern u16 odt_dynamic[ODT_OPT][MAX_CS]; +extern u32 wl_sup_pattern[LEN_WL_SUP_PATTERN]; + +/* + * Name: ddr3_write_leveling_hw + * Desc: Execute Write leveling phase by HW + * Args: freq - current sequence frequency + * dram_info - main struct + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. + */ +int ddr3_write_leveling_hw(u32 freq, MV_DRAM_INFO *dram_info) +{ + u32 reg, phase, delay, cs, pup; +#ifdef MV88F67XX + int dpde_flag = 0; +#endif + /* Debug message - Start Read leveling procedure */ + DEBUG_WL_S("DDR3 - Write Leveling - Starting HW WL procedure\n"); + +#ifdef MV88F67XX + /* Dynamic pad issue (BTS669) during WL */ + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR); + if (reg & (1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)) { + dpde_flag = 1; + reg_write(REG_DUNIT_CTRL_LOW_ADDR, + reg & ~(1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)); + } +#endif + + reg = 1 << REG_DRAM_TRAINING_WL_OFFS; + /* Config the retest number */ + reg |= (COUNT_HW_WL << REG_DRAM_TRAINING_RETEST_OFFS); + reg |= (dram_info->cs_ena << (REG_DRAM_TRAINING_CS_OFFS)); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + reg = reg_read(REG_DRAM_TRAINING_SHADOW_ADDR) | + (1 << REG_DRAM_TRAINING_AUTO_OFFS); + reg_write(REG_DRAM_TRAINING_SHADOW_ADDR, reg); + + /* Wait */ + do { + reg = reg_read(REG_DRAM_TRAINING_SHADOW_ADDR) & + (1 << REG_DRAM_TRAINING_AUTO_OFFS); + } while (reg); /* Wait for '0' */ + + reg = reg_read(REG_DRAM_TRAINING_ADDR); + /* Check if Successful */ + if (reg & (1 << REG_DRAM_TRAINING_ERROR_OFFS)) { + /* + * Read results to arrays - Results are required for WL + * High freq Supplement and DQS Centralization + */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + for (pup = 0; + pup < dram_info->num_of_total_pups; + pup++) { + if (pup == dram_info->num_of_std_pups + && dram_info->ecc_ena) + pup = ECC_PUP; + reg = + ddr3_read_pup_reg(PUP_WL_MODE, cs, + pup); + phase = + (reg >> REG_PHY_PHASE_OFFS) & + PUP_PHASE_MASK; + delay = reg & PUP_DELAY_MASK; + dram_info->wl_val[cs][pup][P] = phase; + dram_info->wl_val[cs][pup][D] = delay; + dram_info->wl_val[cs][pup][S] = + WL_HI_FREQ_STATE - 1; + reg = + ddr3_read_pup_reg(PUP_WL_MODE + 0x1, + cs, pup); + dram_info->wl_val[cs][pup][DQS] = + (reg & 0x3F); + } + 
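+			/*
+			 * Result array convention used throughout this file
+			 * (as read back from PUP_WL_MODE just above):
+			 * wl_val[cs][pup][P] = phase, [D] = delay,
+			 * [S] = lock state, [DQS] = DQS delay.
+			 */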
+#ifdef MV_DEBUG_WL + /* Debug message - Print res for cs[i]: cs,PUP,Phase,Delay */ + DEBUG_WL_S("DDR3 - Write Leveling - Write Leveling Cs - "); + DEBUG_WL_D((u32) cs, 1); + DEBUG_WL_S(" Results:\n"); + for (pup = 0; + pup < dram_info->num_of_total_pups; + pup++) { + if (pup == dram_info->num_of_std_pups + && dram_info->ecc_ena) + pup = ECC_PUP; + DEBUG_WL_S("DDR3 - Write Leveling - PUP: "); + DEBUG_WL_D((u32) pup, 1); + DEBUG_WL_S(", Phase: "); + DEBUG_WL_D((u32) + dram_info->wl_val[cs][pup] + [P], 1); + DEBUG_WL_S(", Delay: "); + DEBUG_WL_D((u32) + dram_info->wl_val[cs][pup] + [D], 2); + DEBUG_WL_S("\n"); + } +#endif + } + } + + /* Dynamic pad issue (BTS669) during WL */ +#ifdef MV88F67XX + if (dpde_flag) { + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR) | + (1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS); + reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + } +#endif + + DEBUG_WL_S("DDR3 - Write Leveling - HW WL Ended Successfully\n"); + + return MV_OK; + } else { + DEBUG_WL_S("DDR3 - Write Leveling - HW WL Error\n"); + return MV_FAIL; + } +} + +/* + * Name: ddr3_wl_supplement + * Desc: Write Leveling Supplement + * Args: dram_info - main struct + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. + */ +int ddr3_wl_supplement(MV_DRAM_INFO *dram_info) +{ + u32 cs, cnt, pup_num, sum, phase, delay, max_pup_num, pup, sdram_offset; + u32 tmp_count, ecc, reg; + u32 ddr_width, tmp_pup, idx; + u32 sdram_pup_val, uj; + u32 one_clk_err = 0, align_err = 0, no_err = 0, err = 0, err_n = 0; + u32 sdram_data[LEN_WL_SUP_PATTERN] __aligned(32) = { 0 }; + + ddr_width = dram_info->ddr_width; + no_err = 0; + + DEBUG_WL_S("DDR3 - Write Leveling Hi-Freq Supplement - Starting\n"); + + switch (ddr_width) { + /* Data error from pos-adge to pos-adge */ + case 16: + one_clk_err = 4; + align_err = 4; + break; + case 32: + one_clk_err = 8; + align_err = 8; + break; + case 64: + one_clk_err = 0x10; + align_err = 0x10; + break; + default: + DEBUG_WL_S("Error - bus width!!!\n"); + return MV_FAIL; + } + + /* Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + DEBUG_WL_S("DDR3 - Write Leveling Hi-Freq Supplement - SW Override Enabled\n"); + reg = (1 << REG_DRAM_TRAINING_AUTO_OFFS); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + tmp_count = 0; + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + sum = 0; + /* + * 2 iterations loop: 1)actual WL results 2) fix WL + * if needed + */ + for (cnt = 0; cnt < COUNT_WL_HI_FREQ; cnt++) { + DEBUG_WL_C("COUNT = ", cnt, 1); + for (ecc = 0; ecc < (dram_info->ecc_ena + 1); + ecc++) { + if (ecc) { + DEBUG_WL_S("ECC PUP:\n"); + } else { + DEBUG_WL_S("DATA PUP:\n"); + } + + max_pup_num = + dram_info->num_of_std_pups * (1 - + ecc) + + ecc; + /* ECC Support - Switch ECC Mux on ecc=1 */ + reg = + (reg_read(REG_DRAM_TRAINING_2_ADDR) + & ~(1 << + REG_DRAM_TRAINING_2_ECC_MUX_OFFS)); + reg |= + (dram_info->ecc_ena * + ecc << + REG_DRAM_TRAINING_2_ECC_MUX_OFFS); + reg_write(REG_DRAM_TRAINING_2_ADDR, + reg); + ddr3_reset_phy_read_fifo(); + + /* Write to memory */ + sdram_offset = + tmp_count * (SDRAM_CS_SIZE + 1) + + 0x200; + if (MV_OK != ddr3_dram_sram_burst((u32) + wl_sup_pattern, + sdram_offset, + LEN_WL_SUP_PATTERN)) + return MV_FAIL; + + /* Read from memory */ + if (MV_OK != + ddr3_dram_sram_burst(sdram_offset, + (u32) + sdram_data, + LEN_WL_SUP_PATTERN)) + return MV_FAIL; + + 
/* Print the buffer */ + for (uj = 0; uj < LEN_WL_SUP_PATTERN; + uj++) { + if ((uj % 4 == 0) && (uj != 0)) { + DEBUG_WL_S("\n"); + } + DEBUG_WL_D(sdram_data[uj], + 8); + DEBUG_WL_S(" "); + } + + /* Check pup which DQS/DATA is error */ + for (pup = 0; pup < max_pup_num; pup++) { + /* ECC support - bit 8 */ + pup_num = (ecc) ? ECC_PUP : pup; + if (pup < 4) { /* lower 32 bit */ + tmp_pup = pup; + idx = + WL_SUP_READ_DRAM_ENTRY; + } else { /* higher 32 bit */ + tmp_pup = pup - 4; + idx = + WL_SUP_READ_DRAM_ENTRY + + 1; + } + DEBUG_WL_S("\nCS: "); + DEBUG_WL_D((u32) cs, 1); + DEBUG_WL_S(" PUP: "); + DEBUG_WL_D((u32) pup_num, 1); + DEBUG_WL_S("\n"); + sdram_pup_val = + ((sdram_data[idx] >> + ((tmp_pup) * 8)) & 0xFF); + DEBUG_WL_C("Actual Data = ", + sdram_pup_val, 2); + DEBUG_WL_C("Expected Data = ", + (WL_SUP_EXPECTED_DATA + + pup), 2); + /* + * ALINGHMENT: calculate + * expected data vs actual data + */ + err = + (WL_SUP_EXPECTED_DATA + + pup) - sdram_pup_val; + /* + * CLOCK LONG: calculate + * expected data vs actual data + */ + err_n = + sdram_pup_val - + (WL_SUP_EXPECTED_DATA + + pup); + DEBUG_WL_C("err = ", err, 2); + DEBUG_WL_C("err_n = ", err_n, + 2); + if (err == no_err) { + /* PUP is correct - increment State */ + dram_info->wl_val[cs] + [pup_num] + [S] = 1; + } else if (err_n == one_clk_err) { + /* clock is longer than DQS */ + phase = + ((dram_info->wl_val + [cs] + [pup_num][P] + + WL_HI_FREQ_SHIFT) + % MAX_PHASE_2TO1); + dram_info->wl_val[cs] + [pup_num] + [P] = phase; + delay = + dram_info->wl_val + [cs][pup_num] + [D]; + DEBUG_WL_S("#### Clock is longer than DQS more than one clk cycle ####\n"); + ddr3_write_pup_reg + (PUP_WL_MODE, cs, + pup * (1 - ecc) + + ECC_PUP * ecc, + phase, delay); + } else if (err == align_err) { + /* clock is align to DQS */ + phase = + dram_info->wl_val + [cs][pup_num] + [P]; + delay = + dram_info->wl_val + [cs][pup_num] + [D]; + DEBUG_WL_S("#### Alignment PUPS problem ####\n"); + if ((phase == 0) + || ((phase == 1) + && (delay <= + 0x10))) { + DEBUG_WL_S("#### Warning - Possible Layout Violation (DQS is longer than CLK)####\n"); + } + + phase = 0x0; + delay = 0x0; + dram_info->wl_val[cs] + [pup_num] + [P] = phase; + dram_info->wl_val[cs] + [pup_num] + [D] = delay; + ddr3_write_pup_reg + (PUP_WL_MODE, cs, + pup * (1 - ecc) + + ECC_PUP * ecc, + phase, delay); + } + /* Stop condition for ECC phase */ + pup = (ecc) ? 
max_pup_num : pup; + } + + /* ECC Support - Disable ECC MUX */ + reg = + (reg_read(REG_DRAM_TRAINING_2_ADDR) + & ~(1 << + REG_DRAM_TRAINING_2_ECC_MUX_OFFS)); + reg_write(REG_DRAM_TRAINING_2_ADDR, + reg); + } + } + + for (pup = 0; pup < dram_info->num_of_std_pups; pup++) + sum += dram_info->wl_val[cs][pup][S]; + + if (dram_info->ecc_ena) + sum += dram_info->wl_val[cs][ECC_PUP][S]; + + /* Checks if any pup is not locked after the change */ + if (sum < (WL_HI_FREQ_STATE * (dram_info->num_of_total_pups))) { + DEBUG_WL_C("DDR3 - Write Leveling Hi-Freq Supplement - didn't work for Cs - ", + (u32) cs, 1); + return MV_FAIL; + } + tmp_count++; + } + } + + dram_info->wl_max_phase = 0; + dram_info->wl_min_phase = 10; + + /* + * Read results to arrays - Results are required for DQS Centralization + */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + for (pup = 0; pup < dram_info->num_of_total_pups; pup++) { + if (pup == dram_info->num_of_std_pups + && dram_info->ecc_ena) + pup = ECC_PUP; + reg = ddr3_read_pup_reg(PUP_WL_MODE, cs, pup); + phase = + (reg >> REG_PHY_PHASE_OFFS) & + PUP_PHASE_MASK; + if (phase > dram_info->wl_max_phase) + dram_info->wl_max_phase = phase; + if (phase < dram_info->wl_min_phase) + dram_info->wl_min_phase = phase; + } + } + } + + /* Disable SW override - Must be in a different stage */ + /* [0]=0 - Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + reg = reg_read(REG_DRAM_TRAINING_1_ADDR) | + (1 << REG_DRAM_TRAINING_1_TRNBPOINT_OFFS); + reg_write(REG_DRAM_TRAINING_1_ADDR, reg); + + DEBUG_WL_S("DDR3 - Write Leveling Hi-Freq Supplement - Ended Successfully\n"); + + return MV_OK; +} + +/* + * Name: ddr3_write_leveling_hw_reg_dimm + * Desc: Execute Write leveling phase by HW + * Args: freq - current sequence frequency + * dram_info - main struct + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. 
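+ *          MV_NO_CHANGE is also returned, e.g. when more than two
+ *          chip-selects are populated or when default per-PUP values
+ *          are programmed instead of trained ones.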
+ */ +int ddr3_write_leveling_hw_reg_dimm(u32 freq, MV_DRAM_INFO *dram_info) +{ + u32 reg, phase, delay, cs, pup, pup_num; + __maybe_unused int dpde_flag = 0; + + /* Debug message - Start Read leveling procedure */ + DEBUG_WL_S("DDR3 - Write Leveling - Starting HW WL procedure\n"); + + if (dram_info->num_cs > 2) { + DEBUG_WL_S("DDR3 - Write Leveling - HW WL Ended Successfully\n"); + return MV_NO_CHANGE; + } + + /* If target freq = 400 move clock start point */ + /* Write to control PUP to Control Deskew Regs */ + if (freq <= DDR_400) { + for (pup = 0; pup <= dram_info->num_of_total_pups; pup++) { + /* PUP_DELAY_MASK 0x1F */ + /* reg = 0x0C10001F + (uj << 16); */ + ddr3_write_ctrl_pup_reg(1, pup, CNTRL_PUP_DESKEW + pup, + 0x1F); + } + } + +#ifdef MV88F67XX + /* Dynamic pad issue (BTS669) during WL */ + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR); + if (reg & (1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)) { + dpde_flag = 1; + reg_write(REG_DUNIT_CTRL_LOW_ADDR, + reg & ~(1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)); + } +#endif + + reg = (1 << REG_DRAM_TRAINING_WL_OFFS); + /* Config the retest number */ + reg |= (COUNT_HW_WL << REG_DRAM_TRAINING_RETEST_OFFS); + reg |= (dram_info->cs_ena << (REG_DRAM_TRAINING_CS_OFFS)); + reg_write(REG_DRAM_TRAINING_ADDR, reg); /* 0x15B0 - Training Register */ + + reg = reg_read(REG_DRAM_TRAINING_SHADOW_ADDR) | + (1 << REG_DRAM_TRAINING_AUTO_OFFS); + reg_write(REG_DRAM_TRAINING_SHADOW_ADDR, reg); + + /* Wait */ + do { + reg = reg_read(REG_DRAM_TRAINING_SHADOW_ADDR) & + (1 << REG_DRAM_TRAINING_AUTO_OFFS); + } while (reg); /* Wait for '0' */ + + reg = reg_read(REG_DRAM_TRAINING_ADDR); + /* Check if Successful */ + if (reg & (1 << REG_DRAM_TRAINING_ERROR_OFFS)) { + /* + * Read results to arrays - Results are required for WL High + * freq Supplement and DQS Centralization + */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + for (pup = 0; + pup < dram_info->num_of_total_pups; + pup++) { + if (pup == dram_info->num_of_std_pups + && dram_info->ecc_ena) + pup = ECC_BIT; + reg = + ddr3_read_pup_reg(PUP_WL_MODE, cs, + pup); + phase = + (reg >> REG_PHY_PHASE_OFFS) & + PUP_PHASE_MASK; + delay = reg & PUP_DELAY_MASK; + dram_info->wl_val[cs][pup][P] = phase; + dram_info->wl_val[cs][pup][D] = delay; + if ((phase == 1) && (delay >= 0x1D)) { + /* + * Need to do it here for + * uncorrect WL values + */ + ddr3_write_pup_reg(PUP_WL_MODE, + cs, pup, 0, + 0); + dram_info->wl_val[cs][pup][P] = + 0; + dram_info->wl_val[cs][pup][D] = + 0; + } + dram_info->wl_val[cs][pup][S] = + WL_HI_FREQ_STATE - 1; + reg = + ddr3_read_pup_reg(PUP_WL_MODE + 0x1, + cs, pup); + dram_info->wl_val[cs][pup][DQS] = + (reg & 0x3F); + } +#ifdef MV_DEBUG_WL + /* + * Debug message - Print res for cs[i]: + * cs,PUP,Phase,Delay + */ + DEBUG_WL_S("DDR3 - Write Leveling - Write Leveling Cs - "); + DEBUG_WL_D((u32) cs, 1); + DEBUG_WL_S(" Results:\n"); + for (pup = 0; + pup < dram_info->num_of_total_pups; + pup++) { + DEBUG_WL_S + ("DDR3 - Write Leveling - PUP: "); + DEBUG_WL_D((u32) pup, 1); + DEBUG_WL_S(", Phase: "); + DEBUG_WL_D((u32) + dram_info->wl_val[cs][pup] + [P], 1); + DEBUG_WL_S(", Delay: "); + DEBUG_WL_D((u32) + dram_info->wl_val[cs][pup] + [D], 2); + DEBUG_WL_S("\n"); + } +#endif + } + } + +#ifdef MV88F67XX + /* Dynamic pad issue (BTS669) during WL */ + if (dpde_flag) { + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR) | + (1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS); + reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + } +#endif + DEBUG_WL_S("DDR3 - Write Leveling - HW WL Ended Successfully\n"); + + /* If target freq = 
400 move clock back */ + /* Write to control PUP to Control Deskew Regs */ + if (freq <= DDR_400) { + for (pup = 0; pup <= dram_info->num_of_total_pups; + pup++) { + ddr3_write_ctrl_pup_reg(1, pup, + CNTRL_PUP_DESKEW + pup, 0); + } + } + + return MV_OK; + } else { + /* Configure Each PUP with locked leveling settings */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + for (pup = 0; + pup < dram_info->num_of_total_pups; + pup++) { + /* ECC support - bit 8 */ + pup_num = (pup == dram_info->num_of_std_pups) ? + ECC_BIT : pup; + ddr3_write_pup_reg(PUP_WL_MODE, cs, + pup_num, 0, 0); + } + } + } + + reg_write(REG_DRAM_TRAINING_ADDR, 0); + + /* If target freq = 400 move clock back */ + /* Write to control PUP to Control Deskew Regs */ + if (freq <= DDR_400) { + for (pup = 0; pup <= dram_info->num_of_total_pups; + pup++) { + ddr3_write_ctrl_pup_reg(1, pup, + CNTRL_PUP_DESKEW + pup, 0); + } + } + + DEBUG_WL_S("DDR3 - Write Leveling - HW WL Ended Successfully\n"); + return MV_NO_CHANGE; + } +} + +/* + * Name: ddr3_write_leveling_sw + * Desc: Execute Write leveling phase by SW + * Args: freq - current sequence frequency + * dram_info - main struct + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. + */ +int ddr3_write_leveling_sw(u32 freq, int ratio_2to1, MV_DRAM_INFO *dram_info) +{ + u32 reg, cs, cnt, pup, max_pup_num; + u32 res[MAX_CS]; + max_pup_num = dram_info->num_of_total_pups; + __maybe_unused int dpde_flag = 0; + + /* Debug message - Start Write leveling procedure */ + DEBUG_WL_S("DDR3 - Write Leveling - Starting SW WL procedure\n"); + +#ifdef MV88F67XX + /* Dynamic pad issue (BTS669) during WL */ + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR); + if (reg & (1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)) { + dpde_flag = 1; + reg_write(REG_DUNIT_CTRL_LOW_ADDR, + reg & ~(1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)); + } +#endif + + /* Set Output buffer-off to all CS and correct ODT values */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_ODT_MASK; + reg |= odt_static[dram_info->cs_ena][cs]; + reg |= (1 << REG_DDR3_MR1_OUTBUF_DIS_OFFS); + + /* 0x15D0 - DDR3 MR0 Register */ + reg_write(REG_DDR3_MR1_ADDR, reg); + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + udelay(MRS_DELAY); + } + } + + DEBUG_WL_FULL_S("DDR3 - Write Leveling - Qoff and RTT Values are set for all Cs\n"); + + /* Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + DEBUG_WL_FULL_S("DDR3 - Write Leveling - SW Override Enabled\n"); + + /* Enable PHY write leveling mode */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_WL_MODE_OFFS); + /* [2] = 0 - TrnWLMode - Enable */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + /* Reset WL results arry */ + memset(dram_info->wl_val, 0, sizeof(u32) * MAX_CS * MAX_PUP_NUM * 7); + + /* Loop for each cs */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + DEBUG_WL_FULL_C("DDR3 - Write Leveling - Starting working with Cs - ", + (u32) cs, 1); + /* Refresh X9 current cs */ + DEBUG_WL_FULL_S("DDR3 - Write Leveling 
- Refresh X9\n"); + for (cnt = 0; cnt < COUNT_WL_RFRS; cnt++) { + reg = + REG_SDRAM_OPERATION_CMD_RFRS & ~(1 << + (REG_SDRAM_OPERATION_CS_OFFS + + cs)); + /* [3-0] = 0x2 - refresh, [11-8] - enable current cs */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); /* 0x1418 - SDRAM Operation Register */ + + do { + reg = + ((reg_read + (REG_SDRAM_OPERATION_ADDR)) & + REG_SDRAM_OPERATION_CMD_RFRS_DONE); + } while (reg); /* Wait for '0' */ + } + + /* Configure MR1 in Cs[CsNum] - write leveling on, output buffer on */ + DEBUG_WL_FULL_S("DDR3 - Write Leveling - Configure MR1 for current Cs: WL-on,OB-on\n"); + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_OUTBUF_WL_MASK; + /* Set ODT Values */ + reg &= REG_DDR3_MR1_ODT_MASK; + reg |= odt_static[dram_info->cs_ena][cs]; + /* Enable WL MODE */ + reg |= (1 << REG_DDR3_MR1_WL_ENA_OFFS); + /* [7]=1, [12]=0 - Output Buffer and write leveling enabled */ + reg_write(REG_DDR3_MR1_ADDR, reg); /* 0x15D4 - DDR3 MR1 Register */ + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + udelay(MRS_DELAY); + + /* Write leveling cs[cs] */ + if (MV_OK != + ddr3_write_leveling_single_cs(cs, freq, ratio_2to1, + (u32 *)(res + cs), + dram_info)) { + DEBUG_WL_FULL_C("DDR3 - Write Leveling single Cs - FAILED - Cs - ", + (u32) cs, 1); + for (pup = 0; pup < max_pup_num; pup++) { + if (((res[cs] >> pup) & 0x1) == 0) { + DEBUG_WL_C("Failed Byte : ", + pup, 1); + } + } + return MV_FAIL; + } + + /* Set TrnWLDeUpd - After each CS is done */ + reg = reg_read(REG_TRAINING_WL_ADDR) | + (1 << REG_TRAINING_WL_CS_DONE_OFFS); + /* 0x16AC - Training Write leveling register */ + reg_write(REG_TRAINING_WL_ADDR, reg); + + /* + * Debug message - Finished Write leveling cs[cs] - + * each PUP Fail/Success + */ + DEBUG_WL_FULL_C("DDR3 - Write Leveling - Finished Cs - ", (u32) cs, + 1); + DEBUG_WL_FULL_C("DDR3 - Write Leveling - The Results: 1-PUP locked, 0-PUP failed -", + (u32) res[cs], 3); + + /* + * Configure MR1 in cs[cs] - write leveling off (0), + * output buffer off (1) + */ + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_OUTBUF_WL_MASK; + reg |= (1 << REG_DDR3_MR1_OUTBUF_DIS_OFFS); + /* No need to sort ODT since it is same CS */ + /* 0x15D4 - DDR3 MR1 Register */ + reg_write(REG_DDR3_MR1_ADDR, reg); + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + udelay(MRS_DELAY); + } + } + + /* Disable WL Mode */ + /* [2]=1 - TrnWLMode - Disable */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg |= (1 << REG_DRAM_TRAINING_2_WL_MODE_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + /* Disable SW override - Must be in a different stage */ + /* [0]=0 - Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + /* Set Output buffer-on to all CS and correct ODT values */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_ODT_MASK; + reg &= REG_DDR3_MR1_OUTBUF_WL_MASK; + reg |= 
odt_static[dram_info->cs_ena][cs]; + + /* 0x15D0 - DDR3 MR1 Register */ + reg_write(REG_DDR3_MR1_ADDR, reg); + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + udelay(MRS_DELAY); + } + } + +#ifdef MV88F67XX + /* Dynamic pad issue (BTS669) during WL */ + if (dpde_flag) { + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR) | + (1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS); + reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg); + } +#endif + DEBUG_WL_FULL_S("DDR3 - Write Leveling - Finished WL procedure for all Cs\n"); + + return MV_OK; +} + +#if !defined(MV88F672X) +/* + * Name: ddr3_write_leveling_sw + * Desc: Execute Write leveling phase by SW + * Args: freq - current sequence frequency + * dram_info - main struct + * Notes: + * Returns: MV_OK if success, MV_FAIL if fail. + */ +int ddr3_write_leveling_sw_reg_dimm(u32 freq, int ratio_2to1, + MV_DRAM_INFO *dram_info) +{ + u32 reg, cs, cnt, pup; + u32 res[MAX_CS]; + __maybe_unused int dpde_flag = 0; + + /* Debug message - Start Write leveling procedure */ + DEBUG_WL_S("DDR3 - Write Leveling - Starting SW WL procedure\n"); + +#ifdef MV88F67XX + /* Dynamic pad issue (BTS669) during WL */ + reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR); + if (reg & (1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)) { + dpde_flag = 1; + reg_write(REG_DUNIT_CTRL_LOW_ADDR, + reg & ~(1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS)); + } +#endif + + /* If target freq = 400 move clock start point */ + /* Write to control PUP to Control Deskew Regs */ + if (freq <= DDR_400) { + for (pup = 0; pup <= dram_info->num_of_total_pups; pup++) { + /* PUP_DELAY_MASK 0x1F */ + /* reg = 0x0C10001F + (uj << 16); */ + ddr3_write_ctrl_pup_reg(1, pup, CNTRL_PUP_DESKEW + pup, + 0x1F); + } + } + + /* Set Output buffer-off to all CS and correct ODT values */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_ODT_MASK; + reg |= odt_static[dram_info->cs_ena][cs]; + reg |= (1 << REG_DDR3_MR1_OUTBUF_DIS_OFFS); + + /* 0x15D0 - DDR3 MR0 Register */ + reg_write(REG_DDR3_MR1_ADDR, reg); + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + udelay(MRS_DELAY); + } + } + + DEBUG_WL_FULL_S("DDR3 - Write Leveling - Qoff and RTT Values are set for all Cs\n"); + + /* Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) | + (1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* [0] = 1 - Enable SW override */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + DEBUG_WL_FULL_S("DDR3 - Write Leveling - SW Override Enabled\n"); + + /* Enable PHY write leveling mode */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR) & + ~(1 << REG_DRAM_TRAINING_2_WL_MODE_OFFS); + /* [2] = 0 - TrnWLMode - Enable */ + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + /* Loop for each cs */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + DEBUG_WL_FULL_C("DDR3 - Write Leveling - Starting working with Cs - ", + (u32) cs, 1); + + /* Refresh X9 current cs */ + DEBUG_WL_FULL_S("DDR3 - Write Leveling - Refresh X9\n"); + for (cnt = 0; cnt < COUNT_WL_RFRS; cnt++) { + 
reg = + REG_SDRAM_OPERATION_CMD_RFRS & ~(1 << + (REG_SDRAM_OPERATION_CS_OFFS + + cs)); + /* [3-0] = 0x2 - refresh, [11-8] - enable current cs */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); /* 0x1418 - SDRAM Operation Register */ + + do { + reg = + ((reg_read + (REG_SDRAM_OPERATION_ADDR)) & + REG_SDRAM_OPERATION_CMD_RFRS_DONE); + } while (reg); /* Wait for '0' */ + } + + /* + * Configure MR1 in Cs[CsNum] - write leveling on, + * output buffer on + */ + DEBUG_WL_FULL_S("DDR3 - Write Leveling - Configure MR1 for current Cs: WL-on,OB-on\n"); + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_OUTBUF_WL_MASK; + /* Set ODT Values */ + reg &= REG_DDR3_MR1_ODT_MASK; + reg |= odt_static[dram_info->cs_ena][cs]; + /* Enable WL MODE */ + reg |= (1 << REG_DDR3_MR1_WL_ENA_OFFS); + /* + * [7]=1, [12]=0 - Output Buffer and write leveling + * enabled + */ + /* 0x15D4 - DDR3 MR1 Register */ + reg_write(REG_DDR3_MR1_ADDR, reg); + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + udelay(MRS_DELAY); + + /* Write leveling cs[cs] */ + if (MV_OK != + ddr3_write_leveling_single_cs(cs, freq, ratio_2to1, + (u32 *)(res + cs), + dram_info)) { + DEBUG_WL_FULL_C("DDR3 - Write Leveling single Cs - FAILED - Cs - ", + (u32) cs, 1); + return MV_FAIL; + } + + /* Set TrnWLDeUpd - After each CS is done */ + reg = reg_read(REG_TRAINING_WL_ADDR) | + (1 << REG_TRAINING_WL_CS_DONE_OFFS); + /* 0x16AC - Training Write leveling register */ + reg_write(REG_TRAINING_WL_ADDR, reg); + + /* + * Debug message - Finished Write leveling cs[cs] - + * each PUP Fail/Success + */ + DEBUG_WL_FULL_C("DDR3 - Write Leveling - Finished Cs - ", (u32) cs, + 1); + DEBUG_WL_FULL_C("DDR3 - Write Leveling - The Results: 1-PUP locked, 0-PUP failed -", + (u32) res[cs], 3); + + /* Configure MR1 in cs[cs] - write leveling off (0), output buffer off (1) */ + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_OUTBUF_WL_MASK; + reg |= (1 << REG_DDR3_MR1_OUTBUF_DIS_OFFS); + /* No need to sort ODT since it is same CS */ + /* 0x15D4 - DDR3 MR1 Register */ + reg_write(REG_DDR3_MR1_ADDR, reg); + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + ~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs)); + /* + * [3-0] = 0x4 - MR1 Command, [11-8] - + * enable current cs + */ + /* 0x1418 - SDRAM Operation Register */ + reg_write(REG_SDRAM_OPERATION_ADDR, reg); + + udelay(MRS_DELAY); + } + } + + /* Disable WL Mode */ + /* [2]=1 - TrnWLMode - Disable */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg |= (1 << REG_DRAM_TRAINING_2_WL_MODE_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + /* Disable SW override - Must be in a different stage */ + /* [0]=0 - Enable SW override */ + reg = reg_read(REG_DRAM_TRAINING_2_ADDR); + reg &= ~(1 << REG_DRAM_TRAINING_2_SW_OVRD_OFFS); + /* 0x15B8 - Training SW 2 Register */ + reg_write(REG_DRAM_TRAINING_2_ADDR, reg); + + /* Set Output buffer-on to all CS and correct ODT values */ + for (cs = 0; cs < MAX_CS; cs++) { + if (dram_info->cs_ena & (1 << cs)) { + reg = reg_read(REG_DDR3_MR1_ADDR) & + REG_DDR3_MR1_ODT_MASK; + reg &= REG_DDR3_MR1_OUTBUF_WL_MASK; + reg |= odt_static[dram_info->cs_ena][cs]; + + /* 0x15D0 - DDR3 MR1 Register */ + reg_write(REG_DDR3_MR1_ADDR, reg); + /* Issue MRS Command to current cs */ + reg = REG_SDRAM_OPERATION_CMD_MR1 & + 
~(1 << (REG_SDRAM_OPERATION_CS_OFFS + cs));
+			/*
+			 * [3-0] = 0x4 - MR1 Command, [11-8] -
+			 * enable current cs
+			 */
+			/* 0x1418 - SDRAM Operation Register */
+			reg_write(REG_SDRAM_OPERATION_ADDR, reg);
+
+			udelay(MRS_DELAY);
+		}
+	}
+
+#ifdef MV88F67XX
+	/* Dynamic pad issue (BTS669) during WL */
+	if (dpde_flag) {
+		reg = reg_read(REG_DUNIT_CTRL_LOW_ADDR) |
+			(1 << REG_DUNIT_CTRL_LOW_DPDE_OFFS);
+		reg_write(REG_DUNIT_CTRL_LOW_ADDR, reg);
+	}
+#endif
+
+	/* If target freq = 400 move clock back */
+	/* Write to control PUP to Control Deskew Regs */
+	if (freq <= DDR_400) {
+		for (pup = 0; pup <= dram_info->num_of_total_pups; pup++) {
+			ddr3_write_ctrl_pup_reg(1, pup, CNTRL_PUP_DESKEW + pup,
+						0);
+		}
+	}
+
+	DEBUG_WL_FULL_S("DDR3 - Write Leveling - Finished WL procedure for all Cs\n");
+	return MV_OK;
+}
+#endif
+
+/*
+ * Name:     ddr3_write_leveling_single_cs
+ * Desc:     Execute Write leveling for a single chip select
+ * Args:     cs          - current chip select
+ *           freq        - current sequence frequency
+ *           ratio_2to1  - 1 if the controller runs in 2:1 clock ratio mode
+ *           result      - res array
+ *           dram_info   - main struct
+ * Notes:
+ * Returns:  MV_OK if success, MV_FAIL if fail.
+ */
+static int ddr3_write_leveling_single_cs(u32 cs, u32 freq, int ratio_2to1,
+					 u32 *result, MV_DRAM_INFO *dram_info)
+{
+	u32 reg, pup_num, delay, phase, phaseMax, max_pup_num, pup,
+		max_pup_mask;
+	u32 flag[MAX_PUP_NUM] = { 0 };
+
+	max_pup_num = dram_info->num_of_total_pups;
+	*result = 0;
+
+	DEBUG_WL_FULL_C("DDR3 - Write Leveling Single Cs - WL for Cs - ",
+			(u32) cs, 1);
+
+	switch (max_pup_num) {
+	case 2:
+		max_pup_mask = 0x3;
+		break;
+	case 4:
+		max_pup_mask = 0xf;
+		DEBUG_WL_C("max_pup_mask = ", max_pup_mask, 3);
+		break;
+	case 5:
+		max_pup_mask = 0x1f;
+		DEBUG_WL_C("max_pup_mask = ", max_pup_mask, 3);
+		break;
+	case 8:
+		max_pup_mask = 0xff;
+		DEBUG_WL_C("max_pup_mask = ", max_pup_mask, 3);
+		break;
+	case 9:
+		max_pup_mask = 0x1ff;
+		DEBUG_WL_C("max_pup_mask = ", max_pup_mask, 3);
+		break;
+	default:
+		DEBUG_WL_C("ddr3_write_leveling_single_cs wrong max_pup_num = ",
+			   max_pup_num, 3);
+		return MV_FAIL;
+	}
+
+	/* CS ODT Override */
+	reg = reg_read(REG_SDRAM_ODT_CTRL_HIGH_ADDR) &
+		REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK;
+	reg |= (REG_SDRAM_ODT_CTRL_HIGH_OVRD_ENA << (2 * cs));
+	/* Set 0x3 - Enable ODT on the current cs and disable on other cs */
+	/* 0x1498 - SDRAM ODT Control high */
+	reg_write(REG_SDRAM_ODT_CTRL_HIGH_ADDR, reg);
+
+	DEBUG_WL_FULL_S("DDR3 - Write Leveling Single Cs - ODT Asserted for current Cs\n");
+
+	/* tWLMRD Delay */
+	/* Delay of minimum 40 Dram clock cycles - 20 Tclk cycles */
+	udelay(1);
+
+	/* [1:0] - current cs number */
+	reg = (reg_read(REG_TRAINING_WL_ADDR) & REG_TRAINING_WL_CS_MASK) | cs;
+	reg |= (1 << REG_TRAINING_WL_UPD_OFFS);	/* [2] - trnWLCsUpd */
+	/* 0x16AC - Training Write leveling register */
+	reg_write(REG_TRAINING_WL_ADDR, reg);
+
+	/* Broadcast to all PUPs: Reset DQS phase, reset leveling delay */
+	ddr3_write_pup_reg(PUP_WL_MODE, cs, PUP_BC, 0, 0);
+
+	/* Seek Edge */
+	DEBUG_WL_FULL_S("DDR3 - Write Leveling Single Cs - Seek Edge - Current Cs\n");
+
+	/* Drive DQS high for one cycle - All data PUPs */
+	DEBUG_WL_FULL_S("DDR3 - Write Leveling Single Cs - Seek Edge - Driving DQS high for one cycle\n");
+	if (!ratio_2to1) {
+		reg = (reg_read(REG_TRAINING_WL_ADDR) &
+		       REG_TRAINING_WL_RATIO_MASK) | REG_TRAINING_WL_1TO1;
+	} else {
+		reg = (reg_read(REG_TRAINING_WL_ADDR) &
+		       REG_TRAINING_WL_RATIO_MASK) | REG_TRAINING_WL_2TO1;
+	}
+	/* 0x16AC - Training Write leveling register */
+	reg_write(REG_TRAINING_WL_ADDR, reg);
+
+	/* Wait tWLdelay */
+	do {
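+		/*
+		 * The training logic sets this flag once the DQS pulse has
+		 * been driven and the write-leveling delay has expired.
+		 */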
+ /* [29] - trnWLDelayExp */ + reg = (reg_read(REG_TRAINING_WL_ADDR)) & + REG_TRAINING_WL_DELAYEXP_MASK; + } while (reg == 0x0); /* Wait for '1' */ + + /* Read WL res */ + reg = (reg_read(REG_TRAINING_WL_ADDR) >> REG_TRAINING_WL_RESULTS_OFFS) & + REG_TRAINING_WL_RESULTS_MASK; + /* [28:20] - TrnWLResult */ + + if (!ratio_2to1) /* Different phase options for 2:1 or 1:1 modes */ + phaseMax = MAX_PHASE_1TO1; + else + phaseMax = MAX_PHASE_2TO1; + + DEBUG_WL_FULL_S("DDR3 - Write Leveling Single Cs - Seek Edge - Shift DQS + Octet Leveling\n"); + + /* Shift DQS + Octet leveling */ + for (phase = 0; phase < phaseMax; phase++) { + for (delay = 0; delay < MAX_DELAY; delay++) { + /* Broadcast to all PUPs: DQS phase,leveling delay */ + ddr3_write_pup_reg(PUP_WL_MODE, cs, PUP_BC, phase, + delay); + + udelay(1); /* Delay of 3 Tclk cycles */ + + DEBUG_WL_FULL_S("DDR3 - Write Leveling Single Cs - Seek Edge: Phase = "); + DEBUG_WL_FULL_D((u32) phase, 1); + DEBUG_WL_FULL_S(", Delay = "); + DEBUG_WL_FULL_D((u32) delay, 1); + DEBUG_WL_FULL_S("\n"); + + /* Drive DQS high for one cycle - All data PUPs */ + if (!ratio_2to1) { + reg = (reg_read(REG_TRAINING_WL_ADDR) & + REG_TRAINING_WL_RATIO_MASK) | + REG_TRAINING_WL_1TO1; + } else { + reg = (reg_read(REG_TRAINING_WL_ADDR) & + REG_TRAINING_WL_RATIO_MASK) | + REG_TRAINING_WL_2TO1; + } + reg_write(REG_TRAINING_WL_ADDR, reg); /* 0x16AC */ + + /* Wait tWLdelay */ + do { + reg = (reg_read(REG_TRAINING_WL_ADDR)) & + REG_TRAINING_WL_DELAYEXP_MASK; + } while (reg == 0x0); /* [29] Wait for '1' */ + + /* Read WL res */ + reg = reg_read(REG_TRAINING_WL_ADDR); + reg = (reg >> REG_TRAINING_WL_RESULTS_OFFS) & + REG_TRAINING_WL_RESULTS_MASK; /* [28:20] */ + + DEBUG_WL_FULL_C("DDR3 - Write Leveling Single Cs - Seek Edge: Results = ", + (u32) reg, 3); + + /* Update State machine */ + for (pup = 0; pup < (max_pup_num); pup++) { + /* ECC support - bit 8 */ + pup_num = (pup == dram_info->num_of_std_pups) ? + ECC_BIT : pup; + if (dram_info->wl_val[cs][pup][S] == 0) { + /* Update phase to PUP */ + dram_info->wl_val[cs][pup][P] = phase; + /* Update delay to PUP */ + dram_info->wl_val[cs][pup][D] = delay; + } + + if (((reg >> pup_num) & 0x1) == 0) + flag[pup_num] = 1; + + if (((reg >> pup_num) & 0x1) + && (flag[pup_num] == 1) + && (dram_info->wl_val[cs][pup][S] == 0)) { + /* + * If the PUP is locked now and in last + * counter states + */ + /* Go to next state */ + dram_info->wl_val[cs][pup][S] = 1; + /* Set res */ + *result = *result | (1 << pup_num); + } + } + + /* If all locked - Break the loops - Finished */ + if (*result == max_pup_mask) { + phase = phaseMax; + delay = MAX_DELAY; + DEBUG_WL_S("DDR3 - Write Leveling Single Cs - Seek Edge: All Locked\n"); + } + } + } + + /* Debug message - Print res for cs[i]: cs,PUP,Phase,Delay */ + DEBUG_WL_C("DDR3 - Write Leveling - Results for CS - ", (u32) cs, 1); + for (pup = 0; pup < (max_pup_num); pup++) { + DEBUG_WL_S("DDR3 - Write Leveling - PUP: "); + DEBUG_WL_D((u32) pup, 1); + DEBUG_WL_S(", Phase: "); + DEBUG_WL_D((u32) dram_info->wl_val[cs][pup][P], 1); + DEBUG_WL_S(", Delay: "); + DEBUG_WL_D((u32) dram_info->wl_val[cs][pup][D], 2); + DEBUG_WL_S("\n"); + } + + /* Check if some not locked and return error */ + if (*result != max_pup_mask) { + DEBUG_WL_S("DDR3 - Write Leveling - ERROR - not all PUPS were locked\n"); + return MV_FAIL; + } + + /* Configure Each PUP with locked leveling settings */ + for (pup = 0; pup < (max_pup_num); pup++) { + /* ECC support - bit 8 */ + pup_num = (pup == dram_info->num_of_std_pups) ? 
ECC_BIT : pup; + phase = dram_info->wl_val[cs][pup][P]; + delay = dram_info->wl_val[cs][pup][D]; + ddr3_write_pup_reg(PUP_WL_MODE, cs, pup_num, phase, delay); + } + + /* CS ODT Override */ + reg = reg_read(REG_SDRAM_ODT_CTRL_HIGH_ADDR) & + REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK; + /* 0x1498 - SDRAM ODT Control high */ + reg_write(REG_SDRAM_ODT_CTRL_HIGH_ADDR, reg); + + return MV_OK; +} + +/* + * Perform DDR3 Control PUP Indirect Write + */ +static void ddr3_write_ctrl_pup_reg(int bc_acc, u32 pup, u32 reg_addr, u32 data) +{ + u32 reg = 0; + + /* Store value for write */ + reg = (data & 0xFFFF); + + /* Set bit 26 for control PHY access */ + reg |= (1 << REG_PHY_CNTRL_OFFS); + + /* Configure BC or UC access to PHYs */ + if (bc_acc == 1) + reg |= (1 << REG_PHY_BC_OFFS); + else + reg |= (pup << REG_PHY_PUP_OFFS); + + /* Set PHY register address to write to */ + reg |= (reg_addr << REG_PHY_CS_OFFS); + + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + reg |= REG_PHY_REGISTRY_FILE_ACCESS_OP_WR; + reg_write(REG_PHY_REGISTRY_FILE_ACCESS_ADDR, reg); /* 0x16A0 */ + + do { + reg = (reg_read(REG_PHY_REGISTRY_FILE_ACCESS_ADDR)) & + REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE; + } while (reg); /* Wait for '0' to mark the end of the transaction */ +} diff --git a/roms/u-boot/drivers/ddr/marvell/axp/xor.c b/roms/u-boot/drivers/ddr/marvell/axp/xor.c new file mode 100644 index 000000000..76aea9668 --- /dev/null +++ b/roms/u-boot/drivers/ddr/marvell/axp/xor.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) Marvell International Ltd. and its affiliates + */ + +#include <common.h> +#include <i2c.h> +#include <log.h> +#include <spl.h> +#include <asm/io.h> +#include <asm/arch/cpu.h> +#include <asm/arch/soc.h> + +#include "xor.h" +#include "xor_regs.h" + +static u32 xor_regs_ctrl_backup; +static u32 xor_regs_base_backup[MAX_CS]; +static u32 xor_regs_mask_backup[MAX_CS]; + +static int mv_xor_cmd_set(u32 chan, int command); +static int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl); + +void mv_sys_xor_init(MV_DRAM_INFO *dram_info) +{ + u32 reg, ui, base, cs_count; + + xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0)); + for (ui = 0; ui < MAX_CS; ui++) + xor_regs_base_backup[ui] = reg_read(XOR_BASE_ADDR_REG(0, ui)); + for (ui = 0; ui < MAX_CS; ui++) + xor_regs_mask_backup[ui] = reg_read(XOR_SIZE_MASK_REG(0, ui)); + + reg = 0; + for (ui = 0; ui < (dram_info->num_cs + 1); ui++) { + /* Enable Window x for each CS */ + reg |= (0x1 << (ui)); + /* Enable Window x for each CS */ + reg |= (0x3 << ((ui * 2) + 16)); + } + + reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg); + + /* Last window - Base - 0x40000000, Attribute 0x1E - SRAM */ + base = (SRAM_BASE & 0xFFFF0000) | 0x1E00; + reg_write(XOR_BASE_ADDR_REG(0, dram_info->num_cs), base); + /* Last window - Size - 64 MB */ + reg_write(XOR_SIZE_MASK_REG(0, dram_info->num_cs), 0x03FF0000); + + cs_count = 0; + for (ui = 0; ui < MAX_CS; ui++) { + if (dram_info->cs_ena & (1 << ui)) { + /* + * Window x - Base - 0x00000000, Attribute 0x0E - DRAM + */ + base = 0; + switch (ui) { + case 0: + base |= 0xE00; + break; + case 1: + base |= 0xD00; + break; + case 2: + base |= 0xB00; + break; + case 3: + base |= 0x700; + break; + } + + reg_write(XOR_BASE_ADDR_REG(0, cs_count), base); + + /* Window x - Size - 256 MB */ + reg_write(XOR_SIZE_MASK_REG(0, cs_count), 0x0FFF0000); + cs_count++; + } + } + + mv_xor_hal_init(1); + + return; +} + +void mv_sys_xor_finish(void) +{ + u32 ui; + + reg_write(XOR_WINDOW_CTRL_REG(0, 0), xor_regs_ctrl_backup); + for (ui = 0; ui < 
MAX_CS; ui++)
+		reg_write(XOR_BASE_ADDR_REG(0, ui), xor_regs_base_backup[ui]);
+	for (ui = 0; ui < MAX_CS; ui++)
+		reg_write(XOR_SIZE_MASK_REG(0, ui), xor_regs_mask_backup[ui]);
+
+	reg_write(XOR_ADDR_OVRD_REG(0, 0), 0);
+}
+
+/*
+ * mv_xor_hal_init - Initialize XOR engine
+ *
+ * DESCRIPTION:
+ *       This function initializes the XOR unit.
+ * INPUT:
+ *       chan_num - number of XOR channels to initialize.
+ *
+ * OUTPUT:
+ *       None.
+ *
+ * RETURN:
+ *       None.
+ */
+void mv_xor_hal_init(u32 chan_num)
+{
+	u32 i;
+
+	/* Abort any XOR activity & set default configuration */
+	for (i = 0; i < chan_num; i++) {
+		mv_xor_cmd_set(i, MV_STOP);
+		mv_xor_ctrl_set(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
+				(4 << XEXCR_DST_BURST_LIMIT_OFFS) |
+				(4 << XEXCR_SRC_BURST_LIMIT_OFFS));
+	}
+}
+
+/*
+ * mv_xor_ctrl_set - Set XOR channel control registers
+ *
+ * DESCRIPTION:
+ *       This function updates the configuration register of the given
+ *       XOR channel.
+ *
+ * INPUT:
+ *       chan     - the channel number
+ *       xor_ctrl - new control register value
+ *
+ * OUTPUT:
+ *       None.
+ *
+ * RETURN:
+ *       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+ * NOTE:
+ *       This function does not modify the OperationMode field of the
+ *       control register.
+ */
+static int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
+{
+	u32 val;
+
+	/* Update the XOR Engine [0..1] Configuration Registers (XExCR) */
+	val = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)))
+		& XEXCR_OPERATION_MODE_MASK;
+	xor_ctrl &= ~XEXCR_OPERATION_MODE_MASK;
+	xor_ctrl |= val;
+	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), xor_ctrl);
+
+	return MV_OK;
+}
+
+int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
+		    u32 init_val_high, u32 init_val_low)
+{
+	u32 tmp;
+
+	/* Parameter checking */
+	if (chan >= MV_XOR_MAX_CHAN)
+		return MV_BAD_PARAM;
+
+	if (MV_ACTIVE == mv_xor_state_get(chan))
+		return MV_BUSY;
+
+	if ((block_size < XEXBSR_BLOCK_SIZE_MIN_VALUE) ||
+	    (block_size > XEXBSR_BLOCK_SIZE_MAX_VALUE))
+		return MV_BAD_PARAM;
+
+	/* Set the operation mode to Memory Init */
+	tmp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
+	tmp &= ~XEXCR_OPERATION_MODE_MASK;
+	tmp |= XEXCR_OPERATION_MODE_MEM_INIT;
+	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), tmp);
+
+	/*
+	 * Update the start_ptr field in XOR Engine [0..1] Destination Pointer
+	 * Register (XExDPR0)
+	 */
+	reg_write(XOR_DST_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), start_ptr);
+
+	/*
+	 * Update the BlockSize field in the XOR Engine[0..1] Block Size
+	 * Registers (XExBSR)
+	 */
+	reg_write(XOR_BLOCK_SIZE_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+		  block_size);
+
+	/*
+	 * Update the field InitValL in the XOR Engine Initial Value Register
+	 * Low (XEIVRL)
+	 */
+	reg_write(XOR_INIT_VAL_LOW_REG(XOR_UNIT(chan)), init_val_low);
+
+	/*
+	 * Update the field InitValH in the XOR Engine Initial Value Register
+	 * High (XEIVRH)
+	 */
+	reg_write(XOR_INIT_VAL_HIGH_REG(XOR_UNIT(chan)), init_val_high);
+
+	/* Start transfer */
+	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+		    XEXACTR_XESTART_MASK);
+
+	return MV_OK;
+}
+
+/*
+ * mv_xor_transfer - Transfer data from source to destination in one of
+ *                   three modes (XOR, CRC32 or DMA)
+ *
+ * DESCRIPTION:
+ *       This function initiates an XOR channel, according to the function
+ *       parameters, in order to perform an XOR, CRC32 or DMA transaction.
+ *       To gain maximum performance the user is asked to observe the
+ *       following restrictions:
+ *       1) The selected engine is available (not busy).
+ *       2) This module does not take into consideration CPU MMU issues.
+ *          In order for the XOR engine to access the appropriate source
+ *          and destination, address parameters must be given in system
+ *          physical mode.
+ *       3) This API does not take care of cache coherency issues. The source,
+ *          destination and, in case of a chain, the descriptor list are
+ *          assumed to be cache coherent.
+ *       4) Parameters validity. For example, the size parameter must not
+ *          exceed the maximum byte count of descriptor mode (16M or 64K).
+ *
+ * INPUT:
+ *       chan          - XOR channel number. See MV_XOR_CHANNEL enumerator.
+ *       xor_type      - One of three: XOR, CRC32 and DMA operations.
+ *       xor_chain_ptr - address of chain pointer
+ *
+ * OUTPUT:
+ *       None.
+ *
+ * RETURN:
+ *       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+ */
+int mv_xor_transfer(u32 chan, int xor_type, u32 xor_chain_ptr)
+{
+	u32 tmp;
+
+	/* Parameter checking */
+	if (chan >= MV_XOR_MAX_CHAN) {
+		debug("%s: ERR. Invalid chan num %d\n", __func__, chan);
+		return MV_BAD_PARAM;
+	}
+
+	if (MV_ACTIVE == mv_xor_state_get(chan)) {
+		debug("%s: ERR. Channel is already active\n", __func__);
+		return MV_BUSY;
+	}
+
+	if (0x0 == xor_chain_ptr) {
+		debug("%s: ERR. xor_chain_ptr is NULL pointer\n", __func__);
+		return MV_BAD_PARAM;
+	}
+
+	/* Read configuration register and mask the operation mode field */
+	tmp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
+	tmp &= ~XEXCR_OPERATION_MODE_MASK;
+
+	switch (xor_type) {
+	case MV_XOR:
+		if (0 != (xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK)) {
+			debug("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
+			      __func__);
+			return MV_BAD_PARAM;
+		}
+
+		/* Set the operation mode to XOR */
+		tmp |= XEXCR_OPERATION_MODE_XOR;
+		break;
+
+	case MV_DMA:
+		if (0 != (xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK)) {
+			debug("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
+			      __func__);
+			return MV_BAD_PARAM;
+		}
+
+		/* Set the operation mode to DMA */
+		tmp |= XEXCR_OPERATION_MODE_DMA;
+		break;
+
+	case MV_CRC32:
+		if (0 != (xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK)) {
+			debug("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
+			      __func__);
+			return MV_BAD_PARAM;
+		}
+
+		/* Set the operation mode to CRC32 */
+		tmp |= XEXCR_OPERATION_MODE_CRC;
+		break;
+
+	default:
+		return MV_BAD_PARAM;
+	}
+
+	/* Write the operation mode to the register */
+	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), tmp);
+
+	/*
+	 * Update the NextDescPtr field in the XOR Engine [0..1] Next Descriptor
+	 * Pointer Register (XExNDPR)
+	 */
+	reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+		  xor_chain_ptr);
+
+	/* Start transfer */
+	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+		    XEXACTR_XESTART_MASK);
+
+	return MV_OK;
+}
+
+/*
+ * mv_xor_state_get - Get XOR channel state.
+ *
+ * DESCRIPTION:
+ *       XOR channel activity state can be active, idle or paused.
+ *       This function returns the channel activity state.
+ *
+ * INPUT:
+ *       chan - the channel number
+ *
+ * OUTPUT:
+ *       None.
+ *
+ * RETURN:
+ *       MV_IDLE            - If the engine is idle.
+ *       MV_ACTIVE          - If the engine is busy.
+ *       MV_PAUSED          - If the engine is paused.
+ *       MV_UNDEFINED_STATE - If the engine state is undefined or there is
+ *                            no such engine.
+ */
+int mv_xor_state_get(u32 chan)
+{
+	u32 state;
+
+	/* Parameter checking */
+	if (chan >= MV_XOR_MAX_CHAN) {
+		debug("%s: ERR. Invalid chan num %d\n", __func__, chan);
+		return MV_UNDEFINED_STATE;
+	}
+
+	/* Read the current state */
+	state = reg_read(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
+	state &= XEXACTR_XESTATUS_MASK;
+
+	/* Return the state */
+	switch (state) {
+	case XEXACTR_XESTATUS_IDLE:
+		return MV_IDLE;
+	case XEXACTR_XESTATUS_ACTIVE:
+		return MV_ACTIVE;
+	case XEXACTR_XESTATUS_PAUSED:
+		return MV_PAUSED;
+	}
+
+	return MV_UNDEFINED_STATE;
+}
+
+/*
+ * mv_xor_cmd_set - Set command of XOR channel
+ *
+ * DESCRIPTION:
+ *       An XOR channel can be started, stopped, paused and restarted.
+ *       Pause can be set only if the channel is active.
+ *       Start can be set only if the channel is idle or paused.
+ *       Restart can be set only if the channel is paused.
+ *       Stop can be set only if the channel is active.
+ *
+ * INPUT:
+ *       chan    - The channel number
+ *       command - The command type (start, stop, restart, pause)
+ *
+ * OUTPUT:
+ *       None.
+ *
+ * RETURN:
+ *       MV_OK on success, MV_BAD_PARAM on erroneous parameter, MV_ERROR on
+ *       undefined XOR engine mode
+ */
+static int mv_xor_cmd_set(u32 chan, int command)
+{
+	int state;
+
+	/* Parameter checking */
+	if (chan >= MV_XOR_MAX_CHAN) {
+		debug("%s: ERR. Invalid chan num %d\n", __func__, chan);
+		return MV_BAD_PARAM;
+	}
+
+	/* Get the current state */
+	state = mv_xor_state_get(chan);
+
+	/* Command is start and current state is idle */
+	if ((command == MV_START) && (state == MV_IDLE)) {
+		reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XESTART_MASK);
+		return MV_OK;
+	}
+	/* Command is stop and current state is active */
+	else if ((command == MV_STOP) && (state == MV_ACTIVE)) {
+		reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XESTOP_MASK);
+		return MV_OK;
+	}
+	/* Command is pause and current state is active */
+	else if ((command == MV_PAUSE) && (state == MV_ACTIVE)) {
+		reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XEPAUSE_MASK);
+		return MV_OK;
+	}
+	/* Command is restart and current state is paused */
+	else if ((command == MV_RESTART) && (state == MV_PAUSED)) {
+		reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
+			    XEXACTR_XERESTART_MASK);
+		return MV_OK;
+	}
+	/* Command is stop and current state is idle */
+	else if ((command == MV_STOP) && (state == MV_IDLE))
+		return MV_OK;
+
+	/* Illegal command */
+	debug("%s: ERR. Illegal command\n", __func__);
+
+	return MV_BAD_PARAM;
+}
diff --git a/roms/u-boot/drivers/ddr/marvell/axp/xor.h b/roms/u-boot/drivers/ddr/marvell/axp/xor.h
new file mode 100644
index 000000000..a7c6ae840
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/axp/xor.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd. and its affiliates
+ */
+
+#ifndef __XOR_H
+#define __XOR_H
+
+#include "ddr3_hw_training.h"
+
+#define MV_XOR_MAX_CHAN		4 /* total channels for all units together */
+
+/*
+ * This enumerator describes the type of functionality the XOR channel
+ * can have while using the same data structures.
+ */
+enum xor_type {
+	MV_XOR,		/* XOR channel functions as XOR accelerator */
+	MV_DMA,		/* XOR channel functions as IDMA channel */
+	MV_CRC32	/* XOR channel functions as CRC 32 calculator */
+};
+
+/*
+ * This enumerator describes the set of commands that can be applied on
+ * an engine (e.g. IDMA, XOR).
 Applying a command depends on the current
+ * status (see the mv_state enumerator):
+ * Start can be applied only when status is IDLE
+ * Stop can be applied only when status is IDLE, ACTIVE or PAUSED
+ * Pause can be applied only when status is ACTIVE
+ * Restart can be applied only when status is PAUSED
+ */
+enum mv_command {
+	MV_START,	/* Start */
+	MV_STOP,	/* Stop */
+	MV_PAUSE,	/* Pause */
+	MV_RESTART	/* Restart */
+};
+
+/*
+ * This enumerator describes the set of state conditions.
+ * Transitions between states are restricted.
+ */
+enum mv_state {
+	MV_IDLE,
+	MV_ACTIVE,
+	MV_PAUSED,
+	MV_UNDEFINED_STATE
+};
+
+/* XOR descriptor structure for CRC and DMA descriptors */
+struct crc_dma_desc {
+	u32 status;		/* Successful descriptor execution indication */
+	u32 crc32_result;	/* Result of CRC-32 calculation */
+	u32 desc_cmd;		/* Type of operation to be carried out on the data */
+	u32 next_desc_ptr;	/* Next descriptor address pointer */
+	u32 byte_cnt;		/* Size of source block part represented by the descriptor */
+	u32 dst_addr;		/* Destination block address pointer (not used in CRC32) */
+	u32 src_addr0;		/* Mode: Source Block address pointer */
+	u32 src_addr1;		/* Mode: Source Block address pointer */
+} __packed;
+
+void mv_xor_hal_init(u32 chan_num);
+int mv_xor_state_get(u32 chan);
+void mv_sys_xor_init(MV_DRAM_INFO *dram_info);
+void mv_sys_xor_finish(void);
+int mv_xor_transfer(u32 chan, int xor_type, u32 xor_chain_ptr);
+int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
+		    u32 init_val_high, u32 init_val_low);
+
+#endif /* __XOR_H */
diff --git a/roms/u-boot/drivers/ddr/marvell/axp/xor_regs.h b/roms/u-boot/drivers/ddr/marvell/axp/xor_regs.h
new file mode 100644
index 000000000..d779e5641
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/marvell/axp/xor_regs.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Marvell International Ltd.
and its affiliates + */ + +#ifndef __XOR_REGS_H +#define __XOR_REGS_H + +/* + * For controllers that have two XOR units, then chans 2 & 3 will be mapped + * to channels 0 & 1 of unit 1 + */ +#define XOR_UNIT(chan) ((chan) >> 1) +#define XOR_CHAN(chan) ((chan) & 1) + +#ifdef CONFIG_ARMADA_MSYS +#define MV_XOR_REGS_OFFSET(unit) (0xF0800) +#else +#define MV_XOR_REGS_OFFSET(unit) (0x60900) +#endif +#define MV_XOR_REGS_BASE(unit) (MV_XOR_REGS_OFFSET(unit)) + +/* XOR Engine Control Register Map */ +#define XOR_CHANNEL_ARBITER_REG(unit) (MV_XOR_REGS_BASE(unit)) +#define XOR_CONFIG_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x10 + ((chan) * 4))) +#define XOR_ACTIVATION_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x20 + ((chan) * 4))) + +/* XOR Engine Interrupt Register Map */ +#define XOR_CAUSE_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x30) +#define XOR_MASK_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x40) +#define XOR_ERROR_CAUSE_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x50) +#define XOR_ERROR_ADDR_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x60) + +/* XOR Engine Descriptor Register Map */ +#define XOR_NEXT_DESC_PTR_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x200 + ((chan) * 4))) +#define XOR_CURR_DESC_PTR_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x210 + ((chan) * 4))) +#define XOR_BYTE_COUNT_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x220 + ((chan) * 4))) + +#define XOR_DST_PTR_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x2B0 + ((chan) * 4))) +#define XOR_BLOCK_SIZE_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x2C0 + ((chan) * 4))) +#define XOR_TIMER_MODE_CTRL_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x2D0) +#define XOR_TIMER_MODE_INIT_VAL_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x2D4) +#define XOR_TIMER_MODE_CURR_VAL_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x2D8) +#define XOR_INIT_VAL_LOW_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x2E0) +#define XOR_INIT_VAL_HIGH_REG(unit) (MV_XOR_REGS_BASE(unit) + 0x2E4) + +/* XOR register fileds */ + +/* XOR Engine [0..1] Configuration Registers (XExCR) */ +#define XEXCR_OPERATION_MODE_OFFS (0) +#define XEXCR_OPERATION_MODE_MASK (7 << XEXCR_OPERATION_MODE_OFFS) +#define XEXCR_OPERATION_MODE_XOR (0 << XEXCR_OPERATION_MODE_OFFS) +#define XEXCR_OPERATION_MODE_CRC (1 << XEXCR_OPERATION_MODE_OFFS) +#define XEXCR_OPERATION_MODE_DMA (2 << XEXCR_OPERATION_MODE_OFFS) +#define XEXCR_OPERATION_MODE_ECC (3 << XEXCR_OPERATION_MODE_OFFS) +#define XEXCR_OPERATION_MODE_MEM_INIT (4 << XEXCR_OPERATION_MODE_OFFS) + +#define XEXCR_SRC_BURST_LIMIT_OFFS (4) +#define XEXCR_SRC_BURST_LIMIT_MASK (7 << XEXCR_SRC_BURST_LIMIT_OFFS) +#define XEXCR_DST_BURST_LIMIT_OFFS (8) +#define XEXCR_DST_BURST_LIMIT_MASK (7 << XEXCR_DST_BURST_LIMIT_OFFS) +#define XEXCR_DRD_RES_SWP_OFFS (12) +#define XEXCR_DRD_RES_SWP_MASK (1 << XEXCR_DRD_RES_SWP_OFFS) +#define XEXCR_DWR_REQ_SWP_OFFS (13) +#define XEXCR_DWR_REQ_SWP_MASK (1 << XEXCR_DWR_REQ_SWP_OFFS) +#define XEXCR_DES_SWP_OFFS (14) +#define XEXCR_DES_SWP_MASK (1 << XEXCR_DES_SWP_OFFS) +#define XEXCR_REG_ACC_PROTECT_OFFS (15) +#define XEXCR_REG_ACC_PROTECT_MASK (1 << XEXCR_REG_ACC_PROTECT_OFFS) + +/* XOR Engine [0..1] Activation Registers (XExACTR) */ +#define XEXACTR_XESTART_OFFS (0) +#define XEXACTR_XESTART_MASK (1 << XEXACTR_XESTART_OFFS) +#define XEXACTR_XESTOP_OFFS (1) +#define XEXACTR_XESTOP_MASK (1 << XEXACTR_XESTOP_OFFS) +#define XEXACTR_XEPAUSE_OFFS (2) +#define XEXACTR_XEPAUSE_MASK (1 << XEXACTR_XEPAUSE_OFFS) +#define XEXACTR_XERESTART_OFFS (3) +#define XEXACTR_XERESTART_MASK (1 << XEXACTR_XERESTART_OFFS) +#define XEXACTR_XESTATUS_OFFS (4) +#define XEXACTR_XESTATUS_MASK (3 << 
XEXACTR_XESTATUS_OFFS) +#define XEXACTR_XESTATUS_IDLE (0 << XEXACTR_XESTATUS_OFFS) +#define XEXACTR_XESTATUS_ACTIVE (1 << XEXACTR_XESTATUS_OFFS) +#define XEXACTR_XESTATUS_PAUSED (2 << XEXACTR_XESTATUS_OFFS) + +/* XOR Engine [0..1] Destination Pointer Register (XExDPR0) */ +#define XEXDPR_DST_PTR_OFFS (0) +#define XEXDPR_DST_PTR_MASK (0xFFFFFFFF << XEXDPR_DST_PTR_OFFS) +#define XEXDPR_DST_PTR_XOR_MASK (0x3F) +#define XEXDPR_DST_PTR_DMA_MASK (0x1F) +#define XEXDPR_DST_PTR_CRC_MASK (0x1F) + +/* XOR Engine[0..1] Block Size Registers (XExBSR) */ +#define XEXBSR_BLOCK_SIZE_OFFS (0) +#define XEXBSR_BLOCK_SIZE_MASK (0xFFFFFFFF << XEXBSR_BLOCK_SIZE_OFFS) +#define XEXBSR_BLOCK_SIZE_MIN_VALUE (128) +#define XEXBSR_BLOCK_SIZE_MAX_VALUE (0xFFFFFFFF) + +/* XOR Engine Address Decoding Register Map */ +#define XOR_WINDOW_CTRL_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + (0x240 + ((chan) * 4))) +#define XOR_BASE_ADDR_REG(unit, win) (MV_XOR_REGS_BASE(unit) + (0x250 + ((win) * 4))) +#define XOR_SIZE_MASK_REG(unit, win) (MV_XOR_REGS_BASE(unit) + (0x270 + ((win) * 4))) +#define XOR_HIGH_ADDR_REMAP_REG(unit, win) (MV_XOR_REGS_BASE(unit) + (0x290 + ((win) * 4))) +#define XOR_ADDR_OVRD_REG(unit, win) (MV_XOR_REGS_BASE(unit) + (0x2A0 + ((win) * 4))) + +#endif /* __XOR_REGS_H */ diff --git a/roms/u-boot/drivers/ddr/microchip/Makefile b/roms/u-boot/drivers/ddr/microchip/Makefile new file mode 100644 index 000000000..3e2e93ba2 --- /dev/null +++ b/roms/u-boot/drivers/ddr/microchip/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0+ +# +# Copyright (C) 2015 Microchip Technology Inc. +obj-$(CONFIG_MACH_PIC32) += ddr2.o diff --git a/roms/u-boot/drivers/ddr/microchip/ddr2.c b/roms/u-boot/drivers/ddr/microchip/ddr2.c new file mode 100644 index 000000000..149b6071c --- /dev/null +++ b/roms/u-boot/drivers/ddr/microchip/ddr2.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * (c) 2015 Paul Thacker <paul.thacker@microchip.com> + * + */ +#include <common.h> +#include <wait_bit.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <mach/pic32.h> +#include <mach/ddr.h> + +#include "ddr2_regs.h" +#include "ddr2_timing.h" + +/* init DDR2 Phy */ +void ddr2_phy_init(void) +{ + struct ddr2_phy_regs *ddr2_phy; + u32 pad_ctl; + + ddr2_phy = ioremap(PIC32_DDR2P_BASE, sizeof(*ddr2_phy)); + + /* PHY_DLL_RECALIB */ + writel(DELAY_START_VAL(3) | DISABLE_RECALIB(0) | + RECALIB_CNT(0x10), &ddr2_phy->dll_recalib); + + /* PHY_PAD_CTRL */ + pad_ctl = ODT_SEL | ODT_EN | DRIVE_SEL(0) | + ODT_PULLDOWN(2) | ODT_PULLUP(3) | + EXTRA_OEN_CLK(0) | NOEXT_DLL | + DLR_DFT_WRCMD | HALF_RATE | + DRVSTR_PFET(0xe) | DRVSTR_NFET(0xe) | + RCVR_EN | PREAMBLE_DLY(2); + writel(pad_ctl, &ddr2_phy->pad_ctrl); + + /* SCL_CONFIG_0 */ + writel(SCL_BURST8 | SCL_DDR_CONNECTED | SCL_RCAS_LAT(RL) | + SCL_ODTCSWW, &ddr2_phy->scl_config_1); + + /* SCL_CONFIG_1 */ + writel(SCL_CSEN | SCL_WCAS_LAT(WL), &ddr2_phy->scl_config_2); + + /* SCL_LAT */ + writel(SCL_CAPCLKDLY(3) | SCL_DDRCLKDLY(4), &ddr2_phy->scl_latency); +} + +/* start phy self calibration logic */ +static int ddr2_phy_calib_start(void) +{ + struct ddr2_phy_regs *ddr2_phy; + + ddr2_phy = ioremap(PIC32_DDR2P_BASE, sizeof(*ddr2_phy)); + + /* DDR Phy SCL Start */ + writel(SCL_START | SCL_EN, &ddr2_phy->scl_start); + + /* Wait for SCL for data byte to pass */ + return wait_for_bit_le32(&ddr2_phy->scl_start, SCL_LUBPASS, + true, CONFIG_SYS_HZ, false); +} + +/* DDR2 Controller initialization */ + +/* Target Agent Arbiter */ +static void ddr_set_arbiter(struct ddr2_ctrl_regs *ctrl, + const 
struct ddr2_arbiter_params *const param)
+{
+	int i;
+
+	for (i = 0; i < NUM_AGENTS; i++) {
+		/* set min burst size */
+		writel(i * MIN_LIM_WIDTH, &ctrl->tsel);
+		writel(param->min_limit, &ctrl->minlim);
+
+		/* set request period (4 * req_period clocks) */
+		writel(i * RQST_PERIOD_WIDTH, &ctrl->tsel);
+		writel(param->req_period, &ctrl->reqprd);
+
+		/* set number of bursts accepted */
+		writel(i * MIN_CMDACPT_WIDTH, &ctrl->tsel);
+		writel(param->min_cmd_acpt, &ctrl->mincmd);
+	}
+}
+
+const struct ddr2_arbiter_params *__weak board_get_ddr_arbiter_params(void)
+{
+	/* default arbiter parameters */
+	static const struct ddr2_arbiter_params arb_params[] = {
+		{ .min_limit = 0x1f, .req_period = 0xff, .min_cmd_acpt = 0x04,},
+		{ .min_limit = 0x1f, .req_period = 0xff, .min_cmd_acpt = 0x10,},
+		{ .min_limit = 0x1f, .req_period = 0xff, .min_cmd_acpt = 0x10,},
+		{ .min_limit = 0x04, .req_period = 0xff, .min_cmd_acpt = 0x04,},
+		{ .min_limit = 0x04, .req_period = 0xff, .min_cmd_acpt = 0x04,},
+	};
+
+	return &arb_params[0];
+}
+
+static void host_load_cmd(struct ddr2_ctrl_regs *ctrl, u32 cmd_idx,
+			  u32 hostcmd2, u32 hostcmd1, u32 delay)
+{
+	u32 hc_delay;
+
+	hc_delay = max_t(u32, DIV_ROUND_UP(delay, T_CK), 2) - 2;
+	writel(hostcmd1, &ctrl->cmd10[cmd_idx]);
+	writel((hostcmd2 & 0x7ff) | (hc_delay << 11), &ctrl->cmd20[cmd_idx]);
+}
+
+/* init DDR2 Controller */
+void ddr2_ctrl_init(void)
+{
+	u32 wr2prech, rd2prech, wr2rd, wr2rd_cs;
+	u32 ras2ras, ras2cas, prech2ras, temp;
+	const struct ddr2_arbiter_params *arb_params;
+	struct ddr2_ctrl_regs *ctrl;
+
+	ctrl = ioremap(PIC32_DDR2C_BASE, sizeof(*ctrl));
+
+	/* The PIC32 DDR2 controller always works in half-rate mode */
+	writel(HALF_RATE_MODE, &ctrl->memwidth);
+
+	/* Set arbiter configuration per target */
+	arb_params = board_get_ddr_arbiter_params();
+	ddr_set_arbiter(ctrl, arb_params);
+
+	/* Address configuration, address map {CS, ROW, BA, COL} */
+	writel((ROW_ADDR_RSHIFT | (BA_RSHFT << 8) | (CS_ADDR_RSHIFT << 16) |
+	       (COL_HI_RSHFT << 24) | (SB_PRI << 29) |
+	       (EN_AUTO_PRECH << 30)), &ctrl->memcfg0);
+
+	writel(ROW_ADDR_MASK, &ctrl->memcfg1);
+	writel(COL_HI_MASK, &ctrl->memcfg2);
+	writel(COL_LO_MASK, &ctrl->memcfg3);
+	writel(BA_MASK | (CS_ADDR_MASK << 8), &ctrl->memcfg4);
+
+	/* Refresh Config */
+	writel(REFCNT_CLK(DIV_ROUND_UP(T_RFI, T_CK_CTRL) - 2) |
+	       REFDLY_CLK(DIV_ROUND_UP(T_RFC_MIN, T_CK_CTRL) - 2) |
+	       MAX_PEND_REF(7),
+	       &ctrl->refcfg);
+
+	/* Power Config */
+	writel(ECC_EN(0) | ERR_CORR_EN(0) | EN_AUTO_PWR_DN(0) |
+	       EN_AUTO_SELF_REF(3) | PWR_DN_DLY(8) |
+	       SELF_REF_DLY(17) | PRECH_PWR_DN_ONLY(0),
+	       &ctrl->pwrcfg);
+
+	/* Delay Config */
+	wr2rd = max_t(u32, DIV_ROUND_UP(T_WTR, T_CK_CTRL),
+		      DIV_ROUND_UP(T_WTR_TCK, 2)) + WL + BL;
+	wr2rd_cs = max_t(u32, wr2rd - 1, 3);
+	wr2prech = DIV_ROUND_UP(T_WR, T_CK_CTRL) + WL + BL;
+	rd2prech = max_t(u32, DIV_ROUND_UP(T_RTP, T_CK_CTRL),
+			 DIV_ROUND_UP(T_RTP_TCK, 2)) + BL - 2;
+	ras2ras = max_t(u32, DIV_ROUND_UP(T_RRD, T_CK_CTRL),
+			DIV_ROUND_UP(T_RRD_TCK, 2)) - 1;
+	ras2cas = DIV_ROUND_UP(T_RCD, T_CK_CTRL) - 1;
+	prech2ras = DIV_ROUND_UP(T_RP, T_CK_CTRL) - 1;
+
+	writel(((wr2rd & 0x0f) |
+	       ((wr2rd_cs & 0x0f) << 4) |
+	       ((BL - 1) << 8) |
+	       (BL << 12) |
+	       ((BL - 1) << 16) |
+	       ((BL - 1) << 20) |
+	       ((BL + 2) << 24) |
+	       ((RL - WL + 3) << 28)), &ctrl->dlycfg0);
+
+	writel(((T_CKE_TCK - 1) |
+	       (((DIV_ROUND_UP(T_DLLK, 2) - 2) & 0xff) << 8) |
+	       ((T_CKE_TCK - 1) << 16) |
+	       ((max_t(u32, T_XP_TCK, T_CKE_TCK) - 1) << 20) |
+	       ((wr2prech >> 4) << 26) |
+	       ((wr2rd >> 4) << 27) |
+	       ((wr2rd_cs >> 4) << 28) |
+	       (((RL + 5) >> 4) << 29) |
+	       ((DIV_ROUND_UP(T_DLLK, 2) >> 8) << 30)), &ctrl->dlycfg1);
+
+	writel((DIV_ROUND_UP(T_RP, T_CK_CTRL) |
+	       (rd2prech << 8) |
+	       ((wr2prech & 0x0f) << 12) |
+	       (ras2ras << 16) |
+	       (ras2cas << 20) |
+	       (prech2ras << 24) |
+	       ((RL + 3) << 28)), &ctrl->dlycfg2);
+
+	writel(((DIV_ROUND_UP(T_RAS_MIN, T_CK_CTRL) - 1) |
+	       ((DIV_ROUND_UP(T_RC, T_CK_CTRL) - 1) << 8) |
+	       ((DIV_ROUND_UP(T_FAW, T_CK_CTRL) - 1) << 16)),
+	       &ctrl->dlycfg3);
+
+	/* ODT Config */
+	writel(0x0, &ctrl->odtcfg);
+	writel(BIT(16), &ctrl->odtencfg);
+	writel(ODTRDLY(RL - 3) | ODTWDLY(WL - 3) | ODTRLEN(2) | ODTWLEN(3),
+	       &ctrl->odtcfg);
+
+	/* Transfer Configuration */
+	writel(NXTDATRQDLY(2) | NXDATAVDLY(4) | RDATENDLY(2) |
+	       MAX_BURST(3) | (7 << 28) | BIG_ENDIAN(0),
+	       &ctrl->xfercfg);
+
+	/* DRAM Initialization */
+	/* CKE high after reset and wait 400 nsec */
+	host_load_cmd(ctrl, 0, 0, IDLE_NOP, 400000);
+
+	/* issue precharge all command */
+	host_load_cmd(ctrl, 1, 0x04, PRECH_ALL_CMD, T_RP + T_CK);
+
+	/* initialize EMR2 */
+	host_load_cmd(ctrl, 2, 0x200, LOAD_MODE_CMD, T_MRD_TCK * T_CK);
+
+	/* initialize EMR3 */
+	host_load_cmd(ctrl, 3, 0x300, LOAD_MODE_CMD, T_MRD_TCK * T_CK);
+
+	/*
+	 * RDQS disable, DQSB enable, OCD exit, 150 ohm termination,
+	 * AL=0, DLL enable
+	 */
+	host_load_cmd(ctrl, 4, 0x100,
+		      LOAD_MODE_CMD | (0x40 << 24), T_MRD_TCK * T_CK);
+	/*
+	 * PD fast exit, WR REC = T_WR in clocks -1,
+	 * DLL reset, CAS = RL, burst = 4
+	 */
+	temp = ((DIV_ROUND_UP(T_WR, T_CK) - 1) << 1) | 1;
+	host_load_cmd(ctrl, 5, temp, LOAD_MODE_CMD | (RL << 28) | (2 << 24),
+		      T_MRD_TCK * T_CK);
+
+	/* issue precharge all command */
+	host_load_cmd(ctrl, 6, 4, PRECH_ALL_CMD, T_RP + T_CK);
+
+	/* issue refresh command */
+	host_load_cmd(ctrl, 7, 0, REF_CMD, T_RFC_MIN);
+
+	/* issue refresh command */
+	host_load_cmd(ctrl, 8, 0, REF_CMD, T_RFC_MIN);
+
+	/* Mode register programming as before without DLL reset */
+	host_load_cmd(ctrl, 9, temp, LOAD_MODE_CMD | (RL << 28) | (3 << 24),
+		      T_MRD_TCK * T_CK);
+
+	/* extended mode register same as before with OCD default */
+	host_load_cmd(ctrl, 10, 0x103, LOAD_MODE_CMD | (0xc << 24),
+		      T_MRD_TCK * T_CK);
+
+	/* extended mode register same as before with OCD exit */
+	host_load_cmd(ctrl, 11, 0x100, LOAD_MODE_CMD | (0x4 << 28),
+		      140 * T_CK);
+
+	writel(CMD_VALID | NUMHOSTCMD(11), &ctrl->cmdissue);
+
+	/* start memory initialization */
+	writel(INIT_START, &ctrl->memcon);
+
+	/* wait for all host cmds to be transmitted */
+	wait_for_bit_le32(&ctrl->cmdissue, CMD_VALID, false,
+			  CONFIG_SYS_HZ, false);
+
+	/* inform all cmds issued, ready for normal operation */
+	writel(INIT_START | INIT_DONE, &ctrl->memcon);
+
+	/* perform phy calibration */
+	if (ddr2_phy_calib_start())
+		printf("ddr2: phy calib failed\n");
+}
+
+phys_size_t ddr2_calculate_size(void)
+{
+	u32 temp;
+
+	temp = 1 << (COL_BITS + BA_BITS + ROW_BITS);
+	/* 16-bit data width between controller and DIMM */
+	temp = temp * CS_BITS * (16 / 8);
+	return (phys_size_t)temp;
+}
diff --git a/roms/u-boot/drivers/ddr/microchip/ddr2_regs.h b/roms/u-boot/drivers/ddr/microchip/ddr2_regs.h
new file mode 100644
index 000000000..0734585dc
--- /dev/null
+++ b/roms/u-boot/drivers/ddr/microchip/ddr2_regs.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (c) 2015 Purna Chandra Mandal <purna.mandal@microchip.com>
+ *
+ */
+
+#ifndef __MICROCHIP_DDR2_REGS_H
+#define __MICROCHIP_DDR2_REGS_H
+
+#include <linux/bitops.h>
+
+/* DDR2 Controller */
+struct ddr2_ctrl_regs {
+	u32 tsel;
+	u32 minlim;
+	u32 reqprd;
+	u32 mincmd;
+	u32 memcon;
+	u32
memcfg0; + u32 memcfg1; + u32 memcfg2; + u32 memcfg3; + u32 memcfg4; + u32 refcfg; + u32 pwrcfg; + u32 dlycfg0; + u32 dlycfg1; + u32 dlycfg2; + u32 dlycfg3; + u32 odtcfg; + u32 xfercfg; + u32 cmdissue; + u32 odtencfg; + u32 memwidth; + u32 unused[11]; + u32 cmd10[16]; + u32 cmd20[16]; +}; + +/* Arbiter Config */ +#define MIN_LIM_WIDTH 5 +#define RQST_PERIOD_WIDTH 8 +#define MIN_CMDACPT_WIDTH 8 + +/* Refresh Config */ +#define REFCNT_CLK(x) (x) +#define REFDLY_CLK(x) ((x) << 16) +#define MAX_PEND_REF(x) ((x) << 24) + +/* Power Config */ +#define PRECH_PWR_DN_ONLY(x) ((x) << 22) +#define SELF_REF_DLY(x) ((x) << 12) +#define PWR_DN_DLY(x) ((x) << 4) +#define EN_AUTO_SELF_REF(x) ((x) << 3) +#define EN_AUTO_PWR_DN(x) ((x) << 2) +#define ERR_CORR_EN(x) ((x) << 1) +#define ECC_EN(x) (x) + +/* Memory Width */ +#define HALF_RATE_MODE BIT(3) + +/* Delay Config */ +#define ODTWLEN(x) ((x) << 20) +#define ODTRLEN(x) ((x) << 16) +#define ODTWDLY(x) ((x) << 12) +#define ODTRDLY(x) ((x) << 8) + +/* Xfer Config */ +#define BIG_ENDIAN(x) ((x) << 31) +#define MAX_BURST(x) ((x) << 24) +#define RDATENDLY(x) ((x) << 16) +#define NXDATAVDLY(x) ((x) << 4) +#define NXTDATRQDLY(x) ((x) << 0) + +/* Host Commands */ +#define IDLE_NOP 0x00ffffff +#define PRECH_ALL_CMD 0x00fff401 +#define REF_CMD 0x00fff801 +#define LOAD_MODE_CMD 0x00fff001 +#define CKE_LOW 0x00ffeffe + +#define NUM_HOST_CMDS 12 + +/* Host CMD Issue */ +#define CMD_VALID BIT(4) +#define NUMHOSTCMD(x) (x) + +/* Memory Control */ +#define INIT_DONE BIT(1) +#define INIT_START BIT(0) + +/* Address Control */ +#define EN_AUTO_PRECH 0 +#define SB_PRI 1 + +/* DDR2 Phy Register */ +struct ddr2_phy_regs { + u32 scl_start; + u32 unused1[2]; + u32 scl_latency; + u32 unused2[2]; + u32 scl_config_1; + u32 scl_config_2; + u32 pad_ctrl; + u32 dll_recalib; +}; + +/* PHY PAD CONTROL */ +#define ODT_SEL BIT(0) +#define ODT_EN BIT(1) +#define DRIVE_SEL(x) ((x) << 2) +#define ODT_PULLDOWN(x) ((x) << 4) +#define ODT_PULLUP(x) ((x) << 6) +#define EXTRA_OEN_CLK(x) ((x) << 8) +#define NOEXT_DLL BIT(9) +#define DLR_DFT_WRCMD BIT(13) +#define HALF_RATE BIT(14) +#define DRVSTR_PFET(x) ((x) << 16) +#define DRVSTR_NFET(x) ((x) << 20) +#define RCVR_EN BIT(28) +#define PREAMBLE_DLY(x) ((x) << 29) + +/* PHY DLL RECALIBRATE */ +#define RECALIB_CNT(x) ((x) << 8) +#define DISABLE_RECALIB(x) ((x) << 26) +#define DELAY_START_VAL(x) ((x) << 28) + +/* PHY SCL CONFIG1 */ +#define SCL_BURST8 BIT(0) +#define SCL_DDR_CONNECTED BIT(1) +#define SCL_RCAS_LAT(x) ((x) << 4) +#define SCL_ODTCSWW BIT(24) + +/* PHY SCL CONFIG2 */ +#define SCL_CSEN BIT(0) +#define SCL_WCAS_LAT(x) ((x) << 8) + +/* PHY SCL Latency */ +#define SCL_CAPCLKDLY(x) ((x) << 0) +#define SCL_DDRCLKDLY(x) ((x) << 4) + +/* PHY SCL START */ +#define SCL_START BIT(28) +#define SCL_EN BIT(26) +#define SCL_LUBPASS (BIT(1) | BIT(0)) + +#endif /* __MICROCHIP_DDR2_REGS_H */ diff --git a/roms/u-boot/drivers/ddr/microchip/ddr2_timing.h b/roms/u-boot/drivers/ddr/microchip/ddr2_timing.h new file mode 100644 index 000000000..8bf1616ec --- /dev/null +++ b/roms/u-boot/drivers/ddr/microchip/ddr2_timing.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (c) 2015 Purna Chandra Mandal <purna.mandal@microchip.com> + * + */ + +#ifndef __MICROCHIP_DDR2_TIMING_H +#define __MICROCHIP_DDR2_TIMING_H + +/* MPLL freq is 400MHz */ +#define T_CK 2500 /* 2500 psec */ +#define T_CK_CTRL (T_CK * 2) + +/* Burst length in cycles */ +#define BL 2 +/* default CAS latency for all speed grades */ +#define RL 5 +/* default write latency for all speed 
grades = CL-1 */
+#define WL		4
+
+/* From Micron MT47H64M16HR-3 data sheet */
+#define T_RFC_MIN	127500	/* psec */
+#define T_WR		15000	/* psec */
+#define T_RP		12500	/* psec */
+#define T_RCD		12500	/* psec */
+#define T_RRD		7500	/* psec */
+/* T_RRD_TCK is minimum of 2 clk periods, regardless of freq */
+#define T_RRD_TCK	2
+#define T_WTR		7500	/* psec */
+/* T_WTR_TCK is minimum of 2 clk periods, regardless of freq */
+#define T_WTR_TCK	2
+#define T_RTP		7500	/* psec */
+#define T_RTP_TCK	(BL / 2)
+#define T_XP_TCK	2	/* clocks */
+#define T_CKE_TCK	3	/* clocks */
+#define T_XSNR		(T_RFC_MIN + 10000) /* psec */
+#define T_DLLK		200	/* clocks */
+#define T_RAS_MIN	45000	/* psec */
+#define T_RC		57500	/* psec */
+#define T_FAW		35000	/* psec */
+#define T_MRD_TCK	2	/* clocks */
+#define T_RFI		7800000	/* psec */
+
+/* DDR Addressing */
+#define COL_BITS	10
+#define BA_BITS		3
+#define ROW_BITS	13
+#define CS_BITS		1
+
+/* DDR Addressing scheme: {CS, ROW, BA, COL} */
+#define COL_HI_RSHFT	0
+#define COL_HI_MASK	0
+#define COL_LO_MASK	((1 << COL_BITS) - 1)
+
+#define BA_RSHFT	COL_BITS
+#define BA_MASK		((1 << BA_BITS) - 1)
+
+#define ROW_ADDR_RSHIFT	(BA_RSHFT + BA_BITS)
+#define ROW_ADDR_MASK	((1 << ROW_BITS) - 1)
+
+#define CS_ADDR_RSHIFT	0
+#define CS_ADDR_MASK	0
+
+#endif /* __MICROCHIP_DDR2_TIMING_H */
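The timing macros above feed the clock-count arithmetic in ddr2_ctrl_init(). As a sanity check, the fragment below is a minimal standalone sketch (not part of the driver; DIV_ROUND_UP and MAX_U32 are local stand-ins for the kernel's DIV_ROUND_UP() and max_t() helpers) that reproduces the wr2rd and wr2prech calculations for these Micron MT47H64M16HR-3 numbers.

	#include <stdio.h>

	/* Values copied from ddr2_timing.h above */
	#define T_CK		2500		/* psec */
	#define T_CK_CTRL	(T_CK * 2)	/* controller clock (half rate) */
	#define BL		2
	#define WL		4
	#define T_WTR		7500		/* psec */
	#define T_WTR_TCK	2
	#define T_WR		15000		/* psec */

	/* Local stand-ins for the kernel helpers */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define MAX_U32(a, b)		((a) > (b) ? (a) : (b))

	int main(void)
	{
		/* write-to-read: larger of tWTR in controller clocks or its clock floor, plus WL + BL */
		unsigned int wr2rd = MAX_U32(DIV_ROUND_UP(T_WTR, T_CK_CTRL),
					     DIV_ROUND_UP(T_WTR_TCK, 2)) + WL + BL;
		/* write-to-precharge: tWR plus write latency and burst time */
		unsigned int wr2prech = DIV_ROUND_UP(T_WR, T_CK_CTRL) + WL + BL;

		/* expect 8 and 9 controller clocks respectively */
		printf("wr2rd = %u, wr2prech = %u\n", wr2rd, wr2prech);
		return 0;
	}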
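Similarly, for the Marvell XOR engine API declared in xor.h above: memory-init mode programs a destination pointer, a block size and a 64-bit fill pattern, then starts the channel. A hypothetical caller (the function name, channel number and region size are illustrative only; MV_OK/MV_FAIL come from ddr3_hw_training.h) would follow an init-then-poll pattern along these lines.

	/* Hypothetical sketch: fill 1 MiB at dst with a 64-bit pattern on channel 0 */
	static int xor_fill_region(u32 dst, u32 pattern_hi, u32 pattern_lo)
	{
		/* mv_xor_mem_init() programs the engine and starts the transfer */
		if (mv_xor_mem_init(0, dst, 0x100000, pattern_hi, pattern_lo) != MV_OK)
			return MV_FAIL;

		/* Busy-wait until the channel leaves the ACTIVE state */
		while (mv_xor_state_get(0) == MV_ACTIVE)
			;

		return MV_OK;
	}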