/*
* Copyright Altera Corporation (C) 2012-2015
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include <errno.h>
#include "sequencer.h"
static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
(struct socfpga_sdr_rw_load_manager *)
(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
(struct socfpga_sdr_rw_load_jump_manager *)
(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static struct socfpga_sdr_reg_file *sdr_reg_file =
(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
(struct socfpga_sdr_scc_mgr *)
(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
(struct socfpga_phy_mgr_cfg *)
(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static struct socfpga_data_mgr *data_mgr =
(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static struct socfpga_sdr_ctrl *sdr_ctrl =
(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
const struct socfpga_sdram_rw_mgr_config *rwcfg;
const struct socfpga_sdram_io_config *iocfg;
const struct socfpga_sdram_misc_config *misccfg;
#define DELTA_D 1
/*
* In order to reduce ROM size, most of the selectable calibration steps are
* decided at compile time based on the user's calibration mode selection,
* as captured by the STATIC_CALIB_STEPS selection below.
*
* However, to support simulation-time selection of fast simulation mode, where
* we skip everything except the bare minimum, we need a few of the steps to
* be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
* check, which is based on the rtl-supplied value, or we dynamically compute
* the value to use based on the dynamically-chosen calibration mode
*/
#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0
#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
STATIC_SKIP_DELAY_LOOPS)
/* calibration steps requested by the rtl */
static u16 dyn_calib_steps;
/*
* To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
* instead of static, we use boolean logic to select between
* non-skip and skip values
*
* The mask is set to include all bits when not-skipping, but is
* zero when skipping
*/
static u16 skip_delay_mask; /* mask off bits when skipping/not-skipping */
#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
((non_skip_value) & skip_delay_mask)
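/*
 * Illustration of the macro above: with skip_delay_mask = 0xffff (not
 * skipping), SKIP_DELAY_LOOP_VALUE_OR_ZERO(0xff) evaluates to 0xff; with
 * skip_delay_mask = 0 (skipping), it evaluates to 0, collapsing the
 * delay loops.
 */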
static struct gbl_type *gbl;
static struct param_type *param;
static void set_failing_group_stage(u32 group, u32 stage,
u32 substage)
{
/*
* Only set the global stage if there has not been any other
* failing group
*/
if (gbl->error_stage == CAL_STAGE_NIL) {
gbl->error_substage = substage;
gbl->error_stage = stage;
gbl->error_group = group;
}
}
static void reg_file_set_group(u16 set_group)
{
clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
set_sub_stage &= 0xff;
clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}
/**
* phy_mgr_initialize() - Initialize PHY Manager
*
* Initialize PHY Manager.
*/
static void phy_mgr_initialize(void)
{
u32 ratio;

debug("%s:%d\n", __func__, __LINE__);
/* Calibration has control over path to memory */
/*
* In Hard PHY this is a 2-bit control:
* 0: AFI Mux Select
* 1: DDIO Mux Select
*/
writel(0x3, &phy_mgr_cfg->mux_sel);
/* USER memory clock is not yet stable as we begin initialization */
writel(0, &phy_mgr_cfg->reset_mem_stbl);
/* USER calibration status all set to zero */
writel(0, &phy_mgr_cfg->cal_status);
writel(0, &phy_mgr_cfg->cal_debug_info);
/* Init params only if we do NOT skip calibration. */
if ((dyn_calib_steps & CALIB_SKIP_ALL) == CALIB_SKIP_ALL)
return;
ratio = rwcfg->mem_dq_per_read_dqs /
rwcfg->mem_virtual_groups_per_read_dqs;
param->read_correct_mask_vg = (1 << ratio) - 1;
param->write_correct_mask_vg = (1 << ratio) - 1;
param->read_correct_mask = (1 << rwcfg->mem_dq_per_read_dqs) - 1;
param->write_correct_mask = (1 << rwcfg->mem_dq_per_write_dqs) - 1;
}
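/*
 * Worked example (hypothetical configuration): with
 * mem_dq_per_read_dqs = mem_dq_per_write_dqs = 8 and
 * mem_virtual_groups_per_read_dqs = 1, ratio = 8 and all four masks
 * above evaluate to (1 << 8) - 1 = 0xff, i.e. one bit per DQ pin.
 */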
/**
* set_rank_and_odt_mask() - Set Rank and ODT mask
* @rank: Rank mask
* @odt_mode: ODT mode, OFF or READ_WRITE
*
* Set Rank and ODT mask (On-Die Termination).
*/
static void set_rank_and_odt_mask(const u32 rank, const u32 odt_mode)
{
u32 odt_mask_0 = 0;
u32 odt_mask_1 = 0;
u32 cs_and_odt_mask;

if (odt_mode == RW_MGR_ODT_MODE_OFF) {
odt_mask_0 = 0x0;
odt_mask_1 = 0x0;
} else { /* RW_MGR_ODT_MODE_READ_WRITE */
switch (rwcfg->mem_number_of_ranks) {
case 1: /* 1 Rank */
/* Read: ODT = 0 ; Write: ODT = 1 */
odt_mask_0 = 0x0;
odt_mask_1 = 0x1;
break;
case 2: /* 2 Ranks */
if (rwcfg->mem_number_of_cs_per_dimm == 1) {
/*
* - Dual-Slot , Single-Rank (1 CS per DIMM)
* OR
* - RDIMM, 4 total CS (2 CS per DIMM, 2 DIMM)
*
* Since MEM_NUMBER_OF_RANKS is 2, they
* are both single rank with 2 CS each
* (special for RDIMM).
*
* Read: Turn on ODT on the opposite rank
* Write: Turn on ODT on all ranks
*/
odt_mask_0 = 0x3 & ~(1 << rank);
odt_mask_1 = 0x3;
} else {
/*
* - Single-Slot , Dual-Rank (2 CS per DIMM)
*
* Read: Turn ODT off on all ranks
* Write: Turn ODT on for the active rank
*/
odt_mask_0 = 0x0;
odt_mask_1 = 0x3 & (1 << rank);
}
break;
case 4: /* 4 Ranks */
/* Read:
* ----------+-----------------------+
* | ODT |
* Read From +-----------------------+
* Rank | 3 | 2 | 1 | 0 |
* ----------+-----+-----+-----+-----+
* 0 | 0 | 1 | 0 | 0 |
* 1 | 1 | 0 | 0 | 0 |
* 2 | 0 | 0 | 0 | 1 |
* 3 | 0 | 0 | 1 | 0 |
* ----------+-----+-----+-----+-----+
*
* Write:
* ----------+-----------------------+
* | ODT |
* Write To +-----------------------+
* Rank | 3 | 2 | 1 | 0 |
* ----------+-----+-----+-----+-----+
* 0 | 0 | 1 | 0 | 1 |
* 1 | 1 | 0 | 1 | 0 |
* 2 | 0 | 1 | 0 | 1 |
* 3 | 1 | 0 | 1 | 0 |
* ----------+-----+-----+-----+-----+
*/
switch (rank) {
case 0:
odt_mask_0 = 0x4;
odt_mask_1 = 0x5;
break;
case 1:
odt_mask_0 = 0x8;
odt_mask_1 = 0xA;
break;
case 2:
odt_mask_0 = 0x1;
odt_mask_1 = 0x5;
break;
case 3:
odt_mask_0 = 0x2;
odt_mask_1 = 0xA;
break;
}
break;
}
}

cs_and_odt_mask = (0xFF & ~(1 << rank)) |
((0xFF & odt_mask_0) << 8) |
((0xFF & odt_mask_1) << 16);
writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
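/*
 * From the computation above, the register layout appears to be:
 * bits [7:0] carry the chip-select mask (the active rank's bit is
 * cleared, suggesting active-low CS), bits [15:8] the read ODT mask
 * (odt_mask_0) and bits [23:16] the write ODT mask (odt_mask_1).
 */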
/**
* scc_mgr_set() - Set SCC Manager register
* @off: Base offset in SCC Manager space
* @grp: Read/Write group
* @val: Value to be set
*
* This function sets the SCC Manager (Scan Chain Control Manager) register.
*/
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
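/*
 * Each group register is one 32-bit word, hence the (grp << 2) byte
 * offset into the SCC manager address space.
 */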
/**
* scc_mgr_initialize() - Initialize SCC Manager registers
*
* Initialize SCC Manager registers.
*/
static void scc_mgr_initialize(void)
{
/*
* Clear register file for HPS. 16 (2^4) is the size of the
* full register file in the scc mgr:
* RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
* MEM_IF_READ_DQS_WIDTH - 1);
*/
int i;

for (i = 0; i < 16; i++) {
debug_cond(DLEVEL >= 1, "%s:%d: Clearing SCC RFILE index %u\n",
__func__, __LINE__, i);
scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, i, 0);
}
}
static void scc_mgr_set_dqdqs_output_phase(u32 write_group, u32 phase)
{
scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}

static void scc_mgr_set_dqs_bus_in_delay(u32 read_group, u32 delay)
{
scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_en_phase(u32 read_group, u32 phase)
{
scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}

static void scc_mgr_set_dqs_en_delay(u32 read_group, u32 delay)
{
scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}
static void scc_mgr_set_dq_in_delay(u32 dq_in_group, u32 delay)
{
scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}
static void scc_mgr_set_dqs_io_in_delay(u32 delay)
{
scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
delay);
}

static void scc_mgr_set_dm_in_delay(u32 dm, u32 delay)
{
scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET,
rwcfg->mem_dq_per_write_dqs + 1 + dm,
delay);
}
static void scc_mgr_set_dq_out1_delay(u32 dq_in_group, u32 delay)
{
scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(u32 delay)
{
scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, rwcfg->mem_dq_per_write_dqs,
delay);
}

static void scc_mgr_set_dm_out1_delay(u32 dm, u32 delay)
{
scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
rwcfg->mem_dq_per_write_dqs + 1 + dm,
delay);
}
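/*
 * Indexing convention in the IO delay scan chains above: indices
 * 0 .. mem_dq_per_write_dqs - 1 address the DQ pins, index
 * mem_dq_per_write_dqs addresses the DQS IO itself, and indices
 * mem_dq_per_write_dqs + 1 + dm address the DM pins.
 */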
/* load up dqs config settings */
static void scc_mgr_load_dqs(u32 dqs)
{
writel(dqs, &sdr_scc_mgr->dqs_ena);
}
/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
writel(0, &sdr_scc_mgr->dqs_io_ena);
}
/* load up dq config settings */
static void scc_mgr_load_dq(u32 dq_in_group)
{
writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}
/* load up dm config settings */
static void scc_mgr_load_dm(u32 dm)
{
writel(dm, &sdr_scc_mgr->dm_ena);
}

/**
* scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
* @off: Base offset in SCC Manager space
* @grp: Read/Write group
* @val: Value to be set
* @update: If non-zero, trigger SCC Manager update for all ranks
*
* This function sets the SCC Manager (Scan Chain Control Manager) register
* and optionally triggers the SCC update for all ranks.
*/
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
const int update)
{
u32 r;

for (r = 0; r < rwcfg->mem_number_of_ranks;
r += NUM_RANKS_PER_SHADOW_REG) {
scc_mgr_set(off, grp, val);

if (update || (r == 0)) {
writel(grp, &sdr_scc_mgr->dqs_ena);
writel(0, &sdr_scc_mgr->update);
}
}
}
static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
/*
* USER although the h/w doesn't support different phases per
* shadow register, for simplicity our scc manager modeling
* keeps different phase settings per shadow reg, and it's
* important for us to keep them in sync to match h/w.
* for efficiency, the scan chain update should occur only
* once to sr0.
*/
scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
read_group, phase, 0);
}
static void scc_mgr_set_dqdqs_output_phase_all_ranks(u32 write_group,
u32 phase)
{
/*
* USER although the h/w doesn't support different phases per
* shadow register, for simplicity our scc manager modeling
* keeps different phase settings per shadow reg, and it's
* important for us to keep them in sync to match h/w.
* for efficiency, the scan chain update should occur only
* once to sr0.
*/
scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
write_group, phase, 0);
}
static void scc_mgr_set_dqs_en_delay_all_ranks(u32 read_group,
u32 delay)
{
/*
* In shadow register mode, the T11 settings are stored in
* registers in the core, which are updated by the DQS_ENA
* signals. Not issuing the SCC_MGR_UPD command allows us to
* save lots of rank switching overhead, by calling
* select_shadow_regs_for_update with update_scan_chains
* set to 0.
*/
scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
read_group, delay, 1);
}
/**
* scc_mgr_set_oct_out1_delay() - Set OCT output delay
* @write_group: Write group
* @delay: Delay value
*
* This function sets the OCT output delay in SCC manager.
*/
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
const int ratio = rwcfg->mem_if_read_dqs_width /
rwcfg->mem_if_write_dqs_width;
const int base = write_group * ratio;
int i;
/*
* Load the setting in the SCC manager
* Although OCT affects only write data, the OCT delay is controlled
* by the DQS logic block which is instantiated once per read group.
* For protocols where a write group consists of multiple read groups,
* the setting must be set multiple times.
*/
for (i = 0; i < ratio; i++)
scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
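/*
 * Example (assuming a ratio of 2 read groups per write group): the OCT
 * delay for write group w is written to read-group DQS blocks 2w and
 * 2w + 1.
 */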
/**
* scc_mgr_set_hhp_extras() - Set HHP extras.
*
* Load the fixed setting in the SCC manager HHP extras.
*/
static void scc_mgr_set_hhp_extras(void)
{
/*
* Load the fixed setting in the SCC manager
* bits: 0:0 = 1'b1 - DQS bypass
* bits: 1:1 = 1'b1 - DQ bypass
* bits: 4:2 = 3'b001 - rfifo_mode
* bits: 6:5 = 2'b01 - rfifo clock_select
* bits: 7:7 = 1'b0 - separate gating from ungating setting
* bits: 8:8 = 1'b0 - separate OE from Output delay setting
*/
const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
(1 << 2) | (1 << 1) | (1 << 0);
const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
SCC_MGR_HHP_GLOBALS_OFFSET |
SCC_MGR_HHP_EXTRAS_OFFSET;
debug_cond(DLEVEL >= 1, "%s:%d Setting HHP Extras\n",
__func__, __LINE__);
writel(value, addr);
debug_cond(DLEVEL >= 1, "%s:%d Done Setting HHP Extras\n",
/**
* scc_mgr_zero_all() - Zero all DQS config
*
* Zero all DQS config.
*/
static void scc_mgr_zero_all(void)
{
int i, r;

/*
* USER Zero all DQS config settings, across all groups and all
* shadow registers
*/
for (r = 0; r < rwcfg->mem_number_of_ranks;
r += NUM_RANKS_PER_SHADOW_REG) {
for (i = 0; i < rwcfg->mem_if_read_dqs_width; i++) {
/*
* The phases actually don't exist on a per-rank basis,
* but there's no harm updating them several times, so
* let's keep the code simple.
*/
scc_mgr_set_dqs_bus_in_delay(i, iocfg->dqs_in_reserve);
scc_mgr_set_dqs_en_phase(i, 0);
scc_mgr_set_dqs_en_delay(i, 0);
}
for (i = 0; i < rwcfg->mem_if_write_dqs_width; i++) {
scc_mgr_set_dqdqs_output_phase(i, 0);
/* Arria V/Cyclone V don't have out2. */
scc_mgr_set_oct_out1_delay(i, iocfg->dqs_out_reserve);
}
}

/* Multicast to all DQS group enables. */
writel(0xff, &sdr_scc_mgr->dqs_ena);
writel(0, &sdr_scc_mgr->update);
}
/**
* scc_set_bypass_mode() - Set bypass mode and trigger SCC update
* @write_group: Write group
*
* Set bypass mode and trigger SCC update.
*/
static void scc_set_bypass_mode(const u32 write_group)
{
/* Multicast to all DQ enables. */
writel(0xff, &sdr_scc_mgr->dq_ena);
writel(0xff, &sdr_scc_mgr->dm_ena);
/* Update current DQS IO enable. */
writel(0, &sdr_scc_mgr->dqs_io_ena);
writel(write_group, &sdr_scc_mgr->dqs_ena);
writel(0, &sdr_scc_mgr->update);
}
/**
* scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
* @write_group: Write group
*
* Load DQS settings for Write Group, do not trigger SCC update.
*/
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
const int ratio = rwcfg->mem_if_read_dqs_width /
rwcfg->mem_if_write_dqs_width;
const int base = write_group * ratio;
int i;
/*
* Load the setting in the SCC manager
* Although OCT affects only write data, the OCT delay is controlled
* by the DQS logic block which is instantiated once per read group.
* For protocols where a write group consists of multiple read groups,
* the setting must be set multiple times.
*/
for (i = 0; i < ratio; i++)
writel(base + i, &sdr_scc_mgr->dqs_ena);
}
/**
* scc_mgr_zero_group() - Zero all configs for a group
* @write_group: Write group
* @out_only: If set, only zero the output-side configs
*
* Zero DQ, DM, DQS and OCT configs for a group.
*/
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
int i, r;

for (r = 0; r < rwcfg->mem_number_of_ranks;
r += NUM_RANKS_PER_SHADOW_REG) {
/* Zero all DQ config settings. */
for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
scc_mgr_set_dq_out1_delay(i, 0);
if (!out_only)
scc_mgr_set_dq_in_delay(i, 0);
}

/* Multicast to all DQ enables. */
writel(0xff, &sdr_scc_mgr->dq_ena);
/* Zero all DM config settings. */
for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
if (!out_only)
scc_mgr_set_dm_in_delay(i, 0);
scc_mgr_set_dm_out1_delay(i, 0);
}

/* Multicast to all DM enables. */
writel(0xff, &sdr_scc_mgr->dm_ena);
/* Zero all DQS IO settings. */
if (!out_only)
scc_mgr_set_dqs_io_in_delay(0);
/* Arria V/Cyclone V don't have out2. */
scc_mgr_set_dqs_out1_delay(iocfg->dqs_out_reserve);
scc_mgr_set_oct_out1_delay(write_group, iocfg->dqs_out_reserve);
scc_mgr_load_dqs_for_write_group(write_group);
/* Multicast to all DQS IO enables (only 1 in total). */
writel(0, &sdr_scc_mgr->dqs_io_ena);
/* Hit update to zero everything. */
writel(0, &sdr_scc_mgr->update);
}
}
/*
* apply and load a particular input delay for the DQ pins in a group
* group_bgn is the index of the first dq pin (in the write group)
*/
static void scc_mgr_apply_group_dq_in_delay(u32 group_bgn, u32 delay)
{
u32 i, p;

for (i = 0, p = group_bgn; i < rwcfg->mem_dq_per_read_dqs; i++, p++) {
scc_mgr_set_dq_in_delay(p, delay);
scc_mgr_load_dq(p);
}
}
/**
* scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
* @delay: Delay value
*
* Apply and load a particular output delay for the DQ pins in a group.
*/
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
u32 i;

for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
scc_mgr_set_dq_out1_delay(i, delay);
scc_mgr_load_dq(i);
}
}
/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(u32 delay1)
{
u32 i;

for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
scc_mgr_set_dm_out1_delay(i, delay1);
scc_mgr_load_dm(i);
}
}
/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(u32 write_group,
u32 delay)
{
scc_mgr_set_dqs_out1_delay(delay);
scc_mgr_load_dqs_io();
scc_mgr_set_oct_out1_delay(write_group, delay);
scc_mgr_load_dqs_for_write_group(write_group);
}
/**
* scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
* @write_group: Write group
* @delay: Delay value
*
* Apply a delay to the entire output side: DQ, DM, DQS, OCT.
*/
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
const u32 delay)
{
u32 i, new_delay;
/* DQ shift */
for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++)
scc_mgr_load_dq(i);
/* DM shift */
for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
scc_mgr_load_dm(i);
/* DQS shift */
new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
if (new_delay > iocfg->io_out2_delay_max) {
debug_cond(DLEVEL >= 1,
"%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
__func__, __LINE__, write_group, delay, new_delay,
iocfg->io_out2_delay_max,
new_delay - iocfg->io_out2_delay_max);
new_delay -= iocfg->io_out2_delay_max;
scc_mgr_set_dqs_out1_delay(new_delay);
}
scc_mgr_load_dqs_io();
/* OCT shift */
new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
if (new_delay > iocfg->io_out2_delay_max) {
debug_cond(DLEVEL >= 1,
"%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
__func__, __LINE__, write_group, delay,
new_delay, iocfg->io_out2_delay_max,
new_delay - iocfg->io_out2_delay_max);
new_delay -= iocfg->io_out2_delay_max;
scc_mgr_set_oct_out1_delay(write_group, new_delay);
}
scc_mgr_load_dqs_for_write_group(write_group);
}
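/*
 * Note the pattern above: the added delay is absorbed by the OUT2 stage
 * first; only the excess beyond io_out2_delay_max spills over into the
 * OUT1 stage.
 */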
/**
* scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
* @write_group: Write group
* @delay: Delay value
*
* Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
*/
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
const u32 delay)
{
int r;

for (r = 0; r < rwcfg->mem_number_of_ranks;
r += NUM_RANKS_PER_SHADOW_REG) {
scc_mgr_apply_group_all_out_delay_add(write_group, delay);
writel(0, &sdr_scc_mgr->update);
}
}
/**
* set_jump_as_return() - Return instruction optimization
*
* Optimization used to recover some slots in the DDR3 inst_rom; it could
* be applied to other protocols if we wanted to.
*/
static void set_jump_as_return(void)
{
/*
* To save space, we replace return with jump to special shared
* RETURN instruction, and we set the counter to a large value so that
* we always jump.
*/
writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
writel(rwcfg->rreturn, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}
/**
* delay_for_n_mem_clocks() - Delay for N memory clocks
* @clocks: Length of the delay
*
* Delay for N memory clocks.
*/
static void delay_for_n_mem_clocks(const u32 clocks)
{
u32 afi_clocks;
u16 c_loop;
u8 inner;
u8 outer;

debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
/* Scale (rounding up) to get afi clocks. */
afi_clocks = DIV_ROUND_UP(clocks, misccfg->afi_rate_ratio);
if (afi_clocks) /* Temporary underflow protection */
afi_clocks--;
/*
* Note, we don't bother accounting for being off a little
* bit because of a few extra instructions in outer loops.
* Note, the loops have a test at the end, and do the test
* before the decrement, and so always perform the loop
* 1 time more than the counter value
*/
c_loop = afi_clocks >> 16;
outer = c_loop ? 0xff : (afi_clocks >> 8);
inner = outer ? 0xff : afi_clocks;
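/*
 * Worked example (illustrative values): afi_clocks = 0x12345 yields
 * c_loop = 1, outer = 0xff, inner = 0xff (fully nested loops), while
 * afi_clocks = 0x45 yields c_loop = 0, outer = 0, inner = 0x45, i.e.
 * only the single inner loop is used.
 */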
/*
* rom instructions are structured as follows:
*
* IDLE_LOOP2: jnz cntr0, TARGET_A
* IDLE_LOOP1: jnz cntr1, TARGET_B
* return
*
* so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
* TARGET_B is set to IDLE_LOOP2 as well
*
* if we have no outer loop, though, then we can use IDLE_LOOP1 only,
* and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
*
* a little confusing, but it helps save precious space in the inst_rom
* and sequencer rom and keeps the delays more accurate and reduces
* overhead
*/
if (afi_clocks < 0x100) {
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
&sdr_rw_load_mgr_regs->load_cntr1);
writel(rwcfg->idle_loop1,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(rwcfg->idle_loop1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET);
} else {
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
&sdr_rw_load_mgr_regs->load_cntr0);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
&sdr_rw_load_mgr_regs->load_cntr1);
writel(rwcfg->idle_loop2,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(rwcfg->idle_loop2,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);

do {
writel(rwcfg->idle_loop2,
SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET);
} while (c_loop-- != 0);
}
debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
/**
* rw_mgr_mem_init_load_regs() - Load instruction registers
* @cntr0: Counter 0 value
* @cntr1: Counter 1 value
* @cntr2: Counter 2 value
* @jump: Jump instruction value
*
* Load instruction registers.
*/
static void rw_mgr_mem_init_load_regs(u32 cntr0, u32 cntr1, u32 cntr2, u32 jump)
{
u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
/* Load counters */
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr0),
&sdr_rw_load_mgr_regs->load_cntr0);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr1),
&sdr_rw_load_mgr_regs->load_cntr1);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(cntr2),
&sdr_rw_load_mgr_regs->load_cntr2);
/* Load jump address */
writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(jump, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
/* Execute count instruction */
writel(jump, grpaddr);
}
/**
* rw_mgr_mem_load_user() - Load user calibration values
* @fin1: Final instruction 1
* @fin2: Final instruction 2
* @precharge: If 1, precharge the banks at the end
*
* Load user calibration values and optionally precharge the banks.
*/
static void rw_mgr_mem_load_user(const u32 fin1, const u32 fin2,
const int precharge)
{
u32 grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
u32 r;

for (r = 0; r < rwcfg->mem_number_of_ranks; r++) {
/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
/* precharge all banks ... */
if (precharge)
writel(rwcfg->precharge_all, grpaddr);
/*
* USER Use mirrored commands for odd ranks if address
* mirroring is on
*/
if ((rwcfg->mem_address_mirroring >> r) & 0x1) {
set_jump_as_return();
writel(rwcfg->mrs2_mirr, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(rwcfg->mrs3_mirr, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(rwcfg->mrs1_mirr, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(fin1, grpaddr);
} else {
set_jump_as_return();
writel(rwcfg->mrs2, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(rwcfg->mrs3, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(rwcfg->mrs1, grpaddr);
set_jump_as_return();
writel(fin2, grpaddr);
}
if (precharge)
continue;
set_jump_as_return();
writel(rwcfg->zqcl, grpaddr);
/* tZQinit = tDLLK = 512 ck cycles */
delay_for_n_mem_clocks(512);
}
}
/**
* rw_mgr_mem_initialize() - Initialize RW Manager
*
* Initialize RW Manager.
*/
static void rw_mgr_mem_initialize(void)
{
debug("%s:%d\n", __func__, __LINE__);
/* The reset / cke part of initialization is broadcasted to all ranks */
writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
/*
* Here's how you load registers for a loop
* Counters are located @ 0x800
* Jump addresses are located @ 0xC00
* For both, registers 0 to 3 are selected using bits 3 and 2, like
* in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
* I know this ain't pretty, but the Avalon bus throws away the 2 least
* significant bits
*/
/* Start with memory RESET activated */
/* tINIT = 200us */
/*
* 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
* If a and b are the number of iteration in 2 nested loops
* it takes the following number of cycles to complete the operation:
* number_of_cycles = ((2 + n) * a + 2) * b
* where n is the number of instruction in the inner loop
* One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
* b = 6A
*/
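/*
 * Sanity check of the arithmetic above: with n = 0, a = 256, b = 106,
 * ((2 + 0) * 256 + 2) * 106 = 54,484 cycles, covering the ~54,000
 * cycles required for 200us at a 3.75ns clock period.
 */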
rw_mgr_mem_init_load_regs(misccfg->tinit_cntr0_val,
misccfg->tinit_cntr1_val,
misccfg->tinit_cntr2_val,
rwcfg->init_reset_0_cke_0);
/* Indicate that memory is stable. */
writel(1, &phy_mgr_cfg->reset_mem_stbl);
/*
* transition the RESET to high
* Wait for 500us
*/
/*
* 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
* If a and b are the number of iteration in 2 nested loops
* it takes the following number of cycles to complete the operation
* number_of_cycles = ((2 + n) * a + 2) * b
* where n is the number of instruction in the inner loop
* One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
* b = FF
*/
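/*
 * Sanity check of the arithmetic above: with n = 2, a = 131, b = 256,
 * ((2 + 2) * 131 + 2) * 256 = 134,656 cycles, just above the ~134,000
 * cycles required for 500us at a 3.75ns clock period.
 */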
rw_mgr_mem_init_load_regs(misccfg->treset_cntr0_val,
misccfg->treset_cntr1_val,
misccfg->treset_cntr2_val,
rwcfg->init_reset_1_cke_0);
/* Bring up clock enable. */
/* tXRP < 250 ck cycles */
delay_for_n_mem_clocks(250);
rw_mgr_mem_load_user(rwcfg->mrs0_dll_reset_mirr, rwcfg->mrs0_dll_reset,