/**
* rw_mgr_mem_handoff() - Hand off the memory to user
*
* At the end of calibration we have to program the user settings in
* and hand off the memory to the user.
*/
static void rw_mgr_mem_handoff(void)
{
rw_mgr_mem_load_user(rwcfg->mrs0_user_mirr, rwcfg->mrs0_user, 1);
/*
 * Need to wait tMOD (12CK or 15ns) time before issuing other
 * commands, but we will have plenty of NIOS cycles before actual
 * handoff so it's okay.
 */
}
/**
* rw_mgr_mem_calibrate_write_test_issue() - Issue write test command
* @group: Write Group
* @test_dm: Use DM (test the data-mask variant)
*
* Issue write test command. Two variants are provided, one that just tests
* a write pattern and another that tests datamask functionality.
*/
static void rw_mgr_mem_calibrate_write_test_issue(u32 group,
u32 test_dm)
{
const u32 quick_write_mode =
(STATIC_CALIB_STEPS & CALIB_SKIP_WRITES) &&
misccfg->enable_super_quick_calibration;
u32 mcc_instruction;
u32 rw_wl_nop_cycles;
/*
* Set counter and jump addresses for the right
* number of NOP cycles.
* The number of supported NOP cycles can range from -1 to infinity
* Three different cases are handled:
*
* 1. For a number of NOP cycles greater than 0, the RW Mgr looping
* mechanism will be used to insert the right number of NOPs
*
* 2. For a number of NOP cycles equal to 0, the micro-instruction
* issuing the write command will jump straight to the
* micro-instruction that turns on DQS (for DDRx), or outputs write
* data (for RLD), skipping the NOP micro-instruction altogether
*
* 3. A number of NOP cycles equal to -1 indicates that DQS must be
* turned on in the same micro-instruction that issues the write
* command. Then we need to jump directly to the micro-instruction
* that sends out the data.
*
* NOTE: Implementing this mechanism uses 2 RW Mgr jump-counters
* (2 and 3). One jump-counter (0) is used to perform multiple
* write-read operations, leaving one counter to issue this command
* in "multiple-group" mode.
*/
rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
if (rw_wl_nop_cycles == -1) {
/*
* CNTR 2 - We want to execute the special write operation that
* turns on DQS right away and then skip directly to the
* instruction that sends out the data. We set the counter to a
* large number so that the jump is always taken.
*/
writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
/* CNTR 3 - Not used */
if (test_dm) {
mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0_wl_1;
writel(rwcfg->lfsr_wr_rd_dm_bank_0_data,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
} else {
mcc_instruction = rwcfg->lfsr_wr_rd_bank_0_wl_1;
writel(rwcfg->lfsr_wr_rd_bank_0_data,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
writel(rwcfg->lfsr_wr_rd_bank_0_nop,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
}
} else if (rw_wl_nop_cycles == 0) {
/*
* CNTR 2 - We want to skip the NOP operation and go straight
* to the DQS enable instruction. We set the counter to a large
* number so that the jump is always taken.
*/
writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
/* CNTR 3 - Not used */
if (test_dm) {
mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
writel(rwcfg->lfsr_wr_rd_dm_bank_0_dqs,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
} else {
mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
writel(rwcfg->lfsr_wr_rd_bank_0_dqs,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
}
} else {
/*
* CNTR 2 - In this case we want to execute the next instruction
* and NOT take the jump. So we set the counter to 0. The jump
* address doesn't count.
*/
writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
/*
* CNTR 3 - Set the nop counter to the number of cycles we
* need to loop for, minus 1.
*/
writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
if (test_dm) {
mcc_instruction = rwcfg->lfsr_wr_rd_dm_bank_0;
writel(rwcfg->lfsr_wr_rd_dm_bank_0_nop,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
} else {
mcc_instruction = rwcfg->lfsr_wr_rd_bank_0;
writel(rwcfg->lfsr_wr_rd_bank_0_nop,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
}
}
writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RESET_READ_DATAPATH_OFFSET);
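/*
 * CNTR 0 - Number of LFSR write-read bursts to run; quick calibration
 * issues fewer iterations (per the NOTE above, jump-counter 0 performs
 * the multiple write-read operations).
 */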
if (quick_write_mode)
writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
else
writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
/*
* CNTR 1 - This is used to ensure enough time elapses
* for read data to come back.
*/
writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
if (test_dm) {
writel(rwcfg->lfsr_wr_rd_dm_bank_0_wait,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
} else {
writel(rwcfg->lfsr_wr_rd_bank_0_wait,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
}
writel(mcc_instruction, (SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET) +
(group << 2));
}
/**
* rw_mgr_mem_calibrate_write_test() - Test writes, check for single/multiple pass
* @rank_bgn: Rank number
* @write_group: Write Group
* @use_dm: Use DM
* @all_correct: All bits must be correct in the mask
* @bit_chk: Resulting bit mask after the test
* @all_ranks: Test all ranks
*
* Test writes; can check for a single-bit pass or a multiple-bit pass.
*/
static int
rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
const u32 use_dm, const u32 all_correct,
u32 *bit_chk, const u32 all_ranks)
{
const u32 rank_end = all_ranks ?
rwcfg->mem_number_of_ranks :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
const u32 shift_ratio = rwcfg->mem_dq_per_write_dqs /
rwcfg->mem_virtual_groups_per_write_dqs;
const u32 correct_mask_vg = param->write_correct_mask_vg;
u32 tmp_bit_chk, base_rw_mgr;
int vg, r;
*bit_chk = param->write_correct_mask;
for (r = rank_bgn; r < rank_end; r++) {
/* Set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
tmp_bit_chk = 0;
for (vg = rwcfg->mem_virtual_groups_per_write_dqs - 1;
vg >= 0; vg--) {
/* Reset the FIFOs to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
rw_mgr_mem_calibrate_write_test_issue(
write_group *
rwcfg->mem_virtual_groups_per_write_dqs + vg,
use_dm);
base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
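/*
 * Accumulate pass/fail bits for this virtual group: bits set in the
 * RW manager status mark failing DQ lines, so invert and mask with the
 * per-group correct mask.
 */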
tmp_bit_chk <<= shift_ratio;
tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
}
*bit_chk &= tmp_bit_chk;
}
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
if (all_correct) {
"write_test(%u,%u,ALL) : %u == %u => %i\n",
write_group, use_dm, *bit_chk,
param->write_correct_mask,
*bit_chk == param->write_correct_mask);
return *bit_chk == param->write_correct_mask;
} else {
"write_test(%u,%u,ONE) : %u != %i => %i\n",
write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
return *bit_chk != 0x00;
}
}
/**
* rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
* @rank_bgn: Rank number
* @group: Read/Write Group
* @all_ranks: Test all ranks
*
* Performs a guaranteed read on the patterns we are going to use during a
* read test to ensure memory works.
*/
static int
rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
const u32 all_ranks)
{
const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
const u32 addr_offset =
(group * rwcfg->mem_virtual_groups_per_read_dqs) << 2;
const u32 rank_end = all_ranks ?
rwcfg->mem_number_of_ranks :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
const u32 shift_ratio = rwcfg->mem_dq_per_read_dqs /
rwcfg->mem_virtual_groups_per_read_dqs;
const u32 correct_mask_vg = param->read_correct_mask_vg;
u32 tmp_bit_chk, base_rw_mgr, bit_chk;
int vg, r;
int ret = 0;
bit_chk = param->read_correct_mask;
for (r = rank_bgn; r < rank_end; r++) {
/* Set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
/* Load up a constant bursts of read commands */
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
writel(rwcfg->guaranteed_read,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
writel(rwcfg->guaranteed_read_cont,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
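/* Read back the guaranteed pattern one virtual group at a time. */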
tmp_bit_chk = 0;
for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1;
vg >= 0; vg--) {
/* Reset the FIFOs to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RESET_READ_DATAPATH_OFFSET);
writel(rwcfg->guaranteed_read,
addr + addr_offset + (vg << 2));
base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
tmp_bit_chk <<= shift_ratio;
tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
}
bit_chk &= tmp_bit_chk;
}
writel(rwcfg->clear_dqs_enable, addr + (group << 2));
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
if (bit_chk != param->read_correct_mask)
ret = -EIO;
debug_cond(DLEVEL >= 1,
"%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
__func__, __LINE__, group, bit_chk,
param->read_correct_mask, ret);
return ret;
}
/**
* rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
* @rank_bgn: Rank number
* @all_ranks: Test all ranks
*
* Load up the patterns we are going to use during a read test.
*/
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
const int all_ranks)
{
const u32 rank_end = all_ranks ?
rwcfg->mem_number_of_ranks :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
u32 r;
debug("%s:%d\n", __func__, __LINE__);
for (r = rank_bgn; r < rank_end; r++) {
/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
/* Load up a constant bursts */
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
writel(rwcfg->guaranteed_write_wait0,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
writel(rwcfg->guaranteed_write_wait1,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
writel(rwcfg->guaranteed_write_wait2,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
writel(rwcfg->guaranteed_write_wait3,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
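/* Kick off the guaranteed write sequence for this rank. */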
writel(rwcfg->guaranteed_write, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET);
}
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
/**
* rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
* @rank_bgn: Rank number
* @group: Read/Write group
* @num_tries: Number of retries of the test
* @all_correct: All bits must be correct in the mask
* @bit_chk: Resulting bit mask after the test
* @all_groups: Test all R/W groups
* @all_ranks: Test all ranks
*
* Try a read and see if it returns correct data back. Test has dummy reads
* inserted into the mix used to align DQS enable. Test has more thorough
* checks than the regular read test.
*/
static int
rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
const u32 num_tries, const u32 all_correct,
u32 *bit_chk,
const u32 all_groups, const u32 all_ranks)
{
const u32 rank_end = all_ranks ? rwcfg->mem_number_of_ranks :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
const u32 quick_read_mode =
((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
misccfg->enable_super_quick_calibration);
u32 correct_mask_vg = param->read_correct_mask_vg;
u32 tmp_bit_chk;
u32 base_rw_mgr;
u32 addr;
int r, vg, ret;
*bit_chk = param->read_correct_mask;
for (r = rank_bgn; r < rank_end; r++) {
/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
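/* CNTR 1 and CNTR 2 - wait loops inserted between back-to-back reads. */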
writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
writel(rwcfg->read_b2b_wait1,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
writel(rwcfg->read_b2b_wait2,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
if (quick_read_mode)
writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
/* need at least two (1+1) reads to capture failures */
else if (all_groups)
writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
else
writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
writel(rwcfg->read_b2b,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
if (all_groups)
writel(rwcfg->mem_if_read_dqs_width *
rwcfg->mem_virtual_groups_per_read_dqs - 1,
&sdr_rw_load_mgr_regs->load_cntr3);
else
writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
writel(rwcfg->read_b2b,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
tmp_bit_chk = 0;
for (vg = rwcfg->mem_virtual_groups_per_read_dqs - 1; vg >= 0;
vg--) {
/* Reset the FIFOs to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RESET_READ_DATAPATH_OFFSET);
if (all_groups) {
addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_ALL_GROUPS_OFFSET;
} else {
addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
}
writel(rwcfg->read_b2b, addr +
((group *
rwcfg->mem_virtual_groups_per_read_dqs +
vg) << 2));
base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
tmp_bit_chk <<= rwcfg->mem_dq_per_read_dqs /
rwcfg->mem_virtual_groups_per_read_dqs;
tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
}
*bit_chk &= tmp_bit_chk;
}
addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
writel(rwcfg->clear_dqs_enable, addr + (group << 2));
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
if (all_correct) {
ret = (*bit_chk == param->read_correct_mask);
debug_cond(DLEVEL >= 2,
"%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
__func__, __LINE__, group, all_groups, *bit_chk,
param->read_correct_mask, ret);
} else {
ret = (*bit_chk != 0x00);
debug_cond(DLEVEL >= 2,
"%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
__func__, __LINE__, group, all_groups, *bit_chk,
0, ret);
}
return ret;
}
/**
* rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
* @grp: Read/Write group
* @num_tries: Number of retries of the test
* @all_correct: All bits must be correct in the mask
* @all_groups: Test all R/W groups
*
* Perform a READ test across all memory ranks.
*/
static int
rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
const u32 all_correct,
const u32 all_groups)
{
u32 bit_chk;
return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
&bit_chk, all_groups, 1);
}
/**
* rw_mgr_incr_vfifo() - Increase VFIFO value
* @grp: Read/Write group
*
* Increase VFIFO value.
*/
static void rw_mgr_incr_vfifo(const u32 grp)
{
writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}
/**
* rw_mgr_decr_vfifo() - Decrease VFIFO value
* @grp: Read/Write group
*
* Decrease VFIFO value.
*/
static void rw_mgr_decr_vfifo(const u32 grp)
{
u32 i;
for (i = 0; i < misccfg->read_valid_fifo_size - 1; i++)
rw_mgr_incr_vfifo(grp);
}
/**
* find_vfifo_failing_read() - Push VFIFO to get a failing read
* @grp: Read/Write group
*
* Push VFIFO until a failing read happens.
*/
static int find_vfifo_failing_read(const u32 grp)
{
u32 v, ret, fail_cnt = 0;
for (v = 0; v < misccfg->read_valid_fifo_size; v++) {
debug_cond(DLEVEL >= 2, "%s:%d: vfifo %u\n",
__func__, __LINE__, v);
ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
PASS_ONE_BIT, 0);
if (!ret) {
fail_cnt++;
if (fail_cnt == 2)
return v;
}
/* Fiddle with FIFO. */
rw_mgr_incr_vfifo(grp);
}
/* No failing read found! Something must have gone wrong. */
debug_cond(DLEVEL >= 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
return 0;
}
/**
* sdr_find_phase_delay() - Find DQS enable phase or delay
* @working: If 1, look for working phase/delay, if 0, look for non-working
* @delay: If 1, look for delay, if 0, look for phase
* @grp: Read/Write group
* @work: Working window position
* @work_inc: Working window increment
* @pd: DQS Phase/Delay Iterator
*
* Find working or non-working DQS enable phase setting.
*/
static int sdr_find_phase_delay(int working, int delay, const u32 grp,
u32 *work, const u32 work_inc, u32 *pd)
{
const u32 max = delay ? iocfg->dqs_en_delay_max :
iocfg->dqs_en_phase_max;
u32 ret;
for (; *pd <= max; (*pd)++) {
if (delay)
scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
else
scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);
ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
PASS_ONE_BIT, 0);
if (!working)
ret = !ret;
if (ret)
return 0;
if (work)
*work += work_inc;
}
return -EINVAL;
}
/**
* sdr_find_phase() - Find DQS enable phase
* @working: If 1, look for working phase, if 0, look for non-working phase
* @grp: Read/Write group
* @work: Working window position
* @i: Iterator
* @p: DQS Phase Iterator
*
* Find working or non-working DQS enable phase setting.
*/
static int sdr_find_phase(int working, const u32 grp, u32 *work,
u32 *i, u32 *p)
{
const u32 end = misccfg->read_valid_fifo_size + (working ? 0 : 1);
int ret;
for (; *i < end; (*i)++) {
if (working)
*p = 0;
ret = sdr_find_phase_delay(working, 0, grp, work,
iocfg->delay_per_opa_tap, p);
if (!ret)
return 0;
if (*p > iocfg->dqs_en_phase_max) {
/* Fiddle with FIFO. */
rw_mgr_incr_vfifo(grp);
if (!working)
*p = 0;
}
}
return -EINVAL;
}
/**
* sdr_working_phase() - Find working DQS enable phase
* @grp: Read/Write group
* @work_bgn: Working window start position
* @d: dtaps output value
* @p: DQS Phase Iterator
* @i: Iterator
*
* Find working DQS enable phase setting.
*/
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
u32 *p, u32 *i)
{
const u32 dtaps_per_ptap = iocfg->delay_per_opa_tap /
iocfg->delay_per_dqs_en_dchain_tap;
int ret;
*work_bgn = 0;
for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
*i = 0;
scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
ret = sdr_find_phase(1, grp, work_bgn, i, p);
if (!ret)
return 0;
*work_bgn += iocfg->delay_per_dqs_en_dchain_tap;
}
/* Cannot find working solution */
debug_cond(DLEVEL >= 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
__func__, __LINE__);
return -EINVAL;
}
/**
* sdr_backup_phase() - Find DQS enable backup phase
* @grp: Read/Write group
* @work_bgn: Working window start position
* @p: DQS Phase Iterator
*
* Find DQS enable backup phase setting.
*/
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
int ret;
u32 tmp_delay, d;
/* Special case code for backing up a phase */
if (*p == 0) {
*p = iocfg->dqs_en_phase_max;
rw_mgr_decr_vfifo(grp);
} else {
(*p)--;
}
tmp_delay = *work_bgn - iocfg->delay_per_opa_tap;
scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
for (d = 0; d <= iocfg->dqs_en_delay_max && tmp_delay < *work_bgn;
d++) {
scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
PASS_ONE_BIT, 0);
if (ret) {
*work_bgn = tmp_delay;
break;
}
tmp_delay += iocfg->delay_per_dqs_en_dchain_tap;
}
/* Restore VFIFO to old state before we decremented it (if needed). */
(*p)++;
if (*p > iocfg->dqs_en_phase_max) {
*p = 0;
rw_mgr_incr_vfifo(grp);
}
scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}
/**
* sdr_nonworking_phase() - Find non-working DQS enable phase
* @grp: Read/Write group
* @work_end: Working window end position
* @p: DQS Phase Iterator
* @i: Iterator
*
* Find non-working DQS enable phase setting.
*/
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
int ret;
(*p)++;
*work_end += iocfg->delay_per_opa_tap;
if (*p > iocfg->dqs_en_phase_max) {
/* Fiddle with FIFO. */
*p = 0;
rw_mgr_incr_vfifo(grp);
}
ret = sdr_find_phase(0, grp, work_end, i, p);
if (ret) {
/* Cannot see edge of failing read. */
debug_cond(DLEVEL >= 2, "%s:%d: end: failed\n",
/**
* sdr_find_window_center() - Find center of the working DQS window.
* @grp: Read/Write group
* @work_bgn: First working settings
* @work_end: Last working settings
*
* Find center of the working DQS enable window.
*/
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
const u32 work_end)
{
u32 work_mid;
int tmp_delay = 0;
int i, p, d;
work_mid = (work_bgn + work_end) / 2;
debug_cond(DLEVEL >= 2, "work_bgn=%d work_end=%d work_mid=%d\n",
work_bgn, work_end, work_mid);
/* Get the middle delay to be less than a VFIFO delay */
tmp_delay = (iocfg->dqs_en_phase_max + 1) * iocfg->delay_per_opa_tap;
debug_cond(DLEVEL >= 2, "vfifo ptap delay %d\n", tmp_delay);
work_mid %= tmp_delay;
debug_cond(DLEVEL >= 2, "new work_mid %d\n", work_mid);
tmp_delay = rounddown(work_mid, iocfg->delay_per_opa_tap);
if (tmp_delay > iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap)
tmp_delay = iocfg->dqs_en_phase_max * iocfg->delay_per_opa_tap;
p = tmp_delay / iocfg->delay_per_opa_tap;
debug_cond(DLEVEL >= 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
d = DIV_ROUND_UP(work_mid - tmp_delay,
iocfg->delay_per_dqs_en_dchain_tap);
if (d > iocfg->dqs_en_delay_max)
d = iocfg->dqs_en_delay_max;
tmp_delay += d * iocfg->delay_per_dqs_en_dchain_tap;
debug_cond(DLEVEL >= 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
/*
* Push the VFIFO until we can successfully calibrate. We can do this
* because the largest possible margin is 1 VFIFO cycle.
*/
for (i = 0; i < misccfg->read_valid_fifo_size; i++) {
debug_cond(DLEVEL >= 2, "find_dqs_en_phase: center\n");
if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
PASS_ONE_BIT, 0)) {
debug_cond(DLEVEL >= 2,
"%s:%d center: found: ptap=%u dtap=%u\n",
__func__, __LINE__, p, d);
return 0;
}
/* Fiddle with FIFO. */
rw_mgr_incr_vfifo(grp);
}
debug_cond(DLEVEL >= 2, "%s:%d center: failed.\n",
__func__, __LINE__);
return -EINVAL;
}
/**
* rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
* @grp: Read/Write Group
*
* Find a good DQS enable to use.
*/
static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
{
u32 d, p, i;
u32 dtaps_per_ptap;
u32 work_bgn, work_end;
u32 found_passing_read, found_failing_read = 0, initial_failing_dtap;
int ret;
debug("%s:%d %u\n", __func__, __LINE__, grp);
reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
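/* Start from a known state: zero the DQS enable phase and delay for all ranks. */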
scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
/* Step 0: Determine number of delay taps for each phase tap. */
dtaps_per_ptap = iocfg->delay_per_opa_tap /
iocfg->delay_per_dqs_en_dchain_tap;
/* Step 1: First push vfifo until we get a failing read. */
find_vfifo_failing_read(grp);
/* Step 2: Find first working phase, increment in ptaps. */
ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
if (ret)
return ret;
work_end = work_bgn;
/*
* If d is 0 then the working window covers a phase tap and we can
* follow the old procedure. Otherwise, we've found the beginning
* and we need to increment the dtaps until we find the end.
*/
if (d == 0) {
/*
* Step 3a: If we have room, back off by one and
* increment in dtaps.
*/
sdr_backup_phase(grp, &work_bgn, &p);
/*
* Step 4a: go forward from working phase to non working
* phase, increment in ptaps.
*/
ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
if (ret)
return ret;
/* Step 5a: Back off one from last, increment in dtaps. */
/* Special case code for backing up a phase */
if (p == 0) {
p = iocfg->dqs_en_phase_max;
rw_mgr_decr_vfifo(grp);
} else {
p = p - 1;
}
work_end -= iocfg->delay_per_opa_tap;
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
d = 0;
debug_cond(DLEVEL >= 2, "%s:%d p: ptap=%u\n",
__func__, __LINE__, p);
}
/* The dtap increment to find the failing edge is done here. */
sdr_find_phase_delay(0, 1, grp, &work_end,
iocfg->delay_per_dqs_en_dchain_tap, &d);
/* Go back to working dtap */
if (d != 0)
work_end -= iocfg->delay_per_dqs_en_dchain_tap;
"%s:%d p/d: ptap=%u dtap=%u end=%u\n",
__func__, __LINE__, p, d - 1, work_end);
if (work_end < work_bgn) {
/* nil range */
debug_cond(DLEVEL >= 2, "%s:%d end-2: failed\n",
__func__, __LINE__);
return -EINVAL;
}
debug_cond(DLEVEL >= 2, "%s:%d found range [%u,%u]\n",
__func__, __LINE__, work_bgn, work_end);
/*
* We need to calculate the number of dtaps that equal a ptap.
* To do that we'll back up a ptap and re-find the edge of the
* window using dtaps.
*/
debug_cond(DLEVEL >= 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
__func__, __LINE__);
/* Special case code for backing up a phase */
if (p == 0) {
p = iocfg->dqs_en_phase_max;
rw_mgr_decr_vfifo(grp);
debug_cond(DLEVEL >= 2, "%s:%d backedup cycle/phase: p=%u\n",
__func__, __LINE__, p);
} else {
p = p - 1;
debug_cond(DLEVEL >= 2, "%s:%d backedup phase only: p=%u",
__func__, __LINE__, p);
}
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
/*
* Increase dtap until we first see a passing read (in case the
* window is smaller than a ptap), and then a failing read to
* mark the edge of the window again.
*/
/* Find a passing read. */
debug_cond(DLEVEL >= 2, "%s:%d find passing read\n",
__func__, __LINE__);
initial_failing_dtap = d;
found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
if (found_passing_read) {
/* Find a failing read. */
debug_cond(DLEVEL >= 2, "%s:%d find failing read\n",
__func__, __LINE__);
d++;
found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
&d);
} else {
debug_cond(DLEVEL >= 1,
"%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
__func__, __LINE__);
}
/*
* The dynamically calculated dtaps_per_ptap is only valid if we
* found a passing/failing read. If we didn't, it means d hit the max
* (iocfg->dqs_en_delay_max). Otherwise, dtaps_per_ptap retains its
* statically calculated value.
*/
if (found_passing_read && found_failing_read)
dtaps_per_ptap = d - initial_failing_dtap;
writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
debug_cond(DLEVEL >= 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
__func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
/* Step 6: Find the centre of the window. */
ret = sdr_find_window_center(grp, work_bgn, work_end);
return ret;
}
/**
* search_stop_check() - Check if the detected edge is valid
* @write: Perform read (Stage 2) or write (Stage 3) calibration
* @d: DQS delay
* @rank_bgn: Rank number
* @write_group: Write Group
* @read_group: Read Group
* @bit_chk: Resulting bit mask after the test
* @sticky_bit_chk: Resulting sticky bit mask after the test
* @use_read_test: Perform read test
*
* Test if the found edge is valid.
*/
static u32 search_stop_check(const int write, const int d, const int rank_bgn,
const u32 write_group, const u32 read_group,
u32 *bit_chk, u32 *sticky_bit_chk,
const u32 use_read_test)
{
const u32 ratio = rwcfg->mem_if_read_dqs_width /
rwcfg->mem_if_write_dqs_width;
const u32 correct_mask = write ? param->write_correct_mask :
param->read_correct_mask;
const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
rwcfg->mem_dq_per_read_dqs;
u32 ret;
/*
* Stop searching when the read test doesn't pass AND when
* we've seen a passing read on every bit.
*/
if (write) { /* WRITE-ONLY */
ret = !rw_mgr_mem_calibrate_write_test(rank_bgn, write_group,
0, PASS_ONE_BIT,
bit_chk, 0);
} else if (use_read_test) { /* READ-ONLY */
ret = !rw_mgr_mem_calibrate_read_test(rank_bgn, read_group,
NUM_READ_PB_TESTS,
PASS_ONE_BIT, bit_chk,