Newer
Older
*/
/*
* 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
* If a and b are the number of iteration in 2 nested loops
* it takes the following number of cycles to complete the operation
* number_of_cycles = ((2 + n) * a + 2) * b
* where n is the number of instruction in the inner loop
* One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
* b = FF
*/
rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
SEQ_TRESET_CNTR2_VAL,
RW_MGR_INIT_RESET_1_CKE_0);
/* Bring up clock enable. */
/* tXRP < 250 ck cycles */
delay_for_n_mem_clocks(250);
rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
0);
}
/*
* At the end of calibration we have to program the user settings in, and
* USER hand off the memory to the user.
*/
static void rw_mgr_mem_handoff(void)
{
	/* Restore the user mode register settings (MRS0). */
	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * USER need to wait tMOD (12CK or 15ns) time before issuing
	 * other commands, but we will have plenty of NIOS cycles before
	 * actual handoff so its okay.
	 */
}
/**
* rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
* @rank_bgn: Rank number
* @group: Read/Write Group
* @all_ranks: Test all ranks
*
* Performs a guaranteed read on the patterns we are going to use during a
* read test to ensure memory works.
static int
rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
const u32 all_ranks)
const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
const u32 addr_offset =
(group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
const u32 rank_end = all_ranks ?
RW_MGR_MEM_NUMBER_OF_RANKS :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
const u32 correct_mask_vg = param->read_correct_mask_vg;
u32 tmp_bit_chk, base_rw_mgr, bit_chk;
int vg, r;
int ret = 0;
bit_chk = param->read_correct_mask;
for (r = rank_bgn; r < rank_end; r++) {
/* Request to skip the rank */
if (param->skip_ranks[r])
continue;
/* Set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
/* Load up a constant bursts of read commands */
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
writel(RW_MGR_GUARANTEED_READ,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
writel(RW_MGR_GUARANTEED_READ_CONT,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
tmp_bit_chk = 0;
for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
vg >= 0; vg--) {
/* Reset the FIFOs to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RESET_READ_DATAPATH_OFFSET);
writel(RW_MGR_GUARANTEED_READ,
addr + addr_offset + (vg << 2));
base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
tmp_bit_chk <<= shift_ratio;
tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
bit_chk &= tmp_bit_chk;
writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
if (bit_chk != param->read_correct_mask)
ret = -EIO;
debug_cond(DLEVEL == 1,
"%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
__func__, __LINE__, group, bit_chk,
param->read_correct_mask, ret);
return ret;
/**
* rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
* @rank_bgn: Rank number
* @all_ranks: Test all ranks
*
* Load up the patterns we are going to use during a read test.
*/
static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
const int all_ranks)
const u32 rank_end = all_ranks ?
RW_MGR_MEM_NUMBER_OF_RANKS :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
u32 r;
debug("%s:%d\n", __func__, __LINE__);
for (r = rank_bgn; r < rank_end; r++) {
if (param->skip_ranks[r])
/* request to skip the rank */
continue;
/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
/* Load up a constant bursts */
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET);
}
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
}
/**
* rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
* @rank_bgn: Rank number
* @group: Read/Write group
* @num_tries: Number of retries of the test
* @all_correct: All bits must be correct in the mask
* @bit_chk: Resulting bit mask after the test
* @all_groups: Test all R/W groups
* @all_ranks: Test all ranks
*
* Try a read and see if it returns correct data back. Test has dummy reads
* inserted into the mix used to align DQS enable. Test has more thorough
* checks than the regular read test.
static int
rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
const u32 num_tries, const u32 all_correct,
u32 *bit_chk,
const u32 all_groups, const u32 all_ranks)
const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
const u32 quick_read_mode =
((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
ENABLE_SUPER_QUICK_CALIBRATION);
u32 correct_mask_vg = param->read_correct_mask_vg;
u32 tmp_bit_chk;
u32 base_rw_mgr;
u32 addr;
int r, vg, ret;
*bit_chk = param->read_correct_mask;
for (r = rank_bgn; r < rank_end; r++) {
if (param->skip_ranks[r])
/* request to skip the rank */
continue;
/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
writel(RW_MGR_READ_B2B_WAIT1,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
writel(RW_MGR_READ_B2B_WAIT2,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
if (quick_read_mode)
writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
/* need at least two (1+1) reads to capture failures */
else if (all_groups)
writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
writel(RW_MGR_READ_B2B,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
if (all_groups)
writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
&sdr_rw_load_mgr_regs->load_cntr3);
writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
writel(RW_MGR_READ_B2B,
&sdr_rw_load_jump_mgr_regs->load_jump_add3);
tmp_bit_chk = 0;
for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
vg--) {
/* Reset the FIFOs to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RESET_READ_DATAPATH_OFFSET);
if (all_groups) {
addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_ALL_GROUPS_OFFSET;
} else {
addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
}
writel(RW_MGR_READ_B2B, addr +
((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
vg) << 2));
base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
*bit_chk &= tmp_bit_chk;
}
addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
ret = (*bit_chk == param->read_correct_mask);
debug_cond(DLEVEL == 2,
"%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
__func__, __LINE__, group, all_groups, *bit_chk,
param->read_correct_mask, ret);
ret = (*bit_chk != 0x00);
debug_cond(DLEVEL == 2,
"%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
__func__, __LINE__, group, all_groups, *bit_chk,
0, ret);
return ret;
/**
* rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
* @grp: Read/Write group
* @num_tries: Number of retries of the test
* @all_correct: All bits must be correct in the mask
* @all_groups: Test all R/W groups
*
* Perform a READ test across all memory ranks.
*/
static int
rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
					 const u32 all_correct,
					 const u32 all_groups)
{
	u32 bit_chk;

	/* rank_bgn = 0 and all_ranks = 1 covers the whole device. */
	return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
					      &bit_chk, all_groups, 1);
}
/**
* rw_mgr_incr_vfifo() - Increase VFIFO value
* @grp: Read/Write group
*
* Increase VFIFO value.
*/
static void rw_mgr_incr_vfifo(const u32 grp)
writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
/**
* rw_mgr_decr_vfifo() - Decrease VFIFO value
* @grp: Read/Write group
*
* Decrease VFIFO value.
*/
static void rw_mgr_decr_vfifo(const u32 grp)
for (i = 0; i < VFIFO_SIZE - 1; i++)
/**
* find_vfifo_failing_read() - Push VFIFO to get a failing read
* @grp: Read/Write group
*
* Push VFIFO until a failing read happens.
*/
static int find_vfifo_failing_read(const u32 grp)
u32 v, ret, fail_cnt = 0;
for (v = 0; v < VFIFO_SIZE; v++) {
debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
__func__, __LINE__, v);
ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
PASS_ONE_BIT, 0);
fail_cnt++;
if (fail_cnt == 2)
/* No failing read found! Something must have gone wrong. */
debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
return 0;
/**
* sdr_find_phase_delay() - Find DQS enable phase or delay
* @working: If 1, look for working phase/delay, if 0, look for non-working
* @delay: If 1, look for delay, if 0, look for phase
* @grp: Read/Write group
* @work: Working window position
* @work_inc: Working window increment
* @pd: DQS Phase/Delay Iterator
*
* Find working or non-working DQS enable phase setting.
*/
static int sdr_find_phase_delay(int working, int delay, const u32 grp,
				u32 *work, const u32 work_inc, u32 *pd)
{
	/* Upper bound depends on whether we sweep delay taps or phase taps. */
	const u32 limit = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
	u32 pass;

	/* Sweep the phase/delay iterator upwards until the target is hit. */
	while (*pd <= limit) {
		if (delay)
			scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
		else
			scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);

		pass = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
								PASS_ONE_BIT,
								0);

		/* Looking either for the first pass or the first failure. */
		if (working ? pass : !pass)
			return 0;

		if (work)
			*work += work_inc;

		(*pd)++;
	}

	/* Swept the whole range without finding the requested setting. */
	return -EINVAL;
}
/**
* sdr_find_phase() - Find DQS enable phase
* @working: If 1, look for working phase, if 0, look for non-working phase
* @grp: Read/Write group
* @work: Working window position
* @i: Iterator
* @p: DQS Phase Iterator
*
* Find working or non-working DQS enable phase setting.
*/
static int sdr_find_phase(int working, const u32 grp, u32 *work,
			  u32 *i, u32 *p)
{
	/* Allow one extra VFIFO step when hunting for the failing edge. */
	const u32 end = VFIFO_SIZE + (working ? 0 : 1);
	int ret;

	for (; *i < end; (*i)++) {
		if (working)
			*p = 0;

		ret = sdr_find_phase_delay(working, 0, grp, work,
					   IO_DELAY_PER_OPA_TAP, p);
		if (!ret)
			return 0;

		if (*p > IO_DQS_EN_PHASE_MAX) {
			/* Fiddle with FIFO. */
			rw_mgr_incr_vfifo(grp);
			if (!working)
				*p = 0;
		}
	}

	return -EINVAL;
}
/**
* sdr_working_phase() - Find working DQS enable phase
* @grp: Read/Write group
* @work_bgn: Working window start position
* @d: dtaps output value
* @p: DQS Phase Iterator
* @i: Iterator
*
* Find working DQS enable phase setting.
*/
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
			     u32 *p, u32 *i)
{
	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	int ret;

	*work_bgn = 0;

	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
		*i = 0;
		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
		ret = sdr_find_phase(1, grp, work_bgn, i, p);
		if (!ret)
			return 0;
		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Cannot find working solution */
	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
		   __func__, __LINE__);
	return -EINVAL;
}
/**
* sdr_backup_phase() - Find DQS enable backup phase
* @grp: Read/Write group
* @work_bgn: Working window start position
* @p: DQS Phase Iterator
*
* Find DQS enable backup phase setting.
*/
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, d;
	int ret;

	/* Special case code for backing up a phase */
	if (*p == 0) {
		*p = IO_DQS_EN_PHASE_MAX;
		rw_mgr_decr_vfifo(grp);
	} else {
		(*p)--;
	}
	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);

	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);

		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
							       PASS_ONE_BIT, 0);
		if (ret) {
			/* Found an earlier working point; adopt it. */
			*work_bgn = tmp_delay;
			break;
		}

		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

	/* Restore VFIFO to old state before we decremented it (if needed). */
	(*p)++;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}
/**
* sdr_nonworking_phase() - Find non-working DQS enable phase
* @grp: Read/Write group
* @work_end: Working window end position
* @p: DQS Phase Iterator
* @i: Iterator
*
* Find non-working DQS enable phase setting.
*/
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

	(*p)++;
	*work_end += IO_DELAY_PER_OPA_TAP;
	if (*p > IO_DQS_EN_PHASE_MAX) {
		/* Fiddle with FIFO. */
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

	ret = sdr_find_phase(0, grp, work_end, i, p);
	if (ret) {
		/* Cannot see edge of failing read. */
		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}
/**
* sdr_find_window_center() - Find center of the working DQS window.
* @grp: Read/Write group
* @work_bgn: First working settings
* @work_end: Last working settings
*
* Find center of the working DQS enable window.
*/
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
u32 work_mid;
work_mid = (work_bgn + work_end) / 2;
debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
work_bgn, work_end, work_mid);
/* Get the middle delay to be less than a VFIFO delay */
tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;
debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
work_mid %= tmp_delay;
debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
p = tmp_delay / IO_DELAY_PER_OPA_TAP;
debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
if (d > IO_DQS_EN_DELAY_MAX)
d = IO_DQS_EN_DELAY_MAX;
tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
/*
* push vfifo until we can successfully calibrate. We can do this
* because the largest possible margin in 1 VFIFO cycle.
*/
for (i = 0; i < VFIFO_SIZE; i++) {
debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
debug_cond(DLEVEL == 2,
"%s:%d center: found: ptap=%u dtap=%u\n",
__func__, __LINE__, p, d);
/* Fiddle with FIFO. */
debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
__func__, __LINE__);
return -EINVAL;
/**
* rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
* @grp: Read/Write Group
*
* Find a good DQS enable to use.
*/
static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
u32 d, p, i;
u32 dtaps_per_ptap;
u32 work_bgn, work_end;
u32 found_passing_read, found_failing_read, initial_failing_dtap;
int ret;
debug("%s:%d %u\n", __func__, __LINE__, grp);
reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
/* Step 0: Determine number of delay taps for each phase tap. */
dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
/* Step 1: First push vfifo until we get a failing read. */
/* Step 2: Find first working phase, increment in ptaps. */
ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
if (ret)
return ret;
work_end = work_bgn;
/*
* If d is 0 then the working window covers a phase tap and we can
* follow the old procedure. Otherwise, we've found the beginning
* and we need to increment the dtaps until we find the end.
*/
if (d == 0) {
/*
* Step 3a: If we have room, back off by one and
* increment in dtaps.
*/
sdr_backup_phase(grp, &work_bgn, &p);
/*
* Step 4a: go forward from working phase to non working
* phase, increment in ptaps.
*/
ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
if (ret)
return ret;
/* Step 5a: Back off one from last, increment in dtaps. */
/* Special case code for backing up a phase */
if (p == 0) {
p = IO_DQS_EN_PHASE_MAX;
} else {
p = p - 1;
}
work_end -= IO_DELAY_PER_OPA_TAP;
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
d = 0;
debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
__func__, __LINE__, p);
/* The dtap increment to find the failing edge is done here. */
sdr_find_phase_delay(0, 1, grp, &work_end,
IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);
/* Go back to working dtap */
if (d != 0)
work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
debug_cond(DLEVEL == 2,
"%s:%d p/d: ptap=%u dtap=%u end=%u\n",
__func__, __LINE__, p, d - 1, work_end);
if (work_end < work_bgn) {
/* nil range */
debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
__func__, __LINE__);
return -EINVAL;
debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
__func__, __LINE__, work_bgn, work_end);
/*
* We need to calculate the number of dtaps that equal a ptap.
* To do that we'll back up a ptap and re-find the edge of the
* window using dtaps
debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
__func__, __LINE__);
/* Special case code for backing up a phase */
if (p == 0) {
p = IO_DQS_EN_PHASE_MAX;
debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
__func__, __LINE__, p);
} else {
p = p - 1;
debug_cond(DLEVEL == 2, "%s:%d backedup phase only: p=%u",
__func__, __LINE__, p);
}
scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
/*
* Increase dtap until we first see a passing read (in case the
* window is smaller than a ptap), and then a failing read to
* mark the edge of the window again.
/* Find a passing read. */
debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
__func__, __LINE__);
initial_failing_dtap = d;
found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
if (found_passing_read) {
/* Find a failing read. */
debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
__func__, __LINE__);
d++;
found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
&d);
debug_cond(DLEVEL == 1,
"%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
__func__, __LINE__);
}
/*
* The dynamically calculated dtaps_per_ptap is only valid if we
* found a passing/failing read. If we didn't, it means d hit the max
* (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
* statically calculated value.
*/
if (found_passing_read && found_failing_read)
dtaps_per_ptap = d - initial_failing_dtap;
writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
__func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
/* Step 6: Find the centre of the window. */
ret = sdr_find_window_center(grp, work_bgn, work_end);
return ret;
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
/**
* search_left_edge() - Find left edge of DQ/DQS working phase
* @write: Perform read (Stage 2) or write (Stage 3) calibration
* @rank_bgn: Rank number
* @write_group: Write Group
* @read_group: Read Group
* @test_bgn: Rank number to begin the test
* @bit_chk: Resulting bit mask after the test
* @sticky_bit_chk: Resulting sticky bit mask after the test
* @left_edge: Left edge of the DQ/DQS phase
* @right_edge: Right edge of the DQ/DQS phase
* @use_read_test: Perform read test
*
* Find left edge of DQ/DQS working phase.
*/
static void search_left_edge(const int write, const int rank_bgn,
	const u32 write_group, const u32 read_group, const u32 test_bgn,
	u32 *bit_chk, u32 *sticky_bit_chk,
	int *left_edge, int *right_edge, const u32 use_read_test)
{
	/*
	 * Pass/fail mask and delay-chain limits differ between the write
	 * (stage 3) and read (stage 2) calibration paths.
	 */
	const u32 correct_mask = write ? param->write_correct_mask :
					 param->read_correct_mask;
	const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
	const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
				    RW_MGR_MEM_DQ_PER_READ_DQS;
	u32 stop;
	int i, d;

	/* Sweep the DQ delay chain upwards from 0. */
	for (d = 0; d <= dqs_max; d++) {
		if (write)
			scc_mgr_apply_group_dq_out1_delay(d);
		else
			scc_mgr_apply_group_dq_in_delay(test_bgn, d);

		writel(0, &sdr_scc_mgr->update);
		/*
		 * Stop searching when the read test doesn't pass AND when
		 * we've seen a passing read on every bit.
		 */
		if (write) { /* WRITE-ONLY */
			stop = !rw_mgr_mem_calibrate_write_test(rank_bgn,
							write_group,
							0, PASS_ONE_BIT,
							bit_chk, 0);
		} else if (use_read_test) { /* READ-ONLY */
			stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
					read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
					bit_chk, 0, 0);
		} else { /* READ-ONLY */
			/*
			 * Use a write test as a proxy, then shift its result
			 * to the bit lane of this read group within the
			 * (wider) write group.
			 */
			rw_mgr_mem_calibrate_write_test(rank_bgn,
							write_group,
							0, PASS_ONE_BIT,
							bit_chk, 0);
			*bit_chk = *bit_chk >> (per_dqs *
				(read_group - (write_group *
					RW_MGR_MEM_IF_READ_DQS_WIDTH /
					RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
			stop = (*bit_chk == 0);
		}
		*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
		stop = stop && (*sticky_bit_chk == correct_mask);
		debug_cond(DLEVEL == 2,
			   "%s:%d center(left): dtap=%u => %u == %u && %u", __func__, __LINE__, d,
			   *sticky_bit_chk, correct_mask, stop);

		if (stop == 1)
			break;

		/* stop != 1 */
		/* Record per-bit edge information from this delay step. */
		for (i = 0; i < per_dqs; i++) {
			if (*bit_chk & 1) {
				/*
				 * Remember a passing test as
				 * the left_edge.
				 */
				left_edge[i] = d;
			} else {
				/*
				 * If a left edge has not been seen
				 * yet, then a future passing test
				 * will mark this edge as the right
				 * edge.
				 */
				if (left_edge[i] == delay_max + 1)
					right_edge[i] = -(d + 1);
			}
			*bit_chk = *bit_chk >> 1;
		}
	}

	/* Reset DQ delay chains to 0 */
	if (write)
		scc_mgr_apply_group_dq_out1_delay(0);
	else
		scc_mgr_apply_group_dq_in_delay(test_bgn, 0);

	/* Rebuild the sticky mask from the recorded edges, MSB first. */
	*sticky_bit_chk = 0;
	for (i = per_dqs - 1; i >= 0; i--) {
		debug_cond(DLEVEL == 2,
			   "%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
			   __func__, __LINE__, i, left_edge[i],
			   i, right_edge[i]);

		/*
		 * Check for cases where we haven't found the left edge,
		 * which makes our assignment of the the right edge invalid.
		 * Reset it to the illegal value.
		 */
		if ((left_edge[i] == delay_max + 1) &&
		    (right_edge[i] != delay_max + 1)) {
			right_edge[i] = delay_max + 1;
			debug_cond(DLEVEL == 2,
				   "%s:%d vfifo_center: reset right_edge[%u]: %d\n",
				   __func__, __LINE__, i, right_edge[i]);
		}

		/*
		 * Reset sticky bit
		 * READ: except for bits where we have seen both
		 *       the left and right edge.
		 * WRITE: except for bits where we have seen the
		 *        left edge.
		 */
		*sticky_bit_chk <<= 1;
		if (write) {
			if (left_edge[i] != delay_max + 1)
				*sticky_bit_chk |= 1;
		} else {
			if ((left_edge[i] != delay_max + 1) &&
			    (right_edge[i] != delay_max + 1))
				*sticky_bit_chk |= 1;
		}
	}
}
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
/**
* search_right_edge() - Find right edge of DQ/DQS working phase
* @write: Perform read (Stage 2) or write (Stage 3) calibration
* @rank_bgn: Rank number
* @write_group: Write Group
* @read_group: Read Group
* @start_dqs: DQS start phase
* @start_dqs_en: DQS enable start phase
* @bit_chk: Resulting bit mask after the test
* @sticky_bit_chk: Resulting sticky bit mask after the test
* @left_edge: Left edge of the DQ/DQS phase
* @right_edge: Right edge of the DQ/DQS phase
* @use_read_test: Perform read test
*
* Find right edge of DQ/DQS working phase.
*/
static int search_right_edge(const int write, const int rank_bgn,
const u32 write_group, const u32 read_group,
const int start_dqs, const int start_dqs_en,
u32 *bit_chk, u32 *sticky_bit_chk,
int *left_edge, int *right_edge, const u32 use_read_test)
{
const u32 correct_mask = write ? param->write_correct_mask :
param->read_correct_mask;
const u32 delay_max = write ? IO_IO_OUT1_DELAY_MAX : IO_IO_IN_DELAY_MAX;
const u32 dqs_max = write ? IO_IO_OUT1_DELAY_MAX : IO_DQS_IN_DELAY_MAX;
const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
RW_MGR_MEM_DQ_PER_READ_DQS;
u32 stop;
int i, d;
for (d = 0; d <= dqs_max - start_dqs; d++) {
if (write) { /* WRITE-ONLY */
scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
d + start_dqs);
} else { /* READ-ONLY */
scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
if (IO_SHIFT_DQS_EN_WHEN_SHIFT_DQS) {
uint32_t delay = d + start_dqs_en;
if (delay > IO_DQS_EN_DELAY_MAX)
delay = IO_DQS_EN_DELAY_MAX;
scc_mgr_set_dqs_en_delay(read_group, delay);
}
scc_mgr_load_dqs(read_group);
}
writel(0, &sdr_scc_mgr->update);
/*
* Stop searching when the read test doesn't pass AND when
* we've seen a passing read on every bit.
*/
if (write) { /* WRITE-ONLY */
stop = !rw_mgr_mem_calibrate_write_test(rank_bgn,
write_group,
0, PASS_ONE_BIT,
bit_chk, 0);
} else if (use_read_test) { /* READ-ONLY */
stop = !rw_mgr_mem_calibrate_read_test(rank_bgn,
read_group, NUM_READ_PB_TESTS, PASS_ONE_BIT,
bit_chk, 0, 0);
} else { /* READ-ONLY */
rw_mgr_mem_calibrate_write_test(rank_bgn,
write_group,
0, PASS_ONE_BIT,
bit_chk, 0);
*bit_chk = *bit_chk >> (per_dqs *
(read_group - (write_group *
RW_MGR_MEM_IF_READ_DQS_WIDTH /
RW_MGR_MEM_IF_WRITE_DQS_WIDTH)));
stop = (*bit_chk == 0);
}
*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
stop = stop && (*sticky_bit_chk == correct_mask);
debug_cond(DLEVEL == 2,
"%s:%d center(right): dtap=%u => %u == %u && %u", __func__, __LINE__, d,
*sticky_bit_chk, correct_mask, stop);