0, 0);
} else { /* READ-ONLY */
rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 0,
PASS_ONE_BIT, bit_chk, 0);
*bit_chk = *bit_chk >> (per_dqs *
(read_group - (write_group * ratio)));
ret = (*bit_chk == 0);
}
*sticky_bit_chk = *sticky_bit_chk | *bit_chk;
ret = ret && (*sticky_bit_chk == correct_mask);
"%s:%d center(left): dtap=%u => %u == %u && %u",
__func__, __LINE__, d,
*sticky_bit_chk, correct_mask, ret);
return ret;
}
/**
* search_left_edge() - Find left edge of DQ/DQS working phase
* @write: Perform read (Stage 2) or write (Stage 3) calibration
* @rank_bgn: Rank number
* @write_group: Write Group
* @read_group: Read Group
* @test_bgn: Rank number to begin the test
* @sticky_bit_chk: Resulting sticky bit mask after the test
* @left_edge: Left edge of the DQ/DQS phase
* @right_edge: Right edge of the DQ/DQS phase
* @use_read_test: Perform read test
*
* Find left edge of DQ/DQS working phase.
*/
static void search_left_edge(const int write, const int rank_bgn,
const u32 write_group, const u32 read_group, const u32 test_bgn,
u32 *sticky_bit_chk,
int *left_edge, int *right_edge, const u32 use_read_test)
{
const u32 delay_max = write ? iocfg->io_out1_delay_max :
iocfg->io_in_delay_max;
const u32 dqs_max = write ? iocfg->io_out1_delay_max :
iocfg->dqs_in_delay_max;
const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
rwcfg->mem_dq_per_read_dqs;
u32 stop, bit_chk;
int i, d;
for (d = 0; d <= dqs_max; d++) {
if (write)
scc_mgr_apply_group_dq_out1_delay(d);
else
scc_mgr_apply_group_dq_in_delay(test_bgn, d);
writel(0, &sdr_scc_mgr->update);
stop = search_stop_check(write, d, rank_bgn, write_group,
read_group, &bit_chk, sticky_bit_chk,
use_read_test);
if (stop == 1)
break;
/* stop != 1 */
for (i = 0; i < per_dqs; i++) {
if (bit_chk & 1) {
/*
* Remember a passing test as
* the left_edge.
*/
left_edge[i] = d;
} else {
/*
* If a left edge has not been seen
* yet, then a future passing test
* will mark this edge as the right
* edge.
*/
if (left_edge[i] == delay_max + 1)
right_edge[i] = -(d + 1);
}
bit_chk >>= 1;
}
}
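/*
* At this point left_edge[i] is either delay_max + 1 (no passing tap
* seen) or the most recent passing tap d, and right_edge[i] is either
* delay_max + 1 or the negative placeholder -(d + 1) recorded when a
* tap failed before any left edge was found.
*/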
/* Reset DQ delay chains to 0 */
if (write)
scc_mgr_apply_group_dq_out1_delay(0);
else
scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
*sticky_bit_chk = 0;
for (i = per_dqs - 1; i >= 0; i--) {
"%s:%d vfifo_center: left_edge[%u]: %d right_edge[%u]: %d\n",
__func__, __LINE__, i, left_edge[i],
i, right_edge[i]);
/*
* Check for cases where we haven't found the left edge,
* which makes our assignment of the right edge invalid.
* Reset it to the illegal value.
*/
if ((left_edge[i] == delay_max + 1) &&
(right_edge[i] != delay_max + 1)) {
right_edge[i] = delay_max + 1;
"%s:%d vfifo_center: reset right_edge[%u]: %d\n",
__func__, __LINE__, i, right_edge[i]);
}
/*
* Reset sticky bit
* READ: except for bits where we have seen both
* the left and right edge.
* WRITE: except for bits where we have seen the
* left edge.
*/
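/*
* Illustrative example (assumed pattern, read case): with 8 bits per
* DQS and both edges found only on bits 0-5, the loop below rebuilds
* *sticky_bit_chk as 0b00111111, so the following right-edge search
* cannot stop until bits 6 and 7 pass as well.
*/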
*sticky_bit_chk <<= 1;
if (write) {
if (left_edge[i] != delay_max + 1)
*sticky_bit_chk |= 1;
} else {
if ((left_edge[i] != delay_max + 1) &&
(right_edge[i] != delay_max + 1))
*sticky_bit_chk |= 1;
}
}
}
/**
* search_right_edge() - Find right edge of DQ/DQS working phase
* @write: Perform read (Stage 2) or write (Stage 3) calibration
* @rank_bgn: Rank number
* @write_group: Write Group
* @read_group: Read Group
* @start_dqs: DQS start phase
* @start_dqs_en: DQS enable start phase
* @sticky_bit_chk: Resulting sticky bit mask after the test
* @left_edge: Left edge of the DQ/DQS phase
* @right_edge: Right edge of the DQ/DQS phase
* @use_read_test: Perform read test
*
* Find right edge of DQ/DQS working phase.
*/
static int search_right_edge(const int write, const int rank_bgn,
const u32 write_group, const u32 read_group,
const int start_dqs, const int start_dqs_en,
u32 *sticky_bit_chk,
int *left_edge, int *right_edge, const u32 use_read_test)
{
const u32 delay_max = write ? iocfg->io_out1_delay_max :
iocfg->io_in_delay_max;
const u32 dqs_max = write ? iocfg->io_out1_delay_max :
iocfg->dqs_in_delay_max;
const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
rwcfg->mem_dq_per_read_dqs;
u32 stop, bit_chk;
int i, d;
for (d = 0; d <= dqs_max - start_dqs; d++) {
if (write) { /* WRITE-ONLY */
scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
d + start_dqs);
} else { /* READ-ONLY */
scc_mgr_set_dqs_bus_in_delay(read_group, d + start_dqs);
if (iocfg->shift_dqs_en_when_shift_dqs) {
u32 delay = d + start_dqs_en;
if (delay > iocfg->dqs_en_delay_max)
delay = iocfg->dqs_en_delay_max;
scc_mgr_set_dqs_en_delay(read_group, delay);
}
scc_mgr_load_dqs(read_group);
}
writel(0, &sdr_scc_mgr->update);
stop = search_stop_check(write, d, rank_bgn, write_group,
read_group, &bit_chk, sticky_bit_chk,
use_read_test);
if (stop == 1) {
if (write && (d == 0)) { /* WRITE-ONLY */
for (i = 0; i < rwcfg->mem_dq_per_write_dqs;
i++) {
/*
* d = 0 failed, but it passed when
* testing the left edge, so it must be
* marginal, set it to -1
*/
if (right_edge[i] == delay_max + 1 &&
left_edge[i] != delay_max + 1)
right_edge[i] = -1;
}
}
break;
}
/* stop != 1 */
for (i = 0; i < per_dqs; i++) {
if (bit_chk & 1) {
/*
* Remember a passing test as
* the right_edge.
*/
right_edge[i] = d;
} else {
if (d != 0) {
/*
* If a right edge has not
* been seen yet, then a future
* passing test will mark this
* edge as the left edge.
*/
if (right_edge[i] == delay_max + 1)
left_edge[i] = -(d + 1);
} else {
/*
* d = 0 failed, but it passed
* when testing the left edge,
* so it must be marginal, set
* it to -1
*/
if (right_edge[i] == delay_max + 1 &&
left_edge[i] != delay_max + 1)
right_edge[i] = -1;
/*
* If a right edge has not been
* seen yet, then a future
* passing test will mark this
* edge as the left edge.
*/
else if (right_edge[i] == delay_max + 1)
left_edge[i] = -(d + 1);
}
}
debug_cond(DLEVEL >= 2, "%s:%d center[r,d=%u]: ",
__func__, __LINE__, d);
"bit_chk_test=%i left_edge[%u]: %d ",
bit_chk & 1, i, left_edge[i]);
debug_cond(DLEVEL >= 2, "right_edge[%u]: %d\n", i,
right_edge[i]);
bit_chk >>= 1;
}
}
/* Check that all bits have a window */
for (i = 0; i < per_dqs; i++) {
"%s:%d write_center: left_edge[%u]: %d right_edge[%u]: %d",
__func__, __LINE__, i, left_edge[i],
i, right_edge[i]);
if ((left_edge[i] == dqs_max + 1) ||
(right_edge[i] == dqs_max + 1))
return i + 1; /* FIXME: If we fail, retval > 0 */
}
return 0;
}
/**
* get_window_mid_index() - Find the best middle setting of DQ/DQS phase
* @write: Perform read (Stage 2) or write (Stage 3) calibration
* @left_edge: Left edge of the DQ/DQS phase
* @right_edge: Right edge of the DQ/DQS phase
* @mid_min: Best DQ/DQS phase middle setting
*
* Find index and value of the middle of the DQ/DQS working phase.
*/
static int get_window_mid_index(const int write, int *left_edge,
int *right_edge, int *mid_min)
{
const u32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
rwcfg->mem_dq_per_read_dqs;
int i, mid, min_index;
/* Find middle of window for each DQ bit */
*mid_min = left_edge[0] - right_edge[0];
min_index = 0;
for (i = 1; i < per_dqs; i++) {
mid = left_edge[i] - right_edge[i];
if (mid < *mid_min) {
*mid_min = mid;
min_index = i;
}
}
/*
* -mid_min/2 represents the amount that we need to move DQS.
* If mid_min is odd and positive we'll need to add one to make
* sure the rounding in further calculations is correct (always
* bias to the right), so just add 1 for all positive values.
*/
if (*mid_min > 0)
(*mid_min)++;
*mid_min = *mid_min / 2;
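/*
* Worked example (illustrative numbers): mid_min = 5 is odd and
* positive, so 5 + 1 = 6 and 6 / 2 = 3; the DQS move gets the extra
* tap (bias to the right) instead of truncating to 2. A negative
* value such as -5 is left untouched and divides to -2.
*/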
debug_cond(DLEVEL >= 1, "%s:%d vfifo_center: *mid_min=%d (index=%u)\n",
__func__, __LINE__, *mid_min, min_index);
return min_index;
}
/**
* center_dq_windows() - Center the DQ/DQS windows
* @write: Perform read (Stage 2) or write (Stage 3) calibration
* @left_edge: Left edge of the DQ/DQS phase
* @right_edge: Right edge of the DQ/DQS phase
* @mid_min: Adjusted DQ/DQS phase middle setting
* @orig_mid_min: Original DQ/DQS phase middle setting
* @min_index: DQ/DQS phase middle setting index
* @test_bgn: Rank number to begin the test
* @dq_margin: Amount of shift for the DQ
* @dqs_margin: Amount of shift for the DQS
*
* Align the DQ/DQS windows in each group.
*/
static void center_dq_windows(const int write, int *left_edge, int *right_edge,
const int mid_min, const int orig_mid_min,
const int min_index, const int test_bgn,
int *dq_margin, int *dqs_margin)
{
const s32 delay_max = write ? iocfg->io_out1_delay_max :
iocfg->io_in_delay_max;
const s32 per_dqs = write ? rwcfg->mem_dq_per_write_dqs :
rwcfg->mem_dq_per_read_dqs;
const s32 delay_off = write ? SCC_MGR_IO_OUT1_DELAY_OFFSET :
SCC_MGR_IO_IN_DELAY_OFFSET;
const s32 addr = SDR_PHYGRP_SCCGRP_ADDRESS | delay_off;
s32 temp_dq_io_delay1;
int shift_dq, i, p;
/* Initialize data for export structures */
*dqs_margin = delay_max + 1;
*dq_margin = delay_max + 1;
/* add delay to bring centre of all DQ windows to the same "level" */
for (i = 0, p = test_bgn; i < per_dqs; i++, p++) {
/* Use values before divide by 2 to reduce round off error */
shift_dq = (left_edge[i] - right_edge[i] -
(left_edge[min_index] - right_edge[min_index]))/2 +
(orig_mid_min - mid_min);
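/*
* Illustrative numbers (assumed, not measured): if this bit has
* left_edge = 20 and right_edge = 10 while the narrowest window
* (min_index) has 16 and 10, and orig_mid_min == mid_min, then
* shift_dq = ((20 - 10) - (16 - 10)) / 2 = 2, i.e. this DQ delay is
* increased by two taps to line its window centre up with the
* narrowest one.
*/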
"vfifo_center: before: shift_dq[%u]=%d\n",
i, shift_dq);
temp_dq_io_delay1 = readl(addr + (i << 2));
if (shift_dq + temp_dq_io_delay1 > delay_max)
shift_dq = delay_max - temp_dq_io_delay1;
else if (shift_dq + temp_dq_io_delay1 < 0)
shift_dq = -temp_dq_io_delay1;
"vfifo_center: after: shift_dq[%u]=%d\n",
i, shift_dq);
if (write)
scc_mgr_set_dq_out1_delay(i,
temp_dq_io_delay1 + shift_dq);
else
scc_mgr_set_dq_in_delay(p,
temp_dq_io_delay1 + shift_dq);
scc_mgr_load_dq(p);
"vfifo_center: margin[%u]=[%d,%d]\n", i,
left_edge[i] - shift_dq + (-mid_min),
right_edge[i] + shift_dq - (-mid_min));
/* To determine values for export structures */
if (left_edge[i] - shift_dq + (-mid_min) < *dq_margin)
*dq_margin = left_edge[i] - shift_dq + (-mid_min);
if (right_edge[i] + shift_dq - (-mid_min) < *dqs_margin)
*dqs_margin = right_edge[i] + shift_dq - (-mid_min);
}
}
/**
* rw_mgr_mem_calibrate_vfifo_center() - Per-bit deskew DQ and centering
* @rank_bgn: Rank number
* @rw_group: Read/Write Group
* @test_bgn: Rank at which the test begins
* @use_read_test: Perform a read test
* @update_fom: Update FOM
*
* Per-bit deskew DQ and centering.
*/
static int rw_mgr_mem_calibrate_vfifo_center(const u32 rank_bgn,
const u32 rw_group, const u32 test_bgn,
const int use_read_test, const int update_fom)
{
const u32 addr =
SDR_PHYGRP_SCCGRP_ADDRESS + SCC_MGR_DQS_IN_DELAY_OFFSET +
(rw_group << 2);
/*
* Store these as signed since there are comparisons with
* signed numbers.
*/
u32 sticky_bit_chk;
int32_t left_edge[rwcfg->mem_dq_per_read_dqs];
int32_t right_edge[rwcfg->mem_dq_per_read_dqs];
int32_t orig_mid_min, mid_min;
int32_t new_dqs, start_dqs, start_dqs_en = 0, final_dqs_en;
int32_t dq_margin, dqs_margin;
int i, min_index;
int ret;
debug("%s:%d: %u %u", __func__, __LINE__, rw_group, test_bgn);
start_dqs = readl(addr);
if (iocfg->shift_dqs_en_when_shift_dqs)
start_dqs_en = readl(addr - iocfg->dqs_en_delay_offset);
/* set the left and right edge of each bit to an illegal value */
/* use (iocfg->io_in_delay_max + 1) as an illegal value */
sticky_bit_chk = 0;
for (i = 0; i < rwcfg->mem_dq_per_read_dqs; i++) {
left_edge[i] = iocfg->io_in_delay_max + 1;
right_edge[i] = iocfg->io_in_delay_max + 1;
}
/* Search for the left edge of the window for each bit */
search_left_edge(0, rank_bgn, rw_group, rw_group, test_bgn,
&sticky_bit_chk,
left_edge, right_edge, use_read_test);
/* Search for the right edge of the window for each bit */
ret = search_right_edge(0, rank_bgn, rw_group, rw_group,
start_dqs, start_dqs_en,
&sticky_bit_chk,
left_edge, right_edge, use_read_test);
if (ret) {
/*
* Restore delay chain settings before letting the loop
* in rw_mgr_mem_calibrate_vfifo retry different
* dqs/ck relationships.
*/
scc_mgr_set_dqs_bus_in_delay(rw_group, start_dqs);
if (iocfg->shift_dqs_en_when_shift_dqs)
scc_mgr_set_dqs_en_delay(rw_group, start_dqs_en);
scc_mgr_load_dqs(rw_group);
writel(0, &sdr_scc_mgr->update);
"%s:%d vfifo_center: failed to find edge [%u]: %d %d",
__func__, __LINE__, i, left_edge[i], right_edge[i]);
if (use_read_test) {
set_failing_group_stage(rw_group *
rwcfg->mem_dq_per_read_dqs + i,
CAL_STAGE_VFIFO,
CAL_SUBSTAGE_VFIFO_CENTER);
} else {
set_failing_group_stage(rw_group *
rwcfg->mem_dq_per_read_dqs + i,
CAL_STAGE_VFIFO_AFTER_WRITES,
CAL_SUBSTAGE_VFIFO_CENTER);
}
return -EIO;
}
min_index = get_window_mid_index(0, left_edge, right_edge, &mid_min);
/* Determine the amount we can change DQS (which is -mid_min) */
orig_mid_min = mid_min;
new_dqs = start_dqs - mid_min;
if (new_dqs > iocfg->dqs_in_delay_max)
new_dqs = iocfg->dqs_in_delay_max;
else if (new_dqs < 0)
new_dqs = 0;
mid_min = start_dqs - new_dqs;
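/*
* Illustrative numbers (assumed): start_dqs = 10, mid_min = -3 gives
* new_dqs = 13; with dqs_in_delay_max = 12 this clamps to 12 and
* mid_min is recomputed as -2, so the DQ windows are only compensated
* for the shift that is actually applied to DQS.
*/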
debug_cond(DLEVEL >= 1, "vfifo_center: new mid_min=%d new_dqs=%d\n",
mid_min, new_dqs);
if (iocfg->shift_dqs_en_when_shift_dqs) {
if (start_dqs_en - mid_min > iocfg->dqs_en_delay_max)
mid_min += start_dqs_en - mid_min -
iocfg->dqs_en_delay_max;
else if (start_dqs_en - mid_min < 0)
mid_min += start_dqs_en - mid_min;
}
new_dqs = start_dqs - mid_min;
"vfifo_center: start_dqs=%d start_dqs_en=%d new_dqs=%d mid_min=%d\n",
start_dqs,
iocfg->shift_dqs_en_when_shift_dqs ? start_dqs_en : -1,
new_dqs, mid_min);
/* Add delay to bring centre of all DQ windows to the same "level". */
center_dq_windows(0, left_edge, right_edge, mid_min, orig_mid_min,
min_index, test_bgn, &dq_margin, &dqs_margin);
/* Move DQS-en */
if (iocfg->shift_dqs_en_when_shift_dqs) {
final_dqs_en = start_dqs_en - mid_min;
scc_mgr_set_dqs_en_delay(rw_group, final_dqs_en);
scc_mgr_load_dqs(rw_group);
}
/* Move DQS */
scc_mgr_set_dqs_bus_in_delay(rw_group, new_dqs);
scc_mgr_load_dqs(rw_group);
"%s:%d vfifo_center: dq_margin=%d dqs_margin=%d",
__func__, __LINE__, dq_margin, dqs_margin);
/*
* Do not remove this line as it makes sure all of our decisions
* have been applied. Apply the update bit.
*/
writel(0, &sdr_scc_mgr->update);
if ((dq_margin < 0) || (dqs_margin < 0))
return -EINVAL;
return 0;
}
/**
* rw_mgr_mem_calibrate_guaranteed_write() - Perform guaranteed write into the device
* @rw_group: Read/Write Group
* @phase: DQ/DQS phase
*
* Because initially no communication can be reliably performed with the memory
* device, the sequencer uses a guaranteed write mechanism to write data into
* the memory device.
*/
static int rw_mgr_mem_calibrate_guaranteed_write(const u32 rw_group,
const u32 phase)
{
int ret;
/* Set a particular DQ/DQS phase. */
scc_mgr_set_dqdqs_output_phase_all_ranks(rw_group, phase);
debug_cond(DLEVEL >= 1, "%s:%d guaranteed write: g=%u p=%u\n",
__func__, __LINE__, rw_group, phase);
/*
* Altera EMI_RM 2015.05.04 :: Figure 1-25
* Load up the patterns used by read calibration using the
* current DQDQS phase.
*/
rw_mgr_mem_calibrate_read_load_patterns(0, 1);
if (gbl->phy_debug_mode_flags & PHY_DEBUG_DISABLE_GUARANTEED_READ)
return 0;
/*
* Altera EMI_RM 2015.05.04 :: Figure 1-26
* Back-to-Back reads of the patterns used for calibration.
*/
ret = rw_mgr_mem_calibrate_read_test_patterns(0, rw_group, 1);
if (ret)
"%s:%d Guaranteed read test failed: g=%u p=%u\n",
__func__, __LINE__, rw_group, phase);
return ret;
}
/**
* rw_mgr_mem_calibrate_dqs_enable_calibration() - DQS Enable Calibration
* @rw_group: Read/Write Group
* @test_bgn: Rank at which the test begins
*
* DQS enable calibration ensures reliable capture of the DQ signal without
* glitches on the DQS line.
*/
static int rw_mgr_mem_calibrate_dqs_enable_calibration(const u32 rw_group,
const u32 test_bgn)
{
/*
* Altera EMI_RM 2015.05.04 :: Figure 1-27
* DQS and DQS Enable Signal Relationships.
*/
/* We start at zero, so have one less dq to divide among */
const u32 delay_step = iocfg->io_in_delay_max /
(rwcfg->mem_dq_per_read_dqs - 1);
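/*
* Illustrative numbers (assumed device config): with
* io_in_delay_max = 31 and 8 DQ per DQS, delay_step = 31 / 7 = 4, so
* the per-bit input delays below are spread as 0, 4, 8, ... 28 taps.
*/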
int ret;
u32 i, p, d, r;
debug("%s:%d (%u,%u)\n", __func__, __LINE__, rw_group, test_bgn);
/* Try different dq_in_delays since the DQ path is shorter than DQS. */
for (r = 0; r < rwcfg->mem_number_of_ranks;
r += NUM_RANKS_PER_SHADOW_REG) {
for (i = 0, p = test_bgn, d = 0;
i < rwcfg->mem_dq_per_read_dqs;
i++, p++, d += delay_step) {
"%s:%d: g=%u r=%u i=%u p=%u d=%u\n",
__func__, __LINE__, rw_group, r, i, p, d);
scc_mgr_set_dq_in_delay(p, d);
scc_mgr_load_dq(p);
}
writel(0, &sdr_scc_mgr->update);
}
/*
* Try rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase across different
* dq_in_delay values
*/
ret = rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(rw_group);
"%s:%d: g=%u found=%u; Reseting delay chain to zero\n",
__func__, __LINE__, rw_group, !ret);
for (r = 0; r < rwcfg->mem_number_of_ranks;
r += NUM_RANKS_PER_SHADOW_REG) {
scc_mgr_apply_group_dq_in_delay(test_bgn, 0);
writel(0, &sdr_scc_mgr->update);
}
return ret;
}
/**
* rw_mgr_mem_calibrate_dq_dqs_centering() - Centering DQ/DQS
* @rw_group: Read/Write Group
* @test_bgn: Rank at which the test begins
* @use_read_test: Perform a read test
* @update_fom: Update FOM
*
* The centering DQ/DQS stage attempts to align DQ and DQS signals on reads
* within a group.
*/
static int
rw_mgr_mem_calibrate_dq_dqs_centering(const u32 rw_group, const u32 test_bgn,
const int use_read_test,
const int update_fom)
{
int ret, grp_calibrated;
u32 rank_bgn, sr;
/*
* Altera EMI_RM 2015.05.04 :: Figure 1-28
* Read per-bit deskew can be done on a per shadow register basis.
*/
grp_calibrated = 1;
for (rank_bgn = 0, sr = 0;
rank_bgn < rwcfg->mem_number_of_ranks;
rank_bgn += NUM_RANKS_PER_SHADOW_REG, sr++) {
ret = rw_mgr_mem_calibrate_vfifo_center(rank_bgn, rw_group,
test_bgn,
use_read_test,
update_fom);
if (!ret)
continue;
grp_calibrated = 0;
}
if (!grp_calibrated)
return -EIO;
return 0;
}
/**
* rw_mgr_mem_calibrate_vfifo() - Calibrate the read valid prediction FIFO
* @rw_group: Read/Write Group
* @test_bgn: Rank at which the test begins
*
* Stage 1: Calibrate the read valid prediction FIFO.
*
* This function implements UniPHY calibration Stage 1, as explained in
* detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
* - read valid prediction will consist of finding:
* - DQS enable phase and DQS enable delay (DQS Enable Calibration)
* - DQS input phase and DQS input delay (DQ/DQS Centering)
* - we also do a per-bit deskew on the DQ lines.
*/
static int rw_mgr_mem_calibrate_vfifo(const u32 rw_group, const u32 test_bgn)
{
u32 p, d;
u32 dtaps_per_ptap;
u32 failed_substage;
int ret;
debug("%s:%d: %u %u\n", __func__, __LINE__, rw_group, test_bgn);
/* Update info for sims */
reg_file_set_group(rw_group);
reg_file_set_stage(CAL_STAGE_VFIFO);
reg_file_set_sub_stage(CAL_SUBSTAGE_GUARANTEED_READ);
failed_substage = CAL_SUBSTAGE_GUARANTEED_READ;
/* USER Determine number of delay taps for each phase tap. */
dtaps_per_ptap = DIV_ROUND_UP(iocfg->delay_per_opa_tap,
iocfg->delay_per_dqs_en_dchain_tap) - 1;
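/*
* Illustrative numbers (assumed timing parameters): with
* delay_per_opa_tap = 416 ps and delay_per_dqs_en_dchain_tap = 25 ps,
* DIV_ROUND_UP(416, 25) - 1 = 17 - 1 = 16 delay taps per phase tap.
*/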
for (d = 0; d <= dtaps_per_ptap; d += 2) {
/*
* In RLDRAMX we may be changing the delay of pins in
* the same write rw_group but outside of the current read
* rw_group, but that's ok because we haven't calibrated the
* output side yet.
*/
if (d > 0) {
scc_mgr_apply_group_all_out_delay_add_all_ranks(
rw_group, d);
}
for (p = 0; p <= iocfg->dqdqs_out_phase_max; p++) {
/* 1) Guaranteed Write */
ret = rw_mgr_mem_calibrate_guaranteed_write(rw_group, p);
if (ret)
break;
/* 2) DQS Enable Calibration */
ret = rw_mgr_mem_calibrate_dqs_enable_calibration(rw_group,
test_bgn);
if (ret) {
failed_substage = CAL_SUBSTAGE_DQS_EN_PHASE;
continue;
}
/* 3) Centering DQ/DQS */
/*
* If doing read after write calibration, do not update
* FOM now. Do it then.
*/
ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group,
test_bgn, 1, 0);
if (ret) {
failed_substage = CAL_SUBSTAGE_VFIFO_CENTER;
continue;
}
/* All done. */
goto cal_done_ok;
}
}
/* Calibration Stage 1 failed. */
set_failing_group_stage(rw_group, CAL_STAGE_VFIFO, failed_substage);
return 0;
/* Calibration Stage 1 completed OK. */
cal_done_ok:
/*
* Reset the delay chains back to zero if they have moved > 1
* (check for > 1 because loop will increase d even when pass in
* first case).
*/
if (d > 2)
scc_mgr_zero_group(rw_group, 1);
return 1;
}
/**
* rw_mgr_mem_calibrate_vfifo_end() - DQ/DQS Centering.
* @rw_group: Read/Write Group
* @test_bgn: Rank at which the test begins
*
* Stage 3: DQ/DQS Centering.
*
* This function implements UniPHY calibration Stage 3, as explained in
* detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
*/
static int rw_mgr_mem_calibrate_vfifo_end(const u32 rw_group,
const u32 test_bgn)
{
int ret;
debug("%s:%d %u %u", __func__, __LINE__, rw_group, test_bgn);
/* Update info for sims. */
reg_file_set_group(rw_group);
reg_file_set_stage(CAL_STAGE_VFIFO_AFTER_WRITES);
reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
ret = rw_mgr_mem_calibrate_dq_dqs_centering(rw_group, test_bgn, 0, 1);
if (ret)
set_failing_group_stage(rw_group,
CAL_STAGE_VFIFO_AFTER_WRITES,
CAL_SUBSTAGE_VFIFO_CENTER);
return ret;
}
/**
* rw_mgr_mem_calibrate_lfifo() - Minimize latency
*
* Stage 4: Minimize latency.
*
* This function implements UniPHY calibration Stage 4, as explained in
* detail in Altera EMI_RM 2015.05.04 , "UniPHY Calibration Stages".
* Calibrate LFIFO to find smallest read latency.
*/
static u32 rw_mgr_mem_calibrate_lfifo(void)
{
int found_one = 0;
debug("%s:%d\n", __func__, __LINE__);
/* Update info for sims. */
reg_file_set_stage(CAL_STAGE_LFIFO);
reg_file_set_sub_stage(CAL_SUBSTAGE_READ_LATENCY);
/* Load up the patterns used by read calibration for all ranks */
rw_mgr_mem_calibrate_read_load_patterns(0, 1);
do {
writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
debug_cond(DLEVEL >= 2, "%s:%d lfifo: read_lat=%u",
__func__, __LINE__, gbl->curr_read_lat);
if (!rw_mgr_mem_calibrate_read_test_all_ranks(0, NUM_READ_TESTS,
PASS_ALL_BITS, 1))
break;
found_one = 1;
/*
* Reduce read latency and see if things are
* working correctly.
*/
gbl->curr_read_lat--;
} while (gbl->curr_read_lat > 0);
/* Reset the fifos to get pointers to known state. */
writel(0, &phy_mgr_cmd->fifo_reset);
if (found_one) {
/* Add a fudge factor to the read latency that was determined */
gbl->curr_read_lat += 2;
writel(gbl->curr_read_lat, &phy_mgr_cfg->phy_rlat);
"%s:%d lfifo: success: using read_lat=%u\n",
__func__, __LINE__, gbl->curr_read_lat);
} else {
set_failing_group_stage(0xff, CAL_STAGE_LFIFO,
CAL_SUBSTAGE_READ_LATENCY);
"%s:%d lfifo: failed at initial read_lat=%u\n",
__func__, __LINE__, gbl->curr_read_lat);
}
return found_one;
}
/**
* search_window() - Search for the/part of the window with DM/DQS shift
* @search_dm: If 1, search for the DM shift, if 0, search for DQS shift
* @rank_bgn: Rank number
* @write_group: Write Group
* @bgn_curr: Current window begin
* @end_curr: Current window end
* @bgn_best: Current best window begin
* @end_best: Current best window end
* @win_best: Size of the best window
* @new_dqs: New DQS value (only applicable if search_dm = 0).
*
* Search for the/part of the window with DM/DQS shift.
*/
static void search_window(const int search_dm,
const u32 rank_bgn, const u32 write_group,
int *bgn_curr, int *end_curr, int *bgn_best,
int *end_best, int *win_best, int new_dqs)
{
u32 bit_chk;
const int max = iocfg->io_out1_delay_max - new_dqs;
int d, di;
/* Search for the/part of the window with DM/DQS shift. */
for (di = max; di >= 0; di -= DELTA_D) {
if (search_dm) {
d = di;
scc_mgr_apply_group_dm_out1_delay(d);
} else {
/* For DQS, we go from 0...max */
d = max - di;
/*
* Note: This only shifts DQS, so we may be limiting
* ourselves to the width of DQ unnecessarily.
*/
scc_mgr_apply_group_dqs_io_and_oct_out1(write_group,
d + new_dqs);
}
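/*
* Note: for DM the sweep runs from the largest delay down to 0 and the
* window edges are recorded as -d, while for DQS it runs from 0 up and
* records d directly; either way the recorded coordinate grows
* monotonically, so the same begin/end/best bookkeeping below works
* for both searches.
*/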
writel(0, &sdr_scc_mgr->update);
if (rw_mgr_mem_calibrate_write_test(rank_bgn, write_group, 1,
PASS_ALL_BITS, &bit_chk,
0)) {
/* Set current end of the window. */
*end_curr = search_dm ? -d : d;
/*
* If a starting edge of our window has not been seen
* this is our current start of the DM window.
*/
if (*bgn_curr == iocfg->io_out1_delay_max + 1)
*bgn_curr = search_dm ? -d : d;
/*
* If current window is bigger than best seen.
* Set best seen to be current window.
*/
if ((*end_curr - *bgn_curr + 1) > *win_best) {
*win_best = *end_curr - *bgn_curr + 1;
*bgn_best = *bgn_curr;
*end_best = *end_curr;
}
} else {
/* We just saw a failing test. Reset temp edge. */
*bgn_curr = iocfg->io_out1_delay_max + 1;
*end_curr = iocfg->io_out1_delay_max + 1;
/* Early exit is only applicable to DQS. */
if (search_dm)
continue;
/*
* Early exit optimization: if the remaining delay
* chain space is less than already seen largest
* window we can exit.
*/
if (*win_best - 1 > iocfg->io_out1_delay_max - new_dqs - d)
break;
}
}
}
/**
* rw_mgr_mem_calibrate_writes_center() - Center all windows
* @rank_bgn: Rank number
* @write_group: Write group
* @test_bgn: Rank at which the test begins
*
* Center all windows. Do per-bit-deskew to possibly increase size of
* certain windows.
*/
static int
rw_mgr_mem_calibrate_writes_center(const u32 rank_bgn, const u32 write_group,
const u32 test_bgn)
{
int i;
u32 sticky_bit_chk;
u32 min_index;
int left_edge[rwcfg->mem_dq_per_write_dqs];
int right_edge[rwcfg->mem_dq_per_write_dqs];
int mid;
int mid_min, orig_mid_min;
int new_dqs, start_dqs;
int dq_margin, dqs_margin, dm_margin;
int bgn_curr = iocfg->io_out1_delay_max + 1;
int end_curr = iocfg->io_out1_delay_max + 1;
int bgn_best = iocfg->io_out1_delay_max + 1;
int end_best = iocfg->io_out1_delay_max + 1;
int win_best = 0;
int ret;
debug("%s:%d %u %u", __func__, __LINE__, write_group, test_bgn);
dm_margin = 0;
start_dqs = readl((SDR_PHYGRP_SCCGRP_ADDRESS |
SCC_MGR_IO_OUT1_DELAY_OFFSET) +
(rwcfg->mem_dq_per_write_dqs << 2));
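/*
* Note (assumed register layout): the DQS IO out1 delay register is
* expected to sit right after the per-bit DQ out1 delay registers,
* hence the (mem_dq_per_write_dqs << 2) byte offset into the bank.
*/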
/* Per-bit deskew. */
/*
* Set the left and right edge of each bit to an illegal value.
* Use (iocfg->io_out1_delay_max + 1) as an illegal value.
*/
sticky_bit_chk = 0;
for (i = 0; i < rwcfg->mem_dq_per_write_dqs; i++) {
left_edge[i] = iocfg->io_out1_delay_max + 1;
right_edge[i] = iocfg->io_out1_delay_max + 1;
}
/* Search for the left edge of the window for each bit. */
search_left_edge(1, rank_bgn, write_group, 0, test_bgn,
&sticky_bit_chk,
left_edge, right_edge, 0);
/* Search for the right edge of the window for each bit. */
ret = search_right_edge(1, rank_bgn, write_group, 0,
start_dqs, 0,
&sticky_bit_chk,
left_edge, right_edge, 0);
if (ret) {
set_failing_group_stage(test_bgn + ret - 1, CAL_STAGE_WRITES,
CAL_SUBSTAGE_WRITES_CENTER);