/*
* Copyright Altera Corporation (C) 2012-2015
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <common.h>
#include <asm/io.h>
#include <asm/arch/sdram.h>
#include "sequencer.h"
#include "sequencer_auto.h"
#include "sequencer_auto_ac_init.h"
#include "sequencer_auto_inst_init.h"
#include "sequencer_defines.h"
static struct socfpga_sdr_rw_load_manager *sdr_rw_load_mgr_regs =
(struct socfpga_sdr_rw_load_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0x800);
static struct socfpga_sdr_rw_load_jump_manager *sdr_rw_load_jump_mgr_regs =
(struct socfpga_sdr_rw_load_jump_manager *)(SDR_PHYGRP_RWMGRGRP_ADDRESS | 0xC00);
static struct socfpga_sdr_reg_file *sdr_reg_file =
(struct socfpga_sdr_reg_file *)SDR_PHYGRP_REGFILEGRP_ADDRESS;
static struct socfpga_sdr_scc_mgr *sdr_scc_mgr =
(struct socfpga_sdr_scc_mgr *)(SDR_PHYGRP_SCCGRP_ADDRESS | 0xe00);
static struct socfpga_phy_mgr_cmd *phy_mgr_cmd =
(struct socfpga_phy_mgr_cmd *)SDR_PHYGRP_PHYMGRGRP_ADDRESS;
static struct socfpga_phy_mgr_cfg *phy_mgr_cfg =
(struct socfpga_phy_mgr_cfg *)(SDR_PHYGRP_PHYMGRGRP_ADDRESS | 0x40);
static struct socfpga_data_mgr *data_mgr =
(struct socfpga_data_mgr *)SDR_PHYGRP_DATAMGRGRP_ADDRESS;
static struct socfpga_sdr_ctrl *sdr_ctrl =
(struct socfpga_sdr_ctrl *)SDR_CTRLGRP_ADDRESS;
#define DELTA_D 1
/*
* In order to reduce ROM size, most of the selectable calibration steps are
* decided at compile time based on the user's calibration mode selection,
* as captured by the STATIC_CALIB_STEPS selection below.
*
* However, to support simulation-time selection of fast simulation mode, where
* we skip everything except the bare minimum, we need a few of the steps to
* be dynamic. In those cases, we either use the DYNAMIC_CALIB_STEPS for the
* check, which is based on the rtl-supplied value, or we dynamically compute
* the value to use based on the dynamically-chosen calibration mode
*/
#define DLEVEL 0
#define STATIC_IN_RTL_SIM 0
#define STATIC_SKIP_DELAY_LOOPS 0
#define STATIC_CALIB_STEPS (STATIC_IN_RTL_SIM | CALIB_SKIP_FULL_TEST | \
STATIC_SKIP_DELAY_LOOPS)
/* calibration steps requested by the rtl */
uint16_t dyn_calib_steps;
/*
* To make CALIB_SKIP_DELAY_LOOPS a dynamic conditional option
* instead of static, we use boolean logic to select between
* non-skip and skip values
*
* The mask is set to include all bits when not-skipping, but is
* zero when skipping
*/
uint16_t skip_delay_mask; /* mask off bits when skipping/not-skipping */
#define SKIP_DELAY_LOOP_VALUE_OR_ZERO(non_skip_value) \
((non_skip_value) & skip_delay_mask)
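/*
 * Example: when delay loops are not being skipped the mask is all ones and
 * SKIP_DELAY_LOOP_VALUE_OR_ZERO(n) returns n unchanged; when skipping, the
 * mask is zero and every loop count collapses to zero.
 */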
struct gbl_type *gbl;
struct param_type *param;
uint32_t curr_shadow_reg;
static uint32_t rw_mgr_mem_calibrate_write_test(uint32_t rank_bgn,
uint32_t write_group, uint32_t use_dm,
uint32_t all_correct, uint32_t *bit_chk, uint32_t all_ranks);
static void set_failing_group_stage(uint32_t group, uint32_t stage,
uint32_t substage)
{
/*
* Only set the global stage if there has not been any other
* failing group
*/
if (gbl->error_stage == CAL_STAGE_NIL) {
gbl->error_substage = substage;
gbl->error_stage = stage;
gbl->error_group = group;
}
}
static void reg_file_set_group(u16 set_group)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff0000, set_group << 16);
}

static void reg_file_set_stage(u8 set_stage)
{
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xffff, set_stage & 0xff);
}

static void reg_file_set_sub_stage(u8 set_sub_stage)
{
	set_sub_stage &= 0xff;
	clrsetbits_le32(&sdr_reg_file->cur_stage, 0xff00, set_sub_stage << 8);
}
static void initialize(void)
{
debug("%s:%d\n", __func__, __LINE__);
/* USER calibration has control over path to memory */
/*
* In Hard PHY this is a 2-bit control:
* 0: AFI Mux Select
* 1: DDIO Mux Select
*/
writel(0x3, &phy_mgr_cfg->mux_sel);
/* USER memory clock is not stable, we begin initialization */
writel(0, &phy_mgr_cfg->reset_mem_stbl);
/* USER calibration status all set to zero */
writel(0, &phy_mgr_cfg->cal_status);
writel(0, &phy_mgr_cfg->cal_debug_info);
if ((dyn_calib_steps & CALIB_SKIP_ALL) != CALIB_SKIP_ALL) {
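		/*
		 * Pre-compute the "all bits correct" masks used by the
		 * read/write tests. Illustration only (actual widths come
		 * from sequencer_defines.h): with 8 DQ per read DQS,
		 * read_correct_mask would be (1 << 8) - 1 = 0xFF.
		 */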
param->read_correct_mask_vg = ((uint32_t)1 <<
(RW_MGR_MEM_DQ_PER_READ_DQS /
RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
param->write_correct_mask_vg = ((uint32_t)1 <<
(RW_MGR_MEM_DQ_PER_READ_DQS /
RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS)) - 1;
param->read_correct_mask = ((uint32_t)1 <<
RW_MGR_MEM_DQ_PER_READ_DQS) - 1;
param->write_correct_mask = ((uint32_t)1 <<
RW_MGR_MEM_DQ_PER_WRITE_DQS) - 1;
param->dm_correct_mask = ((uint32_t)1 <<
(RW_MGR_MEM_DATA_WIDTH / RW_MGR_MEM_DATA_MASK_WIDTH))
- 1;
}
}
static void set_rank_and_odt_mask(uint32_t rank, uint32_t odt_mode)
{
uint32_t odt_mask_0 = 0;
uint32_t odt_mask_1 = 0;
uint32_t cs_and_odt_mask;
if (odt_mode == RW_MGR_ODT_MODE_READ_WRITE) {
if (RW_MGR_MEM_NUMBER_OF_RANKS == 1) {
/*
* 1 Rank
* Read: ODT = 0
* Write: ODT = 1
*/
odt_mask_0 = 0x0;
odt_mask_1 = 0x1;
} else if (RW_MGR_MEM_NUMBER_OF_RANKS == 2) {
/* 2 Ranks */
if (RW_MGR_MEM_NUMBER_OF_CS_PER_DIMM == 1) {
/* - Dual-Slot , Single-Rank
* (1 chip-select per DIMM)
* OR
* - RDIMM, 4 total CS (2 CS per DIMM)
* means 2 DIMM
* Since MEM_NUMBER_OF_RANKS is 2 they are
* both single rank
* with 2 CS each (special for RDIMM)
* Read: Turn on ODT on the opposite rank
* Write: Turn on ODT on all ranks
*/
odt_mask_0 = 0x3 & ~(1 << rank);
odt_mask_1 = 0x3;
} else {
/*
* USER - Single-Slot , Dual-rank DIMMs
* (2 chip-selects per DIMM)
* USER Read: Turn ODT off on all ranks
* USER Write: Turn on ODT on active rank
*/
odt_mask_0 = 0x0;
odt_mask_1 = 0x3 & (1 << rank);
}
		} else {
/* 4 Ranks
* Read:
* ----------+-----------------------+
* | |
* | ODT |
* Read From +-----------------------+
* Rank | 3 | 2 | 1 | 0 |
* ----------+-----+-----+-----+-----+
* 0 | 0 | 1 | 0 | 0 |
* 1 | 1 | 0 | 0 | 0 |
* 2 | 0 | 0 | 0 | 1 |
* 3 | 0 | 0 | 1 | 0 |
* ----------+-----+-----+-----+-----+
*
* Write:
* ----------+-----------------------+
* | |
* | ODT |
* Write To +-----------------------+
* Rank | 3 | 2 | 1 | 0 |
* ----------+-----+-----+-----+-----+
* 0 | 0 | 1 | 0 | 1 |
* 1 | 1 | 0 | 1 | 0 |
* 2 | 0 | 1 | 0 | 1 |
* 3 | 1 | 0 | 1 | 0 |
* ----------+-----+-----+-----+-----+
*/
switch (rank) {
case 0:
odt_mask_0 = 0x4;
odt_mask_1 = 0x5;
break;
case 1:
odt_mask_0 = 0x8;
odt_mask_1 = 0xA;
break;
case 2:
odt_mask_0 = 0x1;
odt_mask_1 = 0x5;
break;
case 3:
odt_mask_0 = 0x2;
odt_mask_1 = 0xA;
break;
}
}
} else {
odt_mask_0 = 0x0;
odt_mask_1 = 0x0;
}
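	/*
	 * Pack the chip-select mask (active-low, bits [7:0]) together with
	 * odt_mask_0 (bits [15:8]) and odt_mask_1 (bits [23:16]) into the
	 * single word written to the RW manager below.
	 */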
cs_and_odt_mask =
(0xFF & ~(1 << rank)) |
((0xFF & odt_mask_0) << 8) |
((0xFF & odt_mask_1) << 16);
	writel(cs_and_odt_mask, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
}
/**
* scc_mgr_set() - Set SCC Manager register
* @off: Base offset in SCC Manager space
* @grp: Read/Write group
* @val: Value to be set
*
* This function sets the SCC Manager (Scan Chain Control Manager) register.
*/
static void scc_mgr_set(u32 off, u32 grp, u32 val)
{
	writel(val, SDR_PHYGRP_SCCGRP_ADDRESS | off | (grp << 2));
}
/**
* scc_mgr_initialize() - Initialize SCC Manager registers
*
* Initialize SCC Manager registers.
*/
static void scc_mgr_initialize(void)
{
	u32 i;

	/*
	 * Clear register file for HPS. 16 (2^4) is the size of the
	 * full register file in the scc mgr:
	 *	RFILE_DEPTH = 1 + log2(MEM_DQ_PER_DQS + 1 + MEM_DM_PER_DQS +
	 *			       MEM_IF_READ_DQS_WIDTH - 1);
	 */
	for (i = 0; i < 16; i++) {
		debug_cond(DLEVEL == 1, "%s:%d: Clearing SCC RFILE index %u\n",
			   __func__, __LINE__, i);
		scc_mgr_set(SCC_MGR_HHP_RFILE_OFFSET, 0, i);
	}
}
static void scc_mgr_set_dqdqs_output_phase(uint32_t write_group, uint32_t phase)
{
scc_mgr_set(SCC_MGR_DQDQS_OUT_PHASE_OFFSET, write_group, phase);
}
static void scc_mgr_set_dqs_bus_in_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_IN_DELAY_OFFSET, read_group, delay);
}
static void scc_mgr_set_dqs_en_phase(uint32_t read_group, uint32_t phase)
{
scc_mgr_set(SCC_MGR_DQS_EN_PHASE_OFFSET, read_group, phase);
}
static void scc_mgr_set_dqs_en_delay(uint32_t read_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_DQS_EN_DELAY_OFFSET, read_group, delay);
}

static void scc_mgr_set_dqs_io_in_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}

static void scc_mgr_set_dq_in_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_IN_DELAY_OFFSET, dq_in_group, delay);
}
static void scc_mgr_set_dq_out1_delay(uint32_t dq_in_group, uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, dq_in_group, delay);
}

static void scc_mgr_set_dqs_out1_delay(uint32_t delay)
{
	scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET, RW_MGR_MEM_DQ_PER_WRITE_DQS,
		    delay);
}
static void scc_mgr_set_dm_out1_delay(uint32_t dm, uint32_t delay)
{
scc_mgr_set(SCC_MGR_IO_OUT1_DELAY_OFFSET,
RW_MGR_MEM_DQ_PER_WRITE_DQS + 1 + dm,
delay);
}
/* load up dqs config settings */
static void scc_mgr_load_dqs(uint32_t dqs)
{
writel(dqs, &sdr_scc_mgr->dqs_ena);
}
/* load up dqs io config settings */
static void scc_mgr_load_dqs_io(void)
{
writel(0, &sdr_scc_mgr->dqs_io_ena);
}
/* load up dq config settings */
static void scc_mgr_load_dq(uint32_t dq_in_group)
{
writel(dq_in_group, &sdr_scc_mgr->dq_ena);
}
/* load up dm config settings */
static void scc_mgr_load_dm(uint32_t dm)
{
	writel(dm, &sdr_scc_mgr->dm_ena);
}
/**
* scc_mgr_set_all_ranks() - Set SCC Manager register for all ranks
* @off: Base offset in SCC Manager space
* @grp: Read/Write group
* @val: Value to be set
* @update: If non-zero, trigger SCC Manager update for all ranks
*
* This function sets the SCC Manager (Scan Chain Control Manager) register
* and optionally triggers the SCC update for all ranks.
*/
static void scc_mgr_set_all_ranks(const u32 off, const u32 grp, const u32 val,
				  const int update)
{
	u32 r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		scc_mgr_set(off, grp, val);
		if (update || (r == 0)) {
			writel(grp, &sdr_scc_mgr->dqs_ena);
			writel(0, &sdr_scc_mgr->update);
		}
	}
}
static void scc_mgr_set_dqs_en_phase_all_ranks(u32 read_group, u32 phase)
{
/*
* USER although the h/w doesn't support different phases per
* shadow register, for simplicity our scc manager modeling
* keeps different phase settings per shadow reg, and it's
* important for us to keep them in sync to match h/w.
* for efficiency, the scan chain update should occur only
* once to sr0.
*/
scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_PHASE_OFFSET,
read_group, phase, 0);
}
static void scc_mgr_set_dqdqs_output_phase_all_ranks(uint32_t write_group,
uint32_t phase)
{
/*
* USER although the h/w doesn't support different phases per
* shadow register, for simplicity our scc manager modeling
* keeps different phase settings per shadow reg, and it's
* important for us to keep them in sync to match h/w.
* for efficiency, the scan chain update should occur only
* once to sr0.
*/
scc_mgr_set_all_ranks(SCC_MGR_DQDQS_OUT_PHASE_OFFSET,
write_group, phase, 0);
}
static void scc_mgr_set_dqs_en_delay_all_ranks(uint32_t read_group,
uint32_t delay)
{
/*
* In shadow register mode, the T11 settings are stored in
* registers in the core, which are updated by the DQS_ENA
* signals. Not issuing the SCC_MGR_UPD command allows us to
* save lots of rank switching overhead, by calling
* select_shadow_regs_for_update with update_scan_chains
* set to 0.
*/
scc_mgr_set_all_ranks(SCC_MGR_DQS_EN_DELAY_OFFSET,
read_group, delay, 1);
	writel(0, &sdr_scc_mgr->update);
}
/**
* scc_mgr_set_oct_out1_delay() - Set OCT output delay
* @write_group: Write group
* @delay: Delay value
*
* This function sets the OCT output delay in SCC manager.
*/
static void scc_mgr_set_oct_out1_delay(const u32 write_group, const u32 delay)
{
const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
const int base = write_group * ratio;
int i;
/*
* Load the setting in the SCC manager
* Although OCT affects only write data, the OCT delay is controlled
* by the DQS logic block which is instantiated once per read group.
* For protocols where a write group consists of multiple read groups,
* the setting must be set multiple times.
*/
	for (i = 0; i < ratio; i++)
		scc_mgr_set(SCC_MGR_OCT_OUT1_DELAY_OFFSET, base + i, delay);
}
/**
* scc_mgr_set_hhp_extras() - Set HHP extras.
*
* Load the fixed setting in the SCC manager HHP extras.
*/
static void scc_mgr_set_hhp_extras(void)
{
/*
* Load the fixed setting in the SCC manager
* bits: 0:0 = 1'b1 - DQS bypass
* bits: 1:1 = 1'b1 - DQ bypass
* bits: 4:2 = 3'b001 - rfifo_mode
* bits: 6:5 = 2'b01 - rfifo clock_select
* bits: 7:7 = 1'b0 - separate gating from ungating setting
 * bits: 8:8 = 1'b0 - separate OE from Output delay setting
 */
const u32 value = (0 << 8) | (0 << 7) | (1 << 5) |
(1 << 2) | (1 << 1) | (1 << 0);
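	/* With the encoding above this evaluates to 0x27. */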
const u32 addr = SDR_PHYGRP_SCCGRP_ADDRESS |
SCC_MGR_HHP_GLOBALS_OFFSET |
SCC_MGR_HHP_EXTRAS_OFFSET;
debug_cond(DLEVEL == 1, "%s:%d Setting HHP Extras\n",
__func__, __LINE__);
writel(value, addr);
debug_cond(DLEVEL == 1, "%s:%d Done Setting HHP Extras\n",
__func__, __LINE__);
/**
* scc_mgr_zero_all() - Zero all DQS config
*
* Zero all DQS config.
*/
static void scc_mgr_zero_all(void)
{
	u32 i, r;
/*
* USER Zero all DQS config settings, across all groups and all
* shadow registers
*/
for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
r += NUM_RANKS_PER_SHADOW_REG) {
for (i = 0; i < RW_MGR_MEM_IF_READ_DQS_WIDTH; i++) {
/*
* The phases actually don't exist on a per-rank basis,
* but there's no harm updating them several times, so
* let's keep the code simple.
*/
scc_mgr_set_dqs_bus_in_delay(i, IO_DQS_IN_RESERVE);
scc_mgr_set_dqs_en_phase(i, 0);
scc_mgr_set_dqs_en_delay(i, 0);
}
for (i = 0; i < RW_MGR_MEM_IF_WRITE_DQS_WIDTH; i++) {
scc_mgr_set_dqdqs_output_phase(i, 0);
/* Arria V/Cyclone V don't have out2. */
scc_mgr_set_oct_out1_delay(i, IO_DQS_OUT_RESERVE);
}
}
/* Multicast to all DQS group enables. */
writel(0xff, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}
/**
* scc_set_bypass_mode() - Set bypass mode and trigger SCC update
* @write_group: Write group
*
* Set bypass mode and trigger SCC update.
*/
static void scc_set_bypass_mode(const u32 write_group)
{
/* Multicast to all DQ enables. */
writel(0xff, &sdr_scc_mgr->dq_ena);
writel(0xff, &sdr_scc_mgr->dm_ena);
/* Update current DQS IO enable. */
writel(0, &sdr_scc_mgr->dqs_io_ena);
writel(write_group, &sdr_scc_mgr->dqs_ena);
	writel(0, &sdr_scc_mgr->update);
}
/**
* scc_mgr_load_dqs_for_write_group() - Load DQS settings for Write Group
* @write_group: Write group
*
* Load DQS settings for Write Group, do not trigger SCC update.
*/
static void scc_mgr_load_dqs_for_write_group(const u32 write_group)
{
	const int ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
	const int base = write_group * ratio;
	int i;

	/*
	 * Load the setting in the SCC manager
	 * Although OCT affects only write data, the OCT delay is controlled
	 * by the DQS logic block which is instantiated once per read group.
	 * For protocols where a write group consists of multiple read groups,
	 * the setting must be set multiple times.
	 */
	for (i = 0; i < ratio; i++)
		writel(base + i, &sdr_scc_mgr->dqs_ena);
}
/**
* scc_mgr_zero_group() - Zero all configs for a group
*
* Zero DQ, DM, DQS and OCT configs for a group.
*/
static void scc_mgr_zero_group(const u32 write_group, const int out_only)
{
	int i, r;

	for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
	     r += NUM_RANKS_PER_SHADOW_REG) {
		/* Zero all DQ config settings. */
		for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
			scc_mgr_set_dq_out1_delay(i, 0);
			if (!out_only)
				scc_mgr_set_dq_in_delay(i, 0);
		}
		/* Multicast to all DQ enables. */
writel(0xff, &sdr_scc_mgr->dq_ena);
/* Zero all DM config settings. */
for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
scc_mgr_set_dm_out1_delay(i, 0);
/* Multicast to all DM enables. */
writel(0xff, &sdr_scc_mgr->dm_ena);
/* Zero all DQS IO settings. */
scc_mgr_set_dqs_io_in_delay(0);
/* Arria V/Cyclone V don't have out2. */
scc_mgr_set_dqs_out1_delay(IO_DQS_OUT_RESERVE);
scc_mgr_set_oct_out1_delay(write_group, IO_DQS_OUT_RESERVE);
scc_mgr_load_dqs_for_write_group(write_group);
/* Multicast to all DQS IO enables (only 1 in total). */
writel(0, &sdr_scc_mgr->dqs_io_ena);
/* Hit update to zero everything. */
writel(0, &sdr_scc_mgr->update);
}
}
/*
* apply and load a particular input delay for the DQ pins in a group
* group_bgn is the index of the first dq pin (in the write group)
*/
static void scc_mgr_apply_group_dq_in_delay(uint32_t group_bgn, uint32_t delay)
{
uint32_t i, p;
for (i = 0, p = group_bgn; i < RW_MGR_MEM_DQ_PER_READ_DQS; i++, p++) {
scc_mgr_set_dq_in_delay(p, delay);
scc_mgr_load_dq(p);
}
}
/**
* scc_mgr_apply_group_dq_out1_delay() - Apply and load an output delay for the DQ pins in a group
* @delay: Delay value
*
* Apply and load a particular output delay for the DQ pins in a group.
*/
static void scc_mgr_apply_group_dq_out1_delay(const u32 delay)
{
	u32 i;

for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++) {
scc_mgr_set_dq_out1_delay(i, delay);
scc_mgr_load_dq(i);
}
}
/* apply and load a particular output delay for the DM pins in a group */
static void scc_mgr_apply_group_dm_out1_delay(uint32_t delay1)
{
uint32_t i;
for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++) {
scc_mgr_set_dm_out1_delay(i, delay1);
scc_mgr_load_dm(i);
}
}
/* apply and load delay on both DQS and OCT out1 */
static void scc_mgr_apply_group_dqs_io_and_oct_out1(uint32_t write_group,
uint32_t delay)
{
scc_mgr_set_dqs_out1_delay(delay);
scc_mgr_load_dqs_io();
scc_mgr_set_oct_out1_delay(write_group, delay);
scc_mgr_load_dqs_for_write_group(write_group);
}
/**
* scc_mgr_apply_group_all_out_delay_add() - Apply a delay to the entire output side: DQ, DM, DQS, OCT
* @write_group: Write group
* @delay: Delay value
*
* Apply a delay to the entire output side: DQ, DM, DQS, OCT.
*/
static void scc_mgr_apply_group_all_out_delay_add(const u32 write_group,
const u32 delay)
{
u32 i, new_delay;
/* DQ shift */
for (i = 0; i < RW_MGR_MEM_DQ_PER_WRITE_DQS; i++)
scc_mgr_load_dq(i);
/* DM shift */
for (i = 0; i < RW_MGR_NUM_DM_PER_WRITE_GROUP; i++)
scc_mgr_load_dm(i);
/* DQS shift */
new_delay = READ_SCC_DQS_IO_OUT2_DELAY + delay;
if (new_delay > IO_IO_OUT2_DELAY_MAX) {
debug_cond(DLEVEL == 1,
"%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
__func__, __LINE__, write_group, delay, new_delay,
IO_IO_OUT2_DELAY_MAX,
new_delay - IO_IO_OUT2_DELAY_MAX);
new_delay -= IO_IO_OUT2_DELAY_MAX;
scc_mgr_set_dqs_out1_delay(new_delay);
}
scc_mgr_load_dqs_io();
/* OCT shift */
new_delay = READ_SCC_OCT_OUT2_DELAY + delay;
if (new_delay > IO_IO_OUT2_DELAY_MAX) {
debug_cond(DLEVEL == 1,
"%s:%d (%u, %u) DQS: %u > %d; adding %u to OUT1\n",
__func__, __LINE__, write_group, delay,
new_delay, IO_IO_OUT2_DELAY_MAX,
new_delay - IO_IO_OUT2_DELAY_MAX);
new_delay -= IO_IO_OUT2_DELAY_MAX;
scc_mgr_set_oct_out1_delay(write_group, new_delay);
}
scc_mgr_load_dqs_for_write_group(write_group);
}
/**
 * scc_mgr_apply_group_all_out_delay_add_all_ranks() - Apply a delay to the entire output side to all ranks
* @write_group: Write group
* @delay: Delay value
*
 * Apply a delay to the entire output side (DQ, DM, DQS, OCT) to all ranks.
 */
static void
scc_mgr_apply_group_all_out_delay_add_all_ranks(const u32 write_group,
						const u32 delay)
{
int r;
for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS;
r += NUM_RANKS_PER_SHADOW_REG) {
scc_mgr_apply_group_all_out_delay_add(write_group, delay);
writel(0, &sdr_scc_mgr->update);
}
}
/* optimization used to recover some slots in ddr3 inst_rom */
/* could be applied to other protocols if we wanted to */
static void set_jump_as_return(void)
{
/*
* to save space, we replace return with jump to special shared
* RETURN instruction so we set the counter to large value so that
* we always jump
*/
writel(0xff, &sdr_rw_load_mgr_regs->load_cntr0);
writel(RW_MGR_RETURN, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
}
/*
* should always use constants as argument to ensure all computations are
* performed at compile time
*/
static void delay_for_n_mem_clocks(const uint32_t clocks)
{
uint32_t afi_clocks;
uint8_t inner = 0;
uint8_t outer = 0;
uint16_t c_loop = 0;
debug("%s:%d: clocks=%u ... start\n", __func__, __LINE__, clocks);
afi_clocks = (clocks + AFI_RATE_RATIO-1) / AFI_RATE_RATIO;
/* scale (rounding up) to get afi clocks */
/*
* Note, we don't bother accounting for being off a little bit
* because of a few extra instructions in outer loops
* Note, the loops have a test at the end, and do the test before
* the decrement, and so always perform the loop
* 1 time more than the counter value
*/
if (afi_clocks == 0) {
;
} else if (afi_clocks <= 0x100) {
inner = afi_clocks-1;
outer = 0;
c_loop = 0;
} else if (afi_clocks <= 0x10000) {
inner = 0xff;
outer = (afi_clocks-1) >> 8;
c_loop = 0;
} else {
inner = 0xff;
outer = 0xff;
c_loop = (afi_clocks-1) >> 16;
}
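	/*
	 * Worked example (illustrative values): assuming AFI_RATE_RATIO == 2,
	 * clocks == 512 gives afi_clocks == 256, so inner == 0xff and
	 * outer == c_loop == 0, i.e. a single 256-iteration inner loop.
	 */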
/*
* rom instructions are structured as follows:
*
* IDLE_LOOP2: jnz cntr0, TARGET_A
* IDLE_LOOP1: jnz cntr1, TARGET_B
* return
*
* so, when doing nested loops, TARGET_A is set to IDLE_LOOP2, and
* TARGET_B is set to IDLE_LOOP2 as well
*
* if we have no outer loop, though, then we can use IDLE_LOOP1 only,
* and set TARGET_B to IDLE_LOOP1 and we skip IDLE_LOOP2 entirely
*
* a little confusing, but it helps save precious space in the inst_rom
* and sequencer rom and keeps the delays more accurate and reduces
* overhead
*/
if (afi_clocks <= 0x100) {
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
&sdr_rw_load_mgr_regs->load_cntr1);
writel(RW_MGR_IDLE_LOOP1,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(RW_MGR_IDLE_LOOP1, SDR_PHYGRP_RWMGRGRP_ADDRESS |
			  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
	} else {
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(inner),
&sdr_rw_load_mgr_regs->load_cntr0);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(outer),
&sdr_rw_load_mgr_regs->load_cntr1);
writel(RW_MGR_IDLE_LOOP2,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(RW_MGR_IDLE_LOOP2,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
/* hack to get around compiler not being smart enough */
if (afi_clocks <= 0x10000) {
/* only need to run once */
writel(RW_MGR_IDLE_LOOP2, SDR_PHYGRP_RWMGRGRP_ADDRESS |
				  RW_MGR_RUN_SINGLE_GROUP_OFFSET);
		} else {
			do {
writel(RW_MGR_IDLE_LOOP2,
SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET);
} while (c_loop-- != 0);
}
}
debug("%s:%d clocks=%u ... end\n", __func__, __LINE__, clocks);
}
static void rw_mgr_mem_initialize(void)
{
uint32_t r;
uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
debug("%s:%d\n", __func__, __LINE__);
/* The reset / cke part of initialization is broadcasted to all ranks */
writel(RW_MGR_RANK_ALL, SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_SET_CS_AND_ODT_MASK_OFFSET);
/*
* Here's how you load register for a loop
* Counters are located @ 0x800
* Jump address are located @ 0xC00
* For both, registers 0 to 3 are selected using bits 3 and 2, like
* in 0x800, 0x804, 0x808, 0x80C and 0xC00, 0xC04, 0xC08, 0xC0C
* I know this ain't pretty, but Avalon bus throws away the 2 least
* significant bits
*/
/* start with memory RESET activated */
/* tINIT = 200us */
/*
* 200us @ 266MHz (3.75 ns) ~ 54000 clock cycles
* If a and b are the number of iteration in 2 nested loops
* it takes the following number of cycles to complete the operation:
* number_of_cycles = ((2 + n) * a + 2) * b
* where n is the number of instruction in the inner loop
* One possible solution is n = 0 , a = 256 , b = 106 => a = FF,
* b = 6A
*/
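	/*
	 * Sanity check of the suggested values:
	 * ((2 + 0) * 256 + 2) * 106 = 54,484 cycles >= ~54,000 required.
	 */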
/* Load counters */
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR0_VAL),
&sdr_rw_load_mgr_regs->load_cntr0);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR1_VAL),
&sdr_rw_load_mgr_regs->load_cntr1);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TINIT_CNTR2_VAL),
&sdr_rw_load_mgr_regs->load_cntr2);
/* Load jump address */
writel(RW_MGR_INIT_RESET_0_CKE_0,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(RW_MGR_INIT_RESET_0_CKE_0,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(RW_MGR_INIT_RESET_0_CKE_0,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
/* Execute count instruction */
writel(RW_MGR_INIT_RESET_0_CKE_0, grpaddr);
/* indicate that memory is stable */
writel(1, &phy_mgr_cfg->reset_mem_stbl);
/*
* transition the RESET to high
* Wait for 500us
*/
/*
* 500us @ 266MHz (3.75 ns) ~ 134000 clock cycles
* If a and b are the number of iteration in 2 nested loops
* it takes the following number of cycles to complete the operation
* number_of_cycles = ((2 + n) * a + 2) * b
* where n is the number of instruction in the inner loop
* One possible solution is n = 2 , a = 131 , b = 256 => a = 83,
* b = FF
*/
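	/*
	 * Sanity check: ((2 + 2) * 131 + 2) * 256 = 134,656 cycles,
	 * covering the ~134,000 cycles needed for 500us.
	 */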
/* Load counters */
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR0_VAL),
&sdr_rw_load_mgr_regs->load_cntr0);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR1_VAL),
&sdr_rw_load_mgr_regs->load_cntr1);
writel(SKIP_DELAY_LOOP_VALUE_OR_ZERO(SEQ_TRESET_CNTR2_VAL),
&sdr_rw_load_mgr_regs->load_cntr2);
/* Load jump address */
writel(RW_MGR_INIT_RESET_1_CKE_0,
&sdr_rw_load_jump_mgr_regs->load_jump_add0);
writel(RW_MGR_INIT_RESET_1_CKE_0,
&sdr_rw_load_jump_mgr_regs->load_jump_add1);
writel(RW_MGR_INIT_RESET_1_CKE_0,
&sdr_rw_load_jump_mgr_regs->load_jump_add2);
writel(RW_MGR_INIT_RESET_1_CKE_0, grpaddr);
/* bring up clock enable */
/* tXRP < 250 ck cycles */
delay_for_n_mem_clocks(250);
for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
if (param->skip_ranks[r]) {
/* request to skip the rank */
continue;
}
/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);
/*
	 * USER Use mirrored commands for odd ranks if address
	 * mirroring is on
*/
if ((RW_MGR_MEM_ADDRESS_MIRRORING >> r) & 0x1) {
set_jump_as_return();
writel(RW_MGR_MRS2_MIRR, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(RW_MGR_MRS3_MIRR, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(RW_MGR_MRS1_MIRR, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(RW_MGR_MRS0_DLL_RESET_MIRR, grpaddr);
} else {
set_jump_as_return();
writel(RW_MGR_MRS2, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(RW_MGR_MRS3, grpaddr);
delay_for_n_mem_clocks(4);
set_jump_as_return();
writel(RW_MGR_MRS1, grpaddr);
set_jump_as_return();
writel(RW_MGR_MRS0_DLL_RESET, grpaddr);
}
set_jump_as_return();
writel(RW_MGR_ZQCL, grpaddr);
/* tZQinit = tDLLK = 512 ck cycles */
delay_for_n_mem_clocks(512);
}
}
/*
 * At the end of calibration we have to program the user settings in and
 * hand off the memory to the user.
*/
static void rw_mgr_mem_handoff(void)
{
uint32_t r;
uint32_t grpaddr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
RW_MGR_RUN_SINGLE_GROUP_OFFSET;
debug("%s:%d\n", __func__, __LINE__);
for (r = 0; r < RW_MGR_MEM_NUMBER_OF_RANKS; r++) {
if (param->skip_ranks[r])
/* request to skip the rank */
continue;
/* set rank */
set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_OFF);