	 * If a and b are the numbers of iterations in the two nested loops,
	 * it takes the following number of cycles to complete the operation:
	 * number_of_cycles = ((2 + n) * a + 2) * b
	 * where n is the number of instructions in the inner loop.
	 * One possible solution is n = 2, a = 131, b = 256 => a = 0x83,
	 * b = 0xFF (see the worked example below).
	 */
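	/*
	 * Worked example (illustrative, not from the original sources):
	 * with n = 2, a = 131 and b = 256 the formula above gives
	 * number_of_cycles = ((2 + 2) * 131 + 2) * 256 = 134,656 memory
	 * clock cycles. 131 is 0x83; the assumption here is that the outer
	 * counter register value 0xFF (255) yields 256 iterations because
	 * the loop runs value + 1 times.
	 */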
    
    	rw_mgr_mem_init_load_regs(SEQ_TRESET_CNTR0_VAL, SEQ_TRESET_CNTR1_VAL,
    				  SEQ_TRESET_CNTR2_VAL,
    				  RW_MGR_INIT_RESET_1_CKE_0);
    
    	/* Bring up clock enable. */
    
    
    	/* tXRP < 250 ck cycles */
    	delay_for_n_mem_clocks(250);
    
    
    	rw_mgr_mem_load_user(RW_MGR_MRS0_DLL_RESET_MIRR, RW_MGR_MRS0_DLL_RESET,
    			     0);
    
    }
    
/*
 * At the end of calibration we have to program the user settings in and
 * hand off the memory to the user.
 */
    static void rw_mgr_mem_handoff(void)
    {
    
    	rw_mgr_mem_load_user(RW_MGR_MRS0_USER_MIRR, RW_MGR_MRS0_USER, 1);
	/*
	 * We need to wait tMOD (12 CK or 15 ns) before issuing other
	 * commands, but we will have plenty of NIOS cycles before the
	 * actual handoff, so it's okay.
	 */
}
    
/*
 * Issue a write test command. Two variants are provided: one that just tests
 * a write pattern and another that tests datamask functionality.
 */
    static void rw_mgr_mem_calibrate_write_test_issue(uint32_t group,
    						  uint32_t test_dm)
    {
    	uint32_t mcc_instruction;
    	uint32_t quick_write_mode = (((STATIC_CALIB_STEPS) & CALIB_SKIP_WRITES) &&
    		ENABLE_SUPER_QUICK_CALIBRATION);
    	uint32_t rw_wl_nop_cycles;
    	uint32_t addr;
    
	/*
	 * Set counter and jump addresses for the right
	 * number of NOP cycles.
	 * The number of supported NOP cycles can range from -1 to infinity.
	 * Three different cases are handled:
	 *
	 * 1. For a number of NOP cycles greater than 0, the RW Mgr looping
	 *    mechanism will be used to insert the right number of NOPs.
	 *
	 * 2. For a number of NOP cycles equal to 0, the micro-instruction
	 *    issuing the write command will jump straight to the
	 *    micro-instruction that turns on DQS (for DDRx), or outputs write
	 *    data (for RLD), skipping the NOP micro-instruction altogether.
	 *
	 * 3. A number of NOP cycles equal to -1 indicates that DQS must be
	 *    turned on in the same micro-instruction that issues the write
	 *    command. Then we need to jump directly to the micro-instruction
	 *    that sends out the data.
	 *
	 * NOTE: Implementing this mechanism uses two RW Mgr jump-counters
	 *       (2 and 3). One jump-counter (0) is used to perform multiple
	 *       write-read operations, leaving one counter to issue this
	 *       command in "multiple-group" mode. See the counter summary
	 *       below.
	 */
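	/*
	 * Counter/jump-register usage in this function, summarized for
	 * reference (values taken from the code below):
	 *
	 *   CNTR/jump 0 - repeats the write-read sequence: 0x08 in quick
	 *                 write mode, 0x40 otherwise.
	 *   CNTR/jump 1 - wait loop so read data has time to return (0x30).
	 *   CNTR/jump 2 - skip-to-DQS / skip-to-data jump for the 0 and -1
	 *                 NOP-cycle cases (0xFF so the jump is always taken),
	 *                 unused (0) in the looping case.
	 *   CNTR/jump 3 - NOP loop counter (rw_wl_nop_cycles - 1) in the
	 *                 looping case, unused otherwise.
	 */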
    
    	rw_wl_nop_cycles = gbl->rw_wl_nop_cycles;
    
    	if (rw_wl_nop_cycles == -1) {
    		/*
    		 * CNTR 2 - We want to execute the special write operation that
    		 * turns on DQS right away and then skip directly to the
    		 * instruction that sends out the data. We set the counter to a
    		 * large number so that the jump is always taken.
    		 */
    		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
    
    		/* CNTR 3 - Not used */
    		if (test_dm) {
    			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0_WL_1;
    			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DATA,
    			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
    			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
    			       &sdr_rw_load_jump_mgr_regs->load_jump_add3);
    		} else {
    			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0_WL_1;
    			writel(RW_MGR_LFSR_WR_RD_BANK_0_DATA,
    				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
    			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
    				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
    		}
    	} else if (rw_wl_nop_cycles == 0) {
    		/*
    		 * CNTR 2 - We want to skip the NOP operation and go straight
    		 * to the DQS enable instruction. We set the counter to a large
    		 * number so that the jump is always taken.
    		 */
    		writel(0xFF, &sdr_rw_load_mgr_regs->load_cntr2);
    
    		/* CNTR 3 - Not used */
    		if (test_dm) {
    			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
    			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_DQS,
    			       &sdr_rw_load_jump_mgr_regs->load_jump_add2);
    		} else {
    			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
    			writel(RW_MGR_LFSR_WR_RD_BANK_0_DQS,
    				&sdr_rw_load_jump_mgr_regs->load_jump_add2);
    		}
    	} else {
    		/*
    		 * CNTR 2 - In this case we want to execute the next instruction
    		 * and NOT take the jump. So we set the counter to 0. The jump
    		 * address doesn't count.
    		 */
    		writel(0x0, &sdr_rw_load_mgr_regs->load_cntr2);
    		writel(0x0, &sdr_rw_load_jump_mgr_regs->load_jump_add2);
    
    		/*
    		 * CNTR 3 - Set the nop counter to the number of cycles we
    		 * need to loop for, minus 1.
    		 */
    		writel(rw_wl_nop_cycles - 1, &sdr_rw_load_mgr_regs->load_cntr3);
    		if (test_dm) {
    			mcc_instruction = RW_MGR_LFSR_WR_RD_DM_BANK_0;
    			writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_NOP,
    				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
    		} else {
    			mcc_instruction = RW_MGR_LFSR_WR_RD_BANK_0;
    			writel(RW_MGR_LFSR_WR_RD_BANK_0_NOP,
    				&sdr_rw_load_jump_mgr_regs->load_jump_add3);
    		}
    	}
    
    	writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
    		  RW_MGR_RESET_READ_DATAPATH_OFFSET);
    
    	if (quick_write_mode)
    		writel(0x08, &sdr_rw_load_mgr_regs->load_cntr0);
    	else
    		writel(0x40, &sdr_rw_load_mgr_regs->load_cntr0);
    
    	writel(mcc_instruction, &sdr_rw_load_jump_mgr_regs->load_jump_add0);
    
    	/*
    	 * CNTR 1 - This is used to ensure enough time elapses
    	 * for read data to come back.
    	 */
    	writel(0x30, &sdr_rw_load_mgr_regs->load_cntr1);
    
    	if (test_dm) {
    		writel(RW_MGR_LFSR_WR_RD_DM_BANK_0_WAIT,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
    	} else {
    		writel(RW_MGR_LFSR_WR_RD_BANK_0_WAIT,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
    	}
    
    	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
    	writel(mcc_instruction, addr + (group << 2));
    }
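/*
 * Caller-side note (illustrative, mirroring rw_mgr_mem_calibrate_write_test()
 * below): 'group' is a virtual-group index, so callers iterate the virtual
 * groups of a write group and pass
 *
 *	write_group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg
 *
 * while 'test_dm' selects the datamask-exercising LFSR sequence instead of
 * the plain write-pattern one.
 */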
    
/* Test writes; can check for a single-bit pass or multiple-bit pass. */
    
    static int
    rw_mgr_mem_calibrate_write_test(const u32 rank_bgn, const u32 write_group,
    				const u32 use_dm, const u32 all_correct,
				u32 *bit_chk, const u32 all_ranks)
{
    	const u32 rank_end = all_ranks ?
    				RW_MGR_MEM_NUMBER_OF_RANKS :
    				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
    	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_WRITE_DQS /
    				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS;
    	const u32 correct_mask_vg = param->write_correct_mask_vg;
    
    	u32 tmp_bit_chk, base_rw_mgr;
    	int vg, r;
    
    
    	*bit_chk = param->write_correct_mask;
    
    	for (r = rank_bgn; r < rank_end; r++) {
    
    		/* Request to skip the rank */
		if (param->skip_ranks[r])
			continue;

    		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
    
    		tmp_bit_chk = 0;
    
    		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS - 1;
    		     vg >= 0; vg--) {
    			/* Reset the FIFOs to get pointers to known state. */
    
    			writel(0, &phy_mgr_cmd->fifo_reset);
    
    
    			rw_mgr_mem_calibrate_write_test_issue(
    				write_group *
				RW_MGR_MEM_VIRTUAL_GROUPS_PER_WRITE_DQS + vg,
				use_dm);

    			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
    			tmp_bit_chk <<= shift_ratio;
    			tmp_bit_chk |= (correct_mask_vg & ~(base_rw_mgr));
    
    	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
    
    		debug_cond(DLEVEL == 2,
    			   "write_test(%u,%u,ALL) : %u == %u => %i\n",
    			   write_group, use_dm, *bit_chk,
    			   param->write_correct_mask,
    			   *bit_chk == param->write_correct_mask);
    
    		return *bit_chk == param->write_correct_mask;
    	} else {
    		set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
    
    		debug_cond(DLEVEL == 2,
    			   "write_test(%u,%u,ONE) : %u != %i => %i\n",
    			   write_group, use_dm, *bit_chk, 0, *bit_chk != 0);
    
    /**
     * rw_mgr_mem_calibrate_read_test_patterns() - Read back test patterns
     * @rank_bgn:	Rank number
     * @group:	Read/Write Group
     * @all_ranks:	Test all ranks
     *
     * Performs a guaranteed read on the patterns we are going to use during a
 * read test to ensure memory works.
 */
    static int
    rw_mgr_mem_calibrate_read_test_patterns(const u32 rank_bgn, const u32 group,
					const u32 all_ranks)
{
    	const u32 addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
    			 RW_MGR_RUN_SINGLE_GROUP_OFFSET;
    	const u32 addr_offset =
    			 (group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS) << 2;
    	const u32 rank_end = all_ranks ?
    				RW_MGR_MEM_NUMBER_OF_RANKS :
    				(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
    	const u32 shift_ratio = RW_MGR_MEM_DQ_PER_READ_DQS /
    				RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
    	const u32 correct_mask_vg = param->read_correct_mask_vg;
    
    	u32 tmp_bit_chk, base_rw_mgr, bit_chk;
    	int vg, r;
    	int ret = 0;
    
    	bit_chk = param->read_correct_mask;
    
    
    	for (r = rank_bgn; r < rank_end; r++) {
    
    		/* Request to skip the rank */
    
    		if (param->skip_ranks[r])
    			continue;
    
    
    		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
    
		/* Load up a constant burst of read commands */
    
    		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
    		writel(RW_MGR_GUARANTEED_READ,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
    
    		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
    		writel(RW_MGR_GUARANTEED_READ_CONT,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
    
    		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1;
    		     vg >= 0; vg--) {
    			/* Reset the FIFOs to get pointers to known state. */
    
    			writel(0, &phy_mgr_cmd->fifo_reset);
    			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
    				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
    
    			writel(RW_MGR_GUARANTEED_READ,
    			       addr + addr_offset + (vg << 2));
    
    			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
    
    			tmp_bit_chk <<= shift_ratio;
    			tmp_bit_chk |= correct_mask_vg & ~base_rw_mgr;
    
    	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
    
    
    	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
    
    
    	if (bit_chk != param->read_correct_mask)
    		ret = -EIO;
    
    	debug_cond(DLEVEL == 1,
    		   "%s:%d test_load_patterns(%u,ALL) => (%u == %u) => %i\n",
    		   __func__, __LINE__, group, bit_chk,
    		   param->read_correct_mask, ret);
    
	return ret;
}

    /**
     * rw_mgr_mem_calibrate_read_load_patterns() - Load up the patterns for read test
     * @rank_bgn:	Rank number
     * @all_ranks:	Test all ranks
     *
     * Load up the patterns we are going to use during a read test.
     */
    static void rw_mgr_mem_calibrate_read_load_patterns(const u32 rank_bgn,
						    const int all_ranks)
{
    	const u32 rank_end = all_ranks ?
    			RW_MGR_MEM_NUMBER_OF_RANKS :
    			(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
    	u32 r;
    
    
    	debug("%s:%d\n", __func__, __LINE__);
    
    	for (r = rank_bgn; r < rank_end; r++) {
    		if (param->skip_ranks[r])
    			/* request to skip the rank */
    			continue;
    
    		/* set rank */
    		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
    
		/* Load up a constant burst */
    
    		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr0);
    
    		writel(RW_MGR_GUARANTEED_WRITE_WAIT0,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
    
    		writel(0x20, &sdr_rw_load_mgr_regs->load_cntr1);
    
    		writel(RW_MGR_GUARANTEED_WRITE_WAIT1,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
    
    		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr2);
    
    		writel(RW_MGR_GUARANTEED_WRITE_WAIT2,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add2);
    
    		writel(0x04, &sdr_rw_load_mgr_regs->load_cntr3);
    
    		writel(RW_MGR_GUARANTEED_WRITE_WAIT3,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add3);
    
    		writel(RW_MGR_GUARANTEED_WRITE, SDR_PHYGRP_RWMGRGRP_ADDRESS |
    						RW_MGR_RUN_SINGLE_GROUP_OFFSET);
    
    	}
    
    	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);
    }
    
    
    /**
     * rw_mgr_mem_calibrate_read_test() - Perform READ test on single rank
     * @rank_bgn:		Rank number
     * @group:		Read/Write group
     * @num_tries:		Number of retries of the test
     * @all_correct:	All bits must be correct in the mask
     * @bit_chk:		Resulting bit mask after the test
     * @all_groups:		Test all R/W groups
     * @all_ranks:		Test all ranks
     *
     * Try a read and see if it returns correct data back. Test has dummy reads
     * inserted into the mix used to align DQS enable. Test has more thorough
 * checks than the regular read test.
 */
    static int
    rw_mgr_mem_calibrate_read_test(const u32 rank_bgn, const u32 group,
    			       const u32 num_tries, const u32 all_correct,
    			       u32 *bit_chk,
			       const u32 all_groups, const u32 all_ranks)
{
	const u32 rank_end = all_ranks ? RW_MGR_MEM_NUMBER_OF_RANKS :
		(rank_bgn + NUM_RANKS_PER_SHADOW_REG);
    
    	const u32 quick_read_mode =
    		((STATIC_CALIB_STEPS & CALIB_SKIP_DELAY_SWEEPS) &&
    		 ENABLE_SUPER_QUICK_CALIBRATION);
    	u32 correct_mask_vg = param->read_correct_mask_vg;
    	u32 tmp_bit_chk;
    	u32 base_rw_mgr;
	u32 addr;
	int r, vg, ret;

    	*bit_chk = param->read_correct_mask;
    
    
    	for (r = rank_bgn; r < rank_end; r++) {
    		if (param->skip_ranks[r])
    			/* request to skip the rank */
    			continue;
    
    		/* set rank */
    		set_rank_and_odt_mask(r, RW_MGR_ODT_MODE_READ_WRITE);
    
    
    		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr1);
    
    		writel(RW_MGR_READ_B2B_WAIT1,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add1);
    
    		writel(0x10, &sdr_rw_load_mgr_regs->load_cntr2);
    		writel(RW_MGR_READ_B2B_WAIT2,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add2);
    
		if (quick_read_mode)
			/* Need at least two (1 + 1) reads to capture failures. */
			writel(0x1, &sdr_rw_load_mgr_regs->load_cntr0);
		else if (all_groups)
			writel(0x06, &sdr_rw_load_mgr_regs->load_cntr0);
		else
			writel(0x32, &sdr_rw_load_mgr_regs->load_cntr0);
    
    		writel(RW_MGR_READ_B2B,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add0);
    
		if (all_groups)
			writel(RW_MGR_MEM_IF_READ_DQS_WIDTH *
			       RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1,
			       &sdr_rw_load_mgr_regs->load_cntr3);
		else
			writel(0x0, &sdr_rw_load_mgr_regs->load_cntr3);
    
    		writel(RW_MGR_READ_B2B,
    			&sdr_rw_load_jump_mgr_regs->load_jump_add3);
    
    		for (vg = RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS - 1; vg >= 0;
    		     vg--) {
    
    			/* Reset the FIFOs to get pointers to known state. */
    
    			writel(0, &phy_mgr_cmd->fifo_reset);
    			writel(0, SDR_PHYGRP_RWMGRGRP_ADDRESS |
    				  RW_MGR_RESET_READ_DATAPATH_OFFSET);
    
    			if (all_groups) {
    				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
    				       RW_MGR_RUN_ALL_GROUPS_OFFSET;
    			} else {
    				addr = SDR_PHYGRP_RWMGRGRP_ADDRESS |
    				       RW_MGR_RUN_SINGLE_GROUP_OFFSET;
    			}
    
			writel(RW_MGR_READ_B2B, addr +
			       ((group * RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS +
			       vg) << 2));
    
    
    			base_rw_mgr = readl(SDR_PHYGRP_RWMGRGRP_ADDRESS);
    
    			tmp_bit_chk <<= RW_MGR_MEM_DQ_PER_READ_DQS /
    					RW_MGR_MEM_VIRTUAL_GROUPS_PER_READ_DQS;
    			tmp_bit_chk |= correct_mask_vg & ~(base_rw_mgr);
    
    	addr = SDR_PHYGRP_RWMGRGRP_ADDRESS | RW_MGR_RUN_SINGLE_GROUP_OFFSET;
    
    	writel(RW_MGR_CLEAR_DQS_ENABLE, addr + (group << 2));
    
	set_rank_and_odt_mask(0, RW_MGR_ODT_MODE_OFF);

	if (all_correct) {
		ret = (*bit_chk == param->read_correct_mask);
    		debug_cond(DLEVEL == 2,
    			   "%s:%d read_test(%u,ALL,%u) => (%u == %u) => %i\n",
    			   __func__, __LINE__, group, all_groups, *bit_chk,
			   param->read_correct_mask, ret);
	} else {
		ret = (*bit_chk != 0x00);
    		debug_cond(DLEVEL == 2,
    			   "%s:%d read_test(%u,ONE,%u) => (%u != %u) => %i\n",
    			   __func__, __LINE__, group, all_groups, *bit_chk,
			   0, ret);
	}

	return ret;
}

    /**
     * rw_mgr_mem_calibrate_read_test_all_ranks() - Perform READ test on all ranks
     * @grp:		Read/Write group
     * @num_tries:		Number of retries of the test
     * @all_correct:	All bits must be correct in the mask
     * @all_groups:		Test all R/W groups
     *
     * Perform a READ test across all memory ranks.
     */
    static int
    rw_mgr_mem_calibrate_read_test_all_ranks(const u32 grp, const u32 num_tries,
    					 const u32 all_correct,
					 const u32 all_groups)
{
    	u32 bit_chk;
    	return rw_mgr_mem_calibrate_read_test(0, grp, num_tries, all_correct,
					      &bit_chk, all_groups, 1);
}

    /**
     * rw_mgr_incr_vfifo() - Increase VFIFO value
     * @grp:	Read/Write group
     *
     * Increase VFIFO value.
     */
    
static void rw_mgr_incr_vfifo(const u32 grp)
{
	writel(grp, &phy_mgr_cmd->inc_vfifo_hard_phy);
}

    /**
     * rw_mgr_decr_vfifo() - Decrease VFIFO value
     * @grp:	Read/Write group
     *
     * Decrease VFIFO value.
     */
    
static void rw_mgr_decr_vfifo(const u32 grp)
{
	u32 i;

	for (i = 0; i < VFIFO_SIZE - 1; i++)
		rw_mgr_incr_vfifo(grp);
}

    /**
     * find_vfifo_failing_read() - Push VFIFO to get a failing read
     * @grp:	Read/Write group
     *
     * Push VFIFO until a failing read happens.
     */
static int find_vfifo_failing_read(const u32 grp)
{
	u32 v;
	int ret;

    	for (v = 0; v < VFIFO_SIZE; v++) {
    
		debug_cond(DLEVEL == 2, "%s:%d: vfifo %u\n",
			   __func__, __LINE__, v);
    
    		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
    
    		/* Fiddle with FIFO. */
    
    		rw_mgr_incr_vfifo(grp);
    
    	/* No failing read found! Something must have gone wrong. */
    	debug_cond(DLEVEL == 2, "%s:%d: vfifo failed\n", __func__, __LINE__);
	return 0;
}

    /**
     * sdr_find_phase_delay() - Find DQS enable phase or delay
     * @working:	If 1, look for working phase/delay, if 0, look for non-working
     * @delay:	If 1, look for delay, if 0, look for phase
     * @grp:	Read/Write group
     * @work:	Working window position
     * @work_inc:	Working window increment
     * @pd:		DQS Phase/Delay Iterator
     *
     * Find working or non-working DQS enable phase setting.
     */
    static int sdr_find_phase_delay(int working, int delay, const u32 grp,
    				u32 *work, const u32 work_inc, u32 *pd)
    {
	const u32 max = delay ? IO_DQS_EN_DELAY_MAX : IO_DQS_EN_PHASE_MAX;
	u32 ret;

    	for (; *pd <= max; (*pd)++) {
    		if (delay)
    			scc_mgr_set_dqs_en_delay_all_ranks(grp, *pd);
    		else
    			scc_mgr_set_dqs_en_phase_all_ranks(grp, *pd);
    
    		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
    
    		if (!working)
    			ret = !ret;
    
    		if (ret)
    			return 0;
    
    		if (work)
    			*work += work_inc;
    	}
    
    	return -EINVAL;
    }
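
/*
 * Illustrative usage of sdr_find_phase_delay() (a sketch only; the real
 * callers appear further down in this file):
 *
 *	u32 work = 0, d = 0;
 *
 *	// Scan dtaps (delay = 1) for the first working setting (working = 1),
 *	// accumulating the scanned window position in 'work'.
 *	if (!sdr_find_phase_delay(1, 1, grp, &work,
 *				  IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d))
 *		printf("first passing dtap %u at position %u\n", d, work);
 *
 * Passing working = 0 inverts the pass/fail test, so the same helper finds
 * the first failing setting, i.e. the far edge of the window.
 */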
    
    /**
     * sdr_find_phase() - Find DQS enable phase
     * @working:	If 1, look for working phase, if 0, look for non-working phase
     * @grp:	Read/Write group
     * @work:	Working window position
     * @i:		Iterator
     * @p:		DQS Phase Iterator
     *
     * Find working or non-working DQS enable phase setting.
     */
    
static int sdr_find_phase(int working, const u32 grp, u32 *work,
			  u32 *i, u32 *p)
{
	const u32 end = VFIFO_SIZE + (working ? 0 : 1);
	u32 ret;
    
    	for (; *i < end; (*i)++) {
    		if (working)
    			*p = 0;
    
    
    		ret = sdr_find_phase_delay(working, 0, grp, work,
    					   IO_DELAY_PER_OPA_TAP, p);
    		if (!ret)
    			return 0;
    
    
    		if (*p > IO_DQS_EN_PHASE_MAX) {
    			/* Fiddle with FIFO. */
    
    			rw_mgr_incr_vfifo(grp);
    
    			if (!working)
				*p = 0;
		}
	}

	/* Cannot find a working solution. */
	return -EINVAL;
}

    /**
     * sdr_working_phase() - Find working DQS enable phase
     * @grp:	Read/Write group
     * @work_bgn:	Working window start position
     * @d:		dtaps output value
     * @p:		DQS Phase Iterator
     * @i:		Iterator
     *
     * Find working DQS enable phase setting.
     */
    
static int sdr_working_phase(const u32 grp, u32 *work_bgn, u32 *d,
			     u32 *p, u32 *i)
{
    	const u32 dtaps_per_ptap = IO_DELAY_PER_OPA_TAP /
    				   IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
    
    	int ret;
    
    	*work_bgn = 0;
    
    	for (*d = 0; *d <= dtaps_per_ptap; (*d)++) {
    		*i = 0;
    		scc_mgr_set_dqs_en_delay_all_ranks(grp, *d);
    
    		ret = sdr_find_phase(1, grp, work_bgn, i, p);
    
    		if (!ret)
    			return 0;
    		*work_bgn += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
    	}
    
    
    	/* Cannot find working solution */
    
    	debug_cond(DLEVEL == 2, "%s:%d find_dqs_en_phase: no vfifo/ptap/dtap\n",
    		   __func__, __LINE__);
	return -EINVAL;
}

    /**
     * sdr_backup_phase() - Find DQS enable backup phase
     * @grp:	Read/Write group
     * @work_bgn:	Working window start position
     * @p:		DQS Phase Iterator
     *
     * Find DQS enable backup phase setting.
     */
    
static void sdr_backup_phase(const u32 grp, u32 *work_bgn, u32 *p)
{
	u32 tmp_delay, d;
	int ret;

    	/* Special case code for backing up a phase */
    	if (*p == 0) {
    		*p = IO_DQS_EN_PHASE_MAX;
    
    		rw_mgr_decr_vfifo(grp);
    
    	} else {
    		(*p)--;
    	}
    	tmp_delay = *work_bgn - IO_DELAY_PER_OPA_TAP;
    
    	scc_mgr_set_dqs_en_phase_all_ranks(grp, *p);
    
    	for (d = 0; d <= IO_DQS_EN_DELAY_MAX && tmp_delay < *work_bgn; d++) {
    		scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
    
    		ret = rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
    
    			*work_bgn = tmp_delay;
    			break;
    		}
    
    
		tmp_delay += IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
	}

    	/* Restore VFIFO to old state before we decremented it (if needed). */
    
    	(*p)++;
    	if (*p > IO_DQS_EN_PHASE_MAX) {
    		*p = 0;
    
		rw_mgr_incr_vfifo(grp);
	}

	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
}
    
    /**
     * sdr_nonworking_phase() - Find non-working DQS enable phase
     * @grp:	Read/Write group
     * @work_end:	Working window end position
     * @p:		DQS Phase Iterator
     * @i:		Iterator
     *
     * Find non-working DQS enable phase setting.
     */
    
static int sdr_nonworking_phase(const u32 grp, u32 *work_end, u32 *p, u32 *i)
{
	int ret;

    	(*p)++;
    	*work_end += IO_DELAY_PER_OPA_TAP;
    	if (*p > IO_DQS_EN_PHASE_MAX) {
    
    		/* Fiddle with FIFO. */
    
		*p = 0;
		rw_mgr_incr_vfifo(grp);
	}

    	ret = sdr_find_phase(0, grp, work_end, i, p);
    
    	if (ret) {
    		/* Cannot see edge of failing read. */
    		debug_cond(DLEVEL == 2, "%s:%d: end: failed\n",
			   __func__, __LINE__);
	}

	return ret;
}

    /**
     * sdr_find_window_center() - Find center of the working DQS window.
     * @grp:	Read/Write group
     * @work_bgn:	First working settings
     * @work_end:	Last working settings
     *
     * Find center of the working DQS enable window.
     */
static int sdr_find_window_center(const u32 grp, const u32 work_bgn,
				  const u32 work_end)
{
	u32 work_mid, tmp_delay;
	int i, p, d;

    	work_mid = (work_bgn + work_end) / 2;
    
    
	debug_cond(DLEVEL == 2, "work_bgn=%d work_end=%d work_mid=%d\n",
		   work_bgn, work_end, work_mid);
    
    	/* Get the middle delay to be less than a VFIFO delay */
    
    	tmp_delay = (IO_DQS_EN_PHASE_MAX + 1) * IO_DELAY_PER_OPA_TAP;
    
    	debug_cond(DLEVEL == 2, "vfifo ptap delay %d\n", tmp_delay);
    
    	work_mid %= tmp_delay;
    
    	debug_cond(DLEVEL == 2, "new work_mid %d\n", work_mid);
    
    	tmp_delay = rounddown(work_mid, IO_DELAY_PER_OPA_TAP);
    	if (tmp_delay > IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP)
    		tmp_delay = IO_DQS_EN_PHASE_MAX * IO_DELAY_PER_OPA_TAP;
    	p = tmp_delay / IO_DELAY_PER_OPA_TAP;
    
    	debug_cond(DLEVEL == 2, "new p %d, tmp_delay=%d\n", p, tmp_delay);
    
    	d = DIV_ROUND_UP(work_mid - tmp_delay, IO_DELAY_PER_DQS_EN_DCHAIN_TAP);
    	if (d > IO_DQS_EN_DELAY_MAX)
    		d = IO_DQS_EN_DELAY_MAX;
    	tmp_delay += d * IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
    
    
    	debug_cond(DLEVEL == 2, "new d %d, tmp_delay=%d\n", d, tmp_delay);
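	/*
	 * Worked example of the centering math above (tap delays are assumed
	 * values, purely for illustration): with IO_DELAY_PER_OPA_TAP = 400,
	 * IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25, IO_DQS_EN_PHASE_MAX = 7,
	 * work_bgn = 1000 and work_end = 2600: work_mid = 1800; the VFIFO
	 * ptap delay is 8 * 400 = 3200, so work_mid stays 1800;
	 * rounddown(1800, 400) = 1600 => p = 4; then
	 * d = DIV_ROUND_UP(1800 - 1600, 25) = 8, giving a final tmp_delay of
	 * 1600 + 8 * 25 = 1800.
	 */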
    
    	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
    
    	scc_mgr_set_dqs_en_delay_all_ranks(grp, d);
    
    
	/*
	 * Push VFIFO until we can successfully calibrate. We can do this
	 * because the largest possible margin is 1 VFIFO cycle.
	 */
    	for (i = 0; i < VFIFO_SIZE; i++) {
    
    		debug_cond(DLEVEL == 2, "find_dqs_en_phase: center\n");
    
    		if (rw_mgr_mem_calibrate_read_test_all_ranks(grp, 1,
    
    			debug_cond(DLEVEL == 2,
    
    				   "%s:%d center: found: ptap=%u dtap=%u\n",
				   __func__, __LINE__, p, d);
			return 0;
		}

    		/* Fiddle with FIFO. */
    
		rw_mgr_incr_vfifo(grp);
	}

    	debug_cond(DLEVEL == 2, "%s:%d center: failed.\n",
    		   __func__, __LINE__);
	return -EINVAL;
}

    /**
     * rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase() - Find a good DQS enable to use
     * @grp:	Read/Write Group
     *
     * Find a good DQS enable to use.
     */
    
static int rw_mgr_mem_calibrate_vfifo_find_dqs_en_phase(const u32 grp)
{
    	u32 d, p, i;
    	u32 dtaps_per_ptap;
    	u32 work_bgn, work_end;
    	u32 found_passing_read, found_failing_read, initial_failing_dtap;
    	int ret;
    
    
    	debug("%s:%d %u\n", __func__, __LINE__, grp);
    
    	reg_file_set_sub_stage(CAL_SUBSTAGE_VFIFO_CENTER);
    
    	scc_mgr_set_dqs_en_delay_all_ranks(grp, 0);
    	scc_mgr_set_dqs_en_phase_all_ranks(grp, 0);
    
    
    	/* Step 0: Determine number of delay taps for each phase tap. */
    	dtaps_per_ptap = IO_DELAY_PER_OPA_TAP / IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
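	/*
	 * For illustration only (assumed tap delays): if
	 * IO_DELAY_PER_OPA_TAP = 400 and IO_DELAY_PER_DQS_EN_DCHAIN_TAP = 25,
	 * the static estimate is dtaps_per_ptap = 400 / 25 = 16.
	 */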
    
    	/* Step 1: First push vfifo until we get a failing read. */
    
    	find_vfifo_failing_read(grp);
    
    	/* Step 2: Find first working phase, increment in ptaps. */
    
    	ret = sdr_working_phase(grp, &work_bgn, &d, &p, &i);
    	if (ret)
    		return ret;
    
	/*
	 * If d is 0 then the working window covers a phase tap and we can
	 * follow the old procedure. Otherwise, we've found the beginning
	 * and we need to increment the dtaps until we find the end.
	 */
    	if (d == 0) {
    
    		/*
    		 * Step 3a: If we have room, back off by one and
    		 *          increment in dtaps.
    		 */
    
    		sdr_backup_phase(grp, &work_bgn, &p);
    
    		/*
    		 * Step 4a: go forward from working phase to non working
    		 * phase, increment in ptaps.
    		 */
    
    		ret = sdr_nonworking_phase(grp, &work_end, &p, &i);
    		if (ret)
    			return ret;
    
    		/* Step 5a: Back off one from last, increment in dtaps. */
    
    
    		/* Special case code for backing up a phase */
    		if (p == 0) {
    			p = IO_DQS_EN_PHASE_MAX;
    
    			rw_mgr_decr_vfifo(grp);
    
    		} else {
    			p = p - 1;
    		}
    
    		work_end -= IO_DELAY_PER_OPA_TAP;
    		scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
    
    		d = 0;
    
    
    		debug_cond(DLEVEL == 2, "%s:%d p: ptap=%u\n",
			   __func__, __LINE__, p);
	}

    	/* The dtap increment to find the failing edge is done here. */
    
    	sdr_find_phase_delay(0, 1, grp, &work_end,
    			     IO_DELAY_PER_DQS_EN_DCHAIN_TAP, &d);
    
    
    	/* Go back to working dtap */
    	if (d != 0)
    		work_end -= IO_DELAY_PER_DQS_EN_DCHAIN_TAP;
    
    
    	debug_cond(DLEVEL == 2,
    		   "%s:%d p/d: ptap=%u dtap=%u end=%u\n",
    		   __func__, __LINE__, p, d - 1, work_end);
    
    
    	if (work_end < work_bgn) {
    		/* nil range */
    
    		debug_cond(DLEVEL == 2, "%s:%d end-2: failed\n",
			   __func__, __LINE__);
		return -EINVAL;
	}

	debug_cond(DLEVEL == 2, "%s:%d found range [%u,%u]\n",
		   __func__, __LINE__, work_bgn, work_end);
    
	/*
	 * We need to calculate the number of dtaps that equal a ptap.
	 * To do that we'll back up a ptap and re-find the edge of the
	 * window using dtaps.
	 */

    	debug_cond(DLEVEL == 2, "%s:%d calculate dtaps_per_ptap for tracking\n",
    		   __func__, __LINE__);
    
    
    	/* Special case code for backing up a phase */
    	if (p == 0) {
    		p = IO_DQS_EN_PHASE_MAX;
    
    		rw_mgr_decr_vfifo(grp);
    
    		debug_cond(DLEVEL == 2, "%s:%d backedup cycle/phase: p=%u\n",
			   __func__, __LINE__, p);
	} else {
		p = p - 1;
		debug_cond(DLEVEL == 2, "%s:%d backedup phase only: p=%u",
    			   __func__, __LINE__, p);
    
    	}
    
    	scc_mgr_set_dqs_en_phase_all_ranks(grp, p);
    
	/*
	 * Increase dtap until we first see a passing read (in case the
	 * window is smaller than a ptap), and then a failing read to
	 * mark the edge of the window again.
	 */

    	/* Find a passing read. */
	debug_cond(DLEVEL == 2, "%s:%d find passing read\n",
		   __func__, __LINE__);
    	found_passing_read = !sdr_find_phase_delay(1, 1, grp, NULL, 0, &d);
    
    	if (found_passing_read) {
    
    		/* Find a failing read. */
    		debug_cond(DLEVEL == 2, "%s:%d find failing read\n",
    			   __func__, __LINE__);
    
    		d++;
    		found_failing_read = !sdr_find_phase_delay(0, 1, grp, NULL, 0,
							   &d);
	} else {
    		debug_cond(DLEVEL == 1,
    			   "%s:%d failed to calculate dtaps per ptap. Fall back on static value\n",
    			   __func__, __LINE__);
    
    	}
    
    	/*
    	 * The dynamically calculated dtaps_per_ptap is only valid if we
    	 * found a passing/failing read. If we didn't, it means d hit the max
    	 * (IO_DQS_EN_DELAY_MAX). Otherwise, dtaps_per_ptap retains its
    	 * statically calculated value.
    	 */
    	if (found_passing_read && found_failing_read)
    		dtaps_per_ptap = d - initial_failing_dtap;
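	/*
	 * Illustrative example (assumed values): if the first failing dtap
	 * recorded earlier was initial_failing_dtap = 3 and the search above
	 * stopped at d = 19, the measured spacing is
	 * dtaps_per_ptap = 19 - 3 = 16, in line with the static estimate
	 * sketched near Step 0 above.
	 */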
    
    
    	writel(dtaps_per_ptap, &sdr_reg_file->dtaps_per_ptap);
    
    	debug_cond(DLEVEL == 2, "%s:%d dtaps_per_ptap=%u - %u = %u",
    		   __func__, __LINE__, d, initial_failing_dtap, dtaps_per_ptap);
    
	/* Step 6: Find the center of the window. */
	ret = sdr_find_window_center(grp, work_bgn, work_end);

	return ret;
}

    /**
     * search_stop_check() - Check if the detected edge is valid
     * @write:		Perform read (Stage 2) or write (Stage 3) calibration
     * @d:			DQS delay
     * @rank_bgn:		Rank number
     * @write_group:	Write Group
     * @read_group:		Read Group
     * @bit_chk:		Resulting bit mask after the test
     * @sticky_bit_chk:	Resulting sticky bit mask after the test
     * @use_read_test:	Perform read test
     *
     * Test if the found edge is valid.
     */
    static u32 search_stop_check(const int write, const int d, const int rank_bgn,
    			     const u32 write_group, const u32 read_group,
    			     u32 *bit_chk, u32 *sticky_bit_chk,
    			     const u32 use_read_test)
    {
    	const u32 ratio = RW_MGR_MEM_IF_READ_DQS_WIDTH /
    			  RW_MGR_MEM_IF_WRITE_DQS_WIDTH;
    	const u32 correct_mask = write ? param->write_correct_mask :
    					 param->read_correct_mask;
    	const u32 per_dqs = write ? RW_MGR_MEM_DQ_PER_WRITE_DQS :
    				    RW_MGR_MEM_DQ_PER_READ_DQS;