Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memory: emif: remove unused frequency and voltage notifiers

The driver defined several functions related to handling of frequency
and voltage changes:
- freq_post_notify_handling
- freq_pre_notify_handling
- volt_notify_handling

All these are static, not used inside or outside of driver, and marked
as unused with comment: "TODO: voltage notify handling should be hooked
up to regulator framework as soon as the necessary support is available
in mainline kernel. This function is un-used right now.".

These have been added with commit a93de288aad3 ("memory: emif: handle
frequency and voltage change events") in 2012 and are unused since then.
Additionally mentioned regulator and clock hooking did not happen since
then. If it did not happen for nine years, let's assume it will not
happen suddenly now.

Remove all unused functions, which also allows removal of the static
variable "t_ck" and the "addressing" member of the private structure.

No functionality is lost.

Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
Link: https://lore.kernel.org/r/20210527154101.80556-1-krzysztof.kozlowski@canonical.com

-678
-678
drivers/memory/emif.c
··· 41 41 * @node: node in the device list 42 42 * @base: base address of memory-mapped IO registers. 43 43 * @dev: device pointer. 44 - * @addressing table with addressing information from the spec 45 44 * @regs_cache: An array of 'struct emif_regs' that stores 46 45 * calculated register values for different 47 46 * frequencies, to avoid re-calculating them on ··· 60 61 unsigned long irq_state; 61 62 void __iomem *base; 62 63 struct device *dev; 63 - const struct lpddr2_addressing *addressing; 64 64 struct emif_regs *regs_cache[EMIF_MAX_NUM_FREQUENCIES]; 65 65 struct emif_regs *curr_regs; 66 66 struct emif_platform_data *plat_data; ··· 70 72 static struct emif_data *emif1; 71 73 static DEFINE_SPINLOCK(emif_lock); 72 74 static unsigned long irq_state; 73 - static u32 t_ck; /* DDR clock period in ps */ 74 75 static LIST_HEAD(device_list); 75 76 76 77 #ifdef CONFIG_DEBUG_FS ··· 167 170 #endif 168 171 169 172 /* 170 - * Calculate the period of DDR clock from frequency value 171 - */ 172 - static void set_ddr_clk_period(u32 freq) 173 - { 174 - /* Divide 10^12 by frequency to get period in ps */ 175 - t_ck = (u32)DIV_ROUND_UP_ULL(1000000000000ull, freq); 176 - } 177 - 178 - /* 179 173 * Get bus width used by EMIF. Note that this may be different from the 180 174 * bus width of the DDR devices used. For instance two 16-bit DDR devices 181 175 * may be connected to a given CS of EMIF. In this case bus width as far ··· 182 194 width = width == 0 ? 
32 : 16; 183 195 184 196 return width; 185 - } 186 - 187 - /* 188 - * Get the CL from SDRAM_CONFIG register 189 - */ 190 - static u32 get_cl(struct emif_data *emif) 191 - { 192 - u32 cl; 193 - void __iomem *base = emif->base; 194 - 195 - cl = (readl(base + EMIF_SDRAM_CONFIG) & CL_MASK) >> CL_SHIFT; 196 - 197 - return cl; 198 197 } 199 198 200 199 static void set_lpmode(struct emif_data *emif, u8 lpmode) ··· 303 328 return &lpddr2_jedec_addressing_table[index]; 304 329 } 305 330 306 - /* 307 - * Find the the right timing table from the array of timing 308 - * tables of the device using DDR clock frequency 309 - */ 310 - static const struct lpddr2_timings *get_timings_table(struct emif_data *emif, 311 - u32 freq) 312 - { 313 - u32 i, min, max, freq_nearest; 314 - const struct lpddr2_timings *timings = NULL; 315 - const struct lpddr2_timings *timings_arr = emif->plat_data->timings; 316 - struct device *dev = emif->dev; 317 - 318 - /* Start with a very high frequency - 1GHz */ 319 - freq_nearest = 1000000000; 320 - 321 - /* 322 - * Find the timings table such that: 323 - * 1. the frequency range covers the required frequency(safe) AND 324 - * 2. 
the max_freq is closest to the required frequency(optimal) 325 - */ 326 - for (i = 0; i < emif->plat_data->timings_arr_size; i++) { 327 - max = timings_arr[i].max_freq; 328 - min = timings_arr[i].min_freq; 329 - if ((freq >= min) && (freq <= max) && (max < freq_nearest)) { 330 - freq_nearest = max; 331 - timings = &timings_arr[i]; 332 - } 333 - } 334 - 335 - if (!timings) 336 - dev_err(dev, "%s: couldn't find timings for - %dHz\n", 337 - __func__, freq); 338 - 339 - dev_dbg(dev, "%s: timings table: freq %d, speed bin freq %d\n", 340 - __func__, freq, freq_nearest); 341 - 342 - return timings; 343 - } 344 - 345 - static u32 get_sdram_ref_ctrl_shdw(u32 freq, 346 - const struct lpddr2_addressing *addressing) 347 - { 348 - u32 ref_ctrl_shdw = 0, val = 0, freq_khz, t_refi; 349 - 350 - /* Scale down frequency and t_refi to avoid overflow */ 351 - freq_khz = freq / 1000; 352 - t_refi = addressing->tREFI_ns / 100; 353 - 354 - /* 355 - * refresh rate to be set is 'tREFI(in us) * freq in MHz 356 - * division by 10000 to account for change in units 357 - */ 358 - val = t_refi * freq_khz / 10000; 359 - ref_ctrl_shdw |= val << REFRESH_RATE_SHIFT; 360 - 361 - return ref_ctrl_shdw; 362 - } 363 - 364 - static u32 get_sdram_tim_1_shdw(const struct lpddr2_timings *timings, 365 - const struct lpddr2_min_tck *min_tck, 366 - const struct lpddr2_addressing *addressing) 367 - { 368 - u32 tim1 = 0, val = 0; 369 - 370 - val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1; 371 - tim1 |= val << T_WTR_SHIFT; 372 - 373 - if (addressing->num_banks == B8) 374 - val = DIV_ROUND_UP(timings->tFAW, t_ck*4); 375 - else 376 - val = max(min_tck->tRRD, DIV_ROUND_UP(timings->tRRD, t_ck)); 377 - tim1 |= (val - 1) << T_RRD_SHIFT; 378 - 379 - val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab, t_ck) - 1; 380 - tim1 |= val << T_RC_SHIFT; 381 - 382 - val = max(min_tck->tRASmin, DIV_ROUND_UP(timings->tRAS_min, t_ck)); 383 - tim1 |= (val - 1) << T_RAS_SHIFT; 384 - 385 - val = max(min_tck->tWR, 
DIV_ROUND_UP(timings->tWR, t_ck)) - 1; 386 - tim1 |= val << T_WR_SHIFT; 387 - 388 - val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD, t_ck)) - 1; 389 - tim1 |= val << T_RCD_SHIFT; 390 - 391 - val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab, t_ck)) - 1; 392 - tim1 |= val << T_RP_SHIFT; 393 - 394 - return tim1; 395 - } 396 - 397 - static u32 get_sdram_tim_1_shdw_derated(const struct lpddr2_timings *timings, 398 - const struct lpddr2_min_tck *min_tck, 399 - const struct lpddr2_addressing *addressing) 400 - { 401 - u32 tim1 = 0, val = 0; 402 - 403 - val = max(min_tck->tWTR, DIV_ROUND_UP(timings->tWTR, t_ck)) - 1; 404 - tim1 = val << T_WTR_SHIFT; 405 - 406 - /* 407 - * tFAW is approximately 4 times tRRD. So add 1875*4 = 7500ps 408 - * to tFAW for de-rating 409 - */ 410 - if (addressing->num_banks == B8) { 411 - val = DIV_ROUND_UP(timings->tFAW + 7500, 4 * t_ck) - 1; 412 - } else { 413 - val = DIV_ROUND_UP(timings->tRRD + 1875, t_ck); 414 - val = max(min_tck->tRRD, val) - 1; 415 - } 416 - tim1 |= val << T_RRD_SHIFT; 417 - 418 - val = DIV_ROUND_UP(timings->tRAS_min + timings->tRPab + 1875, t_ck); 419 - tim1 |= (val - 1) << T_RC_SHIFT; 420 - 421 - val = DIV_ROUND_UP(timings->tRAS_min + 1875, t_ck); 422 - val = max(min_tck->tRASmin, val) - 1; 423 - tim1 |= val << T_RAS_SHIFT; 424 - 425 - val = max(min_tck->tWR, DIV_ROUND_UP(timings->tWR, t_ck)) - 1; 426 - tim1 |= val << T_WR_SHIFT; 427 - 428 - val = max(min_tck->tRCD, DIV_ROUND_UP(timings->tRCD + 1875, t_ck)); 429 - tim1 |= (val - 1) << T_RCD_SHIFT; 430 - 431 - val = max(min_tck->tRPab, DIV_ROUND_UP(timings->tRPab + 1875, t_ck)); 432 - tim1 |= (val - 1) << T_RP_SHIFT; 433 - 434 - return tim1; 435 - } 436 - 437 - static u32 get_sdram_tim_2_shdw(const struct lpddr2_timings *timings, 438 - const struct lpddr2_min_tck *min_tck, 439 - const struct lpddr2_addressing *addressing, 440 - u32 type) 441 - { 442 - u32 tim2 = 0, val = 0; 443 - 444 - val = min_tck->tCKE - 1; 445 - tim2 |= val << T_CKE_SHIFT; 446 - 447 - val = 
max(min_tck->tRTP, DIV_ROUND_UP(timings->tRTP, t_ck)) - 1; 448 - tim2 |= val << T_RTP_SHIFT; 449 - 450 - /* tXSNR = tRFCab_ps + 10 ns(tRFCab_ps for LPDDR2). */ 451 - val = DIV_ROUND_UP(addressing->tRFCab_ps + 10000, t_ck) - 1; 452 - tim2 |= val << T_XSNR_SHIFT; 453 - 454 - /* XSRD same as XSNR for LPDDR2 */ 455 - tim2 |= val << T_XSRD_SHIFT; 456 - 457 - val = max(min_tck->tXP, DIV_ROUND_UP(timings->tXP, t_ck)) - 1; 458 - tim2 |= val << T_XP_SHIFT; 459 - 460 - return tim2; 461 - } 462 - 463 - static u32 get_sdram_tim_3_shdw(const struct lpddr2_timings *timings, 464 - const struct lpddr2_min_tck *min_tck, 465 - const struct lpddr2_addressing *addressing, 466 - u32 type, u32 ip_rev, u32 derated) 467 - { 468 - u32 tim3 = 0, val = 0, t_dqsck; 469 - 470 - val = timings->tRAS_max_ns / addressing->tREFI_ns - 1; 471 - val = val > 0xF ? 0xF : val; 472 - tim3 |= val << T_RAS_MAX_SHIFT; 473 - 474 - val = DIV_ROUND_UP(addressing->tRFCab_ps, t_ck) - 1; 475 - tim3 |= val << T_RFC_SHIFT; 476 - 477 - t_dqsck = (derated == EMIF_DERATED_TIMINGS) ? 478 - timings->tDQSCK_max_derated : timings->tDQSCK_max; 479 - if (ip_rev == EMIF_4D5) 480 - val = DIV_ROUND_UP(t_dqsck + 1000, t_ck) - 1; 481 - else 482 - val = DIV_ROUND_UP(t_dqsck, t_ck) - 1; 483 - 484 - tim3 |= val << T_TDQSCKMAX_SHIFT; 485 - 486 - val = DIV_ROUND_UP(timings->tZQCS, t_ck) - 1; 487 - tim3 |= val << ZQ_ZQCS_SHIFT; 488 - 489 - val = DIV_ROUND_UP(timings->tCKESR, t_ck); 490 - val = max(min_tck->tCKESR, val) - 1; 491 - tim3 |= val << T_CKESR_SHIFT; 492 - 493 - if (ip_rev == EMIF_4D5) { 494 - tim3 |= (EMIF_T_CSTA - 1) << T_CSTA_SHIFT; 495 - 496 - val = DIV_ROUND_UP(EMIF_T_PDLL_UL, 128) - 1; 497 - tim3 |= val << T_PDLL_UL_SHIFT; 498 - } 499 - 500 - return tim3; 501 - } 502 - 503 331 static u32 get_zq_config_reg(const struct lpddr2_addressing *addressing, 504 332 bool cs1_used, bool cal_resistors_per_cs) 505 333 { ··· 365 587 alert |= (cs1_used ? 
1 : 0) << TA_CS1EN_SHIFT; 366 588 367 589 return alert; 368 - } 369 - 370 - static u32 get_read_idle_ctrl_shdw(u8 volt_ramp) 371 - { 372 - u32 idle = 0, val = 0; 373 - 374 - /* 375 - * Maximum value in normal conditions and increased frequency 376 - * when voltage is ramping 377 - */ 378 - if (volt_ramp) 379 - val = READ_IDLE_INTERVAL_DVFS / t_ck / 64 - 1; 380 - else 381 - val = 0x1FF; 382 - 383 - /* 384 - * READ_IDLE_CTRL register in EMIF4D has same offset and fields 385 - * as DLL_CALIB_CTRL in EMIF4D5, so use the same shifts 386 - */ 387 - idle |= val << DLL_CALIB_INTERVAL_SHIFT; 388 - idle |= EMIF_READ_IDLE_LEN_VAL << ACK_WAIT_SHIFT; 389 - 390 - return idle; 391 - } 392 - 393 - static u32 get_dll_calib_ctrl_shdw(u8 volt_ramp) 394 - { 395 - u32 calib = 0, val = 0; 396 - 397 - if (volt_ramp == DDR_VOLTAGE_RAMPING) 398 - val = DLL_CALIB_INTERVAL_DVFS / t_ck / 16 - 1; 399 - else 400 - val = 0; /* Disabled when voltage is stable */ 401 - 402 - calib |= val << DLL_CALIB_INTERVAL_SHIFT; 403 - calib |= DLL_CALIB_ACK_WAIT_VAL << ACK_WAIT_SHIFT; 404 - 405 - return calib; 406 - } 407 - 408 - static u32 get_ddr_phy_ctrl_1_attilaphy_4d(const struct lpddr2_timings *timings, 409 - u32 freq, u8 RL) 410 - { 411 - u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_ATTILAPHY, val = 0; 412 - 413 - val = RL + DIV_ROUND_UP(timings->tDQSCK_max, t_ck) - 1; 414 - phy |= val << READ_LATENCY_SHIFT_4D; 415 - 416 - if (freq <= 100000000) 417 - val = EMIF_DLL_SLAVE_DLY_CTRL_100_MHZ_AND_LESS_ATTILAPHY; 418 - else if (freq <= 200000000) 419 - val = EMIF_DLL_SLAVE_DLY_CTRL_200_MHZ_ATTILAPHY; 420 - else 421 - val = EMIF_DLL_SLAVE_DLY_CTRL_400_MHZ_ATTILAPHY; 422 - 423 - phy |= val << DLL_SLAVE_DLY_CTRL_SHIFT_4D; 424 - 425 - return phy; 426 - } 427 - 428 - static u32 get_phy_ctrl_1_intelliphy_4d5(u32 freq, u8 cl) 429 - { 430 - u32 phy = EMIF_DDR_PHY_CTRL_1_BASE_VAL_INTELLIPHY, half_delay; 431 - 432 - /* 433 - * DLL operates at 266 MHz. 
If DDR frequency is near 266 MHz, 434 - * half-delay is not needed else set half-delay 435 - */ 436 - if (freq >= 265000000 && freq < 267000000) 437 - half_delay = 0; 438 - else 439 - half_delay = 1; 440 - 441 - phy |= half_delay << DLL_HALF_DELAY_SHIFT_4D5; 442 - phy |= ((cl + DIV_ROUND_UP(EMIF_PHY_TOTAL_READ_LATENCY_INTELLIPHY_PS, 443 - t_ck) - 1) << READ_LATENCY_SHIFT_4D5); 444 - 445 - return phy; 446 - } 447 - 448 - static u32 get_ext_phy_ctrl_2_intelliphy_4d5(void) 449 - { 450 - u32 fifo_we_slave_ratio; 451 - 452 - fifo_we_slave_ratio = DIV_ROUND_CLOSEST( 453 - EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck); 454 - 455 - return fifo_we_slave_ratio | fifo_we_slave_ratio << 11 | 456 - fifo_we_slave_ratio << 22; 457 - } 458 - 459 - static u32 get_ext_phy_ctrl_3_intelliphy_4d5(void) 460 - { 461 - u32 fifo_we_slave_ratio; 462 - 463 - fifo_we_slave_ratio = DIV_ROUND_CLOSEST( 464 - EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck); 465 - 466 - return fifo_we_slave_ratio >> 10 | fifo_we_slave_ratio << 1 | 467 - fifo_we_slave_ratio << 12 | fifo_we_slave_ratio << 23; 468 - } 469 - 470 - static u32 get_ext_phy_ctrl_4_intelliphy_4d5(void) 471 - { 472 - u32 fifo_we_slave_ratio; 473 - 474 - fifo_we_slave_ratio = DIV_ROUND_CLOSEST( 475 - EMIF_INTELLI_PHY_DQS_GATE_OPENING_DELAY_PS * 256, t_ck); 476 - 477 - return fifo_we_slave_ratio >> 9 | fifo_we_slave_ratio << 2 | 478 - fifo_we_slave_ratio << 13; 479 590 } 480 591 481 592 static u32 get_pwr_mgmt_ctrl(u32 freq, struct emif_data *emif, u32 ip_rev) ··· 486 819 /* if we get reserved value in MR4 persist with the existing value */ 487 820 if (likely(temperature_level != SDRAM_TEMP_RESERVED_4)) 488 821 emif->temperature_level = temperature_level; 489 - } 490 - 491 - /* 492 - * Program EMIF shadow registers that are not dependent on temperature 493 - * or voltage 494 - */ 495 - static void setup_registers(struct emif_data *emif, struct emif_regs *regs) 496 - { 497 - void __iomem *base = emif->base; 498 - 499 - 
writel(regs->sdram_tim2_shdw, base + EMIF_SDRAM_TIMING_2_SHDW); 500 - writel(regs->phy_ctrl_1_shdw, base + EMIF_DDR_PHY_CTRL_1_SHDW); 501 - writel(regs->pwr_mgmt_ctrl_shdw, 502 - base + EMIF_POWER_MANAGEMENT_CTRL_SHDW); 503 - 504 - /* Settings specific for EMIF4D5 */ 505 - if (emif->plat_data->ip_rev != EMIF_4D5) 506 - return; 507 - writel(regs->ext_phy_ctrl_2_shdw, base + EMIF_EXT_PHY_CTRL_2_SHDW); 508 - writel(regs->ext_phy_ctrl_3_shdw, base + EMIF_EXT_PHY_CTRL_3_SHDW); 509 - writel(regs->ext_phy_ctrl_4_shdw, base + EMIF_EXT_PHY_CTRL_4_SHDW); 510 - } 511 - 512 - /* 513 - * When voltage ramps dll calibration and forced read idle should 514 - * happen more often 515 - */ 516 - static void setup_volt_sensitive_regs(struct emif_data *emif, 517 - struct emif_regs *regs, u32 volt_state) 518 - { 519 - u32 calib_ctrl; 520 - void __iomem *base = emif->base; 521 - 522 - /* 523 - * EMIF_READ_IDLE_CTRL in EMIF4D refers to the same register as 524 - * EMIF_DLL_CALIB_CTRL in EMIF4D5 and dll_calib_ctrl_shadow_* 525 - * is an alias of the respective read_idle_ctrl_shdw_* (members of 526 - * a union). 
So, the below code takes care of both cases 527 - */ 528 - if (volt_state == DDR_VOLTAGE_RAMPING) 529 - calib_ctrl = regs->dll_calib_ctrl_shdw_volt_ramp; 530 - else 531 - calib_ctrl = regs->dll_calib_ctrl_shdw_normal; 532 - 533 - writel(calib_ctrl, base + EMIF_DLL_CALIB_CTRL_SHDW); 534 822 } 535 823 536 824 /* ··· 1130 1508 } 1131 1509 1132 1510 list_add(&emif->node, &device_list); 1133 - emif->addressing = get_addressing_table(emif->plat_data->device_info); 1134 1511 1135 1512 /* Save pointers to each other in emif and device structures */ 1136 1513 emif->dev = &pdev->dev; ··· 1182 1561 struct emif_data *emif = platform_get_drvdata(pdev); 1183 1562 1184 1563 disable_and_clear_all_interrupts(emif); 1185 - } 1186 - 1187 - static int get_emif_reg_values(struct emif_data *emif, u32 freq, 1188 - struct emif_regs *regs) 1189 - { 1190 - u32 ip_rev, phy_type; 1191 - u32 cl, type; 1192 - const struct lpddr2_timings *timings; 1193 - const struct lpddr2_min_tck *min_tck; 1194 - const struct ddr_device_info *device_info; 1195 - const struct lpddr2_addressing *addressing; 1196 - struct emif_data *emif_for_calc; 1197 - struct device *dev; 1198 - 1199 - dev = emif->dev; 1200 - /* 1201 - * If the devices on this EMIF instance is duplicate of EMIF1, 1202 - * use EMIF1 details for the calculation 1203 - */ 1204 - emif_for_calc = emif->duplicate ? 
emif1 : emif; 1205 - timings = get_timings_table(emif_for_calc, freq); 1206 - addressing = emif_for_calc->addressing; 1207 - if (!timings || !addressing) { 1208 - dev_err(dev, "%s: not enough data available for %dHz", 1209 - __func__, freq); 1210 - return -1; 1211 - } 1212 - 1213 - device_info = emif_for_calc->plat_data->device_info; 1214 - type = device_info->type; 1215 - ip_rev = emif_for_calc->plat_data->ip_rev; 1216 - phy_type = emif_for_calc->plat_data->phy_type; 1217 - 1218 - min_tck = emif_for_calc->plat_data->min_tck; 1219 - 1220 - set_ddr_clk_period(freq); 1221 - 1222 - regs->ref_ctrl_shdw = get_sdram_ref_ctrl_shdw(freq, addressing); 1223 - regs->sdram_tim1_shdw = get_sdram_tim_1_shdw(timings, min_tck, 1224 - addressing); 1225 - regs->sdram_tim2_shdw = get_sdram_tim_2_shdw(timings, min_tck, 1226 - addressing, type); 1227 - regs->sdram_tim3_shdw = get_sdram_tim_3_shdw(timings, min_tck, 1228 - addressing, type, ip_rev, EMIF_NORMAL_TIMINGS); 1229 - 1230 - cl = get_cl(emif); 1231 - 1232 - if (phy_type == EMIF_PHY_TYPE_ATTILAPHY && ip_rev == EMIF_4D) { 1233 - regs->phy_ctrl_1_shdw = get_ddr_phy_ctrl_1_attilaphy_4d( 1234 - timings, freq, cl); 1235 - } else if (phy_type == EMIF_PHY_TYPE_INTELLIPHY && ip_rev == EMIF_4D5) { 1236 - regs->phy_ctrl_1_shdw = get_phy_ctrl_1_intelliphy_4d5(freq, cl); 1237 - regs->ext_phy_ctrl_2_shdw = get_ext_phy_ctrl_2_intelliphy_4d5(); 1238 - regs->ext_phy_ctrl_3_shdw = get_ext_phy_ctrl_3_intelliphy_4d5(); 1239 - regs->ext_phy_ctrl_4_shdw = get_ext_phy_ctrl_4_intelliphy_4d5(); 1240 - } else { 1241 - return -1; 1242 - } 1243 - 1244 - /* Only timeout values in pwr_mgmt_ctrl_shdw register */ 1245 - regs->pwr_mgmt_ctrl_shdw = 1246 - get_pwr_mgmt_ctrl(freq, emif_for_calc, ip_rev) & 1247 - (CS_TIM_MASK | SR_TIM_MASK | PD_TIM_MASK); 1248 - 1249 - if (ip_rev & EMIF_4D) { 1250 - regs->read_idle_ctrl_shdw_normal = 1251 - get_read_idle_ctrl_shdw(DDR_VOLTAGE_STABLE); 1252 - 1253 - regs->read_idle_ctrl_shdw_volt_ramp = 1254 - 
get_read_idle_ctrl_shdw(DDR_VOLTAGE_RAMPING); 1255 - } else if (ip_rev & EMIF_4D5) { 1256 - regs->dll_calib_ctrl_shdw_normal = 1257 - get_dll_calib_ctrl_shdw(DDR_VOLTAGE_STABLE); 1258 - 1259 - regs->dll_calib_ctrl_shdw_volt_ramp = 1260 - get_dll_calib_ctrl_shdw(DDR_VOLTAGE_RAMPING); 1261 - } 1262 - 1263 - if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4) { 1264 - regs->ref_ctrl_shdw_derated = get_sdram_ref_ctrl_shdw(freq / 4, 1265 - addressing); 1266 - 1267 - regs->sdram_tim1_shdw_derated = 1268 - get_sdram_tim_1_shdw_derated(timings, min_tck, 1269 - addressing); 1270 - 1271 - regs->sdram_tim3_shdw_derated = get_sdram_tim_3_shdw(timings, 1272 - min_tck, addressing, type, ip_rev, 1273 - EMIF_DERATED_TIMINGS); 1274 - } 1275 - 1276 - regs->freq = freq; 1277 - 1278 - return 0; 1279 - } 1280 - 1281 - /* 1282 - * get_regs() - gets the cached emif_regs structure for a given EMIF instance 1283 - * given frequency(freq): 1284 - * 1285 - * As an optimisation, every EMIF instance other than EMIF1 shares the 1286 - * register cache with EMIF1 if the devices connected on this instance 1287 - * are same as that on EMIF1(indicated by the duplicate flag) 1288 - * 1289 - * If we do not have an entry corresponding to the frequency given, we 1290 - * allocate a new entry and calculate the values 1291 - * 1292 - * Upon finding the right reg dump, save it in curr_regs. It can be 1293 - * directly used for thermal de-rating and voltage ramping changes. 
1294 - */ 1295 - static struct emif_regs *get_regs(struct emif_data *emif, u32 freq) 1296 - { 1297 - int i; 1298 - struct emif_regs **regs_cache; 1299 - struct emif_regs *regs = NULL; 1300 - struct device *dev; 1301 - 1302 - dev = emif->dev; 1303 - if (emif->curr_regs && emif->curr_regs->freq == freq) { 1304 - dev_dbg(dev, "%s: using curr_regs - %u Hz", __func__, freq); 1305 - return emif->curr_regs; 1306 - } 1307 - 1308 - if (emif->duplicate) 1309 - regs_cache = emif1->regs_cache; 1310 - else 1311 - regs_cache = emif->regs_cache; 1312 - 1313 - for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) { 1314 - if (regs_cache[i]->freq == freq) { 1315 - regs = regs_cache[i]; 1316 - dev_dbg(dev, 1317 - "%s: reg dump found in reg cache for %u Hz\n", 1318 - __func__, freq); 1319 - break; 1320 - } 1321 - } 1322 - 1323 - /* 1324 - * If we don't have an entry for this frequency in the cache create one 1325 - * and calculate the values 1326 - */ 1327 - if (!regs) { 1328 - regs = devm_kzalloc(emif->dev, sizeof(*regs), GFP_ATOMIC); 1329 - if (!regs) 1330 - return NULL; 1331 - 1332 - if (get_emif_reg_values(emif, freq, regs)) { 1333 - devm_kfree(emif->dev, regs); 1334 - return NULL; 1335 - } 1336 - 1337 - /* 1338 - * Now look for an un-used entry in the cache and save the 1339 - * newly created struct. 
If there are no free entries 1340 - * over-write the last entry 1341 - */ 1342 - for (i = 0; i < EMIF_MAX_NUM_FREQUENCIES && regs_cache[i]; i++) 1343 - ; 1344 - 1345 - if (i >= EMIF_MAX_NUM_FREQUENCIES) { 1346 - dev_warn(dev, "%s: regs_cache full - reusing a slot!!\n", 1347 - __func__); 1348 - i = EMIF_MAX_NUM_FREQUENCIES - 1; 1349 - devm_kfree(emif->dev, regs_cache[i]); 1350 - } 1351 - regs_cache[i] = regs; 1352 - } 1353 - 1354 - return regs; 1355 - } 1356 - 1357 - static void do_volt_notify_handling(struct emif_data *emif, u32 volt_state) 1358 - { 1359 - dev_dbg(emif->dev, "%s: voltage notification : %d", __func__, 1360 - volt_state); 1361 - 1362 - if (!emif->curr_regs) { 1363 - dev_err(emif->dev, 1364 - "%s: volt-notify before registers are ready: %d\n", 1365 - __func__, volt_state); 1366 - return; 1367 - } 1368 - 1369 - setup_volt_sensitive_regs(emif, emif->curr_regs, volt_state); 1370 - } 1371 - 1372 - /* 1373 - * TODO: voltage notify handling should be hooked up to 1374 - * regulator framework as soon as the necessary support 1375 - * is available in mainline kernel. This function is un-used 1376 - * right now. 1377 - */ 1378 - static void __attribute__((unused)) volt_notify_handling(u32 volt_state) 1379 - { 1380 - struct emif_data *emif; 1381 - 1382 - spin_lock_irqsave(&emif_lock, irq_state); 1383 - 1384 - list_for_each_entry(emif, &device_list, node) 1385 - do_volt_notify_handling(emif, volt_state); 1386 - do_freq_update(); 1387 - 1388 - spin_unlock_irqrestore(&emif_lock, irq_state); 1389 - } 1390 - 1391 - static void do_freq_pre_notify_handling(struct emif_data *emif, u32 new_freq) 1392 - { 1393 - struct emif_regs *regs; 1394 - 1395 - regs = get_regs(emif, new_freq); 1396 - if (!regs) 1397 - return; 1398 - 1399 - emif->curr_regs = regs; 1400 - 1401 - /* 1402 - * Update the shadow registers: 1403 - * Temperature and voltage-ramp sensitive settings are also configured 1404 - * in terms of DDR cycles. 
So, we need to update them too when there 1405 - * is a freq change 1406 - */ 1407 - dev_dbg(emif->dev, "%s: setting up shadow registers for %uHz", 1408 - __func__, new_freq); 1409 - setup_registers(emif, regs); 1410 - setup_temperature_sensitive_regs(emif, regs); 1411 - setup_volt_sensitive_regs(emif, regs, DDR_VOLTAGE_STABLE); 1412 - 1413 - /* 1414 - * Part of workaround for errata i728. See do_freq_update() 1415 - * for more details 1416 - */ 1417 - if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) 1418 - set_lpmode(emif, EMIF_LP_MODE_DISABLE); 1419 - } 1420 - 1421 - /* 1422 - * TODO: frequency notify handling should be hooked up to 1423 - * clock framework as soon as the necessary support is 1424 - * available in mainline kernel. This function is un-used 1425 - * right now. 1426 - */ 1427 - static void __attribute__((unused)) freq_pre_notify_handling(u32 new_freq) 1428 - { 1429 - struct emif_data *emif; 1430 - 1431 - /* 1432 - * NOTE: we are taking the spin-lock here and releases it 1433 - * only in post-notifier. This doesn't look good and 1434 - * Sparse complains about it, but this seems to be 1435 - * un-avoidable. We need to lock a sequence of events 1436 - * that is split between EMIF and clock framework. 1437 - * 1438 - * 1. EMIF driver updates EMIF timings in shadow registers in the 1439 - * frequency pre-notify callback from clock framework 1440 - * 2. clock framework sets up the registers for the new frequency 1441 - * 3. clock framework initiates a hw-sequence that updates 1442 - * the frequency EMIF timings synchronously. 1443 - * 1444 - * All these 3 steps should be performed as an atomic operation 1445 - * vis-a-vis similar sequence in the EMIF interrupt handler 1446 - * for temperature events. 
Otherwise, there could be race 1447 - * conditions that could result in incorrect EMIF timings for 1448 - * a given frequency 1449 - */ 1450 - spin_lock_irqsave(&emif_lock, irq_state); 1451 - 1452 - list_for_each_entry(emif, &device_list, node) 1453 - do_freq_pre_notify_handling(emif, new_freq); 1454 - } 1455 - 1456 - static void do_freq_post_notify_handling(struct emif_data *emif) 1457 - { 1458 - /* 1459 - * Part of workaround for errata i728. See do_freq_update() 1460 - * for more details 1461 - */ 1462 - if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH) 1463 - set_lpmode(emif, EMIF_LP_MODE_SELF_REFRESH); 1464 - } 1465 - 1466 - /* 1467 - * TODO: frequency notify handling should be hooked up to 1468 - * clock framework as soon as the necessary support is 1469 - * available in mainline kernel. This function is un-used 1470 - * right now. 1471 - */ 1472 - static void __attribute__((unused)) freq_post_notify_handling(void) 1473 - { 1474 - struct emif_data *emif; 1475 - 1476 - list_for_each_entry(emif, &device_list, node) 1477 - do_freq_post_notify_handling(emif); 1478 - 1479 - /* 1480 - * Lock is done in pre-notify handler. See freq_pre_notify_handling() 1481 - * for more details 1482 - */ 1483 - spin_unlock_irqrestore(&emif_lock, irq_state); 1484 1564 } 1485 1565 1486 1566 #if defined(CONFIG_OF)