Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/i915: split out intel_dram.[ch] from i915_drv.c

The DRAM-related routines are pretty isolated from the rest of
i915_drv.c; split them out to a separate file. Put the eDRAM stuff in the
same bag, and rename the visible functions to have an intel_dram_
prefix. Do some benign whitespace fixes and dev_priv -> i915 conversions
while at it.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200225111509.21879-1-jani.nikula@intel.com

+505 -490
+1
drivers/gpu/drm/i915/Makefile
··· 47 47 i915_sysfs.o \ 48 48 i915_utils.o \ 49 49 intel_device_info.o \ 50 + intel_dram.o \ 50 51 intel_memory_region.o \ 51 52 intel_pch.o \ 52 53 intel_pm.o \
+3 -490
drivers/gpu/drm/i915/i915_drv.c
··· 79 79 #include "i915_sysfs.h" 80 80 #include "i915_trace.h" 81 81 #include "i915_vgpu.h" 82 + #include "intel_dram.h" 82 83 #include "intel_memory_region.h" 83 84 #include "intel_pm.h" 84 85 #include "vlv_suspend.h" ··· 564 563 intel_gvt_sanitize_options(dev_priv); 565 564 } 566 565 567 - #define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type 568 - 569 - static const char *intel_dram_type_str(enum intel_dram_type type) 570 - { 571 - static const char * const str[] = { 572 - DRAM_TYPE_STR(UNKNOWN), 573 - DRAM_TYPE_STR(DDR3), 574 - DRAM_TYPE_STR(DDR4), 575 - DRAM_TYPE_STR(LPDDR3), 576 - DRAM_TYPE_STR(LPDDR4), 577 - }; 578 - 579 - if (type >= ARRAY_SIZE(str)) 580 - type = INTEL_DRAM_UNKNOWN; 581 - 582 - return str[type]; 583 - } 584 - 585 - #undef DRAM_TYPE_STR 586 - 587 - static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) 588 - { 589 - return dimm->ranks * 64 / (dimm->width ?: 1); 590 - } 591 - 592 - /* Returns total GB for the whole DIMM */ 593 - static int skl_get_dimm_size(u16 val) 594 - { 595 - return val & SKL_DRAM_SIZE_MASK; 596 - } 597 - 598 - static int skl_get_dimm_width(u16 val) 599 - { 600 - if (skl_get_dimm_size(val) == 0) 601 - return 0; 602 - 603 - switch (val & SKL_DRAM_WIDTH_MASK) { 604 - case SKL_DRAM_WIDTH_X8: 605 - case SKL_DRAM_WIDTH_X16: 606 - case SKL_DRAM_WIDTH_X32: 607 - val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; 608 - return 8 << val; 609 - default: 610 - MISSING_CASE(val); 611 - return 0; 612 - } 613 - } 614 - 615 - static int skl_get_dimm_ranks(u16 val) 616 - { 617 - if (skl_get_dimm_size(val) == 0) 618 - return 0; 619 - 620 - val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT; 621 - 622 - return val + 1; 623 - } 624 - 625 - /* Returns total GB for the whole DIMM */ 626 - static int cnl_get_dimm_size(u16 val) 627 - { 628 - return (val & CNL_DRAM_SIZE_MASK) / 2; 629 - } 630 - 631 - static int cnl_get_dimm_width(u16 val) 632 - { 633 - if (cnl_get_dimm_size(val) == 0) 634 - return 0; 635 - 636 - 
switch (val & CNL_DRAM_WIDTH_MASK) { 637 - case CNL_DRAM_WIDTH_X8: 638 - case CNL_DRAM_WIDTH_X16: 639 - case CNL_DRAM_WIDTH_X32: 640 - val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT; 641 - return 8 << val; 642 - default: 643 - MISSING_CASE(val); 644 - return 0; 645 - } 646 - } 647 - 648 - static int cnl_get_dimm_ranks(u16 val) 649 - { 650 - if (cnl_get_dimm_size(val) == 0) 651 - return 0; 652 - 653 - val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT; 654 - 655 - return val + 1; 656 - } 657 - 658 - static bool 659 - skl_is_16gb_dimm(const struct dram_dimm_info *dimm) 660 - { 661 - /* Convert total GB to Gb per DRAM device */ 662 - return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; 663 - } 664 - 665 - static void 666 - skl_dram_get_dimm_info(struct drm_i915_private *dev_priv, 667 - struct dram_dimm_info *dimm, 668 - int channel, char dimm_name, u16 val) 669 - { 670 - if (INTEL_GEN(dev_priv) >= 10) { 671 - dimm->size = cnl_get_dimm_size(val); 672 - dimm->width = cnl_get_dimm_width(val); 673 - dimm->ranks = cnl_get_dimm_ranks(val); 674 - } else { 675 - dimm->size = skl_get_dimm_size(val); 676 - dimm->width = skl_get_dimm_width(val); 677 - dimm->ranks = skl_get_dimm_ranks(val); 678 - } 679 - 680 - drm_dbg_kms(&dev_priv->drm, 681 - "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", 682 - channel, dimm_name, dimm->size, dimm->width, dimm->ranks, 683 - yesno(skl_is_16gb_dimm(dimm))); 684 - } 685 - 686 - static int 687 - skl_dram_get_channel_info(struct drm_i915_private *dev_priv, 688 - struct dram_channel_info *ch, 689 - int channel, u32 val) 690 - { 691 - skl_dram_get_dimm_info(dev_priv, &ch->dimm_l, 692 - channel, 'L', val & 0xffff); 693 - skl_dram_get_dimm_info(dev_priv, &ch->dimm_s, 694 - channel, 'S', val >> 16); 695 - 696 - if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) { 697 - drm_dbg_kms(&dev_priv->drm, "CH%u not populated\n", channel); 698 - return -EINVAL; 699 - } 700 - 701 - if (ch->dimm_l.ranks == 2 || 
ch->dimm_s.ranks == 2) 702 - ch->ranks = 2; 703 - else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1) 704 - ch->ranks = 2; 705 - else 706 - ch->ranks = 1; 707 - 708 - ch->is_16gb_dimm = 709 - skl_is_16gb_dimm(&ch->dimm_l) || 710 - skl_is_16gb_dimm(&ch->dimm_s); 711 - 712 - drm_dbg_kms(&dev_priv->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n", 713 - channel, ch->ranks, yesno(ch->is_16gb_dimm)); 714 - 715 - return 0; 716 - } 717 - 718 - static bool 719 - intel_is_dram_symmetric(const struct dram_channel_info *ch0, 720 - const struct dram_channel_info *ch1) 721 - { 722 - return !memcmp(ch0, ch1, sizeof(*ch0)) && 723 - (ch0->dimm_s.size == 0 || 724 - !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l))); 725 - } 726 - 727 - static int 728 - skl_dram_get_channels_info(struct drm_i915_private *dev_priv) 729 - { 730 - struct dram_info *dram_info = &dev_priv->dram_info; 731 - struct dram_channel_info ch0 = {}, ch1 = {}; 732 - u32 val; 733 - int ret; 734 - 735 - val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); 736 - ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val); 737 - if (ret == 0) 738 - dram_info->num_channels++; 739 - 740 - val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); 741 - ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val); 742 - if (ret == 0) 743 - dram_info->num_channels++; 744 - 745 - if (dram_info->num_channels == 0) { 746 - drm_info(&dev_priv->drm, 747 - "Number of memory channels is zero\n"); 748 - return -EINVAL; 749 - } 750 - 751 - /* 752 - * If any of the channel is single rank channel, worst case output 753 - * will be same as if single rank memory, so consider single rank 754 - * memory. 
755 - */ 756 - if (ch0.ranks == 1 || ch1.ranks == 1) 757 - dram_info->ranks = 1; 758 - else 759 - dram_info->ranks = max(ch0.ranks, ch1.ranks); 760 - 761 - if (dram_info->ranks == 0) { 762 - drm_info(&dev_priv->drm, 763 - "couldn't get memory rank information\n"); 764 - return -EINVAL; 765 - } 766 - 767 - dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; 768 - 769 - dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1); 770 - 771 - drm_dbg_kms(&dev_priv->drm, "Memory configuration is symmetric? %s\n", 772 - yesno(dram_info->symmetric_memory)); 773 - return 0; 774 - } 775 - 776 - static enum intel_dram_type 777 - skl_get_dram_type(struct drm_i915_private *dev_priv) 778 - { 779 - u32 val; 780 - 781 - val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN); 782 - 783 - switch (val & SKL_DRAM_DDR_TYPE_MASK) { 784 - case SKL_DRAM_DDR_TYPE_DDR3: 785 - return INTEL_DRAM_DDR3; 786 - case SKL_DRAM_DDR_TYPE_DDR4: 787 - return INTEL_DRAM_DDR4; 788 - case SKL_DRAM_DDR_TYPE_LPDDR3: 789 - return INTEL_DRAM_LPDDR3; 790 - case SKL_DRAM_DDR_TYPE_LPDDR4: 791 - return INTEL_DRAM_LPDDR4; 792 - default: 793 - MISSING_CASE(val); 794 - return INTEL_DRAM_UNKNOWN; 795 - } 796 - } 797 - 798 - static int 799 - skl_get_dram_info(struct drm_i915_private *dev_priv) 800 - { 801 - struct dram_info *dram_info = &dev_priv->dram_info; 802 - u32 mem_freq_khz, val; 803 - int ret; 804 - 805 - dram_info->type = skl_get_dram_type(dev_priv); 806 - drm_dbg_kms(&dev_priv->drm, "DRAM type: %s\n", 807 - intel_dram_type_str(dram_info->type)); 808 - 809 - ret = skl_dram_get_channels_info(dev_priv); 810 - if (ret) 811 - return ret; 812 - 813 - val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); 814 - mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) * 815 - SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000); 816 - 817 - dram_info->bandwidth_kbps = dram_info->num_channels * 818 - mem_freq_khz * 8; 819 - 820 - if (dram_info->bandwidth_kbps == 0) { 821 - drm_info(&dev_priv->drm, 822 - "Couldn't 
get system memory bandwidth\n"); 823 - return -EINVAL; 824 - } 825 - 826 - dram_info->valid = true; 827 - return 0; 828 - } 829 - 830 - /* Returns Gb per DRAM device */ 831 - static int bxt_get_dimm_size(u32 val) 832 - { 833 - switch (val & BXT_DRAM_SIZE_MASK) { 834 - case BXT_DRAM_SIZE_4GBIT: 835 - return 4; 836 - case BXT_DRAM_SIZE_6GBIT: 837 - return 6; 838 - case BXT_DRAM_SIZE_8GBIT: 839 - return 8; 840 - case BXT_DRAM_SIZE_12GBIT: 841 - return 12; 842 - case BXT_DRAM_SIZE_16GBIT: 843 - return 16; 844 - default: 845 - MISSING_CASE(val); 846 - return 0; 847 - } 848 - } 849 - 850 - static int bxt_get_dimm_width(u32 val) 851 - { 852 - if (!bxt_get_dimm_size(val)) 853 - return 0; 854 - 855 - val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; 856 - 857 - return 8 << val; 858 - } 859 - 860 - static int bxt_get_dimm_ranks(u32 val) 861 - { 862 - if (!bxt_get_dimm_size(val)) 863 - return 0; 864 - 865 - switch (val & BXT_DRAM_RANK_MASK) { 866 - case BXT_DRAM_RANK_SINGLE: 867 - return 1; 868 - case BXT_DRAM_RANK_DUAL: 869 - return 2; 870 - default: 871 - MISSING_CASE(val); 872 - return 0; 873 - } 874 - } 875 - 876 - static enum intel_dram_type bxt_get_dimm_type(u32 val) 877 - { 878 - if (!bxt_get_dimm_size(val)) 879 - return INTEL_DRAM_UNKNOWN; 880 - 881 - switch (val & BXT_DRAM_TYPE_MASK) { 882 - case BXT_DRAM_TYPE_DDR3: 883 - return INTEL_DRAM_DDR3; 884 - case BXT_DRAM_TYPE_LPDDR3: 885 - return INTEL_DRAM_LPDDR3; 886 - case BXT_DRAM_TYPE_DDR4: 887 - return INTEL_DRAM_DDR4; 888 - case BXT_DRAM_TYPE_LPDDR4: 889 - return INTEL_DRAM_LPDDR4; 890 - default: 891 - MISSING_CASE(val); 892 - return INTEL_DRAM_UNKNOWN; 893 - } 894 - } 895 - 896 - static void bxt_get_dimm_info(struct dram_dimm_info *dimm, 897 - u32 val) 898 - { 899 - dimm->width = bxt_get_dimm_width(val); 900 - dimm->ranks = bxt_get_dimm_ranks(val); 901 - 902 - /* 903 - * Size in register is Gb per DRAM device. Convert to total 904 - * GB to match the way we report this for non-LP platforms. 
905 - */ 906 - dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; 907 - } 908 - 909 - static int 910 - bxt_get_dram_info(struct drm_i915_private *dev_priv) 911 - { 912 - struct dram_info *dram_info = &dev_priv->dram_info; 913 - u32 dram_channels; 914 - u32 mem_freq_khz, val; 915 - u8 num_active_channels; 916 - int i; 917 - 918 - val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0); 919 - mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) * 920 - BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000); 921 - 922 - dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK; 923 - num_active_channels = hweight32(dram_channels); 924 - 925 - /* Each active bit represents 4-byte channel */ 926 - dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4); 927 - 928 - if (dram_info->bandwidth_kbps == 0) { 929 - drm_info(&dev_priv->drm, 930 - "Couldn't get system memory bandwidth\n"); 931 - return -EINVAL; 932 - } 933 - 934 - /* 935 - * Now read each DUNIT8/9/10/11 to check the rank of each dimms. 936 - */ 937 - for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) { 938 - struct dram_dimm_info dimm; 939 - enum intel_dram_type type; 940 - 941 - val = I915_READ(BXT_D_CR_DRP0_DUNIT(i)); 942 - if (val == 0xFFFFFFFF) 943 - continue; 944 - 945 - dram_info->num_channels++; 946 - 947 - bxt_get_dimm_info(&dimm, val); 948 - type = bxt_get_dimm_type(val); 949 - 950 - drm_WARN_ON(&dev_priv->drm, type != INTEL_DRAM_UNKNOWN && 951 - dram_info->type != INTEL_DRAM_UNKNOWN && 952 - dram_info->type != type); 953 - 954 - drm_dbg_kms(&dev_priv->drm, 955 - "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", 956 - i - BXT_D_CR_DRP0_DUNIT_START, 957 - dimm.size, dimm.width, dimm.ranks, 958 - intel_dram_type_str(type)); 959 - 960 - /* 961 - * If any of the channel is single rank channel, 962 - * worst case output will be same as if single rank 963 - * memory, so consider single rank memory. 
964 - */ 965 - if (dram_info->ranks == 0) 966 - dram_info->ranks = dimm.ranks; 967 - else if (dimm.ranks == 1) 968 - dram_info->ranks = 1; 969 - 970 - if (type != INTEL_DRAM_UNKNOWN) 971 - dram_info->type = type; 972 - } 973 - 974 - if (dram_info->type == INTEL_DRAM_UNKNOWN || 975 - dram_info->ranks == 0) { 976 - drm_info(&dev_priv->drm, "couldn't get memory information\n"); 977 - return -EINVAL; 978 - } 979 - 980 - dram_info->valid = true; 981 - return 0; 982 - } 983 - 984 - static void 985 - intel_get_dram_info(struct drm_i915_private *dev_priv) 986 - { 987 - struct dram_info *dram_info = &dev_priv->dram_info; 988 - int ret; 989 - 990 - /* 991 - * Assume 16Gb DIMMs are present until proven otherwise. 992 - * This is only used for the level 0 watermark latency 993 - * w/a which does not apply to bxt/glk. 994 - */ 995 - dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); 996 - 997 - if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv)) 998 - return; 999 - 1000 - if (IS_GEN9_LP(dev_priv)) 1001 - ret = bxt_get_dram_info(dev_priv); 1002 - else 1003 - ret = skl_get_dram_info(dev_priv); 1004 - if (ret) 1005 - return; 1006 - 1007 - drm_dbg_kms(&dev_priv->drm, "DRAM bandwidth: %u kBps, channels: %u\n", 1008 - dram_info->bandwidth_kbps, 1009 - dram_info->num_channels); 1010 - 1011 - drm_dbg_kms(&dev_priv->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n", 1012 - dram_info->ranks, yesno(dram_info->is_16gb_dimm)); 1013 - } 1014 - 1015 - static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap) 1016 - { 1017 - static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; 1018 - static const u8 sets[4] = { 1, 1, 2, 2 }; 1019 - 1020 - return EDRAM_NUM_BANKS(cap) * 1021 - ways[EDRAM_WAYS_IDX(cap)] * 1022 - sets[EDRAM_SETS_IDX(cap)]; 1023 - } 1024 - 1025 - static void edram_detect(struct drm_i915_private *dev_priv) 1026 - { 1027 - u32 edram_cap = 0; 1028 - 1029 - if (!(IS_HASWELL(dev_priv) || 1030 - IS_BROADWELL(dev_priv) || 1031 - INTEL_GEN(dev_priv) >= 9)) 1032 - return; 1033 
- 1034 - edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP); 1035 - 1036 - /* NB: We can't write IDICR yet because we don't have gt funcs set up */ 1037 - 1038 - if (!(edram_cap & EDRAM_ENABLED)) 1039 - return; 1040 - 1041 - /* 1042 - * The needed capability bits for size calculation are not there with 1043 - * pre gen9 so return 128MB always. 1044 - */ 1045 - if (INTEL_GEN(dev_priv) < 9) 1046 - dev_priv->edram_size_mb = 128; 1047 - else 1048 - dev_priv->edram_size_mb = 1049 - gen9_edram_size_mb(dev_priv, edram_cap); 1050 - 1051 - dev_info(dev_priv->drm.dev, 1052 - "Found %uMB of eDRAM\n", dev_priv->edram_size_mb); 1053 - } 1054 - 1055 566 /** 1056 567 * i915_driver_hw_probe - setup state requiring device access 1057 568 * @dev_priv: device private ··· 607 1094 intel_sanitize_options(dev_priv); 608 1095 609 1096 /* needs to be done before ggtt probe */ 610 - edram_detect(dev_priv); 1097 + intel_dram_edram_detect(dev_priv); 611 1098 612 1099 i915_perf_init(dev_priv); 613 1100 ··· 709 1196 * Fill the dram structure to get the system raw bandwidth and 710 1197 * dram info. This will be used for memory latency calculation. 711 1198 */ 712 - intel_get_dram_info(dev_priv); 1199 + intel_dram_detect(dev_priv); 713 1200 714 1201 intel_bw_init_hw(dev_priv); 715 1202
+487
drivers/gpu/drm/i915/intel_dram.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "intel_dram.h" 8 + 9 + #define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type 10 + 11 + static const char *intel_dram_type_str(enum intel_dram_type type) 12 + { 13 + static const char * const str[] = { 14 + DRAM_TYPE_STR(UNKNOWN), 15 + DRAM_TYPE_STR(DDR3), 16 + DRAM_TYPE_STR(DDR4), 17 + DRAM_TYPE_STR(LPDDR3), 18 + DRAM_TYPE_STR(LPDDR4), 19 + }; 20 + 21 + if (type >= ARRAY_SIZE(str)) 22 + type = INTEL_DRAM_UNKNOWN; 23 + 24 + return str[type]; 25 + } 26 + 27 + #undef DRAM_TYPE_STR 28 + 29 + static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) 30 + { 31 + return dimm->ranks * 64 / (dimm->width ?: 1); 32 + } 33 + 34 + /* Returns total GB for the whole DIMM */ 35 + static int skl_get_dimm_size(u16 val) 36 + { 37 + return val & SKL_DRAM_SIZE_MASK; 38 + } 39 + 40 + static int skl_get_dimm_width(u16 val) 41 + { 42 + if (skl_get_dimm_size(val) == 0) 43 + return 0; 44 + 45 + switch (val & SKL_DRAM_WIDTH_MASK) { 46 + case SKL_DRAM_WIDTH_X8: 47 + case SKL_DRAM_WIDTH_X16: 48 + case SKL_DRAM_WIDTH_X32: 49 + val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; 50 + return 8 << val; 51 + default: 52 + MISSING_CASE(val); 53 + return 0; 54 + } 55 + } 56 + 57 + static int skl_get_dimm_ranks(u16 val) 58 + { 59 + if (skl_get_dimm_size(val) == 0) 60 + return 0; 61 + 62 + val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT; 63 + 64 + return val + 1; 65 + } 66 + 67 + /* Returns total GB for the whole DIMM */ 68 + static int cnl_get_dimm_size(u16 val) 69 + { 70 + return (val & CNL_DRAM_SIZE_MASK) / 2; 71 + } 72 + 73 + static int cnl_get_dimm_width(u16 val) 74 + { 75 + if (cnl_get_dimm_size(val) == 0) 76 + return 0; 77 + 78 + switch (val & CNL_DRAM_WIDTH_MASK) { 79 + case CNL_DRAM_WIDTH_X8: 80 + case CNL_DRAM_WIDTH_X16: 81 + case CNL_DRAM_WIDTH_X32: 82 + val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT; 83 + return 8 
<< val; 84 + default: 85 + MISSING_CASE(val); 86 + return 0; 87 + } 88 + } 89 + 90 + static int cnl_get_dimm_ranks(u16 val) 91 + { 92 + if (cnl_get_dimm_size(val) == 0) 93 + return 0; 94 + 95 + val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT; 96 + 97 + return val + 1; 98 + } 99 + 100 + static bool 101 + skl_is_16gb_dimm(const struct dram_dimm_info *dimm) 102 + { 103 + /* Convert total GB to Gb per DRAM device */ 104 + return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; 105 + } 106 + 107 + static void 108 + skl_dram_get_dimm_info(struct drm_i915_private *i915, 109 + struct dram_dimm_info *dimm, 110 + int channel, char dimm_name, u16 val) 111 + { 112 + if (INTEL_GEN(i915) >= 10) { 113 + dimm->size = cnl_get_dimm_size(val); 114 + dimm->width = cnl_get_dimm_width(val); 115 + dimm->ranks = cnl_get_dimm_ranks(val); 116 + } else { 117 + dimm->size = skl_get_dimm_size(val); 118 + dimm->width = skl_get_dimm_width(val); 119 + dimm->ranks = skl_get_dimm_ranks(val); 120 + } 121 + 122 + drm_dbg_kms(&i915->drm, 123 + "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", 124 + channel, dimm_name, dimm->size, dimm->width, dimm->ranks, 125 + yesno(skl_is_16gb_dimm(dimm))); 126 + } 127 + 128 + static int 129 + skl_dram_get_channel_info(struct drm_i915_private *i915, 130 + struct dram_channel_info *ch, 131 + int channel, u32 val) 132 + { 133 + skl_dram_get_dimm_info(i915, &ch->dimm_l, 134 + channel, 'L', val & 0xffff); 135 + skl_dram_get_dimm_info(i915, &ch->dimm_s, 136 + channel, 'S', val >> 16); 137 + 138 + if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) { 139 + drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel); 140 + return -EINVAL; 141 + } 142 + 143 + if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2) 144 + ch->ranks = 2; 145 + else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1) 146 + ch->ranks = 2; 147 + else 148 + ch->ranks = 1; 149 + 150 + ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) || 151 + 
skl_is_16gb_dimm(&ch->dimm_s); 152 + 153 + drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n", 154 + channel, ch->ranks, yesno(ch->is_16gb_dimm)); 155 + 156 + return 0; 157 + } 158 + 159 + static bool 160 + intel_is_dram_symmetric(const struct dram_channel_info *ch0, 161 + const struct dram_channel_info *ch1) 162 + { 163 + return !memcmp(ch0, ch1, sizeof(*ch0)) && 164 + (ch0->dimm_s.size == 0 || 165 + !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l))); 166 + } 167 + 168 + static int 169 + skl_dram_get_channels_info(struct drm_i915_private *dev_priv) 170 + { 171 + struct dram_info *dram_info = &dev_priv->dram_info; 172 + struct dram_channel_info ch0 = {}, ch1 = {}; 173 + u32 val; 174 + int ret; 175 + 176 + val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); 177 + ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val); 178 + if (ret == 0) 179 + dram_info->num_channels++; 180 + 181 + val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); 182 + ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val); 183 + if (ret == 0) 184 + dram_info->num_channels++; 185 + 186 + if (dram_info->num_channels == 0) { 187 + drm_info(&dev_priv->drm, "Number of memory channels is zero\n"); 188 + return -EINVAL; 189 + } 190 + 191 + /* 192 + * If any of the channel is single rank channel, worst case output 193 + * will be same as if single rank memory, so consider single rank 194 + * memory. 195 + */ 196 + if (ch0.ranks == 1 || ch1.ranks == 1) 197 + dram_info->ranks = 1; 198 + else 199 + dram_info->ranks = max(ch0.ranks, ch1.ranks); 200 + 201 + if (dram_info->ranks == 0) { 202 + drm_info(&dev_priv->drm, 203 + "couldn't get memory rank information\n"); 204 + return -EINVAL; 205 + } 206 + 207 + dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; 208 + 209 + dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1); 210 + 211 + drm_dbg_kms(&dev_priv->drm, "Memory configuration is symmetric? 
%s\n", 212 + yesno(dram_info->symmetric_memory)); 213 + 214 + return 0; 215 + } 216 + 217 + static enum intel_dram_type 218 + skl_get_dram_type(struct drm_i915_private *dev_priv) 219 + { 220 + u32 val; 221 + 222 + val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN); 223 + 224 + switch (val & SKL_DRAM_DDR_TYPE_MASK) { 225 + case SKL_DRAM_DDR_TYPE_DDR3: 226 + return INTEL_DRAM_DDR3; 227 + case SKL_DRAM_DDR_TYPE_DDR4: 228 + return INTEL_DRAM_DDR4; 229 + case SKL_DRAM_DDR_TYPE_LPDDR3: 230 + return INTEL_DRAM_LPDDR3; 231 + case SKL_DRAM_DDR_TYPE_LPDDR4: 232 + return INTEL_DRAM_LPDDR4; 233 + default: 234 + MISSING_CASE(val); 235 + return INTEL_DRAM_UNKNOWN; 236 + } 237 + } 238 + 239 + static int 240 + skl_get_dram_info(struct drm_i915_private *dev_priv) 241 + { 242 + struct dram_info *dram_info = &dev_priv->dram_info; 243 + u32 mem_freq_khz, val; 244 + int ret; 245 + 246 + dram_info->type = skl_get_dram_type(dev_priv); 247 + drm_dbg_kms(&dev_priv->drm, "DRAM type: %s\n", 248 + intel_dram_type_str(dram_info->type)); 249 + 250 + ret = skl_dram_get_channels_info(dev_priv); 251 + if (ret) 252 + return ret; 253 + 254 + val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); 255 + mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) * 256 + SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000); 257 + 258 + dram_info->bandwidth_kbps = dram_info->num_channels * 259 + mem_freq_khz * 8; 260 + 261 + if (dram_info->bandwidth_kbps == 0) { 262 + drm_info(&dev_priv->drm, 263 + "Couldn't get system memory bandwidth\n"); 264 + return -EINVAL; 265 + } 266 + 267 + dram_info->valid = true; 268 + return 0; 269 + } 270 + 271 + /* Returns Gb per DRAM device */ 272 + static int bxt_get_dimm_size(u32 val) 273 + { 274 + switch (val & BXT_DRAM_SIZE_MASK) { 275 + case BXT_DRAM_SIZE_4GBIT: 276 + return 4; 277 + case BXT_DRAM_SIZE_6GBIT: 278 + return 6; 279 + case BXT_DRAM_SIZE_8GBIT: 280 + return 8; 281 + case BXT_DRAM_SIZE_12GBIT: 282 + return 12; 283 + case BXT_DRAM_SIZE_16GBIT: 284 + return 16; 285 + default: 
286 + MISSING_CASE(val); 287 + return 0; 288 + } 289 + } 290 + 291 + static int bxt_get_dimm_width(u32 val) 292 + { 293 + if (!bxt_get_dimm_size(val)) 294 + return 0; 295 + 296 + val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; 297 + 298 + return 8 << val; 299 + } 300 + 301 + static int bxt_get_dimm_ranks(u32 val) 302 + { 303 + if (!bxt_get_dimm_size(val)) 304 + return 0; 305 + 306 + switch (val & BXT_DRAM_RANK_MASK) { 307 + case BXT_DRAM_RANK_SINGLE: 308 + return 1; 309 + case BXT_DRAM_RANK_DUAL: 310 + return 2; 311 + default: 312 + MISSING_CASE(val); 313 + return 0; 314 + } 315 + } 316 + 317 + static enum intel_dram_type bxt_get_dimm_type(u32 val) 318 + { 319 + if (!bxt_get_dimm_size(val)) 320 + return INTEL_DRAM_UNKNOWN; 321 + 322 + switch (val & BXT_DRAM_TYPE_MASK) { 323 + case BXT_DRAM_TYPE_DDR3: 324 + return INTEL_DRAM_DDR3; 325 + case BXT_DRAM_TYPE_LPDDR3: 326 + return INTEL_DRAM_LPDDR3; 327 + case BXT_DRAM_TYPE_DDR4: 328 + return INTEL_DRAM_DDR4; 329 + case BXT_DRAM_TYPE_LPDDR4: 330 + return INTEL_DRAM_LPDDR4; 331 + default: 332 + MISSING_CASE(val); 333 + return INTEL_DRAM_UNKNOWN; 334 + } 335 + } 336 + 337 + static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val) 338 + { 339 + dimm->width = bxt_get_dimm_width(val); 340 + dimm->ranks = bxt_get_dimm_ranks(val); 341 + 342 + /* 343 + * Size in register is Gb per DRAM device. Convert to total 344 + * GB to match the way we report this for non-LP platforms. 
345 + */ 346 + dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; 347 + } 348 + 349 + static int bxt_get_dram_info(struct drm_i915_private *dev_priv) 350 + { 351 + struct dram_info *dram_info = &dev_priv->dram_info; 352 + u32 dram_channels; 353 + u32 mem_freq_khz, val; 354 + u8 num_active_channels; 355 + int i; 356 + 357 + val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0); 358 + mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) * 359 + BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000); 360 + 361 + dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK; 362 + num_active_channels = hweight32(dram_channels); 363 + 364 + /* Each active bit represents 4-byte channel */ 365 + dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4); 366 + 367 + if (dram_info->bandwidth_kbps == 0) { 368 + drm_info(&dev_priv->drm, 369 + "Couldn't get system memory bandwidth\n"); 370 + return -EINVAL; 371 + } 372 + 373 + /* 374 + * Now read each DUNIT8/9/10/11 to check the rank of each dimms. 375 + */ 376 + for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) { 377 + struct dram_dimm_info dimm; 378 + enum intel_dram_type type; 379 + 380 + val = I915_READ(BXT_D_CR_DRP0_DUNIT(i)); 381 + if (val == 0xFFFFFFFF) 382 + continue; 383 + 384 + dram_info->num_channels++; 385 + 386 + bxt_get_dimm_info(&dimm, val); 387 + type = bxt_get_dimm_type(val); 388 + 389 + drm_WARN_ON(&dev_priv->drm, type != INTEL_DRAM_UNKNOWN && 390 + dram_info->type != INTEL_DRAM_UNKNOWN && 391 + dram_info->type != type); 392 + 393 + drm_dbg_kms(&dev_priv->drm, 394 + "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", 395 + i - BXT_D_CR_DRP0_DUNIT_START, 396 + dimm.size, dimm.width, dimm.ranks, 397 + intel_dram_type_str(type)); 398 + 399 + /* 400 + * If any of the channel is single rank channel, 401 + * worst case output will be same as if single rank 402 + * memory, so consider single rank memory. 
403 + */ 404 + if (dram_info->ranks == 0) 405 + dram_info->ranks = dimm.ranks; 406 + else if (dimm.ranks == 1) 407 + dram_info->ranks = 1; 408 + 409 + if (type != INTEL_DRAM_UNKNOWN) 410 + dram_info->type = type; 411 + } 412 + 413 + if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) { 414 + drm_info(&dev_priv->drm, "couldn't get memory information\n"); 415 + return -EINVAL; 416 + } 417 + 418 + dram_info->valid = true; 419 + 420 + return 0; 421 + } 422 + 423 + void intel_dram_detect(struct drm_i915_private *i915) 424 + { 425 + struct dram_info *dram_info = &i915->dram_info; 426 + int ret; 427 + 428 + /* 429 + * Assume 16Gb DIMMs are present until proven otherwise. 430 + * This is only used for the level 0 watermark latency 431 + * w/a which does not apply to bxt/glk. 432 + */ 433 + dram_info->is_16gb_dimm = !IS_GEN9_LP(i915); 434 + 435 + if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915)) 436 + return; 437 + 438 + if (IS_GEN9_LP(i915)) 439 + ret = bxt_get_dram_info(i915); 440 + else 441 + ret = skl_get_dram_info(i915); 442 + if (ret) 443 + return; 444 + 445 + drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n", 446 + dram_info->bandwidth_kbps, dram_info->num_channels); 447 + 448 + drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n", 449 + dram_info->ranks, yesno(dram_info->is_16gb_dimm)); 450 + } 451 + 452 + static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap) 453 + { 454 + static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; 455 + static const u8 sets[4] = { 1, 1, 2, 2 }; 456 + 457 + return EDRAM_NUM_BANKS(cap) * 458 + ways[EDRAM_WAYS_IDX(cap)] * 459 + sets[EDRAM_SETS_IDX(cap)]; 460 + } 461 + 462 + void intel_dram_edram_detect(struct drm_i915_private *i915) 463 + { 464 + u32 edram_cap = 0; 465 + 466 + if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9)) 467 + return; 468 + 469 + edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP); 470 + 471 + /* NB: We can't write IDICR yet because 
we don't have gt funcs set up */ 472 + 473 + if (!(edram_cap & EDRAM_ENABLED)) 474 + return; 475 + 476 + /* 477 + * The needed capability bits for size calculation are not there with 478 + * pre gen9 so return 128MB always. 479 + */ 480 + if (INTEL_GEN(i915) < 9) 481 + i915->edram_size_mb = 128; 482 + else 483 + i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap); 484 + 485 + dev_info(i915->drm.dev, 486 + "Found %uMB of eDRAM\n", i915->edram_size_mb); 487 + }
+14
drivers/gpu/drm/i915/intel_dram.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2020 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DRAM_H__ 7 + #define __INTEL_DRAM_H__ 8 + 9 + struct drm_i915_private; 10 + 11 + void intel_dram_edram_detect(struct drm_i915_private *i915); 12 + void intel_dram_detect(struct drm_i915_private *i915); 13 + 14 + #endif /* __INTEL_DRAM_H__ */