Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2018-05-15' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for v4.18:

UAPI Changes:
- Fix render node number regression from control node removal.

Driver Changes:
- Small header fix for virgl, used by qemu.
- Use vm_fault_t in qxl.

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Tue 15 May 2018 06:16:03 PM AEST
# gpg: using RSA key FE558C72A67013C3
# gpg: Can't check signature: public key not found
Link: https://patchwork.freedesktop.org/patch/msgid/e63306b9-67a0-74ab-8883-08b3d9db72d2@mblankhorst.nl

+4614 -708
+28
Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
··· 1 + Broadcom V3D GPU 2 + 3 + Only the Broadcom V3D 3.x and newer GPUs are covered by this binding. 4 + For V3D 2.x, see brcm,bcm-vc4.txt. 5 + 6 + Required properties: 7 + - compatible: Should be "brcm,7268-v3d" or "brcm,7278-v3d" 8 + - reg: Physical base addresses and lengths of the register areas 9 + - reg-names: Names for the register areas. The "hub", "bridge", and "core0" 10 + register areas are always required. The "gca" register area 11 + is required if the GCA cache controller is present. 12 + - interrupts: The interrupt numbers. The first interrupt is for the hub, 13 + while the following interrupts are for the cores. 14 + See bindings/interrupt-controller/interrupts.txt 15 + 16 + Optional properties: 17 + - clocks: The core clock the unit runs on 18 + 19 + v3d { 20 + compatible = "brcm,7268-v3d"; 21 + reg = <0xf1204000 0x100>, 22 + <0xf1200000 0x4000>, 23 + <0xf1208000 0x4000>, 24 + <0xf1204100 0x100>; 25 + reg-names = "bridge", "hub", "core0", "gca"; 26 + interrupts = <0 78 4>, 27 + <0 77 4>; 28 + };
+1
Documentation/gpu/drivers.rst
··· 10 10 tegra 11 11 tinydrm 12 12 tve200 13 + v3d 13 14 vc4 14 15 bridge/dw-hdmi 15 16 xen-front
+8
MAINTAINERS
··· 4786 4786 F: drivers/gpu/drm/omapdrm/ 4787 4787 F: Documentation/devicetree/bindings/display/ti/ 4788 4788 4789 + DRM DRIVERS FOR V3D 4790 + M: Eric Anholt <eric@anholt.net> 4791 + S: Supported 4792 + F: drivers/gpu/drm/v3d/ 4793 + F: include/uapi/drm/v3d_drm.h 4794 + F: Documentation/devicetree/bindings/display/brcm,bcm-v3d.txt 4795 + T: git git://anongit.freedesktop.org/drm/drm-misc 4796 + 4789 4797 DRM DRIVERS FOR VC4 4790 4798 M: Eric Anholt <eric@anholt.net> 4791 4799 T: git git://github.com/anholt/linux
-10
drivers/dma-buf/sync_debug.h
··· 62 62 struct rb_node node; 63 63 }; 64 64 65 - #ifdef CONFIG_SW_SYNC 66 - 67 65 extern const struct file_operations sw_sync_debugfs_fops; 68 66 69 67 void sync_timeline_debug_add(struct sync_timeline *obj); ··· 69 71 void sync_file_debug_add(struct sync_file *fence); 70 72 void sync_file_debug_remove(struct sync_file *fence); 71 73 void sync_dump(void); 72 - 73 - #else 74 - # define sync_timeline_debug_add(obj) 75 - # define sync_timeline_debug_remove(obj) 76 - # define sync_file_debug_add(fence) 77 - # define sync_file_debug_remove(fence) 78 - # define sync_dump() 79 - #endif 80 74 81 75 #endif /* _LINUX_SYNC_H */
+7 -4
drivers/gpu/drm/Kconfig
··· 49 49 50 50 If in doubt, say "N". 51 51 52 - config DRM_DEBUG_MM_SELFTEST 53 - tristate "kselftests for DRM range manager (struct drm_mm)" 52 + config DRM_DEBUG_SELFTEST 53 + tristate "kselftests for DRM" 54 54 depends on DRM 55 55 depends on DEBUG_KERNEL 56 56 select PRIME_NUMBERS 57 57 select DRM_LIB_RANDOM 58 + select DRM_KMS_HELPER 58 59 default n 59 60 help 60 - This option provides a kernel module that can be used to test 61 - the DRM range manager (drm_mm) and its API. This option is not 61 + This option provides kernel modules that can be used to run 62 + various selftests on parts of the DRM api. This option is not 62 63 useful for distributions or general kernels, but only for kernel 63 64 developers working on DRM and associated drivers. 64 65 ··· 267 266 source "drivers/gpu/drm/amd/amdkfd/Kconfig" 268 267 269 268 source "drivers/gpu/drm/imx/Kconfig" 269 + 270 + source "drivers/gpu/drm/v3d/Kconfig" 270 271 271 272 source "drivers/gpu/drm/vc4/Kconfig" 272 273
+2 -1
drivers/gpu/drm/Makefile
··· 43 43 drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o 44 44 45 45 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o 46 - obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/ 46 + obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/ 47 47 48 48 obj-$(CONFIG_DRM) += drm.o 49 49 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o ··· 61 61 obj-$(CONFIG_DRM_I810) += i810/ 62 62 obj-$(CONFIG_DRM_I915) += i915/ 63 63 obj-$(CONFIG_DRM_MGAG200) += mgag200/ 64 + obj-$(CONFIG_DRM_V3D) += v3d/ 64 65 obj-$(CONFIG_DRM_VC4) += vc4/ 65 66 obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/ 66 67 obj-$(CONFIG_DRM_SIS) += sis/
+1 -1
drivers/gpu/drm/bridge/adv7511/Kconfig
··· 1 1 config DRM_I2C_ADV7511 2 - tristate "AV7511 encoder" 2 + tristate "ADV7511 encoder" 3 3 depends on OF 4 4 select DRM_KMS_HELPER 5 5 select REGMAP_I2C
+1 -1
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 1127 1127 } 1128 1128 1129 1129 if (adv7511->gpio_pd) { 1130 - mdelay(5); 1130 + usleep_range(5000, 6000); 1131 1131 gpiod_set_value_cansleep(adv7511->gpio_pd, 0); 1132 1132 } 1133 1133
+3 -1
drivers/gpu/drm/drm_atomic.c
··· 1425 1425 { 1426 1426 struct drm_plane *plane = plane_state->plane; 1427 1427 struct drm_crtc_state *crtc_state; 1428 - 1428 + /* Nothing to do for same crtc*/ 1429 + if (plane_state->crtc == crtc) 1430 + return 0; 1429 1431 if (plane_state->crtc) { 1430 1432 crtc_state = drm_atomic_get_crtc_state(plane_state->state, 1431 1433 plane_state->crtc);
+1 -1
drivers/gpu/drm/drm_atomic_helper.c
··· 766 766 if (crtc_state->enable) 767 767 drm_mode_get_hv_timing(&crtc_state->mode, &clip.x2, &clip.y2); 768 768 769 - plane_state->visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale); 769 + plane_state->visible = drm_rect_clip_scaled(src, dst, &clip); 770 770 771 771 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation); 772 772
+40 -10
drivers/gpu/drm/drm_connector.c
··· 1069 1069 goto nomem; 1070 1070 1071 1071 for (i = 0; i < num_modes; i++) 1072 - drm_property_add_enum(dev->mode_config.tv_mode_property, i, 1072 + drm_property_add_enum(dev->mode_config.tv_mode_property, 1073 1073 i, modes[i]); 1074 1074 1075 1075 dev->mode_config.tv_brightness_property = ··· 1156 1156 { 1157 1157 struct drm_device *dev = connector->dev; 1158 1158 struct drm_property *scaling_mode_property; 1159 - int i, j = 0; 1159 + int i; 1160 1160 const unsigned valid_scaling_mode_mask = 1161 1161 (1U << ARRAY_SIZE(drm_scaling_mode_enum_list)) - 1; 1162 1162 ··· 1177 1177 if (!(BIT(i) & scaling_mode_mask)) 1178 1178 continue; 1179 1179 1180 - ret = drm_property_add_enum(scaling_mode_property, j++, 1180 + ret = drm_property_add_enum(scaling_mode_property, 1181 1181 drm_scaling_mode_enum_list[i].type, 1182 1182 drm_scaling_mode_enum_list[i].name); 1183 1183 ··· 1531 1531 return connector->encoder; 1532 1532 } 1533 1533 1534 - static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, 1535 - const struct drm_file *file_priv) 1534 + static bool 1535 + drm_mode_expose_to_userspace(const struct drm_display_mode *mode, 1536 + const struct list_head *export_list, 1537 + const struct drm_file *file_priv) 1536 1538 { 1537 1539 /* 1538 1540 * If user-space hasn't configured the driver to expose the stereo 3D ··· 1542 1540 */ 1543 1541 if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode)) 1544 1542 return false; 1543 + /* 1544 + * If user-space hasn't configured the driver to expose the modes 1545 + * with aspect-ratio, don't expose them. However if such a mode 1546 + * is unique, let it be exposed, but reset the aspect-ratio flags 1547 + * while preparing the list of user-modes. 
1548 + */ 1549 + if (!file_priv->aspect_ratio_allowed) { 1550 + struct drm_display_mode *mode_itr; 1551 + 1552 + list_for_each_entry(mode_itr, export_list, export_head) 1553 + if (drm_mode_match(mode_itr, mode, 1554 + DRM_MODE_MATCH_TIMINGS | 1555 + DRM_MODE_MATCH_CLOCK | 1556 + DRM_MODE_MATCH_FLAGS | 1557 + DRM_MODE_MATCH_3D_FLAGS)) 1558 + return false; 1559 + } 1545 1560 1546 1561 return true; 1547 1562 } ··· 1578 1559 struct drm_mode_modeinfo u_mode; 1579 1560 struct drm_mode_modeinfo __user *mode_ptr; 1580 1561 uint32_t __user *encoder_ptr; 1562 + LIST_HEAD(export_list); 1581 1563 1582 1564 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1583 1565 return -EINVAL; ··· 1627 1607 1628 1608 /* delayed so we get modes regardless of pre-fill_modes state */ 1629 1609 list_for_each_entry(mode, &connector->modes, head) 1630 - if (drm_mode_expose_to_userspace(mode, file_priv)) 1610 + if (drm_mode_expose_to_userspace(mode, &export_list, 1611 + file_priv)) { 1612 + list_add_tail(&mode->export_head, &export_list); 1631 1613 mode_count++; 1614 + } 1632 1615 1633 1616 /* 1634 1617 * This ioctl is called twice, once to determine how much space is 1635 1618 * needed, and the 2nd time to fill it. 1619 + * The modes that need to be exposed to the user are maintained in the 1620 + * 'export_list'. When the ioctl is called first time to determine the, 1621 + * space, the export_list gets filled, to find the no.of modes. In the 1622 + * 2nd time, the user modes are filled, one by one from the export_list. 
1636 1623 */ 1637 1624 if ((out_resp->count_modes >= mode_count) && mode_count) { 1638 1625 copied = 0; 1639 1626 mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; 1640 - list_for_each_entry(mode, &connector->modes, head) { 1641 - if (!drm_mode_expose_to_userspace(mode, file_priv)) 1642 - continue; 1643 - 1627 + list_for_each_entry(mode, &export_list, export_head) { 1644 1628 drm_mode_convert_to_umode(&u_mode, mode); 1629 + /* 1630 + * Reset aspect ratio flags of user-mode, if modes with 1631 + * aspect-ratio are not supported. 1632 + */ 1633 + if (!file_priv->aspect_ratio_allowed) 1634 + u_mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; 1645 1635 if (copy_to_user(mode_ptr + copied, 1646 1636 &u_mode, sizeof(u_mode))) { 1647 1637 ret = -EFAULT;
+9
drivers/gpu/drm/drm_crtc.c
··· 449 449 crtc_resp->mode_valid = 0; 450 450 } 451 451 } 452 + if (!file_priv->aspect_ratio_allowed) 453 + crtc_resp->mode.flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; 452 454 drm_modeset_unlock(&crtc->mutex); 453 455 454 456 return 0; ··· 630 628 ret = -ENOMEM; 631 629 goto out; 632 630 } 631 + if (!file_priv->aspect_ratio_allowed && 632 + (crtc_req->mode.flags & DRM_MODE_FLAG_PIC_AR_MASK) != DRM_MODE_FLAG_PIC_AR_NONE) { 633 + DRM_DEBUG_KMS("Unexpected aspect-ratio flag bits\n"); 634 + ret = -EINVAL; 635 + goto out; 636 + } 637 + 633 638 634 639 ret = drm_mode_convert_umode(dev, mode, &crtc_req->mode); 635 640 if (ret) {
+18 -4
drivers/gpu/drm/drm_dp_helper.c
··· 119 119 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); 120 120 121 121 void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { 122 - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) 122 + int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & 123 + DP_TRAINING_AUX_RD_MASK; 124 + 125 + if (rd_interval > 4) 126 + DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", 127 + rd_interval); 128 + 129 + if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) 123 130 udelay(100); 124 131 else 125 - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); 132 + mdelay(rd_interval * 4); 126 133 } 127 134 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); 128 135 129 136 void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { 130 - if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0) 137 + int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & 138 + DP_TRAINING_AUX_RD_MASK; 139 + 140 + if (rd_interval > 4) 141 + DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", 142 + rd_interval); 143 + 144 + if (rd_interval == 0) 131 145 udelay(400); 132 146 else 133 - mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4); 147 + mdelay(rd_interval * 4); 134 148 } 135 149 EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); 136 150
-10
drivers/gpu/drm/drm_drv.c
··· 99 99 return &dev->primary; 100 100 case DRM_MINOR_RENDER: 101 101 return &dev->render; 102 - case DRM_MINOR_CONTROL: 103 - return &dev->control; 104 102 default: 105 103 BUG(); 106 104 } ··· 565 567 err_minors: 566 568 drm_minor_free(dev, DRM_MINOR_PRIMARY); 567 569 drm_minor_free(dev, DRM_MINOR_RENDER); 568 - drm_minor_free(dev, DRM_MINOR_CONTROL); 569 570 drm_fs_inode_free(dev->anon_inode); 570 571 err_free: 571 572 mutex_destroy(&dev->master_mutex); ··· 600 603 601 604 drm_minor_free(dev, DRM_MINOR_PRIMARY); 602 605 drm_minor_free(dev, DRM_MINOR_RENDER); 603 - drm_minor_free(dev, DRM_MINOR_CONTROL); 604 606 605 607 mutex_destroy(&dev->master_mutex); 606 608 mutex_destroy(&dev->ctxlist_mutex); ··· 792 796 793 797 mutex_lock(&drm_global_mutex); 794 798 795 - ret = drm_minor_register(dev, DRM_MINOR_CONTROL); 796 - if (ret) 797 - goto err_minors; 798 - 799 799 ret = drm_minor_register(dev, DRM_MINOR_RENDER); 800 800 if (ret) 801 801 goto err_minors; ··· 829 837 remove_compat_control_link(dev); 830 838 drm_minor_unregister(dev, DRM_MINOR_PRIMARY); 831 839 drm_minor_unregister(dev, DRM_MINOR_RENDER); 832 - drm_minor_unregister(dev, DRM_MINOR_CONTROL); 833 840 out_unlock: 834 841 mutex_unlock(&drm_global_mutex); 835 842 return ret; ··· 873 882 remove_compat_control_link(dev); 874 883 drm_minor_unregister(dev, DRM_MINOR_PRIMARY); 875 884 drm_minor_unregister(dev, DRM_MINOR_RENDER); 876 - drm_minor_unregister(dev, DRM_MINOR_CONTROL); 877 885 } 878 886 EXPORT_SYMBOL(drm_dev_unregister); 879 887
+31 -10
drivers/gpu/drm/drm_edid.c
··· 2930 2930 static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_match, 2931 2931 unsigned int clock_tolerance) 2932 2932 { 2933 + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; 2933 2934 u8 vic; 2934 2935 2935 2936 if (!to_match->clock) 2936 2937 return 0; 2938 + 2939 + if (to_match->picture_aspect_ratio) 2940 + match_flags |= DRM_MODE_MATCH_ASPECT_RATIO; 2937 2941 2938 2942 for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { 2939 2943 struct drm_display_mode cea_mode = edid_cea_modes[vic]; ··· 2952 2948 continue; 2953 2949 2954 2950 do { 2955 - if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode)) 2951 + if (drm_mode_match(to_match, &cea_mode, match_flags)) 2956 2952 return vic; 2957 2953 } while (cea_mode_alternate_timings(vic, &cea_mode)); 2958 2954 } ··· 2969 2965 */ 2970 2966 u8 drm_match_cea_mode(const struct drm_display_mode *to_match) 2971 2967 { 2968 + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; 2972 2969 u8 vic; 2973 2970 2974 2971 if (!to_match->clock) 2975 2972 return 0; 2973 + 2974 + if (to_match->picture_aspect_ratio) 2975 + match_flags |= DRM_MODE_MATCH_ASPECT_RATIO; 2976 2976 2977 2977 for (vic = 1; vic < ARRAY_SIZE(edid_cea_modes); vic++) { 2978 2978 struct drm_display_mode cea_mode = edid_cea_modes[vic]; ··· 2991 2983 continue; 2992 2984 2993 2985 do { 2994 - if (drm_mode_equal_no_clocks_no_stereo(to_match, &cea_mode)) 2986 + if (drm_mode_match(to_match, &cea_mode, match_flags)) 2995 2987 return vic; 2996 2988 } while (cea_mode_alternate_timings(vic, &cea_mode)); 2997 2989 } ··· 3038 3030 static u8 drm_match_hdmi_mode_clock_tolerance(const struct drm_display_mode *to_match, 3039 3031 unsigned int clock_tolerance) 3040 3032 { 3033 + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; 3041 3034 u8 vic; 3042 3035 3043 3036 if (!to_match->clock) ··· 3056 3047 abs(to_match->clock - clock2) > clock_tolerance) 3057 3048 continue; 3058 
3049 3059 - if (drm_mode_equal_no_clocks(to_match, hdmi_mode)) 3050 + if (drm_mode_match(to_match, hdmi_mode, match_flags)) 3060 3051 return vic; 3061 3052 } 3062 3053 ··· 3073 3064 */ 3074 3065 static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match) 3075 3066 { 3067 + unsigned int match_flags = DRM_MODE_MATCH_TIMINGS | DRM_MODE_MATCH_FLAGS; 3076 3068 u8 vic; 3077 3069 3078 3070 if (!to_match->clock) ··· 3089 3079 3090 3080 if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) || 3091 3081 KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) && 3092 - drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode)) 3082 + drm_mode_match(to_match, hdmi_mode, match_flags)) 3093 3083 return vic; 3094 3084 } 3095 3085 return 0; ··· 4833 4823 const struct drm_display_mode *mode, 4834 4824 bool is_hdmi2_sink) 4835 4825 { 4826 + enum hdmi_picture_aspect picture_aspect; 4836 4827 int err; 4837 4828 4838 4829 if (!frame || !mode) ··· 4876 4865 * Populate picture aspect ratio from either 4877 4866 * user input (if specified) or from the CEA mode list. 4878 4867 */ 4879 - if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 || 4880 - mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9) 4881 - frame->picture_aspect = mode->picture_aspect_ratio; 4882 - else if (frame->video_code > 0) 4883 - frame->picture_aspect = drm_get_cea_aspect_ratio( 4884 - frame->video_code); 4868 + picture_aspect = mode->picture_aspect_ratio; 4869 + if (picture_aspect == HDMI_PICTURE_ASPECT_NONE) 4870 + picture_aspect = drm_get_cea_aspect_ratio(frame->video_code); 4885 4871 4872 + /* 4873 + * The infoframe can't convey anything but none, 4:3 4874 + * and 16:9, so if the user has asked for anything else 4875 + * we can only satisfy it by specifying the right VIC. 
4876 + */ 4877 + if (picture_aspect > HDMI_PICTURE_ASPECT_16_9) { 4878 + if (picture_aspect != 4879 + drm_get_cea_aspect_ratio(frame->video_code)) 4880 + return -EINVAL; 4881 + picture_aspect = HDMI_PICTURE_ASPECT_NONE; 4882 + } 4883 + 4884 + frame->picture_aspect = picture_aspect; 4886 4885 frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; 4887 4886 frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN; 4888 4887
+10 -2
drivers/gpu/drm/drm_fb_helper.c
··· 2183 2183 for (j = 0; j < i; j++) { 2184 2184 if (!enabled[j]) 2185 2185 continue; 2186 - if (!drm_mode_equal(modes[j], modes[i])) 2186 + if (!drm_mode_match(modes[j], modes[i], 2187 + DRM_MODE_MATCH_TIMINGS | 2188 + DRM_MODE_MATCH_CLOCK | 2189 + DRM_MODE_MATCH_FLAGS | 2190 + DRM_MODE_MATCH_3D_FLAGS)) 2187 2191 can_clone = false; 2188 2192 } 2189 2193 } ··· 2207 2203 2208 2204 fb_helper_conn = fb_helper->connector_info[i]; 2209 2205 list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { 2210 - if (drm_mode_equal(mode, dmt_mode)) 2206 + if (drm_mode_match(mode, dmt_mode, 2207 + DRM_MODE_MATCH_TIMINGS | 2208 + DRM_MODE_MATCH_CLOCK | 2209 + DRM_MODE_MATCH_FLAGS | 2210 + DRM_MODE_MATCH_3D_FLAGS)) 2211 2211 modes[i] = mode; 2212 2212 } 2213 2213 if (!modes[i])
+1 -2
drivers/gpu/drm/drm_framebuffer.c
··· 484 484 * backwards-compatibility reasons, we cannot make GET_FB() privileged, 485 485 * so just return an invalid handle for non-masters. 486 486 */ 487 - if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN) && 488 - !drm_is_control_client(file_priv)) { 487 + if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) { 489 488 r->handle = 0; 490 489 ret = 0; 491 490 goto out;
+2 -2
drivers/gpu/drm/drm_ioc32.c
··· 105 105 .desc = compat_ptr(v32.desc), 106 106 }; 107 107 err = drm_ioctl_kernel(file, drm_version, &v, 108 - DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW); 108 + DRM_UNLOCKED|DRM_RENDER_ALLOW); 109 109 if (err) 110 110 return err; 111 111 ··· 885 885 return -EFAULT; 886 886 887 887 err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, 888 - DRM_CONTROL_ALLOW|DRM_UNLOCKED); 888 + DRM_UNLOCKED); 889 889 if (err) 890 890 return err; 891 891
+44 -41
drivers/gpu/drm/drm_ioctl.c
··· 324 324 return -EINVAL; 325 325 file_priv->atomic = req->value; 326 326 file_priv->universal_planes = req->value; 327 + /* 328 + * No atomic user-space blows up on aspect ratio mode bits. 329 + */ 330 + file_priv->aspect_ratio_allowed = req->value; 331 + break; 332 + case DRM_CLIENT_CAP_ASPECT_RATIO: 333 + if (req->value > 1) 334 + return -EINVAL; 335 + file_priv->aspect_ratio_allowed = req->value; 327 336 break; 328 337 default: 329 338 return -EINVAL; ··· 519 510 520 511 /* MASTER is only for master or control clients */ 521 512 if (unlikely((flags & DRM_MASTER) && 522 - !drm_is_current_master(file_priv) && 523 - !drm_is_control_client(file_priv))) 524 - return -EACCES; 525 - 526 - /* Control clients must be explicitly allowed */ 527 - if (unlikely(!(flags & DRM_CONTROL_ALLOW) && 528 - drm_is_control_client(file_priv))) 513 + !drm_is_current_master(file_priv))) 529 514 return -EACCES; 530 515 531 516 /* Render clients must be explicitly allowed */ ··· 542 539 /* Ioctl table */ 543 540 static const struct drm_ioctl_desc drm_ioctls[] = { 544 541 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 545 - DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), 542 + DRM_UNLOCKED|DRM_RENDER_ALLOW), 546 543 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED), 547 544 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), 548 545 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), ··· 616 613 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), 617 614 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), 618 615 619 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 616 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED), 620 617 621 618 DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 622 619 
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 623 620 624 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 625 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 626 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 627 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 628 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 629 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 621 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED), 622 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED), 623 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED), 624 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED), 625 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED), 626 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED), 630 627 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), 631 628 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), 632 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 633 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 634 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 635 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 636 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 637 - 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 638 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 639 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 640 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 641 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 642 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 643 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 644 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 645 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 646 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 647 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 648 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 649 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 650 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 651 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 652 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 653 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 629 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED), 630 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, 
DRM_UNLOCKED), 631 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), 632 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED), 633 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED), 634 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED), 635 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED), 636 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED), 637 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_UNLOCKED), 638 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_UNLOCKED), 639 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_UNLOCKED), 640 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED), 641 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED), 642 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED), 643 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED), 644 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED), 645 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED), 646 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED), 647 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED), 648 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED), 649 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED), 650 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED), 654 651 655 652 DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl, 656 653 DRM_UNLOCKED|DRM_RENDER_ALLOW), ··· 668 665 DRM_UNLOCKED|DRM_RENDER_ALLOW), 669 666 
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED), 670 667 DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED), 671 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 672 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 673 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 674 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 668 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), 669 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED), 670 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), 671 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED), 675 672 }; 676 673 677 674 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+154 -37
drivers/gpu/drm/drm_modes.c
··· 939 939 } 940 940 EXPORT_SYMBOL(drm_mode_duplicate); 941 941 942 + static bool drm_mode_match_timings(const struct drm_display_mode *mode1, 943 + const struct drm_display_mode *mode2) 944 + { 945 + return mode1->hdisplay == mode2->hdisplay && 946 + mode1->hsync_start == mode2->hsync_start && 947 + mode1->hsync_end == mode2->hsync_end && 948 + mode1->htotal == mode2->htotal && 949 + mode1->hskew == mode2->hskew && 950 + mode1->vdisplay == mode2->vdisplay && 951 + mode1->vsync_start == mode2->vsync_start && 952 + mode1->vsync_end == mode2->vsync_end && 953 + mode1->vtotal == mode2->vtotal && 954 + mode1->vscan == mode2->vscan; 955 + } 956 + 957 + static bool drm_mode_match_clock(const struct drm_display_mode *mode1, 958 + const struct drm_display_mode *mode2) 959 + { 960 + /* 961 + * do clock check convert to PICOS 962 + * so fb modes get matched the same 963 + */ 964 + if (mode1->clock && mode2->clock) 965 + return KHZ2PICOS(mode1->clock) == KHZ2PICOS(mode2->clock); 966 + else 967 + return mode1->clock == mode2->clock; 968 + } 969 + 970 + static bool drm_mode_match_flags(const struct drm_display_mode *mode1, 971 + const struct drm_display_mode *mode2) 972 + { 973 + return (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) == 974 + (mode2->flags & ~DRM_MODE_FLAG_3D_MASK); 975 + } 976 + 977 + static bool drm_mode_match_3d_flags(const struct drm_display_mode *mode1, 978 + const struct drm_display_mode *mode2) 979 + { 980 + return (mode1->flags & DRM_MODE_FLAG_3D_MASK) == 981 + (mode2->flags & DRM_MODE_FLAG_3D_MASK); 982 + } 983 + 984 + static bool drm_mode_match_aspect_ratio(const struct drm_display_mode *mode1, 985 + const struct drm_display_mode *mode2) 986 + { 987 + return mode1->picture_aspect_ratio == mode2->picture_aspect_ratio; 988 + } 989 + 990 + /** 991 + * drm_mode_match - test modes for (partial) equality 992 + * @mode1: first mode 993 + * @mode2: second mode 994 + * @match_flags: which parts need to match (DRM_MODE_MATCH_*) 995 + * 996 + * Check to see if @mode1 
and @mode2 are equivalent. 997 + * 998 + * Returns: 999 + * True if the modes are (partially) equal, false otherwise. 1000 + */ 1001 + bool drm_mode_match(const struct drm_display_mode *mode1, 1002 + const struct drm_display_mode *mode2, 1003 + unsigned int match_flags) 1004 + { 1005 + if (!mode1 && !mode2) 1006 + return true; 1007 + 1008 + if (!mode1 || !mode2) 1009 + return false; 1010 + 1011 + if (match_flags & DRM_MODE_MATCH_TIMINGS && 1012 + !drm_mode_match_timings(mode1, mode2)) 1013 + return false; 1014 + 1015 + if (match_flags & DRM_MODE_MATCH_CLOCK && 1016 + !drm_mode_match_clock(mode1, mode2)) 1017 + return false; 1018 + 1019 + if (match_flags & DRM_MODE_MATCH_FLAGS && 1020 + !drm_mode_match_flags(mode1, mode2)) 1021 + return false; 1022 + 1023 + if (match_flags & DRM_MODE_MATCH_3D_FLAGS && 1024 + !drm_mode_match_3d_flags(mode1, mode2)) 1025 + return false; 1026 + 1027 + if (match_flags & DRM_MODE_MATCH_ASPECT_RATIO && 1028 + !drm_mode_match_aspect_ratio(mode1, mode2)) 1029 + return false; 1030 + 1031 + return true; 1032 + } 1033 + EXPORT_SYMBOL(drm_mode_match); 1034 + 942 1035 /** 943 1036 * drm_mode_equal - test modes for equality 944 1037 * @mode1: first mode ··· 1042 949 * Returns: 1043 950 * True if the modes are equal, false otherwise. 
1044 951 */ 1045 - bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) 952 + bool drm_mode_equal(const struct drm_display_mode *mode1, 953 + const struct drm_display_mode *mode2) 1046 954 { 1047 - if (!mode1 && !mode2) 1048 - return true; 1049 - 1050 - if (!mode1 || !mode2) 1051 - return false; 1052 - 1053 - /* do clock check convert to PICOS so fb modes get matched 1054 - * the same */ 1055 - if (mode1->clock && mode2->clock) { 1056 - if (KHZ2PICOS(mode1->clock) != KHZ2PICOS(mode2->clock)) 1057 - return false; 1058 - } else if (mode1->clock != mode2->clock) 1059 - return false; 1060 - 1061 - return drm_mode_equal_no_clocks(mode1, mode2); 955 + return drm_mode_match(mode1, mode2, 956 + DRM_MODE_MATCH_TIMINGS | 957 + DRM_MODE_MATCH_CLOCK | 958 + DRM_MODE_MATCH_FLAGS | 959 + DRM_MODE_MATCH_3D_FLAGS| 960 + DRM_MODE_MATCH_ASPECT_RATIO); 1062 961 } 1063 962 EXPORT_SYMBOL(drm_mode_equal); 1064 963 ··· 1065 980 * Returns: 1066 981 * True if the modes are equal, false otherwise. 
1067 982 */ 1068 - bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) 983 + bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, 984 + const struct drm_display_mode *mode2) 1069 985 { 1070 - if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) != 1071 - (mode2->flags & DRM_MODE_FLAG_3D_MASK)) 1072 - return false; 1073 - 1074 - return drm_mode_equal_no_clocks_no_stereo(mode1, mode2); 986 + return drm_mode_match(mode1, mode2, 987 + DRM_MODE_MATCH_TIMINGS | 988 + DRM_MODE_MATCH_FLAGS | 989 + DRM_MODE_MATCH_3D_FLAGS); 1075 990 } 1076 991 EXPORT_SYMBOL(drm_mode_equal_no_clocks); 1077 992 ··· 1089 1004 bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, 1090 1005 const struct drm_display_mode *mode2) 1091 1006 { 1092 - if (mode1->hdisplay == mode2->hdisplay && 1093 - mode1->hsync_start == mode2->hsync_start && 1094 - mode1->hsync_end == mode2->hsync_end && 1095 - mode1->htotal == mode2->htotal && 1096 - mode1->hskew == mode2->hskew && 1097 - mode1->vdisplay == mode2->vdisplay && 1098 - mode1->vsync_start == mode2->vsync_start && 1099 - mode1->vsync_end == mode2->vsync_end && 1100 - mode1->vtotal == mode2->vtotal && 1101 - mode1->vscan == mode2->vscan && 1102 - (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) == 1103 - (mode2->flags & ~DRM_MODE_FLAG_3D_MASK)) 1104 - return true; 1105 - 1106 - return false; 1007 + return drm_mode_match(mode1, mode2, 1008 + DRM_MODE_MATCH_TIMINGS | 1009 + DRM_MODE_MATCH_FLAGS); 1107 1010 } 1108 1011 EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo); 1109 1012 ··· 1648 1575 out->vrefresh = in->vrefresh; 1649 1576 out->flags = in->flags; 1650 1577 out->type = in->type; 1578 + 1579 + switch (in->picture_aspect_ratio) { 1580 + case HDMI_PICTURE_ASPECT_4_3: 1581 + out->flags |= DRM_MODE_FLAG_PIC_AR_4_3; 1582 + break; 1583 + case HDMI_PICTURE_ASPECT_16_9: 1584 + out->flags |= DRM_MODE_FLAG_PIC_AR_16_9; 1585 + break; 1586 + case HDMI_PICTURE_ASPECT_64_27: 1587 + 
out->flags |= DRM_MODE_FLAG_PIC_AR_64_27; 1588 + break; 1589 + case HDMI_PICTURE_ASPECT_256_135: 1590 + out->flags |= DRM_MODE_FLAG_PIC_AR_256_135; 1591 + break; 1592 + case HDMI_PICTURE_ASPECT_RESERVED: 1593 + default: 1594 + out->flags |= DRM_MODE_FLAG_PIC_AR_NONE; 1595 + break; 1596 + } 1597 + 1651 1598 strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); 1652 1599 out->name[DRM_DISPLAY_MODE_LEN-1] = 0; 1653 1600 } ··· 1713 1620 out->type = in->type & DRM_MODE_TYPE_ALL; 1714 1621 strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN); 1715 1622 out->name[DRM_DISPLAY_MODE_LEN-1] = 0; 1623 + 1624 + /* Clearing picture aspect ratio bits from out flags, 1625 + * as the aspect-ratio information is not stored in 1626 + * flags for kernel-mode, but in picture_aspect_ratio. 1627 + */ 1628 + out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK; 1629 + 1630 + switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) { 1631 + case DRM_MODE_FLAG_PIC_AR_4_3: 1632 + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3; 1633 + break; 1634 + case DRM_MODE_FLAG_PIC_AR_16_9: 1635 + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9; 1636 + break; 1637 + case DRM_MODE_FLAG_PIC_AR_64_27: 1638 + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27; 1639 + break; 1640 + case DRM_MODE_FLAG_PIC_AR_256_135: 1641 + out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135; 1642 + break; 1643 + default: 1644 + out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 1645 + break; 1646 + } 1716 1647 1717 1648 out->status = drm_mode_validate_driver(dev, out); 1718 1649 if (out->status != MODE_OK)
+3 -4
drivers/gpu/drm/drm_panel_orientation_quirks.c
··· 172 172 if (!bios_date) 173 173 continue; 174 174 175 - for (i = 0; data->bios_dates[i]; i++) { 176 - if (!strcmp(data->bios_dates[i], bios_date)) 177 - return data->orientation; 178 - } 175 + i = match_string(data->bios_dates, -1, bios_date); 176 + if (i >= 0) 177 + return data->orientation; 179 178 } 180 179 181 180 return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+6 -2
drivers/gpu/drm/drm_prime.c
··· 409 409 struct drm_gem_object *obj = dma_buf->priv; 410 410 struct drm_device *dev = obj->dev; 411 411 412 - return dev->driver->gem_prime_vmap(obj); 412 + if (dev->driver->gem_prime_vmap) 413 + return dev->driver->gem_prime_vmap(obj); 414 + else 415 + return NULL; 413 416 } 414 417 EXPORT_SYMBOL(drm_gem_dmabuf_vmap); 415 418 ··· 429 426 struct drm_gem_object *obj = dma_buf->priv; 430 427 struct drm_device *dev = obj->dev; 431 428 432 - dev->driver->gem_prime_vunmap(obj, vaddr); 429 + if (dev->driver->gem_prime_vunmap) 430 + dev->driver->gem_prime_vunmap(obj, vaddr); 433 431 } 434 432 EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); 435 433
+13 -14
drivers/gpu/drm/drm_property.c
··· 169 169 return NULL; 170 170 171 171 for (i = 0; i < num_values; i++) { 172 - ret = drm_property_add_enum(property, i, 173 - props[i].type, 174 - props[i].name); 172 + ret = drm_property_add_enum(property, 173 + props[i].type, 174 + props[i].name); 175 175 if (ret) { 176 176 drm_property_destroy(dev, property); 177 177 return NULL; ··· 209 209 uint64_t supported_bits) 210 210 { 211 211 struct drm_property *property; 212 - int i, ret, index = 0; 212 + int i, ret; 213 213 int num_values = hweight64(supported_bits); 214 214 215 215 flags |= DRM_MODE_PROP_BITMASK; ··· 221 221 if (!(supported_bits & (1ULL << props[i].type))) 222 222 continue; 223 223 224 - if (WARN_ON(index >= num_values)) { 225 - drm_property_destroy(dev, property); 226 - return NULL; 227 - } 228 - 229 - ret = drm_property_add_enum(property, index++, 230 - props[i].type, 231 - props[i].name); 224 + ret = drm_property_add_enum(property, 225 + props[i].type, 226 + props[i].name); 232 227 if (ret) { 233 228 drm_property_destroy(dev, property); 234 229 return NULL; ··· 371 376 /** 372 377 * drm_property_add_enum - add a possible value to an enumeration property 373 378 * @property: enumeration property to change 374 - * @index: index of the new enumeration 375 379 * @value: value of the new enumeration 376 380 * @name: symbolic name of the new enumeration 377 381 * ··· 382 388 * Returns: 383 389 * Zero on success, error code on failure. 
384 390 */ 385 - int drm_property_add_enum(struct drm_property *property, int index, 391 + int drm_property_add_enum(struct drm_property *property, 386 392 uint64_t value, const char *name) 387 393 { 388 394 struct drm_property_enum *prop_enum; 395 + int index = 0; 389 396 390 397 if (WARN_ON(strlen(name) >= DRM_PROP_NAME_LEN)) 391 398 return -EINVAL; ··· 406 411 list_for_each_entry(prop_enum, &property->enum_list, head) { 407 412 if (WARN_ON(prop_enum->value == value)) 408 413 return -EINVAL; 414 + index++; 409 415 } 416 + 417 + if (WARN_ON(index >= property->num_values)) 418 + return -EINVAL; 410 419 411 420 prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL); 412 421 if (!prop_enum)
+58 -16
drivers/gpu/drm/drm_rect.c
··· 50 50 } 51 51 EXPORT_SYMBOL(drm_rect_intersect); 52 52 53 + static u32 clip_scaled(u32 src, u32 dst, u32 clip) 54 + { 55 + u64 tmp = mul_u32_u32(src, dst - clip); 56 + 57 + /* 58 + * Round toward 1.0 when clipping so that we don't accidentally 59 + * change upscaling to downscaling or vice versa. 60 + */ 61 + if (src < (dst << 16)) 62 + return DIV_ROUND_UP_ULL(tmp, dst); 63 + else 64 + return DIV_ROUND_DOWN_ULL(tmp, dst); 65 + } 66 + 53 67 /** 54 68 * drm_rect_clip_scaled - perform a scaled clip operation 55 69 * @src: source window rectangle 56 70 * @dst: destination window rectangle 57 71 * @clip: clip rectangle 58 - * @hscale: horizontal scaling factor 59 - * @vscale: vertical scaling factor 60 72 * 61 73 * Clip rectangle @dst by rectangle @clip. Clip rectangle @src by the 62 74 * same amounts multiplied by @hscale and @vscale. ··· 78 66 * %false otherwise 79 67 */ 80 68 bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, 81 - const struct drm_rect *clip, 82 - int hscale, int vscale) 69 + const struct drm_rect *clip) 83 70 { 84 71 int diff; 85 72 86 73 diff = clip->x1 - dst->x1; 87 74 if (diff > 0) { 88 - int64_t tmp = src->x1 + (int64_t) diff * hscale; 89 - src->x1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); 75 + u32 new_src_w = clip_scaled(drm_rect_width(src), 76 + drm_rect_width(dst), diff); 77 + 78 + src->x1 = clamp_t(int64_t, src->x2 - new_src_w, INT_MIN, INT_MAX); 79 + dst->x1 = clip->x1; 90 80 } 91 81 diff = clip->y1 - dst->y1; 92 82 if (diff > 0) { 93 - int64_t tmp = src->y1 + (int64_t) diff * vscale; 94 - src->y1 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); 83 + u32 new_src_h = clip_scaled(drm_rect_height(src), 84 + drm_rect_height(dst), diff); 85 + 86 + src->y1 = clamp_t(int64_t, src->y2 - new_src_h, INT_MIN, INT_MAX); 87 + dst->y1 = clip->y1; 95 88 } 96 89 diff = dst->x2 - clip->x2; 97 90 if (diff > 0) { 98 - int64_t tmp = src->x2 - (int64_t) diff * hscale; 99 - src->x2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); 91 + u32 new_src_w = 
clip_scaled(drm_rect_width(src), 92 + drm_rect_width(dst), diff); 93 + 94 + src->x2 = clamp_t(int64_t, src->x1 + new_src_w, INT_MIN, INT_MAX); 95 + dst->x2 = clip->x2; 100 96 } 101 97 diff = dst->y2 - clip->y2; 102 98 if (diff > 0) { 103 - int64_t tmp = src->y2 - (int64_t) diff * vscale; 104 - src->y2 = clamp_t(int64_t, tmp, INT_MIN, INT_MAX); 99 + u32 new_src_h = clip_scaled(drm_rect_height(src), 100 + drm_rect_height(dst), diff); 101 + 102 + src->y2 = clamp_t(int64_t, src->y1 + new_src_h, INT_MIN, INT_MAX); 103 + dst->y2 = clip->y2; 105 104 } 106 105 107 - return drm_rect_intersect(dst, clip); 106 + return drm_rect_visible(dst); 108 107 } 109 108 EXPORT_SYMBOL(drm_rect_clip_scaled); 110 109 ··· 129 106 if (dst == 0) 130 107 return 0; 131 108 132 - scale = src / dst; 109 + if (src > (dst << 16)) 110 + return DIV_ROUND_UP(src, dst); 111 + else 112 + scale = src / dst; 133 113 134 114 return scale; 135 115 } ··· 146 120 * 147 121 * Calculate the horizontal scaling factor as 148 122 * (@src width) / (@dst width). 123 + * 124 + * If the scale is below 1 << 16, round down. If the scale is above 125 + * 1 << 16, round up. This will calculate the scale with the most 126 + * pessimistic limit calculation. 149 127 * 150 128 * RETURNS: 151 129 * The horizontal scaling factor, or errno of out of limits. ··· 181 151 * 182 152 * Calculate the vertical scaling factor as 183 153 * (@src height) / (@dst height). 154 + * 155 + * If the scale is below 1 << 16, round down. If the scale is above 156 + * 1 << 16, round up. This will calculate the scale with the most 157 + * pessimistic limit calculation. 184 158 * 185 159 * RETURNS: 186 160 * The vertical scaling factor, or errno of out of limits. ··· 222 188 * 223 189 * If the calculated scaling factor is above @max_vscale, 224 190 * decrease the height of rectangle @src to compensate. 191 + * 192 + * If the scale is below 1 << 16, round down. If the scale is above 193 + * 1 << 16, round up. 
This will calculate the scale with the most 194 + * pessimistic limit calculation. 225 195 * 226 196 * RETURNS: 227 197 * The horizontal scaling factor. ··· 276 238 * 277 239 * If the calculated scaling factor is above @max_vscale, 278 240 * decrease the height of rectangle @src to compensate. 241 + * 242 + * If the scale is below 1 << 16, round down. If the scale is above 243 + * 1 << 16, round up. This will calculate the scale with the most 244 + * pessimistic limit calculation. 279 245 * 280 246 * RETURNS: 281 247 * The vertical scaling factor. ··· 415 373 * them when doing a rotatation and its inverse. 416 374 * That is, if you do :: 417 375 * 418 - * DRM_MODE_PROP_ROTATE(&r, width, height, rotation); 419 - * DRM_MODE_ROTATE_inv(&r, width, height, rotation); 376 + * drm_rect_rotate(&r, width, height, rotation); 377 + * drm_rect_rotate_inv(&r, width, height, rotation); 420 378 * 421 379 * you will always get back the original rectangle. 422 380 */
+1 -3
drivers/gpu/drm/drm_sysfs.c
··· 331 331 struct device *kdev; 332 332 int r; 333 333 334 - if (minor->type == DRM_MINOR_CONTROL) 335 - minor_str = "controlD%d"; 336 - else if (minor->type == DRM_MINOR_RENDER) 334 + if (minor->type == DRM_MINOR_RENDER) 337 335 minor_str = "renderD%d"; 338 336 else 339 337 minor_str = "card%d";
+2 -2
drivers/gpu/drm/gma500/cdv_device.c
··· 485 485 return; 486 486 487 487 for (i = 0; i < ARRAY_SIZE(force_audio_names); i++) 488 - drm_property_add_enum(prop, i, i-1, force_audio_names[i]); 488 + drm_property_add_enum(prop, i-1, force_audio_names[i]); 489 489 490 490 dev_priv->force_audio_property = prop; 491 491 } ··· 514 514 return; 515 515 516 516 for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++) 517 - drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]); 517 + drm_property_add_enum(prop, i, broadcast_rgb_names[i]); 518 518 519 519 dev_priv->broadcast_rgb_property = prop; 520 520 }
+1 -1
drivers/gpu/drm/gma500/psb_intel_sdvo.c
··· 2281 2281 2282 2282 for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++) 2283 2283 drm_property_add_enum( 2284 - psb_intel_sdvo_connector->tv_format, i, 2284 + psb_intel_sdvo_connector->tv_format, 2285 2285 i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]); 2286 2286 2287 2287 psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+1 -1
drivers/gpu/drm/i915/Kconfig.debug
··· 26 26 select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) 27 27 select DRM_DEBUG_MM if DRM=y 28 28 select STACKDEPOT if DRM=y # for DRM_DEBUG_MM 29 - select DRM_DEBUG_MM_SELFTEST 29 + select DRM_DEBUG_SELFTEST 30 30 select SW_SYNC # signaling validation framework (igt/syncobj*) 31 31 select DRM_I915_SW_FENCE_DEBUG_OBJECTS 32 32 select DRM_I915_SELFTEST
+4 -4
drivers/gpu/drm/i915/i915_drv.c
··· 2822 2822 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 2823 2823 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), 2824 2824 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 2825 - DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2826 - DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2827 - DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2828 - DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 2825 + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER), 2826 + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER), 2827 + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), 2828 + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), 2829 2829 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2830 2830 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 2831 2831 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+2 -3
drivers/gpu/drm/i915/intel_sdvo.c
··· 2779 2779 return false; 2780 2780 2781 2781 for (i = 0; i < intel_sdvo_connector->format_supported_num; i++) 2782 - drm_property_add_enum( 2783 - intel_sdvo_connector->tv_format, i, 2784 - i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); 2782 + drm_property_add_enum(intel_sdvo_connector->tv_format, i, 2783 + tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); 2785 2784 2786 2785 intel_sdvo_connector->base.base.state->tv.mode = intel_sdvo_connector->tv_format_supported[0]; 2787 2786 drm_object_attach_property(&intel_sdvo_connector->base.base.base,
+38 -112
drivers/gpu/drm/i915/intel_sprite.c
··· 936 936 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 937 937 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 938 938 struct drm_framebuffer *fb = state->base.fb; 939 - int crtc_x, crtc_y; 940 - unsigned int crtc_w, crtc_h; 941 - uint32_t src_x, src_y, src_w, src_h; 942 - struct drm_rect *src = &state->base.src; 943 - struct drm_rect *dst = &state->base.dst; 944 - struct drm_rect clip = {}; 945 939 int max_stride = INTEL_GEN(dev_priv) >= 9 ? 32768 : 16384; 946 - int hscale, vscale; 947 940 int max_scale, min_scale; 948 941 bool can_scale; 949 942 int ret; 950 943 uint32_t pixel_format = 0; 951 - 952 - *src = drm_plane_state_src(&state->base); 953 - *dst = drm_plane_state_dest(&state->base); 954 944 955 945 if (!fb) { 956 946 state->base.visible = false; ··· 980 990 min_scale = plane->can_scale ? 1 : (1 << 16); 981 991 } 982 992 983 - /* 984 - * FIXME the following code does a bunch of fuzzy adjustments to the 985 - * coordinates and sizes. We probably need some way to decide whether 986 - * more strict checking should be done instead. 
987 - */ 988 - drm_rect_rotate(src, fb->width << 16, fb->height << 16, 989 - state->base.rotation); 990 - 991 - hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale); 992 - BUG_ON(hscale < 0); 993 - 994 - vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale); 995 - BUG_ON(vscale < 0); 996 - 997 - if (crtc_state->base.enable) 998 - drm_mode_get_hv_timing(&crtc_state->base.mode, 999 - &clip.x2, &clip.y2); 1000 - 1001 - state->base.visible = drm_rect_clip_scaled(src, dst, &clip, hscale, vscale); 1002 - 1003 - crtc_x = dst->x1; 1004 - crtc_y = dst->y1; 1005 - crtc_w = drm_rect_width(dst); 1006 - crtc_h = drm_rect_height(dst); 993 + ret = drm_atomic_helper_check_plane_state(&state->base, 994 + &crtc_state->base, 995 + min_scale, max_scale, 996 + true, true); 997 + if (ret) 998 + return ret; 1007 999 1008 1000 if (state->base.visible) { 1009 - /* check again in case clipping clamped the results */ 1010 - hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); 1011 - if (hscale < 0) { 1012 - DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n"); 1013 - drm_rect_debug_print("src: ", src, true); 1014 - drm_rect_debug_print("dst: ", dst, false); 1015 - 1016 - return hscale; 1017 - } 1018 - 1019 - vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); 1020 - if (vscale < 0) { 1021 - DRM_DEBUG_KMS("Vertical scaling factor out of limits\n"); 1022 - drm_rect_debug_print("src: ", src, true); 1023 - drm_rect_debug_print("dst: ", dst, false); 1024 - 1025 - return vscale; 1026 - } 1027 - 1028 - /* Make the source viewport size an exact multiple of the scaling factors. 
*/ 1029 - drm_rect_adjust_size(src, 1030 - drm_rect_width(dst) * hscale - drm_rect_width(src), 1031 - drm_rect_height(dst) * vscale - drm_rect_height(src)); 1032 - 1033 - drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, 1034 - state->base.rotation); 1035 - 1036 - /* sanity check to make sure the src viewport wasn't enlarged */ 1037 - WARN_ON(src->x1 < (int) state->base.src_x || 1038 - src->y1 < (int) state->base.src_y || 1039 - src->x2 > (int) state->base.src_x + state->base.src_w || 1040 - src->y2 > (int) state->base.src_y + state->base.src_h); 1001 + struct drm_rect *src = &state->base.src; 1002 + struct drm_rect *dst = &state->base.dst; 1003 + unsigned int crtc_w = drm_rect_width(dst); 1004 + unsigned int crtc_h = drm_rect_height(dst); 1005 + uint32_t src_x, src_y, src_w, src_h; 1041 1006 1042 1007 /* 1043 1008 * Hardware doesn't handle subpixel coordinates. ··· 1005 1060 src_y = src->y1 >> 16; 1006 1061 src_h = drm_rect_height(src) >> 16; 1007 1062 1008 - if (intel_format_is_yuv(fb->format->format)) { 1009 - src_x &= ~1; 1010 - src_w &= ~1; 1011 - 1012 - /* 1013 - * Must keep src and dst the 1014 - * same if we can't scale. 
1015 - */ 1016 - if (!can_scale) 1017 - crtc_w &= ~1; 1018 - 1019 - if (crtc_w == 0) 1020 - state->base.visible = false; 1021 - } 1022 - } 1023 - 1024 - /* Check size restrictions when scaling */ 1025 - if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) { 1026 - unsigned int width_bytes; 1027 - int cpp = fb->format->cpp[0]; 1028 - 1029 - WARN_ON(!can_scale); 1030 - 1031 - /* FIXME interlacing min height is 6 */ 1032 - 1033 - if (crtc_w < 3 || crtc_h < 3) 1034 - state->base.visible = false; 1035 - 1036 - if (src_w < 3 || src_h < 3) 1037 - state->base.visible = false; 1038 - 1039 - width_bytes = ((src_x * cpp) & 63) + src_w * cpp; 1040 - 1041 - if (INTEL_GEN(dev_priv) < 9 && (src_w > 2048 || src_h > 2048 || 1042 - width_bytes > 4096 || fb->pitches[0] > 4096)) { 1043 - DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n"); 1044 - return -EINVAL; 1045 - } 1046 - } 1047 - 1048 - if (state->base.visible) { 1049 1063 src->x1 = src_x << 16; 1050 1064 src->x2 = (src_x + src_w) << 16; 1051 1065 src->y1 = src_y << 16; 1052 1066 src->y2 = (src_y + src_h) << 16; 1053 - } 1054 1067 1055 - dst->x1 = crtc_x; 1056 - dst->x2 = crtc_x + crtc_w; 1057 - dst->y1 = crtc_y; 1058 - dst->y2 = crtc_y + crtc_h; 1068 + if (intel_format_is_yuv(fb->format->format) && 1069 + (src_x % 2 || src_w % 2)) { 1070 + DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", 1071 + src_x, src_w); 1072 + return -EINVAL; 1073 + } 1074 + 1075 + /* Check size restrictions when scaling */ 1076 + if (src_w != crtc_w || src_h != crtc_h) { 1077 + unsigned int width_bytes; 1078 + int cpp = fb->format->cpp[0]; 1079 + 1080 + WARN_ON(!can_scale); 1081 + 1082 + width_bytes = ((src_x * cpp) & 63) + src_w * cpp; 1083 + 1084 + /* FIXME interlacing min height is 6 */ 1085 + if (INTEL_GEN(dev_priv) < 9 && ( 1086 + src_w < 3 || src_h < 3 || 1087 + src_w > 2048 || src_h > 2048 || 1088 + crtc_w < 3 || crtc_h < 3 || 1089 + width_bytes > 4096 || fb->pitches[0] > 4096)) { 1090 + 
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n"); 1091 + return -EINVAL; 1092 + } 1093 + } 1094 + } 1059 1095 1060 1096 if (INTEL_GEN(dev_priv) >= 9) { 1061 1097 ret = skl_check_plane_surface(crtc_state, state);
-1
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 168 168 if (gpu->funcs->debugfs_init) { 169 169 gpu->funcs->debugfs_init(gpu, dev->primary); 170 170 gpu->funcs->debugfs_init(gpu, dev->render); 171 - gpu->funcs->debugfs_init(gpu, dev->control); 172 171 } 173 172 #endif 174 173
-3
drivers/gpu/drm/msm/msm_debugfs.c
··· 140 140 if (ret) 141 141 return ret; 142 142 ret = late_init_minor(dev->render); 143 - if (ret) 144 - return ret; 145 - ret = late_init_minor(dev->control); 146 143 return ret; 147 144 } 148 145
+1 -3
drivers/gpu/drm/nouveau/nouveau_display.c
··· 338 338 if (c) { \ 339 339 p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \ 340 340 l = (list); \ 341 - c = 0; \ 342 341 while (p && l->gen_mask) { \ 343 342 if (l->gen_mask & (1 << (gen))) { \ 344 - drm_property_add_enum(p, c, l->type, l->name); \ 345 - c++; \ 343 + drm_property_add_enum(p, l->type, l->name); \ 346 344 } \ 347 345 l++; \ 348 346 } \
+1 -13
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
··· 238 238 239 239 static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val) 240 240 { 241 - #if 0 242 - /* The firmware uses LP DSI transactions like this to bring up 243 - * the hardware, which should be faster than using I2C to then 244 - * pass to the Toshiba. However, I was unable to get it to 245 - * work. 246 - */ 247 241 u8 msg[] = { 248 242 reg, 249 243 reg >> 8, ··· 247 253 val >> 24, 248 254 }; 249 255 250 - mipi_dsi_dcs_write_buffer(ts->dsi, msg, sizeof(msg)); 251 - #else 252 - rpi_touchscreen_i2c_write(ts, REG_WR_ADDRH, reg >> 8); 253 - rpi_touchscreen_i2c_write(ts, REG_WR_ADDRL, reg); 254 - rpi_touchscreen_i2c_write(ts, REG_WRITEH, val >> 8); 255 - rpi_touchscreen_i2c_write(ts, REG_WRITEL, val); 256 - #endif 256 + mipi_dsi_generic_write(ts->dsi, msg, sizeof(msg)); 257 257 258 258 return 0; 259 259 }
+1
drivers/gpu/drm/pl111/Makefile
··· 3 3 pl111_versatile.o \ 4 4 pl111_drv.o 5 5 6 + pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o 6 7 pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o 7 8 8 9 obj-$(CONFIG_DRM_PL111) += pl111_drm.o
+1
drivers/gpu/drm/pl111/pl111_drm.h
··· 79 79 const struct pl111_variant_data *variant; 80 80 void (*variant_display_enable) (struct drm_device *drm, u32 format); 81 81 void (*variant_display_disable) (struct drm_device *drm); 82 + bool use_device_memory; 82 83 }; 83 84 84 85 int pl111_display_init(struct drm_device *dev);
+32 -2
drivers/gpu/drm/pl111/pl111_drv.c
··· 60 60 #include <linux/slab.h> 61 61 #include <linux/of.h> 62 62 #include <linux/of_graph.h> 63 + #include <linux/of_reserved_mem.h> 63 64 64 65 #include <drm/drmP.h> 65 66 #include <drm/drm_atomic_helper.h> ··· 208 207 return ret; 209 208 } 210 209 210 + static struct drm_gem_object * 211 + pl111_gem_import_sg_table(struct drm_device *dev, 212 + struct dma_buf_attachment *attach, 213 + struct sg_table *sgt) 214 + { 215 + struct pl111_drm_dev_private *priv = dev->dev_private; 216 + 217 + /* 218 + * When using device-specific reserved memory we can't import 219 + * DMA buffers: those are passed by reference in any global 220 + * memory and we can only handle a specific range of memory. 221 + */ 222 + if (priv->use_device_memory) 223 + return ERR_PTR(-EINVAL); 224 + 225 + return drm_gem_cma_prime_import_sg_table(dev, attach, sgt); 226 + } 227 + 211 228 DEFINE_DRM_GEM_CMA_FOPS(drm_fops); 212 229 213 230 static struct drm_driver pl111_drm_driver = { ··· 246 227 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 247 228 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 248 229 .gem_prime_import = drm_gem_prime_import, 249 - .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, 230 + .gem_prime_import_sg_table = pl111_gem_import_sg_table, 250 231 .gem_prime_export = drm_gem_prime_export, 251 232 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, 252 233 ··· 276 257 drm->dev_private = priv; 277 258 priv->variant = variant; 278 259 260 + ret = of_reserved_mem_device_init(dev); 261 + if (!ret) { 262 + dev_info(dev, "using device-specific reserved memory\n"); 263 + priv->use_device_memory = true; 264 + } 265 + 279 266 if (of_property_read_u32(dev->of_node, "max-memory-bandwidth", 280 267 &priv->memory_bw)) { 281 268 dev_info(dev, "no max memory bandwidth specified, assume unlimited\n"); ··· 300 275 priv->regs = devm_ioremap_resource(dev, &amba_dev->res); 301 276 if (IS_ERR(priv->regs)) { 302 277 dev_err(dev, "%s failed mmio\n", __func__); 303 - return 
PTR_ERR(priv->regs); 278 + ret = PTR_ERR(priv->regs); 279 + goto dev_unref; 304 280 } 305 281 306 282 /* This may override some variant settings */ ··· 331 305 332 306 dev_unref: 333 307 drm_dev_unref(drm); 308 + of_reserved_mem_device_release(dev); 309 + 334 310 return ret; 335 311 } 336 312 337 313 static int pl111_amba_remove(struct amba_device *amba_dev) 338 314 { 315 + struct device *dev = &amba_dev->dev; 339 316 struct drm_device *drm = amba_get_drvdata(amba_dev); 340 317 struct pl111_drm_dev_private *priv = drm->dev_private; 341 318 ··· 348 319 drm_panel_bridge_remove(priv->bridge); 349 320 drm_mode_config_cleanup(drm); 350 321 drm_dev_unref(drm); 322 + of_reserved_mem_device_release(dev); 351 323 352 324 return 0; 353 325 }
+55 -1
drivers/gpu/drm/pl111/pl111_versatile.c
··· 1 1 #include <linux/amba/clcd-regs.h> 2 2 #include <linux/device.h> 3 3 #include <linux/of.h> 4 + #include <linux/of_platform.h> 4 5 #include <linux/regmap.h> 5 6 #include <linux/mfd/syscon.h> 6 7 #include <linux/bitops.h> 7 8 #include <linux/module.h> 8 9 #include <drm/drmP.h> 9 10 #include "pl111_versatile.h" 11 + #include "pl111_vexpress.h" 10 12 #include "pl111_drm.h" 11 13 12 14 static struct regmap *versatile_syscon_map; ··· 24 22 REALVIEW_CLCD_PB11MP, 25 23 REALVIEW_CLCD_PBA8, 26 24 REALVIEW_CLCD_PBX, 25 + VEXPRESS_CLCD_V2M, 27 26 }; 28 27 29 28 static const struct of_device_id versatile_clcd_of_match[] = { ··· 55 52 { 56 53 .compatible = "arm,realview-pbx-syscon", 57 54 .data = (void *)REALVIEW_CLCD_PBX, 55 + }, 56 + { 57 + .compatible = "arm,vexpress-muxfpga", 58 + .data = (void *)VEXPRESS_CLCD_V2M, 58 59 }, 59 60 {}, 60 61 }; ··· 293 286 .fb_bpp = 16, 294 287 }; 295 288 289 + /* 290 + * Versatile Express PL111 variant, again we just push the maximum 291 + * BPP to 16 to be able to get 1024x768 without saturating the memory 292 + * bus. The clockdivider also seems broken on the Versatile Express. 
293 + */ 294 + static const struct pl111_variant_data pl111_vexpress = { 295 + .name = "PL111 Versatile Express", 296 + .formats = pl111_realview_pixel_formats, 297 + .nformats = ARRAY_SIZE(pl111_realview_pixel_formats), 298 + .fb_bpp = 16, 299 + .broken_clockdivider = true, 300 + }; 301 + 296 302 int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) 297 303 { 298 304 const struct of_device_id *clcd_id; 299 305 enum versatile_clcd versatile_clcd_type; 300 306 struct device_node *np; 301 307 struct regmap *map; 308 + int ret; 302 309 303 310 np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, 304 311 &clcd_id); ··· 322 301 } 323 302 versatile_clcd_type = (enum versatile_clcd)clcd_id->data; 324 303 325 - map = syscon_node_to_regmap(np); 304 + /* Versatile Express special handling */ 305 + if (versatile_clcd_type == VEXPRESS_CLCD_V2M) { 306 + struct platform_device *pdev; 307 + 308 + /* Registers a driver for the muxfpga */ 309 + ret = vexpress_muxfpga_init(); 310 + if (ret) { 311 + dev_err(dev, "unable to initialize muxfpga driver\n"); 312 + return ret; 313 + } 314 + 315 + /* Call into deep Vexpress configuration API */ 316 + pdev = of_find_device_by_node(np); 317 + if (!pdev) { 318 + dev_err(dev, "can't find the sysreg device, deferring\n"); 319 + return -EPROBE_DEFER; 320 + } 321 + map = dev_get_drvdata(&pdev->dev); 322 + if (!map) { 323 + dev_err(dev, "sysreg has not yet probed\n"); 324 + platform_device_put(pdev); 325 + return -EPROBE_DEFER; 326 + } 327 + } else { 328 + map = syscon_node_to_regmap(np); 329 + } 330 + 326 331 if (IS_ERR(map)) { 327 332 dev_err(dev, "no Versatile syscon regmap\n"); 328 333 return PTR_ERR(map); ··· 386 339 priv->variant_display_enable = pl111_realview_clcd_enable; 387 340 priv->variant_display_disable = pl111_realview_clcd_disable; 388 341 dev_info(dev, "set up callbacks for RealView PL111\n"); 342 + break; 343 + case VEXPRESS_CLCD_V2M: 344 + priv->variant = &pl111_vexpress; 345 + 
dev_info(dev, "initializing Versatile Express PL111\n"); 346 + ret = pl111_vexpress_clcd_init(dev, priv, map); 347 + if (ret) 348 + return ret; 389 349 break; 390 350 default: 391 351 dev_info(dev, "unknown Versatile system controller\n");
+134
drivers/gpu/drm/pl111/pl111_vexpress.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Versatile Express PL111 handling 4 + * Copyright (C) 2018 Linus Walleij 5 + * 6 + * This module binds to the "arm,vexpress-muxfpga" device on the 7 + * Versatile Express configuration bus and sets up which CLCD instance 8 + * gets muxed out on the DVI bridge. 9 + */ 10 + #include <linux/device.h> 11 + #include <linux/module.h> 12 + #include <linux/regmap.h> 13 + #include <linux/vexpress.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/of.h> 16 + #include <linux/of_address.h> 17 + #include <linux/of_platform.h> 18 + #include "pl111_drm.h" 19 + #include "pl111_vexpress.h" 20 + 21 + #define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00 22 + #define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01 23 + #define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02 24 + 25 + int pl111_vexpress_clcd_init(struct device *dev, 26 + struct pl111_drm_dev_private *priv, 27 + struct regmap *map) 28 + { 29 + struct device_node *root; 30 + struct device_node *child; 31 + struct device_node *ct_clcd = NULL; 32 + bool has_coretile_clcd = false; 33 + bool has_coretile_hdlcd = false; 34 + bool mux_motherboard = true; 35 + u32 val; 36 + int ret; 37 + 38 + /* 39 + * Check if we have a CLCD or HDLCD on the core tile by checking if a 40 + * CLCD or HDLCD is available in the root of the device tree. 41 + */ 42 + root = of_find_node_by_path("/"); 43 + if (!root) 44 + return -EINVAL; 45 + 46 + for_each_available_child_of_node(root, child) { 47 + if (of_device_is_compatible(child, "arm,pl111")) { 48 + has_coretile_clcd = true; 49 + ct_clcd = child; 50 + break; 51 + } 52 + if (of_device_is_compatible(child, "arm,hdlcd")) { 53 + has_coretile_hdlcd = true; 54 + break; 55 + } 56 + } 57 + 58 + /* 59 + * If there is a coretile HDLCD and it has a driver, 60 + * do not mux the CLCD on the motherboard to the DVI. 
61 + */ 62 + if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD)) 63 + mux_motherboard = false; 64 + 65 + /* 66 + * On the Vexpress CA9 we let the CLCD on the coretile 67 + * take precedence, so also in this case do not mux the 68 + * motherboard to the DVI. 69 + */ 70 + if (has_coretile_clcd) 71 + mux_motherboard = false; 72 + 73 + if (mux_motherboard) { 74 + dev_info(dev, "DVI muxed to motherboard CLCD\n"); 75 + val = VEXPRESS_FPGAMUX_MOTHERBOARD; 76 + } else if (ct_clcd == dev->of_node) { 77 + dev_info(dev, 78 + "DVI muxed to daughterboard 1 (core tile) CLCD\n"); 79 + val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1; 80 + } else { 81 + dev_info(dev, "core tile graphics present\n"); 82 + dev_info(dev, "this device will be deactivated\n"); 83 + return -ENODEV; 84 + } 85 + 86 + ret = regmap_write(map, 0, val); 87 + if (ret) { 88 + dev_err(dev, "error setting DVI muxmode\n"); 89 + return -ENODEV; 90 + } 91 + 92 + return 0; 93 + } 94 + 95 + /* 96 + * This sets up the regmap pointer that will then be retrieved by 97 + * the detection code in pl111_versatile.c and passed in to the 98 + * pl111_vexpress_clcd_init() function above. 
99 + */ 100 + static int vexpress_muxfpga_probe(struct platform_device *pdev) 101 + { 102 + struct device *dev = &pdev->dev; 103 + struct regmap *map; 104 + 105 + map = devm_regmap_init_vexpress_config(&pdev->dev); 106 + if (IS_ERR(map)) 107 + return PTR_ERR(map); 108 + dev_set_drvdata(dev, map); 109 + 110 + return 0; 111 + } 112 + 113 + static const struct of_device_id vexpress_muxfpga_match[] = { 114 + { .compatible = "arm,vexpress-muxfpga", } 115 + }; 116 + 117 + static struct platform_driver vexpress_muxfpga_driver = { 118 + .driver = { 119 + .name = "vexpress-muxfpga", 120 + .of_match_table = of_match_ptr(vexpress_muxfpga_match), 121 + }, 122 + .probe = vexpress_muxfpga_probe, 123 + }; 124 + 125 + int vexpress_muxfpga_init(void) 126 + { 127 + int ret; 128 + 129 + ret = platform_driver_register(&vexpress_muxfpga_driver); 130 + /* -EBUSY just means this driver is already registered */ 131 + if (ret == -EBUSY) 132 + ret = 0; 133 + return ret; 134 + }
+29
drivers/gpu/drm/pl111/pl111_vexpress.h
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + struct device; 4 + struct pl111_drm_dev_private; 5 + struct regmap; 6 + 7 + #ifdef CONFIG_ARCH_VEXPRESS 8 + 9 + int pl111_vexpress_clcd_init(struct device *dev, 10 + struct pl111_drm_dev_private *priv, 11 + struct regmap *map); 12 + 13 + int vexpress_muxfpga_init(void); 14 + 15 + #else 16 + 17 + static inline int pl111_vexpress_clcd_init(struct device *dev, 18 + struct pl111_drm_dev_private *priv, 19 + struct regmap *map) 20 + { 21 + return -ENODEV; 22 + } 23 + 24 + static inline int vexpress_muxfpga_init(void) 25 + { 26 + return 0; 27 + } 28 + 29 + #endif
+4 -32
drivers/gpu/drm/qxl/qxl_cmd.c
··· 339 339 surface_height = surf->surf.height; 340 340 341 341 if (area->left < 0 || area->top < 0 || 342 - area->right > surface_width || area->bottom > surface_height) { 343 - qxl_io_log(qdev, "%s: not doing area update for " 344 - "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left, 345 - area->top, area->right, area->bottom, surface_width, surface_height); 342 + area->right > surface_width || area->bottom > surface_height) 346 343 return -EINVAL; 347 - } 344 + 348 345 mutex_lock(&qdev->update_area_mutex); 349 346 qdev->ram_header->update_area = *area; 350 347 qdev->ram_header->update_surface = surface_id; ··· 369 372 void qxl_io_destroy_primary(struct qxl_device *qdev) 370 373 { 371 374 wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC); 375 + qdev->primary_created = false; 372 376 } 373 377 374 378 void qxl_io_create_primary(struct qxl_device *qdev, ··· 395 397 create->type = QXL_SURF_TYPE_PRIMARY; 396 398 397 399 wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC); 400 + qdev->primary_created = true; 398 401 } 399 402 400 403 void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id) 401 404 { 402 405 DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id); 403 406 wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC); 404 - } 405 - 406 - void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...) 407 - { 408 - va_list args; 409 - 410 - va_start(args, fmt); 411 - vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args); 412 - va_end(args); 413 - /* 414 - * DO not do a DRM output here - this will call printk, which will 415 - * call back into qxl for rendering (qxl_fb) 416 - */ 417 - outb(0, qdev->io_base + QXL_IO_LOG); 418 407 } 419 408 420 409 void qxl_io_reset(struct qxl_device *qdev) ··· 411 426 412 427 void qxl_io_monitors_config(struct qxl_device *qdev) 413 428 { 414 - qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__, 415 - qdev->monitors_config ? 
416 - qdev->monitors_config->count : -1, 417 - qdev->monitors_config && qdev->monitors_config->count ? 418 - qdev->monitors_config->heads[0].width : -1, 419 - qdev->monitors_config && qdev->monitors_config->count ? 420 - qdev->monitors_config->heads[0].height : -1, 421 - qdev->monitors_config && qdev->monitors_config->count ? 422 - qdev->monitors_config->heads[0].x : -1, 423 - qdev->monitors_config && qdev->monitors_config->count ? 424 - qdev->monitors_config->heads[0].y : -1 425 - ); 426 - 427 429 wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC); 428 430 } 429 431
+89 -187
drivers/gpu/drm/qxl/qxl_display.c
··· 48 48 qdev->client_monitors_config = kzalloc( 49 49 sizeof(struct qxl_monitors_config) + 50 50 sizeof(struct qxl_head) * count, GFP_KERNEL); 51 - if (!qdev->client_monitors_config) { 52 - qxl_io_log(qdev, 53 - "%s: allocation failure for %u heads\n", 54 - __func__, count); 51 + if (!qdev->client_monitors_config) 55 52 return; 56 - } 57 53 } 58 54 qdev->client_monitors_config->count = count; 59 55 } ··· 70 74 num_monitors = qdev->rom->client_monitors_config.count; 71 75 crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config, 72 76 sizeof(qdev->rom->client_monitors_config)); 73 - if (crc != qdev->rom->client_monitors_config_crc) { 74 - qxl_io_log(qdev, "crc mismatch: have %X (%zd) != %X\n", crc, 75 - sizeof(qdev->rom->client_monitors_config), 76 - qdev->rom->client_monitors_config_crc); 77 + if (crc != qdev->rom->client_monitors_config_crc) 77 78 return MONITORS_CONFIG_BAD_CRC; 78 - } 79 79 if (!num_monitors) { 80 80 DRM_DEBUG_KMS("no client monitors configured\n"); 81 81 return status; ··· 162 170 udelay(5); 163 171 } 164 172 if (status == MONITORS_CONFIG_BAD_CRC) { 165 - qxl_io_log(qdev, "config: bad crc\n"); 166 173 DRM_DEBUG_KMS("ignoring client monitors config: bad crc"); 167 174 return; 168 175 } 169 176 if (status == MONITORS_CONFIG_UNCHANGED) { 170 - qxl_io_log(qdev, "config: unchanged\n"); 171 177 DRM_DEBUG_KMS("ignoring client monitors config: unchanged"); 172 178 return; 173 179 } ··· 258 268 return i - 1; 259 269 } 260 270 271 + static void qxl_send_monitors_config(struct qxl_device *qdev) 272 + { 273 + int i; 274 + 275 + BUG_ON(!qdev->ram_header->monitors_config); 276 + 277 + if (qdev->monitors_config->count == 0) 278 + return; 279 + 280 + for (i = 0 ; i < qdev->monitors_config->count ; ++i) { 281 + struct qxl_head *head = &qdev->monitors_config->heads[i]; 282 + 283 + if (head->y > 8192 || head->x > 8192 || 284 + head->width > 8192 || head->height > 8192) { 285 + DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", 286 + i, head->width, 
head->height, 287 + head->x, head->y); 288 + return; 289 + } 290 + } 291 + qxl_io_monitors_config(qdev); 292 + } 293 + 294 + static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, 295 + const char *reason) 296 + { 297 + struct drm_device *dev = crtc->dev; 298 + struct qxl_device *qdev = dev->dev_private; 299 + struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); 300 + struct qxl_head head; 301 + int oldcount, i = qcrtc->index; 302 + 303 + if (!qdev->primary_created) { 304 + DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason); 305 + return; 306 + } 307 + 308 + if (!qdev->monitors_config || 309 + qdev->monitors_config->max_allowed <= i) 310 + return; 311 + 312 + head.id = i; 313 + head.flags = 0; 314 + oldcount = qdev->monitors_config->count; 315 + if (crtc->state->active) { 316 + struct drm_display_mode *mode = &crtc->mode; 317 + head.width = mode->hdisplay; 318 + head.height = mode->vdisplay; 319 + head.x = crtc->x; 320 + head.y = crtc->y; 321 + if (qdev->monitors_config->count < i + 1) 322 + qdev->monitors_config->count = i + 1; 323 + } else if (i > 0) { 324 + head.width = 0; 325 + head.height = 0; 326 + head.x = 0; 327 + head.y = 0; 328 + if (qdev->monitors_config->count == i + 1) 329 + qdev->monitors_config->count = i; 330 + } else { 331 + DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason); 332 + return; 333 + } 334 + 335 + if (head.width == qdev->monitors_config->heads[i].width && 336 + head.height == qdev->monitors_config->heads[i].height && 337 + head.x == qdev->monitors_config->heads[i].x && 338 + head.y == qdev->monitors_config->heads[i].y && 339 + oldcount == qdev->monitors_config->count) 340 + return; 341 + 342 + DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n", 343 + i, head.width, head.height, head.x, head.y, 344 + crtc->state->active ? 
"on" : "off", reason); 345 + if (oldcount != qdev->monitors_config->count) 346 + DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n", 347 + oldcount, qdev->monitors_config->count, 348 + qdev->monitors_config->max_allowed); 349 + 350 + qdev->monitors_config->heads[i] = head; 351 + qxl_send_monitors_config(qdev); 352 + } 353 + 261 354 static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, 262 355 struct drm_crtc_state *old_crtc_state) 263 356 { ··· 356 283 drm_crtc_send_vblank_event(crtc, event); 357 284 spin_unlock_irqrestore(&dev->event_lock, flags); 358 285 } 286 + 287 + qxl_crtc_update_monitors_config(crtc, "flush"); 359 288 } 360 289 361 290 static void qxl_crtc_destroy(struct drm_crtc *crtc) ··· 456 381 return 0; 457 382 } 458 383 459 - static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, 460 - const struct drm_display_mode *mode, 461 - struct drm_display_mode *adjusted_mode) 462 - { 463 - struct drm_device *dev = crtc->dev; 464 - struct qxl_device *qdev = dev->dev_private; 465 - 466 - qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n", 467 - __func__, 468 - mode->hdisplay, mode->vdisplay, 469 - adjusted_mode->hdisplay, 470 - adjusted_mode->vdisplay); 471 - return true; 472 - } 473 - 474 - static void 475 - qxl_send_monitors_config(struct qxl_device *qdev) 476 - { 477 - int i; 478 - 479 - BUG_ON(!qdev->ram_header->monitors_config); 480 - 481 - if (qdev->monitors_config->count == 0) { 482 - qxl_io_log(qdev, "%s: 0 monitors??\n", __func__); 483 - return; 484 - } 485 - for (i = 0 ; i < qdev->monitors_config->count ; ++i) { 486 - struct qxl_head *head = &qdev->monitors_config->heads[i]; 487 - 488 - if (head->y > 8192 || head->x > 8192 || 489 - head->width > 8192 || head->height > 8192) { 490 - DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", 491 - i, head->width, head->height, 492 - head->x, head->y); 493 - return; 494 - } 495 - } 496 - qxl_io_monitors_config(qdev); 497 - } 498 - 499 - static void qxl_monitors_config_set(struct qxl_device *qdev, 500 - int index, 501 - 
unsigned x, unsigned y, 502 - unsigned width, unsigned height, 503 - unsigned surf_id) 504 - { 505 - DRM_DEBUG_KMS("%d:%dx%d+%d+%d\n", index, width, height, x, y); 506 - qdev->monitors_config->heads[index].x = x; 507 - qdev->monitors_config->heads[index].y = y; 508 - qdev->monitors_config->heads[index].width = width; 509 - qdev->monitors_config->heads[index].height = height; 510 - qdev->monitors_config->heads[index].surface_id = surf_id; 511 - 512 - } 513 - 514 - static void qxl_mode_set_nofb(struct drm_crtc *crtc) 515 - { 516 - struct qxl_device *qdev = crtc->dev->dev_private; 517 - struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); 518 - struct drm_display_mode *mode = &crtc->mode; 519 - 520 - DRM_DEBUG("Mode set (%d,%d)\n", 521 - mode->hdisplay, mode->vdisplay); 522 - 523 - qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 524 - mode->hdisplay, mode->vdisplay, 0); 525 - 526 - } 527 - 528 384 static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, 529 385 struct drm_crtc_state *old_state) 530 386 { 531 - DRM_DEBUG("\n"); 387 + qxl_crtc_update_monitors_config(crtc, "enable"); 532 388 } 533 389 534 390 static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, 535 391 struct drm_crtc_state *old_state) 536 392 { 537 - struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); 538 - struct qxl_device *qdev = crtc->dev->dev_private; 539 - 540 - qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0); 541 - 542 - qxl_send_monitors_config(qdev); 393 + qxl_crtc_update_monitors_config(crtc, "disable"); 543 394 } 544 395 545 396 static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { 546 - .mode_fixup = qxl_crtc_mode_fixup, 547 - .mode_set_nofb = qxl_mode_set_nofb, 548 397 .atomic_flush = qxl_crtc_atomic_flush, 549 398 .atomic_enable = qxl_crtc_atomic_enable, 550 399 .atomic_disable = qxl_crtc_atomic_disable, ··· 610 611 bo->is_primary = false; 611 612 } 612 613 } 613 - } 614 - 615 - static int qxl_plane_atomic_check(struct drm_plane *plane, 616 - struct drm_plane_state *state) 
617 - { 618 - return 0; 619 614 } 620 615 621 616 static void qxl_cursor_atomic_update(struct drm_plane *plane, ··· 817 824 }; 818 825 819 826 static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = { 820 - .atomic_check = qxl_plane_atomic_check, 821 827 .atomic_update = qxl_cursor_atomic_update, 822 828 .atomic_disable = qxl_cursor_atomic_disable, 823 829 .prepare_fb = qxl_plane_prepare_fb, ··· 941 949 return r; 942 950 } 943 951 944 - static void qxl_enc_dpms(struct drm_encoder *encoder, int mode) 945 - { 946 - DRM_DEBUG("\n"); 947 - } 948 - 949 - static void qxl_enc_prepare(struct drm_encoder *encoder) 950 - { 951 - DRM_DEBUG("\n"); 952 - } 953 - 954 - static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev, 955 - struct drm_encoder *encoder) 956 - { 957 - int i; 958 - struct qxl_output *output = drm_encoder_to_qxl_output(encoder); 959 - struct qxl_head *head; 960 - struct drm_display_mode *mode; 961 - 962 - BUG_ON(!encoder); 963 - /* TODO: ugly, do better */ 964 - i = output->index; 965 - if (!qdev->monitors_config || 966 - qdev->monitors_config->max_allowed <= i) { 967 - DRM_ERROR( 968 - "head number too large or missing monitors config: %p, %d", 969 - qdev->monitors_config, 970 - qdev->monitors_config ? 
971 - qdev->monitors_config->max_allowed : -1); 972 - return; 973 - } 974 - if (!encoder->crtc) { 975 - DRM_ERROR("missing crtc on encoder %p\n", encoder); 976 - return; 977 - } 978 - if (i != 0) 979 - DRM_DEBUG("missing for multiple monitors: no head holes\n"); 980 - head = &qdev->monitors_config->heads[i]; 981 - head->id = i; 982 - if (encoder->crtc->enabled) { 983 - mode = &encoder->crtc->mode; 984 - head->width = mode->hdisplay; 985 - head->height = mode->vdisplay; 986 - head->x = encoder->crtc->x; 987 - head->y = encoder->crtc->y; 988 - if (qdev->monitors_config->count < i + 1) 989 - qdev->monitors_config->count = i + 1; 990 - } else { 991 - head->width = 0; 992 - head->height = 0; 993 - head->x = 0; 994 - head->y = 0; 995 - } 996 - DRM_DEBUG_KMS("setting head %d to +%d+%d %dx%d out of %d\n", 997 - i, head->x, head->y, head->width, head->height, qdev->monitors_config->count); 998 - head->flags = 0; 999 - /* TODO - somewhere else to call this for multiple monitors 1000 - * (config_commit?) 
*/ 1001 - qxl_send_monitors_config(qdev); 1002 - } 1003 - 1004 - static void qxl_enc_commit(struct drm_encoder *encoder) 1005 - { 1006 - struct qxl_device *qdev = encoder->dev->dev_private; 1007 - 1008 - qxl_write_monitors_config_for_encoder(qdev, encoder); 1009 - DRM_DEBUG("\n"); 1010 - } 1011 - 1012 - static void qxl_enc_mode_set(struct drm_encoder *encoder, 1013 - struct drm_display_mode *mode, 1014 - struct drm_display_mode *adjusted_mode) 1015 - { 1016 - DRM_DEBUG("\n"); 1017 - } 1018 - 1019 952 static int qxl_conn_get_modes(struct drm_connector *connector) 1020 953 { 1021 954 unsigned pwidth = 1024; ··· 986 1069 987 1070 988 1071 static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = { 989 - .dpms = qxl_enc_dpms, 990 - .prepare = qxl_enc_prepare, 991 - .mode_set = qxl_enc_mode_set, 992 - .commit = qxl_enc_commit, 993 1072 }; 994 1073 995 1074 static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = { ··· 1013 1100 qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); 1014 1101 1015 1102 DRM_DEBUG("#%d connected: %d\n", output->index, connected); 1016 - if (!connected) 1017 - qxl_monitors_config_set(qdev, output->index, 0, 0, 0, 0, 0); 1018 1103 1019 1104 return connected ? connector_status_connected 1020 1105 : connector_status_disconnected; 1021 - } 1022 - 1023 - static int qxl_conn_set_property(struct drm_connector *connector, 1024 - struct drm_property *property, 1025 - uint64_t value) 1026 - { 1027 - DRM_DEBUG("\n"); 1028 - return 0; 1029 1106 } 1030 1107 1031 1108 static void qxl_conn_destroy(struct drm_connector *connector) ··· 1032 1129 .dpms = drm_helper_connector_dpms, 1033 1130 .detect = qxl_conn_detect, 1034 1131 .fill_modes = drm_helper_probe_single_connector_modes, 1035 - .set_property = qxl_conn_set_property, 1036 1132 .destroy = qxl_conn_destroy, 1037 1133 .reset = drm_atomic_helper_connector_reset, 1038 1134 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-3
drivers/gpu/drm/qxl/qxl_drv.h
··· 299 299 int monitors_config_height; 300 300 }; 301 301 302 - /* forward declaration for QXL_INFO_IO */ 303 - __printf(2,3) void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); 304 - 305 302 extern const struct drm_ioctl_desc qxl_ioctls[]; 306 303 extern int qxl_max_ioctl; 307 304
-2
drivers/gpu/drm/qxl/qxl_fb.c
··· 185 185 /* 186 186 * we are using a shadow draw buffer, at qdev->surface0_shadow 187 187 */ 188 - qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]\n", clips->x1, clips->x2, 189 - clips->y1, clips->y2); 190 188 image->dx = clips->x1; 191 189 image->dy = clips->y1; 192 190 image->width = clips->x2 - clips->x1;
+1 -2
drivers/gpu/drm/qxl/qxl_irq.c
··· 57 57 * to avoid endless loops). 58 58 */ 59 59 qdev->irq_received_error++; 60 - qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__); 60 + DRM_WARN("driver is in bug mode\n"); 61 61 } 62 62 if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) { 63 - qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n"); 64 63 schedule_work(&qdev->client_monitors_config_work); 65 64 } 66 65 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
+4 -4
drivers/gpu/drm/qxl/qxl_ttm.c
··· 105 105 static struct vm_operations_struct qxl_ttm_vm_ops; 106 106 static const struct vm_operations_struct *ttm_vm_ops; 107 107 108 - static int qxl_ttm_fault(struct vm_fault *vmf) 108 + static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf) 109 109 { 110 110 struct ttm_buffer_object *bo; 111 - int r; 111 + vm_fault_t ret; 112 112 113 113 bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; 114 114 if (bo == NULL) 115 115 return VM_FAULT_NOPAGE; 116 - r = ttm_vm_ops->fault(vmf); 117 - return r; 116 + ret = ttm_vm_ops->fault(vmf); 117 + return ret; 118 118 } 119 119 120 120 int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
+12 -1
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 76 76 #define VOP_WIN_GET_YRGBADDR(vop, win) \ 77 77 vop_readl(vop, win->base + win->phy->yrgb_mst.offset) 78 78 79 + #define VOP_WIN_TO_INDEX(vop_win) \ 80 + ((vop_win) - (vop_win)->vop->win) 81 + 79 82 #define to_vop(x) container_of(x, struct vop, crtc) 80 83 #define to_vop_win(x) container_of(x, struct vop_win, base) 81 84 ··· 711 708 dma_addr_t dma_addr; 712 709 uint32_t val; 713 710 bool rb_swap; 711 + int win_index = VOP_WIN_TO_INDEX(vop_win); 714 712 int format; 715 713 716 714 /* ··· 781 777 rb_swap = has_rb_swapped(fb->format->format); 782 778 VOP_WIN_SET(vop, win, rb_swap, rb_swap); 783 779 784 - if (fb->format->has_alpha) { 780 + /* 781 + * Blending win0 with the background color doesn't seem to work 782 + * correctly. We only get the background color, no matter the contents 783 + * of the win0 framebuffer. However, blending pre-multiplied color 784 + * with the default opaque black default background color is a no-op, 785 + * so we can just disable blending to get the correct result. 786 + */ 787 + if (fb->format->has_alpha && win_index > 0) { 785 788 VOP_WIN_SET(vop, win, dst_alpha_ctl, 786 789 DST_FACTOR_M0(ALPHA_SRC_INVERSE)); 787 790 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
+1 -1
drivers/gpu/drm/selftests/Makefile
··· 1 - obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += test-drm_mm.o 1 + obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm-helper.o
+9
drivers/gpu/drm/selftests/drm_helper_selftests.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* List each unit test as selftest(name, function) 3 + * 4 + * The name is used as both an enum and expanded as igt__name to create 5 + * a module parameter. It must be unique and legal for a C identifier. 6 + * 7 + * Tests are executed in order by igt/drm_selftests_helper 8 + */ 9 + selftest(check_plane_state, igt_check_plane_state)
+247
drivers/gpu/drm/selftests/test-drm-helper.c
··· 1 + /* 2 + * Test cases for the drm_kms_helper functions 3 + */ 4 + 5 + #define pr_fmt(fmt) "drm_kms_helper: " fmt 6 + 7 + #include <linux/module.h> 8 + 9 + #include <drm/drm_atomic_helper.h> 10 + #include <drm/drm_plane_helper.h> 11 + #include <drm/drm_modes.h> 12 + 13 + #define TESTS "drm_helper_selftests.h" 14 + #include "drm_selftest.h" 15 + 16 + #define FAIL(test, msg, ...) \ 17 + do { \ 18 + if (test) { \ 19 + pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ 20 + return -EINVAL; \ 21 + } \ 22 + } while (0) 23 + 24 + #define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n") 25 + 26 + static void set_src(struct drm_plane_state *plane_state, 27 + unsigned src_x, unsigned src_y, 28 + unsigned src_w, unsigned src_h) 29 + { 30 + plane_state->src_x = src_x; 31 + plane_state->src_y = src_y; 32 + plane_state->src_w = src_w; 33 + plane_state->src_h = src_h; 34 + } 35 + 36 + static bool check_src_eq(struct drm_plane_state *plane_state, 37 + unsigned src_x, unsigned src_y, 38 + unsigned src_w, unsigned src_h) 39 + { 40 + if (plane_state->src.x1 < 0) { 41 + pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1); 42 + drm_rect_debug_print("src: ", &plane_state->src, true); 43 + return false; 44 + } 45 + if (plane_state->src.y1 < 0) { 46 + pr_err("src y coordinate %x should never be below 0.\n", plane_state->src.y1); 47 + drm_rect_debug_print("src: ", &plane_state->src, true); 48 + return false; 49 + } 50 + 51 + if (plane_state->src.x1 != src_x || 52 + plane_state->src.y1 != src_y || 53 + drm_rect_width(&plane_state->src) != src_w || 54 + drm_rect_height(&plane_state->src) != src_h) { 55 + drm_rect_debug_print("src: ", &plane_state->src, true); 56 + return false; 57 + } 58 + 59 + return true; 60 + } 61 + 62 + static void set_crtc(struct drm_plane_state *plane_state, 63 + int crtc_x, int crtc_y, 64 + unsigned crtc_w, unsigned crtc_h) 65 + { 66 + plane_state->crtc_x = crtc_x; 67 + plane_state->crtc_y = crtc_y; 68 + 
plane_state->crtc_w = crtc_w; 69 + plane_state->crtc_h = crtc_h; 70 + } 71 + 72 + static bool check_crtc_eq(struct drm_plane_state *plane_state, 73 + int crtc_x, int crtc_y, 74 + unsigned crtc_w, unsigned crtc_h) 75 + { 76 + if (plane_state->dst.x1 != crtc_x || 77 + plane_state->dst.y1 != crtc_y || 78 + drm_rect_width(&plane_state->dst) != crtc_w || 79 + drm_rect_height(&plane_state->dst) != crtc_h) { 80 + drm_rect_debug_print("dst: ", &plane_state->dst, false); 81 + 82 + return false; 83 + } 84 + 85 + return true; 86 + } 87 + 88 + static int igt_check_plane_state(void *ignored) 89 + { 90 + int ret; 91 + 92 + const struct drm_crtc_state crtc_state = { 93 + .crtc = ZERO_SIZE_PTR, 94 + .enable = true, 95 + .active = true, 96 + .mode = { 97 + DRM_MODE("1024x768", 0, 65000, 1024, 1048, 98 + 1184, 1344, 0, 768, 771, 777, 806, 0, 99 + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) 100 + }, 101 + }; 102 + struct drm_framebuffer fb = { 103 + .width = 2048, 104 + .height = 2048 105 + }; 106 + struct drm_plane_state plane_state = { 107 + .crtc = ZERO_SIZE_PTR, 108 + .fb = &fb, 109 + .rotation = DRM_MODE_ROTATE_0 110 + }; 111 + 112 + /* Simple clipping, no scaling. */ 113 + set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16); 114 + set_crtc(&plane_state, 0, 0, fb.width, fb.height); 115 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 116 + DRM_PLANE_HELPER_NO_SCALING, 117 + DRM_PLANE_HELPER_NO_SCALING, 118 + false, false); 119 + FAIL(ret < 0, "Simple clipping check should pass\n"); 120 + FAIL_ON(!plane_state.visible); 121 + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16)); 122 + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); 123 + 124 + /* Rotated clipping + reflection, no scaling. 
*/ 125 + plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X; 126 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 127 + DRM_PLANE_HELPER_NO_SCALING, 128 + DRM_PLANE_HELPER_NO_SCALING, 129 + false, false); 130 + FAIL(ret < 0, "Rotated clipping check should pass\n"); 131 + FAIL_ON(!plane_state.visible); 132 + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16)); 133 + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); 134 + plane_state.rotation = DRM_MODE_ROTATE_0; 135 + 136 + /* Check whether positioning works correctly. */ 137 + set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16); 138 + set_crtc(&plane_state, 0, 0, 1023, 767); 139 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 140 + DRM_PLANE_HELPER_NO_SCALING, 141 + DRM_PLANE_HELPER_NO_SCALING, 142 + false, false); 143 + FAIL(!ret, "Should not be able to position on the crtc with can_position=false\n"); 144 + 145 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 146 + DRM_PLANE_HELPER_NO_SCALING, 147 + DRM_PLANE_HELPER_NO_SCALING, 148 + true, false); 149 + FAIL(ret < 0, "Simple positioning should work\n"); 150 + FAIL_ON(!plane_state.visible); 151 + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16)); 152 + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1023, 767)); 153 + 154 + /* Simple scaling tests. 
*/ 155 + set_src(&plane_state, 0, 0, 512 << 16, 384 << 16); 156 + set_crtc(&plane_state, 0, 0, 1024, 768); 157 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 158 + 0x8001, 159 + DRM_PLANE_HELPER_NO_SCALING, 160 + false, false); 161 + FAIL(!ret, "Upscaling out of range should fail.\n"); 162 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 163 + 0x8000, 164 + DRM_PLANE_HELPER_NO_SCALING, 165 + false, false); 166 + FAIL(ret < 0, "Upscaling exactly 2x should work\n"); 167 + FAIL_ON(!plane_state.visible); 168 + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16)); 169 + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); 170 + 171 + set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16); 172 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 173 + DRM_PLANE_HELPER_NO_SCALING, 174 + 0x1ffff, false, false); 175 + FAIL(!ret, "Downscaling out of range should fail.\n"); 176 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 177 + DRM_PLANE_HELPER_NO_SCALING, 178 + 0x20000, false, false); 179 + FAIL(ret < 0, "Should succeed with exact scaling limit\n"); 180 + FAIL_ON(!plane_state.visible); 181 + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16)); 182 + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); 183 + 184 + /* Testing rounding errors. 
*/ 185 + set_src(&plane_state, 0, 0, 0x40001, 0x40001); 186 + set_crtc(&plane_state, 1022, 766, 4, 4); 187 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 188 + DRM_PLANE_HELPER_NO_SCALING, 189 + 0x10001, 190 + true, false); 191 + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); 192 + FAIL_ON(!plane_state.visible); 193 + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16)); 194 + FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2)); 195 + 196 + set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001); 197 + set_crtc(&plane_state, -2, -2, 1028, 772); 198 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 199 + DRM_PLANE_HELPER_NO_SCALING, 200 + 0x10001, 201 + false, false); 202 + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); 203 + FAIL_ON(!plane_state.visible); 204 + FAIL_ON(!check_src_eq(&plane_state, 0x40002, 0x40002, 1024 << 16, 768 << 16)); 205 + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); 206 + 207 + set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff); 208 + set_crtc(&plane_state, 1022, 766, 4, 4); 209 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 210 + 0xffff, 211 + DRM_PLANE_HELPER_NO_SCALING, 212 + true, false); 213 + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); 214 + FAIL_ON(!plane_state.visible); 215 + /* Should not be rounded to 0x20001, which would be upscaling. 
*/ 216 + FAIL_ON(!check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16)); 217 + FAIL_ON(!check_crtc_eq(&plane_state, 1022, 766, 2, 2)); 218 + 219 + set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff); 220 + set_crtc(&plane_state, -2, -2, 1028, 772); 221 + ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, 222 + 0xffff, 223 + DRM_PLANE_HELPER_NO_SCALING, 224 + false, false); 225 + FAIL(ret < 0, "Should succeed by clipping to exact multiple"); 226 + FAIL_ON(!plane_state.visible); 227 + FAIL_ON(!check_src_eq(&plane_state, 0x3fffe, 0x3fffe, 1024 << 16, 768 << 16)); 228 + FAIL_ON(!check_crtc_eq(&plane_state, 0, 0, 1024, 768)); 229 + 230 + return 0; 231 + } 232 + 233 + #include "drm_selftest.c" 234 + 235 + static int __init test_drm_helper_init(void) 236 + { 237 + int err; 238 + 239 + err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL); 240 + 241 + return err > 0 ? 0 : err; 242 + } 243 + 244 + module_init(test_drm_helper_init); 245 + 246 + MODULE_AUTHOR("Intel Corporation"); 247 + MODULE_LICENSE("GPL");
+1 -1
drivers/gpu/drm/sti/sti_crtc.c
··· 357 357 res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor, 358 358 &sti_crtc_funcs, NULL); 359 359 if (res) { 360 - DRM_ERROR("Can't initialze CRTC\n"); 360 + DRM_ERROR("Can't initialize CRTC\n"); 361 361 return -EINVAL; 362 362 } 363 363
+45 -8
drivers/gpu/drm/stm/ltdc.c
··· 445 445 reg_set(ldev->regs, LTDC_SRCR, SRCR_IMR); 446 446 } 447 447 448 + #define CLK_TOLERANCE_HZ 50 449 + 450 + static enum drm_mode_status 451 + ltdc_crtc_mode_valid(struct drm_crtc *crtc, 452 + const struct drm_display_mode *mode) 453 + { 454 + struct ltdc_device *ldev = crtc_to_ltdc(crtc); 455 + int target = mode->clock * 1000; 456 + int target_min = target - CLK_TOLERANCE_HZ; 457 + int target_max = target + CLK_TOLERANCE_HZ; 458 + int result; 459 + 460 + /* 461 + * Accept all "preferred" modes: 462 + * - this is important for panels because panel clock tolerances are 463 + * bigger than hdmi ones and there is no reason to not accept them 464 + * (the fps may vary a little but it is not a problem). 465 + * - the hdmi preferred mode will be accepted too, but userland will 466 + * be able to use others hdmi "valid" modes if necessary. 467 + */ 468 + if (mode->type & DRM_MODE_TYPE_PREFERRED) 469 + return MODE_OK; 470 + 471 + result = clk_round_rate(ldev->pixel_clk, target); 472 + 473 + DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result); 474 + 475 + /* 476 + * Filter modes according to the clock value, particularly useful for 477 + * hdmi modes that require precise pixel clocks. 
478 + */ 479 + if (result < target_min || result > target_max) 480 + return MODE_CLOCK_RANGE; 481 + 482 + return MODE_OK; 483 + } 484 + 448 485 static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc, 449 486 const struct drm_display_mode *mode, 450 487 struct drm_display_mode *adjusted_mode) ··· 596 559 } 597 560 598 561 static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = { 562 + .mode_valid = ltdc_crtc_mode_valid, 599 563 .mode_fixup = ltdc_crtc_mode_fixup, 600 564 .mode_set_nofb = ltdc_crtc_mode_set_nofb, 601 565 .atomic_flush = ltdc_crtc_atomic_flush, ··· 860 822 861 823 plane = devm_kzalloc(dev, sizeof(*plane), GFP_KERNEL); 862 824 if (!plane) 863 - return 0; 825 + return NULL; 864 826 865 827 ret = drm_universal_plane_init(ddev, plane, possible_crtcs, 866 828 &ltdc_plane_funcs, formats, nb_fmt, 867 829 NULL, type, NULL); 868 830 if (ret < 0) 869 - return 0; 831 + return NULL; 870 832 871 833 drm_plane_helper_add(plane, &ltdc_plane_helper_funcs); 872 834 ··· 1025 987 &bridge[i]); 1026 988 1027 989 /* 1028 - * If at least one endpoint is ready, continue probing, 1029 - * else if at least one endpoint is -EPROBE_DEFER and 1030 - * there is no previous ready endpoints, defer probing. 990 + * If at least one endpoint is -EPROBE_DEFER, defer probing, 991 + * else if at least one endpoint is ready, continue probing. 1031 992 */ 1032 - if (!ret) 993 + if (ret == -EPROBE_DEFER) 994 + return ret; 995 + else if (!ret) 1033 996 endpoint_not_ready = 0; 1034 - else if (ret == -EPROBE_DEFER && endpoint_not_ready) 1035 - endpoint_not_ready = -EPROBE_DEFER; 1036 997 } 1037 998 1038 999 if (endpoint_not_ready)
+23 -18
drivers/gpu/drm/tinydrm/mi0283qt.c
··· 85 85 /* Memory Access Control */ 86 86 mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT); 87 87 88 - switch (mipi->rotation) { 89 - default: 90 - addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | 91 - ILI9341_MADCTL_MX; 92 - break; 93 - case 90: 94 - addr_mode = ILI9341_MADCTL_MY; 95 - break; 96 - case 180: 97 - addr_mode = ILI9341_MADCTL_MV; 98 - break; 99 - case 270: 100 - addr_mode = ILI9341_MADCTL_MX; 101 - break; 102 - } 103 - addr_mode |= ILI9341_MADCTL_BGR; 104 - mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); 105 - 106 88 /* Frame Rate */ 107 89 mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b); 108 90 ··· 110 128 msleep(100); 111 129 112 130 out_enable: 131 + /* The PiTFT (ili9340) has a hardware reset circuit that 132 + * resets only on power-on and not on each reboot through 133 + * a gpio like the rpi-display does. 134 + * As a result, we need to always apply the rotation value 135 + * regardless of the display "on/off" state. 136 + */ 137 + switch (mipi->rotation) { 138 + default: 139 + addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | 140 + ILI9341_MADCTL_MX; 141 + break; 142 + case 90: 143 + addr_mode = ILI9341_MADCTL_MY; 144 + break; 145 + case 180: 146 + addr_mode = ILI9341_MADCTL_MV; 147 + break; 148 + case 270: 149 + addr_mode = ILI9341_MADCTL_MX; 150 + break; 151 + } 152 + addr_mode |= ILI9341_MADCTL_BGR; 153 + mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); 113 154 mipi_dbi_enable_flush(mipi, crtc_state, plane_state); 114 155 } 115 156
+9
drivers/gpu/drm/v3d/Kconfig
··· 1 + config DRM_V3D 2 + tristate "Broadcom V3D 3.x and newer" 3 + depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST 4 + depends on DRM 5 + depends on COMMON_CLK 6 + select DRM_SCHED 7 + help 8 + Choose this option if you have a system that has a Broadcom 9 + V3D 3.x or newer GPU, such as BCM7268.
+18
drivers/gpu/drm/v3d/Makefile
··· 1 + # Please keep these build lists sorted! 2 + 3 + # core driver code 4 + v3d-y := \ 5 + v3d_bo.o \ 6 + v3d_drv.o \ 7 + v3d_fence.o \ 8 + v3d_gem.o \ 9 + v3d_irq.o \ 10 + v3d_mmu.o \ 11 + v3d_trace_points.o \ 12 + v3d_sched.o 13 + 14 + v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o 15 + 16 + obj-$(CONFIG_DRM_V3D) += v3d.o 17 + 18 + CFLAGS_v3d_trace_points.o := -I$(src)
+389
drivers/gpu/drm/v3d/v3d_bo.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2015-2018 Broadcom */ 3 + 4 + /** 5 + * DOC: V3D GEM BO management support 6 + * 7 + * Compared to VC4 (V3D 2.x), V3D 3.3 introduces an MMU between the 8 + * GPU and the bus, allowing us to use shmem objects for our storage 9 + * instead of CMA. 10 + * 11 + * Physically contiguous objects may still be imported to V3D, but the 12 + * driver doesn't allocate physically contiguous objects on its own. 13 + * Display engines requiring physically contiguous allocations should 14 + * look into Mesa's "renderonly" support (as used by the Mesa pl111 15 + * driver) for an example of how to integrate with V3D. 16 + * 17 + * Long term, we should support evicting pages from the MMU when under 18 + * memory pressure (thus the v3d_bo_get_pages() refcounting), but 19 + * that's not a high priority since our systems tend to not have swap. 20 + */ 21 + 22 + #include <linux/dma-buf.h> 23 + #include <linux/pfn_t.h> 24 + 25 + #include "v3d_drv.h" 26 + #include "uapi/drm/v3d_drm.h" 27 + 28 + /* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps 29 + * it for DMA. 30 + */ 31 + static int 32 + v3d_bo_get_pages(struct v3d_bo *bo) 33 + { 34 + struct drm_gem_object *obj = &bo->base; 35 + struct drm_device *dev = obj->dev; 36 + int npages = obj->size >> PAGE_SHIFT; 37 + int ret = 0; 38 + 39 + mutex_lock(&bo->lock); 40 + if (bo->pages_refcount++ != 0) 41 + goto unlock; 42 + 43 + if (!obj->import_attach) { 44 + bo->pages = drm_gem_get_pages(obj); 45 + if (IS_ERR(bo->pages)) { 46 + ret = PTR_ERR(bo->pages); 47 + goto unlock; 48 + } 49 + 50 + bo->sgt = drm_prime_pages_to_sg(bo->pages, npages); 51 + if (IS_ERR(bo->sgt)) { 52 + ret = PTR_ERR(bo->sgt); 53 + goto put_pages; 54 + } 55 + 56 + /* Map the pages for use by the GPU. 
*/ 57 + dma_map_sg(dev->dev, bo->sgt->sgl, 58 + bo->sgt->nents, DMA_BIDIRECTIONAL); 59 + } else { 60 + bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); 61 + if (!bo->pages) 62 + goto put_pages; 63 + 64 + drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages, 65 + NULL, npages); 66 + 67 + /* Note that dma-bufs come in mapped. */ 68 + } 69 + 70 + mutex_unlock(&bo->lock); 71 + 72 + return 0; 73 + 74 + put_pages: 75 + drm_gem_put_pages(obj, bo->pages, true, true); 76 + bo->pages = NULL; 77 + unlock: 78 + bo->pages_refcount--; 79 + mutex_unlock(&bo->lock); 80 + return ret; 81 + } 82 + 83 + static void 84 + v3d_bo_put_pages(struct v3d_bo *bo) 85 + { 86 + struct drm_gem_object *obj = &bo->base; 87 + 88 + mutex_lock(&bo->lock); 89 + if (--bo->pages_refcount == 0) { 90 + if (!obj->import_attach) { 91 + dma_unmap_sg(obj->dev->dev, bo->sgt->sgl, 92 + bo->sgt->nents, DMA_BIDIRECTIONAL); 93 + sg_free_table(bo->sgt); 94 + kfree(bo->sgt); 95 + drm_gem_put_pages(obj, bo->pages, true, true); 96 + } else { 97 + kfree(bo->pages); 98 + } 99 + } 100 + mutex_unlock(&bo->lock); 101 + } 102 + 103 + static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev, 104 + size_t unaligned_size) 105 + { 106 + struct v3d_dev *v3d = to_v3d_dev(dev); 107 + struct drm_gem_object *obj; 108 + struct v3d_bo *bo; 109 + size_t size = roundup(unaligned_size, PAGE_SIZE); 110 + int ret; 111 + 112 + if (size == 0) 113 + return ERR_PTR(-EINVAL); 114 + 115 + bo = kzalloc(sizeof(*bo), GFP_KERNEL); 116 + if (!bo) 117 + return ERR_PTR(-ENOMEM); 118 + obj = &bo->base; 119 + 120 + INIT_LIST_HEAD(&bo->vmas); 121 + INIT_LIST_HEAD(&bo->unref_head); 122 + mutex_init(&bo->lock); 123 + 124 + ret = drm_gem_object_init(dev, obj, size); 125 + if (ret) 126 + goto free_bo; 127 + 128 + spin_lock(&v3d->mm_lock); 129 + ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, 130 + obj->size >> PAGE_SHIFT, 131 + GMP_GRANULARITY >> PAGE_SHIFT, 0, 0); 132 + spin_unlock(&v3d->mm_lock); 133 + if (ret) 134 + goto free_obj; 
135 + 136 + return bo; 137 + 138 + free_obj: 139 + drm_gem_object_release(obj); 140 + free_bo: 141 + kfree(bo); 142 + return ERR_PTR(ret); 143 + } 144 + 145 + struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, 146 + size_t unaligned_size) 147 + { 148 + struct v3d_dev *v3d = to_v3d_dev(dev); 149 + struct drm_gem_object *obj; 150 + struct v3d_bo *bo; 151 + int ret; 152 + 153 + bo = v3d_bo_create_struct(dev, unaligned_size); 154 + if (IS_ERR(bo)) 155 + return bo; 156 + obj = &bo->base; 157 + 158 + bo->resv = &bo->_resv; 159 + reservation_object_init(bo->resv); 160 + 161 + ret = v3d_bo_get_pages(bo); 162 + if (ret) 163 + goto free_mm; 164 + 165 + v3d_mmu_insert_ptes(bo); 166 + 167 + mutex_lock(&v3d->bo_lock); 168 + v3d->bo_stats.num_allocated++; 169 + v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT; 170 + mutex_unlock(&v3d->bo_lock); 171 + 172 + return bo; 173 + 174 + free_mm: 175 + spin_lock(&v3d->mm_lock); 176 + drm_mm_remove_node(&bo->node); 177 + spin_unlock(&v3d->mm_lock); 178 + 179 + drm_gem_object_release(obj); 180 + kfree(bo); 181 + return ERR_PTR(ret); 182 + } 183 + 184 + /* Called DRM core on the last userspace/kernel unreference of the 185 + * BO. 
186 + */ 187 + void v3d_free_object(struct drm_gem_object *obj) 188 + { 189 + struct v3d_dev *v3d = to_v3d_dev(obj->dev); 190 + struct v3d_bo *bo = to_v3d_bo(obj); 191 + 192 + mutex_lock(&v3d->bo_lock); 193 + v3d->bo_stats.num_allocated--; 194 + v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT; 195 + mutex_unlock(&v3d->bo_lock); 196 + 197 + reservation_object_fini(&bo->_resv); 198 + 199 + v3d_bo_put_pages(bo); 200 + 201 + if (obj->import_attach) 202 + drm_prime_gem_destroy(obj, bo->sgt); 203 + 204 + v3d_mmu_remove_ptes(bo); 205 + spin_lock(&v3d->mm_lock); 206 + drm_mm_remove_node(&bo->node); 207 + spin_unlock(&v3d->mm_lock); 208 + 209 + mutex_destroy(&bo->lock); 210 + 211 + drm_gem_object_release(obj); 212 + kfree(bo); 213 + } 214 + 215 + struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj) 216 + { 217 + struct v3d_bo *bo = to_v3d_bo(obj); 218 + 219 + return bo->resv; 220 + } 221 + 222 + static void 223 + v3d_set_mmap_vma_flags(struct vm_area_struct *vma) 224 + { 225 + vma->vm_flags &= ~VM_PFNMAP; 226 + vma->vm_flags |= VM_MIXEDMAP; 227 + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 228 + } 229 + 230 + int v3d_gem_fault(struct vm_fault *vmf) 231 + { 232 + struct vm_area_struct *vma = vmf->vma; 233 + struct drm_gem_object *obj = vma->vm_private_data; 234 + struct v3d_bo *bo = to_v3d_bo(obj); 235 + unsigned long pfn; 236 + pgoff_t pgoff; 237 + int ret; 238 + 239 + /* We don't use vmf->pgoff since that has the fake offset: */ 240 + pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; 241 + pfn = page_to_pfn(bo->pages[pgoff]); 242 + 243 + ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); 244 + 245 + switch (ret) { 246 + case -EAGAIN: 247 + case 0: 248 + case -ERESTARTSYS: 249 + case -EINTR: 250 + case -EBUSY: 251 + /* 252 + * EBUSY is ok: this just means that another thread 253 + * already did the job. 
254 + */ 255 + return VM_FAULT_NOPAGE; 256 + case -ENOMEM: 257 + return VM_FAULT_OOM; 258 + default: 259 + return VM_FAULT_SIGBUS; 260 + } 261 + } 262 + 263 + int v3d_mmap(struct file *filp, struct vm_area_struct *vma) 264 + { 265 + int ret; 266 + 267 + ret = drm_gem_mmap(filp, vma); 268 + if (ret) 269 + return ret; 270 + 271 + v3d_set_mmap_vma_flags(vma); 272 + 273 + return ret; 274 + } 275 + 276 + int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 277 + { 278 + int ret; 279 + 280 + ret = drm_gem_mmap_obj(obj, obj->size, vma); 281 + if (ret < 0) 282 + return ret; 283 + 284 + v3d_set_mmap_vma_flags(vma); 285 + 286 + return 0; 287 + } 288 + 289 + struct sg_table * 290 + v3d_prime_get_sg_table(struct drm_gem_object *obj) 291 + { 292 + struct v3d_bo *bo = to_v3d_bo(obj); 293 + int npages = obj->size >> PAGE_SHIFT; 294 + 295 + return drm_prime_pages_to_sg(bo->pages, npages); 296 + } 297 + 298 + struct drm_gem_object * 299 + v3d_prime_import_sg_table(struct drm_device *dev, 300 + struct dma_buf_attachment *attach, 301 + struct sg_table *sgt) 302 + { 303 + struct drm_gem_object *obj; 304 + struct v3d_bo *bo; 305 + 306 + bo = v3d_bo_create_struct(dev, attach->dmabuf->size); 307 + if (IS_ERR(bo)) 308 + return ERR_CAST(bo); 309 + obj = &bo->base; 310 + 311 + bo->resv = attach->dmabuf->resv; 312 + 313 + bo->sgt = sgt; 314 + v3d_bo_get_pages(bo); 315 + 316 + v3d_mmu_insert_ptes(bo); 317 + 318 + return obj; 319 + } 320 + 321 + int v3d_create_bo_ioctl(struct drm_device *dev, void *data, 322 + struct drm_file *file_priv) 323 + { 324 + struct drm_v3d_create_bo *args = data; 325 + struct v3d_bo *bo = NULL; 326 + int ret; 327 + 328 + if (args->flags != 0) { 329 + DRM_INFO("unknown create_bo flags: %d\n", args->flags); 330 + return -EINVAL; 331 + } 332 + 333 + bo = v3d_bo_create(dev, file_priv, PAGE_ALIGN(args->size)); 334 + if (IS_ERR(bo)) 335 + return PTR_ERR(bo); 336 + 337 + args->offset = bo->node.start << PAGE_SHIFT; 338 + 339 + ret = 
drm_gem_handle_create(file_priv, &bo->base, &args->handle); 340 + drm_gem_object_put_unlocked(&bo->base); 341 + 342 + return ret; 343 + } 344 + 345 + int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, 346 + struct drm_file *file_priv) 347 + { 348 + struct drm_v3d_mmap_bo *args = data; 349 + struct drm_gem_object *gem_obj; 350 + int ret; 351 + 352 + if (args->flags != 0) { 353 + DRM_INFO("unknown mmap_bo flags: %d\n", args->flags); 354 + return -EINVAL; 355 + } 356 + 357 + gem_obj = drm_gem_object_lookup(file_priv, args->handle); 358 + if (!gem_obj) { 359 + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 360 + return -ENOENT; 361 + } 362 + 363 + ret = drm_gem_create_mmap_offset(gem_obj); 364 + if (ret == 0) 365 + args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); 366 + drm_gem_object_put_unlocked(gem_obj); 367 + 368 + return ret; 369 + } 370 + 371 + int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, 372 + struct drm_file *file_priv) 373 + { 374 + struct drm_v3d_get_bo_offset *args = data; 375 + struct drm_gem_object *gem_obj; 376 + struct v3d_bo *bo; 377 + 378 + gem_obj = drm_gem_object_lookup(file_priv, args->handle); 379 + if (!gem_obj) { 380 + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); 381 + return -ENOENT; 382 + } 383 + bo = to_v3d_bo(gem_obj); 384 + 385 + args->offset = bo->node.start << PAGE_SHIFT; 386 + 387 + drm_gem_object_put_unlocked(gem_obj); 388 + return 0; 389 + }
+191
drivers/gpu/drm/v3d/v3d_debugfs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2014-2018 Broadcom */ 3 + 4 + #include <linux/circ_buf.h> 5 + #include <linux/ctype.h> 6 + #include <linux/debugfs.h> 7 + #include <linux/pm_runtime.h> 8 + #include <linux/seq_file.h> 9 + #include <drm/drmP.h> 10 + 11 + #include "v3d_drv.h" 12 + #include "v3d_regs.h" 13 + 14 + #define REGDEF(reg) { reg, #reg } 15 + struct v3d_reg_def { 16 + u32 reg; 17 + const char *name; 18 + }; 19 + 20 + static const struct v3d_reg_def v3d_hub_reg_defs[] = { 21 + REGDEF(V3D_HUB_AXICFG), 22 + REGDEF(V3D_HUB_UIFCFG), 23 + REGDEF(V3D_HUB_IDENT0), 24 + REGDEF(V3D_HUB_IDENT1), 25 + REGDEF(V3D_HUB_IDENT2), 26 + REGDEF(V3D_HUB_IDENT3), 27 + REGDEF(V3D_HUB_INT_STS), 28 + REGDEF(V3D_HUB_INT_MSK_STS), 29 + }; 30 + 31 + static const struct v3d_reg_def v3d_gca_reg_defs[] = { 32 + REGDEF(V3D_GCA_SAFE_SHUTDOWN), 33 + REGDEF(V3D_GCA_SAFE_SHUTDOWN_ACK), 34 + }; 35 + 36 + static const struct v3d_reg_def v3d_core_reg_defs[] = { 37 + REGDEF(V3D_CTL_IDENT0), 38 + REGDEF(V3D_CTL_IDENT1), 39 + REGDEF(V3D_CTL_IDENT2), 40 + REGDEF(V3D_CTL_MISCCFG), 41 + REGDEF(V3D_CTL_INT_STS), 42 + REGDEF(V3D_CTL_INT_MSK_STS), 43 + REGDEF(V3D_CLE_CT0CS), 44 + REGDEF(V3D_CLE_CT0CA), 45 + REGDEF(V3D_CLE_CT0EA), 46 + REGDEF(V3D_CLE_CT1CS), 47 + REGDEF(V3D_CLE_CT1CA), 48 + REGDEF(V3D_CLE_CT1EA), 49 + 50 + REGDEF(V3D_PTB_BPCA), 51 + REGDEF(V3D_PTB_BPCS), 52 + 53 + REGDEF(V3D_MMU_CTL), 54 + REGDEF(V3D_MMU_VIO_ADDR), 55 + 56 + REGDEF(V3D_GMP_STATUS), 57 + REGDEF(V3D_GMP_CFG), 58 + REGDEF(V3D_GMP_VIO_ADDR), 59 + }; 60 + 61 + static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) 62 + { 63 + struct drm_info_node *node = (struct drm_info_node *)m->private; 64 + struct drm_device *dev = node->minor->dev; 65 + struct v3d_dev *v3d = to_v3d_dev(dev); 66 + int i, core; 67 + 68 + for (i = 0; i < ARRAY_SIZE(v3d_hub_reg_defs); i++) { 69 + seq_printf(m, "%s (0x%04x): 0x%08x\n", 70 + v3d_hub_reg_defs[i].name, v3d_hub_reg_defs[i].reg, 71 + 
V3D_READ(v3d_hub_reg_defs[i].reg)); 72 + } 73 + 74 + for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) { 75 + seq_printf(m, "%s (0x%04x): 0x%08x\n", 76 + v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg, 77 + V3D_GCA_READ(v3d_gca_reg_defs[i].reg)); 78 + } 79 + 80 + for (core = 0; core < v3d->cores; core++) { 81 + for (i = 0; i < ARRAY_SIZE(v3d_core_reg_defs); i++) { 82 + seq_printf(m, "core %d %s (0x%04x): 0x%08x\n", 83 + core, 84 + v3d_core_reg_defs[i].name, 85 + v3d_core_reg_defs[i].reg, 86 + V3D_CORE_READ(core, 87 + v3d_core_reg_defs[i].reg)); 88 + } 89 + } 90 + 91 + return 0; 92 + } 93 + 94 + static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) 95 + { 96 + struct drm_info_node *node = (struct drm_info_node *)m->private; 97 + struct drm_device *dev = node->minor->dev; 98 + struct v3d_dev *v3d = to_v3d_dev(dev); 99 + u32 ident0, ident1, ident2, ident3, cores; 100 + int ret, core; 101 + 102 + ret = pm_runtime_get_sync(v3d->dev); 103 + if (ret < 0) 104 + return ret; 105 + 106 + ident0 = V3D_READ(V3D_HUB_IDENT0); 107 + ident1 = V3D_READ(V3D_HUB_IDENT1); 108 + ident2 = V3D_READ(V3D_HUB_IDENT2); 109 + ident3 = V3D_READ(V3D_HUB_IDENT3); 110 + cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); 111 + 112 + seq_printf(m, "Revision: %d.%d.%d.%d\n", 113 + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER), 114 + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV), 115 + V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV), 116 + V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPIDX)); 117 + seq_printf(m, "MMU: %s\n", 118 + (ident2 & V3D_HUB_IDENT2_WITH_MMU) ? "yes" : "no"); 119 + seq_printf(m, "TFU: %s\n", 120 + (ident1 & V3D_HUB_IDENT1_WITH_TFU) ? "yes" : "no"); 121 + seq_printf(m, "TSY: %s\n", 122 + (ident1 & V3D_HUB_IDENT1_WITH_TSY) ? "yes" : "no"); 123 + seq_printf(m, "MSO: %s\n", 124 + (ident1 & V3D_HUB_IDENT1_WITH_MSO) ? "yes" : "no"); 125 + seq_printf(m, "L3C: %s (%dkb)\n", 126 + (ident1 & V3D_HUB_IDENT1_WITH_L3C) ? 
"yes" : "no", 127 + V3D_GET_FIELD(ident2, V3D_HUB_IDENT2_L3C_NKB)); 128 + 129 + for (core = 0; core < cores; core++) { 130 + u32 misccfg; 131 + u32 nslc, ntmu, qups; 132 + 133 + ident0 = V3D_CORE_READ(core, V3D_CTL_IDENT0); 134 + ident1 = V3D_CORE_READ(core, V3D_CTL_IDENT1); 135 + ident2 = V3D_CORE_READ(core, V3D_CTL_IDENT2); 136 + misccfg = V3D_CORE_READ(core, V3D_CTL_MISCCFG); 137 + 138 + nslc = V3D_GET_FIELD(ident1, V3D_IDENT1_NSLC); 139 + ntmu = V3D_GET_FIELD(ident1, V3D_IDENT1_NTMU); 140 + qups = V3D_GET_FIELD(ident1, V3D_IDENT1_QUPS); 141 + 142 + seq_printf(m, "Core %d:\n", core); 143 + seq_printf(m, " Revision: %d.%d\n", 144 + V3D_GET_FIELD(ident0, V3D_IDENT0_VER), 145 + V3D_GET_FIELD(ident1, V3D_IDENT1_REV)); 146 + seq_printf(m, " Slices: %d\n", nslc); 147 + seq_printf(m, " TMUs: %d\n", nslc * ntmu); 148 + seq_printf(m, " QPUs: %d\n", nslc * qups); 149 + seq_printf(m, " Semaphores: %d\n", 150 + V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); 151 + seq_printf(m, " BCG int: %d\n", 152 + (ident2 & V3D_IDENT2_BCG_INT) != 0); 153 + seq_printf(m, " Override TMU: %d\n", 154 + (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); 155 + } 156 + 157 + pm_runtime_mark_last_busy(v3d->dev); 158 + pm_runtime_put_autosuspend(v3d->dev); 159 + 160 + return 0; 161 + } 162 + 163 + static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused) 164 + { 165 + struct drm_info_node *node = (struct drm_info_node *)m->private; 166 + struct drm_device *dev = node->minor->dev; 167 + struct v3d_dev *v3d = to_v3d_dev(dev); 168 + 169 + mutex_lock(&v3d->bo_lock); 170 + seq_printf(m, "allocated bos: %d\n", 171 + v3d->bo_stats.num_allocated); 172 + seq_printf(m, "allocated bo size (kb): %ld\n", 173 + (long)v3d->bo_stats.pages_allocated << (PAGE_SHIFT - 10)); 174 + mutex_unlock(&v3d->bo_lock); 175 + 176 + return 0; 177 + } 178 + 179 + static const struct drm_info_list v3d_debugfs_list[] = { 180 + {"v3d_ident", v3d_v3d_debugfs_ident, 0}, 181 + {"v3d_regs", v3d_v3d_debugfs_regs, 0}, 182 + {"bo_stats", 
v3d_debugfs_bo_stats, 0}, 183 + }; 184 + 185 + int 186 + v3d_debugfs_init(struct drm_minor *minor) 187 + { 188 + return drm_debugfs_create_files(v3d_debugfs_list, 189 + ARRAY_SIZE(v3d_debugfs_list), 190 + minor->debugfs_root, minor); 191 + }
+371
drivers/gpu/drm/v3d/v3d_drv.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2014-2018 Broadcom */ 3 + 4 + /** 5 + * DOC: Broadcom V3D Graphics Driver 6 + * 7 + * This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs. 8 + * For V3D 2.x support, see the VC4 driver. 9 + * 10 + * Currently only single-core rendering using the binner and renderer 11 + * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD 12 + * (compute shader dispatch) are not yet supported. 13 + */ 14 + 15 + #include <linux/clk.h> 16 + #include <linux/device.h> 17 + #include <linux/io.h> 18 + #include <linux/module.h> 19 + #include <linux/of_platform.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/pm_runtime.h> 22 + #include <drm/drm_fb_cma_helper.h> 23 + #include <drm/drm_fb_helper.h> 24 + 25 + #include "uapi/drm/v3d_drm.h" 26 + #include "v3d_drv.h" 27 + #include "v3d_regs.h" 28 + 29 + #define DRIVER_NAME "v3d" 30 + #define DRIVER_DESC "Broadcom V3D graphics" 31 + #define DRIVER_DATE "20180419" 32 + #define DRIVER_MAJOR 1 33 + #define DRIVER_MINOR 0 34 + #define DRIVER_PATCHLEVEL 0 35 + 36 + #ifdef CONFIG_PM 37 + static int v3d_runtime_suspend(struct device *dev) 38 + { 39 + struct drm_device *drm = dev_get_drvdata(dev); 40 + struct v3d_dev *v3d = to_v3d_dev(drm); 41 + 42 + v3d_irq_disable(v3d); 43 + 44 + clk_disable_unprepare(v3d->clk); 45 + 46 + return 0; 47 + } 48 + 49 + static int v3d_runtime_resume(struct device *dev) 50 + { 51 + struct drm_device *drm = dev_get_drvdata(dev); 52 + struct v3d_dev *v3d = to_v3d_dev(drm); 53 + int ret; 54 + 55 + ret = clk_prepare_enable(v3d->clk); 56 + if (ret != 0) 57 + return ret; 58 + 59 + /* XXX: VPM base */ 60 + 61 + v3d_mmu_set_page_table(v3d); 62 + v3d_irq_enable(v3d); 63 + 64 + return 0; 65 + } 66 + #endif 67 + 68 + static const struct dev_pm_ops v3d_v3d_pm_ops = { 69 + SET_RUNTIME_PM_OPS(v3d_runtime_suspend, v3d_runtime_resume, NULL) 70 + }; 71 + 72 + static int v3d_get_param_ioctl(struct drm_device *dev, void *data, 73 + 
struct drm_file *file_priv) 74 + { 75 + struct v3d_dev *v3d = to_v3d_dev(dev); 76 + struct drm_v3d_get_param *args = data; 77 + int ret; 78 + static const u32 reg_map[] = { 79 + [DRM_V3D_PARAM_V3D_UIFCFG] = V3D_HUB_UIFCFG, 80 + [DRM_V3D_PARAM_V3D_HUB_IDENT1] = V3D_HUB_IDENT1, 81 + [DRM_V3D_PARAM_V3D_HUB_IDENT2] = V3D_HUB_IDENT2, 82 + [DRM_V3D_PARAM_V3D_HUB_IDENT3] = V3D_HUB_IDENT3, 83 + [DRM_V3D_PARAM_V3D_CORE0_IDENT0] = V3D_CTL_IDENT0, 84 + [DRM_V3D_PARAM_V3D_CORE0_IDENT1] = V3D_CTL_IDENT1, 85 + [DRM_V3D_PARAM_V3D_CORE0_IDENT2] = V3D_CTL_IDENT2, 86 + }; 87 + 88 + if (args->pad != 0) 89 + return -EINVAL; 90 + 91 + /* Note that DRM_V3D_PARAM_V3D_CORE0_IDENT0 is 0, so we need 92 + * to explicitly allow it in the "the register in our 93 + * parameter map" check. 94 + */ 95 + if (args->param < ARRAY_SIZE(reg_map) && 96 + (reg_map[args->param] || 97 + args->param == DRM_V3D_PARAM_V3D_CORE0_IDENT0)) { 98 + u32 offset = reg_map[args->param]; 99 + 100 + if (args->value != 0) 101 + return -EINVAL; 102 + 103 + ret = pm_runtime_get_sync(v3d->dev); 104 + if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 && 105 + args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) { 106 + args->value = V3D_CORE_READ(0, offset); 107 + } else { 108 + args->value = V3D_READ(offset); 109 + } 110 + pm_runtime_mark_last_busy(v3d->dev); 111 + pm_runtime_put_autosuspend(v3d->dev); 112 + return 0; 113 + } 114 + 115 + /* Any params that aren't just register reads would go here. 
*/ 116 + 117 + DRM_DEBUG("Unknown parameter %d\n", args->param); 118 + return -EINVAL; 119 + } 120 + 121 + static int 122 + v3d_open(struct drm_device *dev, struct drm_file *file) 123 + { 124 + struct v3d_dev *v3d = to_v3d_dev(dev); 125 + struct v3d_file_priv *v3d_priv; 126 + int i; 127 + 128 + v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL); 129 + if (!v3d_priv) 130 + return -ENOMEM; 131 + 132 + v3d_priv->v3d = v3d; 133 + 134 + for (i = 0; i < V3D_MAX_QUEUES; i++) { 135 + drm_sched_entity_init(&v3d->queue[i].sched, 136 + &v3d_priv->sched_entity[i], 137 + &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL], 138 + 32, NULL); 139 + } 140 + 141 + file->driver_priv = v3d_priv; 142 + 143 + return 0; 144 + } 145 + 146 + static void 147 + v3d_postclose(struct drm_device *dev, struct drm_file *file) 148 + { 149 + struct v3d_dev *v3d = to_v3d_dev(dev); 150 + struct v3d_file_priv *v3d_priv = file->driver_priv; 151 + enum v3d_queue q; 152 + 153 + for (q = 0; q < V3D_MAX_QUEUES; q++) { 154 + drm_sched_entity_fini(&v3d->queue[q].sched, 155 + &v3d_priv->sched_entity[q]); 156 + } 157 + 158 + kfree(v3d_priv); 159 + } 160 + 161 + static const struct file_operations v3d_drm_fops = { 162 + .owner = THIS_MODULE, 163 + .open = drm_open, 164 + .release = drm_release, 165 + .unlocked_ioctl = drm_ioctl, 166 + .mmap = v3d_mmap, 167 + .poll = drm_poll, 168 + .read = drm_read, 169 + .compat_ioctl = drm_compat_ioctl, 170 + .llseek = noop_llseek, 171 + }; 172 + 173 + /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP 174 + * protection between clients. Note that render nodes would be be 175 + * able to submit CLs that could access BOs from clients authenticated 176 + * with the master node. 
177 + */ 178 + static const struct drm_ioctl_desc v3d_drm_ioctls[] = { 179 + DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), 180 + DRM_IOCTL_DEF_DRV(V3D_WAIT_BO, v3d_wait_bo_ioctl, DRM_RENDER_ALLOW), 181 + DRM_IOCTL_DEF_DRV(V3D_CREATE_BO, v3d_create_bo_ioctl, DRM_RENDER_ALLOW), 182 + DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW), 183 + DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW), 184 + DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW), 185 + }; 186 + 187 + static const struct vm_operations_struct v3d_vm_ops = { 188 + .fault = v3d_gem_fault, 189 + .open = drm_gem_vm_open, 190 + .close = drm_gem_vm_close, 191 + }; 192 + 193 + static struct drm_driver v3d_drm_driver = { 194 + .driver_features = (DRIVER_GEM | 195 + DRIVER_RENDER | 196 + DRIVER_PRIME | 197 + DRIVER_SYNCOBJ), 198 + 199 + .open = v3d_open, 200 + .postclose = v3d_postclose, 201 + 202 + #if defined(CONFIG_DEBUG_FS) 203 + .debugfs_init = v3d_debugfs_init, 204 + #endif 205 + 206 + .gem_free_object_unlocked = v3d_free_object, 207 + .gem_vm_ops = &v3d_vm_ops, 208 + 209 + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 210 + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 211 + .gem_prime_import = drm_gem_prime_import, 212 + .gem_prime_export = drm_gem_prime_export, 213 + .gem_prime_res_obj = v3d_prime_res_obj, 214 + .gem_prime_get_sg_table = v3d_prime_get_sg_table, 215 + .gem_prime_import_sg_table = v3d_prime_import_sg_table, 216 + .gem_prime_mmap = v3d_prime_mmap, 217 + 218 + .ioctls = v3d_drm_ioctls, 219 + .num_ioctls = ARRAY_SIZE(v3d_drm_ioctls), 220 + .fops = &v3d_drm_fops, 221 + 222 + .name = DRIVER_NAME, 223 + .desc = DRIVER_DESC, 224 + .date = DRIVER_DATE, 225 + .major = DRIVER_MAJOR, 226 + .minor = DRIVER_MINOR, 227 + .patchlevel = DRIVER_PATCHLEVEL, 228 + }; 229 + 230 + static const struct of_device_id v3d_of_match[] = { 231 + { .compatible = "brcm,7268-v3d" }, 232 + { .compatible = 
"brcm,7278-v3d" }, 233 + {}, 234 + }; 235 + MODULE_DEVICE_TABLE(of, v3d_of_match); 236 + 237 + static int 238 + map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name) 239 + { 240 + struct resource *res = 241 + platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name); 242 + 243 + *regs = devm_ioremap_resource(v3d->dev, res); 244 + return PTR_ERR_OR_ZERO(*regs); 245 + } 246 + 247 + static int v3d_platform_drm_probe(struct platform_device *pdev) 248 + { 249 + struct device *dev = &pdev->dev; 250 + struct drm_device *drm; 251 + struct v3d_dev *v3d; 252 + int ret; 253 + u32 ident1; 254 + 255 + dev->coherent_dma_mask = DMA_BIT_MASK(36); 256 + 257 + v3d = kzalloc(sizeof(*v3d), GFP_KERNEL); 258 + if (!v3d) 259 + return -ENOMEM; 260 + v3d->dev = dev; 261 + v3d->pdev = pdev; 262 + drm = &v3d->drm; 263 + 264 + ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); 265 + if (ret) 266 + goto dev_free; 267 + 268 + ret = map_regs(v3d, &v3d->hub_regs, "hub"); 269 + if (ret) 270 + goto dev_free; 271 + 272 + ret = map_regs(v3d, &v3d->core_regs[0], "core0"); 273 + if (ret) 274 + goto dev_free; 275 + 276 + ident1 = V3D_READ(V3D_HUB_IDENT1); 277 + v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + 278 + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); 279 + v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); 280 + WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ 281 + 282 + if (v3d->ver < 41) { 283 + ret = map_regs(v3d, &v3d->gca_regs, "gca"); 284 + if (ret) 285 + goto dev_free; 286 + } 287 + 288 + v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, 289 + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); 290 + if (!v3d->mmu_scratch) { 291 + dev_err(dev, "Failed to allocate MMU scratch page\n"); 292 + ret = -ENOMEM; 293 + goto dev_free; 294 + } 295 + 296 + pm_runtime_use_autosuspend(dev); 297 + pm_runtime_set_autosuspend_delay(dev, 50); 298 + pm_runtime_enable(dev); 299 + 300 + ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev); 301 + if 
(ret) 302 + goto dma_free; 303 + 304 + platform_set_drvdata(pdev, drm); 305 + drm->dev_private = v3d; 306 + 307 + ret = v3d_gem_init(drm); 308 + if (ret) 309 + goto dev_destroy; 310 + 311 + v3d_irq_init(v3d); 312 + 313 + ret = drm_dev_register(drm, 0); 314 + if (ret) 315 + goto gem_destroy; 316 + 317 + return 0; 318 + 319 + gem_destroy: 320 + v3d_gem_destroy(drm); 321 + dev_destroy: 322 + drm_dev_put(drm); 323 + dma_free: 324 + dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); 325 + dev_free: 326 + kfree(v3d); 327 + return ret; 328 + } 329 + 330 + static int v3d_platform_drm_remove(struct platform_device *pdev) 331 + { 332 + struct drm_device *drm = platform_get_drvdata(pdev); 333 + struct v3d_dev *v3d = to_v3d_dev(drm); 334 + 335 + drm_dev_unregister(drm); 336 + 337 + v3d_gem_destroy(drm); 338 + 339 + drm_dev_put(drm); 340 + 341 + dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); 342 + 343 + return 0; 344 + } 345 + 346 + static struct platform_driver v3d_platform_driver = { 347 + .probe = v3d_platform_drm_probe, 348 + .remove = v3d_platform_drm_remove, 349 + .driver = { 350 + .name = "v3d", 351 + .of_match_table = v3d_of_match, 352 + }, 353 + }; 354 + 355 + static int __init v3d_drm_register(void) 356 + { 357 + return platform_driver_register(&v3d_platform_driver); 358 + } 359 + 360 + static void __exit v3d_drm_unregister(void) 361 + { 362 + platform_driver_unregister(&v3d_platform_driver); 363 + } 364 + 365 + module_init(v3d_drm_register); 366 + module_exit(v3d_drm_unregister); 367 + 368 + MODULE_ALIAS("platform:v3d-drm"); 369 + MODULE_DESCRIPTION("Broadcom V3D DRM Driver"); 370 + MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); 371 + MODULE_LICENSE("GPL v2");
+294
drivers/gpu/drm/v3d/v3d_drv.h
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2015-2018 Broadcom */ 3 + 4 + #include <linux/reservation.h> 5 + #include <drm/drmP.h> 6 + #include <drm/drm_encoder.h> 7 + #include <drm/drm_gem.h> 8 + #include <drm/gpu_scheduler.h> 9 + 10 + #define GMP_GRANULARITY (128 * 1024) 11 + 12 + /* Enum for each of the V3D queues. We maintain various queue 13 + * tracking as an array because at some point we'll want to support 14 + * the TFU (texture formatting unit) as another queue. 15 + */ 16 + enum v3d_queue { 17 + V3D_BIN, 18 + V3D_RENDER, 19 + }; 20 + 21 + #define V3D_MAX_QUEUES (V3D_RENDER + 1) 22 + 23 + struct v3d_queue_state { 24 + struct drm_gpu_scheduler sched; 25 + 26 + u64 fence_context; 27 + u64 emit_seqno; 28 + u64 finished_seqno; 29 + }; 30 + 31 + struct v3d_dev { 32 + struct drm_device drm; 33 + 34 + /* Short representation (e.g. 33, 41) of the V3D tech version 35 + * and revision. 36 + */ 37 + int ver; 38 + 39 + struct device *dev; 40 + struct platform_device *pdev; 41 + void __iomem *hub_regs; 42 + void __iomem *core_regs[3]; 43 + void __iomem *bridge_regs; 44 + void __iomem *gca_regs; 45 + struct clk *clk; 46 + 47 + /* Virtual and DMA addresses of the single shared page table. */ 48 + volatile u32 *pt; 49 + dma_addr_t pt_paddr; 50 + 51 + /* Virtual and DMA addresses of the MMU's scratch page. When 52 + * a read or write is invalid in the MMU, it will be 53 + * redirected here. 54 + */ 55 + void *mmu_scratch; 56 + dma_addr_t mmu_scratch_paddr; 57 + 58 + /* Number of V3D cores. */ 59 + u32 cores; 60 + 61 + /* Allocator managing the address space. All units are in 62 + * number of pages. 63 + */ 64 + struct drm_mm mm; 65 + spinlock_t mm_lock; 66 + 67 + struct work_struct overflow_mem_work; 68 + 69 + struct v3d_exec_info *bin_job; 70 + struct v3d_exec_info *render_job; 71 + 72 + struct v3d_queue_state queue[V3D_MAX_QUEUES]; 73 + 74 + /* Spinlock used to synchronize the overflow memory 75 + * management against bin job submission. 
76 + */ 77 + spinlock_t job_lock; 78 + 79 + /* Protects bo_stats */ 80 + struct mutex bo_lock; 81 + 82 + /* Lock taken when resetting the GPU, to keep multiple 83 + * processes from trying to park the scheduler threads and 84 + * reset at once. 85 + */ 86 + struct mutex reset_lock; 87 + 88 + struct { 89 + u32 num_allocated; 90 + u32 pages_allocated; 91 + } bo_stats; 92 + }; 93 + 94 + static inline struct v3d_dev * 95 + to_v3d_dev(struct drm_device *dev) 96 + { 97 + return (struct v3d_dev *)dev->dev_private; 98 + } 99 + 100 + /* The per-fd struct, which tracks the MMU mappings. */ 101 + struct v3d_file_priv { 102 + struct v3d_dev *v3d; 103 + 104 + struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; 105 + }; 106 + 107 + /* Tracks a mapping of a BO into a per-fd address space */ 108 + struct v3d_vma { 109 + struct v3d_page_table *pt; 110 + struct list_head list; /* entry in v3d_bo.vmas */ 111 + }; 112 + 113 + struct v3d_bo { 114 + struct drm_gem_object base; 115 + 116 + struct mutex lock; 117 + 118 + struct drm_mm_node node; 119 + 120 + u32 pages_refcount; 121 + struct page **pages; 122 + struct sg_table *sgt; 123 + void *vaddr; 124 + 125 + struct list_head vmas; /* list of v3d_vma */ 126 + 127 + /* List entry for the BO's position in 128 + * v3d_exec_info->unref_list 129 + */ 130 + struct list_head unref_head; 131 + 132 + /* normally (resv == &_resv) except for imported bo's */ 133 + struct reservation_object *resv; 134 + struct reservation_object _resv; 135 + }; 136 + 137 + static inline struct v3d_bo * 138 + to_v3d_bo(struct drm_gem_object *bo) 139 + { 140 + return (struct v3d_bo *)bo; 141 + } 142 + 143 + struct v3d_fence { 144 + struct dma_fence base; 145 + struct drm_device *dev; 146 + /* v3d seqno for signaled() test */ 147 + u64 seqno; 148 + enum v3d_queue queue; 149 + }; 150 + 151 + static inline struct v3d_fence * 152 + to_v3d_fence(struct dma_fence *fence) 153 + { 154 + return (struct v3d_fence *)fence; 155 + } 156 + 157 + #define V3D_READ(offset) 
readl(v3d->hub_regs + offset) 158 + #define V3D_WRITE(offset, val) writel(val, v3d->hub_regs + offset) 159 + 160 + #define V3D_BRIDGE_READ(offset) readl(v3d->bridge_regs + offset) 161 + #define V3D_BRIDGE_WRITE(offset, val) writel(val, v3d->bridge_regs + offset) 162 + 163 + #define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset) 164 + #define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset) 165 + 166 + #define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset) 167 + #define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset) 168 + 169 + struct v3d_job { 170 + struct drm_sched_job base; 171 + 172 + struct v3d_exec_info *exec; 173 + 174 + /* An optional fence userspace can pass in for the job to depend on. */ 175 + struct dma_fence *in_fence; 176 + 177 + /* v3d fence to be signaled by IRQ handler when the job is complete. */ 178 + struct dma_fence *done_fence; 179 + 180 + /* GPU virtual addresses of the start/end of the CL job. */ 181 + u32 start, end; 182 + }; 183 + 184 + struct v3d_exec_info { 185 + struct v3d_dev *v3d; 186 + 187 + struct v3d_job bin, render; 188 + 189 + /* Fence for when the scheduler considers the binner to be 190 + * done, for render to depend on. 191 + */ 192 + struct dma_fence *bin_done_fence; 193 + 194 + struct kref refcount; 195 + 196 + /* This is the array of BOs that were looked up at the start of exec. */ 197 + struct v3d_bo **bo; 198 + u32 bo_count; 199 + 200 + /* List of overflow BOs used in the job that need to be 201 + * released once the job is complete. 202 + */ 203 + struct list_head unref_list; 204 + 205 + /* Submitted tile memory allocation start/size, tile state. */ 206 + u32 qma, qms, qts; 207 + }; 208 + 209 + /** 210 + * _wait_for - magic (register) wait macro 211 + * 212 + * Does the right thing for modeset paths when run under kdgb or similar atomic 213 + * contexts. 
Note that it's important that we check the condition again after 214 + * having timed out, since the timeout could be due to preemption or similar and 215 + * we've never had a chance to check the condition before the timeout. 216 + */ 217 + #define wait_for(COND, MS) ({ \ 218 + unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ 219 + int ret__ = 0; \ 220 + while (!(COND)) { \ 221 + if (time_after(jiffies, timeout__)) { \ 222 + if (!(COND)) \ 223 + ret__ = -ETIMEDOUT; \ 224 + break; \ 225 + } \ 226 + msleep(1); \ 227 + } \ 228 + ret__; \ 229 + }) 230 + 231 + static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) 232 + { 233 + /* nsecs_to_jiffies64() does not guard against overflow */ 234 + if (NSEC_PER_SEC % HZ && 235 + div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) 236 + return MAX_JIFFY_OFFSET; 237 + 238 + return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); 239 + } 240 + 241 + /* v3d_bo.c */ 242 + void v3d_free_object(struct drm_gem_object *gem_obj); 243 + struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, 244 + size_t size); 245 + int v3d_create_bo_ioctl(struct drm_device *dev, void *data, 246 + struct drm_file *file_priv); 247 + int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data, 248 + struct drm_file *file_priv); 249 + int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data, 250 + struct drm_file *file_priv); 251 + int v3d_gem_fault(struct vm_fault *vmf); 252 + int v3d_mmap(struct file *filp, struct vm_area_struct *vma); 253 + struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj); 254 + int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 255 + struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj); 256 + struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, 257 + struct dma_buf_attachment *attach, 258 + struct sg_table *sgt); 259 + 260 + /* v3d_debugfs.c */ 261 + int v3d_debugfs_init(struct drm_minor 
*minor); 262 + 263 + /* v3d_fence.c */ 264 + extern const struct dma_fence_ops v3d_fence_ops; 265 + struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); 266 + 267 + /* v3d_gem.c */ 268 + int v3d_gem_init(struct drm_device *dev); 269 + void v3d_gem_destroy(struct drm_device *dev); 270 + int v3d_submit_cl_ioctl(struct drm_device *dev, void *data, 271 + struct drm_file *file_priv); 272 + int v3d_wait_bo_ioctl(struct drm_device *dev, void *data, 273 + struct drm_file *file_priv); 274 + void v3d_exec_put(struct v3d_exec_info *exec); 275 + void v3d_reset(struct v3d_dev *v3d); 276 + void v3d_invalidate_caches(struct v3d_dev *v3d); 277 + void v3d_flush_caches(struct v3d_dev *v3d); 278 + 279 + /* v3d_irq.c */ 280 + void v3d_irq_init(struct v3d_dev *v3d); 281 + void v3d_irq_enable(struct v3d_dev *v3d); 282 + void v3d_irq_disable(struct v3d_dev *v3d); 283 + void v3d_irq_reset(struct v3d_dev *v3d); 284 + 285 + /* v3d_mmu.c */ 286 + int v3d_mmu_get_offset(struct drm_file *file_priv, struct v3d_bo *bo, 287 + u32 *offset); 288 + int v3d_mmu_set_page_table(struct v3d_dev *v3d); 289 + void v3d_mmu_insert_ptes(struct v3d_bo *bo); 290 + void v3d_mmu_remove_ptes(struct v3d_bo *bo); 291 + 292 + /* v3d_sched.c */ 293 + int v3d_sched_init(struct v3d_dev *v3d); 294 + void v3d_sched_fini(struct v3d_dev *v3d);
+58
drivers/gpu/drm/v3d/v3d_fence.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2017-2018 Broadcom */ 3 + 4 + #include "v3d_drv.h" 5 + 6 + struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) 7 + { 8 + struct v3d_fence *fence; 9 + 10 + fence = kzalloc(sizeof(*fence), GFP_KERNEL); 11 + if (!fence) 12 + return ERR_PTR(-ENOMEM); 13 + 14 + fence->dev = &v3d->drm; 15 + fence->queue = queue; 16 + fence->seqno = ++v3d->queue[queue].emit_seqno; 17 + dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock, 18 + v3d->queue[queue].fence_context, fence->seqno); 19 + 20 + return &fence->base; 21 + } 22 + 23 + static const char *v3d_fence_get_driver_name(struct dma_fence *fence) 24 + { 25 + return "v3d"; 26 + } 27 + 28 + static const char *v3d_fence_get_timeline_name(struct dma_fence *fence) 29 + { 30 + struct v3d_fence *f = to_v3d_fence(fence); 31 + 32 + if (f->queue == V3D_BIN) 33 + return "v3d-bin"; 34 + else 35 + return "v3d-render"; 36 + } 37 + 38 + static bool v3d_fence_enable_signaling(struct dma_fence *fence) 39 + { 40 + return true; 41 + } 42 + 43 + static bool v3d_fence_signaled(struct dma_fence *fence) 44 + { 45 + struct v3d_fence *f = to_v3d_fence(fence); 46 + struct v3d_dev *v3d = to_v3d_dev(f->dev); 47 + 48 + return v3d->queue[f->queue].finished_seqno >= f->seqno; 49 + } 50 + 51 + const struct dma_fence_ops v3d_fence_ops = { 52 + .get_driver_name = v3d_fence_get_driver_name, 53 + .get_timeline_name = v3d_fence_get_timeline_name, 54 + .enable_signaling = v3d_fence_enable_signaling, 55 + .signaled = v3d_fence_signaled, 56 + .wait = dma_fence_default_wait, 57 + .release = dma_fence_free, 58 + };
+668
drivers/gpu/drm/v3d/v3d_gem.c
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>

#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

/* Per-core hardware init, applied at probe and after every reset. */
static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type.  If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

/* Asks the GMP to stop issuing AXI traffic and waits for outstanding
 * reads/writes to drain, so a power-down is safe.
 */
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

/* Requests a safe shutdown of the GCA cache controller and waits for
 * its acknowledgment.  No-op on V3D 4.1+, which has no GCA.
 */
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

/* Pulses the bridge's software-init line (register layout differs
 * between bridge major revisions 2 and 7), then re-applies invariant
 * hardware state.
 */
static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}

	v3d_init_hw_state(v3d);
}

/* Full GPU reset path: quiesce, reset through the bridge, then
 * restore the MMU page table and IRQ state.
 */
void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_ERROR("Resetting GPU.\n");
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	trace_v3d_reset_end(dev);
}

/* Flushes the L3 (GCA) cache on pre-4.1 hardware.  On ver < 33 the
 * FLUSH bit is explicitly cleared again; on 33..40 the second write
 * is skipped (presumably self-clearing there — NOTE(review): confirm
 * against the hardware docs).
 */
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2 cache. */
static void
v3d_invalidate_l2(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Flushes the TMU write combiner and waits for completion. */
static void
v3d_invalidate_l1td(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
	}
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	v3d_invalidate_l1td(v3d, core);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T flush\n");
	}
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

/* Invalidates texture L2 cachelines */
static void
v3d_invalidate_l2t(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core,
		       V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM));
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T invalidate\n");
	}
}

/* Invalidates all the read caches before a job runs, so the GPU sees
 * the CPU's latest writes.
 */
void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	v3d_flush_l3(v3d);

	v3d_invalidate_l2(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
	v3d_flush_l2t(v3d, 0);
}

/* Flushes the GPU's write paths after a job, so the CPU sees the
 * GPU's writes.
 */
void
v3d_flush_caches(struct v3d_dev *v3d)
{
	v3d_invalidate_l1td(v3d, 0);
	v3d_invalidate_l2t(v3d, 0);
}

/* Attaches the render job's scheduler "finished" fence to every BO's
 * reservation object, so other users wait for this job.
 */
static void
v3d_attach_object_fences(struct v3d_exec_info *exec)
{
	struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
	struct v3d_bo *bo;
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);

		/* XXX: Use shared fences for read-only objects. */
		reservation_object_add_excl_fence(bo->resv, out_fence);
	}
}

/* Drops the ww_mutex on every BO taken by v3d_lock_bo_reservations()
 * and finishes the acquire context.
 */
static void
v3d_unlock_bo_reservations(struct drm_device *dev,
			   struct v3d_exec_info *exec,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < exec->bo_count; i++) {
		struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);

		ww_mutex_unlock(&bo->resv->lock);
	}

	ww_acquire_fini(acquire_ctx);
}

/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct drm_device *dev,
			 struct v3d_exec_info *exec,
			 struct ww_acquire_ctx *acquire_ctx)
{
	/* Standard ww_mutex deadlock-avoidance dance: on -EDEADLK,
	 * unlock everything, lock the contended BO with the slow path
	 * first, then retry the rest.
	 */
	int contended_lock = -1;
	int i, ret;
	struct v3d_bo *bo;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended_lock != -1) {
		bo = to_v3d_bo(&exec->bo[contended_lock]->base);
		ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < exec->bo_count; i++) {
		if (i == contended_lock)
			continue;

		bo = to_v3d_bo(&exec->bo[i]->base);

		ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
		if (ret) {
			int j;

			/* Back out everything we locked so far. */
			for (j = 0; j < i; j++) {
				bo = to_v3d_bo(&exec->bo[j]->base);
				ww_mutex_unlock(&bo->resv->lock);
			}

			if (contended_lock != -1 && contended_lock >= i) {
				bo = to_v3d_bo(&exec->bo[contended_lock]->base);

				ww_mutex_unlock(&bo->resv->lock);
			}

			if (ret == -EDEADLK) {
				contended_lock = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	/* Reserve space for our shared (read-only) fence references,
	 * before we commit the CL to the hardware.
	 */
	for (i = 0; i < exec->bo_count; i++) {
		bo = to_v3d_bo(&exec->bo[i]->base);

		ret = reservation_object_reserve_shared(bo->resv);
		if (ret) {
			v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
			return ret;
		}
	}

	return 0;
}

/**
 * v3d_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: submit ioctl arguments carrying the userspace handle array
 * @exec: V3D job being set up
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_cl_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_v3d_submit_cl *args,
		  struct v3d_exec_info *exec)
{
	u32 *handles;
	int ret = 0;
	int i;

	exec->bo_count = args->bo_handle_count;

	if (!exec->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	/* NOTE(review): element size is sizeof(struct drm_gem_cma_object *)
	 * but the array holds struct v3d_bo * — same size, as all object
	 * pointers are, but the type in the sizeof looks like a copy-paste
	 * from vc4.
	 */
	exec->bo = kvmalloc_array(exec->bo_count,
				  sizeof(struct drm_gem_cma_object *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!exec->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(exec->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->bo_handles,
			   exec->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	/* Resolve handles to objects under the table lock, taking a
	 * reference on each; references are dropped in
	 * v3d_exec_cleanup().
	 */
	spin_lock(&file_priv->table_lock);
	for (i = 0; i < exec->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		exec->bo[i] = to_v3d_bo(bo);
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

/* kref release callback: drops every fence and BO reference held by
 * the exec, releases the runtime-PM reference taken at submit, and
 * frees the exec itself.
 */
static void
v3d_exec_cleanup(struct kref *ref)
{
	struct v3d_exec_info *exec = container_of(ref, struct v3d_exec_info,
						  refcount);
	struct v3d_dev *v3d = exec->v3d;
	unsigned int i;
	struct v3d_bo *bo, *save;

	dma_fence_put(exec->bin.in_fence);
	dma_fence_put(exec->render.in_fence);

	dma_fence_put(exec->bin.done_fence);
	dma_fence_put(exec->render.done_fence);

	dma_fence_put(exec->bin_done_fence);

	for (i = 0; i < exec->bo_count; i++)
		drm_gem_object_put_unlocked(&exec->bo[i]->base);
	kvfree(exec->bo);

	list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
		drm_gem_object_put_unlocked(&bo->base);
	}

	pm_runtime_mark_last_busy(v3d->dev);
	pm_runtime_put_autosuspend(v3d->dev);

	kfree(exec);
}

/* Drops one reference on the exec, freeing it when the last holder
 * (ioctl or scheduler completion) lets go.
 */
void v3d_exec_put(struct v3d_exec_info *exec)
{
	kref_put(&exec->refcount, v3d_exec_cleanup);
}

/* WAIT_BO ioctl: blocks (with timeout) until all fences on a BO's
 * reservation object have signaled.
 */
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	struct v3d_bo *bo;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -EINVAL;
	}
	bo = to_v3d_bo(gem_obj);

	ret = reservation_object_wait_timeout_rcu(bo->resv,
						  true, true,
						  timeout_jiffies);

	/* 0 from the rcu wait means timeout; >0 means remaining jiffies. */
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	drm_gem_object_put_unlocked(gem_obj);

	return ret;
}

/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_exec_info *exec;
	struct ww_acquire_ctx acquire_ctx;
	struct drm_syncobj *sync_out;
	int ret = 0;

	if (args->pad != 0) {
		DRM_INFO("pad must be zero: %d\n", args->pad);
		return -EINVAL;
	}

	exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	/* Hold a runtime-PM reference for the job's lifetime; dropped
	 * in v3d_exec_cleanup().
	 */
	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0) {
		kfree(exec);
		return ret;
	}

	kref_init(&exec->refcount);

	/* Only -EINVAL is fatal here: the in-fences are optional, so
	 * a missing syncobj is tolerated — NOTE(review): confirm this
	 * matches drm_syncobj_find_fence()'s error contract.
	 */
	ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
				     &exec->bin.in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
				     &exec->render.in_fence);
	if (ret == -EINVAL)
		goto fail;

	exec->qma = args->qma;
	exec->qms = args->qms;
	exec->qts = args->qts;
	exec->bin.exec = exec;
	exec->bin.start = args->bcl_start;
	exec->bin.end = args->bcl_end;
	exec->render.exec = exec;
	exec->render.start = args->rcl_start;
	exec->render.end = args->rcl_end;
	exec->v3d = v3d;
	INIT_LIST_HEAD(&exec->unref_list);

	ret = v3d_cl_lookup_bos(dev, file_priv, args, exec);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
	if (ret)
		goto fail;

	/* An empty BCL (start == end) means userspace skipped binning. */
	if (exec->bin.start != exec->bin.end) {
		ret = drm_sched_job_init(&exec->bin.base,
					 &v3d->queue[V3D_BIN].sched,
					 &v3d_priv->sched_entity[V3D_BIN],
					 v3d_priv);
		if (ret)
			goto fail_unreserve;

		exec->bin_done_fence =
			dma_fence_get(&exec->bin.base.s_fence->finished);

		kref_get(&exec->refcount); /* put by scheduler job completion */
		drm_sched_entity_push_job(&exec->bin.base,
					  &v3d_priv->sched_entity[V3D_BIN]);
	}

	ret = drm_sched_job_init(&exec->render.base,
				 &v3d->queue[V3D_RENDER].sched,
				 &v3d_priv->sched_entity[V3D_RENDER],
				 v3d_priv);
	if (ret)
		goto fail_unreserve;

	kref_get(&exec->refcount); /* put by scheduler job completion */
	drm_sched_entity_push_job(&exec->render.base,
				  &v3d_priv->sched_entity[V3D_RENDER]);

	v3d_attach_object_fences(exec);

	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);

	/* Update the return sync object for the job's completion. */
	sync_out = drm_syncobj_find(file_priv, args->out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out,
					  &exec->render.base.s_fence->finished);
		drm_syncobj_put(sync_out);
	}

	v3d_exec_put(exec);

	return 0;

fail_unreserve:
	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
	v3d_exec_put(exec);

	return ret;
}

int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);

	/* Note: We don't allocate address 0.  Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
622 + */ 623 + drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1); 624 + 625 + v3d->pt = dma_alloc_wc(v3d->dev, pt_size, 626 + &v3d->pt_paddr, 627 + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); 628 + if (!v3d->pt) { 629 + drm_mm_takedown(&v3d->mm); 630 + dev_err(v3d->dev, 631 + "Failed to allocate page tables. " 632 + "Please ensure you have CMA enabled.\n"); 633 + return -ENOMEM; 634 + } 635 + 636 + v3d_init_hw_state(v3d); 637 + v3d_mmu_set_page_table(v3d); 638 + 639 + ret = v3d_sched_init(v3d); 640 + if (ret) { 641 + drm_mm_takedown(&v3d->mm); 642 + dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, 643 + v3d->pt_paddr); 644 + } 645 + 646 + return 0; 647 + } 648 + 649 + void 650 + v3d_gem_destroy(struct drm_device *dev) 651 + { 652 + struct v3d_dev *v3d = to_v3d_dev(dev); 653 + enum v3d_queue q; 654 + 655 + v3d_sched_fini(v3d); 656 + 657 + /* Waiting for exec to finish would need to be done before 658 + * unregistering V3D. 659 + */ 660 + for (q = 0; q < V3D_MAX_QUEUES; q++) { 661 + WARN_ON(v3d->queue[q].emit_seqno != 662 + v3d->queue[q].finished_seqno); 663 + } 664 + 665 + drm_mm_takedown(&v3d->mm); 666 + 667 + dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr); 668 + }
+206
drivers/gpu/drm/v3d/v3d_irq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2014-2018 Broadcom */ 3 + 4 + /** 5 + * DOC: Interrupt management for the V3D engine 6 + * 7 + * When we take a binning or rendering flush done interrupt, we need 8 + * to signal the fence for that job so that the scheduler can queue up 9 + * the next one and unblock any waiters. 10 + * 11 + * When we take the binner out of memory interrupt, we need to 12 + * allocate some new memory and pass it to the binner so that the 13 + * current job can make progress. 14 + */ 15 + 16 + #include "v3d_drv.h" 17 + #include "v3d_regs.h" 18 + 19 + #define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \ 20 + V3D_INT_FLDONE | \ 21 + V3D_INT_FRDONE | \ 22 + V3D_INT_GMPV)) 23 + 24 + #define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \ 25 + V3D_HUB_INT_MMU_PTI | \ 26 + V3D_HUB_INT_MMU_CAP)) 27 + 28 + static void 29 + v3d_overflow_mem_work(struct work_struct *work) 30 + { 31 + struct v3d_dev *v3d = 32 + container_of(work, struct v3d_dev, overflow_mem_work); 33 + struct drm_device *dev = &v3d->drm; 34 + struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); 35 + unsigned long irqflags; 36 + 37 + if (IS_ERR(bo)) { 38 + DRM_ERROR("Couldn't allocate binner overflow mem\n"); 39 + return; 40 + } 41 + 42 + /* We lost a race, and our work task came in after the bin job 43 + * completed and exited. This can happen because the HW 44 + * signals OOM before it's fully OOM, so the binner might just 45 + * barely complete. 46 + * 47 + * If we lose the race and our work task comes in after a new 48 + * bin job got scheduled, that's fine. We'll just give them 49 + * some binner pool anyway. 
50 + */ 51 + spin_lock_irqsave(&v3d->job_lock, irqflags); 52 + if (!v3d->bin_job) { 53 + spin_unlock_irqrestore(&v3d->job_lock, irqflags); 54 + goto out; 55 + } 56 + 57 + drm_gem_object_get(&bo->base); 58 + list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list); 59 + spin_unlock_irqrestore(&v3d->job_lock, irqflags); 60 + 61 + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT); 62 + V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size); 63 + 64 + out: 65 + drm_gem_object_put_unlocked(&bo->base); 66 + } 67 + 68 + static irqreturn_t 69 + v3d_irq(int irq, void *arg) 70 + { 71 + struct v3d_dev *v3d = arg; 72 + u32 intsts; 73 + irqreturn_t status = IRQ_NONE; 74 + 75 + intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS); 76 + 77 + /* Acknowledge the interrupts we're handling here. */ 78 + V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts); 79 + 80 + if (intsts & V3D_INT_OUTOMEM) { 81 + /* Note that the OOM status is edge signaled, so the 82 + * interrupt won't happen again until the we actually 83 + * add more memory. 84 + */ 85 + schedule_work(&v3d->overflow_mem_work); 86 + status = IRQ_HANDLED; 87 + } 88 + 89 + if (intsts & V3D_INT_FLDONE) { 90 + v3d->queue[V3D_BIN].finished_seqno++; 91 + dma_fence_signal(v3d->bin_job->bin.done_fence); 92 + status = IRQ_HANDLED; 93 + } 94 + 95 + if (intsts & V3D_INT_FRDONE) { 96 + v3d->queue[V3D_RENDER].finished_seqno++; 97 + dma_fence_signal(v3d->render_job->render.done_fence); 98 + 99 + status = IRQ_HANDLED; 100 + } 101 + 102 + /* We shouldn't be triggering these if we have GMP in 103 + * always-allowed mode. 104 + */ 105 + if (intsts & V3D_INT_GMPV) 106 + dev_err(v3d->dev, "GMP violation\n"); 107 + 108 + return status; 109 + } 110 + 111 + static irqreturn_t 112 + v3d_hub_irq(int irq, void *arg) 113 + { 114 + struct v3d_dev *v3d = arg; 115 + u32 intsts; 116 + irqreturn_t status = IRQ_NONE; 117 + 118 + intsts = V3D_READ(V3D_HUB_INT_STS); 119 + 120 + /* Acknowledge the interrupts we're handling here. 
*/ 121 + V3D_WRITE(V3D_HUB_INT_CLR, intsts); 122 + 123 + if (intsts & (V3D_HUB_INT_MMU_WRV | 124 + V3D_HUB_INT_MMU_PTI | 125 + V3D_HUB_INT_MMU_CAP)) { 126 + u32 axi_id = V3D_READ(V3D_MMU_VIO_ID); 127 + u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8; 128 + 129 + dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n", 130 + axi_id, (long long)vio_addr, 131 + ((intsts & V3D_HUB_INT_MMU_WRV) ? 132 + ", write violation" : ""), 133 + ((intsts & V3D_HUB_INT_MMU_PTI) ? 134 + ", pte invalid" : ""), 135 + ((intsts & V3D_HUB_INT_MMU_CAP) ? 136 + ", cap exceeded" : "")); 137 + status = IRQ_HANDLED; 138 + } 139 + 140 + return status; 141 + } 142 + 143 + void 144 + v3d_irq_init(struct v3d_dev *v3d) 145 + { 146 + int ret, core; 147 + 148 + INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); 149 + 150 + /* Clear any pending interrupts someone might have left around 151 + * for us. 152 + */ 153 + for (core = 0; core < v3d->cores; core++) 154 + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); 155 + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); 156 + 157 + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0), 158 + v3d_hub_irq, IRQF_SHARED, 159 + "v3d_hub", v3d); 160 + ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1), 161 + v3d_irq, IRQF_SHARED, 162 + "v3d_core0", v3d); 163 + if (ret) 164 + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret); 165 + 166 + v3d_irq_enable(v3d); 167 + } 168 + 169 + void 170 + v3d_irq_enable(struct v3d_dev *v3d) 171 + { 172 + int core; 173 + 174 + /* Enable our set of interrupts, masking out any others. 
*/ 175 + for (core = 0; core < v3d->cores; core++) { 176 + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS); 177 + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS); 178 + } 179 + 180 + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS); 181 + V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS); 182 + } 183 + 184 + void 185 + v3d_irq_disable(struct v3d_dev *v3d) 186 + { 187 + int core; 188 + 189 + /* Disable all interrupts. */ 190 + for (core = 0; core < v3d->cores; core++) 191 + V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); 192 + V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); 193 + 194 + /* Clear any pending interrupts we might have left. */ 195 + for (core = 0; core < v3d->cores; core++) 196 + V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); 197 + V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); 198 + 199 + cancel_work_sync(&v3d->overflow_mem_work); 200 + } 201 + 202 + /** Reinitializes interrupt registers when a GPU reset is performed. */ 203 + void v3d_irq_reset(struct v3d_dev *v3d) 204 + { 205 + v3d_irq_enable(v3d); 206 + }
+122
drivers/gpu/drm/v3d/v3d_mmu.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2017-2018 Broadcom */ 3 + 4 + /** 5 + * DOC: Broadcom V3D MMU 6 + * 7 + * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has 8 + * a single level of page tables for the V3D's 4GB address space to 9 + * map to AXI bus addresses, thus it could need up to 4MB of 10 + * physically contiguous memory to store the PTEs. 11 + * 12 + * Because the 4MB of contiguous memory for page tables is precious, 13 + * and switching between them is expensive, we load all BOs into the 14 + * same 4GB address space. 15 + * 16 + * To protect clients from each other, we should use the GMP to 17 + * quickly mask out (at 128kb granularity) what pages are available to 18 + * each client. This is not yet implemented. 19 + */ 20 + 21 + #include "v3d_drv.h" 22 + #include "v3d_regs.h" 23 + 24 + #define V3D_MMU_PAGE_SHIFT 12 25 + 26 + /* Note: All PTEs for the 1MB superpage must be filled with the 27 + * superpage bit set. 28 + */ 29 + #define V3D_PTE_SUPERPAGE BIT(31) 30 + #define V3D_PTE_WRITEABLE BIT(29) 31 + #define V3D_PTE_VALID BIT(28) 32 + 33 + static int v3d_mmu_flush_all(struct v3d_dev *v3d) 34 + { 35 + int ret; 36 + 37 + /* Make sure that another flush isn't already running when we 38 + * start this one. 
39 + */ 40 + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & 41 + V3D_MMU_CTL_TLB_CLEARING), 100); 42 + if (ret) 43 + dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n"); 44 + 45 + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | 46 + V3D_MMU_CTL_TLB_CLEAR); 47 + 48 + V3D_WRITE(V3D_MMUC_CONTROL, 49 + V3D_MMUC_CONTROL_FLUSH | 50 + V3D_MMUC_CONTROL_ENABLE); 51 + 52 + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & 53 + V3D_MMU_CTL_TLB_CLEARING), 100); 54 + if (ret) { 55 + dev_err(v3d->dev, "TLB clear wait idle failed\n"); 56 + return ret; 57 + } 58 + 59 + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & 60 + V3D_MMUC_CONTROL_FLUSHING), 100); 61 + if (ret) 62 + dev_err(v3d->dev, "MMUC flush wait idle failed\n"); 63 + 64 + return ret; 65 + } 66 + 67 + int v3d_mmu_set_page_table(struct v3d_dev *v3d) 68 + { 69 + V3D_WRITE(V3D_MMU_PT_PA_BASE, v3d->pt_paddr >> V3D_MMU_PAGE_SHIFT); 70 + V3D_WRITE(V3D_MMU_CTL, 71 + V3D_MMU_CTL_ENABLE | 72 + V3D_MMU_CTL_PT_INVALID | 73 + V3D_MMU_CTL_PT_INVALID_ABORT | 74 + V3D_MMU_CTL_WRITE_VIOLATION_ABORT | 75 + V3D_MMU_CTL_CAP_EXCEEDED_ABORT); 76 + V3D_WRITE(V3D_MMU_ILLEGAL_ADDR, 77 + (v3d->mmu_scratch_paddr >> V3D_MMU_PAGE_SHIFT) | 78 + V3D_MMU_ILLEGAL_ADDR_ENABLE); 79 + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_ENABLE); 80 + 81 + return v3d_mmu_flush_all(v3d); 82 + } 83 + 84 + void v3d_mmu_insert_ptes(struct v3d_bo *bo) 85 + { 86 + struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); 87 + u32 page = bo->node.start; 88 + u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; 89 + unsigned int count; 90 + struct scatterlist *sgl; 91 + 92 + for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) { 93 + u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT; 94 + u32 pte = page_prot | page_address; 95 + u32 i; 96 + 97 + BUG_ON(page_address + (sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT) >= 98 + BIT(24)); 99 + 100 + for (i = 0; i < sg_dma_len(sgl) >> V3D_MMU_PAGE_SHIFT; i++) 101 + v3d->pt[page++] = pte + i; 102 + } 103 + 104 + WARN_ON_ONCE(page - bo->node.start 
!= 105 + bo->base.size >> V3D_MMU_PAGE_SHIFT); 106 + 107 + if (v3d_mmu_flush_all(v3d)) 108 + dev_err(v3d->dev, "MMU flush timeout\n"); 109 + } 110 + 111 + void v3d_mmu_remove_ptes(struct v3d_bo *bo) 112 + { 113 + struct v3d_dev *v3d = to_v3d_dev(bo->base.dev); 114 + u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT; 115 + u32 page; 116 + 117 + for (page = bo->node.start; page < bo->node.start + npages; page++) 118 + v3d->pt[page] = 0; 119 + 120 + if (v3d_mmu_flush_all(v3d)) 121 + dev_err(v3d->dev, "MMU flush timeout\n"); 122 + }
+295
drivers/gpu/drm/v3d/v3d_regs.h
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2017-2018 Broadcom */ 3 + 4 + #ifndef V3D_REGS_H 5 + #define V3D_REGS_H 6 + 7 + #include <linux/bitops.h> 8 + 9 + #define V3D_MASK(high, low) ((u32)GENMASK(high, low)) 10 + /* Using the GNU statement expression extension */ 11 + #define V3D_SET_FIELD(value, field) \ 12 + ({ \ 13 + u32 fieldval = (value) << field##_SHIFT; \ 14 + WARN_ON((fieldval & ~field##_MASK) != 0); \ 15 + fieldval & field##_MASK; \ 16 + }) 17 + 18 + #define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \ 19 + field##_SHIFT) 20 + 21 + /* Hub registers for shared hardware between V3D cores. */ 22 + 23 + #define V3D_HUB_AXICFG 0x00000 24 + # define V3D_HUB_AXICFG_MAX_LEN_MASK V3D_MASK(3, 0) 25 + # define V3D_HUB_AXICFG_MAX_LEN_SHIFT 0 26 + #define V3D_HUB_UIFCFG 0x00004 27 + #define V3D_HUB_IDENT0 0x00008 28 + 29 + #define V3D_HUB_IDENT1 0x0000c 30 + # define V3D_HUB_IDENT1_WITH_MSO BIT(19) 31 + # define V3D_HUB_IDENT1_WITH_TSY BIT(18) 32 + # define V3D_HUB_IDENT1_WITH_TFU BIT(17) 33 + # define V3D_HUB_IDENT1_WITH_L3C BIT(16) 34 + # define V3D_HUB_IDENT1_NHOSTS_MASK V3D_MASK(15, 12) 35 + # define V3D_HUB_IDENT1_NHOSTS_SHIFT 12 36 + # define V3D_HUB_IDENT1_NCORES_MASK V3D_MASK(11, 8) 37 + # define V3D_HUB_IDENT1_NCORES_SHIFT 8 38 + # define V3D_HUB_IDENT1_REV_MASK V3D_MASK(7, 4) 39 + # define V3D_HUB_IDENT1_REV_SHIFT 4 40 + # define V3D_HUB_IDENT1_TVER_MASK V3D_MASK(3, 0) 41 + # define V3D_HUB_IDENT1_TVER_SHIFT 0 42 + 43 + #define V3D_HUB_IDENT2 0x00010 44 + # define V3D_HUB_IDENT2_WITH_MMU BIT(8) 45 + # define V3D_HUB_IDENT2_L3C_NKB_MASK V3D_MASK(7, 0) 46 + # define V3D_HUB_IDENT2_L3C_NKB_SHIFT 0 47 + 48 + #define V3D_HUB_IDENT3 0x00014 49 + # define V3D_HUB_IDENT3_IPREV_MASK V3D_MASK(15, 8) 50 + # define V3D_HUB_IDENT3_IPREV_SHIFT 8 51 + # define V3D_HUB_IDENT3_IPIDX_MASK V3D_MASK(7, 0) 52 + # define V3D_HUB_IDENT3_IPIDX_SHIFT 0 53 + 54 + #define V3D_HUB_INT_STS 0x00050 55 + #define V3D_HUB_INT_SET 0x00054 56 + #define 
V3D_HUB_INT_CLR 0x00058 57 + #define V3D_HUB_INT_MSK_STS 0x0005c 58 + #define V3D_HUB_INT_MSK_SET 0x00060 59 + #define V3D_HUB_INT_MSK_CLR 0x00064 60 + # define V3D_HUB_INT_MMU_WRV BIT(5) 61 + # define V3D_HUB_INT_MMU_PTI BIT(4) 62 + # define V3D_HUB_INT_MMU_CAP BIT(3) 63 + # define V3D_HUB_INT_MSO BIT(2) 64 + # define V3D_HUB_INT_TFUC BIT(1) 65 + # define V3D_HUB_INT_TFUF BIT(0) 66 + 67 + #define V3D_GCA_CACHE_CTRL 0x0000c 68 + # define V3D_GCA_CACHE_CTRL_FLUSH BIT(0) 69 + 70 + #define V3D_GCA_SAFE_SHUTDOWN 0x000b0 71 + # define V3D_GCA_SAFE_SHUTDOWN_EN BIT(0) 72 + 73 + #define V3D_GCA_SAFE_SHUTDOWN_ACK 0x000b4 74 + # define V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED 3 75 + 76 + # define V3D_TOP_GR_BRIDGE_REVISION 0x00000 77 + # define V3D_TOP_GR_BRIDGE_MAJOR_MASK V3D_MASK(15, 8) 78 + # define V3D_TOP_GR_BRIDGE_MAJOR_SHIFT 8 79 + # define V3D_TOP_GR_BRIDGE_MINOR_MASK V3D_MASK(7, 0) 80 + # define V3D_TOP_GR_BRIDGE_MINOR_SHIFT 0 81 + 82 + /* 7268 reset reg */ 83 + # define V3D_TOP_GR_BRIDGE_SW_INIT_0 0x00008 84 + # define V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT BIT(0) 85 + /* 7278 reset reg */ 86 + # define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c 87 + # define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0) 88 + 89 + /* Per-MMU registers. 
*/ 90 + 91 + #define V3D_MMUC_CONTROL 0x01000 92 + # define V3D_MMUC_CONTROL_CLEAR BIT(3) 93 + # define V3D_MMUC_CONTROL_FLUSHING BIT(2) 94 + # define V3D_MMUC_CONTROL_FLUSH BIT(1) 95 + # define V3D_MMUC_CONTROL_ENABLE BIT(0) 96 + 97 + #define V3D_MMU_CTL 0x01200 98 + # define V3D_MMU_CTL_CAP_EXCEEDED BIT(27) 99 + # define V3D_MMU_CTL_CAP_EXCEEDED_ABORT BIT(26) 100 + # define V3D_MMU_CTL_CAP_EXCEEDED_INT BIT(25) 101 + # define V3D_MMU_CTL_CAP_EXCEEDED_EXCEPTION BIT(24) 102 + # define V3D_MMU_CTL_PT_INVALID BIT(20) 103 + # define V3D_MMU_CTL_PT_INVALID_ABORT BIT(19) 104 + # define V3D_MMU_CTL_PT_INVALID_INT BIT(18) 105 + # define V3D_MMU_CTL_PT_INVALID_EXCEPTION BIT(17) 106 + # define V3D_MMU_CTL_WRITE_VIOLATION BIT(16) 107 + # define V3D_MMU_CTL_WRITE_VIOLATION_ABORT BIT(11) 108 + # define V3D_MMU_CTL_WRITE_VIOLATION_INT BIT(10) 109 + # define V3D_MMU_CTL_WRITE_VIOLATION_EXCEPTION BIT(9) 110 + # define V3D_MMU_CTL_TLB_CLEARING BIT(7) 111 + # define V3D_MMU_CTL_TLB_STATS_CLEAR BIT(3) 112 + # define V3D_MMU_CTL_TLB_CLEAR BIT(2) 113 + # define V3D_MMU_CTL_TLB_STATS_ENABLE BIT(1) 114 + # define V3D_MMU_CTL_ENABLE BIT(0) 115 + 116 + #define V3D_MMU_PT_PA_BASE 0x01204 117 + #define V3D_MMU_HIT 0x01208 118 + #define V3D_MMU_MISSES 0x0120c 119 + #define V3D_MMU_STALLS 0x01210 120 + 121 + #define V3D_MMU_ADDR_CAP 0x01214 122 + # define V3D_MMU_ADDR_CAP_ENABLE BIT(31) 123 + # define V3D_MMU_ADDR_CAP_MPAGE_MASK V3D_MASK(11, 0) 124 + # define V3D_MMU_ADDR_CAP_MPAGE_SHIFT 0 125 + 126 + #define V3D_MMU_SHOOT_DOWN 0x01218 127 + # define V3D_MMU_SHOOT_DOWN_SHOOTING BIT(29) 128 + # define V3D_MMU_SHOOT_DOWN_SHOOT BIT(28) 129 + # define V3D_MMU_SHOOT_DOWN_PAGE_MASK V3D_MASK(27, 0) 130 + # define V3D_MMU_SHOOT_DOWN_PAGE_SHIFT 0 131 + 132 + #define V3D_MMU_BYPASS_START 0x0121c 133 + #define V3D_MMU_BYPASS_END 0x01220 134 + 135 + /* AXI ID of the access that faulted */ 136 + #define V3D_MMU_VIO_ID 0x0122c 137 + 138 + /* Address for illegal PTEs to return */ 139 + #define 
V3D_MMU_ILLEGAL_ADDR 0x01230 140 + # define V3D_MMU_ILLEGAL_ADDR_ENABLE BIT(31) 141 + 142 + /* Address that faulted */ 143 + #define V3D_MMU_VIO_ADDR 0x01234 144 + 145 + /* Per-V3D-core registers */ 146 + 147 + #define V3D_CTL_IDENT0 0x00000 148 + # define V3D_IDENT0_VER_MASK V3D_MASK(31, 24) 149 + # define V3D_IDENT0_VER_SHIFT 24 150 + 151 + #define V3D_CTL_IDENT1 0x00004 152 + /* Multiples of 1kb */ 153 + # define V3D_IDENT1_VPM_SIZE_MASK V3D_MASK(31, 28) 154 + # define V3D_IDENT1_VPM_SIZE_SHIFT 28 155 + # define V3D_IDENT1_NSEM_MASK V3D_MASK(23, 16) 156 + # define V3D_IDENT1_NSEM_SHIFT 16 157 + # define V3D_IDENT1_NTMU_MASK V3D_MASK(15, 12) 158 + # define V3D_IDENT1_NTMU_SHIFT 12 159 + # define V3D_IDENT1_QUPS_MASK V3D_MASK(11, 8) 160 + # define V3D_IDENT1_QUPS_SHIFT 8 161 + # define V3D_IDENT1_NSLC_MASK V3D_MASK(7, 4) 162 + # define V3D_IDENT1_NSLC_SHIFT 4 163 + # define V3D_IDENT1_REV_MASK V3D_MASK(3, 0) 164 + # define V3D_IDENT1_REV_SHIFT 0 165 + 166 + #define V3D_CTL_IDENT2 0x00008 167 + # define V3D_IDENT2_BCG_INT BIT(28) 168 + 169 + #define V3D_CTL_MISCCFG 0x00018 170 + # define V3D_MISCCFG_OVRTMUOUT BIT(0) 171 + 172 + #define V3D_CTL_L2CACTL 0x00020 173 + # define V3D_L2CACTL_L2CCLR BIT(2) 174 + # define V3D_L2CACTL_L2CDIS BIT(1) 175 + # define V3D_L2CACTL_L2CENA BIT(0) 176 + 177 + #define V3D_CTL_SLCACTL 0x00024 178 + # define V3D_SLCACTL_TVCCS_MASK V3D_MASK(27, 24) 179 + # define V3D_SLCACTL_TVCCS_SHIFT 24 180 + # define V3D_SLCACTL_TDCCS_MASK V3D_MASK(19, 16) 181 + # define V3D_SLCACTL_TDCCS_SHIFT 16 182 + # define V3D_SLCACTL_UCC_MASK V3D_MASK(11, 8) 183 + # define V3D_SLCACTL_UCC_SHIFT 8 184 + # define V3D_SLCACTL_ICC_MASK V3D_MASK(3, 0) 185 + # define V3D_SLCACTL_ICC_SHIFT 0 186 + 187 + #define V3D_CTL_L2TCACTL 0x00030 188 + # define V3D_L2TCACTL_TMUWCF BIT(8) 189 + # define V3D_L2TCACTL_L2T_NO_WM BIT(4) 190 + # define V3D_L2TCACTL_FLM_FLUSH 0 191 + # define V3D_L2TCACTL_FLM_CLEAR 1 192 + # define V3D_L2TCACTL_FLM_CLEAN 2 193 + # define 
V3D_L2TCACTL_FLM_MASK V3D_MASK(2, 1) 194 + # define V3D_L2TCACTL_FLM_SHIFT 1 195 + # define V3D_L2TCACTL_L2TFLS BIT(0) 196 + #define V3D_CTL_L2TFLSTA 0x00034 197 + #define V3D_CTL_L2TFLEND 0x00038 198 + 199 + #define V3D_CTL_INT_STS 0x00050 200 + #define V3D_CTL_INT_SET 0x00054 201 + #define V3D_CTL_INT_CLR 0x00058 202 + #define V3D_CTL_INT_MSK_STS 0x0005c 203 + #define V3D_CTL_INT_MSK_SET 0x00060 204 + #define V3D_CTL_INT_MSK_CLR 0x00064 205 + # define V3D_INT_QPU_MASK V3D_MASK(27, 16) 206 + # define V3D_INT_QPU_SHIFT 16 207 + # define V3D_INT_GMPV BIT(5) 208 + # define V3D_INT_TRFB BIT(4) 209 + # define V3D_INT_SPILLUSE BIT(3) 210 + # define V3D_INT_OUTOMEM BIT(2) 211 + # define V3D_INT_FLDONE BIT(1) 212 + # define V3D_INT_FRDONE BIT(0) 213 + 214 + #define V3D_CLE_CT0CS 0x00100 215 + #define V3D_CLE_CT1CS 0x00104 216 + #define V3D_CLE_CTNCS(n) (V3D_CLE_CT0CS + 4 * n) 217 + #define V3D_CLE_CT0EA 0x00108 218 + #define V3D_CLE_CT1EA 0x0010c 219 + #define V3D_CLE_CTNEA(n) (V3D_CLE_CT0EA + 4 * n) 220 + #define V3D_CLE_CT0CA 0x00110 221 + #define V3D_CLE_CT1CA 0x00114 222 + #define V3D_CLE_CTNCA(n) (V3D_CLE_CT0CA + 4 * n) 223 + #define V3D_CLE_CT0RA 0x00118 224 + #define V3D_CLE_CT1RA 0x0011c 225 + #define V3D_CLE_CT0LC 0x00120 226 + #define V3D_CLE_CT1LC 0x00124 227 + #define V3D_CLE_CT0PC 0x00128 228 + #define V3D_CLE_CT1PC 0x0012c 229 + #define V3D_CLE_PCS 0x00130 230 + #define V3D_CLE_BFC 0x00134 231 + #define V3D_CLE_RFC 0x00138 232 + #define V3D_CLE_TFBC 0x0013c 233 + #define V3D_CLE_TFIT 0x00140 234 + #define V3D_CLE_CT1CFG 0x00144 235 + #define V3D_CLE_CT1TILECT 0x00148 236 + #define V3D_CLE_CT1TSKIP 0x0014c 237 + #define V3D_CLE_CT1PTCT 0x00150 238 + #define V3D_CLE_CT0SYNC 0x00154 239 + #define V3D_CLE_CT1SYNC 0x00158 240 + #define V3D_CLE_CT0QTS 0x0015c 241 + # define V3D_CLE_CT0QTS_ENABLE BIT(1) 242 + #define V3D_CLE_CT0QBA 0x00160 243 + #define V3D_CLE_CT1QBA 0x00164 244 + #define V3D_CLE_CTNQBA(n) (V3D_CLE_CT0QBA + 4 * n) 245 + #define V3D_CLE_CT0QEA 
0x00168 246 + #define V3D_CLE_CT1QEA 0x0016c 247 + #define V3D_CLE_CTNQEA(n) (V3D_CLE_CT0QEA + 4 * n) 248 + #define V3D_CLE_CT0QMA 0x00170 249 + #define V3D_CLE_CT0QMS 0x00174 250 + #define V3D_CLE_CT1QCFG 0x00178 251 + /* If set without ETPROC, entirely skip tiles with no primitives. */ 252 + # define V3D_CLE_QCFG_ETFILT BIT(7) 253 + /* If set with ETFILT, just write the clear color to tiles with no 254 + * primitives. 255 + */ 256 + # define V3D_CLE_QCFG_ETPROC BIT(6) 257 + # define V3D_CLE_QCFG_ETSFLUSH BIT(1) 258 + # define V3D_CLE_QCFG_MCDIS BIT(0) 259 + 260 + #define V3D_PTB_BPCA 0x00300 261 + #define V3D_PTB_BPCS 0x00304 262 + #define V3D_PTB_BPOA 0x00308 263 + #define V3D_PTB_BPOS 0x0030c 264 + 265 + #define V3D_PTB_BXCF 0x00310 266 + # define V3D_PTB_BXCF_RWORDERDISA BIT(1) 267 + # define V3D_PTB_BXCF_CLIPDISA BIT(0) 268 + 269 + #define V3D_GMP_STATUS 0x00800 270 + # define V3D_GMP_STATUS_GMPRST BIT(31) 271 + # define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24) 272 + # define V3D_GMP_STATUS_WR_COUNT_SHIFT 24 273 + # define V3D_GMP_STATUS_RD_COUNT_MASK V3D_MASK(22, 16) 274 + # define V3D_GMP_STATUS_RD_COUNT_SHIFT 16 275 + # define V3D_GMP_STATUS_WR_ACTIVE BIT(5) 276 + # define V3D_GMP_STATUS_RD_ACTIVE BIT(4) 277 + # define V3D_GMP_STATUS_CFG_BUSY BIT(3) 278 + # define V3D_GMP_STATUS_CNTOVF BIT(2) 279 + # define V3D_GMP_STATUS_INVPROT BIT(1) 280 + # define V3D_GMP_STATUS_VIO BIT(0) 281 + 282 + #define V3D_GMP_CFG 0x00804 283 + # define V3D_GMP_CFG_LBURSTEN BIT(3) 284 + # define V3D_GMP_CFG_PGCRSEN BIT() 285 + # define V3D_GMP_CFG_STOP_REQ BIT(1) 286 + # define V3D_GMP_CFG_PROT_ENABLE BIT(0) 287 + 288 + #define V3D_GMP_VIO_ADDR 0x00808 289 + #define V3D_GMP_VIO_TYPE 0x0080c 290 + #define V3D_GMP_TABLE_ADDR 0x00810 291 + #define V3D_GMP_CLEAR_LOAD 0x00814 292 + #define V3D_GMP_PRESERVE_LOAD 0x00818 293 + #define V3D_GMP_VALID_LINES 0x00820 294 + 295 + #endif /* V3D_REGS_H */
+228
drivers/gpu/drm/v3d/v3d_sched.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2018 Broadcom */ 3 + 4 + /** 5 + * DOC: Broadcom V3D scheduling 6 + * 7 + * The shared DRM GPU scheduler is used to coordinate submitting jobs 8 + * to the hardware. Each DRM fd (roughly a client process) gets its 9 + * own scheduler entity, which will process jobs in order. The GPU 10 + * scheduler will round-robin between clients to submit the next job. 11 + * 12 + * For simplicity, and in order to keep latency low for interactive 13 + * jobs when bulk background jobs are queued up, we submit a new job 14 + * to the HW only when it has completed the last one, instead of 15 + * filling up the CT[01]Q FIFOs with jobs. Similarly, we use 16 + * v3d_job_dependency() to manage the dependency between bin and 17 + * render, instead of having the clients submit jobs with using the 18 + * HW's semaphores to interlock between them. 19 + */ 20 + 21 + #include <linux/kthread.h> 22 + 23 + #include "v3d_drv.h" 24 + #include "v3d_regs.h" 25 + #include "v3d_trace.h" 26 + 27 + static struct v3d_job * 28 + to_v3d_job(struct drm_sched_job *sched_job) 29 + { 30 + return container_of(sched_job, struct v3d_job, base); 31 + } 32 + 33 + static void 34 + v3d_job_free(struct drm_sched_job *sched_job) 35 + { 36 + struct v3d_job *job = to_v3d_job(sched_job); 37 + 38 + v3d_exec_put(job->exec); 39 + } 40 + 41 + /** 42 + * Returns the fences that the bin job depends on, one by one. 43 + * v3d_job_run() won't be called until all of them have been signaled. 44 + */ 45 + static struct dma_fence * 46 + v3d_job_dependency(struct drm_sched_job *sched_job, 47 + struct drm_sched_entity *s_entity) 48 + { 49 + struct v3d_job *job = to_v3d_job(sched_job); 50 + struct v3d_exec_info *exec = job->exec; 51 + enum v3d_queue q = job == &exec->bin ? 
V3D_BIN : V3D_RENDER; 52 + struct dma_fence *fence; 53 + 54 + fence = job->in_fence; 55 + if (fence) { 56 + job->in_fence = NULL; 57 + return fence; 58 + } 59 + 60 + if (q == V3D_RENDER) { 61 + /* If we had a bin job, the render job definitely depends on 62 + * it. We first have to wait for bin to be scheduled, so that 63 + * its done_fence is created. 64 + */ 65 + fence = exec->bin_done_fence; 66 + if (fence) { 67 + exec->bin_done_fence = NULL; 68 + return fence; 69 + } 70 + } 71 + 72 + /* XXX: Wait on a fence for switching the GMP if necessary, 73 + * and then do so. 74 + */ 75 + 76 + return fence; 77 + } 78 + 79 + static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job) 80 + { 81 + struct v3d_job *job = to_v3d_job(sched_job); 82 + struct v3d_exec_info *exec = job->exec; 83 + enum v3d_queue q = job == &exec->bin ? V3D_BIN : V3D_RENDER; 84 + struct v3d_dev *v3d = exec->v3d; 85 + struct drm_device *dev = &v3d->drm; 86 + struct dma_fence *fence; 87 + unsigned long irqflags; 88 + 89 + if (unlikely(job->base.s_fence->finished.error)) 90 + return NULL; 91 + 92 + /* Lock required around bin_job update vs 93 + * v3d_overflow_mem_work(). 94 + */ 95 + spin_lock_irqsave(&v3d->job_lock, irqflags); 96 + if (q == V3D_BIN) { 97 + v3d->bin_job = job->exec; 98 + 99 + /* Clear out the overflow allocation, so we don't 100 + * reuse the overflow attached to a previous job. 101 + */ 102 + V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); 103 + } else { 104 + v3d->render_job = job->exec; 105 + } 106 + spin_unlock_irqrestore(&v3d->job_lock, irqflags); 107 + 108 + /* Can we avoid this flush when q==RENDER? We need to be 109 + * careful of scheduling, though -- imagine job0 rendering to 110 + * texture and job1 reading, and them being executed as bin0, 111 + * bin1, render0, render1, so that render1's flush at bin time 112 + * wasn't enough. 
113 + */ 114 + v3d_invalidate_caches(v3d); 115 + 116 + fence = v3d_fence_create(v3d, q); 117 + if (!fence) 118 + return fence; 119 + 120 + if (job->done_fence) 121 + dma_fence_put(job->done_fence); 122 + job->done_fence = dma_fence_get(fence); 123 + 124 + trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno, 125 + job->start, job->end); 126 + 127 + if (q == V3D_BIN) { 128 + if (exec->qma) { 129 + V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, exec->qma); 130 + V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, exec->qms); 131 + } 132 + if (exec->qts) { 133 + V3D_CORE_WRITE(0, V3D_CLE_CT0QTS, 134 + V3D_CLE_CT0QTS_ENABLE | 135 + exec->qts); 136 + } 137 + } else { 138 + /* XXX: Set the QCFG */ 139 + } 140 + 141 + /* Set the current and end address of the control list. 142 + * Writing the end register is what starts the job. 143 + */ 144 + V3D_CORE_WRITE(0, V3D_CLE_CTNQBA(q), job->start); 145 + V3D_CORE_WRITE(0, V3D_CLE_CTNQEA(q), job->end); 146 + 147 + return fence; 148 + } 149 + 150 + static void 151 + v3d_job_timedout(struct drm_sched_job *sched_job) 152 + { 153 + struct v3d_job *job = to_v3d_job(sched_job); 154 + struct v3d_exec_info *exec = job->exec; 155 + struct v3d_dev *v3d = exec->v3d; 156 + enum v3d_queue q; 157 + 158 + mutex_lock(&v3d->reset_lock); 159 + 160 + /* block scheduler */ 161 + for (q = 0; q < V3D_MAX_QUEUES; q++) { 162 + struct drm_gpu_scheduler *sched = &v3d->queue[q].sched; 163 + 164 + kthread_park(sched->thread); 165 + drm_sched_hw_job_reset(sched, (sched_job->sched == sched ? 166 + sched_job : NULL)); 167 + } 168 + 169 + /* get the GPU back into the init state */ 170 + v3d_reset(v3d); 171 + 172 + /* Unblock schedulers and restart their jobs. 
*/ 173 + for (q = 0; q < V3D_MAX_QUEUES; q++) { 174 + drm_sched_job_recovery(&v3d->queue[q].sched); 175 + kthread_unpark(v3d->queue[q].sched.thread); 176 + } 177 + 178 + mutex_unlock(&v3d->reset_lock); 179 + } 180 + 181 + static const struct drm_sched_backend_ops v3d_sched_ops = { 182 + .dependency = v3d_job_dependency, 183 + .run_job = v3d_job_run, 184 + .timedout_job = v3d_job_timedout, 185 + .free_job = v3d_job_free 186 + }; 187 + 188 + int 189 + v3d_sched_init(struct v3d_dev *v3d) 190 + { 191 + int hw_jobs_limit = 1; 192 + int job_hang_limit = 0; 193 + int hang_limit_ms = 500; 194 + int ret; 195 + 196 + ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, 197 + &v3d_sched_ops, 198 + hw_jobs_limit, job_hang_limit, 199 + msecs_to_jiffies(hang_limit_ms), 200 + "v3d_bin"); 201 + if (ret) { 202 + dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret); 203 + return ret; 204 + } 205 + 206 + ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, 207 + &v3d_sched_ops, 208 + hw_jobs_limit, job_hang_limit, 209 + msecs_to_jiffies(hang_limit_ms), 210 + "v3d_render"); 211 + if (ret) { 212 + dev_err(v3d->dev, "Failed to create render scheduler: %d.", 213 + ret); 214 + drm_sched_fini(&v3d->queue[V3D_BIN].sched); 215 + return ret; 216 + } 217 + 218 + return 0; 219 + } 220 + 221 + void 222 + v3d_sched_fini(struct v3d_dev *v3d) 223 + { 224 + enum v3d_queue q; 225 + 226 + for (q = 0; q < V3D_MAX_QUEUES; q++) 227 + drm_sched_fini(&v3d->queue[q].sched); 228 + }
+82
drivers/gpu/drm/v3d/v3d_trace.h
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2015-2018 Broadcom */ 3 + 4 + #if !defined(_V3D_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _V3D_TRACE_H_ 6 + 7 + #include <linux/stringify.h> 8 + #include <linux/types.h> 9 + #include <linux/tracepoint.h> 10 + 11 + #undef TRACE_SYSTEM 12 + #define TRACE_SYSTEM v3d 13 + #define TRACE_INCLUDE_FILE v3d_trace 14 + 15 + TRACE_EVENT(v3d_submit_cl, 16 + TP_PROTO(struct drm_device *dev, bool is_render, 17 + uint64_t seqno, 18 + u32 ctnqba, u32 ctnqea), 19 + TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea), 20 + 21 + TP_STRUCT__entry( 22 + __field(u32, dev) 23 + __field(bool, is_render) 24 + __field(u64, seqno) 25 + __field(u32, ctnqba) 26 + __field(u32, ctnqea) 27 + ), 28 + 29 + TP_fast_assign( 30 + __entry->dev = dev->primary->index; 31 + __entry->is_render = is_render; 32 + __entry->seqno = seqno; 33 + __entry->ctnqba = ctnqba; 34 + __entry->ctnqea = ctnqea; 35 + ), 36 + 37 + TP_printk("dev=%u, %s, seqno=%llu, 0x%08x..0x%08x", 38 + __entry->dev, 39 + __entry->is_render ? "RCL" : "BCL", 40 + __entry->seqno, 41 + __entry->ctnqba, 42 + __entry->ctnqea) 43 + ); 44 + 45 + TRACE_EVENT(v3d_reset_begin, 46 + TP_PROTO(struct drm_device *dev), 47 + TP_ARGS(dev), 48 + 49 + TP_STRUCT__entry( 50 + __field(u32, dev) 51 + ), 52 + 53 + TP_fast_assign( 54 + __entry->dev = dev->primary->index; 55 + ), 56 + 57 + TP_printk("dev=%u", 58 + __entry->dev) 59 + ); 60 + 61 + TRACE_EVENT(v3d_reset_end, 62 + TP_PROTO(struct drm_device *dev), 63 + TP_ARGS(dev), 64 + 65 + TP_STRUCT__entry( 66 + __field(u32, dev) 67 + ), 68 + 69 + TP_fast_assign( 70 + __entry->dev = dev->primary->index; 71 + ), 72 + 73 + TP_printk("dev=%u", 74 + __entry->dev) 75 + ); 76 + 77 + #endif /* _V3D_TRACE_H_ */ 78 + 79 + /* This part must be outside protection */ 80 + #undef TRACE_INCLUDE_PATH 81 + #define TRACE_INCLUDE_PATH . 82 + #include <trace/define_trace.h>
+9
drivers/gpu/drm/v3d/v3d_trace_points.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* Copyright (C) 2015 Broadcom */ 3 + 4 + #include "v3d_drv.h" 5 + 6 + #ifndef __CHECKER__ 7 + #define CREATE_TRACE_POINTS 8 + #include "v3d_trace.h" 9 + #endif
+2 -1
drivers/gpu/drm/vc4/vc4_drv.c
··· 175 175 DRIVER_GEM | 176 176 DRIVER_HAVE_IRQ | 177 177 DRIVER_RENDER | 178 - DRIVER_PRIME), 178 + DRIVER_PRIME | 179 + DRIVER_SYNCOBJ), 179 180 .lastclose = drm_fb_helper_lastclose, 180 181 .open = vc4_open, 181 182 .postclose = vc4_close,
+1
drivers/gpu/drm/vc4/vc4_drv.h
··· 11 11 #include <drm/drm_encoder.h> 12 12 #include <drm/drm_gem_cma_helper.h> 13 13 #include <drm/drm_atomic.h> 14 + #include <drm/drm_syncobj.h> 14 15 15 16 #include "uapi/drm/vc4_drm.h" 16 17
+5
drivers/gpu/drm/vc4/vc4_dsi.c
··· 753 753 (dsi->lanes > 2 ? DSI1_STAT_PHY_D2_STOP : 0) | 754 754 (dsi->lanes > 3 ? DSI1_STAT_PHY_D3_STOP : 0)); 755 755 int ret; 756 + bool ulps_currently_enabled = (DSI_PORT_READ(PHY_AFEC0) & 757 + DSI_PORT_BIT(PHY_AFEC0_LATCH_ULPS)); 758 + 759 + if (ulps == ulps_currently_enabled) 760 + return; 756 761 757 762 DSI_PORT_WRITE(STAT, stat_ulps); 758 763 DSI_PORT_WRITE(PHYC, DSI_PORT_READ(PHYC) | phyc_ulps);
+54 -3
drivers/gpu/drm/vc4/vc4_gem.c
··· 27 27 #include <linux/device.h> 28 28 #include <linux/io.h> 29 29 #include <linux/sched/signal.h> 30 + #include <linux/dma-fence-array.h> 30 31 31 32 #include "uapi/drm/vc4_drm.h" 32 33 #include "vc4_drv.h" ··· 656 655 */ 657 656 static int 658 657 vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec, 659 - struct ww_acquire_ctx *acquire_ctx) 658 + struct ww_acquire_ctx *acquire_ctx, 659 + struct drm_syncobj *out_sync) 660 660 { 661 661 struct vc4_dev *vc4 = to_vc4_dev(dev); 662 662 struct vc4_exec_info *renderjob; ··· 679 677 vc4->dma_fence_context, exec->seqno); 680 678 fence->seqno = exec->seqno; 681 679 exec->fence = &fence->base; 680 + 681 + if (out_sync) 682 + drm_syncobj_replace_fence(out_sync, exec->fence); 682 683 683 684 vc4_update_bo_seqnos(exec, seqno); 684 685 ··· 1118 1113 struct vc4_dev *vc4 = to_vc4_dev(dev); 1119 1114 struct vc4_file *vc4file = file_priv->driver_priv; 1120 1115 struct drm_vc4_submit_cl *args = data; 1116 + struct drm_syncobj *out_sync = NULL; 1121 1117 struct vc4_exec_info *exec; 1122 1118 struct ww_acquire_ctx acquire_ctx; 1119 + struct dma_fence *in_fence; 1123 1120 int ret = 0; 1124 1121 1125 1122 if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR | ··· 1133 1126 } 1134 1127 1135 1128 if (args->pad2 != 0) { 1136 - DRM_DEBUG("->pad2 must be set to zero\n"); 1129 + DRM_DEBUG("Invalid pad: 0x%08x\n", args->pad2); 1137 1130 return -EINVAL; 1138 1131 } 1139 1132 ··· 1171 1164 } 1172 1165 } 1173 1166 1167 + if (args->in_sync) { 1168 + ret = drm_syncobj_find_fence(file_priv, args->in_sync, 1169 + &in_fence); 1170 + if (ret) 1171 + goto fail; 1172 + 1173 + /* When the fence (or fence array) is exclusively from our 1174 + * context we can skip the wait since jobs are executed in 1175 + * order of their submission through this ioctl and this can 1176 + * only have fences from a prior job. 
1177 + */ 1178 + if (!dma_fence_match_context(in_fence, 1179 + vc4->dma_fence_context)) { 1180 + ret = dma_fence_wait(in_fence, true); 1181 + if (ret) { 1182 + dma_fence_put(in_fence); 1183 + goto fail; 1184 + } 1185 + } 1186 + 1187 + dma_fence_put(in_fence); 1188 + } 1189 + 1174 1190 if (exec->args->bin_cl_size != 0) { 1175 1191 ret = vc4_get_bcl(dev, exec); 1176 1192 if (ret) ··· 1211 1181 if (ret) 1212 1182 goto fail; 1213 1183 1184 + if (args->out_sync) { 1185 + out_sync = drm_syncobj_find(file_priv, args->out_sync); 1186 + if (!out_sync) { 1187 + ret = -EINVAL; 1188 + goto fail; 1189 + } 1190 + 1191 + /* We replace the fence in out_sync in vc4_queue_submit since 1192 + * the render job could execute immediately after that call. 1193 + * If it finishes before our ioctl processing resumes the 1194 + * render job fence could already have been freed. 1195 + */ 1196 + } 1197 + 1214 1198 /* Clear this out of the struct we'll be putting in the queue, 1215 1199 * since it's part of our stack. 1216 1200 */ 1217 1201 exec->args = NULL; 1218 1202 1219 - ret = vc4_queue_submit(dev, exec, &acquire_ctx); 1203 + ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync); 1204 + 1205 + /* The syncobj isn't part of the exec data and we need to free our 1206 + * reference even if job submission failed. 1207 + */ 1208 + if (out_sync) 1209 + drm_syncobj_put(out_sync); 1210 + 1220 1211 if (ret) 1221 1212 goto fail; 1222 1213
+1 -2
drivers/gpu/drm/vc4/vc4_v3d.c
··· 218 218 * overall CMA pool before they make scenes complicated enough to run 219 219 * out of bin space. 220 220 */ 221 - int 222 - vc4_allocate_bin_bo(struct drm_device *drm) 221 + static int vc4_allocate_bin_bo(struct drm_device *drm) 223 222 { 224 223 struct vc4_dev *vc4 = to_vc4_dev(drm); 225 224 struct vc4_v3d *v3d = vc4->v3d;
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 159 159 DRM_RENDER_ALLOW), 160 160 VMW_IOCTL_DEF(VMW_CURSOR_BYPASS, 161 161 vmw_kms_cursor_bypass_ioctl, 162 - DRM_MASTER | DRM_CONTROL_ALLOW), 162 + DRM_MASTER), 163 163 164 164 VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl, 165 - DRM_MASTER | DRM_CONTROL_ALLOW), 165 + DRM_MASTER), 166 166 VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, 167 - DRM_MASTER | DRM_CONTROL_ALLOW), 167 + DRM_MASTER), 168 168 VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl, 169 - DRM_MASTER | DRM_CONTROL_ALLOW), 169 + DRM_MASTER), 170 170 171 171 VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl, 172 172 DRM_AUTH | DRM_RENDER_ALLOW),
+5 -5
drivers/gpu/drm/xen/xen_drm_front.c
··· 188 188 buf_cfg.be_alloc = front_info->cfg.be_alloc; 189 189 190 190 shbuf = xen_drm_front_shbuf_alloc(&buf_cfg); 191 - if (!shbuf) 192 - return -ENOMEM; 191 + if (IS_ERR(shbuf)) 192 + return PTR_ERR(shbuf); 193 193 194 194 ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie); 195 195 if (ret < 0) { ··· 543 543 front_info->drm_info = drm_info; 544 544 545 545 drm_dev = drm_dev_alloc(&xen_drm_driver, dev); 546 - if (!drm_dev) { 547 - ret = -ENOMEM; 546 + if (IS_ERR(drm_dev)) { 547 + ret = PTR_ERR(drm_dev); 548 548 goto fail; 549 549 } 550 550 ··· 778 778 */ 779 779 while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state", 780 780 XenbusStateUnknown) != XenbusStateInitWait) && 781 - to--) 781 + --to) 782 782 msleep(10); 783 783 784 784 if (!to) {
+1 -1
drivers/gpu/drm/xen/xen_drm_front_shbuf.c
··· 383 383 384 384 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 385 385 if (!buf) 386 - return NULL; 386 + return ERR_PTR(-ENOMEM); 387 387 388 388 if (cfg->be_alloc) 389 389 buf->ops = &backend_ops;
+3
drivers/video/hdmi.c
··· 93 93 if (size < length) 94 94 return -ENOSPC; 95 95 96 + if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9) 97 + return -EINVAL; 98 + 96 99 memset(buffer, 0, size); 97 100 98 101 ptr[0] = frame->type;
-1
include/drm/drm_device.h
··· 38 38 struct device *dev; /**< Device structure of bus-device */ 39 39 struct drm_driver *driver; /**< DRM driver managing the device */ 40 40 void *dev_private; /**< DRM driver private data */ 41 - struct drm_minor *control; /**< Control node */ 42 41 struct drm_minor *primary; /**< Primary node */ 43 42 struct drm_minor *render; /**< Render node */ 44 43 bool registered;
+10 -4
include/drm/drm_dp_helper.h
··· 64 64 /* AUX CH addresses */ 65 65 /* DPCD */ 66 66 #define DP_DPCD_REV 0x000 67 + # define DP_DPCD_REV_10 0x10 68 + # define DP_DPCD_REV_11 0x11 69 + # define DP_DPCD_REV_12 0x12 70 + # define DP_DPCD_REV_13 0x13 71 + # define DP_DPCD_REV_14 0x14 67 72 68 73 #define DP_MAX_LINK_RATE 0x001 69 74 ··· 124 119 # define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */ 125 120 126 121 #define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ 122 + # define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */ 127 123 128 124 #define DP_ADAPTER_CAP 0x00f /* 1.2 */ 129 125 # define DP_FORCE_LOAD_SENSE_CAP (1 << 0) ··· 983 977 #define DP_SDP_VSC_EXT_CEA 0x21 /* DP 1.4 */ 984 978 /* 0x80+ CEA-861 infoframe types */ 985 979 986 - struct edp_sdp_header { 980 + struct dp_sdp_header { 987 981 u8 HB0; /* Secondary Data Packet ID */ 988 982 u8 HB1; /* Secondary Data Packet Type */ 989 - u8 HB2; /* 7:5 reserved, 4:0 revision number */ 990 - u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */ 983 + u8 HB2; /* Secondary Data Packet Specific header, Byte 0 */ 984 + u8 HB3; /* Secondary Data packet Specific header, Byte 1 */ 991 985 } __packed; 992 986 993 987 #define EDP_SDP_HEADER_REVISION_MASK 0x1F 994 988 #define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F 995 989 996 990 struct edp_vsc_psr { 997 - struct edp_sdp_header sdp_header; 991 + struct dp_sdp_header sdp_header; 998 992 u8 DB0; /* Stereo Interface */ 999 993 u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */ 1000 994 u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
+11 -12
include/drm/drm_file.h
··· 47 47 * header include loops we need it here for now. 48 48 */ 49 49 50 + /* Note that the order of this enum is ABI (it determines 51 + * /dev/dri/renderD* numbers). 52 + */ 50 53 enum drm_minor_type { 51 54 DRM_MINOR_PRIMARY, 52 55 DRM_MINOR_CONTROL, ··· 183 180 184 181 /** @atomic: True if client understands atomic properties. */ 185 182 unsigned atomic:1; 183 + 184 + /** 185 + * @aspect_ratio_allowed: 186 + * 187 + * True, if client can handle picture aspect ratios, and has requested 188 + * to pass this information along with the mode. 189 + */ 190 + unsigned aspect_ratio_allowed:1; 186 191 187 192 /** 188 193 * @is_master: ··· 357 346 static inline bool drm_is_render_client(const struct drm_file *file_priv) 358 347 { 359 348 return file_priv->minor->type == DRM_MINOR_RENDER; 360 - } 361 - 362 - /** 363 - * drm_is_control_client - is this an open file of the control node 364 - * @file_priv: DRM file 365 - * 366 - * Control nodes are deprecated and in the process of getting removed from the 367 - * DRM userspace API. Do not ever use! 368 - */ 369 - static inline bool drm_is_control_client(const struct drm_file *file_priv) 370 - { 371 - return file_priv->minor->type == DRM_MINOR_CONTROL; 372 349 } 373 350 374 351 int drm_open(struct inode *inode, struct file *filp);
-7
include/drm/drm_ioctl.h
··· 109 109 */ 110 110 DRM_ROOT_ONLY = BIT(2), 111 111 /** 112 - * @DRM_CONTROL_ALLOW: 113 - * 114 - * Deprecated, do not use. Control nodes are in the process of getting 115 - * removed. 116 - */ 117 - DRM_CONTROL_ALLOW = BIT(3), 118 - /** 119 112 * @DRM_UNLOCKED: 120 113 * 121 114 * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
+22
include/drm/drm_modes.h
··· 147 147 148 148 #define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF 149 149 150 + #define DRM_MODE_MATCH_TIMINGS (1 << 0) 151 + #define DRM_MODE_MATCH_CLOCK (1 << 1) 152 + #define DRM_MODE_MATCH_FLAGS (1 << 2) 153 + #define DRM_MODE_MATCH_3D_FLAGS (1 << 3) 154 + #define DRM_MODE_MATCH_ASPECT_RATIO (1 << 4) 155 + 150 156 /** 151 157 * struct drm_display_mode - DRM kernel-internal display mode structure 152 158 * @hdisplay: horizontal display size ··· 411 405 * Field for setting the HDMI picture aspect ratio of a mode. 412 406 */ 413 407 enum hdmi_picture_aspect picture_aspect_ratio; 408 + 409 + /** 410 + * @export_head: 411 + * 412 + * struct list_head for modes to be exposed to the userspace. 413 + * This is to maintain a list of exposed modes while preparing 414 + * user-mode's list in drm_mode_getconnector ioctl. The purpose of this 415 + * list_head only lies in the ioctl function, and is not expected to be 416 + * used outside the function. 417 + * Once used, the stale pointers are not reset, but left as it is, to 418 + * avoid overhead of protecting it by mode_config.mutex. 419 + */ 420 + struct list_head export_head; 414 421 }; 415 422 416 423 /** ··· 509 490 const struct drm_display_mode *src); 510 491 struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, 511 492 const struct drm_display_mode *mode); 493 + bool drm_mode_match(const struct drm_display_mode *mode1, 494 + const struct drm_display_mode *mode2, 495 + unsigned int match_flags); 512 496 bool drm_mode_equal(const struct drm_display_mode *mode1, 513 497 const struct drm_display_mode *mode2); 514 498 bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1,
+1 -1
include/drm/drm_property.h
··· 260 260 uint32_t type); 261 261 struct drm_property *drm_property_create_bool(struct drm_device *dev, 262 262 u32 flags, const char *name); 263 - int drm_property_add_enum(struct drm_property *property, int index, 263 + int drm_property_add_enum(struct drm_property *property, 264 264 uint64_t value, const char *name); 265 265 void drm_property_destroy(struct drm_device *dev, struct drm_property *property); 266 266
+1 -2
include/drm/drm_rect.h
··· 175 175 176 176 bool drm_rect_intersect(struct drm_rect *r, const struct drm_rect *clip); 177 177 bool drm_rect_clip_scaled(struct drm_rect *src, struct drm_rect *dst, 178 - const struct drm_rect *clip, 179 - int hscale, int vscale); 178 + const struct drm_rect *clip); 180 179 int drm_rect_calc_hscale(const struct drm_rect *src, 181 180 const struct drm_rect *dst, 182 181 int min_hscale, int max_hscale);
+155 -81
include/linux/dma-fence.h
··· 94 94 struct dma_fence_cb *cb); 95 95 96 96 /** 97 - * struct dma_fence_cb - callback for dma_fence_add_callback 98 - * @node: used by dma_fence_add_callback to append this struct to fence::cb_list 97 + * struct dma_fence_cb - callback for dma_fence_add_callback() 98 + * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list 99 99 * @func: dma_fence_func_t to call 100 100 * 101 - * This struct will be initialized by dma_fence_add_callback, additional 101 + * This struct will be initialized by dma_fence_add_callback(), additional 102 102 * data can be passed along by embedding dma_fence_cb in another struct. 103 103 */ 104 104 struct dma_fence_cb { ··· 108 108 109 109 /** 110 110 * struct dma_fence_ops - operations implemented for fence 111 - * @get_driver_name: returns the driver name. 112 - * @get_timeline_name: return the name of the context this fence belongs to. 113 - * @enable_signaling: enable software signaling of fence. 114 - * @signaled: [optional] peek whether the fence is signaled, can be null. 115 - * @wait: custom wait implementation, or dma_fence_default_wait. 116 - * @release: [optional] called on destruction of fence, can be null 117 - * @fill_driver_data: [optional] callback to fill in free-form debug info 118 - * Returns amount of bytes filled, or -errno. 119 - * @fence_value_str: [optional] fills in the value of the fence as a string 120 - * @timeline_value_str: [optional] fills in the current value of the timeline 121 - * as a string 122 111 * 123 - * Notes on enable_signaling: 124 - * For fence implementations that have the capability for hw->hw 125 - * signaling, they can implement this op to enable the necessary 126 - * irqs, or insert commands into cmdstream, etc. This is called 127 - * in the first wait() or add_callback() path to let the fence 128 - * implementation know that there is another driver waiting on 129 - * the signal (ie. hw->sw case). 
130 - * 131 - * This function can be called from atomic context, but not 132 - * from irq context, so normal spinlocks can be used. 133 - * 134 - * A return value of false indicates the fence already passed, 135 - * or some failure occurred that made it impossible to enable 136 - * signaling. True indicates successful enabling. 137 - * 138 - * fence->error may be set in enable_signaling, but only when false is 139 - * returned. 140 - * 141 - * Calling dma_fence_signal before enable_signaling is called allows 142 - * for a tiny race window in which enable_signaling is called during, 143 - * before, or after dma_fence_signal. To fight this, it is recommended 144 - * that before enable_signaling returns true an extra reference is 145 - * taken on the fence, to be released when the fence is signaled. 146 - * This will mean dma_fence_signal will still be called twice, but 147 - * the second time will be a noop since it was already signaled. 148 - * 149 - * Notes on signaled: 150 - * May set fence->error if returning true. 151 - * 152 - * Notes on wait: 153 - * Must not be NULL, set to dma_fence_default_wait for default implementation. 154 - * the dma_fence_default_wait implementation should work for any fence, as long 155 - * as enable_signaling works correctly. 156 - * 157 - * Must return -ERESTARTSYS if the wait is intr = true and the wait was 158 - * interrupted, and remaining jiffies if fence has signaled, or 0 if wait 159 - * timed out. Can also return other error values on custom implementations, 160 - * which should be treated as if the fence is signaled. For example a hardware 161 - * lockup could be reported like that. 162 - * 163 - * Notes on release: 164 - * Can be NULL, this function allows additional commands to run on 165 - * destruction of the fence. Can be called from irq context. 166 - * If pointer is set to NULL, kfree will get called instead. 
167 112 */ 168 - 169 113 struct dma_fence_ops { 114 + /** 115 + * @get_driver_name: 116 + * 117 + * Returns the driver name. This is a callback to allow drivers to 118 + * compute the name at runtime, without having it to store permanently 119 + * for each fence, or build a cache of some sort. 120 + * 121 + * This callback is mandatory. 122 + */ 170 123 const char * (*get_driver_name)(struct dma_fence *fence); 124 + 125 + /** 126 + * @get_timeline_name: 127 + * 128 + * Return the name of the context this fence belongs to. This is a 129 + * callback to allow drivers to compute the name at runtime, without 130 + * having it to store permanently for each fence, or build a cache of 131 + * some sort. 132 + * 133 + * This callback is mandatory. 134 + */ 171 135 const char * (*get_timeline_name)(struct dma_fence *fence); 136 + 137 + /** 138 + * @enable_signaling: 139 + * 140 + * Enable software signaling of fence. 141 + * 142 + * For fence implementations that have the capability for hw->hw 143 + * signaling, they can implement this op to enable the necessary 144 + * interrupts, or insert commands into cmdstream, etc, to avoid these 145 + * costly operations for the common case where only hw->hw 146 + * synchronization is required. This is called in the first 147 + * dma_fence_wait() or dma_fence_add_callback() path to let the fence 148 + * implementation know that there is another driver waiting on the 149 + * signal (ie. hw->sw case). 150 + * 151 + * This function can be called from atomic context, but not 152 + * from irq context, so normal spinlocks can be used. 153 + * 154 + * A return value of false indicates the fence already passed, 155 + * or some failure occurred that made it impossible to enable 156 + * signaling. True indicates successful enabling. 157 + * 158 + * &dma_fence.error may be set in enable_signaling, but only when false 159 + * is returned. 
160 + * 161 + * Since many implementations can call dma_fence_signal() even before 162 + * @enable_signaling has been called there's a race window, where the 163 + * dma_fence_signal() might result in the final fence reference being 164 + * released and its memory freed. To avoid this, implementations of this 165 + * callback should grab their own reference using dma_fence_get(), to be 166 + * released when the fence is signalled (through e.g. the interrupt 167 + * handler). 168 + * 169 + * This callback is mandatory. 170 + */ 172 171 bool (*enable_signaling)(struct dma_fence *fence); 172 + 173 + /** 174 + * @signaled: 175 + * 176 + * Peek whether the fence is signaled, as a fastpath optimization for 177 + * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this 178 + * callback does not need to make any guarantees beyond that a fence 179 + * once indicated as signalled must always return true from this 180 + * callback. This callback may return false even if the fence has 181 + * completed already, in this case information hasn't propagated through 182 + * the system yet. See also dma_fence_is_signaled(). 183 + * 184 + * May set &dma_fence.error if returning true. 185 + * 186 + * This callback is optional. 187 + */ 173 188 bool (*signaled)(struct dma_fence *fence); 189 + 190 + /** 191 + * @wait: 192 + * 193 + * Custom wait implementation, or dma_fence_default_wait. 194 + * 195 + * Must not be NULL, set to dma_fence_default_wait for default implementation. 196 + * The dma_fence_default_wait implementation should work for any fence, as long 197 + * as enable_signaling works correctly. 198 + * 199 + * Must return -ERESTARTSYS if the wait is intr = true and the wait was 200 + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait 201 + * timed out. Can also return other error values on custom implementations, 202 + * which should be treated as if the fence is signaled. For example a hardware 203 + * lockup could be reported like that. 
204 + * 205 + * This callback is mandatory. 206 + */ 174 207 signed long (*wait)(struct dma_fence *fence, 175 208 bool intr, signed long timeout); 209 + 210 + /** 211 + * @release: 212 + * 213 + * Called on destruction of fence to release additional resources. 214 + * Can be called from irq context. This callback is optional. If it is 215 + * NULL, then dma_fence_free() is instead called as the default 216 + * implementation. 217 + */ 176 218 void (*release)(struct dma_fence *fence); 177 219 220 + /** 221 + * @fill_driver_data: 222 + * 223 + * Callback to fill in free-form debug info. 224 + * 225 + * Returns amount of bytes filled, or negative error on failure. 226 + * 227 + * This callback is optional. 228 + */ 178 229 int (*fill_driver_data)(struct dma_fence *fence, void *data, int size); 230 + 231 + /** 232 + * @fence_value_str: 233 + * 234 + * Callback to fill in free-form debug info specific to this fence, like 235 + * the sequence number. 236 + * 237 + * This callback is optional. 238 + */ 179 239 void (*fence_value_str)(struct dma_fence *fence, char *str, int size); 240 + 241 + /** 242 + * @timeline_value_str: 243 + * 244 + * Fills in the current value of the timeline as a string, like the 245 + * sequence number. This should match what @fill_driver_data prints for 246 + * the most recently signalled fence (assuming no delayed signalling). 247 + */ 180 248 void (*timeline_value_str)(struct dma_fence *fence, 181 249 char *str, int size); 182 250 }; ··· 257 189 258 190 /** 259 191 * dma_fence_put - decreases refcount of the fence 260 - * @fence: [in] fence to reduce refcount of 192 + * @fence: fence to reduce refcount of 261 193 */ 262 194 static inline void dma_fence_put(struct dma_fence *fence) 263 195 { ··· 267 199 268 200 /** 269 201 * dma_fence_get - increases refcount of the fence 270 - * @fence: [in] fence to increase refcount of 202 + * @fence: fence to increase refcount of 271 203 * 272 204 * Returns the same fence, with refcount increased by 1. 
273 205 */ ··· 281 213 /** 282 214 * dma_fence_get_rcu - get a fence from a reservation_object_list with 283 215 * rcu read lock 284 - * @fence: [in] fence to increase refcount of 216 + * @fence: fence to increase refcount of 285 217 * 286 218 * Function returns NULL if no refcount could be obtained, or the fence. 287 219 */ ··· 295 227 296 228 /** 297 229 * dma_fence_get_rcu_safe - acquire a reference to an RCU tracked fence 298 - * @fencep: [in] pointer to fence to increase refcount of 230 + * @fencep: pointer to fence to increase refcount of 299 231 * 300 232 * Function returns NULL if no refcount could be obtained, or the fence. 301 233 * This function handles acquiring a reference to a fence that may be ··· 357 289 /** 358 290 * dma_fence_is_signaled_locked - Return an indication if the fence 359 291 * is signaled yet. 360 - * @fence: [in] the fence to check 292 + * @fence: the fence to check 361 293 * 362 294 * Returns true if the fence was already signaled, false if not. Since this 363 295 * function doesn't enable signaling, it is not guaranteed to ever return 364 - * true if dma_fence_add_callback, dma_fence_wait or 365 - * dma_fence_enable_sw_signaling haven't been called before. 296 + * true if dma_fence_add_callback(), dma_fence_wait() or 297 + * dma_fence_enable_sw_signaling() haven't been called before. 366 298 * 367 - * This function requires fence->lock to be held. 299 + * This function requires &dma_fence.lock to be held. 300 + * 301 + * See also dma_fence_is_signaled(). 368 302 */ 369 303 static inline bool 370 304 dma_fence_is_signaled_locked(struct dma_fence *fence) ··· 384 314 385 315 /** 386 316 * dma_fence_is_signaled - Return an indication if the fence is signaled yet. 387 - * @fence: [in] the fence to check 317 + * @fence: the fence to check 388 318 * 389 319 * Returns true if the fence was already signaled, false if not. 
Since this 390 320 * function doesn't enable signaling, it is not guaranteed to ever return 391 - * true if dma_fence_add_callback, dma_fence_wait or 392 - * dma_fence_enable_sw_signaling haven't been called before. 321 + * true if dma_fence_add_callback(), dma_fence_wait() or 322 + * dma_fence_enable_sw_signaling() haven't been called before. 393 323 * 394 324 * It's recommended for seqno fences to call dma_fence_signal when the 395 325 * operation is complete, it makes it possible to prevent issues from 396 326 * wraparound between time of issue and time of use by checking the return 397 327 * value of this function before calling hardware-specific wait instructions. 328 + * 329 + * See also dma_fence_is_signaled_locked(). 398 330 */ 399 331 static inline bool 400 332 dma_fence_is_signaled(struct dma_fence *fence) ··· 414 342 415 343 /** 416 344 * __dma_fence_is_later - return if f1 is chronologically later than f2 417 - * @f1: [in] the first fence's seqno 418 - * @f2: [in] the second fence's seqno from the same context 345 + * @f1: the first fence's seqno 346 + * @f2: the second fence's seqno from the same context 419 347 * 420 348 * Returns true if f1 is chronologically later than f2. Both fences must be 421 349 * from the same context, since a seqno is not common across contexts. ··· 427 355 428 356 /** 429 357 * dma_fence_is_later - return if f1 is chronologically later than f2 430 - * @f1: [in] the first fence from the same context 431 - * @f2: [in] the second fence from the same context 358 + * @f1: the first fence from the same context 359 + * @f2: the second fence from the same context 432 360 * 433 361 * Returns true if f1 is chronologically later than f2. Both fences must be 434 362 * from the same context, since a seqno is not re-used across contexts. 
··· 444 372 445 373 /** 446 374 * dma_fence_later - return the chronologically later fence 447 - * @f1: [in] the first fence from the same context 448 - * @f2: [in] the second fence from the same context 375 + * @f1: the first fence from the same context 376 + * @f2: the second fence from the same context 449 377 * 450 378 * Returns NULL if both fences are signaled, otherwise the fence that would be 451 379 * signaled last. Both fences must be from the same context, since a seqno is ··· 470 398 471 399 /** 472 400 * dma_fence_get_status_locked - returns the status upon completion 473 - * @fence: [in] the dma_fence to query 401 + * @fence: the dma_fence to query 474 402 * 475 403 * Drivers can supply an optional error status condition before they signal 476 404 * the fence (to indicate whether the fence was completed due to an error ··· 494 422 495 423 /** 496 424 * dma_fence_set_error - flag an error condition on the fence 497 - * @fence: [in] the dma_fence 498 - * @error: [in] the error to store 425 + * @fence: the dma_fence 426 + * @error: the error to store 499 427 * 500 428 * Drivers can supply an optional error status condition before they signal 501 429 * the fence, to indicate that the fence was completed due to an error ··· 521 449 522 450 /** 523 451 * dma_fence_wait - sleep until the fence gets signaled 524 - * @fence: [in] the fence to wait on 525 - * @intr: [in] if true, do an interruptible wait 452 + * @fence: the fence to wait on 453 + * @intr: if true, do an interruptible wait 526 454 * 527 455 * This function will return -ERESTARTSYS if interrupted by a signal, 528 456 * or 0 if the fence was signaled. Other error values may be ··· 531 459 * Performs a synchronous wait on this fence. It is assumed the caller 532 460 * directly or indirectly holds a reference to the fence, otherwise the 533 461 * fence might be freed before return, resulting in undefined behavior. 462 + * 463 + * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout(). 
534 464 */ 535 465 static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) 536 466 {
+7
include/uapi/drm/drm.h
··· 680 680 */ 681 681 #define DRM_CLIENT_CAP_ATOMIC 3 682 682 683 + /** 684 + * DRM_CLIENT_CAP_ASPECT_RATIO 685 + * 686 + * If set to 1, the DRM core will provide aspect ratio information in modes. 687 + */ 688 + #define DRM_CLIENT_CAP_ASPECT_RATIO 4 689 + 683 690 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ 684 691 struct drm_set_client_cap { 685 692 __u64 capability;
+6
include/uapi/drm/drm_mode.h
··· 93 93 #define DRM_MODE_PICTURE_ASPECT_NONE 0 94 94 #define DRM_MODE_PICTURE_ASPECT_4_3 1 95 95 #define DRM_MODE_PICTURE_ASPECT_16_9 2 96 + #define DRM_MODE_PICTURE_ASPECT_64_27 3 97 + #define DRM_MODE_PICTURE_ASPECT_256_135 4 96 98 97 99 /* Aspect ratio flag bitmask (4 bits 22:19) */ 98 100 #define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19) ··· 104 102 (DRM_MODE_PICTURE_ASPECT_4_3<<19) 105 103 #define DRM_MODE_FLAG_PIC_AR_16_9 \ 106 104 (DRM_MODE_PICTURE_ASPECT_16_9<<19) 105 + #define DRM_MODE_FLAG_PIC_AR_64_27 \ 106 + (DRM_MODE_PICTURE_ASPECT_64_27<<19) 107 + #define DRM_MODE_FLAG_PIC_AR_256_135 \ 108 + (DRM_MODE_PICTURE_ASPECT_256_135<<19) 107 109 108 110 #define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \ 109 111 DRM_MODE_FLAG_NHSYNC | \
+194
include/uapi/drm/v3d_drm.h
··· 1 + /* 2 + * Copyright © 2014-2018 Broadcom 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 
22 + */ 23 + 24 + #ifndef _V3D_DRM_H_ 25 + #define _V3D_DRM_H_ 26 + 27 + #include "drm.h" 28 + 29 + #if defined(__cplusplus) 30 + extern "C" { 31 + #endif 32 + 33 + #define DRM_V3D_SUBMIT_CL 0x00 34 + #define DRM_V3D_WAIT_BO 0x01 35 + #define DRM_V3D_CREATE_BO 0x02 36 + #define DRM_V3D_MMAP_BO 0x03 37 + #define DRM_V3D_GET_PARAM 0x04 38 + #define DRM_V3D_GET_BO_OFFSET 0x05 39 + 40 + #define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl) 41 + #define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo) 42 + #define DRM_IOCTL_V3D_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo) 43 + #define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo) 44 + #define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param) 45 + #define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset) 46 + 47 + /** 48 + * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D 49 + * engine. 50 + * 51 + * This asks the kernel to have the GPU execute an optional binner 52 + * command list, and a render command list. 53 + */ 54 + struct drm_v3d_submit_cl { 55 + /* Pointer to the binner command list. 56 + * 57 + * This is the first set of commands executed, which runs the 58 + * coordinate shader to determine where primitives land on the screen, 59 + * then writes out the state updates and draw calls necessary per tile 60 + * to the tile allocation BO. 61 + */ 62 + __u32 bcl_start; 63 + 64 + /** End address of the BCL (first byte after the BCL) */ 65 + __u32 bcl_end; 66 + 67 + /* Offset of the render command list. 68 + * 69 + * This is the second set of commands executed, which will either 70 + * execute the tiles that have been set up by the BCL, or a fixed set 71 + * of tiles (in the case of RCL-only blits). 
 72 + */ 73 + __u32 rcl_start; 74 + 75 + /** End address of the RCL (first byte after the RCL) */ 76 + __u32 rcl_end; 77 + 78 + /** An optional sync object to wait on before starting the BCL. */ 79 + __u32 in_sync_bcl; 80 + /** An optional sync object to wait on before starting the RCL. */ 81 + __u32 in_sync_rcl; 82 + /** An optional sync object to place the completion fence in. */ 83 + __u32 out_sync; 84 + 85 + /* Offset of the tile alloc memory. 86 + * 87 + * This is optional on V3D 3.3 (where the CL can set the value) but 88 + * required on V3D 4.1. 89 + */ 90 + __u32 qma; 91 + 92 + /** Size of the tile alloc memory. */ 93 + __u32 qms; 94 + 95 + /** Offset of the tile state data array. */ 96 + __u32 qts; 97 + 98 + /* Pointer to an array of u32 GEM handles for the BOs referenced by the job. 99 + */ 100 + __u64 bo_handles; 101 + 102 + /* Number of BO handles passed in (size is that times 4). */ 103 + __u32 bo_handle_count; 104 + 105 + /* Pad, must be zero-filled. */ 106 + __u32 pad; 107 + }; 108 + 109 + /** 110 + * struct drm_v3d_wait_bo - ioctl argument for waiting for 111 + * completion of the last DRM_V3D_SUBMIT_CL on a BO. 112 + * 113 + * This is useful for cases where multiple processes might be 114 + * rendering to a BO and you want to wait for all rendering to be 115 + * completed. 116 + */ 117 + struct drm_v3d_wait_bo { 118 + __u32 handle; 119 + __u32 pad; 120 + __u64 timeout_ns; 121 + }; 122 + 123 + /** 124 + * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs. 125 + * 126 + * There are currently no values for the flags argument, but it may be 127 + * used in a future extension. 128 + */ 129 + struct drm_v3d_create_bo { 130 + __u32 size; 131 + __u32 flags; 132 + /** Returned GEM handle for the BO. */ 133 + __u32 handle; 134 + /** 135 + * Returned offset for the BO in the V3D address space. This offset 136 + * is private to the DRM fd and is valid for the lifetime of the GEM 137 + * handle. 
138 + * 139 + * This offset value will always be nonzero, since various HW 140 + * units treat 0 specially. 141 + */ 142 + __u32 offset; 143 + }; 144 + 145 + /** 146 + * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs. 147 + * 148 + * This doesn't actually perform an mmap. Instead, it returns the 149 + * offset you need to use in an mmap on the DRM device node. This 150 + * means that tools like valgrind end up knowing about the mapped 151 + * memory. 152 + * 153 + * There are currently no values for the flags argument, but it may be 154 + * used in a future extension. 155 + */ 156 + struct drm_v3d_mmap_bo { 157 + /** Handle for the object being mapped. */ 158 + __u32 handle; 159 + __u32 flags; 160 + /** Offset into the DRM node to use for a subsequent mmap call. */ 161 + __u64 offset; 162 + }; 163 + 164 + enum drm_v3d_param { 165 + DRM_V3D_PARAM_V3D_UIFCFG, 166 + DRM_V3D_PARAM_V3D_HUB_IDENT1, 167 + DRM_V3D_PARAM_V3D_HUB_IDENT2, 168 + DRM_V3D_PARAM_V3D_HUB_IDENT3, 169 + DRM_V3D_PARAM_V3D_CORE0_IDENT0, 170 + DRM_V3D_PARAM_V3D_CORE0_IDENT1, 171 + DRM_V3D_PARAM_V3D_CORE0_IDENT2, 172 + }; 173 + 174 + struct drm_v3d_get_param { 175 + __u32 param; 176 + __u32 pad; 177 + __u64 value; 178 + }; 179 + 180 + /** 181 + * Returns the offset for the BO in the V3D address space for this DRM fd. 182 + * This is the same value returned by drm_v3d_create_bo, if that was called 183 + * from this DRM fd. 184 + */ 185 + struct drm_v3d_get_bo_offset { 186 + __u32 handle; 187 + __u32 offset; 188 + }; 189 + 190 + #if defined(__cplusplus) 191 + } 192 + #endif 193 + 194 + #endif /* _V3D_DRM_H_ */
+10 -3
include/uapi/drm/vc4_drm.h
··· 183 183 /* ID of the perfmon to attach to this job. 0 means no perfmon. */ 184 184 __u32 perfmonid; 185 185 186 - /* Unused field to align this struct on 64 bits. Must be set to 0. 187 - * If one ever needs to add an u32 field to this struct, this field 188 - * can be used. 186 + /* Syncobj handle to wait on. If set, processing of this render job 187 + * will not start until the syncobj is signaled. 0 means ignore. 189 188 */ 189 + __u32 in_sync; 190 + 191 + /* Syncobj handle to export fence to. If set, the fence in the syncobj 192 + * will be replaced with a fence that signals upon completion of this 193 + * render job. 0 means ignore. 194 + */ 195 + __u32 out_sync; 196 + 190 197 __u32 pad2; 191 198 }; 192 199
+1
include/uapi/linux/virtio_gpu.h
··· 260 260 }; 261 261 262 262 #define VIRTIO_GPU_CAPSET_VIRGL 1 263 + #define VIRTIO_GPU_CAPSET_VIRGL2 2 263 264 264 265 /* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ 265 266 struct virtio_gpu_get_capset_info {