Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2022-11-10-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 6.2:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
- atomic-helper: Add begin_fb_access and end_fb_access hooks
- fb-helper: Rework to move fb emulation into helpers
- scheduler: rework entity flush, kill and fini
- ttm: Optimize pool allocations

Driver Changes:
- amdgpu: scheduler rework
- hdlcd: Switch to DRM-managed resources
- ingenic: Fix registration error path
- lcdif: FIFO threshold tuning
- meson: Fix return type of cvbs' mode_valid
- ofdrm: multiple fixes (kconfig, types, endianness)
- sun4i: A100 and D1 support
- panel:
- New Panel: Jadard JD9365DA-H3

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20221110083612.g63eaocoaa554soh@houat

+2925 -1385
+21 -9
Documentation/devicetree/bindings/display/allwinner,sun6i-a31-mipi-dsi.yaml
··· 12 12 13 13 properties: 14 14 compatible: 15 - enum: 16 - - allwinner,sun6i-a31-mipi-dsi 17 - - allwinner,sun50i-a64-mipi-dsi 15 + oneOf: 16 + - enum: 17 + - allwinner,sun6i-a31-mipi-dsi 18 + - allwinner,sun50i-a64-mipi-dsi 19 + - allwinner,sun50i-a100-mipi-dsi 20 + - items: 21 + - const: allwinner,sun20i-d1-mipi-dsi 22 + - const: allwinner,sun50i-a100-mipi-dsi 18 23 19 24 reg: 20 25 maxItems: 1 ··· 64 59 - phys 65 60 - phy-names 66 61 - resets 67 - - vcc-dsi-supply 68 62 - port 69 63 70 64 allOf: ··· 72 68 properties: 73 69 compatible: 74 70 contains: 75 - const: allwinner,sun6i-a31-mipi-dsi 71 + enum: 72 + - allwinner,sun6i-a31-mipi-dsi 73 + - allwinner,sun50i-a100-mipi-dsi 76 74 77 75 then: 78 76 properties: ··· 84 78 required: 85 79 - clock-names 86 80 81 + else: 82 + properties: 83 + clocks: 84 + maxItems: 1 85 + 87 86 - if: 88 87 properties: 89 88 compatible: 90 89 contains: 91 - const: allwinner,sun50i-a64-mipi-dsi 90 + enum: 91 + - allwinner,sun6i-a31-mipi-dsi 92 + - allwinner,sun50i-a64-mipi-dsi 92 93 93 94 then: 94 - properties: 95 - clocks: 96 - minItems: 1 95 + required: 96 + - vcc-dsi-supply 97 97 98 98 unevaluatedProperties: false 99 99
+70
Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/display/panel/jadard,jd9365da-h3.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: Jadard JD9365DA-HE WXGA DSI panel 8 + 9 + maintainers: 10 + - Jagan Teki <jagan@edgeble.ai> 11 + 12 + allOf: 13 + - $ref: panel-common.yaml# 14 + 15 + properties: 16 + compatible: 17 + items: 18 + - enum: 19 + - chongzhou,cz101b4001 20 + - const: jadard,jd9365da-h3 21 + 22 + reg: true 23 + 24 + vdd-supply: 25 + description: supply regulator for VDD, usually 3.3V 26 + 27 + vccio-supply: 28 + description: supply regulator for VCCIO, usually 1.8V 29 + 30 + reset-gpios: true 31 + 32 + backlight: true 33 + 34 + port: true 35 + 36 + required: 37 + - compatible 38 + - reg 39 + - vdd-supply 40 + - vccio-supply 41 + - reset-gpios 42 + 43 + additionalProperties: false 44 + 45 + examples: 46 + - | 47 + #include <dt-bindings/gpio/gpio.h> 48 + #include <dt-bindings/pinctrl/rockchip.h> 49 + 50 + dsi { 51 + #address-cells = <1>; 52 + #size-cells = <0>; 53 + 54 + panel@0 { 55 + compatible = "chongzhou,cz101b4001", "jadard,jd9365da-h3"; 56 + reg = <0>; 57 + vdd-supply = <&lcd_3v3>; 58 + vccio-supply = <&vcca_1v8>; 59 + reset-gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>; 60 + backlight = <&backlight>; 61 + 62 + port { 63 + mipi_in_panel: endpoint { 64 + remote-endpoint = <&mipi_out_panel>; 65 + }; 66 + }; 67 + }; 68 + }; 69 + 70 + ...
+4
Documentation/devicetree/bindings/vendor-prefixes.yaml
··· 246 246 description: ChipOne 247 247 "^chipspark,.*": 248 248 description: ChipSPARK 249 + "^chongzhou,.*": 250 + description: Shenzhen Chongzhou Electronic Technology Co., Ltd 249 251 "^chrontel,.*": 250 252 description: Chrontel, Inc. 251 253 "^chrp,.*": ··· 641 639 description: ITian Corporation 642 640 "^iwave,.*": 643 641 description: iWave Systems Technologies Pvt. Ltd. 642 + "^jadard,.*": 643 + description: Jadard Technology Inc. 644 644 "^jdi,.*": 645 645 description: Japan Display Inc. 646 646 "^jedec,.*":
+3
Documentation/gpu/drm-kms-helpers.rst
··· 116 116 .. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c 117 117 :export: 118 118 119 + .. kernel-doc:: drivers/gpu/drm/drm_fbdev_generic.c 120 + :export: 121 + 119 122 format Helper Functions Reference 120 123 ================================= 121 124
+7 -1
MAINTAINERS
··· 6503 6503 F: drivers/gpu/drm/i810/ 6504 6504 F: include/uapi/drm/i810_drm.h 6505 6505 6506 + DRM DRIVER FOR JADARD JD9365DA-H3 MIPI-DSI LCD PANELS 6507 + M: Jagan Teki <jagan@edgeble.ai> 6508 + S: Maintained 6509 + F: Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml 6510 + F: drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c 6511 + 6506 6512 DRM DRIVER FOR LOGICVC DISPLAY CONTROLLER 6507 6513 M: Paul Kocialkowski <paul.kocialkowski@bootlin.com> 6508 6514 S: Supported ··· 7119 7113 F: include/drm/ttm/ 7120 7114 7121 7115 DRM GPU SCHEDULER 7122 - M: Andrey Grodzovsky <andrey.grodzovsky@amd.com> 7116 + M: Luben Tuikov <luben.tuikov@amd.com> 7123 7117 L: dri-devel@lists.freedesktop.org 7124 7118 S: Maintained 7125 7119 T: git git://anongit.freedesktop.org/drm/drm-misc
+3 -1
drivers/gpu/drm/Makefile
··· 117 117 drm_self_refresh_helper.o \ 118 118 drm_simple_kms_helper.o 119 119 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o 120 - drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o 120 + drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \ 121 + drm_fbdev_generic.o \ 122 + drm_fb_helper.o 121 123 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o 122 124 123 125 #
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 673 673 goto err; 674 674 } 675 675 676 - ret = amdgpu_job_alloc(adev, 1, &job, NULL); 676 + ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job); 677 677 if (ret) 678 678 goto err; 679 679
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
··· 26 26 27 27 #include <drm/display/drm_dp_helper.h> 28 28 #include <drm/drm_edid.h> 29 - #include <drm/drm_fb_helper.h> 30 29 #include <drm/drm_probe_helper.h> 31 30 #include <drm/amdgpu_drm.h> 32 31 #include "amdgpu.h"
+24 -18
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 291 291 return -EINVAL; 292 292 293 293 for (i = 0; i < p->gang_size; ++i) { 294 - ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); 295 - if (ret) 296 - goto free_all_kdata; 297 - 298 - ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i], 299 - &fpriv->vm); 294 + ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm, 295 + num_ibs[i], &p->jobs[i]); 300 296 if (ret) 301 297 goto free_all_kdata; 302 298 } ··· 426 430 dma_fence_put(old); 427 431 } 428 432 429 - r = amdgpu_sync_fence(&p->gang_leader->sync, fence); 433 + r = amdgpu_sync_fence(&p->sync, fence); 430 434 dma_fence_put(fence); 431 435 if (r) 432 436 return r; ··· 448 452 return r; 449 453 } 450 454 451 - r = amdgpu_sync_fence(&p->gang_leader->sync, fence); 452 - dma_fence_put(fence); 455 + r = amdgpu_sync_fence(&p->sync, fence); 456 + if (r) 457 + goto error; 453 458 459 + /* 460 + * When we have an explicit dependency it might be necessary to insert a 461 + * pipeline sync to make sure that all caches etc are flushed and the 462 + * next job actually sees the results from the previous one. 
463 + */ 464 + if (fence->context == p->gang_leader->base.entity->fence_context) 465 + r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence); 466 + 467 + error: 468 + dma_fence_put(fence); 454 469 return r; 455 470 } 456 471 ··· 1108 1101 if (r) 1109 1102 return r; 1110 1103 1111 - r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update); 1104 + r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update); 1112 1105 if (r) 1113 1106 return r; 1114 1107 ··· 1119 1112 if (r) 1120 1113 return r; 1121 1114 1122 - r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); 1115 + r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update); 1123 1116 if (r) 1124 1117 return r; 1125 1118 } ··· 1138 1131 if (r) 1139 1132 return r; 1140 1133 1141 - r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); 1134 + r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update); 1142 1135 if (r) 1143 1136 return r; 1144 1137 } ··· 1151 1144 if (r) 1152 1145 return r; 1153 1146 1154 - r = amdgpu_sync_fence(&job->sync, vm->last_update); 1147 + r = amdgpu_sync_fence(&p->sync, vm->last_update); 1155 1148 if (r) 1156 1149 return r; 1157 1150 ··· 1183 1176 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) 1184 1177 { 1185 1178 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 1186 - struct amdgpu_job *leader = p->gang_leader; 1187 1179 struct amdgpu_bo_list_entry *e; 1188 1180 unsigned int i; 1189 1181 int r; ··· 1194 1188 1195 1189 sync_mode = amdgpu_bo_explicit_sync(bo) ? 
1196 1190 AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER; 1197 - r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode, 1191 + r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode, 1198 1192 &fpriv->vm); 1199 1193 if (r) 1200 1194 return r; 1201 1195 } 1202 1196 1203 - for (i = 0; i < p->gang_size - 1; ++i) { 1204 - r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); 1197 + for (i = 0; i < p->gang_size; ++i) { 1198 + r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]); 1205 1199 if (r) 1206 1200 return r; 1207 1201 } ··· 1247 1241 struct dma_fence *fence; 1248 1242 1249 1243 fence = &p->jobs[i]->base.s_fence->scheduled; 1250 - r = amdgpu_sync_fence(&leader->sync, fence); 1244 + r = drm_sched_job_add_dependency(&leader->base, fence); 1251 1245 if (r) 1252 1246 goto error_cleanup; 1253 1247 }
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
··· 75 75 76 76 unsigned num_post_deps; 77 77 struct amdgpu_cs_post_dep *post_deps; 78 + 79 + struct amdgpu_sync sync; 78 80 }; 79 81 80 82 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 37 37 #include <linux/pci-p2pdma.h> 38 38 39 39 #include <drm/drm_atomic_helper.h> 40 + #include <drm/drm_fb_helper.h> 40 41 #include <drm/drm_probe_helper.h> 41 42 #include <drm/amdgpu_drm.h> 42 43 #include <linux/vgaarb.h>
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 39 39 #include <linux/pm_runtime.h> 40 40 #include <drm/drm_crtc_helper.h> 41 41 #include <drm/drm_edid.h> 42 - #include <drm/drm_gem_framebuffer_helper.h> 43 42 #include <drm/drm_fb_helper.h> 43 + #include <drm/drm_gem_framebuffer_helper.h> 44 44 #include <drm/drm_fourcc.h> 45 45 #include <drm/drm_vblank.h> 46 46 ··· 1214 1214 1215 1215 const struct drm_mode_config_funcs amdgpu_mode_funcs = { 1216 1216 .fb_create = amdgpu_display_user_framebuffer_create, 1217 - .output_poll_changed = drm_fb_helper_output_poll_changed, 1218 1217 }; 1219 1218 1220 1219 static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 25 25 #include <drm/amdgpu_drm.h> 26 26 #include <drm/drm_aperture.h> 27 27 #include <drm/drm_drv.h> 28 + #include <drm/drm_fbdev_generic.h> 28 29 #include <drm/drm_gem.h> 29 30 #include <drm/drm_vblank.h> 30 31 #include <drm/drm_managed.h>
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 182 182 183 183 need_ctx_switch = ring->current_ctx != fence_ctx; 184 184 if (ring->funcs->emit_pipeline_sync && job && 185 - ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || 185 + ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) || 186 186 (amdgpu_sriov_vf(adev) && need_ctx_switch) || 187 187 amdgpu_vm_need_pipeline_sync(ring, job))) { 188 188 need_pipe_sync = true;
+27 -29
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 170 170 * 171 171 * @vm: vm to allocate id for 172 172 * @ring: ring we want to submit job to 173 - * @sync: sync object where we add dependencies 174 173 * @idle: resulting idle VMID 174 + * @fence: fence to wait for if no id could be grabbed 175 175 * 176 176 * Try to find an idle VMID, if none is idle add a fence to wait to the sync 177 177 * object. Returns -ENOMEM when we are out of memory. 178 178 */ 179 179 static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, 180 180 struct amdgpu_ring *ring, 181 - struct amdgpu_sync *sync, 182 - struct amdgpu_vmid **idle) 181 + struct amdgpu_vmid **idle, 182 + struct dma_fence **fence) 183 183 { 184 184 struct amdgpu_device *adev = ring->adev; 185 185 unsigned vmhub = ring->funcs->vmhub; 186 186 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; 187 187 struct dma_fence **fences; 188 188 unsigned i; 189 - int r; 190 189 191 - if (!dma_fence_is_signaled(ring->vmid_wait)) 192 - return amdgpu_sync_fence(sync, ring->vmid_wait); 190 + if (!dma_fence_is_signaled(ring->vmid_wait)) { 191 + *fence = dma_fence_get(ring->vmid_wait); 192 + return 0; 193 + } 193 194 194 195 fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL); 195 196 if (!fences) ··· 229 228 return -ENOMEM; 230 229 } 231 230 232 - r = amdgpu_sync_fence(sync, &array->base); 231 + *fence = dma_fence_get(&array->base); 233 232 dma_fence_put(ring->vmid_wait); 234 233 ring->vmid_wait = &array->base; 235 - return r; 234 + return 0; 236 235 } 237 236 kfree(fences); 238 237 ··· 244 243 * 245 244 * @vm: vm to allocate id for 246 245 * @ring: ring we want to submit job to 247 - * @sync: sync object where we add dependencies 248 - * @fence: fence protecting ID from reuse 249 246 * @job: job who wants to use the VMID 250 247 * @id: resulting VMID 248 + * @fence: fence to wait for if no id could be grabbed 251 249 * 252 250 * Try to assign a reserved VMID. 
253 251 */ 254 252 static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, 255 253 struct amdgpu_ring *ring, 256 - struct amdgpu_sync *sync, 257 - struct dma_fence *fence, 258 254 struct amdgpu_job *job, 259 - struct amdgpu_vmid **id) 255 + struct amdgpu_vmid **id, 256 + struct dma_fence **fence) 260 257 { 261 258 struct amdgpu_device *adev = ring->adev; 262 259 unsigned vmhub = ring->funcs->vmhub; ··· 281 282 tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); 282 283 if (tmp) { 283 284 *id = NULL; 284 - return amdgpu_sync_fence(sync, tmp); 285 + *fence = dma_fence_get(tmp); 286 + return 0; 285 287 } 286 288 needs_flush = true; 287 289 } ··· 290 290 /* Good we can use this VMID. Remember this submission as 291 291 * user of the VMID. 292 292 */ 293 - r = amdgpu_sync_fence(&(*id)->active, fence); 293 + r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished); 294 294 if (r) 295 295 return r; 296 296 ··· 304 304 * 305 305 * @vm: vm to allocate id for 306 306 * @ring: ring we want to submit job to 307 - * @sync: sync object where we add dependencies 308 - * @fence: fence protecting ID from reuse 309 307 * @job: job who wants to use the VMID 310 308 * @id: resulting VMID 309 + * @fence: fence to wait for if no id could be grabbed 311 310 * 312 311 * Try to reuse a VMID for this submission. 313 312 */ 314 313 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, 315 314 struct amdgpu_ring *ring, 316 - struct amdgpu_sync *sync, 317 - struct dma_fence *fence, 318 315 struct amdgpu_job *job, 319 - struct amdgpu_vmid **id) 316 + struct amdgpu_vmid **id, 317 + struct dma_fence **fence) 320 318 { 321 319 struct amdgpu_device *adev = ring->adev; 322 320 unsigned vmhub = ring->funcs->vmhub; ··· 350 352 /* Good, we can use this VMID. Remember this submission as 351 353 * user of the VMID. 
352 354 */ 353 - r = amdgpu_sync_fence(&(*id)->active, fence); 355 + r = amdgpu_sync_fence(&(*id)->active, 356 + &job->base.s_fence->finished); 354 357 if (r) 355 358 return r; 356 359 ··· 369 370 * 370 371 * @vm: vm to allocate id for 371 372 * @ring: ring we want to submit job to 372 - * @sync: sync object where we add dependencies 373 - * @fence: fence protecting ID from reuse 374 373 * @job: job who wants to use the VMID 374 + * @fence: fence to wait for if no id could be grabbed 375 375 * 376 376 * Allocate an id for the vm, adding fences to the sync obj as necessary. 377 377 */ 378 378 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 379 - struct amdgpu_sync *sync, struct dma_fence *fence, 380 - struct amdgpu_job *job) 379 + struct amdgpu_job *job, struct dma_fence **fence) 381 380 { 382 381 struct amdgpu_device *adev = ring->adev; 383 382 unsigned vmhub = ring->funcs->vmhub; ··· 385 388 int r = 0; 386 389 387 390 mutex_lock(&id_mgr->lock); 388 - r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle); 391 + r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence); 389 392 if (r || !idle) 390 393 goto error; 391 394 392 395 if (vm->reserved_vmid[vmhub]) { 393 - r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id); 396 + r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); 394 397 if (r || !id) 395 398 goto error; 396 399 } else { 397 - r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id); 400 + r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence); 398 401 if (r) 399 402 goto error; 400 403 ··· 403 406 id = idle; 404 407 405 408 /* Remember this submission as user of the VMID */ 406 - r = amdgpu_sync_fence(&id->active, fence); 409 + r = amdgpu_sync_fence(&id->active, 410 + &job->base.s_fence->finished); 407 411 if (r) 408 412 goto error; 409 413
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
··· 84 84 struct amdgpu_vm *vm, 85 85 unsigned vmhub); 86 86 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 87 - struct amdgpu_sync *sync, struct dma_fence *fence, 88 - struct amdgpu_job *job); 87 + struct amdgpu_job *job, struct dma_fence **fence); 89 88 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, 90 89 unsigned vmid); 91 90 void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+33 -48
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 88 88 return DRM_GPU_SCHED_STAT_NOMINAL; 89 89 } 90 90 91 - int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 92 - struct amdgpu_job **job, struct amdgpu_vm *vm) 91 + int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, 92 + struct drm_sched_entity *entity, void *owner, 93 + unsigned int num_ibs, struct amdgpu_job **job) 93 94 { 94 95 if (num_ibs == 0) 95 96 return -EINVAL; ··· 106 105 (*job)->base.sched = &adev->rings[0]->sched; 107 106 (*job)->vm = vm; 108 107 109 - amdgpu_sync_create(&(*job)->sync); 110 - amdgpu_sync_create(&(*job)->sched_sync); 108 + amdgpu_sync_create(&(*job)->explicit_sync); 111 109 (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); 112 110 (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; 113 111 114 - return 0; 112 + if (!entity) 113 + return 0; 114 + 115 + return drm_sched_job_init(&(*job)->base, entity, owner); 115 116 } 116 117 117 - int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 118 - enum amdgpu_ib_pool_type pool_type, 119 - struct amdgpu_job **job) 118 + int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, 119 + struct drm_sched_entity *entity, void *owner, 120 + size_t size, enum amdgpu_ib_pool_type pool_type, 121 + struct amdgpu_job **job) 120 122 { 121 123 int r; 122 124 123 - r = amdgpu_job_alloc(adev, 1, job, NULL); 125 + r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job); 124 126 if (r) 125 127 return r; 126 128 127 129 (*job)->num_ibs = 1; 128 130 r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); 129 - if (r) 131 + if (r) { 132 + if (entity) 133 + drm_sched_job_cleanup(&(*job)->base); 130 134 kfree(*job); 135 + } 131 136 132 137 return r; 133 138 } ··· 173 166 174 167 drm_sched_job_cleanup(s_job); 175 168 176 - amdgpu_sync_free(&job->sync); 177 - amdgpu_sync_free(&job->sched_sync); 178 - 169 + amdgpu_sync_free(&job->explicit_sync); 179 170 dma_fence_put(&job->hw_fence); 180 171 } 181 172 ··· 195 190 196 191 void 
amdgpu_job_free(struct amdgpu_job *job) 197 192 { 193 + if (job->base.entity) 194 + drm_sched_job_cleanup(&job->base); 195 + 198 196 amdgpu_job_free_resources(job); 199 - amdgpu_sync_free(&job->sync); 200 - amdgpu_sync_free(&job->sched_sync); 197 + amdgpu_sync_free(&job->explicit_sync); 201 198 if (job->gang_submit != &job->base.s_fence->scheduled) 202 199 dma_fence_put(job->gang_submit); 203 200 ··· 209 202 dma_fence_put(&job->hw_fence); 210 203 } 211 204 212 - int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, 213 - void *owner, struct dma_fence **f) 205 + struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) 214 206 { 215 - int r; 216 - 217 - if (!f) 218 - return -EINVAL; 219 - 220 - r = drm_sched_job_init(&job->base, entity, owner); 221 - if (r) 222 - return r; 207 + struct dma_fence *f; 223 208 224 209 drm_sched_job_arm(&job->base); 225 - 226 - *f = dma_fence_get(&job->base.s_fence->finished); 210 + f = dma_fence_get(&job->base.s_fence->finished); 227 211 amdgpu_job_free_resources(job); 228 212 drm_sched_entity_push_job(&job->base); 229 213 230 - return 0; 214 + return f; 231 215 } 232 216 233 217 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, ··· 236 238 return 0; 237 239 } 238 240 239 - static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, 240 - struct drm_sched_entity *s_entity) 241 + static struct dma_fence * 242 + amdgpu_job_prepare_job(struct drm_sched_job *sched_job, 243 + struct drm_sched_entity *s_entity) 241 244 { 242 245 struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched); 243 246 struct amdgpu_job *job = to_amdgpu_job(sched_job); 244 - struct amdgpu_vm *vm = job->vm; 245 - struct dma_fence *fence; 247 + struct dma_fence *fence = NULL; 246 248 int r; 247 249 248 - fence = amdgpu_sync_get_fence(&job->sync); 249 - if (fence && drm_sched_dependency_optimized(fence, s_entity)) { 250 - r = amdgpu_sync_fence(&job->sched_sync, fence); 251 - if (r) 252 - 
DRM_ERROR("Error adding fence (%d)\n", r); 253 - } 254 - 255 - while (fence == NULL && vm && !job->vmid) { 256 - r = amdgpu_vmid_grab(vm, ring, &job->sync, 257 - &job->base.s_fence->finished, 258 - job); 250 + while (!fence && job->vm && !job->vmid) { 251 + r = amdgpu_vmid_grab(job->vm, ring, job, &fence); 259 252 if (r) 260 253 DRM_ERROR("Error getting VM ID (%d)\n", r); 261 - 262 - fence = amdgpu_sync_get_fence(&job->sync); 263 254 } 264 255 265 256 if (!fence && job->gang_submit) ··· 267 280 268 281 job = to_amdgpu_job(sched_job); 269 282 finished = &job->base.s_fence->finished; 270 - 271 - BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); 272 283 273 284 trace_amdgpu_sched_run_job(job); 274 285 ··· 326 341 } 327 342 328 343 const struct drm_sched_backend_ops amdgpu_sched_ops = { 329 - .dependency = amdgpu_job_dependency, 344 + .prepare_job = amdgpu_job_prepare_job, 330 345 .run_job = amdgpu_job_run, 331 346 .timedout_job = amdgpu_job_timedout, 332 347 .free_job = amdgpu_job_free_cb
+9 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
··· 47 47 struct amdgpu_job { 48 48 struct drm_sched_job base; 49 49 struct amdgpu_vm *vm; 50 - struct amdgpu_sync sync; 51 - struct amdgpu_sync sched_sync; 50 + struct amdgpu_sync explicit_sync; 52 51 struct dma_fence hw_fence; 53 52 struct dma_fence *gang_submit; 54 53 uint32_t preamble_status; ··· 77 78 return to_amdgpu_ring(job->base.entity->rq->sched); 78 79 } 79 80 80 - int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 81 - struct amdgpu_job **job, struct amdgpu_vm *vm); 82 - int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 83 - enum amdgpu_ib_pool_type pool, struct amdgpu_job **job); 81 + int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, 82 + struct drm_sched_entity *entity, void *owner, 83 + unsigned int num_ibs, struct amdgpu_job **job); 84 + int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, 85 + struct drm_sched_entity *entity, void *owner, 86 + size_t size, enum amdgpu_ib_pool_type pool_type, 87 + struct amdgpu_job **job); 84 88 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, 85 89 struct amdgpu_bo *gws, struct amdgpu_bo *oa); 86 90 void amdgpu_job_free_resources(struct amdgpu_job *job); 87 91 void amdgpu_job_set_gang_leader(struct amdgpu_job *job, 88 92 struct amdgpu_job *leader); 89 93 void amdgpu_job_free(struct amdgpu_job *job); 90 - int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, 91 - void *owner, struct dma_fence **f); 94 + struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job); 92 95 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, 93 96 struct dma_fence **fence); 94 97
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
··· 150 150 const unsigned ib_size_dw = 16; 151 151 int i, r; 152 152 153 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 154 - AMDGPU_IB_POOL_DIRECT, &job); 153 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 154 + AMDGPU_IB_POOL_DIRECT, &job); 155 155 if (r) 156 156 return r; 157 157 158 158 ib = &job->ibs[0]; 159 159 160 - ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); 160 + ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, 161 + PACKETJ_TYPE0); 161 162 ib->ptr[1] = 0xDEADBEEF; 162 163 for (i = 2; i < 16; i += 2) { 163 164 ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 29 29 #include "amdgpu.h" 30 30 #include <drm/amdgpu_drm.h> 31 31 #include <drm/drm_drv.h> 32 + #include <drm/drm_fb_helper.h> 32 33 #include "amdgpu_uvd.h" 33 34 #include "amdgpu_vce.h" 34 35 #include "atom.h"
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 36 36 #include <drm/drm_encoder.h> 37 37 #include <drm/drm_fixed.h> 38 38 #include <drm/drm_crtc_helper.h> 39 - #include <drm/drm_fb_helper.h> 40 39 #include <drm/drm_framebuffer.h> 41 40 #include <drm/drm_probe_helper.h> 42 41 #include <linux/i2c.h>
+42 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
··· 259 259 return 0; 260 260 } 261 261 262 + /* Free the entry back to the slab */ 263 + static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e) 264 + { 265 + hash_del(&e->node); 266 + dma_fence_put(e->fence); 267 + kmem_cache_free(amdgpu_sync_slab, e); 268 + } 269 + 262 270 /** 263 271 * amdgpu_sync_peek_fence - get the next fence not signaled yet 264 272 * ··· 288 280 struct drm_sched_fence *s_fence = to_drm_sched_fence(f); 289 281 290 282 if (dma_fence_is_signaled(f)) { 291 - hash_del(&e->node); 292 - dma_fence_put(f); 293 - kmem_cache_free(amdgpu_sync_slab, e); 283 + amdgpu_sync_entry_free(e); 294 284 continue; 295 285 } 296 286 if (ring && s_fence) { ··· 361 355 if (r) 362 356 return r; 363 357 } else { 364 - hash_del(&e->node); 365 - dma_fence_put(f); 366 - kmem_cache_free(amdgpu_sync_slab, e); 358 + amdgpu_sync_entry_free(e); 367 359 } 368 360 } 369 361 362 + return 0; 363 + } 364 + 365 + /** 366 + * amdgpu_sync_push_to_job - push fences into job 367 + * @sync: sync object to get the fences from 368 + * @job: job to push the fences into 369 + * 370 + * Add all unsignaled fences from sync to job. 
371 + */ 372 + int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job) 373 + { 374 + struct amdgpu_sync_entry *e; 375 + struct hlist_node *tmp; 376 + struct dma_fence *f; 377 + int i, r; 378 + 379 + hash_for_each_safe(sync->fences, i, tmp, e, node) { 380 + f = e->fence; 381 + if (dma_fence_is_signaled(f)) { 382 + amdgpu_sync_entry_free(e); 383 + continue; 384 + } 385 + 386 + dma_fence_get(f); 387 + r = drm_sched_job_add_dependency(&job->base, f); 388 + if (r) 389 + return r; 390 + } 370 391 return 0; 371 392 } 372 393 ··· 408 375 if (r) 409 376 return r; 410 377 411 - hash_del(&e->node); 412 - dma_fence_put(e->fence); 413 - kmem_cache_free(amdgpu_sync_slab, e); 378 + amdgpu_sync_entry_free(e); 414 379 } 415 380 416 381 return 0; ··· 427 396 struct hlist_node *tmp; 428 397 unsigned int i; 429 398 430 - hash_for_each_safe(sync->fences, i, tmp, e, node) { 431 - hash_del(&e->node); 432 - dma_fence_put(e->fence); 433 - kmem_cache_free(amdgpu_sync_slab, e); 434 - } 399 + hash_for_each_safe(sync->fences, i, tmp, e, node) 400 + amdgpu_sync_entry_free(e); 435 401 } 436 402 437 403 /**
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
··· 30 30 struct dma_resv; 31 31 struct amdgpu_device; 32 32 struct amdgpu_ring; 33 + struct amdgpu_job; 33 34 34 35 enum amdgpu_sync_mode { 35 36 AMDGPU_SYNC_ALWAYS, ··· 55 54 struct amdgpu_ring *ring); 56 55 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); 57 56 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone); 57 + int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job); 58 58 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); 59 59 void amdgpu_sync_free(struct amdgpu_sync *sync); 60 60 int amdgpu_sync_init(void);
+26 -46
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 189 189 struct amdgpu_device *adev = ring->adev; 190 190 unsigned offset, num_pages, num_dw, num_bytes; 191 191 uint64_t src_addr, dst_addr; 192 - struct dma_fence *fence; 193 192 struct amdgpu_job *job; 194 193 void *cpu_addr; 195 194 uint64_t flags; ··· 228 229 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); 229 230 num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE; 230 231 231 - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, 232 + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, 233 + AMDGPU_FENCE_OWNER_UNDEFINED, 234 + num_dw * 4 + num_bytes, 232 235 AMDGPU_IB_POOL_DELAYED, &job); 233 236 if (r) 234 237 return r; ··· 270 269 } 271 270 } 272 271 273 - r = amdgpu_job_submit(job, &adev->mman.entity, 274 - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); 275 - if (r) 276 - goto error_free; 277 - 278 - dma_fence_put(fence); 279 - 280 - return r; 281 - 282 - error_free: 283 - amdgpu_job_free(job); 284 - return r; 272 + dma_fence_put(amdgpu_job_submit(job)); 273 + return 0; 285 274 } 286 275 287 276 /** ··· 1408 1417 } 1409 1418 1410 1419 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, 1411 - unsigned long offset, void *buf, int len, int write) 1420 + unsigned long offset, void *buf, 1421 + int len, int write) 1412 1422 { 1413 1423 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); 1414 1424 struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); ··· 1433 1441 memcpy(adev->mman.sdma_access_ptr, buf, len); 1434 1442 1435 1443 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); 1436 - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job); 1444 + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, 1445 + AMDGPU_FENCE_OWNER_UNDEFINED, 1446 + num_dw * 4, AMDGPU_IB_POOL_DELAYED, 1447 + &job); 1437 1448 if (r) 1438 1449 goto out; 1439 1450 1440 1451 amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); 1441 - src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; 
1452 + src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + 1453 + src_mm.start; 1442 1454 dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); 1443 1455 if (write) 1444 1456 swap(src_addr, dst_addr); 1445 1457 1446 - amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false); 1458 + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, 1459 + PAGE_SIZE, false); 1447 1460 1448 1461 amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]); 1449 1462 WARN_ON(job->ibs[0].length_dw > num_dw); 1450 1463 1451 - r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); 1452 - if (r) { 1453 - amdgpu_job_free(job); 1454 - goto out; 1455 - } 1464 + fence = amdgpu_job_submit(job); 1456 1465 1457 1466 if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) 1458 1467 r = -ETIMEDOUT; ··· 1952 1959 AMDGPU_IB_POOL_DELAYED; 1953 1960 int r; 1954 1961 1955 - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job); 1962 + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, 1963 + AMDGPU_FENCE_OWNER_UNDEFINED, 1964 + num_dw * 4, pool, job); 1956 1965 if (r) 1957 1966 return r; 1958 1967 ··· 1964 1969 adev->gart.bo); 1965 1970 (*job)->vm_needs_flush = true; 1966 1971 } 1967 - if (resv) { 1968 - r = amdgpu_sync_resv(adev, &(*job)->sync, resv, 1969 - AMDGPU_SYNC_ALWAYS, 1970 - AMDGPU_FENCE_OWNER_UNDEFINED); 1971 - if (r) { 1972 - DRM_ERROR("sync failed (%d).\n", r); 1973 - amdgpu_job_free(*job); 1974 - return r; 1975 - } 1976 - } 1977 - return 0; 1972 + if (!resv) 1973 + return 0; 1974 + 1975 + return drm_sched_job_add_resv_dependencies(&(*job)->base, resv, 1976 + DMA_RESV_USAGE_BOOKKEEP); 1978 1977 } 1979 1978 1980 1979 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, ··· 2013 2024 if (direct_submit) 2014 2025 r = amdgpu_job_submit_direct(job, ring, fence); 2015 2026 else 2016 - r = amdgpu_job_submit(job, &adev->mman.entity, 2017 - AMDGPU_FENCE_OWNER_UNDEFINED, 
fence); 2027 + *fence = amdgpu_job_submit(job); 2018 2028 if (r) 2019 2029 goto error_free; 2020 2030 ··· 2058 2070 2059 2071 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 2060 2072 WARN_ON(job->ibs[0].length_dw > num_dw); 2061 - r = amdgpu_job_submit(job, &adev->mman.entity, 2062 - AMDGPU_FENCE_OWNER_UNDEFINED, fence); 2063 - if (r) 2064 - goto error_free; 2065 - 2073 + *fence = amdgpu_job_submit(job); 2066 2074 return 0; 2067 - 2068 - error_free: 2069 - amdgpu_job_free(job); 2070 - return r; 2071 2075 } 2072 2076 2073 2077 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+7 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 1132 1132 unsigned offset_idx = 0; 1133 1133 unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; 1134 1134 1135 - r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT : 1135 + r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, 1136 + AMDGPU_FENCE_OWNER_UNDEFINED, 1137 + 64, direct ? AMDGPU_IB_POOL_DIRECT : 1136 1138 AMDGPU_IB_POOL_DELAYED, &job); 1137 1139 if (r) 1138 1140 return r; ··· 1177 1175 if (r) 1178 1176 goto err_free; 1179 1177 } else { 1180 - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv, 1181 - AMDGPU_SYNC_ALWAYS, 1182 - AMDGPU_FENCE_OWNER_UNDEFINED); 1178 + r = drm_sched_job_add_resv_dependencies(&job->base, 1179 + bo->tbo.base.resv, 1180 + DMA_RESV_USAGE_KERNEL); 1183 1181 if (r) 1184 1182 goto err_free; 1185 1183 1186 - r = amdgpu_job_submit(job, &adev->uvd.entity, 1187 - AMDGPU_FENCE_OWNER_UNDEFINED, &f); 1188 - if (r) 1189 - goto err_free; 1184 + f = amdgpu_job_submit(job); 1190 1185 } 1191 1186 1192 1187 amdgpu_bo_reserve(bo, true);
+8 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 450 450 uint64_t addr; 451 451 int i, r; 452 452 453 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 454 - AMDGPU_IB_POOL_DIRECT, &job); 453 + r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, 454 + AMDGPU_FENCE_OWNER_UNDEFINED, 455 + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 456 + &job); 455 457 if (r) 456 458 return r; 457 459 ··· 540 538 struct dma_fence *f = NULL; 541 539 int i, r; 542 540 543 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 541 + r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, 542 + AMDGPU_FENCE_OWNER_UNDEFINED, 543 + ib_size_dw * 4, 544 544 direct ? AMDGPU_IB_POOL_DIRECT : 545 545 AMDGPU_IB_POOL_DELAYED, &job); 546 546 if (r) ··· 574 570 if (direct) 575 571 r = amdgpu_job_submit_direct(job, ring, &f); 576 572 else 577 - r = amdgpu_job_submit(job, &ring->adev->vce.entity, 578 - AMDGPU_FENCE_OWNER_UNDEFINED, &f); 573 + f = amdgpu_job_submit(job); 579 574 if (r) 580 575 goto err; 581 576
+13 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 600 600 struct amdgpu_ib *ib_msg, 601 601 struct dma_fence **fence) 602 602 { 603 + u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); 603 604 struct amdgpu_device *adev = ring->adev; 604 605 struct dma_fence *f = NULL; 605 606 struct amdgpu_job *job; 606 607 struct amdgpu_ib *ib; 607 - uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); 608 608 int i, r; 609 609 610 - r = amdgpu_job_alloc_with_ib(adev, 64, 611 - AMDGPU_IB_POOL_DIRECT, &job); 610 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 611 + 64, AMDGPU_IB_POOL_DIRECT, 612 + &job); 612 613 if (r) 613 614 goto err; 614 615 ··· 788 787 if (sq) 789 788 ib_size_dw += 8; 790 789 791 - r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, 792 - AMDGPU_IB_POOL_DIRECT, &job); 790 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 791 + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 792 + &job); 793 793 if (r) 794 794 goto err; 795 795 ··· 918 916 if (sq) 919 917 ib_size_dw += 8; 920 918 921 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 922 - AMDGPU_IB_POOL_DIRECT, &job); 919 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 920 + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 921 + &job); 923 922 if (r) 924 923 return r; 925 924 ··· 985 982 if (sq) 986 983 ib_size_dw += 8; 987 984 988 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 989 - AMDGPU_IB_POOL_DIRECT, &job); 985 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 986 + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, 987 + &job); 990 988 if (r) 991 989 return r; 992 990
+39 -32
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
··· 47 47 return r; 48 48 } 49 49 50 + /* Allocate a new job for @count PTE updates */ 51 + static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, 52 + unsigned int count) 53 + { 54 + enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE 55 + : AMDGPU_IB_POOL_DELAYED; 56 + struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate 57 + : &p->vm->delayed; 58 + unsigned int ndw; 59 + int r; 60 + 61 + /* estimate how many dw we need */ 62 + ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; 63 + if (p->pages_addr) 64 + ndw += count * 2; 65 + ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); 66 + 67 + r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM, 68 + ndw * 4, pool, &p->job); 69 + if (r) 70 + return r; 71 + 72 + p->num_dw_left = ndw; 73 + return 0; 74 + } 75 + 50 76 /** 51 77 * amdgpu_vm_sdma_prepare - prepare SDMA command submission 52 78 * ··· 87 61 struct dma_resv *resv, 88 62 enum amdgpu_sync_mode sync_mode) 89 63 { 90 - enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE 91 - : AMDGPU_IB_POOL_DELAYED; 92 - unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; 64 + struct amdgpu_sync sync; 93 65 int r; 94 66 95 - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job); 67 + r = amdgpu_vm_sdma_alloc_job(p, 0); 96 68 if (r) 97 69 return r; 98 - 99 - p->num_dw_left = ndw; 100 70 101 71 if (!resv) 102 72 return 0; 103 73 104 - return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm); 74 + amdgpu_sync_create(&sync); 75 + r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm); 76 + if (!r) 77 + r = amdgpu_sync_push_to_job(&sync, p->job); 78 + amdgpu_sync_free(&sync); 79 + return r; 105 80 } 106 81 107 82 /** ··· 118 91 struct dma_fence **fence) 119 92 { 120 93 struct amdgpu_ib *ib = p->job->ibs; 121 - struct drm_sched_entity *entity; 122 94 struct amdgpu_ring *ring; 123 95 struct dma_fence *f; 124 - int r; 125 96 126 - entity = p->immediate ? 
&p->vm->immediate : &p->vm->delayed; 127 - ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); 97 + ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring, 98 + sched); 128 99 129 100 WARN_ON(ib->length_dw == 0); 130 101 amdgpu_ring_pad_ib(ring, ib); 131 102 WARN_ON(ib->length_dw > p->num_dw_left); 132 - r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f); 133 - if (r) 134 - goto error; 103 + f = amdgpu_job_submit(p->job); 135 104 136 105 if (p->unlocked) { 137 106 struct dma_fence *tmp = dma_fence_get(f); ··· 150 127 } 151 128 dma_fence_put(f); 152 129 return 0; 153 - 154 - error: 155 - amdgpu_job_free(p->job); 156 - return r; 157 130 } 158 131 159 132 /** ··· 229 210 uint64_t flags) 230 211 { 231 212 struct amdgpu_bo *bo = &vmbo->bo; 232 - enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE 233 - : AMDGPU_IB_POOL_DELAYED; 234 213 struct dma_resv_iter cursor; 235 214 unsigned int i, ndw, nptes; 236 215 struct dma_fence *fence; ··· 238 221 /* Wait for PD/PT moves to be completed */ 239 222 dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL); 240 223 dma_resv_for_each_fence_unlocked(&cursor, fence) { 241 - r = amdgpu_sync_fence(&p->job->sync, fence); 224 + r = drm_sched_job_add_dependency(&p->job->base, fence); 242 225 if (r) { 243 226 dma_resv_iter_end(&cursor); 244 227 return r; ··· 255 238 if (r) 256 239 return r; 257 240 258 - /* estimate how many dw we need */ 259 - ndw = 32; 260 - if (p->pages_addr) 261 - ndw += count * 2; 262 - ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW); 263 - ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); 264 - 265 - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, 266 - &p->job); 241 + r = amdgpu_vm_sdma_alloc_job(p, count); 267 242 if (r) 268 243 return r; 269 - 270 - p->num_dw_left = ndw; 271 244 } 272 245 273 246 if (!p->pages_addr) {
+1
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 21 21 * 22 22 */ 23 23 24 + #include <drm/drm_fb_helper.h> 24 25 #include <drm/drm_fourcc.h> 25 26 #include <drm/drm_vblank.h> 26 27
+1
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 21 21 * 22 22 */ 23 23 24 + #include <drm/drm_fb_helper.h> 24 25 #include <drm/drm_fourcc.h> 25 26 #include <drm/drm_vblank.h> 26 27
+1
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 23 23 24 24 #include <linux/pci.h> 25 25 26 + #include <drm/drm_fb_helper.h> 26 27 #include <drm/drm_fourcc.h> 27 28 #include <drm/drm_vblank.h> 28 29
+1
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 21 21 * 22 22 */ 23 23 24 + #include <drm/drm_fb_helper.h> 24 25 #include <drm/drm_fourcc.h> 25 26 #include <drm/drm_vblank.h> 26 27
+4 -8
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 371 371 * translation. Avoid this by doing the invalidation from the SDMA 372 372 * itself. 373 373 */ 374 - r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, 374 + r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity, 375 + AMDGPU_FENCE_OWNER_UNDEFINED, 376 + 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, 375 377 &job); 376 378 if (r) 377 379 goto error_alloc; ··· 382 380 job->vm_needs_flush = true; 383 381 job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; 384 382 amdgpu_ring_pad_ib(ring, &job->ibs[0]); 385 - r = amdgpu_job_submit(job, &adev->mman.entity, 386 - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); 387 - if (r) 388 - goto error_submit; 383 + fence = amdgpu_job_submit(job); 389 384 390 385 mutex_unlock(&adev->mman.gtt_window_lock); 391 386 ··· 390 391 dma_fence_put(fence); 391 392 392 393 return; 393 - 394 - error_submit: 395 - amdgpu_job_free(job); 396 394 397 395 error_alloc: 398 396 mutex_unlock(&adev->mman.gtt_window_lock);
+4 -4
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 216 216 uint64_t addr; 217 217 int i, r; 218 218 219 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 220 - AMDGPU_IB_POOL_DIRECT, &job); 219 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 220 + AMDGPU_IB_POOL_DIRECT, &job); 221 221 if (r) 222 222 return r; 223 223 ··· 280 280 uint64_t addr; 281 281 int i, r; 282 282 283 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 284 - AMDGPU_IB_POOL_DIRECT, &job); 283 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 284 + AMDGPU_IB_POOL_DIRECT, &job); 285 285 if (r) 286 286 return r; 287 287
+6 -6
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
··· 213 213 * 214 214 * Open up a stream for HW test 215 215 */ 216 - static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, 216 + static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle, 217 217 struct amdgpu_bo *bo, 218 218 struct dma_fence **fence) 219 219 { ··· 224 224 uint64_t addr; 225 225 int i, r; 226 226 227 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 228 - AMDGPU_IB_POOL_DIRECT, &job); 227 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 228 + AMDGPU_IB_POOL_DIRECT, &job); 229 229 if (r) 230 230 return r; 231 231 ··· 276 276 * 277 277 * Close up a stream for HW test or if userspace failed to do so 278 278 */ 279 - static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, 279 + static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle, 280 280 struct amdgpu_bo *bo, 281 281 struct dma_fence **fence) 282 282 { ··· 287 287 uint64_t addr; 288 288 int i, r; 289 289 290 - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, 291 - AMDGPU_IB_POOL_DIRECT, &job); 290 + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, 291 + AMDGPU_IB_POOL_DIRECT, &job); 292 292 if (r) 293 293 return r; 294 294
+6 -11
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 65 65 num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); 66 66 num_bytes = npages * 8; 67 67 68 - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, 69 - AMDGPU_IB_POOL_DELAYED, &job); 68 + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, 69 + AMDGPU_FENCE_OWNER_UNDEFINED, 70 + num_dw * 4 + num_bytes, 71 + AMDGPU_IB_POOL_DELAYED, 72 + &job); 70 73 if (r) 71 74 return r; 72 75 ··· 92 89 cpu_addr = &job->ibs[0].ptr[num_dw]; 93 90 94 91 amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); 95 - r = amdgpu_job_submit(job, &adev->mman.entity, 96 - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); 97 - if (r) 98 - goto error_free; 99 - 92 + fence = amdgpu_job_submit(job); 100 93 dma_fence_put(fence); 101 94 102 - return r; 103 - 104 - error_free: 105 - amdgpu_job_free(job); 106 95 return r; 107 96 } 108 97
-2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 82 82 #include <drm/drm_atomic_uapi.h> 83 83 #include <drm/drm_atomic_helper.h> 84 84 #include <drm/drm_blend.h> 85 - #include <drm/drm_fb_helper.h> 86 85 #include <drm/drm_fourcc.h> 87 86 #include <drm/drm_edid.h> 88 87 #include <drm/drm_vblank.h> ··· 2811 2812 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { 2812 2813 .fb_create = amdgpu_display_user_framebuffer_create, 2813 2814 .get_format_info = amd_get_format_info, 2814 - .output_poll_changed = drm_fb_helper_output_poll_changed, 2815 2815 .atomic_check = amdgpu_dm_atomic_check, 2816 2816 .atomic_commit = drm_atomic_helper_commit, 2817 2817 };
+1 -1
drivers/gpu/drm/arm/display/komeda/komeda_drv.c
··· 9 9 #include <linux/platform_device.h> 10 10 #include <linux/component.h> 11 11 #include <linux/pm_runtime.h> 12 - #include <drm/drm_fb_helper.h> 12 + #include <drm/drm_fbdev_generic.h> 13 13 #include <drm/drm_module.h> 14 14 #include <drm/drm_of.h> 15 15 #include "komeda_dev.h"
-2
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
··· 10 10 #include <drm/drm_atomic.h> 11 11 #include <drm/drm_atomic_helper.h> 12 12 #include <drm/drm_drv.h> 13 - #include <drm/drm_fb_helper.h> 14 13 #include <drm/drm_gem_dma_helper.h> 15 14 #include <drm/drm_gem_framebuffer_helper.h> 16 15 #include <drm/drm_managed.h> ··· 58 59 59 60 static const struct drm_driver komeda_kms_driver = { 60 61 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, 61 - .lastclose = drm_fb_helper_lastclose, 62 62 DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create), 63 63 .fops = &komeda_cma_fops, 64 64 .name = "komeda",
+9 -16
drivers/gpu/drm/arm/hdlcd_crtc.c
··· 19 19 #include <drm/drm_atomic_helper.h> 20 20 #include <drm/drm_crtc.h> 21 21 #include <drm/drm_fb_dma_helper.h> 22 - #include <drm/drm_fb_helper.h> 23 22 #include <drm/drm_framebuffer.h> 24 23 #include <drm/drm_gem_dma_helper.h> 25 24 #include <drm/drm_of.h> ··· 274 275 dest_h = drm_rect_height(&new_plane_state->dst); 275 276 scanout_start = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0); 276 277 277 - hdlcd = plane->dev->dev_private; 278 + hdlcd = drm_to_hdlcd_priv(plane->dev); 278 279 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); 279 280 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]); 280 281 hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); ··· 289 290 static const struct drm_plane_funcs hdlcd_plane_funcs = { 290 291 .update_plane = drm_atomic_helper_update_plane, 291 292 .disable_plane = drm_atomic_helper_disable_plane, 292 - .destroy = drm_plane_cleanup, 293 293 .reset = drm_atomic_helper_plane_reset, 294 294 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, 295 295 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, ··· 296 298 297 299 static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) 298 300 { 299 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 301 + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); 300 302 struct drm_plane *plane = NULL; 301 303 u32 formats[ARRAY_SIZE(supported_formats)], i; 302 - int ret; 303 - 304 - plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL); 305 - if (!plane) 306 - return ERR_PTR(-ENOMEM); 307 304 308 305 for (i = 0; i < ARRAY_SIZE(supported_formats); i++) 309 306 formats[i] = supported_formats[i].fourcc; 310 307 311 - ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs, 312 - formats, ARRAY_SIZE(formats), 313 - NULL, 314 - DRM_PLANE_TYPE_PRIMARY, NULL); 315 - if (ret) 316 - return ERR_PTR(ret); 308 + plane = drmm_universal_plane_alloc(drm, struct drm_plane, dev, 0xff, 309 + &hdlcd_plane_funcs, 310 + 
formats, ARRAY_SIZE(formats), 311 + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); 312 + if (IS_ERR(plane)) 313 + return plane; 317 314 318 315 drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs); 319 316 hdlcd->plane = plane; ··· 318 325 319 326 int hdlcd_setup_crtc(struct drm_device *drm) 320 327 { 321 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 328 + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); 322 329 struct drm_plane *primary; 323 330 int ret; 324 331
+22 -21
drivers/gpu/drm/arm/hdlcd_drv.c
··· 26 26 #include <drm/drm_crtc.h> 27 27 #include <drm/drm_debugfs.h> 28 28 #include <drm/drm_drv.h> 29 - #include <drm/drm_fb_helper.h> 29 + #include <drm/drm_fbdev_generic.h> 30 30 #include <drm/drm_gem_dma_helper.h> 31 31 #include <drm/drm_gem_framebuffer_helper.h> 32 32 #include <drm/drm_modeset_helper.h> ··· 98 98 99 99 static int hdlcd_load(struct drm_device *drm, unsigned long flags) 100 100 { 101 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 101 + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); 102 102 struct platform_device *pdev = to_platform_device(drm->dev); 103 103 struct resource *res; 104 104 u32 version; ··· 175 175 .atomic_commit = drm_atomic_helper_commit, 176 176 }; 177 177 178 - static void hdlcd_setup_mode_config(struct drm_device *drm) 178 + static int hdlcd_setup_mode_config(struct drm_device *drm) 179 179 { 180 - drm_mode_config_init(drm); 180 + int ret; 181 + 182 + ret = drmm_mode_config_init(drm); 183 + if (ret) 184 + return ret; 185 + 181 186 drm->mode_config.min_width = 0; 182 187 drm->mode_config.min_height = 0; 183 188 drm->mode_config.max_width = HDLCD_MAX_XRES; 184 189 drm->mode_config.max_height = HDLCD_MAX_YRES; 185 190 drm->mode_config.funcs = &hdlcd_mode_config_funcs; 191 + 192 + return 0; 186 193 } 187 194 188 195 #ifdef CONFIG_DEBUG_FS ··· 197 190 { 198 191 struct drm_info_node *node = (struct drm_info_node *)m->private; 199 192 struct drm_device *drm = node->minor->dev; 200 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 193 + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); 201 194 202 195 seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count)); 203 196 seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count)); ··· 210 203 { 211 204 struct drm_info_node *node = (struct drm_info_node *)m->private; 212 205 struct drm_device *drm = node->minor->dev; 213 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 206 + struct hdlcd_drm_private *hdlcd = 
drm_to_hdlcd_priv(drm); 214 207 unsigned long clkrate = clk_get_rate(hdlcd->clk); 215 208 unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000; 216 209 ··· 254 247 struct hdlcd_drm_private *hdlcd; 255 248 int ret; 256 249 257 - hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL); 258 - if (!hdlcd) 259 - return -ENOMEM; 250 + hdlcd = devm_drm_dev_alloc(dev, &hdlcd_driver, typeof(*hdlcd), base); 251 + if (IS_ERR(hdlcd)) 252 + return PTR_ERR(hdlcd); 260 253 261 - drm = drm_dev_alloc(&hdlcd_driver, dev); 262 - if (IS_ERR(drm)) 263 - return PTR_ERR(drm); 254 + drm = &hdlcd->base; 264 255 265 - drm->dev_private = hdlcd; 266 256 dev_set_drvdata(dev, drm); 267 257 268 - hdlcd_setup_mode_config(drm); 258 + ret = hdlcd_setup_mode_config(drm); 259 + if (ret) 260 + goto err_free; 261 + 269 262 ret = hdlcd_load(drm, 0); 270 263 if (ret) 271 264 goto err_free; ··· 324 317 hdlcd_irq_uninstall(hdlcd); 325 318 of_reserved_mem_device_release(drm->dev); 326 319 err_free: 327 - drm_mode_config_cleanup(drm); 328 320 dev_set_drvdata(dev, NULL); 329 - drm_dev_put(drm); 330 - 331 321 return ret; 332 322 } 333 323 334 324 static void hdlcd_drm_unbind(struct device *dev) 335 325 { 336 326 struct drm_device *drm = dev_get_drvdata(dev); 337 - struct hdlcd_drm_private *hdlcd = drm->dev_private; 327 + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); 338 328 339 329 drm_dev_unregister(drm); 340 330 drm_kms_helper_poll_fini(drm); ··· 345 341 if (pm_runtime_enabled(dev)) 346 342 pm_runtime_disable(dev); 347 343 of_reserved_mem_device_release(dev); 348 - drm_mode_config_cleanup(drm); 349 - drm->dev_private = NULL; 350 344 dev_set_drvdata(dev, NULL); 351 - drm_dev_put(drm); 352 345 } 353 346 354 347 static const struct component_master_ops hdlcd_master_ops = {
+2
drivers/gpu/drm/arm/hdlcd_drv.h
··· 7 7 #define __HDLCD_DRV_H__ 8 8 9 9 struct hdlcd_drm_private { 10 + struct drm_device base; 10 11 void __iomem *mmio; 11 12 struct clk *clk; 12 13 struct drm_crtc crtc; ··· 21 20 #endif 22 21 }; 23 22 23 + #define drm_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, base) 24 24 #define crtc_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, crtc) 25 25 26 26 static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd,
+1 -1
drivers/gpu/drm/arm/malidp_drv.c
··· 19 19 #include <drm/drm_atomic_helper.h> 20 20 #include <drm/drm_crtc.h> 21 21 #include <drm/drm_drv.h> 22 - #include <drm/drm_fb_helper.h> 22 + #include <drm/drm_fbdev_generic.h> 23 23 #include <drm/drm_fourcc.h> 24 24 #include <drm/drm_gem_dma_helper.h> 25 25 #include <drm/drm_gem_framebuffer_helper.h>
+4 -2
drivers/gpu/drm/armada/armada_fbdev.c
··· 19 19 static const struct fb_ops armada_fb_ops = { 20 20 .owner = THIS_MODULE, 21 21 DRM_FB_HELPER_DEFAULT_OPS, 22 + .fb_read = drm_fb_helper_cfb_read, 23 + .fb_write = drm_fb_helper_cfb_write, 22 24 .fb_fillrect = drm_fb_helper_cfb_fillrect, 23 25 .fb_copyarea = drm_fb_helper_cfb_copyarea, 24 26 .fb_imageblit = drm_fb_helper_cfb_imageblit, ··· 74 72 if (IS_ERR(dfb)) 75 73 return PTR_ERR(dfb); 76 74 77 - info = drm_fb_helper_alloc_fbi(fbh); 75 + info = drm_fb_helper_alloc_info(fbh); 78 76 if (IS_ERR(info)) { 79 77 ret = PTR_ERR(info); 80 78 goto err_fballoc; ··· 157 155 struct drm_fb_helper *fbh = priv->fbdev; 158 156 159 157 if (fbh) { 160 - drm_fb_helper_unregister_fbi(fbh); 158 + drm_fb_helper_unregister_info(fbh); 161 159 162 160 drm_fb_helper_fini(fbh); 163 161
+1 -1
drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
··· 16 16 #include <drm/drm_atomic_helper.h> 17 17 #include <drm/drm_crtc_helper.h> 18 18 #include <drm/drm_device.h> 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_fbdev_generic.h> 20 20 #include <drm/drm_gem_dma_helper.h> 21 21 #include <drm/drm_gem_framebuffer_helper.h> 22 22 #include <drm/drm_module.h>
+1
drivers/gpu/drm/ast/ast_drv.c
··· 33 33 #include <drm/drm_atomic_helper.h> 34 34 #include <drm/drm_crtc_helper.h> 35 35 #include <drm/drm_drv.h> 36 + #include <drm/drm_fbdev_generic.h> 36 37 #include <drm/drm_gem_shmem_helper.h> 37 38 #include <drm/drm_module.h> 38 39 #include <drm/drm_probe_helper.h>
-1
drivers/gpu/drm/ast/ast_drv.h
··· 38 38 #include <drm/drm_encoder.h> 39 39 #include <drm/drm_mode.h> 40 40 #include <drm/drm_framebuffer.h> 41 - #include <drm/drm_fb_helper.h> 42 41 43 42 #define DRIVER_AUTHOR "Dave Airlie" 44 43
+1 -1
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
··· 19 19 #include <drm/drm_atomic.h> 20 20 #include <drm/drm_atomic_helper.h> 21 21 #include <drm/drm_drv.h> 22 - #include <drm/drm_fb_helper.h> 22 + #include <drm/drm_fbdev_generic.h> 23 23 #include <drm/drm_gem_dma_helper.h> 24 24 #include <drm/drm_gem_framebuffer_helper.h> 25 25 #include <drm/drm_module.h>
+1 -1
drivers/gpu/drm/bridge/tc358762.c
··· 11 11 */ 12 12 13 13 #include <linux/delay.h> 14 + #include <linux/mod_devicetable.h> 14 15 #include <linux/module.h> 15 16 #include <linux/of_graph.h> 16 17 #include <linux/regulator/consumer.h> ··· 20 19 21 20 #include <drm/drm_atomic_helper.h> 22 21 #include <drm/drm_crtc.h> 23 - #include <drm/drm_fb_helper.h> 24 22 #include <drm/drm_mipi_dsi.h> 25 23 #include <drm/drm_of.h> 26 24 #include <drm/drm_panel.h>
+31 -3
drivers/gpu/drm/drm_atomic_helper.c
··· 2536 2536 if (funcs->prepare_fb) { 2537 2537 ret = funcs->prepare_fb(plane, new_plane_state); 2538 2538 if (ret) 2539 - goto fail; 2539 + goto fail_prepare_fb; 2540 2540 } else { 2541 2541 WARN_ON_ONCE(funcs->cleanup_fb); 2542 2542 ··· 2545 2545 2546 2546 ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state); 2547 2547 if (ret) 2548 - goto fail; 2548 + goto fail_prepare_fb; 2549 + } 2550 + } 2551 + 2552 + for_each_new_plane_in_state(state, plane, new_plane_state, i) { 2553 + const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2554 + 2555 + if (funcs->begin_fb_access) { 2556 + ret = funcs->begin_fb_access(plane, new_plane_state); 2557 + if (ret) 2558 + goto fail_begin_fb_access; 2549 2559 } 2550 2560 } 2551 2561 2552 2562 return 0; 2553 2563 2554 - fail: 2564 + fail_begin_fb_access: 2565 + for_each_new_plane_in_state(state, plane, new_plane_state, j) { 2566 + const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2567 + 2568 + if (j >= i) 2569 + continue; 2570 + 2571 + if (funcs->end_fb_access) 2572 + funcs->end_fb_access(plane, new_plane_state); 2573 + } 2574 + i = j; /* set i to upper limit to cleanup all planes */ 2575 + fail_prepare_fb: 2555 2576 for_each_new_plane_in_state(state, plane, new_plane_state, j) { 2556 2577 const struct drm_plane_helper_funcs *funcs; 2557 2578 ··· 2847 2826 struct drm_plane *plane; 2848 2827 struct drm_plane_state *old_plane_state, *new_plane_state; 2849 2828 int i; 2829 + 2830 + for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { 2831 + const struct drm_plane_helper_funcs *funcs = plane->helper_private; 2832 + 2833 + if (funcs->end_fb_access) 2834 + funcs->end_fb_access(plane, new_plane_state); 2835 + } 2850 2836 2851 2837 for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { 2852 2838 const struct drm_plane_helper_funcs *funcs;
-1
drivers/gpu/drm/drm_crtc_helper.c
··· 43 43 #include <drm/drm_drv.h> 44 44 #include <drm/drm_edid.h> 45 45 #include <drm/drm_encoder.h> 46 - #include <drm/drm_fb_helper.h> 47 46 #include <drm/drm_fourcc.h> 48 47 #include <drm/drm_framebuffer.h> 49 48 #include <drm/drm_print.h>
+316 -692
drivers/gpu/drm/drm_fb_helper.c
··· 30 30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 31 32 32 #include <linux/console.h> 33 - #include <linux/dma-buf.h> 34 - #include <linux/kernel.h> 35 - #include <linux/module.h> 36 - #include <linux/slab.h> 37 33 #include <linux/sysrq.h> 38 - #include <linux/vmalloc.h> 39 34 40 35 #include <drm/drm_atomic.h> 41 - #include <drm/drm_crtc.h> 42 - #include <drm/drm_crtc_helper.h> 43 36 #include <drm/drm_drv.h> 44 37 #include <drm/drm_fb_helper.h> 45 38 #include <drm/drm_fourcc.h> 46 39 #include <drm/drm_framebuffer.h> 40 + #include <drm/drm_modeset_helper_vtables.h> 47 41 #include <drm/drm_print.h> 48 42 #include <drm/drm_vblank.h> 49 43 50 - #include "drm_crtc_helper_internal.h" 51 44 #include "drm_internal.h" 52 45 53 46 static bool drm_fbdev_emulation = true; ··· 67 74 * considered as a broken and legacy behaviour from a modern fbdev device. 68 75 */ 69 76 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 70 - static bool drm_leak_fbdev_smem = false; 77 + static bool drm_leak_fbdev_smem; 71 78 module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); 72 79 MODULE_PARM_DESC(drm_leak_fbdev_smem, 73 80 "Allow unsafe leaking fbdev physical smem address [default=false]"); ··· 89 96 * It will automatically set up deferred I/O if the driver requires a shadow 90 97 * buffer. 91 98 * 92 - * At runtime drivers should restore the fbdev console by using 99 + * Existing fbdev implementations should restore the fbdev console by using 93 100 * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. 94 101 * They should also notify the fb helper code from updates to the output 95 102 * configuration by using drm_fb_helper_output_poll_changed() as their 96 - * &drm_mode_config_funcs.output_poll_changed callback. 103 + * &drm_mode_config_funcs.output_poll_changed callback. New implementations 104 + * of fbdev should be build on top of struct &drm_client_funcs, which handles 105 + * this automatically. Setting the old callbacks should be avoided. 
97 106 * 98 107 * For suspend/resume consider using drm_mode_config_helper_suspend() and 99 108 * drm_mode_config_helper_resume() which takes care of fbdev as well. ··· 363 368 resume_work); 364 369 365 370 console_lock(); 366 - fb_set_suspend(helper->fbdev, 0); 371 + fb_set_suspend(helper->info, 0); 367 372 console_unlock(); 368 - } 369 - 370 - static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, 371 - struct drm_clip_rect *clip, 372 - struct iosys_map *dst) 373 - { 374 - struct drm_framebuffer *fb = fb_helper->fb; 375 - size_t offset = clip->y1 * fb->pitches[0]; 376 - size_t len = clip->x2 - clip->x1; 377 - unsigned int y; 378 - void *src; 379 - 380 - switch (drm_format_info_bpp(fb->format, 0)) { 381 - case 1: 382 - offset += clip->x1 / 8; 383 - len = DIV_ROUND_UP(len + clip->x1 % 8, 8); 384 - break; 385 - case 2: 386 - offset += clip->x1 / 4; 387 - len = DIV_ROUND_UP(len + clip->x1 % 4, 4); 388 - break; 389 - case 4: 390 - offset += clip->x1 / 2; 391 - len = DIV_ROUND_UP(len + clip->x1 % 2, 2); 392 - break; 393 - default: 394 - offset += clip->x1 * fb->format->cpp[0]; 395 - len *= fb->format->cpp[0]; 396 - break; 397 - } 398 - 399 - src = fb_helper->fbdev->screen_buffer + offset; 400 - iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ 401 - 402 - for (y = clip->y1; y < clip->y2; y++) { 403 - iosys_map_memcpy_to(dst, 0, src, len); 404 - iosys_map_incr(dst, fb->pitches[0]); 405 - src += fb->pitches[0]; 406 - } 407 - } 408 - 409 - static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, 410 - struct drm_clip_rect *clip) 411 - { 412 - struct drm_client_buffer *buffer = fb_helper->buffer; 413 - struct iosys_map map, dst; 414 - int ret; 415 - 416 - /* 417 - * We have to pin the client buffer to its current location while 418 - * flushing the shadow buffer. In the general case, concurrent 419 - * modesetting operations could try to move the buffer and would 420 - * fail. 
The modeset has to be serialized by acquiring the reservation 421 - * object of the underlying BO here. 422 - * 423 - * For fbdev emulation, we only have to protect against fbdev modeset 424 - * operations. Nothing else will involve the client buffer's BO. So it 425 - * is sufficient to acquire struct drm_fb_helper.lock here. 426 - */ 427 - mutex_lock(&fb_helper->lock); 428 - 429 - ret = drm_client_buffer_vmap(buffer, &map); 430 - if (ret) 431 - goto out; 432 - 433 - dst = map; 434 - drm_fb_helper_damage_blit_real(fb_helper, clip, &dst); 435 - 436 - drm_client_buffer_vunmap(buffer); 437 - 438 - out: 439 - mutex_unlock(&fb_helper->lock); 440 - 441 - return ret; 442 373 } 443 374 444 375 static void drm_fb_helper_damage_work(struct work_struct *work) 445 376 { 446 - struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, 447 - damage_work); 377 + struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work); 448 378 struct drm_device *dev = helper->dev; 449 379 struct drm_clip_rect *clip = &helper->damage_clip; 450 380 struct drm_clip_rect clip_copy; 451 381 unsigned long flags; 452 382 int ret; 383 + 384 + if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty)) 385 + return; 453 386 454 387 spin_lock_irqsave(&helper->damage_lock, flags); 455 388 clip_copy = *clip; ··· 385 462 clip->x2 = clip->y2 = 0; 386 463 spin_unlock_irqrestore(&helper->damage_lock, flags); 387 464 388 - /* Call damage handlers only if necessary */ 389 - if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) 390 - return; 391 - 392 - if (helper->buffer) { 393 - ret = drm_fb_helper_damage_blit(helper, &clip_copy); 394 - if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) 395 - goto err; 396 - } 397 - 398 - if (helper->fb->funcs->dirty) { 399 - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); 400 - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) 401 - goto err; 402 - } 465 + ret = 
helper->funcs->fb_dirty(helper, &clip_copy); 466 + if (ret) 467 + goto err; 403 468 404 469 return; 405 470 ··· 447 536 { 448 537 int ret; 449 538 450 - if (!drm_fbdev_emulation) { 451 - dev->fb_helper = fb_helper; 452 - return 0; 453 - } 454 - 455 539 /* 456 540 * If this is not the generic fbdev client, initialize a drm_client 457 541 * without callbacks so we can use the modesets. ··· 464 558 EXPORT_SYMBOL(drm_fb_helper_init); 465 559 466 560 /** 467 - * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members 561 + * drm_fb_helper_alloc_info - allocate fb_info and some of its members 468 562 * @fb_helper: driver-allocated fbdev helper 469 563 * 470 564 * A helper to alloc fb_info and the members cmap and apertures. Called ··· 476 570 * fb_info pointer if things went okay, pointer containing error code 477 571 * otherwise 478 572 */ 479 - struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) 573 + struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) 480 574 { 481 575 struct device *dev = fb_helper->dev->dev; 482 576 struct fb_info *info; ··· 504 598 goto err_free_cmap; 505 599 } 506 600 507 - fb_helper->fbdev = info; 601 + fb_helper->info = info; 508 602 info->skip_vt_switch = true; 509 603 510 604 return info; ··· 515 609 framebuffer_release(info); 516 610 return ERR_PTR(ret); 517 611 } 518 - EXPORT_SYMBOL(drm_fb_helper_alloc_fbi); 612 + EXPORT_SYMBOL(drm_fb_helper_alloc_info); 519 613 520 614 /** 521 - * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device 615 + * drm_fb_helper_unregister_info - unregister fb_info framebuffer device 522 616 * @fb_helper: driver-allocated fbdev helper, can be NULL 523 617 * 524 618 * A wrapper around unregister_framebuffer, to release the fb_info 525 619 * framebuffer device. This must be called before releasing all resources for 526 620 * @fb_helper by calling drm_fb_helper_fini(). 
527 621 */ 528 - void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) 622 + void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) 529 623 { 530 - if (fb_helper && fb_helper->fbdev) 531 - unregister_framebuffer(fb_helper->fbdev); 624 + if (fb_helper && fb_helper->info) 625 + unregister_framebuffer(fb_helper->info); 532 626 } 533 - EXPORT_SYMBOL(drm_fb_helper_unregister_fbi); 627 + EXPORT_SYMBOL(drm_fb_helper_unregister_info); 534 628 535 629 /** 536 630 * drm_fb_helper_fini - finialize a &struct drm_fb_helper ··· 553 647 cancel_work_sync(&fb_helper->resume_work); 554 648 cancel_work_sync(&fb_helper->damage_work); 555 649 556 - info = fb_helper->fbdev; 650 + info = fb_helper->info; 557 651 if (info) { 558 652 if (info->cmap.len) 559 653 fb_dealloc_cmap(&info->cmap); 560 654 framebuffer_release(info); 561 655 } 562 - fb_helper->fbdev = NULL; 656 + fb_helper->info = NULL; 563 657 564 658 mutex_lock(&kernel_fb_helper_lock); 565 659 if (!list_empty(&fb_helper->kernel_fb_list)) { ··· 576 670 } 577 671 EXPORT_SYMBOL(drm_fb_helper_fini); 578 672 579 - static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) 580 - { 581 - struct drm_device *dev = fb_helper->dev; 582 - struct drm_framebuffer *fb = fb_helper->fb; 583 - 584 - return dev->mode_config.prefer_shadow_fbdev || 585 - dev->mode_config.prefer_shadow || 586 - fb->funcs->dirty; 587 - } 588 - 589 - static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, 673 + static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y, 590 674 u32 width, u32 height) 591 675 { 592 - struct drm_fb_helper *helper = info->par; 593 676 struct drm_clip_rect *clip = &helper->damage_clip; 594 677 unsigned long flags; 595 - 596 - if (!drm_fbdev_use_shadow_fb(helper)) 597 - return; 598 678 599 679 spin_lock_irqsave(&helper->damage_lock, flags); 600 680 clip->x1 = min_t(u32, clip->x1, x); ··· 631 739 */ 632 740 void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head 
*pagereflist) 633 741 { 742 + struct drm_fb_helper *helper = info->par; 634 743 unsigned long start, end, min_off, max_off; 635 744 struct fb_deferred_io_pageref *pageref; 636 745 struct drm_rect damage_area; ··· 647 754 if (min_off >= max_off) 648 755 return; 649 756 650 - /* 651 - * As we can only track pages, we might reach beyond the end 652 - * of the screen and account for non-existing scanlines. Hence, 653 - * keep the covered memory area within the screen buffer. 654 - */ 655 - max_off = min(max_off, info->screen_size); 757 + if (helper->funcs->fb_dirty) { 758 + /* 759 + * As we can only track pages, we might reach beyond the end 760 + * of the screen and account for non-existing scanlines. Hence, 761 + * keep the covered memory area within the screen buffer. 762 + */ 763 + max_off = min(max_off, info->screen_size); 656 764 657 - drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area); 658 - drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 659 - drm_rect_width(&damage_area), 660 - drm_rect_height(&damage_area)); 765 + drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area); 766 + drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1, 767 + drm_rect_width(&damage_area), 768 + drm_rect_height(&damage_area)); 769 + } 661 770 } 662 771 EXPORT_SYMBOL(drm_fb_helper_deferred_io); 663 772 773 + typedef ssize_t (*drm_fb_helper_read_screen)(struct fb_info *info, char __user *buf, 774 + size_t count, loff_t pos); 775 + 776 + static ssize_t __drm_fb_helper_read(struct fb_info *info, char __user *buf, size_t count, 777 + loff_t *ppos, drm_fb_helper_read_screen read_screen) 778 + { 779 + loff_t pos = *ppos; 780 + size_t total_size; 781 + ssize_t ret; 782 + 783 + if (info->screen_size) 784 + total_size = info->screen_size; 785 + else 786 + total_size = info->fix.smem_len; 787 + 788 + if (pos >= total_size) 789 + return 0; 790 + if (count >= total_size) 791 + count = total_size; 792 + if (total_size - 
count < pos) 793 + count = total_size - pos; 794 + 795 + if (info->fbops->fb_sync) 796 + info->fbops->fb_sync(info); 797 + 798 + ret = read_screen(info, buf, count, pos); 799 + if (ret > 0) 800 + *ppos += ret; 801 + 802 + return ret; 803 + } 804 + 805 + typedef ssize_t (*drm_fb_helper_write_screen)(struct fb_info *info, const char __user *buf, 806 + size_t count, loff_t pos); 807 + 808 + static ssize_t __drm_fb_helper_write(struct fb_info *info, const char __user *buf, size_t count, 809 + loff_t *ppos, drm_fb_helper_write_screen write_screen) 810 + { 811 + loff_t pos = *ppos; 812 + size_t total_size; 813 + ssize_t ret; 814 + int err = 0; 815 + 816 + if (info->screen_size) 817 + total_size = info->screen_size; 818 + else 819 + total_size = info->fix.smem_len; 820 + 821 + if (pos > total_size) 822 + return -EFBIG; 823 + if (count > total_size) { 824 + err = -EFBIG; 825 + count = total_size; 826 + } 827 + if (total_size - count < pos) { 828 + if (!err) 829 + err = -ENOSPC; 830 + count = total_size - pos; 831 + } 832 + 833 + if (info->fbops->fb_sync) 834 + info->fbops->fb_sync(info); 835 + 836 + /* 837 + * Copy to framebuffer even if we already logged an error. Emulates 838 + * the behavior of the original fbdev implementation. 
839 + */ 840 + ret = write_screen(info, buf, count, pos); 841 + if (ret < 0) 842 + return ret; /* return last error, if any */ 843 + else if (!ret) 844 + return err; /* return previous error, if any */ 845 + 846 + *ppos += ret; 847 + 848 + return ret; 849 + } 850 + 851 + static ssize_t drm_fb_helper_read_screen_buffer(struct fb_info *info, char __user *buf, 852 + size_t count, loff_t pos) 853 + { 854 + const char *src = info->screen_buffer + pos; 855 + 856 + if (copy_to_user(buf, src, count)) 857 + return -EFAULT; 858 + 859 + return count; 860 + } 861 + 664 862 /** 665 - * drm_fb_helper_sys_read - wrapper around fb_sys_read 863 + * drm_fb_helper_sys_read - Implements struct &fb_ops.fb_read for system memory 666 864 * @info: fb_info struct pointer 667 865 * @buf: userspace buffer to read from framebuffer memory 668 866 * @count: number of bytes to read from framebuffer memory 669 867 * @ppos: read offset within framebuffer memory 670 868 * 671 - * A wrapper around fb_sys_read implemented by fbdev core 869 + * Returns: 870 + * The number of bytes read on success, or an error code otherwise. 
672 871 */ 673 872 ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 674 873 size_t count, loff_t *ppos) 675 874 { 676 - return fb_sys_read(info, buf, count, ppos); 875 + return __drm_fb_helper_read(info, buf, count, ppos, drm_fb_helper_read_screen_buffer); 677 876 } 678 877 EXPORT_SYMBOL(drm_fb_helper_sys_read); 679 878 879 + static ssize_t drm_fb_helper_write_screen_buffer(struct fb_info *info, const char __user *buf, 880 + size_t count, loff_t pos) 881 + { 882 + char *dst = info->screen_buffer + pos; 883 + 884 + if (copy_from_user(dst, buf, count)) 885 + return -EFAULT; 886 + 887 + return count; 888 + } 889 + 680 890 /** 681 - * drm_fb_helper_sys_write - wrapper around fb_sys_write 891 + * drm_fb_helper_sys_write - Implements struct &fb_ops.fb_write for system memory 682 892 * @info: fb_info struct pointer 683 893 * @buf: userspace buffer to write to framebuffer memory 684 894 * @count: number of bytes to write to framebuffer memory 685 895 * @ppos: write offset within framebuffer memory 686 896 * 687 - * A wrapper around fb_sys_write implemented by fbdev core 897 + * Returns: 898 + * The number of bytes written on success, or an error code otherwise. 
688 899 */ 689 900 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, 690 901 size_t count, loff_t *ppos) 691 902 { 903 + struct drm_fb_helper *helper = info->par; 692 904 loff_t pos = *ppos; 693 905 ssize_t ret; 694 906 struct drm_rect damage_area; 695 907 696 - ret = fb_sys_write(info, buf, count, ppos); 908 + ret = __drm_fb_helper_write(info, buf, count, ppos, drm_fb_helper_write_screen_buffer); 697 909 if (ret <= 0) 698 910 return ret; 699 911 700 - drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 701 - drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 702 - drm_rect_width(&damage_area), 703 - drm_rect_height(&damage_area)); 912 + if (helper->funcs->fb_dirty) { 913 + drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 914 + drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1, 915 + drm_rect_width(&damage_area), 916 + drm_rect_height(&damage_area)); 917 + } 704 918 705 919 return ret; 706 920 } ··· 823 823 void drm_fb_helper_sys_fillrect(struct fb_info *info, 824 824 const struct fb_fillrect *rect) 825 825 { 826 + struct drm_fb_helper *helper = info->par; 827 + 826 828 sys_fillrect(info, rect); 827 - drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 829 + 830 + if (helper->funcs->fb_dirty) 831 + drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height); 828 832 } 829 833 EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); 830 834 ··· 842 838 void drm_fb_helper_sys_copyarea(struct fb_info *info, 843 839 const struct fb_copyarea *area) 844 840 { 841 + struct drm_fb_helper *helper = info->par; 842 + 845 843 sys_copyarea(info, area); 846 - drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 844 + 845 + if (helper->funcs->fb_dirty) 846 + drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height); 847 847 } 848 848 EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); 849 849 ··· 861 853 void drm_fb_helper_sys_imageblit(struct 
fb_info *info, 862 854 const struct fb_image *image) 863 855 { 856 + struct drm_fb_helper *helper = info->par; 857 + 864 858 sys_imageblit(info, image); 865 - drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 859 + 860 + if (helper->funcs->fb_dirty) 861 + drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height); 866 862 } 867 863 EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); 864 + 865 + static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, 866 + loff_t pos) 867 + { 868 + const char __iomem *src = info->screen_base + pos; 869 + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 870 + ssize_t ret = 0; 871 + int err = 0; 872 + char *tmp; 873 + 874 + tmp = kmalloc(alloc_size, GFP_KERNEL); 875 + if (!tmp) 876 + return -ENOMEM; 877 + 878 + while (count) { 879 + size_t c = min_t(size_t, count, alloc_size); 880 + 881 + memcpy_fromio(tmp, src, c); 882 + if (copy_to_user(buf, tmp, c)) { 883 + err = -EFAULT; 884 + break; 885 + } 886 + 887 + src += c; 888 + buf += c; 889 + ret += c; 890 + count -= c; 891 + } 892 + 893 + kfree(tmp); 894 + 895 + return ret ? ret : err; 896 + } 897 + 898 + /** 899 + * drm_fb_helper_cfb_read - Implements struct &fb_ops.fb_read for I/O memory 900 + * @info: fb_info struct pointer 901 + * @buf: userspace buffer to read from framebuffer memory 902 + * @count: number of bytes to read from framebuffer memory 903 + * @ppos: read offset within framebuffer memory 904 + * 905 + * Returns: 906 + * The number of bytes read on success, or an error code otherwise. 
907 + */ 908 + ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, 909 + size_t count, loff_t *ppos) 910 + { 911 + return __drm_fb_helper_read(info, buf, count, ppos, fb_read_screen_base); 912 + } 913 + EXPORT_SYMBOL(drm_fb_helper_cfb_read); 914 + 915 + static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, 916 + loff_t pos) 917 + { 918 + char __iomem *dst = info->screen_base + pos; 919 + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 920 + ssize_t ret = 0; 921 + int err = 0; 922 + u8 *tmp; 923 + 924 + tmp = kmalloc(alloc_size, GFP_KERNEL); 925 + if (!tmp) 926 + return -ENOMEM; 927 + 928 + while (count) { 929 + size_t c = min_t(size_t, count, alloc_size); 930 + 931 + if (copy_from_user(tmp, buf, c)) { 932 + err = -EFAULT; 933 + break; 934 + } 935 + memcpy_toio(dst, tmp, c); 936 + 937 + dst += c; 938 + buf += c; 939 + ret += c; 940 + count -= c; 941 + } 942 + 943 + kfree(tmp); 944 + 945 + return ret ? ret : err; 946 + } 947 + 948 + /** 949 + * drm_fb_helper_cfb_write - Implements struct &fb_ops.fb_write for I/O memory 950 + * @info: fb_info struct pointer 951 + * @buf: userspace buffer to write to framebuffer memory 952 + * @count: number of bytes to write to framebuffer memory 953 + * @ppos: write offset within framebuffer memory 954 + * 955 + * Returns: 956 + * The number of bytes written on success, or an error code otherwise. 
957 + */ 958 + ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, 959 + size_t count, loff_t *ppos) 960 + { 961 + struct drm_fb_helper *helper = info->par; 962 + loff_t pos = *ppos; 963 + ssize_t ret; 964 + struct drm_rect damage_area; 965 + 966 + ret = __drm_fb_helper_write(info, buf, count, ppos, fb_write_screen_base); 967 + if (ret <= 0) 968 + return ret; 969 + 970 + if (helper->funcs->fb_dirty) { 971 + drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 972 + drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1, 973 + drm_rect_width(&damage_area), 974 + drm_rect_height(&damage_area)); 975 + } 976 + 977 + return ret; 978 + } 979 + EXPORT_SYMBOL(drm_fb_helper_cfb_write); 868 980 869 981 /** 870 982 * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect ··· 996 868 void drm_fb_helper_cfb_fillrect(struct fb_info *info, 997 869 const struct fb_fillrect *rect) 998 870 { 871 + struct drm_fb_helper *helper = info->par; 872 + 999 873 cfb_fillrect(info, rect); 1000 - drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 874 + 875 + if (helper->funcs->fb_dirty) 876 + drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height); 1001 877 } 1002 878 EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); 1003 879 ··· 1015 883 void drm_fb_helper_cfb_copyarea(struct fb_info *info, 1016 884 const struct fb_copyarea *area) 1017 885 { 886 + struct drm_fb_helper *helper = info->par; 887 + 1018 888 cfb_copyarea(info, area); 1019 - drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 889 + 890 + if (helper->funcs->fb_dirty) 891 + drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height); 1020 892 } 1021 893 EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); 1022 894 ··· 1034 898 void drm_fb_helper_cfb_imageblit(struct fb_info *info, 1035 899 const struct fb_image *image) 1036 900 { 901 + struct drm_fb_helper *helper = info->par; 902 + 1037 903 cfb_imageblit(info, image); 1038 
- drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 904 + 905 + if (helper->funcs->fb_dirty) 906 + drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height); 1039 907 } 1040 908 EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); 1041 909 ··· 1054 914 */ 1055 915 void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend) 1056 916 { 1057 - if (fb_helper && fb_helper->fbdev) 1058 - fb_set_suspend(fb_helper->fbdev, suspend); 917 + if (fb_helper && fb_helper->info) 918 + fb_set_suspend(fb_helper->info, suspend); 1059 919 } 1060 920 EXPORT_SYMBOL(drm_fb_helper_set_suspend); 1061 921 ··· 1078 938 void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, 1079 939 bool suspend) 1080 940 { 1081 - if (!fb_helper || !fb_helper->fbdev) 941 + if (!fb_helper || !fb_helper->info) 1082 942 return; 1083 943 1084 944 /* make sure there's no pending/ongoing resume */ 1085 945 flush_work(&fb_helper->resume_work); 1086 946 1087 947 if (suspend) { 1088 - if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING) 948 + if (fb_helper->info->state != FBINFO_STATE_RUNNING) 1089 949 return; 1090 950 1091 951 console_lock(); 1092 952 1093 953 } else { 1094 - if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING) 954 + if (fb_helper->info->state == FBINFO_STATE_RUNNING) 1095 955 return; 1096 956 1097 957 if (!console_trylock()) { ··· 1100 960 } 1101 961 } 1102 962 1103 - fb_set_suspend(fb_helper->fbdev, suspend); 963 + fb_set_suspend(fb_helper->info, suspend); 1104 964 console_unlock(); 1105 965 } 1106 966 EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked); ··· 1889 1749 sizes.surface_height = config->max_height; 1890 1750 } 1891 1751 1752 + #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 1753 + fb_helper->hint_leak_smem_start = drm_leak_fbdev_smem; 1754 + #endif 1755 + 1892 1756 /* push down into drivers */ 1893 1757 ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); 1894 1758 if (ret < 0) ··· 1994 1850 /* 1995 1851 * 
This is a continuation of drm_setup_crtcs() that sets up anything related 1996 1852 * to the framebuffer. During initialization, drm_setup_crtcs() is called before 1997 - * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev). 1853 + * the framebuffer has been allocated (fb_helper->fb and fb_helper->info). 1998 1854 * So, any setup that touches those fields needs to be done here instead of in 1999 1855 * drm_setup_crtcs(). 2000 1856 */ ··· 2002 1858 { 2003 1859 struct drm_client_dev *client = &fb_helper->client; 2004 1860 struct drm_connector_list_iter conn_iter; 2005 - struct fb_info *info = fb_helper->fbdev; 1861 + struct fb_info *info = fb_helper->info; 2006 1862 unsigned int rotation, sw_rotations = 0; 2007 1863 struct drm_connector *connector; 2008 1864 struct drm_mode_set *modeset; ··· 2086 1942 2087 1943 fb_helper->deferred_setup = false; 2088 1944 2089 - info = fb_helper->fbdev; 1945 + info = fb_helper->info; 2090 1946 info->var.pixclock = 0; 2091 1947 /* Shamelessly allow physical address leaking to userspace */ 2092 1948 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 2093 - if (!drm_leak_fbdev_smem) 1949 + if (!fb_helper->hint_leak_smem_start) 2094 1950 #endif 2095 1951 /* don't leak any physical addresses to userspace */ 2096 1952 info->flags |= FBINFO_HIDE_SMEM_START; ··· 2221 2077 drm_setup_crtcs_fb(fb_helper); 2222 2078 mutex_unlock(&fb_helper->lock); 2223 2079 2224 - drm_fb_helper_set_par(fb_helper->fbdev); 2080 + drm_fb_helper_set_par(fb_helper->info); 2225 2081 2226 2082 return 0; 2227 2083 } ··· 2247 2103 * 2248 2104 * This function can be used as the 2249 2105 * &drm_mode_config_funcs.output_poll_changed callback for drivers that only 2250 - * need to call drm_fb_helper_hotplug_event(). 2106 + * need to call drm_fbdev.hotplug_event(). 
2251 2107 */ 2252 2108 void drm_fb_helper_output_poll_changed(struct drm_device *dev) 2253 2109 { 2254 2110 drm_fb_helper_hotplug_event(dev->fb_helper); 2255 2111 } 2256 2112 EXPORT_SYMBOL(drm_fb_helper_output_poll_changed); 2257 - 2258 - /* @user: 1=userspace, 0=fbcon */ 2259 - static int drm_fbdev_fb_open(struct fb_info *info, int user) 2260 - { 2261 - struct drm_fb_helper *fb_helper = info->par; 2262 - 2263 - /* No need to take a ref for fbcon because it unbinds on unregister */ 2264 - if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) 2265 - return -ENODEV; 2266 - 2267 - return 0; 2268 - } 2269 - 2270 - static int drm_fbdev_fb_release(struct fb_info *info, int user) 2271 - { 2272 - struct drm_fb_helper *fb_helper = info->par; 2273 - 2274 - if (user) 2275 - module_put(fb_helper->dev->driver->fops->owner); 2276 - 2277 - return 0; 2278 - } 2279 - 2280 - static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) 2281 - { 2282 - struct fb_info *fbi = fb_helper->fbdev; 2283 - void *shadow = NULL; 2284 - 2285 - if (!fb_helper->dev) 2286 - return; 2287 - 2288 - if (fbi) { 2289 - if (fbi->fbdefio) 2290 - fb_deferred_io_cleanup(fbi); 2291 - if (drm_fbdev_use_shadow_fb(fb_helper)) 2292 - shadow = fbi->screen_buffer; 2293 - } 2294 - 2295 - drm_fb_helper_fini(fb_helper); 2296 - 2297 - if (shadow) 2298 - vfree(shadow); 2299 - else if (fb_helper->buffer) 2300 - drm_client_buffer_vunmap(fb_helper->buffer); 2301 - 2302 - drm_client_framebuffer_delete(fb_helper->buffer); 2303 - } 2304 - 2305 - static void drm_fbdev_release(struct drm_fb_helper *fb_helper) 2306 - { 2307 - drm_fbdev_cleanup(fb_helper); 2308 - drm_client_release(&fb_helper->client); 2309 - kfree(fb_helper); 2310 - } 2311 - 2312 - /* 2313 - * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of 2314 - * unregister_framebuffer() or fb_release(). 
2315 - */ 2316 - static void drm_fbdev_fb_destroy(struct fb_info *info) 2317 - { 2318 - drm_fbdev_release(info->par); 2319 - } 2320 - 2321 - static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) 2322 - { 2323 - struct drm_fb_helper *fb_helper = info->par; 2324 - 2325 - if (drm_fbdev_use_shadow_fb(fb_helper)) 2326 - return fb_deferred_io_mmap(info, vma); 2327 - else if (fb_helper->dev->driver->gem_prime_mmap) 2328 - return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); 2329 - else 2330 - return -ENODEV; 2331 - } 2332 - 2333 - static bool drm_fbdev_use_iomem(struct fb_info *info) 2334 - { 2335 - struct drm_fb_helper *fb_helper = info->par; 2336 - struct drm_client_buffer *buffer = fb_helper->buffer; 2337 - 2338 - return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; 2339 - } 2340 - 2341 - static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, 2342 - loff_t pos) 2343 - { 2344 - const char __iomem *src = info->screen_base + pos; 2345 - size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 2346 - ssize_t ret = 0; 2347 - int err = 0; 2348 - char *tmp; 2349 - 2350 - tmp = kmalloc(alloc_size, GFP_KERNEL); 2351 - if (!tmp) 2352 - return -ENOMEM; 2353 - 2354 - while (count) { 2355 - size_t c = min_t(size_t, count, alloc_size); 2356 - 2357 - memcpy_fromio(tmp, src, c); 2358 - if (copy_to_user(buf, tmp, c)) { 2359 - err = -EFAULT; 2360 - break; 2361 - } 2362 - 2363 - src += c; 2364 - buf += c; 2365 - ret += c; 2366 - count -= c; 2367 - } 2368 - 2369 - kfree(tmp); 2370 - 2371 - return ret ? 
ret : err; 2372 - } 2373 - 2374 - static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count, 2375 - loff_t pos) 2376 - { 2377 - const char *src = info->screen_buffer + pos; 2378 - 2379 - if (copy_to_user(buf, src, count)) 2380 - return -EFAULT; 2381 - 2382 - return count; 2383 - } 2384 - 2385 - static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, 2386 - size_t count, loff_t *ppos) 2387 - { 2388 - loff_t pos = *ppos; 2389 - size_t total_size; 2390 - ssize_t ret; 2391 - 2392 - if (info->screen_size) 2393 - total_size = info->screen_size; 2394 - else 2395 - total_size = info->fix.smem_len; 2396 - 2397 - if (pos >= total_size) 2398 - return 0; 2399 - if (count >= total_size) 2400 - count = total_size; 2401 - if (total_size - count < pos) 2402 - count = total_size - pos; 2403 - 2404 - if (drm_fbdev_use_iomem(info)) 2405 - ret = fb_read_screen_base(info, buf, count, pos); 2406 - else 2407 - ret = fb_read_screen_buffer(info, buf, count, pos); 2408 - 2409 - if (ret > 0) 2410 - *ppos += ret; 2411 - 2412 - return ret; 2413 - } 2414 - 2415 - static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, 2416 - loff_t pos) 2417 - { 2418 - char __iomem *dst = info->screen_base + pos; 2419 - size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 2420 - ssize_t ret = 0; 2421 - int err = 0; 2422 - u8 *tmp; 2423 - 2424 - tmp = kmalloc(alloc_size, GFP_KERNEL); 2425 - if (!tmp) 2426 - return -ENOMEM; 2427 - 2428 - while (count) { 2429 - size_t c = min_t(size_t, count, alloc_size); 2430 - 2431 - if (copy_from_user(tmp, buf, c)) { 2432 - err = -EFAULT; 2433 - break; 2434 - } 2435 - memcpy_toio(dst, tmp, c); 2436 - 2437 - dst += c; 2438 - buf += c; 2439 - ret += c; 2440 - count -= c; 2441 - } 2442 - 2443 - kfree(tmp); 2444 - 2445 - return ret ? 
ret : err; 2446 - } 2447 - 2448 - static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count, 2449 - loff_t pos) 2450 - { 2451 - char *dst = info->screen_buffer + pos; 2452 - 2453 - if (copy_from_user(dst, buf, count)) 2454 - return -EFAULT; 2455 - 2456 - return count; 2457 - } 2458 - 2459 - static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, 2460 - size_t count, loff_t *ppos) 2461 - { 2462 - loff_t pos = *ppos; 2463 - size_t total_size; 2464 - ssize_t ret; 2465 - struct drm_rect damage_area; 2466 - int err = 0; 2467 - 2468 - if (info->screen_size) 2469 - total_size = info->screen_size; 2470 - else 2471 - total_size = info->fix.smem_len; 2472 - 2473 - if (pos > total_size) 2474 - return -EFBIG; 2475 - if (count > total_size) { 2476 - err = -EFBIG; 2477 - count = total_size; 2478 - } 2479 - if (total_size - count < pos) { 2480 - if (!err) 2481 - err = -ENOSPC; 2482 - count = total_size - pos; 2483 - } 2484 - 2485 - /* 2486 - * Copy to framebuffer even if we already logged an error. Emulates 2487 - * the behavior of the original fbdev implementation. 
2488 - */ 2489 - if (drm_fbdev_use_iomem(info)) 2490 - ret = fb_write_screen_base(info, buf, count, pos); 2491 - else 2492 - ret = fb_write_screen_buffer(info, buf, count, pos); 2493 - 2494 - if (ret < 0) 2495 - return ret; /* return last error, if any */ 2496 - else if (!ret) 2497 - return err; /* return previous error, if any */ 2498 - 2499 - *ppos += ret; 2500 - 2501 - drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 2502 - drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 2503 - drm_rect_width(&damage_area), 2504 - drm_rect_height(&damage_area)); 2505 - 2506 - return ret; 2507 - } 2508 - 2509 - static void drm_fbdev_fb_fillrect(struct fb_info *info, 2510 - const struct fb_fillrect *rect) 2511 - { 2512 - if (drm_fbdev_use_iomem(info)) 2513 - drm_fb_helper_cfb_fillrect(info, rect); 2514 - else 2515 - drm_fb_helper_sys_fillrect(info, rect); 2516 - } 2517 - 2518 - static void drm_fbdev_fb_copyarea(struct fb_info *info, 2519 - const struct fb_copyarea *area) 2520 - { 2521 - if (drm_fbdev_use_iomem(info)) 2522 - drm_fb_helper_cfb_copyarea(info, area); 2523 - else 2524 - drm_fb_helper_sys_copyarea(info, area); 2525 - } 2526 - 2527 - static void drm_fbdev_fb_imageblit(struct fb_info *info, 2528 - const struct fb_image *image) 2529 - { 2530 - if (drm_fbdev_use_iomem(info)) 2531 - drm_fb_helper_cfb_imageblit(info, image); 2532 - else 2533 - drm_fb_helper_sys_imageblit(info, image); 2534 - } 2535 - 2536 - static const struct fb_ops drm_fbdev_fb_ops = { 2537 - .owner = THIS_MODULE, 2538 - DRM_FB_HELPER_DEFAULT_OPS, 2539 - .fb_open = drm_fbdev_fb_open, 2540 - .fb_release = drm_fbdev_fb_release, 2541 - .fb_destroy = drm_fbdev_fb_destroy, 2542 - .fb_mmap = drm_fbdev_fb_mmap, 2543 - .fb_read = drm_fbdev_fb_read, 2544 - .fb_write = drm_fbdev_fb_write, 2545 - .fb_fillrect = drm_fbdev_fb_fillrect, 2546 - .fb_copyarea = drm_fbdev_fb_copyarea, 2547 - .fb_imageblit = drm_fbdev_fb_imageblit, 2548 - }; 2549 - 2550 - static struct fb_deferred_io 
drm_fbdev_defio = { 2551 - .delay = HZ / 20, 2552 - .deferred_io = drm_fb_helper_deferred_io, 2553 - }; 2554 - 2555 - /* 2556 - * This function uses the client API to create a framebuffer backed by a dumb buffer. 2557 - * 2558 - * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, 2559 - * fb_copyarea, fb_imageblit. 2560 - */ 2561 - static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, 2562 - struct drm_fb_helper_surface_size *sizes) 2563 - { 2564 - struct drm_client_dev *client = &fb_helper->client; 2565 - struct drm_device *dev = fb_helper->dev; 2566 - struct drm_client_buffer *buffer; 2567 - struct drm_framebuffer *fb; 2568 - struct fb_info *fbi; 2569 - u32 format; 2570 - struct iosys_map map; 2571 - int ret; 2572 - 2573 - drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", 2574 - sizes->surface_width, sizes->surface_height, 2575 - sizes->surface_bpp); 2576 - 2577 - format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); 2578 - buffer = drm_client_framebuffer_create(client, sizes->surface_width, 2579 - sizes->surface_height, format); 2580 - if (IS_ERR(buffer)) 2581 - return PTR_ERR(buffer); 2582 - 2583 - fb_helper->buffer = buffer; 2584 - fb_helper->fb = buffer->fb; 2585 - fb = buffer->fb; 2586 - 2587 - fbi = drm_fb_helper_alloc_fbi(fb_helper); 2588 - if (IS_ERR(fbi)) 2589 - return PTR_ERR(fbi); 2590 - 2591 - fbi->fbops = &drm_fbdev_fb_ops; 2592 - fbi->screen_size = sizes->surface_height * fb->pitches[0]; 2593 - fbi->fix.smem_len = fbi->screen_size; 2594 - fbi->flags = FBINFO_DEFAULT; 2595 - 2596 - drm_fb_helper_fill_info(fbi, fb_helper, sizes); 2597 - 2598 - if (drm_fbdev_use_shadow_fb(fb_helper)) { 2599 - fbi->screen_buffer = vzalloc(fbi->screen_size); 2600 - if (!fbi->screen_buffer) 2601 - return -ENOMEM; 2602 - fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; 2603 - 2604 - fbi->fbdefio = &drm_fbdev_defio; 2605 - fb_deferred_io_init(fbi); 2606 - } else { 2607 - /* buffer is mapped for HW 
framebuffer */ 2608 - ret = drm_client_buffer_vmap(fb_helper->buffer, &map); 2609 - if (ret) 2610 - return ret; 2611 - if (map.is_iomem) { 2612 - fbi->screen_base = map.vaddr_iomem; 2613 - } else { 2614 - fbi->screen_buffer = map.vaddr; 2615 - fbi->flags |= FBINFO_VIRTFB; 2616 - } 2617 - 2618 - /* 2619 - * Shamelessly leak the physical address to user-space. As 2620 - * page_to_phys() is undefined for I/O memory, warn in this 2621 - * case. 2622 - */ 2623 - #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 2624 - if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 && 2625 - !drm_WARN_ON_ONCE(dev, map.is_iomem)) 2626 - fbi->fix.smem_start = 2627 - page_to_phys(virt_to_page(fbi->screen_buffer)); 2628 - #endif 2629 - } 2630 - 2631 - return 0; 2632 - } 2633 - 2634 - static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { 2635 - .fb_probe = drm_fb_helper_generic_probe, 2636 - }; 2637 - 2638 - static void drm_fbdev_client_unregister(struct drm_client_dev *client) 2639 - { 2640 - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 2641 - 2642 - if (fb_helper->fbdev) 2643 - /* drm_fbdev_fb_destroy() takes care of cleanup */ 2644 - drm_fb_helper_unregister_fbi(fb_helper); 2645 - else 2646 - drm_fbdev_release(fb_helper); 2647 - } 2648 - 2649 - static int drm_fbdev_client_restore(struct drm_client_dev *client) 2650 - { 2651 - drm_fb_helper_lastclose(client->dev); 2652 - 2653 - return 0; 2654 - } 2655 - 2656 - static int drm_fbdev_client_hotplug(struct drm_client_dev *client) 2657 - { 2658 - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 2659 - struct drm_device *dev = client->dev; 2660 - int ret; 2661 - 2662 - /* Setup is not retried if it has failed */ 2663 - if (!fb_helper->dev && fb_helper->funcs) 2664 - return 0; 2665 - 2666 - if (dev->fb_helper) 2667 - return drm_fb_helper_hotplug_event(dev->fb_helper); 2668 - 2669 - if (!dev->mode_config.num_connector) { 2670 - drm_dbg_kms(dev, "No connectors found, will not create 
framebuffer!\n"); 2671 - return 0; 2672 - } 2673 - 2674 - drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); 2675 - 2676 - ret = drm_fb_helper_init(dev, fb_helper); 2677 - if (ret) 2678 - goto err; 2679 - 2680 - if (!drm_drv_uses_atomic_modeset(dev)) 2681 - drm_helper_disable_unused_functions(dev); 2682 - 2683 - ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); 2684 - if (ret) 2685 - goto err_cleanup; 2686 - 2687 - return 0; 2688 - 2689 - err_cleanup: 2690 - drm_fbdev_cleanup(fb_helper); 2691 - err: 2692 - fb_helper->dev = NULL; 2693 - fb_helper->fbdev = NULL; 2694 - 2695 - drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret); 2696 - 2697 - return ret; 2698 - } 2699 - 2700 - static const struct drm_client_funcs drm_fbdev_client_funcs = { 2701 - .owner = THIS_MODULE, 2702 - .unregister = drm_fbdev_client_unregister, 2703 - .restore = drm_fbdev_client_restore, 2704 - .hotplug = drm_fbdev_client_hotplug, 2705 - }; 2706 - 2707 - /** 2708 - * drm_fbdev_generic_setup() - Setup generic fbdev emulation 2709 - * @dev: DRM device 2710 - * @preferred_bpp: Preferred bits per pixel for the device. 2711 - * @dev->mode_config.preferred_depth is used if this is zero. 2712 - * 2713 - * This function sets up generic fbdev emulation for drivers that supports 2714 - * dumb buffers with a virtual address and that can be mmap'ed. 2715 - * drm_fbdev_generic_setup() shall be called after the DRM driver registered 2716 - * the new DRM device with drm_dev_register(). 2717 - * 2718 - * Restore, hotplug events and teardown are all taken care of. Drivers that do 2719 - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. 2720 - * Simple drivers might use drm_mode_config_helper_suspend(). 2721 - * 2722 - * Drivers that set the dirty callback on their framebuffer will get a shadow 2723 - * fbdev buffer that is blitted onto the real buffer. 
This is done in order to 2724 - * make deferred I/O work with all kinds of buffers. A shadow buffer can be 2725 - * requested explicitly by setting struct drm_mode_config.prefer_shadow or 2726 - * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is 2727 - * required to use generic fbdev emulation with SHMEM helpers. 2728 - * 2729 - * This function is safe to call even when there are no connectors present. 2730 - * Setup will be retried on the next hotplug event. 2731 - * 2732 - * The fbdev is destroyed by drm_dev_unregister(). 2733 - */ 2734 - void drm_fbdev_generic_setup(struct drm_device *dev, 2735 - unsigned int preferred_bpp) 2736 - { 2737 - struct drm_fb_helper *fb_helper; 2738 - int ret; 2739 - 2740 - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); 2741 - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); 2742 - 2743 - if (!drm_fbdev_emulation) 2744 - return; 2745 - 2746 - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); 2747 - if (!fb_helper) { 2748 - drm_err(dev, "Failed to allocate fb_helper\n"); 2749 - return; 2750 - } 2751 - 2752 - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); 2753 - if (ret) { 2754 - kfree(fb_helper); 2755 - drm_err(dev, "Failed to register client: %d\n", ret); 2756 - return; 2757 - } 2758 - 2759 - /* 2760 - * FIXME: This mixes up depth with bpp, which results in a glorious 2761 - * mess, resulting in some drivers picking wrong fbdev defaults and 2762 - * others wrong preferred_depth defaults. 2763 - */ 2764 - if (!preferred_bpp) 2765 - preferred_bpp = dev->mode_config.preferred_depth; 2766 - if (!preferred_bpp) 2767 - preferred_bpp = 32; 2768 - fb_helper->preferred_bpp = preferred_bpp; 2769 - 2770 - ret = drm_fbdev_client_hotplug(&fb_helper->client); 2771 - if (ret) 2772 - drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); 2773 - 2774 - drm_client_register(&fb_helper->client); 2775 - } 2776 - EXPORT_SYMBOL(drm_fbdev_generic_setup);
+494
drivers/gpu/drm/drm_fbdev_generic.c
··· 1 + // SPDX-License-Identifier: MIT 2 + 3 + #include <linux/moduleparam.h> 4 + #include <linux/vmalloc.h> 5 + 6 + #include <drm/drm_crtc_helper.h> 7 + #include <drm/drm_drv.h> 8 + #include <drm/drm_fb_helper.h> 9 + #include <drm/drm_framebuffer.h> 10 + #include <drm/drm_print.h> 11 + 12 + #include <drm/drm_fbdev_generic.h> 13 + 14 + static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) 15 + { 16 + struct drm_device *dev = fb_helper->dev; 17 + struct drm_framebuffer *fb = fb_helper->fb; 18 + 19 + return dev->mode_config.prefer_shadow_fbdev || 20 + dev->mode_config.prefer_shadow || 21 + fb->funcs->dirty; 22 + } 23 + 24 + /* @user: 1=userspace, 0=fbcon */ 25 + static int drm_fbdev_fb_open(struct fb_info *info, int user) 26 + { 27 + struct drm_fb_helper *fb_helper = info->par; 28 + 29 + /* No need to take a ref for fbcon because it unbinds on unregister */ 30 + if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) 31 + return -ENODEV; 32 + 33 + return 0; 34 + } 35 + 36 + static int drm_fbdev_fb_release(struct fb_info *info, int user) 37 + { 38 + struct drm_fb_helper *fb_helper = info->par; 39 + 40 + if (user) 41 + module_put(fb_helper->dev->driver->fops->owner); 42 + 43 + return 0; 44 + } 45 + 46 + static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) 47 + { 48 + struct fb_info *fbi = fb_helper->info; 49 + void *shadow = NULL; 50 + 51 + if (!fb_helper->dev) 52 + return; 53 + 54 + if (fbi) { 55 + if (fbi->fbdefio) 56 + fb_deferred_io_cleanup(fbi); 57 + if (drm_fbdev_use_shadow_fb(fb_helper)) 58 + shadow = fbi->screen_buffer; 59 + } 60 + 61 + drm_fb_helper_fini(fb_helper); 62 + 63 + if (shadow) 64 + vfree(shadow); 65 + else if (fb_helper->buffer) 66 + drm_client_buffer_vunmap(fb_helper->buffer); 67 + 68 + drm_client_framebuffer_delete(fb_helper->buffer); 69 + } 70 + 71 + static void drm_fbdev_release(struct drm_fb_helper *fb_helper) 72 + { 73 + drm_fbdev_cleanup(fb_helper); 74 + drm_client_release(&fb_helper->client); 75 + 
kfree(fb_helper); 76 + } 77 + 78 + /* 79 + * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of 80 + * unregister_framebuffer() or fb_release(). 81 + */ 82 + static void drm_fbdev_fb_destroy(struct fb_info *info) 83 + { 84 + drm_fbdev_release(info->par); 85 + } 86 + 87 + static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) 88 + { 89 + struct drm_fb_helper *fb_helper = info->par; 90 + 91 + if (drm_fbdev_use_shadow_fb(fb_helper)) 92 + return fb_deferred_io_mmap(info, vma); 93 + else if (fb_helper->dev->driver->gem_prime_mmap) 94 + return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); 95 + else 96 + return -ENODEV; 97 + } 98 + 99 + static bool drm_fbdev_use_iomem(struct fb_info *info) 100 + { 101 + struct drm_fb_helper *fb_helper = info->par; 102 + struct drm_client_buffer *buffer = fb_helper->buffer; 103 + 104 + return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; 105 + } 106 + 107 + static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, 108 + size_t count, loff_t *ppos) 109 + { 110 + ssize_t ret; 111 + 112 + if (drm_fbdev_use_iomem(info)) 113 + ret = drm_fb_helper_cfb_read(info, buf, count, ppos); 114 + else 115 + ret = drm_fb_helper_sys_read(info, buf, count, ppos); 116 + 117 + return ret; 118 + } 119 + 120 + static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, 121 + size_t count, loff_t *ppos) 122 + { 123 + ssize_t ret; 124 + 125 + if (drm_fbdev_use_iomem(info)) 126 + ret = drm_fb_helper_cfb_write(info, buf, count, ppos); 127 + else 128 + ret = drm_fb_helper_sys_write(info, buf, count, ppos); 129 + 130 + return ret; 131 + } 132 + 133 + static void drm_fbdev_fb_fillrect(struct fb_info *info, 134 + const struct fb_fillrect *rect) 135 + { 136 + if (drm_fbdev_use_iomem(info)) 137 + drm_fb_helper_cfb_fillrect(info, rect); 138 + else 139 + drm_fb_helper_sys_fillrect(info, rect); 140 + } 141 + 142 + static void drm_fbdev_fb_copyarea(struct 
fb_info *info, 143 + const struct fb_copyarea *area) 144 + { 145 + if (drm_fbdev_use_iomem(info)) 146 + drm_fb_helper_cfb_copyarea(info, area); 147 + else 148 + drm_fb_helper_sys_copyarea(info, area); 149 + } 150 + 151 + static void drm_fbdev_fb_imageblit(struct fb_info *info, 152 + const struct fb_image *image) 153 + { 154 + if (drm_fbdev_use_iomem(info)) 155 + drm_fb_helper_cfb_imageblit(info, image); 156 + else 157 + drm_fb_helper_sys_imageblit(info, image); 158 + } 159 + 160 + static const struct fb_ops drm_fbdev_fb_ops = { 161 + .owner = THIS_MODULE, 162 + DRM_FB_HELPER_DEFAULT_OPS, 163 + .fb_open = drm_fbdev_fb_open, 164 + .fb_release = drm_fbdev_fb_release, 165 + .fb_destroy = drm_fbdev_fb_destroy, 166 + .fb_mmap = drm_fbdev_fb_mmap, 167 + .fb_read = drm_fbdev_fb_read, 168 + .fb_write = drm_fbdev_fb_write, 169 + .fb_fillrect = drm_fbdev_fb_fillrect, 170 + .fb_copyarea = drm_fbdev_fb_copyarea, 171 + .fb_imageblit = drm_fbdev_fb_imageblit, 172 + }; 173 + 174 + static struct fb_deferred_io drm_fbdev_defio = { 175 + .delay = HZ / 20, 176 + .deferred_io = drm_fb_helper_deferred_io, 177 + }; 178 + 179 + /* 180 + * This function uses the client API to create a framebuffer backed by a dumb buffer. 
181 + */ 182 + static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper, 183 + struct drm_fb_helper_surface_size *sizes) 184 + { 185 + struct drm_client_dev *client = &fb_helper->client; 186 + struct drm_device *dev = fb_helper->dev; 187 + struct drm_client_buffer *buffer; 188 + struct drm_framebuffer *fb; 189 + struct fb_info *fbi; 190 + u32 format; 191 + struct iosys_map map; 192 + int ret; 193 + 194 + drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", 195 + sizes->surface_width, sizes->surface_height, 196 + sizes->surface_bpp); 197 + 198 + format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); 199 + buffer = drm_client_framebuffer_create(client, sizes->surface_width, 200 + sizes->surface_height, format); 201 + if (IS_ERR(buffer)) 202 + return PTR_ERR(buffer); 203 + 204 + fb_helper->buffer = buffer; 205 + fb_helper->fb = buffer->fb; 206 + fb = buffer->fb; 207 + 208 + fbi = drm_fb_helper_alloc_info(fb_helper); 209 + if (IS_ERR(fbi)) 210 + return PTR_ERR(fbi); 211 + 212 + fbi->fbops = &drm_fbdev_fb_ops; 213 + fbi->screen_size = sizes->surface_height * fb->pitches[0]; 214 + fbi->fix.smem_len = fbi->screen_size; 215 + fbi->flags = FBINFO_DEFAULT; 216 + 217 + drm_fb_helper_fill_info(fbi, fb_helper, sizes); 218 + 219 + if (drm_fbdev_use_shadow_fb(fb_helper)) { 220 + fbi->screen_buffer = vzalloc(fbi->screen_size); 221 + if (!fbi->screen_buffer) 222 + return -ENOMEM; 223 + fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; 224 + 225 + fbi->fbdefio = &drm_fbdev_defio; 226 + fb_deferred_io_init(fbi); 227 + } else { 228 + /* buffer is mapped for HW framebuffer */ 229 + ret = drm_client_buffer_vmap(fb_helper->buffer, &map); 230 + if (ret) 231 + return ret; 232 + if (map.is_iomem) { 233 + fbi->screen_base = map.vaddr_iomem; 234 + } else { 235 + fbi->screen_buffer = map.vaddr; 236 + fbi->flags |= FBINFO_VIRTFB; 237 + } 238 + 239 + /* 240 + * Shamelessly leak the physical address to user-space. 
As 241 + * page_to_phys() is undefined for I/O memory, warn in this 242 + * case. 243 + */ 244 + #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 245 + if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 && 246 + !drm_WARN_ON_ONCE(dev, map.is_iomem)) 247 + fbi->fix.smem_start = 248 + page_to_phys(virt_to_page(fbi->screen_buffer)); 249 + #endif 250 + } 251 + 252 + return 0; 253 + } 254 + 255 + static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper, 256 + struct drm_clip_rect *clip, 257 + struct iosys_map *dst) 258 + { 259 + struct drm_framebuffer *fb = fb_helper->fb; 260 + size_t offset = clip->y1 * fb->pitches[0]; 261 + size_t len = clip->x2 - clip->x1; 262 + unsigned int y; 263 + void *src; 264 + 265 + switch (drm_format_info_bpp(fb->format, 0)) { 266 + case 1: 267 + offset += clip->x1 / 8; 268 + len = DIV_ROUND_UP(len + clip->x1 % 8, 8); 269 + break; 270 + case 2: 271 + offset += clip->x1 / 4; 272 + len = DIV_ROUND_UP(len + clip->x1 % 4, 4); 273 + break; 274 + case 4: 275 + offset += clip->x1 / 2; 276 + len = DIV_ROUND_UP(len + clip->x1 % 2, 2); 277 + break; 278 + default: 279 + offset += clip->x1 * fb->format->cpp[0]; 280 + len *= fb->format->cpp[0]; 281 + break; 282 + } 283 + 284 + src = fb_helper->info->screen_buffer + offset; 285 + iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ 286 + 287 + for (y = clip->y1; y < clip->y2; y++) { 288 + iosys_map_memcpy_to(dst, 0, src, len); 289 + iosys_map_incr(dst, fb->pitches[0]); 290 + src += fb->pitches[0]; 291 + } 292 + } 293 + 294 + static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper, 295 + struct drm_clip_rect *clip) 296 + { 297 + struct drm_client_buffer *buffer = fb_helper->buffer; 298 + struct iosys_map map, dst; 299 + int ret; 300 + 301 + /* 302 + * We have to pin the client buffer to its current location while 303 + * flushing the shadow buffer. 
In the general case, concurrent 304 + * modesetting operations could try to move the buffer and would 305 + * fail. The modeset has to be serialized by acquiring the reservation 306 + * object of the underlying BO here. 307 + * 308 + * For fbdev emulation, we only have to protect against fbdev modeset 309 + * operations. Nothing else will involve the client buffer's BO. So it 310 + * is sufficient to acquire struct drm_fb_helper.lock here. 311 + */ 312 + mutex_lock(&fb_helper->lock); 313 + 314 + ret = drm_client_buffer_vmap(buffer, &map); 315 + if (ret) 316 + goto out; 317 + 318 + dst = map; 319 + drm_fbdev_damage_blit_real(fb_helper, clip, &dst); 320 + 321 + drm_client_buffer_vunmap(buffer); 322 + 323 + out: 324 + mutex_unlock(&fb_helper->lock); 325 + 326 + return ret; 327 + } 328 + 329 + static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) 330 + { 331 + struct drm_device *dev = helper->dev; 332 + int ret; 333 + 334 + if (!drm_fbdev_use_shadow_fb(helper)) 335 + return 0; 336 + 337 + /* Call damage handlers only if necessary */ 338 + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) 339 + return 0; 340 + 341 + if (helper->buffer) { 342 + ret = drm_fbdev_damage_blit(helper, clip); 343 + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) 344 + return ret; 345 + } 346 + 347 + if (helper->fb->funcs->dirty) { 348 + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); 349 + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) 350 + return ret; 351 + } 352 + 353 + return 0; 354 + } 355 + 356 + static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { 357 + .fb_probe = drm_fbdev_fb_probe, 358 + .fb_dirty = drm_fbdev_fb_dirty, 359 + }; 360 + 361 + static void drm_fbdev_client_unregister(struct drm_client_dev *client) 362 + { 363 + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 364 + 365 + if (fb_helper->info) 366 + /* drm_fbdev_fb_destroy() takes care of 
cleanup */ 367 + drm_fb_helper_unregister_info(fb_helper); 368 + else 369 + drm_fbdev_release(fb_helper); 370 + } 371 + 372 + static int drm_fbdev_client_restore(struct drm_client_dev *client) 373 + { 374 + drm_fb_helper_lastclose(client->dev); 375 + 376 + return 0; 377 + } 378 + 379 + static int drm_fbdev_client_hotplug(struct drm_client_dev *client) 380 + { 381 + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 382 + struct drm_device *dev = client->dev; 383 + int ret; 384 + 385 + /* Setup is not retried if it has failed */ 386 + if (!fb_helper->dev && fb_helper->funcs) 387 + return 0; 388 + 389 + if (dev->fb_helper) 390 + return drm_fb_helper_hotplug_event(dev->fb_helper); 391 + 392 + if (!dev->mode_config.num_connector) { 393 + drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n"); 394 + return 0; 395 + } 396 + 397 + drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); 398 + 399 + ret = drm_fb_helper_init(dev, fb_helper); 400 + if (ret) 401 + goto err; 402 + 403 + if (!drm_drv_uses_atomic_modeset(dev)) 404 + drm_helper_disable_unused_functions(dev); 405 + 406 + ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); 407 + if (ret) 408 + goto err_cleanup; 409 + 410 + return 0; 411 + 412 + err_cleanup: 413 + drm_fbdev_cleanup(fb_helper); 414 + err: 415 + fb_helper->dev = NULL; 416 + fb_helper->info = NULL; 417 + 418 + drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret); 419 + 420 + return ret; 421 + } 422 + 423 + static const struct drm_client_funcs drm_fbdev_client_funcs = { 424 + .owner = THIS_MODULE, 425 + .unregister = drm_fbdev_client_unregister, 426 + .restore = drm_fbdev_client_restore, 427 + .hotplug = drm_fbdev_client_hotplug, 428 + }; 429 + 430 + /** 431 + * drm_fbdev_generic_setup() - Setup generic fbdev emulation 432 + * @dev: DRM device 433 + * @preferred_bpp: Preferred bits per pixel for the device. 
434 + * @dev->mode_config.preferred_depth is used if this is zero. 435 + * 436 + * This function sets up generic fbdev emulation for drivers that supports 437 + * dumb buffers with a virtual address and that can be mmap'ed. 438 + * drm_fbdev_generic_setup() shall be called after the DRM driver registered 439 + * the new DRM device with drm_dev_register(). 440 + * 441 + * Restore, hotplug events and teardown are all taken care of. Drivers that do 442 + * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. 443 + * Simple drivers might use drm_mode_config_helper_suspend(). 444 + * 445 + * Drivers that set the dirty callback on their framebuffer will get a shadow 446 + * fbdev buffer that is blitted onto the real buffer. This is done in order to 447 + * make deferred I/O work with all kinds of buffers. A shadow buffer can be 448 + * requested explicitly by setting struct drm_mode_config.prefer_shadow or 449 + * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is 450 + * required to use generic fbdev emulation with SHMEM helpers. 451 + * 452 + * This function is safe to call even when there are no connectors present. 453 + * Setup will be retried on the next hotplug event. 454 + * 455 + * The fbdev is destroyed by drm_dev_unregister(). 
456 + */ 457 + void drm_fbdev_generic_setup(struct drm_device *dev, 458 + unsigned int preferred_bpp) 459 + { 460 + struct drm_fb_helper *fb_helper; 461 + int ret; 462 + 463 + drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); 464 + drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); 465 + 466 + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); 467 + if (!fb_helper) 468 + return; 469 + 470 + ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); 471 + if (ret) { 472 + kfree(fb_helper); 473 + drm_err(dev, "Failed to register client: %d\n", ret); 474 + return; 475 + } 476 + 477 + /* 478 + * FIXME: This mixes up depth with bpp, which results in a glorious 479 + * mess, resulting in some drivers picking wrong fbdev defaults and 480 + * others wrong preferred_depth defaults. 481 + */ 482 + if (!preferred_bpp) 483 + preferred_bpp = dev->mode_config.preferred_depth; 484 + if (!preferred_bpp) 485 + preferred_bpp = 32; 486 + fb_helper->preferred_bpp = preferred_bpp; 487 + 488 + ret = drm_fbdev_client_hotplug(&fb_helper->client); 489 + if (ret) 490 + drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); 491 + 492 + drm_client_register(&fb_helper->client); 493 + } 494 + EXPORT_SYMBOL(drm_fbdev_generic_setup);
+30 -36
drivers/gpu/drm/drm_gem_atomic_helper.c
··· 360 360 EXPORT_SYMBOL(drm_gem_reset_shadow_plane); 361 361 362 362 /** 363 - * drm_gem_prepare_shadow_fb - prepares shadow framebuffers 363 + * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access 364 364 * @plane: the plane 365 365 * @plane_state: the plane state of type struct drm_shadow_plane_state 366 366 * 367 - * This function implements struct &drm_plane_helper_funcs.prepare_fb. It 367 + * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It 368 368 * maps all buffer objects of the plane's framebuffer into kernel address 369 - * space and stores them in &struct drm_shadow_plane_state.map. The 370 - * framebuffer will be synchronized as part of the atomic commit. 369 + * space and stores them in struct &drm_shadow_plane_state.map. The first data 370 + * bytes are available in struct &drm_shadow_plane_state.data. 371 371 * 372 - * See drm_gem_cleanup_shadow_fb() for cleanup. 372 + * See drm_gem_end_shadow_fb_access() for cleanup. 373 373 * 374 374 * Returns: 375 375 * 0 on success, or a negative errno code otherwise. 
376 376 */ 377 - int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state) 377 + int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state) 378 378 { 379 379 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 380 380 struct drm_framebuffer *fb = plane_state->fb; 381 - int ret; 382 381 383 382 if (!fb) 384 383 return 0; 385 384 386 - ret = drm_gem_plane_helper_prepare_fb(plane, plane_state); 387 - if (ret) 388 - return ret; 389 - 390 385 return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data); 391 386 } 392 - EXPORT_SYMBOL(drm_gem_prepare_shadow_fb); 387 + EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access); 393 388 394 389 /** 395 - * drm_gem_cleanup_shadow_fb - releases shadow framebuffers 390 + * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access 396 391 * @plane: the plane 397 392 * @plane_state: the plane state of type struct drm_shadow_plane_state 398 393 * 399 - * This function implements struct &drm_plane_helper_funcs.cleanup_fb. 400 - * This function unmaps all buffer objects of the plane's framebuffer. 394 + * This function implements struct &drm_plane_helper_funcs.end_fb_access. It 395 + * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order. 401 396 * 402 - * See drm_gem_prepare_shadow_fb() for more information. 397 + * See drm_gem_begin_shadow_fb_access() for more information. 
403 398 */ 404 - void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state) 399 + void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state) 405 400 { 406 401 struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); 407 402 struct drm_framebuffer *fb = plane_state->fb; ··· 406 411 407 412 drm_gem_fb_vunmap(fb, shadow_plane_state->map); 408 413 } 409 - EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb); 414 + EXPORT_SYMBOL(drm_gem_end_shadow_fb_access); 410 415 411 416 /** 412 - * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers 417 + * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access 413 418 * @pipe: the simple display pipe 414 419 * @plane_state: the plane state of type struct drm_shadow_plane_state 415 420 * 416 - * This function implements struct drm_simple_display_funcs.prepare_fb. It 417 - * maps all buffer objects of the plane's framebuffer into kernel address 418 - * space and stores them in struct drm_shadow_plane_state.map. The 419 - * framebuffer will be synchronized as part of the atomic commit. 421 + * This function implements struct drm_simple_display_funcs.begin_fb_access. 420 422 * 421 - * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup. 423 + * See drm_gem_begin_shadow_fb_access() for details and 424 + * drm_gem_simple_kms_cleanup_shadow_fb() for cleanup. 422 425 * 423 426 * Returns: 424 427 * 0 on success, or a negative errno code otherwise. 
425 428 */ 426 - int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe, 427 - struct drm_plane_state *plane_state) 429 + int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe, 430 + struct drm_plane_state *plane_state) 428 431 { 429 - return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state); 432 + return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state); 430 433 } 431 - EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb); 434 + EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access); 432 435 433 436 /** 434 - * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers 437 + * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access 435 438 * @pipe: the simple display pipe 436 439 * @plane_state: the plane state of type struct drm_shadow_plane_state 437 440 * 438 - * This function implements struct drm_simple_display_funcs.cleanup_fb. 439 - * This function unmaps all buffer objects of the plane's framebuffer. 441 + * This function implements struct drm_simple_display_funcs.end_fb_access. 442 + * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in 443 + * reverse order. 440 444 * 441 - * See drm_gem_simple_kms_prepare_shadow_fb(). 445 + * See drm_gem_simple_kms_begin_shadow_fb_access(). 442 446 */ 443 - void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe, 444 - struct drm_plane_state *plane_state) 447 + void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe, 448 + struct drm_plane_state *plane_state) 445 449 { 446 - drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state); 450 + drm_gem_end_shadow_fb_access(&pipe->plane, plane_state); 447 451 } 448 - EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb); 452 + EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access); 449 453 450 454 /** 451 455 * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
-1
drivers/gpu/drm/drm_gem_framebuffer_helper.c
··· 9 9 #include <linux/module.h> 10 10 11 11 #include <drm/drm_damage_helper.h> 12 - #include <drm/drm_fb_helper.h> 13 12 #include <drm/drm_fourcc.h> 14 13 #include <drm/drm_framebuffer.h> 15 14 #include <drm/drm_gem.h>
-1
drivers/gpu/drm/drm_probe_helper.c
··· 36 36 #include <drm/drm_client.h> 37 37 #include <drm/drm_crtc.h> 38 38 #include <drm/drm_edid.h> 39 - #include <drm/drm_fb_helper.h> 40 39 #include <drm/drm_fourcc.h> 41 40 #include <drm/drm_modeset_helper_vtables.h> 42 41 #include <drm/drm_print.h>
+26
drivers/gpu/drm/drm_simple_kms_helper.c
··· 285 285 pipe->funcs->cleanup_fb(pipe, state); 286 286 } 287 287 288 + static int drm_simple_kms_plane_begin_fb_access(struct drm_plane *plane, 289 + struct drm_plane_state *new_plane_state) 290 + { 291 + struct drm_simple_display_pipe *pipe; 292 + 293 + pipe = container_of(plane, struct drm_simple_display_pipe, plane); 294 + if (!pipe->funcs || !pipe->funcs->begin_fb_access) 295 + return 0; 296 + 297 + return pipe->funcs->begin_fb_access(pipe, new_plane_state); 298 + } 299 + 300 + static void drm_simple_kms_plane_end_fb_access(struct drm_plane *plane, 301 + struct drm_plane_state *new_plane_state) 302 + { 303 + struct drm_simple_display_pipe *pipe; 304 + 305 + pipe = container_of(plane, struct drm_simple_display_pipe, plane); 306 + if (!pipe->funcs || !pipe->funcs->end_fb_access) 307 + return; 308 + 309 + pipe->funcs->end_fb_access(pipe, new_plane_state); 310 + } 311 + 288 312 static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane, 289 313 uint32_t format, 290 314 uint64_t modifier) ··· 319 295 static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = { 320 296 .prepare_fb = drm_simple_kms_plane_prepare_fb, 321 297 .cleanup_fb = drm_simple_kms_plane_cleanup_fb, 298 + .begin_fb_access = drm_simple_kms_plane_begin_fb_access, 299 + .end_fb_access = drm_simple_kms_plane_end_fb_access, 322 300 .atomic_check = drm_simple_kms_plane_atomic_check, 323 301 .atomic_update = drm_simple_kms_plane_atomic_update, 324 302 };
+2 -1
drivers/gpu/drm/etnaviv/etnaviv_drv.h
··· 6 6 #ifndef __ETNAVIV_DRV_H__ 7 7 #define __ETNAVIV_DRV_H__ 8 8 9 + #include <linux/io.h> 9 10 #include <linux/list.h> 10 11 #include <linux/mm_types.h> 11 12 #include <linux/sizes.h> 12 13 #include <linux/time64.h> 13 14 #include <linux/types.h> 14 15 15 - #include <drm/drm_fb_helper.h> 16 + #include <drm/drm_drv.h> 16 17 #include <drm/drm_gem.h> 17 18 #include <drm/etnaviv_drm.h> 18 19 #include <drm/gpu_scheduler.h>
+4 -2
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 49 49 .owner = THIS_MODULE, 50 50 DRM_FB_HELPER_DEFAULT_OPS, 51 51 .fb_mmap = exynos_drm_fb_mmap, 52 + .fb_read = drm_fb_helper_cfb_read, 53 + .fb_write = drm_fb_helper_cfb_write, 52 54 .fb_fillrect = drm_fb_helper_cfb_fillrect, 53 55 .fb_copyarea = drm_fb_helper_cfb_copyarea, 54 56 .fb_imageblit = drm_fb_helper_cfb_imageblit, ··· 65 63 unsigned int size = fb->width * fb->height * fb->format->cpp[0]; 66 64 unsigned long offset; 67 65 68 - fbi = drm_fb_helper_alloc_fbi(helper); 66 + fbi = drm_fb_helper_alloc_info(helper); 69 67 if (IS_ERR(fbi)) { 70 68 DRM_DEV_ERROR(to_dma_dev(helper->dev), 71 69 "failed to allocate fb info.\n"); ··· 203 201 drm_framebuffer_remove(fb); 204 202 } 205 203 206 - drm_fb_helper_unregister_fbi(fb_helper); 204 + drm_fb_helper_unregister_info(fb_helper); 207 205 208 206 drm_fb_helper_fini(fb_helper); 209 207 }
+1 -1
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
··· 20 20 21 21 #include <drm/drm_atomic_helper.h> 22 22 #include <drm/drm_drv.h> 23 - #include <drm/drm_fb_helper.h> 23 + #include <drm/drm_fbdev_generic.h> 24 24 #include <drm/drm_gem_dma_helper.h> 25 25 #include <drm/drm_modeset_helper.h> 26 26 #include <drm/drm_module.h>
+4 -2
drivers/gpu/drm/gma500/framebuffer.c
··· 147 147 .owner = THIS_MODULE, 148 148 DRM_FB_HELPER_DEFAULT_OPS, 149 149 .fb_setcolreg = psbfb_setcolreg, 150 + .fb_read = drm_fb_helper_cfb_read, 151 + .fb_write = drm_fb_helper_cfb_write, 150 152 .fb_fillrect = drm_fb_helper_cfb_fillrect, 151 153 .fb_copyarea = drm_fb_helper_cfb_copyarea, 152 154 .fb_imageblit = drm_fb_helper_cfb_imageblit, ··· 270 268 271 269 memset(dev_priv->vram_addr + backing->offset, 0, size); 272 270 273 - info = drm_fb_helper_alloc_fbi(fb_helper); 271 + info = drm_fb_helper_alloc_info(fb_helper); 274 272 if (IS_ERR(info)) { 275 273 ret = PTR_ERR(info); 276 274 goto err_drm_gem_object_put; ··· 385 383 { 386 384 struct drm_framebuffer *fb = fb_helper->fb; 387 385 388 - drm_fb_helper_unregister_fbi(fb_helper); 386 + drm_fb_helper_unregister_info(fb_helper); 389 387 390 388 drm_fb_helper_fini(fb_helper); 391 389 drm_framebuffer_unregister_private(fb);
+1 -1
drivers/gpu/drm/gud/gud_drv.c
··· 18 18 #include <drm/drm_damage_helper.h> 19 19 #include <drm/drm_debugfs.h> 20 20 #include <drm/drm_drv.h> 21 - #include <drm/drm_fb_helper.h> 21 + #include <drm/drm_fbdev_generic.h> 22 22 #include <drm/drm_fourcc.h> 23 23 #include <drm/drm_gem_atomic_helper.h> 24 24 #include <drm/drm_gem_framebuffer_helper.h>
+1
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
··· 17 17 #include <drm/drm_aperture.h> 18 18 #include <drm/drm_atomic_helper.h> 19 19 #include <drm/drm_drv.h> 20 + #include <drm/drm_fbdev_generic.h> 20 21 #include <drm/drm_gem_framebuffer_helper.h> 21 22 #include <drm/drm_gem_vram_helper.h> 22 23 #include <drm/drm_managed.h>
-1
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
··· 19 19 #include <linux/i2c.h> 20 20 21 21 #include <drm/drm_edid.h> 22 - #include <drm/drm_fb_helper.h> 23 22 #include <drm/drm_framebuffer.h> 24 23 25 24 struct hibmc_connector {
+2
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
··· 11 11 * Jianhua Li <lijianhua@huawei.com> 12 12 */ 13 13 14 + #include <linux/io.h> 15 + 14 16 #include <drm/drm_atomic_helper.h> 15 17 #include <drm/drm_probe_helper.h> 16 18 #include <drm/drm_print.h>
+1 -1
drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
··· 19 19 20 20 #include <drm/drm_atomic_helper.h> 21 21 #include <drm/drm_drv.h> 22 - #include <drm/drm_fb_helper.h> 22 + #include <drm/drm_fbdev_generic.h> 23 23 #include <drm/drm_gem_dma_helper.h> 24 24 #include <drm/drm_gem_framebuffer_helper.h> 25 25 #include <drm/drm_module.h>
+1 -1
drivers/gpu/drm/hyperv/hyperv_drm_drv.c
··· 11 11 #include <drm/drm_aperture.h> 12 12 #include <drm/drm_atomic_helper.h> 13 13 #include <drm/drm_drv.h> 14 - #include <drm/drm_fb_helper.h> 14 + #include <drm/drm_fbdev_generic.h> 15 15 #include <drm/drm_gem_shmem_helper.h> 16 16 #include <drm/drm_simple_kms_helper.h> 17 17
-1
drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
··· 8 8 #include <drm/drm_damage_helper.h> 9 9 #include <drm/drm_drv.h> 10 10 #include <drm/drm_edid.h> 11 - #include <drm/drm_fb_helper.h> 12 11 #include <drm/drm_format_helper.h> 13 12 #include <drm/drm_fourcc.h> 14 13 #include <drm/drm_framebuffer.h>
+5 -3
drivers/gpu/drm/i915/display/intel_fbdev.c
··· 124 124 .owner = THIS_MODULE, 125 125 DRM_FB_HELPER_DEFAULT_OPS, 126 126 .fb_set_par = intel_fbdev_set_par, 127 + .fb_read = drm_fb_helper_cfb_read, 128 + .fb_write = drm_fb_helper_cfb_write, 127 129 .fb_fillrect = drm_fb_helper_cfb_fillrect, 128 130 .fb_copyarea = drm_fb_helper_cfb_copyarea, 129 131 .fb_imageblit = drm_fb_helper_cfb_imageblit, ··· 256 254 goto out_unlock; 257 255 } 258 256 259 - info = drm_fb_helper_alloc_fbi(helper); 257 + info = drm_fb_helper_alloc_info(helper); 260 258 if (IS_ERR(info)) { 261 259 drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info); 262 260 ret = PTR_ERR(info); ··· 586 584 if (!current_is_async()) 587 585 intel_fbdev_sync(ifbdev); 588 586 589 - drm_fb_helper_unregister_fbi(&ifbdev->helper); 587 + drm_fb_helper_unregister_info(&ifbdev->helper); 590 588 } 591 589 592 590 void intel_fbdev_fini(struct drm_i915_private *dev_priv) ··· 629 627 if (!ifbdev || !ifbdev->vma) 630 628 goto set_suspend; 631 629 632 - info = ifbdev->helper.fbdev; 630 + info = ifbdev->helper.info; 633 631 634 632 if (synchronous) { 635 633 /* Flush any pending work to turn the console on, and then
+1 -2
drivers/gpu/drm/imx/dcss/dcss-kms.c
··· 7 7 #include <drm/drm_atomic_helper.h> 8 8 #include <drm/drm_bridge_connector.h> 9 9 #include <drm/drm_drv.h> 10 - #include <drm/drm_fb_helper.h> 10 + #include <drm/drm_fbdev_generic.h> 11 11 #include <drm/drm_gem_dma_helper.h> 12 12 #include <drm/drm_gem_framebuffer_helper.h> 13 13 #include <drm/drm_of.h> ··· 21 21 22 22 static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { 23 23 .fb_create = drm_gem_fb_create, 24 - .output_poll_changed = drm_fb_helper_output_poll_changed, 25 24 .atomic_check = drm_atomic_helper_check, 26 25 .atomic_commit = drm_atomic_helper_commit, 27 26 };
+1 -1
drivers/gpu/drm/imx/imx-drm-core.c
··· 16 16 #include <drm/drm_atomic.h> 17 17 #include <drm/drm_atomic_helper.h> 18 18 #include <drm/drm_drv.h> 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_fbdev_generic.h> 20 20 #include <drm/drm_gem_dma_helper.h> 21 21 #include <drm/drm_gem_framebuffer_helper.h> 22 22 #include <drm/drm_managed.h>
+1 -1
drivers/gpu/drm/imx/imx-ldb.c
··· 7 7 8 8 #include <linux/clk.h> 9 9 #include <linux/component.h> 10 + #include <linux/i2c.h> 10 11 #include <linux/media-bus-format.h> 11 12 #include <linux/mfd/syscon.h> 12 13 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> ··· 24 23 #include <drm/drm_atomic_helper.h> 25 24 #include <drm/drm_bridge.h> 26 25 #include <drm/drm_edid.h> 27 - #include <drm/drm_fb_helper.h> 28 26 #include <drm/drm_managed.h> 29 27 #include <drm/drm_of.h> 30 28 #include <drm/drm_panel.h>
-1
drivers/gpu/drm/imx/imx-tve.c
··· 19 19 20 20 #include <drm/drm_atomic_helper.h> 21 21 #include <drm/drm_edid.h> 22 - #include <drm/drm_fb_helper.h> 23 22 #include <drm/drm_managed.h> 24 23 #include <drm/drm_probe_helper.h> 25 24 #include <drm/drm_simple_kms_helper.h>
+1 -1
drivers/gpu/drm/imx/parallel-display.c
··· 8 8 #include <linux/component.h> 9 9 #include <linux/media-bus-format.h> 10 10 #include <linux/module.h> 11 + #include <linux/of.h> 11 12 #include <linux/platform_device.h> 12 13 #include <linux/videodev2.h> 13 14 ··· 17 16 #include <drm/drm_atomic_helper.h> 18 17 #include <drm/drm_bridge.h> 19 18 #include <drm/drm_edid.h> 20 - #include <drm/drm_fb_helper.h> 21 19 #include <drm/drm_managed.h> 22 20 #include <drm/drm_of.h> 23 21 #include <drm/drm_panel.h>
+6 -3
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
··· 32 32 #include <drm/drm_encoder.h> 33 33 #include <drm/drm_gem_dma_helper.h> 34 34 #include <drm/drm_fb_dma_helper.h> 35 - #include <drm/drm_fb_helper.h> 35 + #include <drm/drm_fbdev_generic.h> 36 36 #include <drm/drm_fourcc.h> 37 37 #include <drm/drm_framebuffer.h> 38 38 #include <drm/drm_gem_atomic_helper.h> ··· 1018 1018 1019 1019 static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = { 1020 1020 .fb_create = ingenic_drm_gem_fb_create, 1021 - .output_poll_changed = drm_fb_helper_output_poll_changed, 1022 1021 .atomic_check = drm_atomic_helper_check, 1023 1022 .atomic_commit = drm_atomic_helper_commit, 1024 1023 }; ··· 1628 1629 return err; 1629 1630 } 1630 1631 1631 - return platform_driver_register(&ingenic_drm_driver); 1632 + err = platform_driver_register(&ingenic_drm_driver); 1633 + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err) 1634 + platform_driver_unregister(ingenic_ipu_driver_ptr); 1635 + 1636 + return err; 1632 1637 } 1633 1638 module_init(ingenic_drm_init); 1634 1639
+1 -1
drivers/gpu/drm/kmb/kmb_drv.c
··· 15 15 16 16 #include <drm/drm_atomic_helper.h> 17 17 #include <drm/drm_drv.h> 18 - #include <drm/drm_fb_helper.h> 18 + #include <drm/drm_fbdev_generic.h> 19 19 #include <drm/drm_gem_dma_helper.h> 20 20 #include <drm/drm_gem_framebuffer_helper.h> 21 21 #include <drm/drm_module.h>
-1
drivers/gpu/drm/kmb/kmb_plane.c
··· 9 9 #include <drm/drm_crtc.h> 10 10 #include <drm/drm_crtc_helper.h> 11 11 #include <drm/drm_fb_dma_helper.h> 12 - #include <drm/drm_fb_helper.h> 13 12 #include <drm/drm_fourcc.h> 14 13 #include <drm/drm_framebuffer.h> 15 14 #include <drm/drm_gem_dma_helper.h>
+1 -1
drivers/gpu/drm/logicvc/logicvc_drm.c
··· 17 17 18 18 #include <drm/drm_atomic_helper.h> 19 19 #include <drm/drm_drv.h> 20 - #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_fbdev_generic.h> 21 21 #include <drm/drm_gem_dma_helper.h> 22 22 #include <drm/drm_print.h> 23 23
-2
drivers/gpu/drm/logicvc/logicvc_mode.c
··· 10 10 #include <drm/drm_atomic_helper.h> 11 11 #include <drm/drm_crtc_helper.h> 12 12 #include <drm/drm_drv.h> 13 - #include <drm/drm_fb_helper.h> 14 13 #include <drm/drm_gem_dma_helper.h> 15 14 #include <drm/drm_gem_framebuffer_helper.h> 16 15 #include <drm/drm_mode_config.h> ··· 25 26 26 27 static const struct drm_mode_config_funcs logicvc_mode_config_funcs = { 27 28 .fb_create = drm_gem_fb_create, 28 - .output_poll_changed = drm_fb_helper_output_poll_changed, 29 29 .atomic_check = drm_atomic_helper_check, 30 30 .atomic_commit = drm_atomic_helper_commit, 31 31 };
+1 -2
drivers/gpu/drm/mcde/mcde_drv.c
··· 69 69 #include <drm/drm_bridge.h> 70 70 #include <drm/drm_drv.h> 71 71 #include <drm/drm_fb_dma_helper.h> 72 - #include <drm/drm_fb_helper.h> 72 + #include <drm/drm_fbdev_generic.h> 73 73 #include <drm/drm_gem.h> 74 74 #include <drm/drm_gem_dma_helper.h> 75 75 #include <drm/drm_gem_framebuffer_helper.h> ··· 203 203 static const struct drm_driver mcde_drm_driver = { 204 204 .driver_features = 205 205 DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, 206 - .lastclose = drm_fb_helper_lastclose, 207 206 .ioctls = NULL, 208 207 .fops = &drm_fops, 209 208 .name = "mcde",
+1 -1
drivers/gpu/drm/mediatek/mtk_drm_drv.c
··· 17 17 #include <drm/drm_atomic.h> 18 18 #include <drm/drm_atomic_helper.h> 19 19 #include <drm/drm_drv.h> 20 - #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_fbdev_generic.h> 21 21 #include <drm/drm_fourcc.h> 22 22 #include <drm/drm_gem.h> 23 23 #include <drm/drm_gem_dma_helper.h>
+1 -1
drivers/gpu/drm/meson/meson_drv.c
··· 18 18 #include <drm/drm_aperture.h> 19 19 #include <drm/drm_atomic_helper.h> 20 20 #include <drm/drm_drv.h> 21 - #include <drm/drm_fb_helper.h> 21 + #include <drm/drm_fbdev_generic.h> 22 22 #include <drm/drm_gem_dma_helper.h> 23 23 #include <drm/drm_gem_framebuffer_helper.h> 24 24 #include <drm/drm_modeset_helper_vtables.h>
+4 -3
drivers/gpu/drm/meson/meson_encoder_cvbs.c
··· 116 116 return i; 117 117 } 118 118 119 - static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, 120 - const struct drm_display_info *display_info, 121 - const struct drm_display_mode *mode) 119 + static enum drm_mode_status 120 + meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, 121 + const struct drm_display_info *display_info, 122 + const struct drm_display_mode *mode) 122 123 { 123 124 if (meson_cvbs_get_mode(mode)) 124 125 return MODE_OK;
+1
drivers/gpu/drm/mgag200/mgag200_drv.c
··· 11 11 12 12 #include <drm/drm_aperture.h> 13 13 #include <drm/drm_drv.h> 14 + #include <drm/drm_fbdev_generic.h> 14 15 #include <drm/drm_file.h> 15 16 #include <drm/drm_ioctl.h> 16 17 #include <drm/drm_managed.h>
-1
drivers/gpu/drm/mgag200/mgag200_drv.h
··· 18 18 #include <drm/drm_connector.h> 19 19 #include <drm/drm_crtc.h> 20 20 #include <drm/drm_encoder.h> 21 - #include <drm/drm_fb_helper.h> 22 21 #include <drm/drm_gem.h> 23 22 #include <drm/drm_gem_shmem_helper.h> 24 23 #include <drm/drm_plane.h>
+2 -2
drivers/gpu/drm/msm/msm_fbdev.c
··· 93 93 goto fail; 94 94 } 95 95 96 - fbi = drm_fb_helper_alloc_fbi(helper); 96 + fbi = drm_fb_helper_alloc_info(helper); 97 97 if (IS_ERR(fbi)) { 98 98 DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n"); 99 99 ret = PTR_ERR(fbi); ··· 182 182 183 183 DBG(); 184 184 185 - drm_fb_helper_unregister_fbi(helper); 185 + drm_fb_helper_unregister_info(helper); 186 186 187 187 drm_fb_helper_fini(helper); 188 188
+1 -1
drivers/gpu/drm/mxsfb/lcdif_drv.c
··· 16 16 #include <drm/drm_atomic_helper.h> 17 17 #include <drm/drm_bridge.h> 18 18 #include <drm/drm_drv.h> 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_fbdev_generic.h> 20 20 #include <drm/drm_gem_dma_helper.h> 21 21 #include <drm/drm_gem_framebuffer_helper.h> 22 22 #include <drm/drm_mode_config.h>
+16
drivers/gpu/drm/mxsfb/lcdif_kms.c
··· 5 5 * This code is based on drivers/gpu/drm/mxsfb/mxsfb* 6 6 */ 7 7 8 + #include <linux/bitfield.h> 8 9 #include <linux/clk.h> 9 10 #include <linux/io.h> 10 11 #include <linux/iopoll.h> ··· 333 332 { 334 333 u32 reg; 335 334 335 + /* Set FIFO Panic watermarks, low 1/3, high 2/3 . */ 336 + writel(FIELD_PREP(PANIC0_THRES_LOW_MASK, 1 * PANIC0_THRES_MAX / 3) | 337 + FIELD_PREP(PANIC0_THRES_HIGH_MASK, 2 * PANIC0_THRES_MAX / 3), 338 + lcdif->base + LCDC_V8_PANIC0_THRES); 339 + 340 + /* 341 + * Enable FIFO Panic, this does not generate interrupt, but 342 + * boosts NoC priority based on FIFO Panic watermarks. 343 + */ 344 + writel(INT_ENABLE_D1_PLANE_PANIC_EN, 345 + lcdif->base + LCDC_V8_INT_ENABLE_D1); 346 + 336 347 reg = readl(lcdif->base + LCDC_V8_DISP_PARA); 337 348 reg |= DISP_PARA_DISP_ON; 338 349 writel(reg, lcdif->base + LCDC_V8_DISP_PARA); ··· 372 359 reg = readl(lcdif->base + LCDC_V8_DISP_PARA); 373 360 reg &= ~DISP_PARA_DISP_ON; 374 361 writel(reg, lcdif->base + LCDC_V8_DISP_PARA); 362 + 363 + /* Disable FIFO Panic NoC priority booster. */ 364 + writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D1); 375 365 } 376 366 377 367 static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
+1
drivers/gpu/drm/mxsfb/lcdif_regs.h
··· 255 255 256 256 #define PANIC0_THRES_LOW_MASK GENMASK(24, 16) 257 257 #define PANIC0_THRES_HIGH_MASK GENMASK(8, 0) 258 + #define PANIC0_THRES_MAX 511 258 259 259 260 #define LCDIF_MIN_XRES 120 260 261 #define LCDIF_MIN_YRES 120
+1 -1
drivers/gpu/drm/mxsfb/mxsfb_drv.c
··· 20 20 #include <drm/drm_bridge.h> 21 21 #include <drm/drm_connector.h> 22 22 #include <drm/drm_drv.h> 23 - #include <drm/drm_fb_helper.h> 23 + #include <drm/drm_fbdev_generic.h> 24 24 #include <drm/drm_fourcc.h> 25 25 #include <drm/drm_gem_dma_helper.h> 26 26 #include <drm/drm_gem_framebuffer_helper.h>
+1
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 34 34 #include <drm/drm_crtc_helper.h> 35 35 #include <drm/drm_drv.h> 36 36 #include <drm/drm_fb_helper.h> 37 + #include <drm/drm_fbdev_generic.h> 37 38 #include <drm/drm_gem_ttm_helper.h> 38 39 #include <drm/drm_ioctl.h> 39 40 #include <drm/drm_vblank.h>
+613
drivers/gpu/drm/nouveau/nouveau_fbcon.c
··· 1 + /* 2 + * Copyright © 2007 David Airlie 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 + * DEALINGS IN THE SOFTWARE. 
22 + * 23 + * Authors: 24 + * David Airlie 25 + */ 26 + 27 + #include <linux/module.h> 28 + #include <linux/kernel.h> 29 + #include <linux/errno.h> 30 + #include <linux/string.h> 31 + #include <linux/mm.h> 32 + #include <linux/tty.h> 33 + #include <linux/sysrq.h> 34 + #include <linux/delay.h> 35 + #include <linux/init.h> 36 + #include <linux/screen_info.h> 37 + #include <linux/vga_switcheroo.h> 38 + #include <linux/console.h> 39 + 40 + #include <drm/drm_crtc.h> 41 + #include <drm/drm_crtc_helper.h> 42 + #include <drm/drm_probe_helper.h> 43 + #include <drm/drm_fb_helper.h> 44 + #include <drm/drm_fourcc.h> 45 + #include <drm/drm_atomic.h> 46 + 47 + #include "nouveau_drv.h" 48 + #include "nouveau_gem.h" 49 + #include "nouveau_bo.h" 50 + #include "nouveau_fbcon.h" 51 + #include "nouveau_chan.h" 52 + #include "nouveau_vmm.h" 53 + 54 + #include "nouveau_crtc.h" 55 + 56 + MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); 57 + int nouveau_nofbaccel = 0; 58 + module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 59 + 60 + MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)"); 61 + static int nouveau_fbcon_bpp; 62 + module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400); 63 + 64 + static void 65 + nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) 66 + { 67 + struct nouveau_fbdev *fbcon = info->par; 68 + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 69 + struct nvif_device *device = &drm->client.device; 70 + int ret; 71 + 72 + if (info->state != FBINFO_STATE_RUNNING) 73 + return; 74 + 75 + ret = -ENODEV; 76 + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 77 + mutex_trylock(&drm->client.mutex)) { 78 + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) 79 + ret = nv04_fbcon_fillrect(info, rect); 80 + else 81 + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) 82 + ret = nv50_fbcon_fillrect(info, rect); 83 + else 84 + ret = nvc0_fbcon_fillrect(info, rect); 85 + 
mutex_unlock(&drm->client.mutex); 86 + } 87 + 88 + if (ret == 0) 89 + return; 90 + 91 + if (ret != -ENODEV) 92 + nouveau_fbcon_gpu_lockup(info); 93 + drm_fb_helper_cfb_fillrect(info, rect); 94 + } 95 + 96 + static void 97 + nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) 98 + { 99 + struct nouveau_fbdev *fbcon = info->par; 100 + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 101 + struct nvif_device *device = &drm->client.device; 102 + int ret; 103 + 104 + if (info->state != FBINFO_STATE_RUNNING) 105 + return; 106 + 107 + ret = -ENODEV; 108 + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 109 + mutex_trylock(&drm->client.mutex)) { 110 + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) 111 + ret = nv04_fbcon_copyarea(info, image); 112 + else 113 + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) 114 + ret = nv50_fbcon_copyarea(info, image); 115 + else 116 + ret = nvc0_fbcon_copyarea(info, image); 117 + mutex_unlock(&drm->client.mutex); 118 + } 119 + 120 + if (ret == 0) 121 + return; 122 + 123 + if (ret != -ENODEV) 124 + nouveau_fbcon_gpu_lockup(info); 125 + drm_fb_helper_cfb_copyarea(info, image); 126 + } 127 + 128 + static void 129 + nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) 130 + { 131 + struct nouveau_fbdev *fbcon = info->par; 132 + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 133 + struct nvif_device *device = &drm->client.device; 134 + int ret; 135 + 136 + if (info->state != FBINFO_STATE_RUNNING) 137 + return; 138 + 139 + ret = -ENODEV; 140 + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && 141 + mutex_trylock(&drm->client.mutex)) { 142 + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) 143 + ret = nv04_fbcon_imageblit(info, image); 144 + else 145 + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) 146 + ret = nv50_fbcon_imageblit(info, image); 147 + else 148 + ret = nvc0_fbcon_imageblit(info, image); 149 + 
mutex_unlock(&drm->client.mutex); 150 + } 151 + 152 + if (ret == 0) 153 + return; 154 + 155 + if (ret != -ENODEV) 156 + nouveau_fbcon_gpu_lockup(info); 157 + drm_fb_helper_cfb_imageblit(info, image); 158 + } 159 + 160 + static int 161 + nouveau_fbcon_sync(struct fb_info *info) 162 + { 163 + struct nouveau_fbdev *fbcon = info->par; 164 + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 165 + struct nouveau_channel *chan = drm->channel; 166 + int ret; 167 + 168 + if (!chan || !chan->accel_done || in_interrupt() || 169 + info->state != FBINFO_STATE_RUNNING || 170 + info->flags & FBINFO_HWACCEL_DISABLED) 171 + return 0; 172 + 173 + if (!mutex_trylock(&drm->client.mutex)) 174 + return 0; 175 + 176 + ret = nouveau_channel_idle(chan); 177 + mutex_unlock(&drm->client.mutex); 178 + if (ret) { 179 + nouveau_fbcon_gpu_lockup(info); 180 + return 0; 181 + } 182 + 183 + chan->accel_done = false; 184 + return 0; 185 + } 186 + 187 + static int 188 + nouveau_fbcon_open(struct fb_info *info, int user) 189 + { 190 + struct nouveau_fbdev *fbcon = info->par; 191 + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 192 + int ret = pm_runtime_get_sync(drm->dev->dev); 193 + if (ret < 0 && ret != -EACCES) { 194 + pm_runtime_put(drm->dev->dev); 195 + return ret; 196 + } 197 + return 0; 198 + } 199 + 200 + static int 201 + nouveau_fbcon_release(struct fb_info *info, int user) 202 + { 203 + struct nouveau_fbdev *fbcon = info->par; 204 + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 205 + pm_runtime_put(drm->dev->dev); 206 + return 0; 207 + } 208 + 209 + static const struct fb_ops nouveau_fbcon_ops = { 210 + .owner = THIS_MODULE, 211 + DRM_FB_HELPER_DEFAULT_OPS, 212 + .fb_open = nouveau_fbcon_open, 213 + .fb_release = nouveau_fbcon_release, 214 + .fb_fillrect = nouveau_fbcon_fillrect, 215 + .fb_copyarea = nouveau_fbcon_copyarea, 216 + .fb_imageblit = nouveau_fbcon_imageblit, 217 + .fb_sync = nouveau_fbcon_sync, 218 + }; 219 + 220 + static const struct fb_ops 
nouveau_fbcon_sw_ops = { 221 + .owner = THIS_MODULE, 222 + DRM_FB_HELPER_DEFAULT_OPS, 223 + .fb_open = nouveau_fbcon_open, 224 + .fb_release = nouveau_fbcon_release, 225 + .fb_fillrect = drm_fb_helper_cfb_fillrect, 226 + .fb_copyarea = drm_fb_helper_cfb_copyarea, 227 + .fb_imageblit = drm_fb_helper_cfb_imageblit, 228 + }; 229 + 230 + void 231 + nouveau_fbcon_accel_save_disable(struct drm_device *dev) 232 + { 233 + struct nouveau_drm *drm = nouveau_drm(dev); 234 + if (drm->fbcon && drm->fbcon->helper.info) { 235 + drm->fbcon->saved_flags = drm->fbcon->helper.info->flags; 236 + drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED; 237 + } 238 + } 239 + 240 + void 241 + nouveau_fbcon_accel_restore(struct drm_device *dev) 242 + { 243 + struct nouveau_drm *drm = nouveau_drm(dev); 244 + if (drm->fbcon && drm->fbcon->helper.info) 245 + drm->fbcon->helper.info->flags = drm->fbcon->saved_flags; 246 + } 247 + 248 + static void 249 + nouveau_fbcon_accel_fini(struct drm_device *dev) 250 + { 251 + struct nouveau_drm *drm = nouveau_drm(dev); 252 + struct nouveau_fbdev *fbcon = drm->fbcon; 253 + if (fbcon && drm->channel) { 254 + console_lock(); 255 + if (fbcon->helper.info) 256 + fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED; 257 + console_unlock(); 258 + nouveau_channel_idle(drm->channel); 259 + nvif_object_dtor(&fbcon->twod); 260 + nvif_object_dtor(&fbcon->blit); 261 + nvif_object_dtor(&fbcon->gdi); 262 + nvif_object_dtor(&fbcon->patt); 263 + nvif_object_dtor(&fbcon->rop); 264 + nvif_object_dtor(&fbcon->clip); 265 + nvif_object_dtor(&fbcon->surf2d); 266 + } 267 + } 268 + 269 + static void 270 + nouveau_fbcon_accel_init(struct drm_device *dev) 271 + { 272 + struct nouveau_drm *drm = nouveau_drm(dev); 273 + struct nouveau_fbdev *fbcon = drm->fbcon; 274 + struct fb_info *info = fbcon->helper.info; 275 + int ret; 276 + 277 + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) 278 + ret = nv04_fbcon_accel_init(info); 279 + else 280 + if 
(drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) 281 + ret = nv50_fbcon_accel_init(info); 282 + else 283 + ret = nvc0_fbcon_accel_init(info); 284 + 285 + if (ret == 0) 286 + info->fbops = &nouveau_fbcon_ops; 287 + } 288 + 289 + static void 290 + nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon) 291 + { 292 + struct fb_info *info = fbcon->helper.info; 293 + struct fb_fillrect rect; 294 + 295 + /* Clear the entire fbcon. The drm will program every connector 296 + * with it's preferred mode. If the sizes differ, one display will 297 + * quite likely have garbage around the console. 298 + */ 299 + rect.dx = rect.dy = 0; 300 + rect.width = info->var.xres_virtual; 301 + rect.height = info->var.yres_virtual; 302 + rect.color = 0; 303 + rect.rop = ROP_COPY; 304 + info->fbops->fb_fillrect(info, &rect); 305 + } 306 + 307 + static int 308 + nouveau_fbcon_create(struct drm_fb_helper *helper, 309 + struct drm_fb_helper_surface_size *sizes) 310 + { 311 + struct nouveau_fbdev *fbcon = 312 + container_of(helper, struct nouveau_fbdev, helper); 313 + struct drm_device *dev = fbcon->helper.dev; 314 + struct nouveau_drm *drm = nouveau_drm(dev); 315 + struct nvif_device *device = &drm->client.device; 316 + struct fb_info *info; 317 + struct drm_framebuffer *fb; 318 + struct nouveau_channel *chan; 319 + struct nouveau_bo *nvbo; 320 + struct drm_mode_fb_cmd2 mode_cmd = {}; 321 + int ret; 322 + 323 + mode_cmd.width = sizes->surface_width; 324 + mode_cmd.height = sizes->surface_height; 325 + 326 + mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3); 327 + mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256); 328 + 329 + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 330 + sizes->surface_depth); 331 + 332 + ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] * 333 + mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM, 334 + 0, 0x0000, &nvbo); 335 + if (ret) { 336 + NV_ERROR(drm, "failed to allocate framebuffer\n"); 337 + 
goto out; 338 + } 339 + 340 + ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb); 341 + if (ret) 342 + goto out_unref; 343 + 344 + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); 345 + if (ret) { 346 + NV_ERROR(drm, "failed to pin fb: %d\n", ret); 347 + goto out_unref; 348 + } 349 + 350 + ret = nouveau_bo_map(nvbo); 351 + if (ret) { 352 + NV_ERROR(drm, "failed to map fb: %d\n", ret); 353 + goto out_unpin; 354 + } 355 + 356 + chan = nouveau_nofbaccel ? NULL : drm->channel; 357 + if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) { 358 + ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma); 359 + if (ret) { 360 + NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); 361 + chan = NULL; 362 + } 363 + } 364 + 365 + info = drm_fb_helper_alloc_info(helper); 366 + if (IS_ERR(info)) { 367 + ret = PTR_ERR(info); 368 + goto out_unlock; 369 + } 370 + 371 + /* setup helper */ 372 + fbcon->helper.fb = fb; 373 + 374 + if (!chan) 375 + info->flags = FBINFO_HWACCEL_DISABLED; 376 + else 377 + info->flags = FBINFO_HWACCEL_COPYAREA | 378 + FBINFO_HWACCEL_FILLRECT | 379 + FBINFO_HWACCEL_IMAGEBLIT; 380 + info->fbops = &nouveau_fbcon_sw_ops; 381 + info->fix.smem_start = nvbo->bo.resource->bus.offset; 382 + info->fix.smem_len = nvbo->bo.base.size; 383 + 384 + info->screen_base = nvbo_kmap_obj_iovirtual(nvbo); 385 + info->screen_size = nvbo->bo.base.size; 386 + 387 + drm_fb_helper_fill_info(info, &fbcon->helper, sizes); 388 + 389 + /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 390 + 391 + if (chan) 392 + nouveau_fbcon_accel_init(dev); 393 + nouveau_fbcon_zfill(dev, fbcon); 394 + 395 + /* To allow resizeing without swapping buffers */ 396 + NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", 397 + fb->width, fb->height, nvbo->offset, nvbo); 398 + 399 + if (dev_is_pci(dev->dev)) 400 + vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info); 401 + 402 + return 0; 403 + 404 + out_unlock: 405 + if (chan) 406 + 
nouveau_vma_del(&fbcon->vma); 407 + nouveau_bo_unmap(nvbo); 408 + out_unpin: 409 + nouveau_bo_unpin(nvbo); 410 + out_unref: 411 + nouveau_bo_ref(NULL, &nvbo); 412 + out: 413 + return ret; 414 + } 415 + 416 + static int 417 + nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) 418 + { 419 + struct drm_framebuffer *fb = fbcon->helper.fb; 420 + struct nouveau_bo *nvbo; 421 + 422 + drm_fb_helper_unregister_info(&fbcon->helper); 423 + drm_fb_helper_fini(&fbcon->helper); 424 + 425 + if (fb && fb->obj[0]) { 426 + nvbo = nouveau_gem_object(fb->obj[0]); 427 + nouveau_vma_del(&fbcon->vma); 428 + nouveau_bo_unmap(nvbo); 429 + nouveau_bo_unpin(nvbo); 430 + drm_framebuffer_put(fb); 431 + } 432 + 433 + return 0; 434 + } 435 + 436 + void nouveau_fbcon_gpu_lockup(struct fb_info *info) 437 + { 438 + struct nouveau_fbdev *fbcon = info->par; 439 + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); 440 + 441 + NV_ERROR(drm, "GPU lockup - switching to software fbcon\n"); 442 + info->flags |= FBINFO_HWACCEL_DISABLED; 443 + } 444 + 445 + static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { 446 + .fb_probe = nouveau_fbcon_create, 447 + }; 448 + 449 + static void 450 + nouveau_fbcon_set_suspend_work(struct work_struct *work) 451 + { 452 + struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); 453 + int state = READ_ONCE(drm->fbcon_new_state); 454 + 455 + if (state == FBINFO_STATE_RUNNING) 456 + pm_runtime_get_sync(drm->dev->dev); 457 + 458 + console_lock(); 459 + if (state == FBINFO_STATE_RUNNING) 460 + nouveau_fbcon_accel_restore(drm->dev); 461 + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); 462 + if (state != FBINFO_STATE_RUNNING) 463 + nouveau_fbcon_accel_save_disable(drm->dev); 464 + console_unlock(); 465 + 466 + if (state == FBINFO_STATE_RUNNING) { 467 + nouveau_fbcon_hotplug_resume(drm->fbcon); 468 + pm_runtime_mark_last_busy(drm->dev->dev); 469 + pm_runtime_put_autosuspend(drm->dev->dev); 470 + } 471 + } 472 + 
473 + void 474 + nouveau_fbcon_set_suspend(struct drm_device *dev, int state) 475 + { 476 + struct nouveau_drm *drm = nouveau_drm(dev); 477 + 478 + if (!drm->fbcon) 479 + return; 480 + 481 + drm->fbcon_new_state = state; 482 + /* Since runtime resume can happen as a result of a sysfs operation, 483 + * it's possible we already have the console locked. So handle fbcon 484 + * init/deinit from a seperate work thread 485 + */ 486 + schedule_work(&drm->fbcon_work); 487 + } 488 + 489 + void 490 + nouveau_fbcon_output_poll_changed(struct drm_device *dev) 491 + { 492 + struct nouveau_drm *drm = nouveau_drm(dev); 493 + struct nouveau_fbdev *fbcon = drm->fbcon; 494 + int ret; 495 + 496 + if (!fbcon) 497 + return; 498 + 499 + mutex_lock(&fbcon->hotplug_lock); 500 + 501 + ret = pm_runtime_get(dev->dev); 502 + if (ret == 1 || ret == -EACCES) { 503 + drm_fb_helper_hotplug_event(&fbcon->helper); 504 + 505 + pm_runtime_mark_last_busy(dev->dev); 506 + pm_runtime_put_autosuspend(dev->dev); 507 + } else if (ret == 0) { 508 + /* If the GPU was already in the process of suspending before 509 + * this event happened, then we can't block here as we'll 510 + * deadlock the runtime pmops since they wait for us to 511 + * finish. So, just defer this event for when we runtime 512 + * resume again. It will be handled by fbcon_work. 
513 + */ 514 + NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n"); 515 + fbcon->hotplug_waiting = true; 516 + pm_runtime_put_noidle(drm->dev->dev); 517 + } else { 518 + DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n", 519 + ret); 520 + } 521 + 522 + mutex_unlock(&fbcon->hotplug_lock); 523 + } 524 + 525 + void 526 + nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon) 527 + { 528 + struct nouveau_drm *drm; 529 + 530 + if (!fbcon) 531 + return; 532 + drm = nouveau_drm(fbcon->helper.dev); 533 + 534 + mutex_lock(&fbcon->hotplug_lock); 535 + if (fbcon->hotplug_waiting) { 536 + fbcon->hotplug_waiting = false; 537 + 538 + NV_DEBUG(drm, "Handling deferred fbcon HPD events\n"); 539 + drm_fb_helper_hotplug_event(&fbcon->helper); 540 + } 541 + mutex_unlock(&fbcon->hotplug_lock); 542 + } 543 + 544 + int 545 + nouveau_fbcon_init(struct drm_device *dev) 546 + { 547 + struct nouveau_drm *drm = nouveau_drm(dev); 548 + struct nouveau_fbdev *fbcon; 549 + int preferred_bpp = nouveau_fbcon_bpp; 550 + int ret; 551 + 552 + if (!dev->mode_config.num_crtc || 553 + (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA) 554 + return 0; 555 + 556 + fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); 557 + if (!fbcon) 558 + return -ENOMEM; 559 + 560 + drm->fbcon = fbcon; 561 + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); 562 + mutex_init(&fbcon->hotplug_lock); 563 + 564 + drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); 565 + 566 + ret = drm_fb_helper_init(dev, &fbcon->helper); 567 + if (ret) 568 + goto free; 569 + 570 + if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) { 571 + if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) 572 + preferred_bpp = 8; 573 + else 574 + if (drm->client.device.info.ram_size <= 64 * 1024 * 1024) 575 + preferred_bpp = 16; 576 + else 577 + preferred_bpp = 32; 578 + } 579 + 580 + /* disable all the possible outputs/crtcs before entering KMS mode */ 581 + if 
(!drm_drv_uses_atomic_modeset(dev)) 582 + drm_helper_disable_unused_functions(dev); 583 + 584 + ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); 585 + if (ret) 586 + goto fini; 587 + 588 + if (fbcon->helper.info) 589 + fbcon->helper.info->pixmap.buf_align = 4; 590 + return 0; 591 + 592 + fini: 593 + drm_fb_helper_fini(&fbcon->helper); 594 + free: 595 + kfree(fbcon); 596 + drm->fbcon = NULL; 597 + return ret; 598 + } 599 + 600 + void 601 + nouveau_fbcon_fini(struct drm_device *dev) 602 + { 603 + struct nouveau_drm *drm = nouveau_drm(dev); 604 + 605 + if (!drm->fbcon) 606 + return; 607 + 608 + drm_kms_helper_poll_fini(dev); 609 + nouveau_fbcon_accel_fini(dev); 610 + nouveau_fbcon_destroy(dev, drm->fbcon); 611 + kfree(drm->fbcon); 612 + drm->fbcon = NULL; 613 + }
+3 -3
drivers/gpu/drm/omapdrm/omap_fbdev.c
··· 38 38 static void pan_worker(struct work_struct *work) 39 39 { 40 40 struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work); 41 - struct fb_info *fbi = fbdev->base.fbdev; 41 + struct fb_info *fbi = fbdev->base.info; 42 42 int npages; 43 43 44 44 /* DMM roll shifts in 4K pages: */ ··· 161 161 goto fail; 162 162 } 163 163 164 - fbi = drm_fb_helper_alloc_fbi(helper); 164 + fbi = drm_fb_helper_alloc_info(helper); 165 165 if (IS_ERR(fbi)) { 166 166 dev_err(dev->dev, "failed to allocate fb info\n"); 167 167 ret = PTR_ERR(fbi); ··· 272 272 if (!helper) 273 273 return; 274 274 275 - drm_fb_helper_unregister_fbi(helper); 275 + drm_fb_helper_unregister_info(helper); 276 276 277 277 drm_fb_helper_fini(helper); 278 278
+10
drivers/gpu/drm/panel/Kconfig
··· 203 203 24 bit RGB per pixel. It provides a MIPI DSI interface to 204 204 the host and has a built-in LED backlight. 205 205 206 + config DRM_PANEL_JADARD_JD9365DA_H3 207 + tristate "Jadard JD9365DA-H3 WXGA DSI panel" 208 + depends on OF 209 + depends on DRM_MIPI_DSI 210 + depends on BACKLIGHT_CLASS_DEVICE 211 + help 212 + Say Y here if you want to enable support for Jadard JD9365DA-H3 213 + WXGA MIPI DSI panel. The panel supports TFT dot matrix LCD with 214 + 800RGBx1280 dots at maximum. 215 + 206 216 config DRM_PANEL_JDI_LT070ME05000 207 217 tristate "JDI LT070ME05000 WUXGA DSI panel" 208 218 depends on OF
+1
drivers/gpu/drm/panel/Makefile
··· 18 18 obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o 19 19 obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o 20 20 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o 21 + obj-$(CONFIG_DRM_PANEL_JADARD_JD9365DA_H3) += panel-jadard-jd9365da-h3.o 21 22 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o 22 23 obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o 23 24 obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
+2 -1
drivers/gpu/drm/panel/panel-ilitek-ili9341.c
··· 18 18 * Copyright 2018 David Lechner <david@lechnology.com> 19 19 */ 20 20 21 + #include <linux/backlight.h> 21 22 #include <linux/bitops.h> 22 23 #include <linux/delay.h> 23 24 #include <linux/gpio/consumer.h> ··· 31 30 32 31 #include <drm/drm_atomic_helper.h> 33 32 #include <drm/drm_drv.h> 34 - #include <drm/drm_fb_helper.h> 33 + #include <drm/drm_fbdev_generic.h> 35 34 #include <drm/drm_gem_atomic_helper.h> 36 35 #include <drm/drm_gem_dma_helper.h> 37 36 #include <drm/drm_gem_framebuffer_helper.h>
+473
drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (c) 2019 Radxa Limited 4 + * Copyright (c) 2022 Edgeble AI Technologies Pvt. Ltd. 5 + * 6 + * Author: 7 + * - Jagan Teki <jagan@amarulasolutions.com> 8 + * - Stephen Chen <stephen@radxa.com> 9 + */ 10 + 11 + #include <drm/drm_mipi_dsi.h> 12 + #include <drm/drm_modes.h> 13 + #include <drm/drm_panel.h> 14 + #include <drm/drm_print.h> 15 + 16 + #include <linux/gpio/consumer.h> 17 + #include <linux/delay.h> 18 + #include <linux/module.h> 19 + #include <linux/of_device.h> 20 + #include <linux/regulator/consumer.h> 21 + 22 + #define JD9365DA_INIT_CMD_LEN 2 23 + 24 + struct jadard_init_cmd { 25 + u8 data[JD9365DA_INIT_CMD_LEN]; 26 + }; 27 + 28 + struct jadard_panel_desc { 29 + const struct drm_display_mode mode; 30 + unsigned int lanes; 31 + enum mipi_dsi_pixel_format format; 32 + const struct jadard_init_cmd *init_cmds; 33 + u32 num_init_cmds; 34 + }; 35 + 36 + struct jadard { 37 + struct drm_panel panel; 38 + struct mipi_dsi_device *dsi; 39 + const struct jadard_panel_desc *desc; 40 + 41 + struct regulator *vdd; 42 + struct regulator *vccio; 43 + struct gpio_desc *reset; 44 + }; 45 + 46 + static inline struct jadard *panel_to_jadard(struct drm_panel *panel) 47 + { 48 + return container_of(panel, struct jadard, panel); 49 + } 50 + 51 + static int jadard_enable(struct drm_panel *panel) 52 + { 53 + struct device *dev = panel->dev; 54 + struct jadard *jadard = panel_to_jadard(panel); 55 + const struct jadard_panel_desc *desc = jadard->desc; 56 + struct mipi_dsi_device *dsi = jadard->dsi; 57 + unsigned int i; 58 + int err; 59 + 60 + msleep(10); 61 + 62 + for (i = 0; i < desc->num_init_cmds; i++) { 63 + const struct jadard_init_cmd *cmd = &desc->init_cmds[i]; 64 + 65 + err = mipi_dsi_dcs_write_buffer(dsi, cmd->data, JD9365DA_INIT_CMD_LEN); 66 + if (err < 0) 67 + return err; 68 + } 69 + 70 + msleep(120); 71 + 72 + err = mipi_dsi_dcs_exit_sleep_mode(dsi); 73 + if (err < 0) 74 + DRM_DEV_ERROR(dev, "failed to 
exit sleep mode ret = %d\n", err); 75 + 76 + err = mipi_dsi_dcs_set_display_on(dsi); 77 + if (err < 0) 78 + DRM_DEV_ERROR(dev, "failed to set display on ret = %d\n", err); 79 + 80 + return 0; 81 + } 82 + 83 + static int jadard_disable(struct drm_panel *panel) 84 + { 85 + struct device *dev = panel->dev; 86 + struct jadard *jadard = panel_to_jadard(panel); 87 + int ret; 88 + 89 + ret = mipi_dsi_dcs_set_display_off(jadard->dsi); 90 + if (ret < 0) 91 + DRM_DEV_ERROR(dev, "failed to set display off: %d\n", ret); 92 + 93 + ret = mipi_dsi_dcs_enter_sleep_mode(jadard->dsi); 94 + if (ret < 0) 95 + DRM_DEV_ERROR(dev, "failed to enter sleep mode: %d\n", ret); 96 + 97 + return 0; 98 + } 99 + 100 + static int jadard_prepare(struct drm_panel *panel) 101 + { 102 + struct jadard *jadard = panel_to_jadard(panel); 103 + int ret; 104 + 105 + ret = regulator_enable(jadard->vccio); 106 + if (ret) 107 + return ret; 108 + 109 + ret = regulator_enable(jadard->vdd); 110 + if (ret) 111 + return ret; 112 + 113 + gpiod_set_value(jadard->reset, 1); 114 + msleep(5); 115 + 116 + gpiod_set_value(jadard->reset, 0); 117 + msleep(10); 118 + 119 + gpiod_set_value(jadard->reset, 1); 120 + msleep(120); 121 + 122 + return 0; 123 + } 124 + 125 + static int jadard_unprepare(struct drm_panel *panel) 126 + { 127 + struct jadard *jadard = panel_to_jadard(panel); 128 + 129 + gpiod_set_value(jadard->reset, 1); 130 + msleep(120); 131 + 132 + regulator_disable(jadard->vdd); 133 + regulator_disable(jadard->vccio); 134 + 135 + return 0; 136 + } 137 + 138 + static int jadard_get_modes(struct drm_panel *panel, 139 + struct drm_connector *connector) 140 + { 141 + struct jadard *jadard = panel_to_jadard(panel); 142 + const struct drm_display_mode *desc_mode = &jadard->desc->mode; 143 + struct drm_display_mode *mode; 144 + 145 + mode = drm_mode_duplicate(connector->dev, desc_mode); 146 + if (!mode) { 147 + DRM_DEV_ERROR(&jadard->dsi->dev, "failed to add mode %ux%ux@%u\n", 148 + desc_mode->hdisplay, 
desc_mode->vdisplay, 149 + drm_mode_vrefresh(desc_mode)); 150 + return -ENOMEM; 151 + } 152 + 153 + drm_mode_set_name(mode); 154 + drm_mode_probed_add(connector, mode); 155 + 156 + connector->display_info.width_mm = mode->width_mm; 157 + connector->display_info.height_mm = mode->height_mm; 158 + 159 + return 1; 160 + } 161 + 162 + static const struct drm_panel_funcs jadard_funcs = { 163 + .disable = jadard_disable, 164 + .unprepare = jadard_unprepare, 165 + .prepare = jadard_prepare, 166 + .enable = jadard_enable, 167 + .get_modes = jadard_get_modes, 168 + }; 169 + 170 + static const struct jadard_init_cmd cz101b4001_init_cmds[] = { 171 + { .data = { 0xE0, 0x00 } }, 172 + { .data = { 0xE1, 0x93 } }, 173 + { .data = { 0xE2, 0x65 } }, 174 + { .data = { 0xE3, 0xF8 } }, 175 + { .data = { 0x80, 0x03 } }, 176 + { .data = { 0xE0, 0x01 } }, 177 + { .data = { 0x00, 0x00 } }, 178 + { .data = { 0x01, 0x3B } }, 179 + { .data = { 0x0C, 0x74 } }, 180 + { .data = { 0x17, 0x00 } }, 181 + { .data = { 0x18, 0xAF } }, 182 + { .data = { 0x19, 0x00 } }, 183 + { .data = { 0x1A, 0x00 } }, 184 + { .data = { 0x1B, 0xAF } }, 185 + { .data = { 0x1C, 0x00 } }, 186 + { .data = { 0x35, 0x26 } }, 187 + { .data = { 0x37, 0x09 } }, 188 + { .data = { 0x38, 0x04 } }, 189 + { .data = { 0x39, 0x00 } }, 190 + { .data = { 0x3A, 0x01 } }, 191 + { .data = { 0x3C, 0x78 } }, 192 + { .data = { 0x3D, 0xFF } }, 193 + { .data = { 0x3E, 0xFF } }, 194 + { .data = { 0x3F, 0x7F } }, 195 + { .data = { 0x40, 0x06 } }, 196 + { .data = { 0x41, 0xA0 } }, 197 + { .data = { 0x42, 0x81 } }, 198 + { .data = { 0x43, 0x14 } }, 199 + { .data = { 0x44, 0x23 } }, 200 + { .data = { 0x45, 0x28 } }, 201 + { .data = { 0x55, 0x02 } }, 202 + { .data = { 0x57, 0x69 } }, 203 + { .data = { 0x59, 0x0A } }, 204 + { .data = { 0x5A, 0x2A } }, 205 + { .data = { 0x5B, 0x17 } }, 206 + { .data = { 0x5D, 0x7F } }, 207 + { .data = { 0x5E, 0x6B } }, 208 + { .data = { 0x5F, 0x5C } }, 209 + { .data = { 0x60, 0x4F } }, 210 + { .data = { 0x61, 0x4D } 
}, 211 + { .data = { 0x62, 0x3F } }, 212 + { .data = { 0x63, 0x42 } }, 213 + { .data = { 0x64, 0x2B } }, 214 + { .data = { 0x65, 0x44 } }, 215 + { .data = { 0x66, 0x43 } }, 216 + { .data = { 0x67, 0x43 } }, 217 + { .data = { 0x68, 0x63 } }, 218 + { .data = { 0x69, 0x52 } }, 219 + { .data = { 0x6A, 0x5A } }, 220 + { .data = { 0x6B, 0x4F } }, 221 + { .data = { 0x6C, 0x4E } }, 222 + { .data = { 0x6D, 0x20 } }, 223 + { .data = { 0x6E, 0x0F } }, 224 + { .data = { 0x6F, 0x00 } }, 225 + { .data = { 0x70, 0x7F } }, 226 + { .data = { 0x71, 0x6B } }, 227 + { .data = { 0x72, 0x5C } }, 228 + { .data = { 0x73, 0x4F } }, 229 + { .data = { 0x74, 0x4D } }, 230 + { .data = { 0x75, 0x3F } }, 231 + { .data = { 0x76, 0x42 } }, 232 + { .data = { 0x77, 0x2B } }, 233 + { .data = { 0x78, 0x44 } }, 234 + { .data = { 0x79, 0x43 } }, 235 + { .data = { 0x7A, 0x43 } }, 236 + { .data = { 0x7B, 0x63 } }, 237 + { .data = { 0x7C, 0x52 } }, 238 + { .data = { 0x7D, 0x5A } }, 239 + { .data = { 0x7E, 0x4F } }, 240 + { .data = { 0x7F, 0x4E } }, 241 + { .data = { 0x80, 0x20 } }, 242 + { .data = { 0x81, 0x0F } }, 243 + { .data = { 0x82, 0x00 } }, 244 + { .data = { 0xE0, 0x02 } }, 245 + { .data = { 0x00, 0x02 } }, 246 + { .data = { 0x01, 0x02 } }, 247 + { .data = { 0x02, 0x00 } }, 248 + { .data = { 0x03, 0x00 } }, 249 + { .data = { 0x04, 0x1E } }, 250 + { .data = { 0x05, 0x1E } }, 251 + { .data = { 0x06, 0x1F } }, 252 + { .data = { 0x07, 0x1F } }, 253 + { .data = { 0x08, 0x1F } }, 254 + { .data = { 0x09, 0x17 } }, 255 + { .data = { 0x0A, 0x17 } }, 256 + { .data = { 0x0B, 0x37 } }, 257 + { .data = { 0x0C, 0x37 } }, 258 + { .data = { 0x0D, 0x47 } }, 259 + { .data = { 0x0E, 0x47 } }, 260 + { .data = { 0x0F, 0x45 } }, 261 + { .data = { 0x10, 0x45 } }, 262 + { .data = { 0x11, 0x4B } }, 263 + { .data = { 0x12, 0x4B } }, 264 + { .data = { 0x13, 0x49 } }, 265 + { .data = { 0x14, 0x49 } }, 266 + { .data = { 0x15, 0x1F } }, 267 + { .data = { 0x16, 0x01 } }, 268 + { .data = { 0x17, 0x01 } }, 269 + { .data = { 0x18, 
0x00 } }, 270 + { .data = { 0x19, 0x00 } }, 271 + { .data = { 0x1A, 0x1E } }, 272 + { .data = { 0x1B, 0x1E } }, 273 + { .data = { 0x1C, 0x1F } }, 274 + { .data = { 0x1D, 0x1F } }, 275 + { .data = { 0x1E, 0x1F } }, 276 + { .data = { 0x1F, 0x17 } }, 277 + { .data = { 0x20, 0x17 } }, 278 + { .data = { 0x21, 0x37 } }, 279 + { .data = { 0x22, 0x37 } }, 280 + { .data = { 0x23, 0x46 } }, 281 + { .data = { 0x24, 0x46 } }, 282 + { .data = { 0x25, 0x44 } }, 283 + { .data = { 0x26, 0x44 } }, 284 + { .data = { 0x27, 0x4A } }, 285 + { .data = { 0x28, 0x4A } }, 286 + { .data = { 0x29, 0x48 } }, 287 + { .data = { 0x2A, 0x48 } }, 288 + { .data = { 0x2B, 0x1F } }, 289 + { .data = { 0x2C, 0x01 } }, 290 + { .data = { 0x2D, 0x01 } }, 291 + { .data = { 0x2E, 0x00 } }, 292 + { .data = { 0x2F, 0x00 } }, 293 + { .data = { 0x30, 0x1F } }, 294 + { .data = { 0x31, 0x1F } }, 295 + { .data = { 0x32, 0x1E } }, 296 + { .data = { 0x33, 0x1E } }, 297 + { .data = { 0x34, 0x1F } }, 298 + { .data = { 0x35, 0x17 } }, 299 + { .data = { 0x36, 0x17 } }, 300 + { .data = { 0x37, 0x37 } }, 301 + { .data = { 0x38, 0x37 } }, 302 + { .data = { 0x39, 0x08 } }, 303 + { .data = { 0x3A, 0x08 } }, 304 + { .data = { 0x3B, 0x0A } }, 305 + { .data = { 0x3C, 0x0A } }, 306 + { .data = { 0x3D, 0x04 } }, 307 + { .data = { 0x3E, 0x04 } }, 308 + { .data = { 0x3F, 0x06 } }, 309 + { .data = { 0x40, 0x06 } }, 310 + { .data = { 0x41, 0x1F } }, 311 + { .data = { 0x42, 0x02 } }, 312 + { .data = { 0x43, 0x02 } }, 313 + { .data = { 0x44, 0x00 } }, 314 + { .data = { 0x45, 0x00 } }, 315 + { .data = { 0x46, 0x1F } }, 316 + { .data = { 0x47, 0x1F } }, 317 + { .data = { 0x48, 0x1E } }, 318 + { .data = { 0x49, 0x1E } }, 319 + { .data = { 0x4A, 0x1F } }, 320 + { .data = { 0x4B, 0x17 } }, 321 + { .data = { 0x4C, 0x17 } }, 322 + { .data = { 0x4D, 0x37 } }, 323 + { .data = { 0x4E, 0x37 } }, 324 + { .data = { 0x4F, 0x09 } }, 325 + { .data = { 0x50, 0x09 } }, 326 + { .data = { 0x51, 0x0B } }, 327 + { .data = { 0x52, 0x0B } }, 328 + { .data = { 
0x53, 0x05 } }, 329 + { .data = { 0x54, 0x05 } }, 330 + { .data = { 0x55, 0x07 } }, 331 + { .data = { 0x56, 0x07 } }, 332 + { .data = { 0x57, 0x1F } }, 333 + { .data = { 0x58, 0x40 } }, 334 + { .data = { 0x5B, 0x30 } }, 335 + { .data = { 0x5C, 0x16 } }, 336 + { .data = { 0x5D, 0x34 } }, 337 + { .data = { 0x5E, 0x05 } }, 338 + { .data = { 0x5F, 0x02 } }, 339 + { .data = { 0x63, 0x00 } }, 340 + { .data = { 0x64, 0x6A } }, 341 + { .data = { 0x67, 0x73 } }, 342 + { .data = { 0x68, 0x1D } }, 343 + { .data = { 0x69, 0x08 } }, 344 + { .data = { 0x6A, 0x6A } }, 345 + { .data = { 0x6B, 0x08 } }, 346 + { .data = { 0x6C, 0x00 } }, 347 + { .data = { 0x6D, 0x00 } }, 348 + { .data = { 0x6E, 0x00 } }, 349 + { .data = { 0x6F, 0x88 } }, 350 + { .data = { 0x75, 0xFF } }, 351 + { .data = { 0x77, 0xDD } }, 352 + { .data = { 0x78, 0x3F } }, 353 + { .data = { 0x79, 0x15 } }, 354 + { .data = { 0x7A, 0x17 } }, 355 + { .data = { 0x7D, 0x14 } }, 356 + { .data = { 0x7E, 0x82 } }, 357 + { .data = { 0xE0, 0x04 } }, 358 + { .data = { 0x00, 0x0E } }, 359 + { .data = { 0x02, 0xB3 } }, 360 + { .data = { 0x09, 0x61 } }, 361 + { .data = { 0x0E, 0x48 } }, 362 + { .data = { 0xE0, 0x00 } }, 363 + { .data = { 0xE6, 0x02 } }, 364 + { .data = { 0xE7, 0x0C } }, 365 + }; 366 + 367 + static const struct jadard_panel_desc cz101b4001_desc = { 368 + .mode = { 369 + .clock = 70000, 370 + 371 + .hdisplay = 800, 372 + .hsync_start = 800 + 40, 373 + .hsync_end = 800 + 40 + 18, 374 + .htotal = 800 + 40 + 18 + 20, 375 + 376 + .vdisplay = 1280, 377 + .vsync_start = 1280 + 20, 378 + .vsync_end = 1280 + 20 + 4, 379 + .vtotal = 1280 + 20 + 4 + 20, 380 + 381 + .width_mm = 62, 382 + .height_mm = 110, 383 + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 384 + }, 385 + .lanes = 4, 386 + .format = MIPI_DSI_FMT_RGB888, 387 + .init_cmds = cz101b4001_init_cmds, 388 + .num_init_cmds = ARRAY_SIZE(cz101b4001_init_cmds), 389 + }; 390 + 391 + static int jadard_dsi_probe(struct mipi_dsi_device *dsi) 392 + { 393 + struct 
device *dev = &dsi->dev; 394 + const struct jadard_panel_desc *desc; 395 + struct jadard *jadard; 396 + int ret; 397 + 398 + jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL); 399 + if (!jadard) 400 + return -ENOMEM; 401 + 402 + desc = of_device_get_match_data(dev); 403 + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | 404 + MIPI_DSI_MODE_NO_EOT_PACKET; 405 + dsi->format = desc->format; 406 + dsi->lanes = desc->lanes; 407 + 408 + jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); 409 + if (IS_ERR(jadard->reset)) { 410 + DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n"); 411 + return PTR_ERR(jadard->reset); 412 + } 413 + 414 + jadard->vdd = devm_regulator_get(dev, "vdd"); 415 + if (IS_ERR(jadard->vdd)) { 416 + DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n"); 417 + return PTR_ERR(jadard->vdd); 418 + } 419 + 420 + jadard->vccio = devm_regulator_get(dev, "vccio"); 421 + if (IS_ERR(jadard->vccio)) { 422 + DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n"); 423 + return PTR_ERR(jadard->vccio); 424 + } 425 + 426 + drm_panel_init(&jadard->panel, dev, &jadard_funcs, 427 + DRM_MODE_CONNECTOR_DSI); 428 + 429 + ret = drm_panel_of_backlight(&jadard->panel); 430 + if (ret) 431 + return ret; 432 + 433 + drm_panel_add(&jadard->panel); 434 + 435 + mipi_dsi_set_drvdata(dsi, jadard); 436 + jadard->dsi = dsi; 437 + jadard->desc = desc; 438 + 439 + ret = mipi_dsi_attach(dsi); 440 + if (ret < 0) 441 + drm_panel_remove(&jadard->panel); 442 + 443 + return ret; 444 + } 445 + 446 + static void jadard_dsi_remove(struct mipi_dsi_device *dsi) 447 + { 448 + struct jadard *jadard = mipi_dsi_get_drvdata(dsi); 449 + 450 + mipi_dsi_detach(dsi); 451 + drm_panel_remove(&jadard->panel); 452 + } 453 + 454 + static const struct of_device_id jadard_of_match[] = { 455 + { .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc }, 456 + { /* sentinel */ } 457 + }; 458 + MODULE_DEVICE_TABLE(of, jadard_of_match); 459 + 460 + static 
struct mipi_dsi_driver jadard_driver = { 461 + .probe = jadard_dsi_probe, 462 + .remove = jadard_dsi_remove, 463 + .driver = { 464 + .name = "jadard-jd9365da", 465 + .of_match_table = jadard_of_match, 466 + }, 467 + }; 468 + module_mipi_dsi_driver(jadard_driver); 469 + 470 + MODULE_AUTHOR("Jagan Teki <jagan@edgeble.ai>"); 471 + MODULE_AUTHOR("Stephen Chen <stephen@radxa.com>"); 472 + MODULE_DESCRIPTION("Jadard JD9365DA-H3 WXGA DSI panel"); 473 + MODULE_LICENSE("GPL");
+1 -1
drivers/gpu/drm/pl111/pl111_drv.c
··· 48 48 #include <drm/drm_atomic_helper.h> 49 49 #include <drm/drm_bridge.h> 50 50 #include <drm/drm_drv.h> 51 - #include <drm/drm_fb_helper.h> 51 + #include <drm/drm_fbdev_generic.h> 52 52 #include <drm/drm_fourcc.h> 53 53 #include <drm/drm_gem_dma_helper.h> 54 54 #include <drm/drm_gem_framebuffer_helper.h>
+1
drivers/gpu/drm/qxl/qxl_drv.c
··· 37 37 #include <drm/drm_aperture.h> 38 38 #include <drm/drm_atomic_helper.h> 39 39 #include <drm/drm_drv.h> 40 + #include <drm/drm_fbdev_generic.h> 40 41 #include <drm/drm_file.h> 41 42 #include <drm/drm_gem_ttm_helper.h> 42 43 #include <drm/drm_module.h>
-1
drivers/gpu/drm/qxl/qxl_drv.h
··· 38 38 39 39 #include <drm/drm_crtc.h> 40 40 #include <drm/drm_encoder.h> 41 - #include <drm/drm_fb_helper.h> 42 41 #include <drm/drm_gem_ttm_helper.h> 43 42 #include <drm/drm_ioctl.h> 44 43 #include <drm/drm_gem.h>
+4 -2
drivers/gpu/drm/radeon/radeon_fb.c
··· 80 80 DRM_FB_HELPER_DEFAULT_OPS, 81 81 .fb_open = radeonfb_open, 82 82 .fb_release = radeonfb_release, 83 + .fb_read = drm_fb_helper_cfb_read, 84 + .fb_write = drm_fb_helper_cfb_write, 83 85 .fb_fillrect = drm_fb_helper_cfb_fillrect, 84 86 .fb_copyarea = drm_fb_helper_cfb_copyarea, 85 87 .fb_imageblit = drm_fb_helper_cfb_imageblit, ··· 245 243 rbo = gem_to_radeon_bo(gobj); 246 244 247 245 /* okay we have an object now allocate the framebuffer */ 248 - info = drm_fb_helper_alloc_fbi(helper); 246 + info = drm_fb_helper_alloc_info(helper); 249 247 if (IS_ERR(info)) { 250 248 ret = PTR_ERR(info); 251 249 goto out; ··· 311 309 { 312 310 struct drm_framebuffer *fb = &rfbdev->fb; 313 311 314 - drm_fb_helper_unregister_fbi(&rfbdev->helper); 312 + drm_fb_helper_unregister_info(&rfbdev->helper); 315 313 316 314 if (fb->obj[0]) { 317 315 radeonfb_destroy_pinned_object(fb->obj[0]);
+1 -1
drivers/gpu/drm/rcar-du/rcar_du_drv.c
··· 20 20 21 21 #include <drm/drm_atomic_helper.h> 22 22 #include <drm/drm_drv.h> 23 - #include <drm/drm_fb_helper.h> 23 + #include <drm/drm_fbdev_generic.h> 24 24 #include <drm/drm_gem_dma_helper.h> 25 25 #include <drm/drm_managed.h> 26 26 #include <drm/drm_probe_helper.h>
+1 -1
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
··· 17 17 18 18 #include <drm/drm_aperture.h> 19 19 #include <drm/drm_drv.h> 20 - #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_fbdev_generic.h> 21 21 #include <drm/drm_gem_dma_helper.h> 22 22 #include <drm/drm_of.h> 23 23 #include <drm/drm_probe_helper.h>
+1 -1
drivers/gpu/drm/rockchip/rockchip_drm_drv.h
··· 9 9 #ifndef _ROCKCHIP_DRM_DRV_H 10 10 #define _ROCKCHIP_DRM_DRV_H 11 11 12 - #include <drm/drm_fb_helper.h> 13 12 #include <drm/drm_atomic_helper.h> 14 13 #include <drm/drm_gem.h> 15 14 15 + #include <linux/i2c.h> 16 16 #include <linux/module.h> 17 17 #include <linux/component.h> 18 18
-2
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
··· 9 9 #include <drm/drm.h> 10 10 #include <drm/drm_atomic.h> 11 11 #include <drm/drm_damage_helper.h> 12 - #include <drm/drm_fb_helper.h> 13 12 #include <drm/drm_fourcc.h> 14 13 #include <drm/drm_framebuffer.h> 15 14 #include <drm/drm_gem_framebuffer_helper.h> ··· 71 72 72 73 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 73 74 .fb_create = rockchip_fb_create, 74 - .output_poll_changed = drm_fb_helper_output_poll_changed, 75 75 .atomic_check = drm_atomic_helper_check, 76 76 .atomic_commit = drm_atomic_helper_commit, 77 77 };
+1
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
··· 9 9 #include <linux/vmalloc.h> 10 10 11 11 #include <drm/drm.h> 12 + #include <drm/drm_fb_helper.h> 12 13 #include <drm/drm_gem.h> 13 14 #include <drm/drm_gem_dma_helper.h> 14 15 #include <drm/drm_prime.h>
+91 -105
drivers/gpu/drm/scheduler/sched_entity.c
··· 140 140 return true; 141 141 } 142 142 143 + static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) 144 + { 145 + struct drm_sched_job *job = container_of(wrk, typeof(*job), work); 146 + 147 + drm_sched_fence_finished(job->s_fence); 148 + WARN_ON(job->s_fence->parent); 149 + job->sched->ops->free_job(job); 150 + } 151 + 152 + /* Signal the scheduler finished fence when the entity in question is killed. */ 153 + static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 154 + struct dma_fence_cb *cb) 155 + { 156 + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 157 + finish_cb); 158 + int r; 159 + 160 + dma_fence_put(f); 161 + 162 + /* Wait for all dependencies to avoid data corruptions */ 163 + while (!xa_empty(&job->dependencies)) { 164 + f = xa_erase(&job->dependencies, job->last_dependency++); 165 + r = dma_fence_add_callback(f, &job->finish_cb, 166 + drm_sched_entity_kill_jobs_cb); 167 + if (!r) 168 + return; 169 + 170 + dma_fence_put(f); 171 + } 172 + 173 + INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); 174 + schedule_work(&job->work); 175 + } 176 + 177 + /* Remove the entity from the scheduler and kill all pending jobs */ 178 + static void drm_sched_entity_kill(struct drm_sched_entity *entity) 179 + { 180 + struct drm_sched_job *job; 181 + struct dma_fence *prev; 182 + 183 + if (!entity->rq) 184 + return; 185 + 186 + spin_lock(&entity->rq_lock); 187 + entity->stopped = true; 188 + drm_sched_rq_remove_entity(entity->rq, entity); 189 + spin_unlock(&entity->rq_lock); 190 + 191 + /* Make sure this entity is not used by the scheduler at the moment */ 192 + wait_for_completion(&entity->entity_idle); 193 + 194 + prev = dma_fence_get(entity->last_scheduled); 195 + while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { 196 + struct drm_sched_fence *s_fence = job->s_fence; 197 + 198 + dma_fence_set_error(&s_fence->finished, -ESRCH); 199 + 200 + dma_fence_get(&s_fence->finished); 201 + if (!prev || 
dma_fence_add_callback(prev, &job->finish_cb, 202 + drm_sched_entity_kill_jobs_cb)) 203 + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 204 + 205 + prev = &s_fence->finished; 206 + } 207 + dma_fence_put(prev); 208 + } 209 + 143 210 /** 144 211 * drm_sched_entity_flush - Flush a context entity 145 212 * ··· 247 180 /* For killed process disable any more IBs enqueue right now */ 248 181 last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); 249 182 if ((!last_user || last_user == current->group_leader) && 250 - (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { 251 - spin_lock(&entity->rq_lock); 252 - entity->stopped = true; 253 - drm_sched_rq_remove_entity(entity->rq, entity); 254 - spin_unlock(&entity->rq_lock); 255 - } 183 + (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) 184 + drm_sched_entity_kill(entity); 256 185 257 186 return ret; 258 187 } 259 188 EXPORT_SYMBOL(drm_sched_entity_flush); 260 - 261 - static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) 262 - { 263 - struct drm_sched_job *job = container_of(wrk, typeof(*job), work); 264 - 265 - drm_sched_fence_finished(job->s_fence); 266 - WARN_ON(job->s_fence->parent); 267 - job->sched->ops->free_job(job); 268 - } 269 - 270 - 271 - /* Signal the scheduler finished fence when the entity in question is killed. 
*/ 272 - static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, 273 - struct dma_fence_cb *cb) 274 - { 275 - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, 276 - finish_cb); 277 - 278 - dma_fence_put(f); 279 - INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); 280 - schedule_work(&job->work); 281 - } 282 - 283 - static struct dma_fence * 284 - drm_sched_job_dependency(struct drm_sched_job *job, 285 - struct drm_sched_entity *entity) 286 - { 287 - if (!xa_empty(&job->dependencies)) 288 - return xa_erase(&job->dependencies, job->last_dependency++); 289 - 290 - if (job->sched->ops->dependency) 291 - return job->sched->ops->dependency(job, entity); 292 - 293 - return NULL; 294 - } 295 - 296 - static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) 297 - { 298 - struct drm_sched_job *job; 299 - struct dma_fence *f; 300 - int r; 301 - 302 - while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { 303 - struct drm_sched_fence *s_fence = job->s_fence; 304 - 305 - /* Wait for all dependencies to avoid data corruptions */ 306 - while ((f = drm_sched_job_dependency(job, entity))) { 307 - dma_fence_wait(f, false); 308 - dma_fence_put(f); 309 - } 310 - 311 - drm_sched_fence_scheduled(s_fence); 312 - dma_fence_set_error(&s_fence->finished, -ESRCH); 313 - 314 - /* 315 - * When pipe is hanged by older entity, new entity might 316 - * not even have chance to submit it's first job to HW 317 - * and so entity->last_scheduled will remain NULL 318 - */ 319 - if (!entity->last_scheduled) { 320 - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 321 - continue; 322 - } 323 - 324 - dma_fence_get(entity->last_scheduled); 325 - r = dma_fence_add_callback(entity->last_scheduled, 326 - &job->finish_cb, 327 - drm_sched_entity_kill_jobs_cb); 328 - if (r == -ENOENT) 329 - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); 330 - else if (r) 331 - DRM_ERROR("fence add callback failed (%d)\n", r); 332 - } 333 - } 334 189 335 190 
/** 336 191 * drm_sched_entity_fini - Destroy a context entity ··· 267 278 */ 268 279 void drm_sched_entity_fini(struct drm_sched_entity *entity) 269 280 { 270 - struct drm_gpu_scheduler *sched = NULL; 271 - 272 - if (entity->rq) { 273 - sched = entity->rq->sched; 274 - drm_sched_rq_remove_entity(entity->rq, entity); 275 - } 276 - 277 - /* Consumption of existing IBs wasn't completed. Forcefully 278 - * remove them here. 281 + /* 282 + * If consumption of existing IBs wasn't completed. Forcefully remove 283 + * them here. Also makes sure that the scheduler won't touch this entity 284 + * any more. 279 285 */ 280 - if (spsc_queue_count(&entity->job_queue)) { 281 - if (sched) { 282 - /* 283 - * Wait for thread to idle to make sure it isn't processing 284 - * this entity. 285 - */ 286 - wait_for_completion(&entity->entity_idle); 286 + drm_sched_entity_kill(entity); 287 287 288 - } 289 - if (entity->dependency) { 290 - dma_fence_remove_callback(entity->dependency, 291 - &entity->cb); 292 - dma_fence_put(entity->dependency); 293 - entity->dependency = NULL; 294 - } 295 - 296 - drm_sched_entity_kill_jobs(entity); 288 + if (entity->dependency) { 289 + dma_fence_remove_callback(entity->dependency, &entity->cb); 290 + dma_fence_put(entity->dependency); 291 + entity->dependency = NULL; 297 292 } 298 293 299 294 dma_fence_put(entity->last_scheduled); ··· 388 415 389 416 dma_fence_put(entity->dependency); 390 417 return false; 418 + } 419 + 420 + static struct dma_fence * 421 + drm_sched_job_dependency(struct drm_sched_job *job, 422 + struct drm_sched_entity *entity) 423 + { 424 + if (!xa_empty(&job->dependencies)) 425 + return xa_erase(&job->dependencies, job->last_dependency++); 426 + 427 + if (job->sched->ops->prepare_job) 428 + return job->sched->ops->prepare_job(job, entity); 429 + 430 + return NULL; 391 431 } 392 432 393 433 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+37 -44
drivers/gpu/drm/scheduler/sched_main.c
··· 286 286 } 287 287 288 288 /** 289 - * drm_sched_dependency_optimized - test if the dependency can be optimized 290 - * 291 - * @fence: the dependency fence 292 - * @entity: the entity which depends on the above fence 293 - * 294 - * Returns true if the dependency can be optimized and false otherwise 295 - */ 296 - bool drm_sched_dependency_optimized(struct dma_fence* fence, 297 - struct drm_sched_entity *entity) 298 - { 299 - struct drm_gpu_scheduler *sched = entity->rq->sched; 300 - struct drm_sched_fence *s_fence; 301 - 302 - if (!fence || dma_fence_is_signaled(fence)) 303 - return false; 304 - if (fence->context == entity->fence_context) 305 - return true; 306 - s_fence = to_drm_sched_fence(fence); 307 - if (s_fence && s_fence->sched == sched) 308 - return true; 309 - 310 - return false; 311 - } 312 - EXPORT_SYMBOL(drm_sched_dependency_optimized); 313 - 314 - /** 315 289 * drm_sched_start_timeout - start timeout for reset worker 316 290 * 317 291 * @sched: scheduler instance to start the worker for ··· 747 773 EXPORT_SYMBOL(drm_sched_job_add_dependency); 748 774 749 775 /** 776 + * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job 777 + * @job: scheduler job to add the dependencies to 778 + * @resv: the dma_resv object to get the fences from 779 + * @usage: the dma_resv_usage to use to filter the fences 780 + * 781 + * This adds all fences matching the given usage from @resv to @job. 782 + * Must be called with the @resv lock held. 783 + * 784 + * Returns: 785 + * 0 on success, or an error on failing to expand the array. 
786 + */ 787 + int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, 788 + struct dma_resv *resv, 789 + enum dma_resv_usage usage) 790 + { 791 + struct dma_resv_iter cursor; 792 + struct dma_fence *fence; 793 + int ret; 794 + 795 + dma_resv_assert_held(resv); 796 + 797 + dma_resv_for_each_fence(&cursor, resv, usage, fence) { 798 + /* Make sure to grab an additional ref on the added fence */ 799 + dma_fence_get(fence); 800 + ret = drm_sched_job_add_dependency(job, fence); 801 + if (ret) { 802 + dma_fence_put(fence); 803 + return ret; 804 + } 805 + } 806 + return 0; 807 + } 808 + EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies); 809 + 810 + /** 750 811 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job 751 812 * dependencies 752 813 * @job: scheduler job to add the dependencies to ··· 800 791 struct drm_gem_object *obj, 801 792 bool write) 802 793 { 803 - struct dma_resv_iter cursor; 804 - struct dma_fence *fence; 805 - int ret; 806 - 807 - dma_resv_assert_held(obj->resv); 808 - 809 - dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write), 810 - fence) { 811 - /* Make sure to grab an additional ref on the added fence */ 812 - dma_fence_get(fence); 813 - ret = drm_sched_job_add_dependency(job, fence); 814 - if (ret) { 815 - dma_fence_put(fence); 816 - return ret; 817 - } 818 - } 819 - return 0; 794 + return drm_sched_job_add_resv_dependencies(job, obj->resv, 795 + dma_resv_usage_rw(write)); 820 796 } 821 797 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); 822 - 823 798 824 799 /** 825 800 * drm_sched_job_cleanup - clean up scheduler job resources
+1 -1
drivers/gpu/drm/solomon/ssd130x.c
··· 23 23 #include <drm/drm_crtc_helper.h> 24 24 #include <drm/drm_damage_helper.h> 25 25 #include <drm/drm_edid.h> 26 - #include <drm/drm_fb_helper.h> 26 + #include <drm/drm_fbdev_generic.h> 27 27 #include <drm/drm_format_helper.h> 28 28 #include <drm/drm_framebuffer.h> 29 29 #include <drm/drm_gem_atomic_helper.h>
+1 -1
drivers/gpu/drm/sti/sti_drv.c
··· 14 14 #include <drm/drm_atomic_helper.h> 15 15 #include <drm/drm_debugfs.h> 16 16 #include <drm/drm_drv.h> 17 - #include <drm/drm_fb_helper.h> 17 + #include <drm/drm_fbdev_generic.h> 18 18 #include <drm/drm_gem_dma_helper.h> 19 19 #include <drm/drm_gem_framebuffer_helper.h> 20 20 #include <drm/drm_of.h>
+1 -1
drivers/gpu/drm/stm/drv.c
··· 18 18 #include <drm/drm_atomic.h> 19 19 #include <drm/drm_atomic_helper.h> 20 20 #include <drm/drm_drv.h> 21 - #include <drm/drm_fb_helper.h> 21 + #include <drm/drm_fbdev_generic.h> 22 22 #include <drm/drm_gem_dma_helper.h> 23 23 #include <drm/drm_gem_framebuffer_helper.h> 24 24 #include <drm/drm_module.h>
+1 -1
drivers/gpu/drm/sun4i/sun4i_drv.c
··· 17 17 #include <drm/drm_aperture.h> 18 18 #include <drm/drm_atomic_helper.h> 19 19 #include <drm/drm_drv.h> 20 - #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_fbdev_generic.h> 21 21 #include <drm/drm_gem_dma_helper.h> 22 22 #include <drm/drm_module.h> 23 23 #include <drm/drm_of.h>
+43 -18
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
··· 1101 1101 1102 1102 static int sun6i_dsi_probe(struct platform_device *pdev) 1103 1103 { 1104 + const struct sun6i_dsi_variant *variant; 1104 1105 struct device *dev = &pdev->dev; 1105 - const char *bus_clk_name = NULL; 1106 1106 struct sun6i_dsi *dsi; 1107 1107 void __iomem *base; 1108 1108 int ret; 1109 + 1110 + variant = device_get_match_data(dev); 1111 + if (!variant) 1112 + return -EINVAL; 1109 1113 1110 1114 dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); 1111 1115 if (!dsi) ··· 1118 1114 dsi->dev = dev; 1119 1115 dsi->host.ops = &sun6i_dsi_host_ops; 1120 1116 dsi->host.dev = dev; 1121 - 1122 - if (of_device_is_compatible(dev->of_node, 1123 - "allwinner,sun6i-a31-mipi-dsi")) 1124 - bus_clk_name = "bus"; 1117 + dsi->variant = variant; 1125 1118 1126 1119 base = devm_platform_ioremap_resource(pdev, 0); 1127 1120 if (IS_ERR(base)) { ··· 1143 1142 return PTR_ERR(dsi->regs); 1144 1143 } 1145 1144 1146 - dsi->bus_clk = devm_clk_get(dev, bus_clk_name); 1145 + dsi->bus_clk = devm_clk_get(dev, variant->has_mod_clk ? "bus" : NULL); 1147 1146 if (IS_ERR(dsi->bus_clk)) 1148 1147 return dev_err_probe(dev, PTR_ERR(dsi->bus_clk), 1149 1148 "Couldn't get the DSI bus clock\n"); ··· 1152 1151 if (ret) 1153 1152 return ret; 1154 1153 1155 - if (of_device_is_compatible(dev->of_node, 1156 - "allwinner,sun6i-a31-mipi-dsi")) { 1154 + if (variant->has_mod_clk) { 1157 1155 dsi->mod_clk = devm_clk_get(dev, "mod"); 1158 1156 if (IS_ERR(dsi->mod_clk)) { 1159 1157 dev_err(dev, "Couldn't get the DSI mod clock\n"); 1160 1158 ret = PTR_ERR(dsi->mod_clk); 1161 1159 goto err_attach_clk; 1162 1160 } 1163 - } 1164 1161 1165 - /* 1166 - * In order to operate properly, that clock seems to be always 1167 - * set to 297MHz. 1168 - */ 1169 - clk_set_rate_exclusive(dsi->mod_clk, 297000000); 1162 + /* 1163 + * In order to operate properly, the module clock on the 1164 + * A31 variant always seems to be set to 297MHz. 
1165 + */ 1166 + if (variant->set_mod_clk) 1167 + clk_set_rate_exclusive(dsi->mod_clk, 297000000); 1168 + } 1170 1169 1171 1170 dsi->dphy = devm_phy_get(dev, "dphy"); 1172 1171 if (IS_ERR(dsi->dphy)) { ··· 1192 1191 err_remove_dsi_host: 1193 1192 mipi_dsi_host_unregister(&dsi->host); 1194 1193 err_unprotect_clk: 1195 - clk_rate_exclusive_put(dsi->mod_clk); 1194 + if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk) 1195 + clk_rate_exclusive_put(dsi->mod_clk); 1196 1196 err_attach_clk: 1197 1197 regmap_mmio_detach_clk(dsi->regs); 1198 1198 ··· 1207 1205 1208 1206 component_del(&pdev->dev, &sun6i_dsi_ops); 1209 1207 mipi_dsi_host_unregister(&dsi->host); 1210 - clk_rate_exclusive_put(dsi->mod_clk); 1208 + if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk) 1209 + clk_rate_exclusive_put(dsi->mod_clk); 1211 1210 1212 1211 regmap_mmio_detach_clk(dsi->regs); 1213 1212 1214 1213 return 0; 1215 1214 } 1216 1215 1216 + static const struct sun6i_dsi_variant sun6i_a31_mipi_dsi_variant = { 1217 + .has_mod_clk = true, 1218 + .set_mod_clk = true, 1219 + }; 1220 + 1221 + static const struct sun6i_dsi_variant sun50i_a64_mipi_dsi_variant = { 1222 + }; 1223 + 1224 + static const struct sun6i_dsi_variant sun50i_a100_mipi_dsi_variant = { 1225 + .has_mod_clk = true, 1226 + }; 1227 + 1217 1228 static const struct of_device_id sun6i_dsi_of_table[] = { 1218 - { .compatible = "allwinner,sun6i-a31-mipi-dsi" }, 1219 - { .compatible = "allwinner,sun50i-a64-mipi-dsi" }, 1229 + { 1230 + .compatible = "allwinner,sun6i-a31-mipi-dsi", 1231 + .data = &sun6i_a31_mipi_dsi_variant, 1232 + }, 1233 + { 1234 + .compatible = "allwinner,sun50i-a64-mipi-dsi", 1235 + .data = &sun50i_a64_mipi_dsi_variant, 1236 + }, 1237 + { 1238 + .compatible = "allwinner,sun50i-a100-mipi-dsi", 1239 + .data = &sun50i_a100_mipi_dsi_variant, 1240 + }, 1220 1241 { } 1221 1242 }; 1222 1243 MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table);
+7
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
··· 15 15 16 16 #define SUN6I_DSI_TCON_DIV 4 17 17 18 + struct sun6i_dsi_variant { 19 + bool has_mod_clk; 20 + bool set_mod_clk; 21 + }; 22 + 18 23 struct sun6i_dsi { 19 24 struct drm_connector connector; 20 25 struct drm_encoder encoder; ··· 36 31 struct mipi_dsi_device *device; 37 32 struct drm_device *drm; 38 33 struct drm_panel *panel; 34 + 35 + const struct sun6i_dsi_variant *variant; 39 36 }; 40 37 41 38 static inline struct sun6i_dsi *host_to_sun6i_dsi(struct mipi_dsi_host *host)
+5 -3
drivers/gpu/drm/tegra/fb.c
··· 206 206 static const struct fb_ops tegra_fb_ops = { 207 207 .owner = THIS_MODULE, 208 208 DRM_FB_HELPER_DEFAULT_OPS, 209 + .fb_read = drm_fb_helper_sys_read, 210 + .fb_write = drm_fb_helper_sys_write, 209 211 .fb_fillrect = drm_fb_helper_sys_fillrect, 210 212 .fb_copyarea = drm_fb_helper_sys_copyarea, 211 213 .fb_imageblit = drm_fb_helper_sys_imageblit, ··· 245 243 if (IS_ERR(bo)) 246 244 return PTR_ERR(bo); 247 245 248 - info = drm_fb_helper_alloc_fbi(helper); 246 + info = drm_fb_helper_alloc_info(helper); 249 247 if (IS_ERR(info)) { 250 248 dev_err(drm->dev, "failed to allocate framebuffer info\n"); 251 249 drm_gem_object_put(&bo->gem); ··· 263 261 264 262 fb = fbdev->fb; 265 263 helper->fb = fb; 266 - helper->fbdev = info; 264 + helper->info = info; 267 265 268 266 info->fbops = &tegra_fb_ops; 269 267 ··· 349 347 350 348 static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) 351 349 { 352 - drm_fb_helper_unregister_fbi(&fbdev->base); 350 + drm_fb_helper_unregister_info(&fbdev->base); 353 351 354 352 if (fbdev->fb) { 355 353 struct tegra_bo *bo = tegra_fb_get_plane(fbdev->fb, 0);
+1 -1
drivers/gpu/drm/tidss/tidss_drv.c
··· 14 14 #include <drm/drm_crtc.h> 15 15 #include <drm/drm_crtc_helper.h> 16 16 #include <drm/drm_drv.h> 17 - #include <drm/drm_fb_helper.h> 17 + #include <drm/drm_fbdev_generic.h> 18 18 #include <drm/drm_gem_dma_helper.h> 19 19 #include <drm/drm_managed.h> 20 20 #include <drm/drm_module.h>
-1
drivers/gpu/drm/tidss/tidss_kms.c
··· 10 10 #include <drm/drm_atomic_helper.h> 11 11 #include <drm/drm_bridge.h> 12 12 #include <drm/drm_crtc_helper.h> 13 - #include <drm/drm_fb_helper.h> 14 13 #include <drm/drm_gem_framebuffer_helper.h> 15 14 #include <drm/drm_of.h> 16 15 #include <drm/drm_panel.h>
+1 -1
drivers/gpu/drm/tilcdc/tilcdc_drv.c
··· 16 16 #include <drm/drm_atomic_helper.h> 17 17 #include <drm/drm_debugfs.h> 18 18 #include <drm/drm_drv.h> 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_fbdev_generic.h> 20 20 #include <drm/drm_fourcc.h> 21 21 #include <drm/drm_gem_dma_helper.h> 22 22 #include <drm/drm_gem_framebuffer_helper.h>
+1 -1
drivers/gpu/drm/tiny/Kconfig
··· 53 53 54 54 config DRM_OFDRM 55 55 tristate "Open Firmware display driver" 56 - depends on DRM && OF && (PPC || COMPILE_TEST) 56 + depends on DRM && MMU && OF && (PPC || COMPILE_TEST) 57 57 select APERTURE_HELPERS 58 58 select DRM_GEM_SHMEM_HELPER 59 59 select DRM_KMS_HELPER
+1 -1
drivers/gpu/drm/tiny/arcpgu.c
··· 12 12 #include <drm/drm_drv.h> 13 13 #include <drm/drm_edid.h> 14 14 #include <drm/drm_fb_dma_helper.h> 15 - #include <drm/drm_fb_helper.h> 15 + #include <drm/drm_fbdev_generic.h> 16 16 #include <drm/drm_fourcc.h> 17 17 #include <drm/drm_framebuffer.h> 18 18 #include <drm/drm_gem_dma_helper.h>
+1 -1
drivers/gpu/drm/tiny/bochs.c
··· 7 7 #include <drm/drm_atomic_helper.h> 8 8 #include <drm/drm_drv.h> 9 9 #include <drm/drm_edid.h> 10 - #include <drm/drm_fb_helper.h> 10 + #include <drm/drm_fbdev_generic.h> 11 11 #include <drm/drm_fourcc.h> 12 12 #include <drm/drm_framebuffer.h> 13 13 #include <drm/drm_gem_framebuffer_helper.h>
+1 -1
drivers/gpu/drm/tiny/cirrus.c
··· 30 30 #include <drm/drm_damage_helper.h> 31 31 #include <drm/drm_drv.h> 32 32 #include <drm/drm_edid.h> 33 - #include <drm/drm_fb_helper.h> 33 + #include <drm/drm_fbdev_generic.h> 34 34 #include <drm/drm_file.h> 35 35 #include <drm/drm_format_helper.h> 36 36 #include <drm/drm_fourcc.h>
+1 -1
drivers/gpu/drm/tiny/gm12u320.c
··· 12 12 #include <drm/drm_damage_helper.h> 13 13 #include <drm/drm_drv.h> 14 14 #include <drm/drm_edid.h> 15 - #include <drm/drm_fb_helper.h> 15 + #include <drm/drm_fbdev_generic.h> 16 16 #include <drm/drm_file.h> 17 17 #include <drm/drm_format_helper.h> 18 18 #include <drm/drm_fourcc.h>
+1 -1
drivers/gpu/drm/tiny/hx8357d.c
··· 18 18 19 19 #include <drm/drm_atomic_helper.h> 20 20 #include <drm/drm_drv.h> 21 - #include <drm/drm_fb_helper.h> 21 + #include <drm/drm_fbdev_generic.h> 22 22 #include <drm/drm_gem_atomic_helper.h> 23 23 #include <drm/drm_gem_dma_helper.h> 24 24 #include <drm/drm_managed.h>
+1 -1
drivers/gpu/drm/tiny/ili9163.c
··· 9 9 10 10 #include <drm/drm_atomic_helper.h> 11 11 #include <drm/drm_drv.h> 12 - #include <drm/drm_fb_helper.h> 12 + #include <drm/drm_fbdev_generic.h> 13 13 #include <drm/drm_gem_atomic_helper.h> 14 14 #include <drm/drm_gem_dma_helper.h> 15 15 #include <drm/drm_mipi_dbi.h>
+1 -1
drivers/gpu/drm/tiny/ili9225.c
··· 20 20 #include <drm/drm_damage_helper.h> 21 21 #include <drm/drm_drv.h> 22 22 #include <drm/drm_fb_dma_helper.h> 23 - #include <drm/drm_fb_helper.h> 23 + #include <drm/drm_fbdev_generic.h> 24 24 #include <drm/drm_fourcc.h> 25 25 #include <drm/drm_framebuffer.h> 26 26 #include <drm/drm_gem_atomic_helper.h>
+1 -1
drivers/gpu/drm/tiny/ili9341.c
··· 17 17 18 18 #include <drm/drm_atomic_helper.h> 19 19 #include <drm/drm_drv.h> 20 - #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_fbdev_generic.h> 21 21 #include <drm/drm_gem_atomic_helper.h> 22 22 #include <drm/drm_gem_dma_helper.h> 23 23 #include <drm/drm_managed.h>
+1 -1
drivers/gpu/drm/tiny/ili9486.c
··· 16 16 17 17 #include <drm/drm_atomic_helper.h> 18 18 #include <drm/drm_drv.h> 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_fbdev_generic.h> 20 20 #include <drm/drm_gem_atomic_helper.h> 21 21 #include <drm/drm_gem_dma_helper.h> 22 22 #include <drm/drm_managed.h>
+1 -1
drivers/gpu/drm/tiny/mi0283qt.c
··· 15 15 16 16 #include <drm/drm_atomic_helper.h> 17 17 #include <drm/drm_drv.h> 18 - #include <drm/drm_fb_helper.h> 18 + #include <drm/drm_fbdev_generic.h> 19 19 #include <drm/drm_gem_atomic_helper.h> 20 20 #include <drm/drm_gem_dma_helper.h> 21 21 #include <drm/drm_managed.h>
+17 -12
drivers/gpu/drm/tiny/ofdrm.c
··· 11 11 #include <drm/drm_damage_helper.h> 12 12 #include <drm/drm_device.h> 13 13 #include <drm/drm_drv.h> 14 - #include <drm/drm_fb_helper.h> 14 + #include <drm/drm_fbdev_generic.h> 15 15 #include <drm/drm_format_helper.h> 16 16 #include <drm/drm_framebuffer.h> 17 17 #include <drm/drm_gem_atomic_helper.h> ··· 231 231 return address; 232 232 } 233 233 234 - static bool is_avivo(__be32 vendor, __be32 device) 234 + static bool is_avivo(u32 vendor, u32 device) 235 235 { 236 236 /* This will match most R5xx */ 237 237 return (vendor == PCI_VENDOR_ID_ATI) && ··· 265 265 of_parent = of_get_parent(of_node); 266 266 vendor_p = of_get_property(of_parent, "vendor-id", NULL); 267 267 device_p = of_get_property(of_parent, "device-id", NULL); 268 - if (vendor_p && device_p && is_avivo(*vendor_p, *device_p)) 269 - model = OFDRM_MODEL_AVIVO; 268 + if (vendor_p && device_p) { 269 + u32 vendor = be32_to_cpup(vendor_p); 270 + u32 device = be32_to_cpup(device_p); 271 + 272 + if (is_avivo(vendor, device)) 273 + model = OFDRM_MODEL_AVIVO; 274 + } 270 275 of_node_put(of_parent); 271 276 } else if (of_device_is_compatible(of_node, "qemu,std-vga")) { 272 277 model = OFDRM_MODEL_QEMU; ··· 438 433 if (!addr_p) 439 434 addr_p = of_get_address(of_node, bar_no, &max_size, &flags); 440 435 if (!addr_p) 441 - return ERR_PTR(-ENODEV); 436 + return IOMEM_ERR_PTR(-ENODEV); 442 437 443 438 if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) 444 - return ERR_PTR(-ENODEV); 439 + return IOMEM_ERR_PTR(-ENODEV); 445 440 446 441 if ((offset + size) >= max_size) 447 - return ERR_PTR(-ENODEV); 442 + return IOMEM_ERR_PTR(-ENODEV); 448 443 449 444 address = of_translate_address(of_node, addr_p); 450 445 if (address == OF_BAD_ADDR) 451 - return ERR_PTR(-ENODEV); 446 + return IOMEM_ERR_PTR(-ENODEV); 452 447 453 448 mem = devm_ioremap(dev->dev, address + offset, size); 454 449 if (!mem) 455 - return ERR_PTR(-ENOMEM); 450 + return IOMEM_ERR_PTR(-ENOMEM); 456 451 457 452 return mem; 458 453 } ··· 470 465 471 
466 cmap_base = devm_ioremap(dev->dev, address, 0x1000); 472 467 if (!cmap_base) 473 - return ERR_PTR(-ENOMEM); 468 + return IOMEM_ERR_PTR(-ENOMEM); 474 469 475 470 return cmap_base; 476 471 } ··· 629 624 630 625 address = of_translate_address(of_node, io_of_addr); 631 626 if (address == OF_BAD_ADDR) 632 - return ERR_PTR(-ENODEV); 627 + return IOMEM_ERR_PTR(-ENODEV); 633 628 634 629 cmap_base = devm_ioremap(dev->dev, address + 0x3c8, 2); 635 630 if (!cmap_base) 636 - return ERR_PTR(-ENOMEM); 631 + return IOMEM_ERR_PTR(-ENOMEM); 637 632 638 633 return cmap_base; 639 634 }
+1 -1
drivers/gpu/drm/tiny/panel-mipi-dbi.c
··· 16 16 17 17 #include <drm/drm_atomic_helper.h> 18 18 #include <drm/drm_drv.h> 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_fbdev_generic.h> 20 20 #include <drm/drm_gem_atomic_helper.h> 21 21 #include <drm/drm_gem_dma_helper.h> 22 22 #include <drm/drm_managed.h>
+1 -1
drivers/gpu/drm/tiny/repaper.c
··· 26 26 #include <drm/drm_damage_helper.h> 27 27 #include <drm/drm_drv.h> 28 28 #include <drm/drm_fb_dma_helper.h> 29 - #include <drm/drm_fb_helper.h> 29 + #include <drm/drm_fbdev_generic.h> 30 30 #include <drm/drm_format_helper.h> 31 31 #include <drm/drm_framebuffer.h> 32 32 #include <drm/drm_gem_atomic_helper.h>
+1 -1
drivers/gpu/drm/tiny/simpledrm.c
··· 15 15 #include <drm/drm_damage_helper.h> 16 16 #include <drm/drm_device.h> 17 17 #include <drm/drm_drv.h> 18 - #include <drm/drm_fb_helper.h> 18 + #include <drm/drm_fbdev_generic.h> 19 19 #include <drm/drm_format_helper.h> 20 20 #include <drm/drm_gem_atomic_helper.h> 21 21 #include <drm/drm_gem_framebuffer_helper.h>
+1 -1
drivers/gpu/drm/tiny/st7586.c
··· 16 16 #include <drm/drm_damage_helper.h> 17 17 #include <drm/drm_drv.h> 18 18 #include <drm/drm_fb_dma_helper.h> 19 - #include <drm/drm_fb_helper.h> 19 + #include <drm/drm_fbdev_generic.h> 20 20 #include <drm/drm_format_helper.h> 21 21 #include <drm/drm_framebuffer.h> 22 22 #include <drm/drm_gem_atomic_helper.h>
+1 -1
drivers/gpu/drm/tiny/st7735r.c
··· 18 18 19 19 #include <drm/drm_atomic_helper.h> 20 20 #include <drm/drm_drv.h> 21 - #include <drm/drm_fb_helper.h> 21 + #include <drm/drm_fbdev_generic.h> 22 22 #include <drm/drm_gem_atomic_helper.h> 23 23 #include <drm/drm_gem_dma_helper.h> 24 24 #include <drm/drm_managed.h>
+58 -24
drivers/gpu/drm/ttm/ttm_pool.c
··· 344 344 return p->private; 345 345 } 346 346 347 + /* Called when we got a page, either from a pool or newly allocated */ 348 + static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, 349 + struct page *p, dma_addr_t **dma_addr, 350 + unsigned long *num_pages, 351 + struct page ***pages) 352 + { 353 + unsigned int i; 354 + int r; 355 + 356 + if (*dma_addr) { 357 + r = ttm_pool_map(pool, order, p, dma_addr); 358 + if (r) 359 + return r; 360 + } 361 + 362 + *num_pages -= 1 << order; 363 + for (i = 1 << order; i; --i, ++(*pages), ++p) 364 + **pages = p; 365 + 366 + return 0; 367 + } 368 + 347 369 /** 348 370 * ttm_pool_alloc - Fill a ttm_tt object 349 371 * ··· 407 385 for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages)); 408 386 num_pages; 409 387 order = min_t(unsigned int, order, __fls(num_pages))) { 410 - bool apply_caching = false; 411 388 struct ttm_pool_type *pt; 412 389 413 390 pt = ttm_pool_select_type(pool, tt->caching, order); 414 391 p = pt ? 
ttm_pool_type_take(pt) : NULL; 415 392 if (p) { 416 - apply_caching = true; 417 - } else { 418 - p = ttm_pool_alloc_page(pool, gfp_flags, order); 419 - if (p && PageHighMem(p)) 420 - apply_caching = true; 393 + r = ttm_pool_apply_caching(caching, pages, 394 + tt->caching); 395 + if (r) 396 + goto error_free_page; 397 + 398 + do { 399 + r = ttm_pool_page_allocated(pool, order, p, 400 + &dma_addr, 401 + &num_pages, 402 + &pages); 403 + if (r) 404 + goto error_free_page; 405 + 406 + if (num_pages < (1 << order)) 407 + break; 408 + 409 + p = ttm_pool_type_take(pt); 410 + } while (p); 411 + caching = pages; 412 + } 413 + 414 + while (num_pages >= (1 << order) && 415 + (p = ttm_pool_alloc_page(pool, gfp_flags, order))) { 416 + 417 + if (PageHighMem(p)) { 418 + r = ttm_pool_apply_caching(caching, pages, 419 + tt->caching); 420 + if (r) 421 + goto error_free_page; 422 + } 423 + r = ttm_pool_page_allocated(pool, order, p, &dma_addr, 424 + &num_pages, &pages); 425 + if (r) 426 + goto error_free_page; 427 + if (PageHighMem(p)) 428 + caching = pages; 421 429 } 422 430 423 431 if (!p) { ··· 458 406 r = -ENOMEM; 459 407 goto error_free_all; 460 408 } 461 - 462 - if (apply_caching) { 463 - r = ttm_pool_apply_caching(caching, pages, 464 - tt->caching); 465 - if (r) 466 - goto error_free_page; 467 - caching = pages + (1 << order); 468 - } 469 - 470 - if (dma_addr) { 471 - r = ttm_pool_map(pool, order, p, &dma_addr); 472 - if (r) 473 - goto error_free_page; 474 - } 475 - 476 - num_pages -= 1 << order; 477 - for (i = 1 << order; i; --i) 478 - *(pages++) = p++; 479 409 } 480 410 481 411 r = ttm_pool_apply_caching(caching, pages, tt->caching);
+2 -1
drivers/gpu/drm/tve200/tve200_drv.c
··· 32 32 #include <linux/irq.h> 33 33 #include <linux/io.h> 34 34 #include <linux/module.h> 35 + #include <linux/of.h> 35 36 #include <linux/platform_device.h> 36 37 #include <linux/shmem_fs.h> 37 38 #include <linux/slab.h> ··· 40 39 #include <drm/drm_atomic_helper.h> 41 40 #include <drm/drm_bridge.h> 42 41 #include <drm/drm_drv.h> 43 - #include <drm/drm_fb_helper.h> 42 + #include <drm/drm_fbdev_generic.h> 44 43 #include <drm/drm_gem_dma_helper.h> 45 44 #include <drm/drm_gem_framebuffer_helper.h> 46 45 #include <drm/drm_module.h>
+1 -1
drivers/gpu/drm/udl/udl_drv.c
··· 7 7 8 8 #include <drm/drm_crtc_helper.h> 9 9 #include <drm/drm_drv.h> 10 - #include <drm/drm_fb_helper.h> 10 + #include <drm/drm_fbdev_generic.h> 11 11 #include <drm/drm_file.h> 12 12 #include <drm/drm_gem_shmem_helper.h> 13 13 #include <drm/drm_managed.h>
-1
drivers/gpu/drm/v3d/v3d_drv.c
··· 22 22 #include <linux/reset.h> 23 23 24 24 #include <drm/drm_drv.h> 25 - #include <drm/drm_fb_helper.h> 26 25 #include <drm/drm_managed.h> 27 26 #include <uapi/drm/v3d_drm.h> 28 27
+1 -3
drivers/gpu/drm/vboxvideo/vbox_drv.c
··· 14 14 #include <drm/drm_aperture.h> 15 15 #include <drm/drm_crtc_helper.h> 16 16 #include <drm/drm_drv.h> 17 - #include <drm/drm_fb_helper.h> 17 + #include <drm/drm_fbdev_generic.h> 18 18 #include <drm/drm_file.h> 19 19 #include <drm/drm_ioctl.h> 20 20 #include <drm/drm_managed.h> ··· 177 177 static const struct drm_driver driver = { 178 178 .driver_features = 179 179 DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, 180 - 181 - .lastclose = drm_fb_helper_lastclose, 182 180 183 181 .fops = &vbox_fops, 184 182 .name = DRIVER_NAME,
-1
drivers/gpu/drm/vboxvideo/vbox_main.c
··· 11 11 #include <linux/pci.h> 12 12 #include <linux/vbox_err.h> 13 13 14 - #include <drm/drm_fb_helper.h> 15 14 #include <drm/drm_crtc_helper.h> 16 15 #include <drm/drm_damage_helper.h> 17 16
+1 -1
drivers/gpu/drm/vc4/vc4_drv.c
··· 33 33 #include <drm/drm_aperture.h> 34 34 #include <drm/drm_atomic_helper.h> 35 35 #include <drm/drm_drv.h> 36 - #include <drm/drm_fb_helper.h> 36 + #include <drm/drm_fbdev_generic.h> 37 37 #include <drm/drm_vblank.h> 38 38 39 39 #include <soc/bcm2835/raspberrypi-firmware.h>
+1
drivers/gpu/drm/virtio/virtgpu_drv.c
··· 35 35 #include <drm/drm_aperture.h> 36 36 #include <drm/drm_atomic_helper.h> 37 37 #include <drm/drm_drv.h> 38 + #include <drm/drm_fbdev_generic.h> 38 39 #include <drm/drm_file.h> 39 40 40 41 #include "virtgpu_drv.h"
-1
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 35 35 #include <drm/drm_atomic.h> 36 36 #include <drm/drm_drv.h> 37 37 #include <drm/drm_encoder.h> 38 - #include <drm/drm_fb_helper.h> 39 38 #include <drm/drm_fourcc.h> 40 39 #include <drm/drm_framebuffer.h> 41 40 #include <drm/drm_gem.h>
+1 -1
drivers/gpu/drm/vkms/vkms_drv.c
··· 17 17 #include <drm/drm_atomic.h> 18 18 #include <drm/drm_atomic_helper.h> 19 19 #include <drm/drm_drv.h> 20 - #include <drm/drm_fb_helper.h> 20 + #include <drm/drm_fbdev_generic.h> 21 21 #include <drm/drm_file.h> 22 22 #include <drm/drm_gem_framebuffer_helper.h> 23 23 #include <drm/drm_ioctl.h>
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 35 35 36 36 #include <drm/drm_aperture.h> 37 37 #include <drm/drm_drv.h> 38 - #include <drm/drm_fb_helper.h> 38 + #include <drm/drm_fbdev_generic.h> 39 39 #include <drm/drm_gem_ttm_helper.h> 40 40 #include <drm/drm_ioctl.h> 41 41 #include <drm/drm_module.h>
-1
drivers/gpu/drm/xen/xen_drm_front_gem.c
··· 12 12 #include <linux/scatterlist.h> 13 13 #include <linux/shmem_fs.h> 14 14 15 - #include <drm/drm_fb_helper.h> 16 15 #include <drm/drm_gem.h> 17 16 #include <drm/drm_prime.h> 18 17 #include <drm/drm_probe_helper.h>
+1 -1
drivers/gpu/drm/xlnx/zynqmp_kms.c
··· 19 19 #include <drm/drm_device.h> 20 20 #include <drm/drm_drv.h> 21 21 #include <drm/drm_encoder.h> 22 - #include <drm/drm_fb_helper.h> 22 + #include <drm/drm_fbdev_generic.h> 23 23 #include <drm/drm_fourcc.h> 24 24 #include <drm/drm_framebuffer.h> 25 25 #include <drm/drm_gem_dma_helper.h>
+49 -19
include/drm/drm_fb_helper.h
··· 30 30 #ifndef DRM_FB_HELPER_H 31 31 #define DRM_FB_HELPER_H 32 32 33 + struct drm_clip_rect; 33 34 struct drm_fb_helper; 34 35 35 - #include <drm/drm_client.h> 36 - #include <drm/drm_crtc.h> 37 - #include <drm/drm_device.h> 38 36 #include <linux/fb.h> 39 - #include <linux/kgdb.h> 37 + 38 + #include <drm/drm_client.h> 40 39 41 40 enum mode_set_atomic { 42 41 LEAVE_ATOMIC_MODE_SET, ··· 90 91 */ 91 92 int (*fb_probe)(struct drm_fb_helper *helper, 92 93 struct drm_fb_helper_surface_size *sizes); 94 + 95 + /** 96 + * @fb_dirty: 97 + * 98 + * Driver callback to update the framebuffer memory. If set, fbdev 99 + * emulation will invoke this callback in regular intervals after 100 + * the framebuffer has been written. 101 + * 102 + * This callback is optional. 103 + * 104 + * Returns: 105 + * 0 on success, or an error code otherwise. 106 + */ 107 + int (*fb_dirty)(struct drm_fb_helper *helper, struct drm_clip_rect *clip); 93 108 }; 94 109 95 110 /** ··· 111 98 * @fb: Scanout framebuffer object 112 99 * @dev: DRM device 113 100 * @funcs: driver callbacks for fb helper 114 - * @fbdev: emulated fbdev device info struct 101 + * @info: emulated fbdev device info struct 115 102 * @pseudo_palette: fake palette of 16 colors 116 103 * @damage_clip: clip rectangle used with deferred_io to accumulate damage to 117 104 * the screen buffer ··· 142 129 struct drm_framebuffer *fb; 143 130 struct drm_device *dev; 144 131 const struct drm_fb_helper_funcs *funcs; 145 - struct fb_info *fbdev; 132 + struct fb_info *info; 146 133 u32 pseudo_palette[17]; 147 134 struct drm_clip_rect damage_clip; 148 135 spinlock_t damage_lock; ··· 199 186 * See also: @deferred_setup 200 187 */ 201 188 int preferred_bpp; 189 + 190 + /** 191 + * @hint_leak_smem_start: 192 + * 193 + * Hint to the fbdev emulation to store the framebuffer's physical 194 + * address in struct &fb_info.fix.smem_start. If the hint is unset, 195 + * the smem_start field should always be cleared to zero. 
196 + */ 197 + bool hint_leak_smem_start; 202 198 }; 203 199 204 200 static inline struct drm_fb_helper * ··· 246 224 247 225 int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); 248 226 249 - struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper); 250 - void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper); 227 + struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper); 228 + void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper); 251 229 void drm_fb_helper_fill_info(struct fb_info *info, 252 230 struct drm_fb_helper *fb_helper, 253 231 struct drm_fb_helper_surface_size *sizes); ··· 265 243 const struct fb_copyarea *area); 266 244 void drm_fb_helper_sys_imageblit(struct fb_info *info, 267 245 const struct fb_image *image); 246 + 247 + ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, 248 + size_t count, loff_t *ppos); 249 + ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, 250 + size_t count, loff_t *ppos); 268 251 269 252 void drm_fb_helper_cfb_fillrect(struct fb_info *info, 270 253 const struct fb_fillrect *rect); ··· 294 267 295 268 void drm_fb_helper_lastclose(struct drm_device *dev); 296 269 void drm_fb_helper_output_poll_changed(struct drm_device *dev); 297 - 298 - void drm_fbdev_generic_setup(struct drm_device *dev, 299 - unsigned int preferred_bpp); 300 270 #else 301 271 static inline void drm_fb_helper_prepare(struct drm_device *dev, 302 272 struct drm_fb_helper *helper, ··· 346 322 } 347 323 348 324 static inline struct fb_info * 349 - drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) 325 + drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) 350 326 { 351 327 return NULL; 352 328 } 353 329 354 - static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) 330 + static inline void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) 355 331 { 356 332 } 357 333 ··· 413 389 { 414 
390 } 415 391 392 + static inline ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, 393 + size_t count, loff_t *ppos) 394 + { 395 + return -ENODEV; 396 + } 397 + 398 + static inline ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, 399 + size_t count, loff_t *ppos) 400 + { 401 + return -ENODEV; 402 + } 403 + 416 404 static inline void drm_fb_helper_cfb_fillrect(struct fb_info *info, 417 405 const struct fb_fillrect *rect) 418 406 { ··· 478 442 static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev) 479 443 { 480 444 } 481 - 482 - static inline void 483 - drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) 484 - { 485 - } 486 - 487 445 #endif 488 446 489 447 #endif
+15
include/drm/drm_fbdev_generic.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + 3 + #ifndef DRM_FBDEV_GENERIC_H 4 + #define DRM_FBDEV_GENERIC_H 5 + 6 + struct drm_device; 7 + 8 + #ifdef CONFIG_DRM_FBDEV_EMULATION 9 + void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp); 10 + #else 11 + static inline void drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp) 12 + { } 13 + #endif 14 + 15 + #endif
+10 -10
include/drm/drm_gem_atomic_helper.h
··· 103 103 .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \ 104 104 .atomic_destroy_state = drm_gem_destroy_shadow_plane_state 105 105 106 - int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); 107 - void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state); 106 + int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state); 107 + void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state); 108 108 109 109 /** 110 110 * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS - ··· 115 115 * functions. 116 116 */ 117 117 #define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \ 118 - .prepare_fb = drm_gem_prepare_shadow_fb, \ 119 - .cleanup_fb = drm_gem_cleanup_shadow_fb 118 + .begin_fb_access = drm_gem_begin_shadow_fb_access, \ 119 + .end_fb_access = drm_gem_end_shadow_fb_access 120 120 121 - int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe, 122 - struct drm_plane_state *plane_state); 123 - void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe, 124 - struct drm_plane_state *plane_state); 121 + int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe, 122 + struct drm_plane_state *plane_state); 123 + void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe, 124 + struct drm_plane_state *plane_state); 125 125 void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe); 126 126 struct drm_plane_state * 127 127 drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe); ··· 137 137 * functions. 
138 138 */ 139 139 #define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \ 140 - .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \ 141 - .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \ 140 + .begin_fb_access = drm_gem_simple_kms_begin_shadow_fb_access, \ 141 + .end_fb_access = drm_gem_simple_kms_end_shadow_fb_access, \ 142 142 .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \ 143 143 .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \ 144 144 .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
+40 -1
include/drm/drm_modeset_helper_vtables.h
··· 1184 1184 * can call drm_gem_plane_helper_prepare_fb() from their @prepare_fb 1185 1185 * hook. 1186 1186 * 1187 + * The resources acquired in @prepare_fb persist after the end of 1188 + * the atomic commit. Resources that can be release at the commit's end 1189 + * should be acquired in @begin_fb_access and released in @end_fb_access. 1190 + * For example, a GEM buffer's pin operation belongs into @prepare_fb to 1191 + * keep the buffer pinned after the commit. But a vmap operation for 1192 + * shadow-plane helpers belongs into @begin_fb_access, so that atomic 1193 + * helpers remove the mapping at the end of the commit. 1194 + * 1187 1195 * The helpers will call @cleanup_fb with matching arguments for every 1188 1196 * successful call to this hook. 1189 1197 * 1190 1198 * This callback is used by the atomic modeset helpers and by the 1191 - * transitional plane helpers, but it is optional. 1199 + * transitional plane helpers, but it is optional. See @begin_fb_access 1200 + * for preparing per-commit resources. 1192 1201 * 1193 1202 * RETURNS: 1194 1203 * ··· 1219 1210 */ 1220 1211 void (*cleanup_fb)(struct drm_plane *plane, 1221 1212 struct drm_plane_state *old_state); 1213 + 1214 + /** 1215 + * @begin_fb_access: 1216 + * 1217 + * This hook prepares the plane for access during an atomic commit. 1218 + * In contrast to @prepare_fb, resources acquired in @begin_fb_access, 1219 + * are released at the end of the atomic commit in @end_fb_access. 1220 + * 1221 + * For example, with shadow-plane helpers, the GEM buffer's vmap 1222 + * operation belongs into @begin_fb_access, so that the buffer's 1223 + * memory will be unmapped at the end of the commit in @end_fb_access. 1224 + * But a GEM buffer's pin operation belongs into @prepare_fb 1225 + * to keep the buffer pinned after the commit. 1226 + * 1227 + * The callback is used by the atomic modeset helpers, but it is optional. 
1228 + * See @end_fb_cleanup for undoing the effects of @begin_fb_access and 1229 + * @prepare_fb for acquiring resources until the next pageflip. 1230 + * 1231 + * Returns: 1232 + * 0 on success, or a negative errno code otherwise. 1233 + */ 1234 + int (*begin_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state); 1235 + 1236 + /** 1237 + * @end_fb_access: 1238 + * 1239 + * This hook cleans up resources allocated by @begin_fb_access. It it called 1240 + * at the end of a commit for the new plane state. 1241 + */ 1242 + void (*end_fb_access)(struct drm_plane *plane, struct drm_plane_state *new_plane_state); 1222 1243 1223 1244 /** 1224 1245 * @atomic_check:
+20
include/drm/drm_simple_kms_helper.h
··· 136 136 struct drm_plane_state *plane_state); 137 137 138 138 /** 139 + * @begin_fb_access: 140 + * 141 + * Optional, called by &drm_plane_helper_funcs.begin_fb_access. Please read 142 + * the documentation for the &drm_plane_helper_funcs.begin_fb_access hook for 143 + * more details. 144 + */ 145 + int (*begin_fb_access)(struct drm_simple_display_pipe *pipe, 146 + struct drm_plane_state *new_plane_state); 147 + 148 + /** 149 + * @end_fb_access: 150 + * 151 + * Optional, called by &drm_plane_helper_funcs.end_fb_access. Please read 152 + * the documentation for the &drm_plane_helper_funcs.end_fb_access hook for 153 + * more details. 154 + */ 155 + void (*end_fb_access)(struct drm_simple_display_pipe *pipe, 156 + struct drm_plane_state *plane_state); 157 + 158 + /** 139 159 * @enable_vblank: 140 160 * 141 161 * Optional, called by &drm_crtc_funcs.enable_vblank. Please read
+11 -9
include/drm/gpu_scheduler.h
··· 41 41 */ 42 42 #define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS 43 43 44 + enum dma_resv_usage; 45 + struct dma_resv; 44 46 struct drm_gem_object; 45 47 46 48 struct drm_gpu_scheduler; ··· 329 327 */ 330 328 union { 331 329 struct dma_fence_cb finish_cb; 332 - struct work_struct work; 330 + struct work_struct work; 333 331 }; 334 332 335 333 uint64_t id; ··· 377 375 */ 378 376 struct drm_sched_backend_ops { 379 377 /** 380 - * @dependency: 378 + * @prepare_job: 381 379 * 382 380 * Called when the scheduler is considering scheduling this job next, to 383 381 * get another struct dma_fence for this job to block on. Once it 384 382 * returns NULL, run_job() may be called. 385 383 * 386 - * If a driver exclusively uses drm_sched_job_add_dependency() and 387 - * drm_sched_job_add_implicit_dependencies() this can be ommitted and 388 - * left as NULL. 384 + * Can be NULL if no additional preparation to the dependencies are 385 + * necessary. Skipped when jobs are killed instead of run. 
389 386 */ 390 - struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, 391 - struct drm_sched_entity *s_entity); 387 + struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job, 388 + struct drm_sched_entity *s_entity); 392 389 393 390 /** 394 391 * @run_job: Called to execute the job once all of the dependencies ··· 515 514 void drm_sched_job_arm(struct drm_sched_job *job); 516 515 int drm_sched_job_add_dependency(struct drm_sched_job *job, 517 516 struct dma_fence *fence); 517 + int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, 518 + struct dma_resv *resv, 519 + enum dma_resv_usage usage); 518 520 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, 519 521 struct drm_gem_object *obj, 520 522 bool write); ··· 536 532 void drm_sched_increase_karma(struct drm_sched_job *bad); 537 533 void drm_sched_reset_karma(struct drm_sched_job *bad); 538 534 void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type); 539 - bool drm_sched_dependency_optimized(struct dma_fence* fence, 540 - struct drm_sched_entity *entity); 541 535 void drm_sched_fault(struct drm_gpu_scheduler *sched); 542 536 void drm_sched_job_kickout(struct drm_sched_job *s_job); 543 537