Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm/mdp5: release SMB (shared memory blocks) in various cases

Release all blocks after the pipe is disabled, even when vsync
didn't happen in some error cases. Allow requesting SMB multiple
times before configuring the hardware, by releasing blocks that are
not yet programmed to hardware in the shrinking case.

This fixes a potential leak of shared memory pool blocks.

Signed-off-by: Wentao Xu <wentaox@codeaurora.org>
Tested-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>

Authored by Wentao Xu, committed by Rob Clark
b4cba04f 99fc1bc4

+104 -32
+13
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
··· 76 76 77 77 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 78 78 { 79 + int i; 79 80 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 81 + int nplanes = mdp5_kms->dev->mode_config.num_total_plane; 82 + 83 + for (i = 0; i < nplanes; i++) { 84 + struct drm_plane *plane = state->planes[i]; 85 + struct drm_plane_state *plane_state = state->plane_states[i]; 86 + 87 + if (!plane) 88 + continue; 89 + 90 + mdp5_plane_complete_commit(plane, plane_state); 91 + } 92 + 80 93 mdp5_disable(mdp5_kms); 81 94 } 82 95
+2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
··· 227 227 struct drm_mode_object *obj); 228 228 uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 229 229 void mdp5_plane_complete_flip(struct drm_plane *plane); 230 + void mdp5_plane_complete_commit(struct drm_plane *plane, 231 + struct drm_plane_state *state); 230 232 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 231 233 struct drm_plane *mdp5_plane_init(struct drm_device *dev, 232 234 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
+14 -19
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 31 31 32 32 uint32_t nformats; 33 33 uint32_t formats[32]; 34 - 35 - bool enabled; 36 34 }; 37 35 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 38 36 ··· 52 54 static bool plane_enabled(struct drm_plane_state *state) 53 55 { 54 56 return state->fb && state->crtc; 55 - } 56 - 57 - static int mdp5_plane_disable(struct drm_plane *plane) 58 - { 59 - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 60 - struct mdp5_kms *mdp5_kms = get_kms(plane); 61 - enum mdp5_pipe pipe = mdp5_plane->pipe; 62 - 63 - DBG("%s: disable", mdp5_plane->name); 64 - 65 - if (mdp5_kms) { 66 - /* Release the memory we requested earlier from the SMP: */ 67 - mdp5_smp_release(mdp5_kms->smp, pipe); 68 - } 69 - 70 - return 0; 71 57 } 72 58 73 59 static void mdp5_plane_destroy(struct drm_plane *plane) ··· 206 224 207 225 if (!plane_enabled(state)) { 208 226 to_mdp5_plane_state(state)->pending = true; 209 - mdp5_plane_disable(plane); 210 227 } else if (to_mdp5_plane_state(state)->mode_changed) { 211 228 int ret; 212 229 to_mdp5_plane_state(state)->pending = true; ··· 581 600 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 582 601 583 602 return mdp5_plane->flush_mask; 603 + } 604 + 605 + /* called after vsync in thread context */ 606 + void mdp5_plane_complete_commit(struct drm_plane *plane, 607 + struct drm_plane_state *state) 608 + { 609 + struct mdp5_kms *mdp5_kms = get_kms(plane); 610 + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 611 + enum mdp5_pipe pipe = mdp5_plane->pipe; 612 + 613 + if (!plane_enabled(plane->state)) { 614 + DBG("%s: free SMP", mdp5_plane->name); 615 + mdp5_smp_release(mdp5_kms->smp, pipe); 616 + } 584 617 } 585 618 586 619 /* initialize plane */
+74 -13
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
··· 34 34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). 35 35 * 36 36 * For each block that can be dynamically allocated, it can be either 37 - * free, or pending/in-use by a client. The updates happen in three steps: 37 + * free: 38 + * The block is free. 39 + * 40 + * pending: 41 + * The block is allocated to some client and not free. 42 + * 43 + * configured: 44 + * The block is allocated to some client, and assigned to that 45 + * client in MDP5_MDP_SMP_ALLOC registers. 46 + * 47 + * inuse: 48 + * The block is being actively used by a client. 49 + * 50 + * The updates happen in the following steps: 38 51 * 39 52 * 1) mdp5_smp_request(): 40 53 * When plane scanout is setup, calculate required number of 41 - * blocks needed per client, and request. Blocks not inuse or 42 - * pending by any other client are added to client's pending 43 - * set. 54 + * blocks needed per client, and request. Blocks neither inuse nor 55 + * configured nor pending by any other client are added to client's 56 + * pending set. 57 + * For shrinking, blocks in pending but not in configured can be freed 58 + * directly, but those already in configured will be freed later by 59 + * mdp5_smp_commit. 44 60 * 45 61 * 2) mdp5_smp_configure(): 46 62 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers 47 63 * are configured for the union(pending, inuse) 64 + * Current pending is copied to configured. 65 + * It is assumed that mdp5_smp_request and mdp5_smp_configure not run 66 + * concurrently for the same pipe. 48 67 * 49 68 * 3) mdp5_smp_commit(): 50 - * After next vblank, copy pending -> inuse. Optionally update 69 + * After next vblank, copy configured -> inuse. 
Optionally update 51 70 MDP5_SMP_ALLOC registers if there are newly unused blocks 71 + 72 + 4) mdp5_smp_release(): 73 + Must be called after the pipe is disabled and no longer uses any SMB 52 74 * 53 75 * On the next vblank after changes have been committed to hw, the 54 76 * client's pending blocks become its in-use blocks (and no-longer ··· 98 76 99 77 struct mdp5_client_smp_state client_state[MAX_CLIENTS]; 100 78 }; 79 + 80 + static void update_smp_state(struct mdp5_smp *smp, 81 + u32 cid, mdp5_smp_state_t *assigned); 101 82 102 83 static inline 103 84 struct mdp5_kms *get_kms(struct mdp5_smp *smp) ··· 174 149 for (i = cur_nblks; i > nblks; i--) { 175 150 int blk = find_first_bit(ps->pending, cnt); 176 151 clear_bit(blk, ps->pending); 177 - /* don't clear in global smp_state until _commit() */ 152 + 153 + /* clear in global smp_state if not in configured 154 + * otherwise until _commit() 155 + */ 156 + if (!test_bit(blk, ps->configured)) 157 + clear_bit(blk, smp->state); 178 158 } 179 159 } 180 160 ··· 253 223 /* Release SMP blocks for all clients of the pipe */ 254 224 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) 255 225 { 256 - int i, nblks; 226 + int i; 227 + unsigned long flags; 228 + int cnt = smp->blk_cnt; 257 229 258 - for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++) 259 - smp_request_block(smp, pipe2client(pipe, i), 0); 230 + for (i = 0; i < pipe2nclients(pipe); i++) { 231 + mdp5_smp_state_t assigned; 232 + u32 cid = pipe2client(pipe, i); 233 + struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 234 + 235 + spin_lock_irqsave(&smp->state_lock, flags); 236 + 237 + /* clear hw assignment */ 238 + bitmap_or(assigned, ps->inuse, ps->configured, cnt); 239 + update_smp_state(smp, CID_UNUSED, &assigned); 240 + 241 + /* free to global pool */ 242 + bitmap_andnot(smp->state, smp->state, ps->pending, cnt); 243 + bitmap_andnot(smp->state, smp->state, assigned, cnt); 244 + 245 + /* clear client's info */ 246 + 
bitmap_zero(ps->pending, cnt); 247 + bitmap_zero(ps->configured, cnt); 248 + bitmap_zero(ps->inuse, cnt); 249 + 250 + spin_unlock_irqrestore(&smp->state_lock, flags); 251 + } 252 + 260 253 set_fifo_thresholds(smp, pipe, 0); 261 254 } 262 255 ··· 327 274 u32 cid = pipe2client(pipe, i); 328 275 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 329 276 330 - bitmap_or(assigned, ps->inuse, ps->pending, cnt); 277 + /* 278 + * if vblank has not happened since last smp_configure 279 + * skip the configure for now 280 + */ 281 + if (!bitmap_equal(ps->inuse, ps->configured, cnt)) 282 + continue; 283 + 284 + bitmap_copy(ps->configured, ps->pending, cnt); 285 + bitmap_or(assigned, ps->inuse, ps->configured, cnt); 331 286 update_smp_state(smp, cid, &assigned); 332 287 } 333 288 } 334 289 335 - /* step #3: after vblank, copy pending -> inuse: */ 290 + /* step #3: after vblank, copy configured -> inuse: */ 336 291 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) 337 292 { 338 293 int cnt = smp->blk_cnt; ··· 356 295 * using, which can be released and made available to other 357 296 * clients: 358 297 */ 359 - if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { 298 + if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) { 360 299 unsigned long flags; 361 300 362 301 spin_lock_irqsave(&smp->state_lock, flags); ··· 367 306 update_smp_state(smp, CID_UNUSED, &released); 368 307 } 369 308 370 - bitmap_copy(ps->inuse, ps->pending, cnt); 309 + bitmap_copy(ps->inuse, ps->configured, cnt); 371 310 } 372 311 } 373 312
+1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
··· 23 23 24 24 struct mdp5_client_smp_state { 25 25 mdp5_smp_state_t inuse; 26 + mdp5_smp_state_t configured; 26 27 mdp5_smp_state_t pending; 27 28 }; 28 29