Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next

This pull request includes:
- Code refactoring on HDMI DDC and PHY.
- Regression fixup on deadlock issue with G2D pm integration.
- Fixup on page fault issue with wait_for_vblank mechanism specific to Exynos drm.
- And some cleanups.

* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos:
drm/exynos: g2d: simplify g2d_free_runqueue_node()
drm/exynos: g2d: use autosuspend mode for PM runtime
drm/exynos: g2d: wait for engine to finish
drm/exynos: g2d: remove runqueue nodes in g2d_{close,remove}()
drm/exynos: g2d: move PM management to runqueue worker
Revert "drm/exynos: g2d: fix system and runtime pm integration"
drm/exynos: use drm core to handle page-flip event
drm/exynos: mark exynos_dp_crtc_clock_enable() static
drm/exynos/fimd: add clock rate checking
drm/exynos: fix pending update handling
drm/exynos/vidi: use timer for vblanks instead of sleeping worker
drm/exynos: g2d: beautify probing message
drm/exynos: mixer: simplify loop in vp_win_reset()
drm/exynos: mixer: convert booleans to flags in mixer context
gpu: drm: exynos_hdmi: Remove duplicate initialization of regulator bulk consumer
gpu: drm: exynos_hdmi: Move PHY logic into single function
gpu: drm: exynos_hdmi: Move DDC logic into single function

+354 -328
-11
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
··· 551 551 { 552 552 struct decon_context *ctx = dev_id; 553 553 u32 val; 554 - int win; 555 554 556 555 if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags)) 557 556 goto out; ··· 559 560 val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND; 560 561 561 562 if (val) { 562 - for (win = ctx->first_win; win < WINDOWS_NR ; win++) { 563 - struct exynos_drm_plane *plane = &ctx->planes[win]; 564 - 565 - if (!plane->pending_fb) 566 - continue; 567 - 568 - exynos_drm_crtc_finish_update(ctx->crtc, plane); 569 - } 570 - 571 - /* clear */ 572 563 writel(val, ctx->addr + DECON_VIDINTCON1); 573 564 drm_crtc_handle_vblank(&ctx->crtc->base); 574 565 }
-9
drivers/gpu/drm/exynos/exynos7_drm_decon.c
··· 603 603 { 604 604 struct decon_context *ctx = (struct decon_context *)dev_id; 605 605 u32 val, clear_bit; 606 - int win; 607 606 608 607 val = readl(ctx->regs + VIDINTCON1); 609 608 ··· 616 617 617 618 if (!ctx->i80_if) { 618 619 drm_crtc_handle_vblank(&ctx->crtc->base); 619 - for (win = 0 ; win < WINDOWS_NR ; win++) { 620 - struct exynos_drm_plane *plane = &ctx->planes[win]; 621 - 622 - if (!plane->pending_fb) 623 - continue; 624 - 625 - exynos_drm_crtc_finish_update(ctx->crtc, plane); 626 - } 627 620 628 621 /* set wait vsync event to zero and wake up queue. */ 629 622 if (atomic_read(&ctx->wait_vsync_event)) {
+1 -1
drivers/gpu/drm/exynos/exynos_dp.c
··· 43 43 struct analogix_dp_plat_data plat_data; 44 44 }; 45 45 46 - int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data, 46 + static int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data, 47 47 bool enable) 48 48 { 49 49 struct exynos_dp_device *dp = to_dp(plat_data);
+21 -37
drivers/gpu/drm/exynos/exynos_drm_crtc.c
··· 69 69 { 70 70 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 71 71 72 - exynos_crtc->event = crtc->state->event; 73 - 74 72 if (exynos_crtc->ops->atomic_begin) 75 73 exynos_crtc->ops->atomic_begin(exynos_crtc); 76 74 } ··· 77 79 struct drm_crtc_state *old_crtc_state) 78 80 { 79 81 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 82 + struct drm_pending_vblank_event *event; 83 + unsigned long flags; 80 84 81 85 if (exynos_crtc->ops->atomic_flush) 82 86 exynos_crtc->ops->atomic_flush(exynos_crtc); 87 + 88 + event = crtc->state->event; 89 + if (event) { 90 + crtc->state->event = NULL; 91 + 92 + spin_lock_irqsave(&crtc->dev->event_lock, flags); 93 + if (drm_crtc_vblank_get(crtc) == 0) 94 + drm_crtc_arm_vblank_event(crtc, event); 95 + else 96 + drm_crtc_send_vblank_event(crtc, event); 97 + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 98 + } 99 + 83 100 } 84 101 85 102 static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { ··· 147 134 exynos_crtc->ops = ops; 148 135 exynos_crtc->ctx = ctx; 149 136 150 - init_waitqueue_head(&exynos_crtc->wait_update); 151 - 152 137 crtc = &exynos_crtc->base; 153 138 154 139 private->crtc[pipe] = crtc; ··· 186 175 exynos_crtc->ops->disable_vblank(exynos_crtc); 187 176 } 188 177 189 - void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc) 190 - { 191 - wait_event_timeout(exynos_crtc->wait_update, 192 - (atomic_read(&exynos_crtc->pending_update) == 0), 193 - msecs_to_jiffies(50)); 194 - } 195 - 196 - void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, 197 - struct exynos_drm_plane *exynos_plane) 198 - { 199 - struct drm_crtc *crtc = &exynos_crtc->base; 200 - unsigned long flags; 201 - 202 - exynos_plane->pending_fb = NULL; 203 - 204 - if (atomic_dec_and_test(&exynos_crtc->pending_update)) 205 - wake_up(&exynos_crtc->wait_update); 206 - 207 - spin_lock_irqsave(&crtc->dev->event_lock, flags); 208 - if (exynos_crtc->event) 209 - 
drm_crtc_send_vblank_event(crtc, exynos_crtc->event); 210 - 211 - exynos_crtc->event = NULL; 212 - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 213 - } 214 - 215 178 int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, 216 179 enum exynos_drm_output_type out_type) 217 180 { ··· 213 228 void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, 214 229 struct drm_file *file) 215 230 { 216 - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 217 231 struct drm_pending_vblank_event *e; 218 232 unsigned long flags; 219 233 220 234 spin_lock_irqsave(&crtc->dev->event_lock, flags); 221 235 222 - e = exynos_crtc->event; 223 - if (e && e->base.file_priv == file) { 224 - exynos_crtc->event = NULL; 225 - atomic_dec(&exynos_crtc->pending_update); 226 - } 236 + e = crtc->state->event; 237 + if (e && e->base.file_priv == file) 238 + crtc->state->event = NULL; 239 + else 240 + e = NULL; 227 241 228 242 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 229 243 230 - if (e && e->base.file_priv == file) 244 + if (e) 231 245 drm_event_cancel_free(crtc->dev, &e->base); 232 246 }
+1 -43
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 45 45 u32 crtcs; 46 46 }; 47 47 48 - static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state) 49 - { 50 - struct drm_crtc_state *crtc_state; 51 - struct drm_crtc *crtc; 52 - int i, ret; 53 - 54 - for_each_crtc_in_state(state, crtc, crtc_state, i) { 55 - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 56 - 57 - if (!crtc->state->enable) 58 - continue; 59 - 60 - ret = drm_crtc_vblank_get(crtc); 61 - if (ret) 62 - continue; 63 - 64 - exynos_drm_crtc_wait_pending_update(exynos_crtc); 65 - drm_crtc_vblank_put(crtc); 66 - } 67 - } 68 - 69 48 static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) 70 49 { 71 50 struct drm_device *dev = commit->dev; 72 51 struct exynos_drm_private *priv = dev->dev_private; 73 52 struct drm_atomic_state *state = commit->state; 74 - struct drm_plane *plane; 75 - struct drm_crtc *crtc; 76 - struct drm_plane_state *plane_state; 77 - struct drm_crtc_state *crtc_state; 78 - int i; 79 53 80 54 drm_atomic_helper_commit_modeset_disables(dev, state); 81 55 ··· 63 89 * have the relevant clocks enabled to perform the update. 64 90 */ 65 91 66 - for_each_crtc_in_state(state, crtc, crtc_state, i) { 67 - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); 68 - 69 - atomic_set(&exynos_crtc->pending_update, 0); 70 - } 71 - 72 - for_each_plane_in_state(state, plane, plane_state, i) { 73 - struct exynos_drm_crtc *exynos_crtc = 74 - to_exynos_crtc(plane->crtc); 75 - 76 - if (!plane->crtc) 77 - continue; 78 - 79 - atomic_inc(&exynos_crtc->pending_update); 80 - } 81 - 82 92 drm_atomic_helper_commit_planes(dev, state, 0); 83 93 84 - exynos_atomic_wait_for_commit(state); 94 + drm_atomic_helper_wait_for_vblanks(dev, state); 85 95 86 96 drm_atomic_helper_cleanup_planes(dev, state); 87 97
-4
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 86 86 struct drm_plane base; 87 87 const struct exynos_drm_plane_config *config; 88 88 unsigned int index; 89 - struct drm_framebuffer *pending_fb; 90 89 }; 91 90 92 91 #define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0) ··· 171 172 struct drm_crtc base; 172 173 enum exynos_drm_output_type type; 173 174 unsigned int pipe; 174 - struct drm_pending_vblank_event *event; 175 - wait_queue_head_t wait_update; 176 - atomic_t pending_update; 177 175 const struct exynos_drm_crtc_ops *ops; 178 176 void *ctx; 179 177 struct exynos_drm_clk *pipe_clk;
+30 -26
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 198 198 atomic_t wait_vsync_event; 199 199 atomic_t win_updated; 200 200 atomic_t triggering; 201 + u32 clkdiv; 201 202 202 203 const struct fimd_driver_data *driver_data; 203 204 struct drm_encoder *encoder; ··· 390 389 pm_runtime_put(ctx->dev); 391 390 } 392 391 393 - static u32 fimd_calc_clkdiv(struct fimd_context *ctx, 394 - const struct drm_display_mode *mode) 392 + 393 + static int fimd_atomic_check(struct exynos_drm_crtc *crtc, 394 + struct drm_crtc_state *state) 395 395 { 396 - unsigned long ideal_clk; 396 + struct drm_display_mode *mode = &state->adjusted_mode; 397 + struct fimd_context *ctx = crtc->ctx; 398 + unsigned long ideal_clk, lcd_rate; 397 399 u32 clkdiv; 398 400 399 401 if (mode->clock == 0) { 400 - DRM_ERROR("Mode has zero clock value.\n"); 401 - return 0xff; 402 + DRM_INFO("Mode has zero clock value.\n"); 403 + return -EINVAL; 402 404 } 403 405 404 406 ideal_clk = mode->clock * 1000; ··· 414 410 ideal_clk *= 2; 415 411 } 416 412 417 - /* Find the clock divider value that gets us closest to ideal_clk */ 418 - clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk); 413 + lcd_rate = clk_get_rate(ctx->lcd_clk); 414 + if (2 * lcd_rate < ideal_clk) { 415 + DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n", 416 + lcd_rate, ideal_clk); 417 + return -EINVAL; 418 + } 419 419 420 - return (clkdiv < 0x100) ? clkdiv : 0xff; 420 + /* Find the clock divider value that gets us closest to ideal_clk */ 421 + clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk); 422 + if (clkdiv >= 0x200) { 423 + DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk); 424 + return -EINVAL; 425 + } 426 + 427 + ctx->clkdiv = (clkdiv < 0x100) ? 
clkdiv : 0xff; 428 + 429 + return 0; 421 430 } 422 431 423 432 static void fimd_setup_trigger(struct fimd_context *ctx) ··· 459 442 struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; 460 443 const struct fimd_driver_data *driver_data = ctx->driver_data; 461 444 void *timing_base = ctx->regs + driver_data->timing_base; 462 - u32 val, clkdiv; 445 + u32 val; 463 446 464 447 if (ctx->suspended) 465 448 return; ··· 560 543 if (ctx->driver_data->has_clksel) 561 544 val |= VIDCON0_CLKSEL_LCD; 562 545 563 - clkdiv = fimd_calc_clkdiv(ctx, mode); 564 - if (clkdiv > 1) 565 - val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR; 546 + if (ctx->clkdiv > 1) 547 + val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR; 566 548 567 549 writel(val, ctx->regs + VIDCON0); 568 550 } ··· 955 939 .update_plane = fimd_update_plane, 956 940 .disable_plane = fimd_disable_plane, 957 941 .atomic_flush = fimd_atomic_flush, 942 + .atomic_check = fimd_atomic_check, 958 943 .te_handler = fimd_te_handler, 959 944 }; 960 945 961 946 static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 962 947 { 963 948 struct fimd_context *ctx = (struct fimd_context *)dev_id; 964 - u32 val, clear_bit, start, start_s; 965 - int win; 949 + u32 val, clear_bit; 966 950 967 951 val = readl(ctx->regs + VIDINTCON1); 968 952 ··· 976 960 977 961 if (!ctx->i80_if) 978 962 drm_crtc_handle_vblank(&ctx->crtc->base); 979 - 980 - for (win = 0 ; win < WINDOWS_NR ; win++) { 981 - struct exynos_drm_plane *plane = &ctx->planes[win]; 982 - 983 - if (!plane->pending_fb) 984 - continue; 985 - 986 - start = readl(ctx->regs + VIDWx_BUF_START(win, 0)); 987 - start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0)); 988 - if (start == start_s) 989 - exynos_drm_crtc_finish_update(ctx->crtc, plane); 990 - } 991 963 992 964 if (ctx->i80_if) { 993 965 /* Exits triggering mode */
+190 -51
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 138 138 MAX_REG_TYPE_NR 139 139 }; 140 140 141 + enum g2d_flag_bits { 142 + /* 143 + * If set, suspends the runqueue worker after the currently 144 + * processed node is finished. 145 + */ 146 + G2D_BIT_SUSPEND_RUNQUEUE, 147 + /* 148 + * If set, indicates that the engine is currently busy. 149 + */ 150 + G2D_BIT_ENGINE_BUSY, 151 + }; 152 + 141 153 /* cmdlist data structure */ 142 154 struct g2d_cmdlist { 143 155 u32 head; ··· 238 226 struct workqueue_struct *g2d_workq; 239 227 struct work_struct runqueue_work; 240 228 struct exynos_drm_subdrv subdrv; 241 - bool suspended; 229 + unsigned long flags; 242 230 243 231 /* cmdlist */ 244 232 struct g2d_cmdlist_node *cmdlist_node; ··· 257 245 unsigned long current_pool; 258 246 unsigned long max_pool; 259 247 }; 248 + 249 + static inline void g2d_hw_reset(struct g2d_data *g2d) 250 + { 251 + writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET); 252 + clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 253 + } 260 254 261 255 static int g2d_init_cmdlist(struct g2d_data *g2d) 262 256 { ··· 821 803 struct g2d_cmdlist_node *node = 822 804 list_first_entry(&runqueue_node->run_cmdlist, 823 805 struct g2d_cmdlist_node, list); 824 - int ret; 825 806 826 - ret = pm_runtime_get_sync(g2d->dev); 827 - if (ret < 0) 828 - return; 829 - 807 + set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 830 808 writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); 831 809 writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); 832 810 } ··· 845 831 { 846 832 struct g2d_cmdlist_node *node; 847 833 848 - if (!runqueue_node) 849 - return; 850 - 851 834 mutex_lock(&g2d->cmdlist_mutex); 852 835 /* 853 836 * commands in run_cmdlist have been completed so unmap all gem ··· 858 847 kmem_cache_free(g2d->runqueue_slab, runqueue_node); 859 848 } 860 849 861 - static void g2d_exec_runqueue(struct g2d_data *g2d) 850 + /** 851 + * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes 852 + * @g2d: G2D state object 853 + * @file: if 
not zero, only remove items with this DRM file 854 + * 855 + * Has to be called under runqueue lock. 856 + */ 857 + static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file* file) 862 858 { 863 - g2d->runqueue_node = g2d_get_runqueue_node(g2d); 864 - if (g2d->runqueue_node) 865 - g2d_dma_start(g2d, g2d->runqueue_node); 859 + struct g2d_runqueue_node *node, *n; 860 + 861 + if (list_empty(&g2d->runqueue)) 862 + return; 863 + 864 + list_for_each_entry_safe(node, n, &g2d->runqueue, list) { 865 + if (file && node->filp != file) 866 + continue; 867 + 868 + list_del_init(&node->list); 869 + g2d_free_runqueue_node(g2d, node); 870 + } 866 871 } 867 872 868 873 static void g2d_runqueue_worker(struct work_struct *work) 869 874 { 870 875 struct g2d_data *g2d = container_of(work, struct g2d_data, 871 876 runqueue_work); 877 + struct g2d_runqueue_node *runqueue_node; 878 + 879 + /* 880 + * The engine is busy and the completion of the current node is going 881 + * to poke the runqueue worker, so nothing to do here. 
882 + */ 883 + if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags)) 884 + return; 872 885 873 886 mutex_lock(&g2d->runqueue_mutex); 874 - pm_runtime_put_sync(g2d->dev); 875 887 876 - complete(&g2d->runqueue_node->complete); 877 - if (g2d->runqueue_node->async) 878 - g2d_free_runqueue_node(g2d, g2d->runqueue_node); 888 + runqueue_node = g2d->runqueue_node; 889 + g2d->runqueue_node = NULL; 879 890 880 - if (g2d->suspended) 881 - g2d->runqueue_node = NULL; 882 - else 883 - g2d_exec_runqueue(g2d); 891 + if (runqueue_node) { 892 + pm_runtime_mark_last_busy(g2d->dev); 893 + pm_runtime_put_autosuspend(g2d->dev); 894 + 895 + complete(&runqueue_node->complete); 896 + if (runqueue_node->async) 897 + g2d_free_runqueue_node(g2d, runqueue_node); 898 + } 899 + 900 + if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) { 901 + g2d->runqueue_node = g2d_get_runqueue_node(g2d); 902 + 903 + if (g2d->runqueue_node) { 904 + pm_runtime_get_sync(g2d->dev); 905 + g2d_dma_start(g2d, g2d->runqueue_node); 906 + } 907 + } 908 + 884 909 mutex_unlock(&g2d->runqueue_mutex); 885 910 } 886 911 ··· 965 918 } 966 919 } 967 920 968 - if (pending & G2D_INTP_ACMD_FIN) 921 + if (pending & G2D_INTP_ACMD_FIN) { 922 + clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 969 923 queue_work(g2d->g2d_workq, &g2d->runqueue_work); 924 + } 970 925 971 926 return IRQ_HANDLED; 927 + } 928 + 929 + /** 930 + * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node 931 + * @g2d: G2D state object 932 + * @file: if not zero, only wait if the current runqueue node belongs 933 + * to the DRM file 934 + * 935 + * Should the engine not become idle after a 100ms timeout, a hardware 936 + * reset is issued. 
937 + */ 938 + static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file) 939 + { 940 + struct device *dev = g2d->dev; 941 + 942 + struct g2d_runqueue_node *runqueue_node = NULL; 943 + unsigned int tries = 10; 944 + 945 + mutex_lock(&g2d->runqueue_mutex); 946 + 947 + /* If no node is currently processed, we have nothing to do. */ 948 + if (!g2d->runqueue_node) 949 + goto out; 950 + 951 + runqueue_node = g2d->runqueue_node; 952 + 953 + /* Check if the currently processed item belongs to us. */ 954 + if (file && runqueue_node->filp != file) 955 + goto out; 956 + 957 + mutex_unlock(&g2d->runqueue_mutex); 958 + 959 + /* Wait for the G2D engine to finish. */ 960 + while (tries-- && (g2d->runqueue_node == runqueue_node)) 961 + mdelay(10); 962 + 963 + mutex_lock(&g2d->runqueue_mutex); 964 + 965 + if (g2d->runqueue_node != runqueue_node) 966 + goto out; 967 + 968 + dev_err(dev, "wait timed out, resetting engine...\n"); 969 + g2d_hw_reset(g2d); 970 + 971 + /* 972 + * After the hardware reset of the engine we are going to loose 973 + * the IRQ which triggers the PM runtime put(). 974 + * So do this manually here. 975 + */ 976 + pm_runtime_mark_last_busy(dev); 977 + pm_runtime_put_autosuspend(dev); 978 + 979 + complete(&runqueue_node->complete); 980 + if (runqueue_node->async) 981 + g2d_free_runqueue_node(g2d, runqueue_node); 982 + 983 + out: 984 + mutex_unlock(&g2d->runqueue_mutex); 972 985 } 973 986 974 987 static int g2d_check_reg_offset(struct device *dev, ··· 1366 1259 runqueue_node->pid = current->pid; 1367 1260 runqueue_node->filp = file; 1368 1261 list_add_tail(&runqueue_node->list, &g2d->runqueue); 1369 - if (!g2d->runqueue_node) 1370 - g2d_exec_runqueue(g2d); 1371 1262 mutex_unlock(&g2d->runqueue_mutex); 1263 + 1264 + /* Let the runqueue know that there is work to do. 
*/ 1265 + queue_work(g2d->g2d_workq, &g2d->runqueue_work); 1372 1266 1373 1267 if (runqueue_node->async) 1374 1268 goto out; ··· 1447 1339 if (!g2d) 1448 1340 return; 1449 1341 1342 + /* Remove the runqueue nodes that belong to us. */ 1343 + mutex_lock(&g2d->runqueue_mutex); 1344 + g2d_remove_runqueue_nodes(g2d, file); 1345 + mutex_unlock(&g2d->runqueue_mutex); 1346 + 1347 + /* 1348 + * Wait for the runqueue worker to finish its current node. 1349 + * After this the engine should no longer be accessing any 1350 + * memory belonging to us. 1351 + */ 1352 + g2d_wait_finish(g2d, file); 1353 + 1354 + /* 1355 + * Even after the engine is idle, there might still be stale cmdlists 1356 + * (i.e. cmdlisst which we submitted but never executed) around, with 1357 + * their corresponding GEM/userptr buffers. 1358 + * Properly unmap these buffers here. 1359 + */ 1450 1360 mutex_lock(&g2d->cmdlist_mutex); 1451 1361 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) { 1452 - /* 1453 - * unmap all gem objects not completed. 1454 - * 1455 - * P.S. if current process was terminated forcely then 1456 - * there may be some commands in inuse_cmdlist so unmap 1457 - * them. 
1458 - */ 1459 1362 g2d_unmap_cmdlist_gem(g2d, node, file); 1460 1363 list_move_tail(&node->list, &g2d->free_cmdlist); 1461 1364 } ··· 1518 1399 goto err_destroy_workqueue; 1519 1400 } 1520 1401 1402 + pm_runtime_use_autosuspend(dev); 1403 + pm_runtime_set_autosuspend_delay(dev, 2000); 1521 1404 pm_runtime_enable(dev); 1405 + clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1406 + clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 1522 1407 1523 1408 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1524 1409 ··· 1563 1440 goto err_put_clk; 1564 1441 } 1565 1442 1566 - dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n", 1443 + dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n", 1567 1444 G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER); 1568 1445 1569 1446 return 0; ··· 1581 1458 { 1582 1459 struct g2d_data *g2d = platform_get_drvdata(pdev); 1583 1460 1461 + /* Suspend operation and wait for engine idle. */ 1462 + set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1463 + g2d_wait_finish(g2d, NULL); 1464 + 1584 1465 cancel_work_sync(&g2d->runqueue_work); 1585 1466 exynos_drm_subdrv_unregister(&g2d->subdrv); 1586 1467 1587 - while (g2d->runqueue_node) { 1588 - g2d_free_runqueue_node(g2d, g2d->runqueue_node); 1589 - g2d->runqueue_node = g2d_get_runqueue_node(g2d); 1590 - } 1468 + /* There should be no locking needed here. */ 1469 + g2d_remove_runqueue_nodes(g2d, NULL); 1591 1470 1471 + pm_runtime_dont_use_autosuspend(&pdev->dev); 1592 1472 pm_runtime_disable(&pdev->dev); 1593 1473 1594 1474 g2d_fini_cmdlist(g2d); ··· 1601 1475 return 0; 1602 1476 } 1603 1477 1478 + #ifdef CONFIG_PM_SLEEP 1479 + static int g2d_suspend(struct device *dev) 1480 + { 1481 + struct g2d_data *g2d = dev_get_drvdata(dev); 1482 + 1483 + /* 1484 + * Suspend the runqueue worker operation and wait until the G2D 1485 + * engine is idle. 
1486 + */ 1487 + set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1488 + g2d_wait_finish(g2d, NULL); 1489 + flush_work(&g2d->runqueue_work); 1490 + 1491 + return 0; 1492 + } 1493 + 1494 + static int g2d_resume(struct device *dev) 1495 + { 1496 + struct g2d_data *g2d = dev_get_drvdata(dev); 1497 + 1498 + clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1499 + queue_work(g2d->g2d_workq, &g2d->runqueue_work); 1500 + 1501 + return 0; 1502 + } 1503 + #endif 1504 + 1604 1505 #ifdef CONFIG_PM 1605 1506 static int g2d_runtime_suspend(struct device *dev) 1606 1507 { 1607 1508 struct g2d_data *g2d = dev_get_drvdata(dev); 1608 - 1609 - mutex_lock(&g2d->runqueue_mutex); 1610 - g2d->suspended = true; 1611 - mutex_unlock(&g2d->runqueue_mutex); 1612 - 1613 - while (g2d->runqueue_node) 1614 - /* FIXME: good range? */ 1615 - usleep_range(500, 1000); 1616 - 1617 - flush_work(&g2d->runqueue_work); 1618 1509 1619 1510 clk_disable_unprepare(g2d->gate_clk); 1620 1511 ··· 1647 1504 if (ret < 0) 1648 1505 dev_warn(dev, "failed to enable clock.\n"); 1649 1506 1650 - g2d->suspended = false; 1651 - g2d_exec_runqueue(g2d); 1652 - 1653 1507 return ret; 1654 1508 } 1655 1509 #endif 1656 1510 1657 1511 static const struct dev_pm_ops g2d_pm_ops = { 1658 - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 1659 - pm_runtime_force_resume) 1512 + SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) 1660 1513 SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) 1661 1514 }; 1662 1515
-1
drivers/gpu/drm/exynos/exynos_drm_plane.c
··· 238 238 return; 239 239 240 240 plane->crtc = state->crtc; 241 - exynos_plane->pending_fb = state->fb; 242 241 243 242 if (exynos_crtc->ops->update_plane) 244 243 exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane);
+20 -56
drivers/gpu/drm/exynos/exynos_drm_vidi.c
··· 15 15 #include <linux/kernel.h> 16 16 #include <linux/platform_device.h> 17 17 #include <linux/component.h> 18 + #include <linux/timer.h> 18 19 19 20 #include <drm/exynos_drm.h> 20 21 ··· 28 27 #include "exynos_drm_fb.h" 29 28 #include "exynos_drm_plane.h" 30 29 #include "exynos_drm_vidi.h" 30 + 31 + /* VIDI uses fixed refresh rate of 50Hz */ 32 + #define VIDI_REFRESH_TIME (1000 / 50) 31 33 32 34 /* vidi has totally three virtual windows. */ 33 35 #define WINDOWS_NR 3 ··· 47 43 struct exynos_drm_plane planes[WINDOWS_NR]; 48 44 struct edid *raw_edid; 49 45 unsigned int clkdiv; 50 - unsigned long irq_flags; 51 46 unsigned int connected; 52 - bool vblank_on; 53 47 bool suspended; 54 - bool direct_vblank; 55 - struct work_struct work; 48 + struct timer_list timer; 56 49 struct mutex lock; 57 50 int pipe; 58 51 }; ··· 103 102 if (ctx->suspended) 104 103 return -EPERM; 105 104 106 - if (!test_and_set_bit(0, &ctx->irq_flags)) 107 - ctx->vblank_on = true; 108 - 109 - ctx->direct_vblank = true; 110 - 111 - /* 112 - * in case of page flip request, vidi_finish_pageflip function 113 - * will not be called because direct_vblank is true and then 114 - * that function will be called by crtc_ops->update_plane callback 115 - */ 116 - schedule_work(&ctx->work); 105 + mod_timer(&ctx->timer, 106 + jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1); 117 107 118 108 return 0; 119 109 } 120 110 121 111 static void vidi_disable_vblank(struct exynos_drm_crtc *crtc) 122 112 { 123 - struct vidi_context *ctx = crtc->ctx; 124 - 125 - if (ctx->suspended) 126 - return; 127 - 128 - if (test_and_clear_bit(0, &ctx->irq_flags)) 129 - ctx->vblank_on = false; 130 113 } 131 114 132 115 static void vidi_update_plane(struct exynos_drm_crtc *crtc, ··· 125 140 126 141 addr = exynos_drm_fb_dma_addr(state->fb, 0); 127 142 DRM_DEBUG_KMS("dma_addr = %pad\n", &addr); 128 - 129 - if (ctx->vblank_on) 130 - schedule_work(&ctx->work); 131 143 } 132 144 133 145 static void vidi_enable(struct exynos_drm_crtc 
*crtc) ··· 135 153 136 154 ctx->suspended = false; 137 155 138 - /* if vblank was enabled status, enable it again. */ 139 - if (test_and_clear_bit(0, &ctx->irq_flags)) 140 - vidi_enable_vblank(ctx->crtc); 141 - 142 156 mutex_unlock(&ctx->lock); 157 + 158 + drm_crtc_vblank_on(&crtc->base); 143 159 } 144 160 145 161 static void vidi_disable(struct exynos_drm_crtc *crtc) 146 162 { 147 163 struct vidi_context *ctx = crtc->ctx; 164 + 165 + drm_crtc_vblank_off(&crtc->base); 148 166 149 167 mutex_lock(&ctx->lock); 150 168 ··· 172 190 .update_plane = vidi_update_plane, 173 191 }; 174 192 175 - static void vidi_fake_vblank_handler(struct work_struct *work) 193 + static void vidi_fake_vblank_timer(unsigned long arg) 176 194 { 177 - struct vidi_context *ctx = container_of(work, struct vidi_context, 178 - work); 179 - int win; 195 + struct vidi_context *ctx = (void *)arg; 180 196 181 197 if (ctx->pipe < 0) 182 198 return; 183 199 184 - /* refresh rate is about 50Hz. */ 185 - usleep_range(16000, 20000); 186 - 187 - mutex_lock(&ctx->lock); 188 - 189 - if (ctx->direct_vblank) { 190 - drm_crtc_handle_vblank(&ctx->crtc->base); 191 - ctx->direct_vblank = false; 192 - mutex_unlock(&ctx->lock); 193 - return; 194 - } 195 - 196 - mutex_unlock(&ctx->lock); 197 - 198 - for (win = 0 ; win < WINDOWS_NR ; win++) { 199 - struct exynos_drm_plane *plane = &ctx->planes[win]; 200 - 201 - if (!plane->pending_fb) 202 - continue; 203 - 204 - exynos_drm_crtc_finish_update(ctx->crtc, plane); 205 - } 200 + if (drm_crtc_handle_vblank(&ctx->crtc->base)) 201 + mod_timer(&ctx->timer, 202 + jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1); 206 203 } 207 204 208 205 static ssize_t vidi_show_connection(struct device *dev, ··· 450 489 451 490 static void vidi_unbind(struct device *dev, struct device *master, void *data) 452 491 { 492 + struct vidi_context *ctx = dev_get_drvdata(dev); 493 + 494 + del_timer_sync(&ctx->timer); 453 495 } 454 496 455 497 static const struct component_ops vidi_component_ops = { 
··· 471 507 472 508 ctx->pdev = pdev; 473 509 474 - INIT_WORK(&ctx->work, vidi_fake_vblank_handler); 510 + setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx); 475 511 476 512 mutex_init(&ctx->lock); 477 513
+59 -53
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 1669 1669 if (ret) 1670 1670 return ret; 1671 1671 1672 - for (i = 0; i < ARRAY_SIZE(supply); ++i) { 1672 + for (i = 0; i < ARRAY_SIZE(supply); ++i) 1673 1673 hdata->regul_bulk[i].supply = supply[i]; 1674 - hdata->regul_bulk[i].consumer = NULL; 1675 - } 1674 + 1676 1675 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk); 1677 1676 if (ret) { 1678 1677 if (ret != -EPROBE_DEFER) ··· 1759 1760 .unbind = hdmi_unbind, 1760 1761 }; 1761 1762 1762 - static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev) 1763 + static int hdmi_get_ddc_adapter(struct hdmi_context *hdata) 1763 1764 { 1764 1765 const char *compatible_str = "samsung,exynos4210-hdmiddc"; 1765 1766 struct device_node *np; 1767 + struct i2c_adapter *adpt; 1766 1768 1767 1769 np = of_find_compatible_node(NULL, NULL, compatible_str); 1768 1770 if (np) 1769 - return of_get_next_parent(np); 1771 + np = of_get_next_parent(np); 1772 + else 1773 + np = of_parse_phandle(hdata->dev->of_node, "ddc", 0); 1770 1774 1771 - return NULL; 1775 + if (!np) { 1776 + DRM_ERROR("Failed to find ddc node in device tree\n"); 1777 + return -ENODEV; 1778 + } 1779 + 1780 + adpt = of_find_i2c_adapter_by_node(np); 1781 + of_node_put(np); 1782 + 1783 + if (!adpt) { 1784 + DRM_INFO("Failed to get ddc i2c adapter by node\n"); 1785 + return -EPROBE_DEFER; 1786 + } 1787 + 1788 + hdata->ddc_adpt = adpt; 1789 + 1790 + return 0; 1772 1791 } 1773 1792 1774 - static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev) 1793 + static int hdmi_get_phy_io(struct hdmi_context *hdata) 1775 1794 { 1776 1795 const char *compatible_str = "samsung,exynos4212-hdmiphy"; 1796 + struct device_node *np; 1797 + int ret = 0; 1777 1798 1778 - return of_find_compatible_node(NULL, NULL, compatible_str); 1799 + np = of_find_compatible_node(NULL, NULL, compatible_str); 1800 + if (!np) { 1801 + np = of_parse_phandle(hdata->dev->of_node, "phy", 0); 1802 + if (!np) { 1803 + DRM_ERROR("Failed to find hdmiphy node in 
device tree\n"); 1804 + return -ENODEV; 1805 + } 1806 + } 1807 + 1808 + if (hdata->drv_data->is_apb_phy) { 1809 + hdata->regs_hdmiphy = of_iomap(np, 0); 1810 + if (!hdata->regs_hdmiphy) { 1811 + DRM_ERROR("failed to ioremap hdmi phy\n"); 1812 + ret = -ENOMEM; 1813 + goto out; 1814 + } 1815 + } else { 1816 + hdata->hdmiphy_port = of_find_i2c_device_by_node(np); 1817 + if (!hdata->hdmiphy_port) { 1818 + DRM_INFO("Failed to get hdmi phy i2c client\n"); 1819 + ret = -EPROBE_DEFER; 1820 + goto out; 1821 + } 1822 + } 1823 + 1824 + out: 1825 + of_node_put(np); 1826 + return ret; 1779 1827 } 1780 1828 1781 1829 static int hdmi_probe(struct platform_device *pdev) 1782 1830 { 1783 - struct device_node *ddc_node, *phy_node; 1784 1831 struct device *dev = &pdev->dev; 1785 1832 struct hdmi_context *hdata; 1786 1833 struct resource *res; ··· 1856 1811 return ret; 1857 1812 } 1858 1813 1859 - ddc_node = hdmi_legacy_ddc_dt_binding(dev); 1860 - if (ddc_node) 1861 - goto out_get_ddc_adpt; 1814 + ret = hdmi_get_ddc_adapter(hdata); 1815 + if (ret) 1816 + return ret; 1862 1817 1863 - ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); 1864 - if (!ddc_node) { 1865 - DRM_ERROR("Failed to find ddc node in device tree\n"); 1866 - return -ENODEV; 1867 - } 1868 - of_node_put(dev->of_node); 1869 - 1870 - out_get_ddc_adpt: 1871 - hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); 1872 - if (!hdata->ddc_adpt) { 1873 - DRM_ERROR("Failed to get ddc i2c adapter by node\n"); 1874 - return -EPROBE_DEFER; 1875 - } 1876 - 1877 - phy_node = hdmi_legacy_phy_dt_binding(dev); 1878 - if (phy_node) 1879 - goto out_get_phy_port; 1880 - 1881 - phy_node = of_parse_phandle(dev->of_node, "phy", 0); 1882 - if (!phy_node) { 1883 - DRM_ERROR("Failed to find hdmiphy node in device tree\n"); 1884 - ret = -ENODEV; 1818 + ret = hdmi_get_phy_io(hdata); 1819 + if (ret) 1885 1820 goto err_ddc; 1886 - } 1887 - of_node_put(dev->of_node); 1888 - 1889 - out_get_phy_port: 1890 - if (hdata->drv_data->is_apb_phy) { 1891 - 
hdata->regs_hdmiphy = of_iomap(phy_node, 0); 1892 - if (!hdata->regs_hdmiphy) { 1893 - DRM_ERROR("failed to ioremap hdmi phy\n"); 1894 - ret = -ENOMEM; 1895 - goto err_ddc; 1896 - } 1897 - } else { 1898 - hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node); 1899 - if (!hdata->hdmiphy_port) { 1900 - DRM_ERROR("Failed to get hdmi phy i2c client\n"); 1901 - ret = -EPROBE_DEFER; 1902 - goto err_ddc; 1903 - } 1904 - } 1905 1821 1906 1822 INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); 1907 1823
+32 -36
drivers/gpu/drm/exynos/exynos_mixer.c
··· 73 73 enum mixer_flag_bits { 74 74 MXR_BIT_POWERED, 75 75 MXR_BIT_VSYNC, 76 + MXR_BIT_INTERLACE, 77 + MXR_BIT_VP_ENABLED, 78 + MXR_BIT_HAS_SCLK, 76 79 }; 77 80 78 81 static const uint32_t mixer_formats[] = { ··· 101 98 struct exynos_drm_plane planes[MIXER_WIN_NR]; 102 99 int pipe; 103 100 unsigned long flags; 104 - bool interlace; 105 - bool vp_enabled; 106 - bool has_sclk; 107 101 108 102 struct mixer_resources mixer_res; 109 103 enum mixer_version_id mxr_ver; ··· 346 346 mixer_reg_writemask(res, MXR_STATUS, enable ? 347 347 MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); 348 348 349 - if (ctx->vp_enabled) 349 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) 350 350 vp_reg_write(res, VP_SHADOW_UPDATE, enable ? 351 351 VP_SHADOW_UPDATE_ENABLE : 0); 352 352 } ··· 357 357 u32 val; 358 358 359 359 /* choosing between interlace and progressive mode */ 360 - val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE : 361 - MXR_CFG_SCAN_PROGRESSIVE); 360 + val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
361 + MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE; 362 362 363 363 if (ctx->mxr_ver != MXR_VER_128_0_0_184) { 364 364 /* choosing between proper HD and SD mode */ ··· 436 436 mixer_reg_writemask(res, MXR_LAYER_CFG, 437 437 MXR_LAYER_CFG_GRP1_VAL(priority), 438 438 MXR_LAYER_CFG_GRP1_MASK); 439 + 439 440 break; 440 441 case VP_DEFAULT_WIN: 441 - if (ctx->vp_enabled) { 442 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { 442 443 vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); 443 444 mixer_reg_writemask(res, MXR_CFG, val, 444 445 MXR_CFG_VP_ENABLE); ··· 502 501 chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1); 503 502 504 503 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 505 - ctx->interlace = true; 504 + __set_bit(MXR_BIT_INTERLACE, &ctx->flags); 506 505 if (tiled_mode) { 507 506 luma_addr[1] = luma_addr[0] + 0x40; 508 507 chroma_addr[1] = chroma_addr[0] + 0x40; ··· 511 510 chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; 512 511 } 513 512 } else { 514 - ctx->interlace = false; 513 + __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); 515 514 luma_addr[1] = 0; 516 515 chroma_addr[1] = 0; 517 516 } ··· 519 518 spin_lock_irqsave(&res->reg_slock, flags); 520 519 521 520 /* interlace or progressive scan mode */ 522 - val = (ctx->interlace ? ~0 : 0); 521 + val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
~0 : 0); 523 522 vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP); 524 523 525 524 /* setup format */ ··· 542 541 543 542 vp_reg_write(res, VP_DST_WIDTH, state->crtc.w); 544 543 vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x); 545 - if (ctx->interlace) { 544 + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 546 545 vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2); 547 546 vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2); 548 547 } else { ··· 637 636 src_y_offset = 0; 638 637 639 638 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 640 - ctx->interlace = true; 639 + __set_bit(MXR_BIT_INTERLACE, &ctx->flags); 641 640 else 642 - ctx->interlace = false; 641 + __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); 643 642 644 643 spin_lock_irqsave(&res->reg_slock, flags); 645 644 ··· 698 697 static void vp_win_reset(struct mixer_context *ctx) 699 698 { 700 699 struct mixer_resources *res = &ctx->mixer_res; 701 - int tries = 100; 700 + unsigned int tries = 100; 702 701 703 702 vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING); 704 - for (tries = 100; tries; --tries) { 703 + while (tries--) { 705 704 /* waiting until VP_SRESET_PROCESSING is 0 */ 706 705 if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) 707 706 break; ··· 734 733 mixer_reg_write(res, MXR_BG_COLOR1, 0x008080); 735 734 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); 736 735 737 - if (ctx->vp_enabled) { 736 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { 738 737 /* configuration of Video Processor Registers */ 739 738 vp_win_reset(ctx); 740 739 vp_default_filter(res); ··· 743 742 /* disable all layers */ 744 743 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); 745 744 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); 746 - if (ctx->vp_enabled) 745 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) 747 746 mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); 748 747 749 748 spin_unlock_irqrestore(&res->reg_slock, flags); ··· 754 753 struct mixer_context *ctx = arg; 755 754 
struct mixer_resources *res = &ctx->mixer_res; 756 755 u32 val, base, shadow; 757 - int win; 758 756 759 757 spin_lock(&res->reg_slock); 760 758 ··· 767 767 val &= ~MXR_INT_STATUS_VSYNC; 768 768 769 769 /* interlace scan need to check shadow register */ 770 - if (ctx->interlace) { 770 + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 771 771 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); 772 772 shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0)); 773 773 if (base != shadow) ··· 780 780 } 781 781 782 782 drm_crtc_handle_vblank(&ctx->crtc->base); 783 - for (win = 0 ; win < MIXER_WIN_NR ; win++) { 784 - struct exynos_drm_plane *plane = &ctx->planes[win]; 785 - 786 - if (!plane->pending_fb) 787 - continue; 788 - 789 - exynos_drm_crtc_finish_update(ctx->crtc, plane); 790 - } 791 783 } 792 784 793 785 out: ··· 859 867 return -ENODEV; 860 868 } 861 869 862 - if (mixer_ctx->has_sclk) { 870 + if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) { 863 871 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); 864 872 if (IS_ERR(mixer_res->sclk_mixer)) { 865 873 dev_err(dev, "failed to get clock 'sclk_mixer'\n"); ··· 909 917 return ret; 910 918 } 911 919 912 - if (mixer_ctx->vp_enabled) { 920 + if (test_bit(MXR_BIT_VP_ENABLED, &mixer_ctx->flags)) { 913 921 /* acquire vp resources: regs, irqs, clocks */ 914 922 ret = vp_resources_init(mixer_ctx); 915 923 if (ret) { ··· 1152 1160 return ret; 1153 1161 1154 1162 for (i = 0; i < MIXER_WIN_NR; i++) { 1155 - if (i == VP_DEFAULT_WIN && !ctx->vp_enabled) 1163 + if (i == VP_DEFAULT_WIN && !test_bit(MXR_BIT_VP_ENABLED, 1164 + &ctx->flags)) 1156 1165 continue; 1157 1166 1158 1167 ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, ··· 1208 1215 1209 1216 ctx->pdev = pdev; 1210 1217 ctx->dev = dev; 1211 - ctx->vp_enabled = drv->is_vp_enabled; 1212 - ctx->has_sclk = drv->has_sclk; 1213 1218 ctx->mxr_ver = drv->version; 1219 + 1220 + if (drv->is_vp_enabled) 1221 + __set_bit(MXR_BIT_VP_ENABLED, &ctx->flags); 1222 + if (drv->has_sclk) 
1223 + __set_bit(MXR_BIT_HAS_SCLK, &ctx->flags); 1214 1224 1215 1225 platform_set_drvdata(pdev, ctx); 1216 1226 ··· 1240 1244 1241 1245 clk_disable_unprepare(res->hdmi); 1242 1246 clk_disable_unprepare(res->mixer); 1243 - if (ctx->vp_enabled) { 1247 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { 1244 1248 clk_disable_unprepare(res->vp); 1245 - if (ctx->has_sclk) 1249 + if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) 1246 1250 clk_disable_unprepare(res->sclk_mixer); 1247 1251 } 1248 1252 ··· 1265 1269 DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); 1266 1270 return ret; 1267 1271 } 1268 - if (ctx->vp_enabled) { 1272 + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { 1269 1273 ret = clk_prepare_enable(res->vp); 1270 1274 if (ret < 0) { 1271 1275 DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n", 1272 1276 ret); 1273 1277 return ret; 1274 1278 } 1275 - if (ctx->has_sclk) { 1279 + if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) { 1276 1280 ret = clk_prepare_enable(res->sclk_mixer); 1277 1281 if (ret < 0) { 1278 1282 DRM_ERROR("Failed to prepare_enable the " \