Merge tag 'drm/tegra/for-5.13-rc1' of ssh://git.freedesktop.org/git/tegra/linux into drm-next

drm/tegra: Changes for v5.13-rc1

The changes this time around contain a couple of fixes for host1x along
with some improvements for Tegra DRM. Most notably, the Tegra DRM driver
now supports the hardware cursor on Tegra186 and later, more accurately
reflects the capabilities of the display pipelines on the various Tegra
SoC generations, and knows how to deal with the dGPU sector layout (on
Tegra194) by using framebuffer modifiers.
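
For reference, the dGPU sector layout is signalled through bit 22 of the
NVIDIA block-linear framebuffer modifiers (see the drm.h, fb.c and hub.c
hunks below). The snippet below is purely illustrative and not part of
this series; it sketches how such a modifier can be decoded, along the
lines of the fb.c change, with the helper name made up for the example:

    /*
     * Illustrative sketch only: report whether an NVIDIA block-linear
     * modifier requests the dGPU sector layout, mirroring the check
     * added to drivers/gpu/drm/tegra/fb.c below.
     */
    #include <linux/bits.h>
    #include <linux/types.h>
    #include <drm/drm_fourcc.h>

    /* as added to drivers/gpu/drm/tegra/drm.h in this series */
    #define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)

    static bool nvidia_modifier_uses_gpu_sector_layout(u64 modifier)
    {
            /* the vendor code occupies the top eight bits of the modifier */
            if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_NVIDIA)
                    return false;

            return (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) != 0;
    }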

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thierry Reding <thierry.reding@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210401164430.3349105-1-thierry.reding@gmail.com

+459 -149
+103 -10
drivers/gpu/drm/tegra/dc.c
··· 832 return &plane->base; 833 } 834 835 - static const u32 tegra_cursor_plane_formats[] = { 836 DRM_FORMAT_RGBA8888, 837 }; 838 839 static int tegra_cursor_atomic_check(struct drm_plane *plane, ··· 879 plane); 880 struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state); 881 struct tegra_dc *dc = to_tegra_dc(new_state->crtc); 882 - u32 value = CURSOR_CLIP_DISPLAY; 883 884 /* rien ne va plus */ 885 if (!new_state->crtc || !new_state->fb) 886 return; 887 888 switch (new_state->crtc_w) { 889 case 32: ··· 924 tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR); 925 926 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 927 - value = (tegra_plane_state->iova[0] >> 32) & 0x3; 928 tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI); 929 #endif 930 ··· 936 value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL); 937 value &= ~CURSOR_DST_BLEND_MASK; 938 value &= ~CURSOR_SRC_BLEND_MASK; 939 - value |= CURSOR_MODE_NORMAL; 940 value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC; 941 value |= CURSOR_SRC_BLEND_K1_TIMES_SRC; 942 value |= CURSOR_ALPHA; 943 tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL); 944 945 /* position the cursor */ 946 - value = (new_state->crtc_y & 0x3fff) << 16 | 947 - (new_state->crtc_x & 0x3fff); 948 tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); 949 } 950 ··· 1022 plane->index = 6; 1023 plane->dc = dc; 1024 1025 - num_formats = ARRAY_SIZE(tegra_cursor_plane_formats); 1026 - formats = tegra_cursor_plane_formats; 1027 1028 err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, 1029 &tegra_plane_funcs, formats, ··· 2080 return false; 2081 } 2082 2083 static int tegra_dc_init(struct host1x_client *client) 2084 { 2085 struct drm_device *drm = dev_get_drvdata(client->host); ··· 2099 struct drm_plane *primary = NULL; 2100 struct drm_plane *cursor = NULL; 2101 int err; 2102 2103 /* 2104 * XXX do not register DCs with no window groups because we cannot ··· 2172 if (dc->soc->pitch_align > tegra->pitch_align) 2173 tegra->pitch_align = dc->soc->pitch_align; 2174 2175 err = tegra_dc_rgb_init(drm, dc); 2176 if (err < 0 && err != -ENODEV) { 2177 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); ··· 2208 drm_plane_cleanup(primary); 2209 2210 host1x_client_iommu_detach(client); 2211 - host1x_syncpt_free(dc->syncpt); 2212 2213 return err; 2214 } ··· 2233 } 2234 2235 host1x_client_iommu_detach(client); 2236 - host1x_syncpt_free(dc->syncpt); 2237 2238 return 0; 2239 } ··· 2312 } 2313 2314 static const struct host1x_client_ops dc_client_ops = { 2315 .init = tegra_dc_init, 2316 .exit = tegra_dc_exit, 2317 .suspend = tegra_dc_runtime_suspend, 2318 .resume = tegra_dc_runtime_resume, 2319 }; ··· 2325 .supports_interlacing = false, 2326 .supports_cursor = false, 2327 .supports_block_linear = false, 2328 .has_legacy_blending = true, 2329 .pitch_align = 8, 2330 .has_powergate = false, ··· 2345 .supports_interlacing = false, 2346 .supports_cursor = false, 2347 .supports_block_linear = false, 2348 .has_legacy_blending = true, 2349 .pitch_align = 8, 2350 .has_powergate = false, ··· 2365 .supports_interlacing = false, 2366 .supports_cursor = false, 2367 .supports_block_linear = false, 2368 .has_legacy_blending = true, 2369 .pitch_align = 64, 2370 .has_powergate = true, ··· 2385 .supports_interlacing = true, 2386 .supports_cursor = true, 2387 .supports_block_linear = true, 2388 .has_legacy_blending = false, 2389 .pitch_align = 64, 2390 .has_powergate = true, ··· 2405 .supports_interlacing = true, 2406 .supports_cursor = true, 2407 .supports_block_linear = true, 
2408 .has_legacy_blending = false, 2409 .pitch_align = 64, 2410 .has_powergate = true, ··· 2459 .supports_interlacing = true, 2460 .supports_cursor = true, 2461 .supports_block_linear = true, 2462 .has_legacy_blending = false, 2463 .pitch_align = 64, 2464 .has_powergate = false, ··· 2508 .supports_interlacing = true, 2509 .supports_cursor = true, 2510 .supports_block_linear = true, 2511 .has_legacy_blending = false, 2512 .pitch_align = 64, 2513 .has_powergate = false, ··· 2618 2619 static int tegra_dc_probe(struct platform_device *pdev) 2620 { 2621 struct tegra_dc *dc; 2622 int err; 2623 2624 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); 2625 if (!dc)
··· 832 return &plane->base; 833 } 834 835 + static const u32 tegra_legacy_cursor_plane_formats[] = { 836 DRM_FORMAT_RGBA8888, 837 + }; 838 + 839 + static const u32 tegra_cursor_plane_formats[] = { 840 + DRM_FORMAT_ARGB8888, 841 }; 842 843 static int tegra_cursor_atomic_check(struct drm_plane *plane, ··· 875 plane); 876 struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state); 877 struct tegra_dc *dc = to_tegra_dc(new_state->crtc); 878 + struct tegra_drm *tegra = plane->dev->dev_private; 879 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 880 + u64 dma_mask = *dc->dev->dma_mask; 881 + #endif 882 + unsigned int x, y; 883 + u32 value = 0; 884 885 /* rien ne va plus */ 886 if (!new_state->crtc || !new_state->fb) 887 return; 888 + 889 + /* 890 + * Legacy display supports hardware clipping of the cursor, but 891 + * nvdisplay relies on software to clip the cursor to the screen. 892 + */ 893 + if (!dc->soc->has_nvdisplay) 894 + value |= CURSOR_CLIP_DISPLAY; 895 896 switch (new_state->crtc_w) { 897 case 32: ··· 908 tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR); 909 910 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 911 + value = (tegra_plane_state->iova[0] >> 32) & (dma_mask >> 32); 912 tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI); 913 #endif 914 ··· 920 value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL); 921 value &= ~CURSOR_DST_BLEND_MASK; 922 value &= ~CURSOR_SRC_BLEND_MASK; 923 + 924 + if (dc->soc->has_nvdisplay) 925 + value &= ~CURSOR_COMPOSITION_MODE_XOR; 926 + else 927 + value |= CURSOR_MODE_NORMAL; 928 + 929 value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC; 930 value |= CURSOR_SRC_BLEND_K1_TIMES_SRC; 931 value |= CURSOR_ALPHA; 932 tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL); 933 934 + /* nvdisplay relies on software for clipping */ 935 + if (dc->soc->has_nvdisplay) { 936 + struct drm_rect src; 937 + 938 + x = new_state->dst.x1; 939 + y = new_state->dst.y1; 940 + 941 + drm_rect_fp_to_int(&src, &new_state->src); 942 + 943 + value = (src.y1 & tegra->vmask) << 16 | (src.x1 & tegra->hmask); 944 + tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR); 945 + 946 + value = (drm_rect_height(&src) & tegra->vmask) << 16 | 947 + (drm_rect_width(&src) & tegra->hmask); 948 + tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR); 949 + } else { 950 + x = new_state->crtc_x; 951 + y = new_state->crtc_y; 952 + } 953 + 954 /* position the cursor */ 955 + value = ((y & tegra->vmask) << 16) | (x & tegra->hmask); 956 tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION); 957 } 958 ··· 982 plane->index = 6; 983 plane->dc = dc; 984 985 + if (!dc->soc->has_nvdisplay) { 986 + num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats); 987 + formats = tegra_legacy_cursor_plane_formats; 988 + } else { 989 + num_formats = ARRAY_SIZE(tegra_cursor_plane_formats); 990 + formats = tegra_cursor_plane_formats; 991 + } 992 993 err = drm_universal_plane_init(drm, &plane->base, possible_crtcs, 994 &tegra_plane_funcs, formats, ··· 2035 return false; 2036 } 2037 2038 + static int tegra_dc_early_init(struct host1x_client *client) 2039 + { 2040 + struct drm_device *drm = dev_get_drvdata(client->host); 2041 + struct tegra_drm *tegra = drm->dev_private; 2042 + 2043 + tegra->num_crtcs++; 2044 + 2045 + return 0; 2046 + } 2047 + 2048 static int tegra_dc_init(struct host1x_client *client) 2049 { 2050 struct drm_device *drm = dev_get_drvdata(client->host); ··· 2044 struct drm_plane *primary = NULL; 2045 struct drm_plane *cursor = NULL; 2046 int err; 2047 + 2048 + 
/* 2049 + * DC has been reset by now, so VBLANK syncpoint can be released 2050 + * for general use. 2051 + */ 2052 + host1x_syncpt_release_vblank_reservation(client, 26 + dc->pipe); 2053 2054 /* 2055 * XXX do not register DCs with no window groups because we cannot ··· 2111 if (dc->soc->pitch_align > tegra->pitch_align) 2112 tegra->pitch_align = dc->soc->pitch_align; 2113 2114 + /* track maximum resolution */ 2115 + if (dc->soc->has_nvdisplay) 2116 + drm->mode_config.max_width = drm->mode_config.max_height = 16384; 2117 + else 2118 + drm->mode_config.max_width = drm->mode_config.max_height = 4096; 2119 + 2120 err = tegra_dc_rgb_init(drm, dc); 2121 if (err < 0 && err != -ENODEV) { 2122 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err); ··· 2141 drm_plane_cleanup(primary); 2142 2143 host1x_client_iommu_detach(client); 2144 + host1x_syncpt_put(dc->syncpt); 2145 2146 return err; 2147 } ··· 2166 } 2167 2168 host1x_client_iommu_detach(client); 2169 + host1x_syncpt_put(dc->syncpt); 2170 + 2171 + return 0; 2172 + } 2173 + 2174 + static int tegra_dc_late_exit(struct host1x_client *client) 2175 + { 2176 + struct drm_device *drm = dev_get_drvdata(client->host); 2177 + struct tegra_drm *tegra = drm->dev_private; 2178 + 2179 + tegra->num_crtcs--; 2180 2181 return 0; 2182 } ··· 2235 } 2236 2237 static const struct host1x_client_ops dc_client_ops = { 2238 + .early_init = tegra_dc_early_init, 2239 .init = tegra_dc_init, 2240 .exit = tegra_dc_exit, 2241 + .late_exit = tegra_dc_late_exit, 2242 .suspend = tegra_dc_runtime_suspend, 2243 .resume = tegra_dc_runtime_resume, 2244 }; ··· 2246 .supports_interlacing = false, 2247 .supports_cursor = false, 2248 .supports_block_linear = false, 2249 + .supports_sector_layout = false, 2250 .has_legacy_blending = true, 2251 .pitch_align = 8, 2252 .has_powergate = false, ··· 2265 .supports_interlacing = false, 2266 .supports_cursor = false, 2267 .supports_block_linear = false, 2268 + .supports_sector_layout = false, 2269 .has_legacy_blending = true, 2270 .pitch_align = 8, 2271 .has_powergate = false, ··· 2284 .supports_interlacing = false, 2285 .supports_cursor = false, 2286 .supports_block_linear = false, 2287 + .supports_sector_layout = false, 2288 .has_legacy_blending = true, 2289 .pitch_align = 64, 2290 .has_powergate = true, ··· 2303 .supports_interlacing = true, 2304 .supports_cursor = true, 2305 .supports_block_linear = true, 2306 + .supports_sector_layout = false, 2307 .has_legacy_blending = false, 2308 .pitch_align = 64, 2309 .has_powergate = true, ··· 2322 .supports_interlacing = true, 2323 .supports_cursor = true, 2324 .supports_block_linear = true, 2325 + .supports_sector_layout = false, 2326 .has_legacy_blending = false, 2327 .pitch_align = 64, 2328 .has_powergate = true, ··· 2375 .supports_interlacing = true, 2376 .supports_cursor = true, 2377 .supports_block_linear = true, 2378 + .supports_sector_layout = false, 2379 .has_legacy_blending = false, 2380 .pitch_align = 64, 2381 .has_powergate = false, ··· 2423 .supports_interlacing = true, 2424 .supports_cursor = true, 2425 .supports_block_linear = true, 2426 + .supports_sector_layout = true, 2427 .has_legacy_blending = false, 2428 .pitch_align = 64, 2429 .has_powergate = false, ··· 2532 2533 static int tegra_dc_probe(struct platform_device *pdev) 2534 { 2535 + u64 dma_mask = dma_get_mask(pdev->dev.parent); 2536 struct tegra_dc *dc; 2537 int err; 2538 + 2539 + err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask); 2540 + if (err < 0) { 2541 + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", 
err); 2542 + return err; 2543 + } 2544 2545 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); 2546 if (!dc)
+6
drivers/gpu/drm/tegra/dc.h
··· 52 bool supports_interlacing; 53 bool supports_cursor; 54 bool supports_block_linear; 55 bool has_legacy_blending; 56 unsigned int pitch_align; 57 bool has_powergate; ··· 512 513 #define DC_DISP_CURSOR_START_ADDR_HI 0x4ec 514 #define DC_DISP_BLEND_CURSOR_CONTROL 0x4f1 515 #define CURSOR_MODE_LEGACY (0 << 24) 516 #define CURSOR_MODE_NORMAL (1 << 24) 517 #define CURSOR_DST_BLEND_ZERO (0 << 16) ··· 707 #define DC_DISP_CORE_SOR_SET_CONTROL(x) (0x403 + (x)) 708 #define PROTOCOL_MASK (0xf << 8) 709 #define PROTOCOL_SINGLE_TMDS_A (0x1 << 8) 710 711 #define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL 0x702 712 #define OWNER_MASK (0xf << 0)
··· 52 bool supports_interlacing; 53 bool supports_cursor; 54 bool supports_block_linear; 55 + bool supports_sector_layout; 56 bool has_legacy_blending; 57 unsigned int pitch_align; 58 bool has_powergate; ··· 511 512 #define DC_DISP_CURSOR_START_ADDR_HI 0x4ec 513 #define DC_DISP_BLEND_CURSOR_CONTROL 0x4f1 514 + #define CURSOR_COMPOSITION_MODE_BLEND (0 << 25) 515 + #define CURSOR_COMPOSITION_MODE_XOR (1 << 25) 516 #define CURSOR_MODE_LEGACY (0 << 24) 517 #define CURSOR_MODE_NORMAL (1 << 24) 518 #define CURSOR_DST_BLEND_ZERO (0 << 16) ··· 704 #define DC_DISP_CORE_SOR_SET_CONTROL(x) (0x403 + (x)) 705 #define PROTOCOL_MASK (0xf << 8) 706 #define PROTOCOL_SINGLE_TMDS_A (0x1 << 8) 707 + 708 + #define DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR 0x442 709 + #define DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR 0x446 710 711 #define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL 0x702 712 #define OWNER_MASK (0xf << 0)
+17 -10
drivers/gpu/drm/tegra/drm.c
··· 174 struct drm_tegra_syncpt syncpt; 175 struct host1x *host1x = dev_get_drvdata(drm->dev->parent); 176 struct drm_gem_object **refs; 177 - struct host1x_syncpt *sp; 178 struct host1x_job *job; 179 unsigned int num_refs; 180 int err; ··· 301 goto fail; 302 } 303 304 - /* check whether syncpoint ID is valid */ 305 - sp = host1x_syncpt_get(host1x, syncpt.id); 306 if (!sp) { 307 err = -ENOENT; 308 goto fail; ··· 311 job->is_addr_reg = context->client->ops->is_addr_reg; 312 job->is_valid_class = context->client->ops->is_valid_class; 313 job->syncpt_incrs = syncpt.incrs; 314 - job->syncpt_id = syncpt.id; 315 job->timeout = 10000; 316 317 if (args->timeout && args->timeout < 10000) ··· 383 struct drm_tegra_syncpt_read *args = data; 384 struct host1x_syncpt *sp; 385 386 - sp = host1x_syncpt_get(host, args->id); 387 if (!sp) 388 return -EINVAL; 389 ··· 398 struct drm_tegra_syncpt_incr *args = data; 399 struct host1x_syncpt *sp; 400 401 - sp = host1x_syncpt_get(host1x, args->id); 402 if (!sp) 403 return -EINVAL; 404 ··· 412 struct drm_tegra_syncpt_wait *args = data; 413 struct host1x_syncpt *sp; 414 415 - sp = host1x_syncpt_get(host1x, args->id); 416 if (!sp) 417 return -EINVAL; 418 ··· 1121 1122 drm->mode_config.min_width = 0; 1123 drm->mode_config.min_height = 0; 1124 - 1125 - drm->mode_config.max_width = 4096; 1126 - drm->mode_config.max_height = 4096; 1127 1128 drm->mode_config.allow_fb_modifiers = true; 1129 ··· 1140 err = host1x_device_init(dev); 1141 if (err < 0) 1142 goto fbdev; 1143 1144 if (tegra->use_explicit_iommu) { 1145 u64 carveout_start, carveout_end, gem_start, gem_end;
··· 174 struct drm_tegra_syncpt syncpt; 175 struct host1x *host1x = dev_get_drvdata(drm->dev->parent); 176 struct drm_gem_object **refs; 177 + struct host1x_syncpt *sp = NULL; 178 struct host1x_job *job; 179 unsigned int num_refs; 180 int err; ··· 301 goto fail; 302 } 303 304 + /* Syncpoint ref will be dropped on job release. */ 305 + sp = host1x_syncpt_get_by_id(host1x, syncpt.id); 306 if (!sp) { 307 err = -ENOENT; 308 goto fail; ··· 311 job->is_addr_reg = context->client->ops->is_addr_reg; 312 job->is_valid_class = context->client->ops->is_valid_class; 313 job->syncpt_incrs = syncpt.incrs; 314 + job->syncpt = sp; 315 job->timeout = 10000; 316 317 if (args->timeout && args->timeout < 10000) ··· 383 struct drm_tegra_syncpt_read *args = data; 384 struct host1x_syncpt *sp; 385 386 + sp = host1x_syncpt_get_by_id_noref(host, args->id); 387 if (!sp) 388 return -EINVAL; 389 ··· 398 struct drm_tegra_syncpt_incr *args = data; 399 struct host1x_syncpt *sp; 400 401 + sp = host1x_syncpt_get_by_id_noref(host1x, args->id); 402 if (!sp) 403 return -EINVAL; 404 ··· 412 struct drm_tegra_syncpt_wait *args = data; 413 struct host1x_syncpt *sp; 414 415 + sp = host1x_syncpt_get_by_id_noref(host1x, args->id); 416 if (!sp) 417 return -EINVAL; 418 ··· 1121 1122 drm->mode_config.min_width = 0; 1123 drm->mode_config.min_height = 0; 1124 + drm->mode_config.max_width = 0; 1125 + drm->mode_config.max_height = 0; 1126 1127 drm->mode_config.allow_fb_modifiers = true; 1128 ··· 1141 err = host1x_device_init(dev); 1142 if (err < 0) 1143 goto fbdev; 1144 + 1145 + /* 1146 + * Now that all display controller have been initialized, the maximum 1147 + * supported resolution is known and the bitmask for horizontal and 1148 + * vertical bitfields can be computed. 1149 + */ 1150 + tegra->hmask = drm->mode_config.max_width - 1; 1151 + tegra->vmask = drm->mode_config.max_height - 1; 1152 1153 if (tegra->use_explicit_iommu) { 1154 u64 carveout_start, carveout_end, gem_start, gem_end;
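
As a quick cross-check of the mask computation above: with the per-SoC
maxima set in the dc.c hunk, legacy display hardware ends up with
hmask = vmask = 4096 - 1 = 0xfff, while nvdisplay gets 16384 - 1 = 0x3fff;
the latter matches the 0x3fff literal that the cursor position code in
dc.c previously hard-coded for every generation.
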
+5
drivers/gpu/drm/tegra/drm.h
··· 24 #include "hub.h" 25 #include "trace.h" 26 27 struct reset_control; 28 29 #ifdef CONFIG_DRM_FBDEV_EMULATION ··· 57 struct tegra_fbdev *fbdev; 58 #endif 59 60 unsigned int pitch_align; 61 62 struct tegra_display_hub *hub; 63 };
··· 24 #include "hub.h" 25 #include "trace.h" 26 27 + /* XXX move to include/uapi/drm/drm_fourcc.h? */ 28 + #define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22) 29 + 30 struct reset_control; 31 32 #ifdef CONFIG_DRM_FBDEV_EMULATION ··· 54 struct tegra_fbdev *fbdev; 55 #endif 56 57 + unsigned int hmask, vmask; 58 unsigned int pitch_align; 59 + unsigned int num_crtcs; 60 61 struct tegra_display_hub *hub; 62 };
+10
drivers/gpu/drm/tegra/fb.c
··· 44 { 45 uint64_t modifier = framebuffer->modifier; 46 47 switch (modifier) { 48 case DRM_FORMAT_MOD_LINEAR: 49 tiling->mode = TEGRA_BO_TILING_MODE_PITCH; ··· 95 break; 96 97 default: 98 return -EINVAL; 99 } 100
··· 44 { 45 uint64_t modifier = framebuffer->modifier; 46 47 + if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) { 48 + if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0) 49 + tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA; 50 + else 51 + tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_GPU; 52 + 53 + modifier &= ~DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT; 54 + } 55 + 56 switch (modifier) { 57 case DRM_FORMAT_MOD_LINEAR: 58 tiling->mode = TEGRA_BO_TILING_MODE_PITCH; ··· 86 break; 87 88 default: 89 + DRM_DEBUG_KMS("unknown format modifier: %llx\n", modifier); 90 return -EINVAL; 91 } 92
+6
drivers/gpu/drm/tegra/gem.h
··· 21 TEGRA_BO_TILING_MODE_BLOCK, 22 }; 23 24 struct tegra_bo_tiling { 25 enum tegra_bo_tiling_mode mode; 26 unsigned long value; 27 }; 28 29 struct tegra_bo {
··· 21 TEGRA_BO_TILING_MODE_BLOCK, 22 }; 23 24 + enum tegra_bo_sector_layout { 25 + TEGRA_BO_SECTOR_LAYOUT_TEGRA, 26 + TEGRA_BO_SECTOR_LAYOUT_GPU, 27 + }; 28 + 29 struct tegra_bo_tiling { 30 enum tegra_bo_tiling_mode mode; 31 unsigned long value; 32 + enum tegra_bo_sector_layout sector_layout; 33 }; 34 35 struct tegra_bo {
+2 -2
drivers/gpu/drm/tegra/gr2d.c
··· 67 detach: 68 host1x_client_iommu_detach(client); 69 free: 70 - host1x_syncpt_free(client->syncpts[0]); 71 put: 72 host1x_channel_put(gr2d->channel); 73 return err; ··· 86 return err; 87 88 host1x_client_iommu_detach(client); 89 - host1x_syncpt_free(client->syncpts[0]); 90 host1x_channel_put(gr2d->channel); 91 92 return 0;
··· 67 detach: 68 host1x_client_iommu_detach(client); 69 free: 70 + host1x_syncpt_put(client->syncpts[0]); 71 put: 72 host1x_channel_put(gr2d->channel); 73 return err; ··· 86 return err; 87 88 host1x_client_iommu_detach(client); 89 + host1x_syncpt_put(client->syncpts[0]); 90 host1x_channel_put(gr2d->channel); 91 92 return 0;
+2 -2
drivers/gpu/drm/tegra/gr3d.c
··· 76 detach: 77 host1x_client_iommu_detach(client); 78 free: 79 - host1x_syncpt_free(client->syncpts[0]); 80 put: 81 host1x_channel_put(gr3d->channel); 82 return err; ··· 94 return err; 95 96 host1x_client_iommu_detach(client); 97 - host1x_syncpt_free(client->syncpts[0]); 98 host1x_channel_put(gr3d->channel); 99 100 return 0;
··· 76 detach: 77 host1x_client_iommu_detach(client); 78 free: 79 + host1x_syncpt_put(client->syncpts[0]); 80 put: 81 host1x_channel_put(gr3d->channel); 82 return err; ··· 94 return err; 95 96 host1x_client_iommu_detach(client); 97 + host1x_syncpt_put(client->syncpts[0]); 98 host1x_channel_put(gr3d->channel); 99 100 return 0;
+39 -2
drivers/gpu/drm/tegra/hub.c
··· 55 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3), 56 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4), 57 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5), 58 DRM_FORMAT_MOD_INVALID 59 }; 60 ··· 378 return -EINVAL; 379 } 380 381 /* 382 * Tegra doesn't support different strides for U and V planes so we 383 * error out if the user tries to display a framebuffer with such a ··· 503 504 base = tegra_plane_state->iova[0] + fb->offsets[0]; 505 506 tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH); 507 tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS); 508 ··· 590 enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY; 591 struct tegra_drm *tegra = drm->dev_private; 592 struct tegra_display_hub *hub = tegra->hub; 593 - /* planes can be assigned to arbitrary CRTCs */ 594 - unsigned int possible_crtcs = 0x7; 595 struct tegra_shared_plane *plane; 596 unsigned int num_formats; 597 const u64 *modifiers; 598 struct drm_plane *p; ··· 609 plane->wgrp->parent = &dc->client; 610 611 p = &plane->base.base; 612 613 num_formats = ARRAY_SIZE(tegra_shared_plane_formats); 614 formats = tegra_shared_plane_formats; ··· 878 879 static int tegra_display_hub_probe(struct platform_device *pdev) 880 { 881 struct device_node *child = NULL; 882 struct tegra_display_hub *hub; 883 struct clk *clk; 884 unsigned int i; 885 int err; 886 887 hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL); 888 if (!hub)
··· 55 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3), 56 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4), 57 DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5), 58 + /* 59 + * The GPU sector layout is only supported on Tegra194, but these will 60 + * be filtered out later on by ->format_mod_supported() on SoCs where 61 + * it isn't supported. 62 + */ 63 + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT, 64 + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT, 65 + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT, 66 + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT, 67 + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT, 68 + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT, 69 + /* sentinel */ 70 DRM_FORMAT_MOD_INVALID 71 }; 72 ··· 366 return -EINVAL; 367 } 368 369 + if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU && 370 + !dc->soc->supports_sector_layout) { 371 + DRM_ERROR("hardware doesn't support GPU sector layout\n"); 372 + return -EINVAL; 373 + } 374 + 375 /* 376 * Tegra doesn't support different strides for U and V planes so we 377 * error out if the user tries to display a framebuffer with such a ··· 485 486 base = tegra_plane_state->iova[0] + fb->offsets[0]; 487 488 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 489 + /* 490 + * Physical address bit 39 in Tegra194 is used as a switch for special 491 + * logic that swizzles the memory using either the legacy Tegra or the 492 + * dGPU sector layout. 493 + */ 494 + if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU) 495 + base |= BIT(39); 496 + #endif 497 + 498 tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH); 499 tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS); 500 ··· 562 enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY; 563 struct tegra_drm *tegra = drm->dev_private; 564 struct tegra_display_hub *hub = tegra->hub; 565 struct tegra_shared_plane *plane; 566 + unsigned int possible_crtcs; 567 unsigned int num_formats; 568 const u64 *modifiers; 569 struct drm_plane *p; ··· 582 plane->wgrp->parent = &dc->client; 583 584 p = &plane->base.base; 585 + 586 + /* planes can be assigned to arbitrary CRTCs */ 587 + possible_crtcs = BIT(tegra->num_crtcs) - 1; 588 589 num_formats = ARRAY_SIZE(tegra_shared_plane_formats); 590 formats = tegra_shared_plane_formats; ··· 848 849 static int tegra_display_hub_probe(struct platform_device *pdev) 850 { 851 + u64 dma_mask = dma_get_mask(pdev->dev.parent); 852 struct device_node *child = NULL; 853 struct tegra_display_hub *hub; 854 struct clk *clk; 855 unsigned int i; 856 int err; 857 + 858 + err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask); 859 + if (err < 0) { 860 + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); 861 + return err; 862 + } 863 864 hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL); 865 if (!hub)
+32
drivers/gpu/drm/tegra/plane.c
··· 83 kfree(state); 84 } 85 86 static bool tegra_plane_format_mod_supported(struct drm_plane *plane, 87 uint32_t format, 88 uint64_t modifier) ··· 107 108 if (modifier == DRM_FORMAT_MOD_LINEAR) 109 return true; 110 111 if (info->num_planes == 1) 112 return true; ··· 143 dma_addr_t phys_addr, *phys; 144 struct sg_table *sgt; 145 146 if (!domain || dc->client.group) 147 phys = &phys_addr; 148 else
··· 83 kfree(state); 84 } 85 86 + static bool tegra_plane_supports_sector_layout(struct drm_plane *plane) 87 + { 88 + struct drm_crtc *crtc; 89 + 90 + drm_for_each_crtc(crtc, plane->dev) { 91 + if (plane->possible_crtcs & drm_crtc_mask(crtc)) { 92 + struct tegra_dc *dc = to_tegra_dc(crtc); 93 + 94 + if (!dc->soc->supports_sector_layout) 95 + return false; 96 + } 97 + } 98 + 99 + return true; 100 + } 101 + 102 static bool tegra_plane_format_mod_supported(struct drm_plane *plane, 103 uint32_t format, 104 uint64_t modifier) ··· 91 92 if (modifier == DRM_FORMAT_MOD_LINEAR) 93 return true; 94 + 95 + /* check for the sector layout bit */ 96 + if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) { 97 + if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) { 98 + if (!tegra_plane_supports_sector_layout(plane)) 99 + return false; 100 + } 101 + } 102 103 if (info->num_planes == 1) 104 return true; ··· 119 dma_addr_t phys_addr, *phys; 120 struct sg_table *sgt; 121 122 + /* 123 + * If we're not attached to a domain, we already stored the 124 + * physical address when the buffer was allocated. If we're 125 + * part of a group that's shared between all display 126 + * controllers, we've also already mapped the framebuffer 127 + * through the SMMU. In both cases we can short-circuit the 128 + * code below and retrieve the stored IOV address. 129 + */ 130 if (!domain || dc->client.group) 131 phys = &phys_addr; 132 else
+2 -2
drivers/gpu/drm/tegra/vic.c
··· 214 return 0; 215 216 free_syncpt: 217 - host1x_syncpt_free(client->syncpts[0]); 218 free_channel: 219 host1x_channel_put(vic->channel); 220 detach: ··· 238 if (err < 0) 239 return err; 240 241 - host1x_syncpt_free(client->syncpts[0]); 242 host1x_channel_put(vic->channel); 243 host1x_client_iommu_detach(client); 244
··· 214 return 0; 215 216 free_syncpt: 217 + host1x_syncpt_put(client->syncpts[0]); 218 free_channel: 219 host1x_channel_put(vic->channel); 220 detach: ··· 238 if (err < 0) 239 return err; 240 241 + host1x_syncpt_put(client->syncpts[0]); 242 host1x_channel_put(vic->channel); 243 host1x_client_iommu_detach(client); 244
+31
drivers/gpu/host1x/bus.c
··· 197 mutex_lock(&device->clients_lock); 198 199 list_for_each_entry(client, &device->clients, list) { 200 if (client->ops && client->ops->init) { 201 err = client->ops->init(client); 202 if (err < 0) { ··· 227 list_for_each_entry_continue_reverse(client, &device->clients, list) 228 if (client->ops->exit) 229 client->ops->exit(client); 230 231 mutex_unlock(&device->clients_lock); 232 return err; ··· 263 if (err < 0) { 264 dev_err(&device->dev, 265 "failed to cleanup %s: %d\n", 266 dev_name(client->dev), err); 267 mutex_unlock(&device->clients_lock); 268 return err;
··· 197 mutex_lock(&device->clients_lock); 198 199 list_for_each_entry(client, &device->clients, list) { 200 + if (client->ops && client->ops->early_init) { 201 + err = client->ops->early_init(client); 202 + if (err < 0) { 203 + dev_err(&device->dev, "failed to early initialize %s: %d\n", 204 + dev_name(client->dev), err); 205 + goto teardown_late; 206 + } 207 + } 208 + } 209 + 210 + list_for_each_entry(client, &device->clients, list) { 211 if (client->ops && client->ops->init) { 212 err = client->ops->init(client); 213 if (err < 0) { ··· 216 list_for_each_entry_continue_reverse(client, &device->clients, list) 217 if (client->ops->exit) 218 client->ops->exit(client); 219 + 220 + /* reset client to end of list for late teardown */ 221 + client = list_entry(&device->clients, struct host1x_client, list); 222 + 223 + teardown_late: 224 + list_for_each_entry_continue_reverse(client, &device->clients, list) 225 + if (client->ops->late_exit) 226 + client->ops->late_exit(client); 227 228 mutex_unlock(&device->clients_lock); 229 return err; ··· 244 if (err < 0) { 245 dev_err(&device->dev, 246 "failed to cleanup %s: %d\n", 247 + dev_name(client->dev), err); 248 + mutex_unlock(&device->clients_lock); 249 + return err; 250 + } 251 + } 252 + } 253 + 254 + list_for_each_entry_reverse(client, &device->clients, list) { 255 + if (client->ops && client->ops->late_exit) { 256 + err = client->ops->late_exit(client); 257 + if (err < 0) { 258 + dev_err(&device->dev, "failed to late cleanup %s: %d\n", 259 dev_name(client->dev), err); 260 mutex_unlock(&device->clients_lock); 261 return err;
+3 -8
drivers/gpu/host1x/cdma.c
··· 273 static void cdma_start_timer_locked(struct host1x_cdma *cdma, 274 struct host1x_job *job) 275 { 276 - struct host1x *host = cdma_to_host1x(cdma); 277 - 278 if (cdma->timeout.client) { 279 /* timer already started */ 280 return; 281 } 282 283 cdma->timeout.client = job->client; 284 - cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id); 285 cdma->timeout.syncpt_val = job->syncpt_end; 286 cdma->timeout.start_ktime = ktime_get(); 287 ··· 310 static void update_cdma_locked(struct host1x_cdma *cdma) 311 { 312 bool signal = false; 313 - struct host1x *host1x = cdma_to_host1x(cdma); 314 struct host1x_job *job, *n; 315 316 /* If CDMA is stopped, queue is cleared and we can return */ ··· 321 * to consume as many sync queue entries as possible without blocking 322 */ 323 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { 324 - struct host1x_syncpt *sp = 325 - host1x_syncpt_get(host1x, job->syncpt_id); 326 327 /* Check whether this syncpt has completed, and bail if not */ 328 if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) { ··· 495 if (!cdma->timeout.initialized) { 496 int err; 497 498 - err = host1x_hw_cdma_timeout_init(host1x, cdma, 499 - job->syncpt_id); 500 if (err) { 501 mutex_unlock(&cdma->lock); 502 return err;
··· 273 static void cdma_start_timer_locked(struct host1x_cdma *cdma, 274 struct host1x_job *job) 275 { 276 if (cdma->timeout.client) { 277 /* timer already started */ 278 return; 279 } 280 281 cdma->timeout.client = job->client; 282 + cdma->timeout.syncpt = job->syncpt; 283 cdma->timeout.syncpt_val = job->syncpt_end; 284 cdma->timeout.start_ktime = ktime_get(); 285 ··· 312 static void update_cdma_locked(struct host1x_cdma *cdma) 313 { 314 bool signal = false; 315 struct host1x_job *job, *n; 316 317 /* If CDMA is stopped, queue is cleared and we can return */ ··· 324 * to consume as many sync queue entries as possible without blocking 325 */ 326 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { 327 + struct host1x_syncpt *sp = job->syncpt; 328 329 /* Check whether this syncpt has completed, and bail if not */ 330 if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) { ··· 499 if (!cdma->timeout.initialized) { 500 int err; 501 502 + err = host1x_hw_cdma_timeout_init(host1x, cdma); 503 if (err) { 504 mutex_unlock(&cdma->lock); 505 return err;
+11 -3
drivers/gpu/host1x/debug.c
··· 69 70 static void show_syncpts(struct host1x *m, struct output *o) 71 { 72 unsigned int i; 73 74 host1x_debug_output(o, "---- syncpts ----\n"); ··· 77 for (i = 0; i < host1x_syncpt_nb_pts(m); i++) { 78 u32 max = host1x_syncpt_read_max(m->syncpt + i); 79 u32 min = host1x_syncpt_load(m->syncpt + i); 80 81 - if (!min && !max) 82 continue; 83 84 - host1x_debug_output(o, "id %u (%s) min %d max %d\n", 85 - i, m->syncpt[i].name, min, max); 86 } 87 88 for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
··· 69 70 static void show_syncpts(struct host1x *m, struct output *o) 71 { 72 + struct list_head *pos; 73 unsigned int i; 74 75 host1x_debug_output(o, "---- syncpts ----\n"); ··· 76 for (i = 0; i < host1x_syncpt_nb_pts(m); i++) { 77 u32 max = host1x_syncpt_read_max(m->syncpt + i); 78 u32 min = host1x_syncpt_load(m->syncpt + i); 79 + unsigned int waiters = 0; 80 81 + spin_lock(&m->syncpt[i].intr.lock); 82 + list_for_each(pos, &m->syncpt[i].intr.wait_head) 83 + waiters++; 84 + spin_unlock(&m->syncpt[i].intr.lock); 85 + 86 + if (!min && !max && !waiters) 87 continue; 88 89 + host1x_debug_output(o, 90 + "id %u (%s) min %d max %d (%d waiters)\n", 91 + i, m->syncpt[i].name, min, max, waiters); 92 } 93 94 for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
+6
drivers/gpu/host1x/dev.c
··· 77 .has_hypervisor = false, 78 .num_sid_entries = 0, 79 .sid_table = NULL, 80 }; 81 82 static const struct host1x_info host1x02_info = { ··· 92 .has_hypervisor = false, 93 .num_sid_entries = 0, 94 .sid_table = NULL, 95 }; 96 97 static const struct host1x_info host1x04_info = { ··· 107 .has_hypervisor = false, 108 .num_sid_entries = 0, 109 .sid_table = NULL, 110 }; 111 112 static const struct host1x_info host1x05_info = { ··· 122 .has_hypervisor = false, 123 .num_sid_entries = 0, 124 .sid_table = NULL, 125 }; 126 127 static const struct host1x_sid_entry tegra186_sid_table[] = { ··· 146 .has_hypervisor = true, 147 .num_sid_entries = ARRAY_SIZE(tegra186_sid_table), 148 .sid_table = tegra186_sid_table, 149 }; 150 151 static const struct host1x_sid_entry tegra194_sid_table[] = { ··· 170 .has_hypervisor = true, 171 .num_sid_entries = ARRAY_SIZE(tegra194_sid_table), 172 .sid_table = tegra194_sid_table, 173 }; 174 175 static const struct of_device_id host1x_of_match[] = {
··· 77 .has_hypervisor = false, 78 .num_sid_entries = 0, 79 .sid_table = NULL, 80 + .reserve_vblank_syncpts = true, 81 }; 82 83 static const struct host1x_info host1x02_info = { ··· 91 .has_hypervisor = false, 92 .num_sid_entries = 0, 93 .sid_table = NULL, 94 + .reserve_vblank_syncpts = true, 95 }; 96 97 static const struct host1x_info host1x04_info = { ··· 105 .has_hypervisor = false, 106 .num_sid_entries = 0, 107 .sid_table = NULL, 108 + .reserve_vblank_syncpts = false, 109 }; 110 111 static const struct host1x_info host1x05_info = { ··· 119 .has_hypervisor = false, 120 .num_sid_entries = 0, 121 .sid_table = NULL, 122 + .reserve_vblank_syncpts = false, 123 }; 124 125 static const struct host1x_sid_entry tegra186_sid_table[] = { ··· 142 .has_hypervisor = true, 143 .num_sid_entries = ARRAY_SIZE(tegra186_sid_table), 144 .sid_table = tegra186_sid_table, 145 + .reserve_vblank_syncpts = false, 146 }; 147 148 static const struct host1x_sid_entry tegra194_sid_table[] = { ··· 165 .has_hypervisor = true, 166 .num_sid_entries = ARRAY_SIZE(tegra194_sid_table), 167 .sid_table = tegra194_sid_table, 168 + .reserve_vblank_syncpts = false, 169 }; 170 171 static const struct of_device_id host1x_of_match[] = {
+9 -4
drivers/gpu/host1x/dev.h
··· 37 void (*start)(struct host1x_cdma *cdma); 38 void (*stop)(struct host1x_cdma *cdma); 39 void (*flush)(struct host1x_cdma *cdma); 40 - int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt); 41 void (*timeout_destroy)(struct host1x_cdma *cdma); 42 void (*freeze)(struct host1x_cdma *cdma); 43 void (*resume)(struct host1x_cdma *cdma, u32 getptr); ··· 101 bool has_hypervisor; /* has hypervisor registers */ 102 unsigned int num_sid_entries; 103 const struct host1x_sid_entry *sid_table; 104 }; 105 106 struct host1x { ··· 267 } 268 269 static inline int host1x_hw_cdma_timeout_init(struct host1x *host, 270 - struct host1x_cdma *cdma, 271 - unsigned int syncpt) 272 { 273 - return host->cdma_op->timeout_init(cdma, syncpt); 274 } 275 276 static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
··· 37 void (*start)(struct host1x_cdma *cdma); 38 void (*stop)(struct host1x_cdma *cdma); 39 void (*flush)(struct host1x_cdma *cdma); 40 + int (*timeout_init)(struct host1x_cdma *cdma); 41 void (*timeout_destroy)(struct host1x_cdma *cdma); 42 void (*freeze)(struct host1x_cdma *cdma); 43 void (*resume)(struct host1x_cdma *cdma, u32 getptr); ··· 101 bool has_hypervisor; /* has hypervisor registers */ 102 unsigned int num_sid_entries; 103 const struct host1x_sid_entry *sid_table; 104 + /* 105 + * On T20-T148, the boot chain may setup DC to increment syncpoints 106 + * 26/27 on VBLANK. As such we cannot use these syncpoints until 107 + * the display driver disables VBLANK increments. 108 + */ 109 + bool reserve_vblank_syncpts; 110 }; 111 112 struct host1x { ··· 261 } 262 263 static inline int host1x_hw_cdma_timeout_init(struct host1x *host, 264 + struct host1x_cdma *cdma) 265 { 266 + return host->cdma_op->timeout_init(cdma); 267 } 268 269 static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
+1 -1
drivers/gpu/host1x/hw/cdma_hw.c
··· 295 /* 296 * Init timeout resources 297 */ 298 - static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt) 299 { 300 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); 301 cdma->timeout.initialized = true;
··· 295 /* 296 * Init timeout resources 297 */ 298 + static int cdma_timeout_init(struct host1x_cdma *cdma) 299 { 300 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); 301 cdma->timeout.initialized = true;
+4 -6
drivers/gpu/host1x/hw/channel_hw.c
··· 86 87 static inline void synchronize_syncpt_base(struct host1x_job *job) 88 { 89 - struct host1x *host = dev_get_drvdata(job->channel->dev->parent); 90 - struct host1x_syncpt *sp = host->syncpt + job->syncpt_id; 91 unsigned int id; 92 u32 value; 93 ··· 117 static int channel_submit(struct host1x_job *job) 118 { 119 struct host1x_channel *ch = job->channel; 120 - struct host1x_syncpt *sp; 121 u32 user_syncpt_incrs = job->syncpt_incrs; 122 u32 prev_max = 0; 123 u32 syncval; ··· 125 struct host1x_waitlist *completed_waiter = NULL; 126 struct host1x *host = dev_get_drvdata(ch->dev->parent); 127 128 - sp = host->syncpt + job->syncpt_id; 129 trace_host1x_channel_submit(dev_name(ch->dev), 130 job->num_gathers, job->num_relocs, 131 - job->syncpt_id, job->syncpt_incrs); 132 133 /* before error checks, return current max */ 134 prev_max = job->syncpt_end = host1x_syncpt_read_max(sp); ··· 161 host1x_cdma_push(&ch->cdma, 162 host1x_opcode_setclass(HOST1X_CLASS_HOST1X, 163 host1x_uclass_wait_syncpt_r(), 1), 164 - host1x_class_host_wait_syncpt(job->syncpt_id, 165 host1x_syncpt_read_max(sp))); 166 } 167
··· 86 87 static inline void synchronize_syncpt_base(struct host1x_job *job) 88 { 89 + struct host1x_syncpt *sp = job->syncpt; 90 unsigned int id; 91 u32 value; 92 ··· 118 static int channel_submit(struct host1x_job *job) 119 { 120 struct host1x_channel *ch = job->channel; 121 + struct host1x_syncpt *sp = job->syncpt; 122 u32 user_syncpt_incrs = job->syncpt_incrs; 123 u32 prev_max = 0; 124 u32 syncval; ··· 126 struct host1x_waitlist *completed_waiter = NULL; 127 struct host1x *host = dev_get_drvdata(ch->dev->parent); 128 129 trace_host1x_channel_submit(dev_name(ch->dev), 130 job->num_gathers, job->num_relocs, 131 + job->syncpt->id, job->syncpt_incrs); 132 133 /* before error checks, return current max */ 134 prev_max = job->syncpt_end = host1x_syncpt_read_max(sp); ··· 163 host1x_cdma_push(&ch->cdma, 164 host1x_opcode_setclass(HOST1X_CLASS_HOST1X, 165 host1x_uclass_wait_syncpt_r(), 1), 166 + host1x_class_host_wait_syncpt(job->syncpt->id, 167 host1x_syncpt_read_max(sp))); 168 } 169
+1 -1
drivers/gpu/host1x/hw/debug_hw.c
··· 204 unsigned int i; 205 206 host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n", 207 - job, job->syncpt_id, job->syncpt_end, 208 job->first_get, job->timeout, 209 job->num_slots, job->num_unpins); 210
··· 204 unsigned int i; 205 206 host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n", 207 + job, job->syncpt->id, job->syncpt_end, 208 job->first_get, job->timeout, 209 job->num_slots, job->num_unpins); 210
+1 -1
drivers/gpu/host1x/hw/hw_host1x07_vm.h
··· 29 #define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4 * (x)) 30 #define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4 * (x)) 31 #define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4 * (x)) 32 - #define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8d00 + 4 * (x)) 33 #define HOST1X_SYNC_SYNCPT_CH_APP(x) (0xa604 + 4 * (x)) 34 #define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
··· 29 #define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4 * (x)) 30 #define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4 * (x)) 31 #define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4 * (x)) 32 + #define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x9980 + 4 * (x)) 33 #define HOST1X_SYNC_SYNCPT_CH_APP(x) (0xa604 + 4 * (x)) 34 #define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
+20 -8
drivers/gpu/host1x/intr.c
··· 235 host1x_hw_intr_enable_syncpt_intr(host, syncpt->id); 236 } 237 238 - spin_unlock(&syncpt->intr.lock); 239 - 240 if (ref) 241 *ref = waiter; 242 return 0; 243 } 244 245 - void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref) 246 { 247 struct host1x_waitlist *waiter = ref; 248 struct host1x_syncpt *syncpt; 249 250 - while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) == 251 - WLS_REMOVED) 252 - schedule(); 253 254 syncpt = host->syncpt + id; 255 - (void)process_wait_list(host, syncpt, 256 - host1x_syncpt_load(host->syncpt + id)); 257 258 kref_put(&waiter->refcount, waiter_release); 259 }
··· 235 host1x_hw_intr_enable_syncpt_intr(host, syncpt->id); 236 } 237 238 if (ref) 239 *ref = waiter; 240 + 241 + spin_unlock(&syncpt->intr.lock); 242 + 243 return 0; 244 } 245 246 + void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref, 247 + bool flush) 248 { 249 struct host1x_waitlist *waiter = ref; 250 struct host1x_syncpt *syncpt; 251 252 + atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED); 253 254 syncpt = host->syncpt + id; 255 + 256 + spin_lock(&syncpt->intr.lock); 257 + if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) == 258 + WLS_CANCELLED) { 259 + list_del(&waiter->list); 260 + kref_put(&waiter->refcount, waiter_release); 261 + } 262 + spin_unlock(&syncpt->intr.lock); 263 + 264 + if (flush) { 265 + /* Wait until any concurrently executing handler has finished. */ 266 + while (atomic_read(&waiter->state) != WLS_HANDLED) 267 + schedule(); 268 + } 269 270 kref_put(&waiter->refcount, waiter_release); 271 }
+3 -1
drivers/gpu/host1x/intr.h
··· 74 * Unreference an action submitted to host1x_intr_add_action(). 75 * You must call this if you passed non-NULL as ref. 76 * @ref the ref returned from host1x_intr_add_action() 77 */ 78 - void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref); 79 80 /* Initialize host1x sync point interrupt */ 81 int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
··· 74 * Unreference an action submitted to host1x_intr_add_action(). 75 * You must call this if you passed non-NULL as ref. 76 * @ref the ref returned from host1x_intr_add_action() 77 + * @flush wait until any pending handlers have completed before returning. 78 */ 79 + void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref, 80 + bool flush); 81 82 /* Initialize host1x sync point interrupt */ 83 int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
+4 -1
drivers/gpu/host1x/job.c
··· 79 { 80 struct host1x_job *job = container_of(ref, struct host1x_job, ref); 81 82 kfree(job); 83 } 84 ··· 677 */ 678 void host1x_job_dump(struct device *dev, struct host1x_job *job) 679 { 680 - dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt_id); 681 dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end); 682 dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get); 683 dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
··· 79 { 80 struct host1x_job *job = container_of(ref, struct host1x_job, ref); 81 82 + if (job->syncpt) 83 + host1x_syncpt_put(job->syncpt); 84 + 85 kfree(job); 86 } 87 ··· 674 */ 675 void host1x_job_dump(struct device *dev, struct host1x_job *job) 676 { 677 + dev_dbg(dev, " SYNCPT_ID %d\n", job->syncpt->id); 678 dev_dbg(dev, " SYNCPT_VAL %d\n", job->syncpt_end); 679 dev_dbg(dev, " FIRST_GET 0x%x\n", job->first_get); 680 dev_dbg(dev, " TIMEOUT %d\n", job->timeout);
+120 -80
drivers/gpu/host1x/syncpt.c
··· 42 base->requested = false; 43 } 44 45 - static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, 46 - struct host1x_client *client, 47 - unsigned long flags) 48 { 49 struct host1x_syncpt *sp = host->syncpt; 50 unsigned int i; 51 - char *name; 52 53 mutex_lock(&host->syncpt_mutex); 54 55 - for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++) 56 ; 57 58 if (i >= host->info->nb_pts) ··· 79 goto unlock; 80 } 81 82 - name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id, 83 - client ? dev_name(client->dev) : NULL); 84 - if (!name) 85 goto free_base; 86 87 - sp->client = client; 88 - sp->name = name; 89 90 if (flags & HOST1X_SYNCPT_CLIENT_MANAGED) 91 sp->client_managed = true; 92 else 93 sp->client_managed = false; 94 95 mutex_unlock(&host->syncpt_mutex); 96 return sp; ··· 102 mutex_unlock(&host->syncpt_mutex); 103 return NULL; 104 } 105 106 /** 107 * host1x_syncpt_id() - retrieve syncpoint ID ··· 310 } 311 } 312 313 - host1x_intr_put_ref(sp->host, sp->id, ref); 314 315 done: 316 return err; ··· 323 bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh) 324 { 325 u32 current_val; 326 - u32 future_val; 327 328 smp_rmb(); 329 330 current_val = (u32)atomic_read(&sp->min_val); 331 - future_val = (u32)atomic_read(&sp->max_val); 332 333 - /* Note the use of unsigned arithmetic here (mod 1<<32). 334 - * 335 - * c = current_val = min_val = the current value of the syncpoint. 336 - * t = thresh = the value we are checking 337 - * f = future_val = max_val = the value c will reach when all 338 - * outstanding increments have completed. 339 - * 340 - * Note that c always chases f until it reaches f. 341 - * 342 - * Dtf = (f - t) 343 - * Dtc = (c - t) 344 - * 345 - * Consider all cases: 346 - * 347 - * A) .....c..t..f..... Dtf < Dtc need to wait 348 - * B) .....c.....f..t.. Dtf > Dtc expired 349 - * C) ..t..c.....f..... Dtf > Dtc expired (Dct very large) 350 - * 351 - * Any case where f==c: always expired (for any t). Dtf == Dcf 352 - * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0) 353 - * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0, 354 - * Dtc!=0) 355 - * 356 - * Other cases: 357 - * 358 - * A) .....t..f..c..... Dtf < Dtc need to wait 359 - * A) .....f..c..t..... Dtf < Dtc need to wait 360 - * A) .....f..t..c..... Dtf > Dtc expired 361 - * 362 - * So: 363 - * Dtf >= Dtc implies EXPIRED (return true) 364 - * Dtf < Dtc implies WAIT (return false) 365 - * 366 - * Note: If t is expired then we *cannot* wait on it. We would wait 367 - * forever (hang the system). 368 - * 369 - * Note: do NOT get clever and remove the -thresh from both sides. It 370 - * is NOT the same. 371 - * 372 - * If future valueis zero, we have a client managed sync point. In that 373 - * case we do a direct comparison. 374 - */ 375 - if (!host1x_syncpt_client_managed(sp)) 376 - return future_val - thresh >= current_val - thresh; 377 - else 378 - return (s32)(current_val - thresh) >= 0; 379 } 380 381 int host1x_syncpt_init(struct host1x *host) ··· 370 host1x_hw_syncpt_enable_protection(host); 371 372 /* Allocate sync point to use for clearing waits for expired fences */ 373 - host->nop_sp = host1x_syncpt_alloc(host, NULL, 0); 374 if (!host->nop_sp) 375 return -ENOMEM; 376 377 return 0; 378 } ··· 390 * host1x client drivers can use this function to allocate a syncpoint for 391 * subsequent use. A syncpoint returned by this function will be reserved for 392 * use by the client exclusively. 
When no longer using a syncpoint, a host1x 393 - * client driver needs to release it using host1x_syncpt_free(). 394 */ 395 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, 396 unsigned long flags) 397 { 398 struct host1x *host = dev_get_drvdata(client->host->parent); 399 400 - return host1x_syncpt_alloc(host, client, flags); 401 } 402 EXPORT_SYMBOL(host1x_syncpt_request); 403 404 - /** 405 - * host1x_syncpt_free() - free a requested syncpoint 406 - * @sp: host1x syncpoint 407 - * 408 - * Release a syncpoint previously allocated using host1x_syncpt_request(). A 409 - * host1x client driver should call this when the syncpoint is no longer in 410 - * use. Note that client drivers must ensure that the syncpoint doesn't remain 411 - * under the control of hardware after calling this function, otherwise two 412 - * clients may end up trying to access the same syncpoint concurrently. 413 - */ 414 - void host1x_syncpt_free(struct host1x_syncpt *sp) 415 { 416 - if (!sp) 417 - return; 418 419 mutex_lock(&sp->host->syncpt_mutex); 420 421 host1x_syncpt_base_free(sp->base); 422 kfree(sp->name); 423 sp->base = NULL; 424 - sp->client = NULL; 425 sp->name = NULL; 426 sp->client_managed = false; 427 428 mutex_unlock(&sp->host->syncpt_mutex); 429 } 430 - EXPORT_SYMBOL(host1x_syncpt_free); 431 432 void host1x_syncpt_deinit(struct host1x *host) 433 { ··· 500 } 501 502 /** 503 - * host1x_syncpt_get() - obtain a syncpoint by ID 504 * @host: host1x controller 505 * @id: syncpoint ID 506 */ 507 - struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id) 508 { 509 if (id >= host->info->nb_pts) 510 return NULL; 511 512 - return host->syncpt + id; 513 } 514 EXPORT_SYMBOL(host1x_syncpt_get); 515 ··· 564 return base->id; 565 } 566 EXPORT_SYMBOL(host1x_syncpt_base_id);
··· 42 base->requested = false; 43 } 44 45 + /** 46 + * host1x_syncpt_alloc() - allocate a syncpoint 47 + * @host: host1x device data 48 + * @flags: bitfield of HOST1X_SYNCPT_* flags 49 + * @name: name for the syncpoint for use in debug prints 50 + * 51 + * Allocates a hardware syncpoint for the caller's use. The caller then has 52 + * the sole authority to mutate the syncpoint's value until it is freed again. 53 + * 54 + * If no free syncpoints are available, or a NULL name was specified, returns 55 + * NULL. 56 + */ 57 + struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, 58 + unsigned long flags, 59 + const char *name) 60 { 61 struct host1x_syncpt *sp = host->syncpt; 62 + char *full_name; 63 unsigned int i; 64 + 65 + if (!name) 66 + return NULL; 67 68 mutex_lock(&host->syncpt_mutex); 69 70 + for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++) 71 ; 72 73 if (i >= host->info->nb_pts) ··· 64 goto unlock; 65 } 66 67 + full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name); 68 + if (!full_name) 69 goto free_base; 70 71 + sp->name = full_name; 72 73 if (flags & HOST1X_SYNCPT_CLIENT_MANAGED) 74 sp->client_managed = true; 75 else 76 sp->client_managed = false; 77 + 78 + kref_init(&sp->ref); 79 80 mutex_unlock(&host->syncpt_mutex); 81 return sp; ··· 87 mutex_unlock(&host->syncpt_mutex); 88 return NULL; 89 } 90 + EXPORT_SYMBOL(host1x_syncpt_alloc); 91 92 /** 93 * host1x_syncpt_id() - retrieve syncpoint ID ··· 294 } 295 } 296 297 + host1x_intr_put_ref(sp->host, sp->id, ref, true); 298 299 done: 300 return err; ··· 307 bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh) 308 { 309 u32 current_val; 310 311 smp_rmb(); 312 313 current_val = (u32)atomic_read(&sp->min_val); 314 315 + return ((current_val - thresh) & 0x80000000U) == 0U; 316 } 317 318 int host1x_syncpt_init(struct host1x *host) ··· 401 host1x_hw_syncpt_enable_protection(host); 402 403 /* Allocate sync point to use for clearing waits for expired fences */ 404 + host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop"); 405 if (!host->nop_sp) 406 return -ENOMEM; 407 + 408 + if (host->info->reserve_vblank_syncpts) { 409 + kref_init(&host->syncpt[26].ref); 410 + kref_init(&host->syncpt[27].ref); 411 + } 412 413 return 0; 414 } ··· 416 * host1x client drivers can use this function to allocate a syncpoint for 417 * subsequent use. A syncpoint returned by this function will be reserved for 418 * use by the client exclusively. When no longer using a syncpoint, a host1x 419 + * client driver needs to release it using host1x_syncpt_put(). 420 */ 421 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, 422 unsigned long flags) 423 { 424 struct host1x *host = dev_get_drvdata(client->host->parent); 425 426 + return host1x_syncpt_alloc(host, flags, dev_name(client->dev)); 427 } 428 EXPORT_SYMBOL(host1x_syncpt_request); 429 430 + static void syncpt_release(struct kref *ref) 431 { 432 + struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref); 433 + 434 + atomic_set(&sp->max_val, host1x_syncpt_read(sp)); 435 436 mutex_lock(&sp->host->syncpt_mutex); 437 438 host1x_syncpt_base_free(sp->base); 439 kfree(sp->name); 440 sp->base = NULL; 441 sp->name = NULL; 442 sp->client_managed = false; 443 444 mutex_unlock(&sp->host->syncpt_mutex); 445 } 446 + 447 + /** 448 + * host1x_syncpt_put() - free a requested syncpoint 449 + * @sp: host1x syncpoint 450 + * 451 + * Release a syncpoint previously allocated using host1x_syncpt_request(). 
A 452 + * host1x client driver should call this when the syncpoint is no longer in 453 + * use. 454 + */ 455 + void host1x_syncpt_put(struct host1x_syncpt *sp) 456 + { 457 + if (!sp) 458 + return; 459 + 460 + kref_put(&sp->ref, syncpt_release); 461 + } 462 + EXPORT_SYMBOL(host1x_syncpt_put); 463 464 void host1x_syncpt_deinit(struct host1x *host) 465 { ··· 520 } 521 522 /** 523 + * host1x_syncpt_get_by_id() - obtain a syncpoint by ID 524 * @host: host1x controller 525 * @id: syncpoint ID 526 */ 527 + struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, 528 + unsigned int id) 529 { 530 if (id >= host->info->nb_pts) 531 return NULL; 532 533 + if (kref_get_unless_zero(&host->syncpt[id].ref)) 534 + return &host->syncpt[id]; 535 + else 536 + return NULL; 537 + } 538 + EXPORT_SYMBOL(host1x_syncpt_get_by_id); 539 + 540 + /** 541 + * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't 542 + * increase the refcount. 543 + * @host: host1x controller 544 + * @id: syncpoint ID 545 + */ 546 + struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, 547 + unsigned int id) 548 + { 549 + if (id >= host->info->nb_pts) 550 + return NULL; 551 + 552 + return &host->syncpt[id]; 553 + } 554 + EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref); 555 + 556 + /** 557 + * host1x_syncpt_get() - increment syncpoint refcount 558 + * @sp: syncpoint 559 + */ 560 + struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp) 561 + { 562 + kref_get(&sp->ref); 563 + 564 + return sp; 565 } 566 EXPORT_SYMBOL(host1x_syncpt_get); 567 ··· 552 return base->id; 553 } 554 EXPORT_SYMBOL(host1x_syncpt_base_id); 555 + 556 + static void do_nothing(struct kref *ref) 557 + { 558 + } 559 + 560 + /** 561 + * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint 562 + * available for allocation 563 + * 564 + * @client: host1x bus client 565 + * @syncpt_id: syncpoint ID to make available 566 + * 567 + * Makes VBLANK<i> syncpoint available for allocatation if it was 568 + * reserved at initialization time. This should be called by the display 569 + * driver after it has ensured that any VBLANK increment programming configured 570 + * by the boot chain has been disabled. 571 + */ 572 + void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, 573 + u32 syncpt_id) 574 + { 575 + struct host1x *host = dev_get_drvdata(client->host->parent); 576 + 577 + if (!host->info->reserve_vblank_syncpts) 578 + return; 579 + 580 + kref_put(&host->syncpt[syncpt_id].ref, do_nothing); 581 + } 582 + EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
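
To illustrate the new syncpoint lifetime rules, here is a hypothetical
client-side sketch (the example_* names and the error handling are
invented for illustration, and it assumes client->syncpts points at
driver-owned storage, as the existing gr2d/gr3d/vic clients do); it only
relies on the host1x_syncpt_alloc()/host1x_syncpt_put() pair added here:

    /*
     * Hypothetical host1x client, for illustration only: allocate a
     * refcounted syncpoint at init time and drop that reference again
     * at exit time. With this series the syncpoint ID is only recycled
     * once the last reference (client or in-flight job) has been put.
     */
    static int example_client_init(struct host1x_client *client)
    {
            struct host1x *host = dev_get_drvdata(client->host->parent);
            unsigned long flags = HOST1X_SYNCPT_CLIENT_MANAGED;

            /* the name is only used for debug output */
            client->syncpts[0] = host1x_syncpt_alloc(host, flags,
                                                     dev_name(client->dev));
            if (!client->syncpts[0])
                    return -ENOMEM;

            return 0;
    }

    static int example_client_exit(struct host1x_client *client)
    {
            /* drops the reference taken by host1x_syncpt_alloc() above */
            host1x_syncpt_put(client->syncpts[0]);

            return 0;
    }
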
+3 -1
drivers/gpu/host1x/syncpt.h
··· 11 #include <linux/atomic.h> 12 #include <linux/host1x.h> 13 #include <linux/kernel.h> 14 #include <linux/sched.h> 15 16 #include "intr.h" ··· 27 }; 28 29 struct host1x_syncpt { 30 unsigned int id; 31 atomic_t min_val; 32 atomic_t max_val; ··· 36 const char *name; 37 bool client_managed; 38 struct host1x *host; 39 - struct host1x_client *client; 40 struct host1x_syncpt_base *base; 41 42 /* interrupt data */
··· 11 #include <linux/atomic.h> 12 #include <linux/host1x.h> 13 #include <linux/kernel.h> 14 + #include <linux/kref.h> 15 #include <linux/sched.h> 16 17 #include "intr.h" ··· 26 }; 27 28 struct host1x_syncpt { 29 + struct kref ref; 30 + 31 unsigned int id; 32 atomic_t min_val; 33 atomic_t max_val; ··· 33 const char *name; 34 bool client_managed; 35 struct host1x *host; 36 struct host1x_syncpt_base *base; 37 38 /* interrupt data */
+3 -3
drivers/staging/media/tegra-video/vi.c
··· 1131 int i; 1132 1133 for (i = 0; i < chan->numgangports; i++) { 1134 - host1x_syncpt_free(chan->mw_ack_sp[i]); 1135 - host1x_syncpt_free(chan->frame_start_sp[i]); 1136 } 1137 } 1138 ··· 1177 mw_sp = host1x_syncpt_request(&vi->client, flags); 1178 if (!mw_sp) { 1179 dev_err(vi->dev, "failed to request memory ack syncpoint\n"); 1180 - host1x_syncpt_free(fs_sp); 1181 ret = -ENOMEM; 1182 goto free_syncpts; 1183 }
··· 1131 int i; 1132 1133 for (i = 0; i < chan->numgangports; i++) { 1134 + host1x_syncpt_put(chan->mw_ack_sp[i]); 1135 + host1x_syncpt_put(chan->frame_start_sp[i]); 1136 } 1137 } 1138 ··· 1177 mw_sp = host1x_syncpt_request(&vi->client, flags); 1178 if (!mw_sp) { 1179 dev_err(vi->dev, "failed to request memory ack syncpoint\n"); 1180 + host1x_syncpt_put(fs_sp); 1181 ret = -ENOMEM; 1182 goto free_syncpts; 1183 }
+15 -3
include/linux/host1x.h
··· 25 26 /** 27 * struct host1x_client_ops - host1x client operations 28 * @init: host1x client initialization code 29 * @exit: host1x client tear down code 30 * @suspend: host1x client suspend code 31 * @resume: host1x client resume code 32 */ 33 struct host1x_client_ops { 34 int (*init)(struct host1x_client *client); 35 int (*exit)(struct host1x_client *client); 36 int (*suspend)(struct host1x_client *client); 37 int (*resume)(struct host1x_client *client); 38 }; ··· 146 struct host1x_syncpt; 147 struct host1x; 148 149 - struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id); 150 u32 host1x_syncpt_id(struct host1x_syncpt *sp); 151 u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); 152 u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); ··· 159 u32 *value); 160 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, 161 unsigned long flags); 162 - void host1x_syncpt_free(struct host1x_syncpt *sp); 163 164 struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); 165 u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); 166 167 /* 168 * host1x channel ··· 230 dma_addr_t *reloc_addr_phys; 231 232 /* Sync point id, number of increments and end related to the submit */ 233 - u32 syncpt_id; 234 u32 syncpt_incrs; 235 u32 syncpt_end; 236
··· 25 26 /** 27 * struct host1x_client_ops - host1x client operations 28 + * @early_init: host1x client early initialization code 29 * @init: host1x client initialization code 30 * @exit: host1x client tear down code 31 + * @late_exit: host1x client late tear down code 32 * @suspend: host1x client suspend code 33 * @resume: host1x client resume code 34 */ 35 struct host1x_client_ops { 36 + int (*early_init)(struct host1x_client *client); 37 int (*init)(struct host1x_client *client); 38 int (*exit)(struct host1x_client *client); 39 + int (*late_exit)(struct host1x_client *client); 40 int (*suspend)(struct host1x_client *client); 41 int (*resume)(struct host1x_client *client); 42 }; ··· 142 struct host1x_syncpt; 143 struct host1x; 144 145 + struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id); 146 + struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id); 147 + struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp); 148 u32 host1x_syncpt_id(struct host1x_syncpt *sp); 149 u32 host1x_syncpt_read_min(struct host1x_syncpt *sp); 150 u32 host1x_syncpt_read_max(struct host1x_syncpt *sp); ··· 153 u32 *value); 154 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client, 155 unsigned long flags); 156 + void host1x_syncpt_put(struct host1x_syncpt *sp); 157 + struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host, 158 + unsigned long flags, 159 + const char *name); 160 161 struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp); 162 u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); 163 + 164 + void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, 165 + u32 syncpt_id); 166 167 /* 168 * host1x channel ··· 218 dma_addr_t *reloc_addr_phys; 219 220 /* Sync point id, number of increments and end related to the submit */ 221 + struct host1x_syncpt *syncpt; 222 u32 syncpt_incrs; 223 u32 syncpt_end; 224