Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/tegra: Implement new UAPI

Implement the non-submission parts of the new UAPI, including
channel management and memory mapping. The UAPI is under the
CONFIG_DRM_TEGRA_STAGING config flag for now.

Signed-off-by: Mikko Perttunen <mperttunen@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>

Authored by Mikko Perttunen and committed by Thierry Reding
(commit d7c591bc, parent e0f2977c).

+389 -16
+1
drivers/gpu/drm/tegra/Makefile
··· 3 3 4 4 tegra-drm-y := \ 5 5 drm.o \ 6 + uapi.o \ 6 7 gem.o \ 7 8 fb.o \ 8 9 dp.o \
+21 -16
drivers/gpu/drm/tegra/drm.c
··· 21 21 #include <drm/drm_prime.h> 22 22 #include <drm/drm_vblank.h> 23 23 24 + #include "uapi.h" 24 25 #include "drm.h" 25 26 #include "gem.h" 26 27 ··· 34 33 35 34 #define CARVEOUT_SZ SZ_64M 36 35 #define CDMA_GATHER_FETCHES_MAX_NB 16383 37 - 38 - struct tegra_drm_file { 39 - struct idr contexts; 40 - struct mutex lock; 41 - }; 42 36 43 37 static int tegra_atomic_check(struct drm_device *drm, 44 38 struct drm_atomic_state *state) ··· 90 94 if (!fpriv) 91 95 return -ENOMEM; 92 96 93 - idr_init_base(&fpriv->contexts, 1); 97 + idr_init_base(&fpriv->legacy_contexts, 1); 98 + xa_init_flags(&fpriv->contexts, XA_FLAGS_ALLOC1); 94 99 mutex_init(&fpriv->lock); 95 100 filp->driver_priv = fpriv; 96 101 ··· 416 419 if (err < 0) 417 420 return err; 418 421 419 - err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); 422 + err = idr_alloc(&fpriv->legacy_contexts, context, 1, 0, GFP_KERNEL); 420 423 if (err < 0) { 421 424 client->ops->close_channel(context); 422 425 return err; ··· 471 474 472 475 mutex_lock(&fpriv->lock); 473 476 474 - context = idr_find(&fpriv->contexts, args->context); 477 + context = idr_find(&fpriv->legacy_contexts, args->context); 475 478 if (!context) { 476 479 err = -EINVAL; 477 480 goto unlock; 478 481 } 479 482 480 - idr_remove(&fpriv->contexts, context->id); 483 + idr_remove(&fpriv->legacy_contexts, context->id); 481 484 tegra_drm_context_free(context); 482 485 483 486 unlock: ··· 496 499 497 500 mutex_lock(&fpriv->lock); 498 501 499 - context = idr_find(&fpriv->contexts, args->context); 502 + context = idr_find(&fpriv->legacy_contexts, args->context); 500 503 if (!context) { 501 504 err = -ENODEV; 502 505 goto unlock; ··· 525 528 526 529 mutex_lock(&fpriv->lock); 527 530 528 - context = idr_find(&fpriv->contexts, args->context); 531 + context = idr_find(&fpriv->legacy_contexts, args->context); 529 532 if (!context) { 530 533 err = -ENODEV; 531 534 goto unlock; ··· 550 553 551 554 mutex_lock(&fpriv->lock); 552 555 553 - context = 
idr_find(&fpriv->contexts, args->context); 556 + context = idr_find(&fpriv->legacy_contexts, args->context); 554 557 if (!context) { 555 558 err = -ENODEV; 556 559 goto unlock; ··· 719 722 720 723 static const struct drm_ioctl_desc tegra_drm_ioctls[] = { 721 724 #ifdef CONFIG_DRM_TEGRA_STAGING 722 - DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, 725 + DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_OPEN, tegra_drm_ioctl_channel_open, 723 726 DRM_RENDER_ALLOW), 724 - DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, 727 + DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_CLOSE, tegra_drm_ioctl_channel_close, 725 728 DRM_RENDER_ALLOW), 729 + DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_MAP, tegra_drm_ioctl_channel_map, 730 + DRM_RENDER_ALLOW), 731 + DRM_IOCTL_DEF_DRV(TEGRA_CHANNEL_UNMAP, tegra_drm_ioctl_channel_unmap, 732 + DRM_RENDER_ALLOW), 733 + 734 + DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_RENDER_ALLOW), 735 + DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_RENDER_ALLOW), 726 736 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, 727 737 DRM_RENDER_ALLOW), 728 738 DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, ··· 783 779 struct tegra_drm_file *fpriv = file->driver_priv; 784 780 785 781 mutex_lock(&fpriv->lock); 786 - idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL); 782 + idr_for_each(&fpriv->legacy_contexts, tegra_drm_context_cleanup, NULL); 783 + tegra_drm_uapi_close_file(fpriv); 787 784 mutex_unlock(&fpriv->lock); 788 785 789 - idr_destroy(&fpriv->contexts); 786 + idr_destroy(&fpriv->legacy_contexts); 790 787 mutex_destroy(&fpriv->lock); 791 788 kfree(fpriv); 792 789 }
+10
drivers/gpu/drm/tegra/drm.h
··· 64 64 struct tegra_display_hub *hub; 65 65 }; 66 66 67 + static inline struct host1x *tegra_drm_to_host1x(struct tegra_drm *tegra) 68 + { 69 + return dev_get_drvdata(tegra->drm->dev->parent); 70 + } 71 + 67 72 struct tegra_drm_client; 68 73 69 74 struct tegra_drm_context { 70 75 struct tegra_drm_client *client; 71 76 struct host1x_channel *channel; 77 + 78 + /* Only used by legacy UAPI. */ 72 79 unsigned int id; 80 + 81 + /* Only used by new UAPI. */ 82 + struct xarray mappings; 73 83 }; 74 84 75 85 struct tegra_drm_client_ops {
+306
drivers/gpu/drm/tegra/uapi.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2020 NVIDIA Corporation */ 3 + 4 + #include <linux/host1x.h> 5 + #include <linux/iommu.h> 6 + #include <linux/list.h> 7 + 8 + #include <drm/drm_drv.h> 9 + #include <drm/drm_file.h> 10 + 11 + #include "drm.h" 12 + #include "uapi.h" 13 + 14 + static void tegra_drm_mapping_release(struct kref *ref) 15 + { 16 + struct tegra_drm_mapping *mapping = 17 + container_of(ref, struct tegra_drm_mapping, ref); 18 + 19 + if (mapping->sgt) 20 + dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction, 21 + DMA_ATTR_SKIP_CPU_SYNC); 22 + 23 + host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt); 24 + host1x_bo_put(mapping->bo); 25 + 26 + kfree(mapping); 27 + } 28 + 29 + void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping) 30 + { 31 + kref_put(&mapping->ref, tegra_drm_mapping_release); 32 + } 33 + 34 + static void tegra_drm_channel_context_close(struct tegra_drm_context *context) 35 + { 36 + struct tegra_drm_mapping *mapping; 37 + unsigned long id; 38 + 39 + xa_for_each(&context->mappings, id, mapping) 40 + tegra_drm_mapping_put(mapping); 41 + 42 + xa_destroy(&context->mappings); 43 + 44 + host1x_channel_put(context->channel); 45 + 46 + kfree(context); 47 + } 48 + 49 + void tegra_drm_uapi_close_file(struct tegra_drm_file *file) 50 + { 51 + struct tegra_drm_context *context; 52 + unsigned long id; 53 + 54 + xa_for_each(&file->contexts, id, context) 55 + tegra_drm_channel_context_close(context); 56 + 57 + xa_destroy(&file->contexts); 58 + } 59 + 60 + static struct tegra_drm_client *tegra_drm_find_client(struct tegra_drm *tegra, u32 class) 61 + { 62 + struct tegra_drm_client *client; 63 + 64 + list_for_each_entry(client, &tegra->clients, list) 65 + if (client->base.class == class) 66 + return client; 67 + 68 + return NULL; 69 + } 70 + 71 + int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_file *file) 72 + { 73 + struct tegra_drm_file *fpriv = file->driver_priv; 74 + 
struct tegra_drm *tegra = drm->dev_private; 75 + struct drm_tegra_channel_open *args = data; 76 + struct tegra_drm_client *client = NULL; 77 + struct tegra_drm_context *context; 78 + int err; 79 + 80 + if (args->flags) 81 + return -EINVAL; 82 + 83 + context = kzalloc(sizeof(*context), GFP_KERNEL); 84 + if (!context) 85 + return -ENOMEM; 86 + 87 + client = tegra_drm_find_client(tegra, args->host1x_class); 88 + if (!client) { 89 + err = -ENODEV; 90 + goto free; 91 + } 92 + 93 + if (client->shared_channel) { 94 + context->channel = host1x_channel_get(client->shared_channel); 95 + } else { 96 + context->channel = host1x_channel_request(&client->base); 97 + if (!context->channel) { 98 + err = -EBUSY; 99 + goto free; 100 + } 101 + } 102 + 103 + err = xa_alloc(&fpriv->contexts, &args->context, context, XA_LIMIT(1, U32_MAX), 104 + GFP_KERNEL); 105 + if (err < 0) 106 + goto put_channel; 107 + 108 + context->client = client; 109 + xa_init_flags(&context->mappings, XA_FLAGS_ALLOC1); 110 + 111 + args->version = client->version; 112 + args->capabilities = 0; 113 + 114 + if (device_get_dma_attr(client->base.dev) == DEV_DMA_COHERENT) 115 + args->capabilities |= DRM_TEGRA_CHANNEL_CAP_CACHE_COHERENT; 116 + 117 + return 0; 118 + 119 + put_channel: 120 + host1x_channel_put(context->channel); 121 + free: 122 + kfree(context); 123 + 124 + return err; 125 + } 126 + 127 + int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, struct drm_file *file) 128 + { 129 + struct tegra_drm_file *fpriv = file->driver_priv; 130 + struct drm_tegra_channel_close *args = data; 131 + struct tegra_drm_context *context; 132 + 133 + mutex_lock(&fpriv->lock); 134 + 135 + context = xa_load(&fpriv->contexts, args->context); 136 + if (!context) { 137 + mutex_unlock(&fpriv->lock); 138 + return -EINVAL; 139 + } 140 + 141 + xa_erase(&fpriv->contexts, args->context); 142 + 143 + mutex_unlock(&fpriv->lock); 144 + 145 + tegra_drm_channel_context_close(context); 146 + 147 + return 0; 148 + } 149 + 150 + 
int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_file *file) 151 + { 152 + struct tegra_drm_file *fpriv = file->driver_priv; 153 + struct drm_tegra_channel_map *args = data; 154 + struct tegra_drm_mapping *mapping; 155 + struct tegra_drm_context *context; 156 + int err = 0; 157 + 158 + if (args->flags & ~DRM_TEGRA_CHANNEL_MAP_READ_WRITE) 159 + return -EINVAL; 160 + 161 + mutex_lock(&fpriv->lock); 162 + 163 + context = xa_load(&fpriv->contexts, args->context); 164 + if (!context) { 165 + mutex_unlock(&fpriv->lock); 166 + return -EINVAL; 167 + } 168 + 169 + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 170 + if (!mapping) { 171 + err = -ENOMEM; 172 + goto unlock; 173 + } 174 + 175 + kref_init(&mapping->ref); 176 + 177 + mapping->dev = context->client->base.dev; 178 + mapping->bo = tegra_gem_lookup(file, args->handle); 179 + if (!mapping->bo) { 180 + err = -EINVAL; 181 + goto unlock; 182 + } 183 + 184 + if (context->client->base.group) { 185 + /* IOMMU domain managed directly using IOMMU API */ 186 + host1x_bo_pin(mapping->dev, mapping->bo, &mapping->iova); 187 + } else { 188 + switch (args->flags & DRM_TEGRA_CHANNEL_MAP_READ_WRITE) { 189 + case DRM_TEGRA_CHANNEL_MAP_READ_WRITE: 190 + mapping->direction = DMA_BIDIRECTIONAL; 191 + break; 192 + 193 + case DRM_TEGRA_CHANNEL_MAP_WRITE: 194 + mapping->direction = DMA_FROM_DEVICE; 195 + break; 196 + 197 + case DRM_TEGRA_CHANNEL_MAP_READ: 198 + mapping->direction = DMA_TO_DEVICE; 199 + break; 200 + 201 + default: 202 + return -EINVAL; 203 + } 204 + 205 + mapping->sgt = host1x_bo_pin(mapping->dev, mapping->bo, NULL); 206 + if (IS_ERR(mapping->sgt)) { 207 + err = PTR_ERR(mapping->sgt); 208 + goto put_gem; 209 + } 210 + 211 + err = dma_map_sgtable(mapping->dev, mapping->sgt, mapping->direction, 212 + DMA_ATTR_SKIP_CPU_SYNC); 213 + if (err) 214 + goto unpin; 215 + 216 + mapping->iova = sg_dma_address(mapping->sgt->sgl); 217 + } 218 + 219 + mapping->iova_end = mapping->iova + 
host1x_to_tegra_bo(mapping->bo)->size; 220 + 221 + err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX), 222 + GFP_KERNEL); 223 + if (err < 0) 224 + goto unmap; 225 + 226 + mutex_unlock(&fpriv->lock); 227 + 228 + return 0; 229 + 230 + unmap: 231 + if (mapping->sgt) { 232 + dma_unmap_sgtable(mapping->dev, mapping->sgt, mapping->direction, 233 + DMA_ATTR_SKIP_CPU_SYNC); 234 + } 235 + unpin: 236 + host1x_bo_unpin(mapping->dev, mapping->bo, mapping->sgt); 237 + put_gem: 238 + host1x_bo_put(mapping->bo); 239 + kfree(mapping); 240 + unlock: 241 + mutex_unlock(&fpriv->lock); 242 + return err; 243 + } 244 + 245 + int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, struct drm_file *file) 246 + { 247 + struct tegra_drm_file *fpriv = file->driver_priv; 248 + struct drm_tegra_channel_unmap *args = data; 249 + struct tegra_drm_mapping *mapping; 250 + struct tegra_drm_context *context; 251 + 252 + mutex_lock(&fpriv->lock); 253 + 254 + context = xa_load(&fpriv->contexts, args->context); 255 + if (!context) { 256 + mutex_unlock(&fpriv->lock); 257 + return -EINVAL; 258 + } 259 + 260 + mapping = xa_erase(&context->mappings, args->mapping); 261 + 262 + mutex_unlock(&fpriv->lock); 263 + 264 + if (!mapping) 265 + return -EINVAL; 266 + 267 + tegra_drm_mapping_put(mapping); 268 + return 0; 269 + } 270 + 271 + int tegra_drm_ioctl_gem_create(struct drm_device *drm, void *data, 272 + struct drm_file *file) 273 + { 274 + struct drm_tegra_gem_create *args = data; 275 + struct tegra_bo *bo; 276 + 277 + if (args->flags) 278 + return -EINVAL; 279 + 280 + bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags, 281 + &args->handle); 282 + if (IS_ERR(bo)) 283 + return PTR_ERR(bo); 284 + 285 + return 0; 286 + } 287 + 288 + int tegra_drm_ioctl_gem_mmap(struct drm_device *drm, void *data, 289 + struct drm_file *file) 290 + { 291 + struct drm_tegra_gem_mmap *args = data; 292 + struct drm_gem_object *gem; 293 + struct tegra_bo *bo; 294 + 295 + 
gem = drm_gem_object_lookup(file, args->handle); 296 + if (!gem) 297 + return -EINVAL; 298 + 299 + bo = to_tegra_bo(gem); 300 + 301 + args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node); 302 + 303 + drm_gem_object_put(gem); 304 + 305 + return 0; 306 + }
+51
drivers/gpu/drm/tegra/uapi.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* Copyright (c) 2020 NVIDIA Corporation */ 3 + 4 + #ifndef _TEGRA_DRM_UAPI_H 5 + #define _TEGRA_DRM_UAPI_H 6 + 7 + #include <linux/dma-mapping.h> 8 + #include <linux/idr.h> 9 + #include <linux/kref.h> 10 + #include <linux/xarray.h> 11 + 12 + #include <drm/drm.h> 13 + 14 + struct drm_file; 15 + struct drm_device; 16 + 17 + struct tegra_drm_file { 18 + /* Legacy UAPI state */ 19 + struct idr legacy_contexts; 20 + struct mutex lock; 21 + 22 + /* New UAPI state */ 23 + struct xarray contexts; 24 + }; 25 + 26 + struct tegra_drm_mapping { 27 + struct kref ref; 28 + 29 + struct device *dev; 30 + struct host1x_bo *bo; 31 + struct sg_table *sgt; 32 + enum dma_data_direction direction; 33 + dma_addr_t iova; 34 + dma_addr_t iova_end; 35 + }; 36 + 37 + int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, 38 + struct drm_file *file); 39 + int tegra_drm_ioctl_channel_close(struct drm_device *drm, void *data, 40 + struct drm_file *file); 41 + int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, 42 + struct drm_file *file); 43 + int tegra_drm_ioctl_channel_unmap(struct drm_device *drm, void *data, 44 + struct drm_file *file); 45 + int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data, 46 + struct drm_file *file); 47 + 48 + void tegra_drm_uapi_close_file(struct tegra_drm_file *file); 49 + void tegra_drm_mapping_put(struct tegra_drm_mapping *mapping); 50 + 51 + #endif