Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2020-11-27-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.11:

UAPI Changes:

Cross-subsystem Changes:

* char/agp: Disable frontend without CONFIG_DRM_LEGACY
* mm: Fix fput in mmap error path; Introduce vma_set_file() to change
vma->vm_file

Core Changes:

* dma-buf: Use sgtables in system heap; Move heap helpers to CMA-heap code;
Skip sync for unmapped buffers; Alloc higher order pages if available;
Respect num_fences when initializing shared fence list
* doc: Improvements around DRM modes and SCALING_FILTER
* Pass full state to connector atomic functions + callee updates
* Cleanups
* shmem: Map pages with caching by default; Cleanups
* ttm: Fix DMA32 for global page pool
* fbdev: Cleanups
* fb-helper: Update framebuffer after userspace writes; Unmap console buffer
during shutdown; Rework damage handling of shadow framebuffer

Driver Changes:

* amdgpu: Multi-hop fixes, Cleanups
* imx: Fix rotation for Vivante tiled formats; Support nearest-neighbour
scaling; Cleanups
* mcde: Fix RGB formats; Support DPI output; Cleanups
* meson: HDMI clock fixes
* panel: Add driver and bindings for Innolux N125HCE-GN1
* panel/s6e63m0: More backlight levels; Fix init; Cleanups
* via: Cleanups
* virtio: Use fence ID for handling fences; Cleanups

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201127083055.GA29139@linux-uq9g

+2134 -905
+2
Documentation/devicetree/bindings/display/panel/panel-simple.yaml
··· 159 159 - innolux,g121x1-l03 160 160 # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel 161 161 - innolux,n116bge 162 + # InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel 163 + - innolux,n125hce-gn1 162 164 # InnoLux 15.6" WXGA TFT LCD panel 163 165 - innolux,n156bge-l21 164 166 # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
+5 -1
drivers/char/agp/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - agpgart-y := backend.o frontend.o generic.o isoch.o 2 + agpgart-y := backend.o generic.o isoch.o 3 3 4 + ifeq ($(CONFIG_DRM_LEGACY),y) 4 5 agpgart-$(CONFIG_COMPAT) += compat_ioctl.o 6 + agpgart-y += frontend.o 7 + endif 8 + 5 9 6 10 obj-$(CONFIG_AGP) += agpgart.o 7 11 obj-$(CONFIG_AGP_ALI) += ali-agp.o
+5
drivers/char/agp/agp.h
··· 186 186 void agp_remove_bridge(struct agp_bridge_data *bridge); 187 187 188 188 /* Frontend routines. */ 189 + #if IS_ENABLED(CONFIG_DRM_LEGACY) 189 190 int agp_frontend_initialize(void); 190 191 void agp_frontend_cleanup(void); 192 + #else 193 + static inline int agp_frontend_initialize(void) { return 0; } 194 + static inline void agp_frontend_cleanup(void) {} 195 + #endif 191 196 192 197 /* Generic routines. */ 193 198 void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
+2 -17
drivers/dma-buf/dma-buf.c
··· 1166 1166 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, 1167 1167 unsigned long pgoff) 1168 1168 { 1169 - struct file *oldfile; 1170 - int ret; 1171 - 1172 1169 if (WARN_ON(!dmabuf || !vma)) 1173 1170 return -EINVAL; 1174 1171 ··· 1183 1186 return -EINVAL; 1184 1187 1185 1188 /* readjust the vma */ 1186 - get_file(dmabuf->file); 1187 - oldfile = vma->vm_file; 1188 - vma->vm_file = dmabuf->file; 1189 + vma_set_file(vma, dmabuf->file); 1189 1190 vma->vm_pgoff = pgoff; 1190 1191 1191 - ret = dmabuf->ops->mmap(dmabuf, vma); 1192 - if (ret) { 1193 - /* restore old parameters on failure */ 1194 - vma->vm_file = oldfile; 1195 - fput(dmabuf->file); 1196 - } else { 1197 - if (oldfile) 1198 - fput(oldfile); 1199 - } 1200 - return ret; 1201 - 1192 + return dmabuf->ops->mmap(dmabuf, vma); 1202 1193 } 1203 1194 EXPORT_SYMBOL_GPL(dma_buf_mmap); 1204 1195
+1 -1
drivers/dma-buf/dma-resv.c
··· 200 200 max = max(old->shared_count + num_fences, 201 201 old->shared_max * 2); 202 202 } else { 203 - max = 4; 203 + max = max(4ul, roundup_pow_of_two(num_fences)); 204 204 } 205 205 206 206 new = dma_resv_list_alloc(max);
-1
drivers/dma-buf/heaps/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - obj-y += heap-helpers.o 3 2 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o 4 3 obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
+281 -50
drivers/dma-buf/heaps/cma_heap.c
··· 2 2 /* 3 3 * DMABUF CMA heap exporter 4 4 * 5 - * Copyright (C) 2012, 2019 Linaro Ltd. 5 + * Copyright (C) 2012, 2019, 2020 Linaro Ltd. 6 6 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. 7 + * 8 + * Also utilizing parts of Andrew Davis' SRAM heap: 9 + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ 10 + * Andrew F. Davis <afd@ti.com> 7 11 */ 8 - 9 12 #include <linux/cma.h> 10 - #include <linux/device.h> 11 13 #include <linux/dma-buf.h> 12 14 #include <linux/dma-heap.h> 13 15 #include <linux/dma-map-ops.h> 14 16 #include <linux/err.h> 15 - #include <linux/errno.h> 16 17 #include <linux/highmem.h> 18 + #include <linux/io.h> 19 + #include <linux/mm.h> 17 20 #include <linux/module.h> 18 - #include <linux/slab.h> 19 21 #include <linux/scatterlist.h> 20 - #include <linux/sched/signal.h> 22 + #include <linux/slab.h> 21 23 22 - #include "heap-helpers.h" 23 24 24 25 struct cma_heap { 25 26 struct dma_heap *heap; 26 27 struct cma *cma; 27 28 }; 28 29 29 - static void cma_heap_free(struct heap_helper_buffer *buffer) 30 - { 31 - struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap); 32 - unsigned long nr_pages = buffer->pagecount; 33 - struct page *cma_pages = buffer->priv_virt; 30 + struct cma_heap_buffer { 31 + struct cma_heap *heap; 32 + struct list_head attachments; 33 + struct mutex lock; 34 + unsigned long len; 35 + struct page *cma_pages; 36 + struct page **pages; 37 + pgoff_t pagecount; 38 + int vmap_cnt; 39 + void *vaddr; 40 + }; 34 41 35 - /* free page list */ 36 - kfree(buffer->pages); 37 - /* release memory */ 38 - cma_release(cma_heap->cma, cma_pages, nr_pages); 42 + struct dma_heap_attachment { 43 + struct device *dev; 44 + struct sg_table table; 45 + struct list_head list; 46 + bool mapped; 47 + }; 48 + 49 + static int cma_heap_attach(struct dma_buf *dmabuf, 50 + struct dma_buf_attachment *attachment) 51 + { 52 + struct cma_heap_buffer *buffer = dmabuf->priv; 53 + struct dma_heap_attachment *a; 54 + int ret; 55 + 
56 + a = kzalloc(sizeof(*a), GFP_KERNEL); 57 + if (!a) 58 + return -ENOMEM; 59 + 60 + ret = sg_alloc_table_from_pages(&a->table, buffer->pages, 61 + buffer->pagecount, 0, 62 + buffer->pagecount << PAGE_SHIFT, 63 + GFP_KERNEL); 64 + if (ret) { 65 + kfree(a); 66 + return ret; 67 + } 68 + 69 + a->dev = attachment->dev; 70 + INIT_LIST_HEAD(&a->list); 71 + a->mapped = false; 72 + 73 + attachment->priv = a; 74 + 75 + mutex_lock(&buffer->lock); 76 + list_add(&a->list, &buffer->attachments); 77 + mutex_unlock(&buffer->lock); 78 + 79 + return 0; 80 + } 81 + 82 + static void cma_heap_detach(struct dma_buf *dmabuf, 83 + struct dma_buf_attachment *attachment) 84 + { 85 + struct cma_heap_buffer *buffer = dmabuf->priv; 86 + struct dma_heap_attachment *a = attachment->priv; 87 + 88 + mutex_lock(&buffer->lock); 89 + list_del(&a->list); 90 + mutex_unlock(&buffer->lock); 91 + 92 + sg_free_table(&a->table); 93 + kfree(a); 94 + } 95 + 96 + static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment, 97 + enum dma_data_direction direction) 98 + { 99 + struct dma_heap_attachment *a = attachment->priv; 100 + struct sg_table *table = &a->table; 101 + int ret; 102 + 103 + ret = dma_map_sgtable(attachment->dev, table, direction, 0); 104 + if (ret) 105 + return ERR_PTR(-ENOMEM); 106 + a->mapped = true; 107 + return table; 108 + } 109 + 110 + static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, 111 + struct sg_table *table, 112 + enum dma_data_direction direction) 113 + { 114 + struct dma_heap_attachment *a = attachment->priv; 115 + 116 + a->mapped = false; 117 + dma_unmap_sgtable(attachment->dev, table, direction, 0); 118 + } 119 + 120 + static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, 121 + enum dma_data_direction direction) 122 + { 123 + struct cma_heap_buffer *buffer = dmabuf->priv; 124 + struct dma_heap_attachment *a; 125 + 126 + if (buffer->vmap_cnt) 127 + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); 128 + 129 
+ mutex_lock(&buffer->lock); 130 + list_for_each_entry(a, &buffer->attachments, list) { 131 + if (!a->mapped) 132 + continue; 133 + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); 134 + } 135 + mutex_unlock(&buffer->lock); 136 + 137 + return 0; 138 + } 139 + 140 + static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, 141 + enum dma_data_direction direction) 142 + { 143 + struct cma_heap_buffer *buffer = dmabuf->priv; 144 + struct dma_heap_attachment *a; 145 + 146 + if (buffer->vmap_cnt) 147 + flush_kernel_vmap_range(buffer->vaddr, buffer->len); 148 + 149 + mutex_lock(&buffer->lock); 150 + list_for_each_entry(a, &buffer->attachments, list) { 151 + if (!a->mapped) 152 + continue; 153 + dma_sync_sgtable_for_device(a->dev, &a->table, direction); 154 + } 155 + mutex_unlock(&buffer->lock); 156 + 157 + return 0; 158 + } 159 + 160 + static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf) 161 + { 162 + struct vm_area_struct *vma = vmf->vma; 163 + struct cma_heap_buffer *buffer = vma->vm_private_data; 164 + 165 + if (vmf->pgoff > buffer->pagecount) 166 + return VM_FAULT_SIGBUS; 167 + 168 + vmf->page = buffer->pages[vmf->pgoff]; 169 + get_page(vmf->page); 170 + 171 + return 0; 172 + } 173 + 174 + static const struct vm_operations_struct dma_heap_vm_ops = { 175 + .fault = cma_heap_vm_fault, 176 + }; 177 + 178 + static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) 179 + { 180 + struct cma_heap_buffer *buffer = dmabuf->priv; 181 + 182 + if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) 183 + return -EINVAL; 184 + 185 + vma->vm_ops = &dma_heap_vm_ops; 186 + vma->vm_private_data = buffer; 187 + 188 + return 0; 189 + } 190 + 191 + static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer) 192 + { 193 + void *vaddr; 194 + 195 + vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); 196 + if (!vaddr) 197 + return ERR_PTR(-ENOMEM); 198 + 199 + return vaddr; 200 + } 201 + 202 + static int cma_heap_vmap(struct dma_buf 
*dmabuf, struct dma_buf_map *map) 203 + { 204 + struct cma_heap_buffer *buffer = dmabuf->priv; 205 + void *vaddr; 206 + int ret = 0; 207 + 208 + mutex_lock(&buffer->lock); 209 + if (buffer->vmap_cnt) { 210 + buffer->vmap_cnt++; 211 + dma_buf_map_set_vaddr(map, buffer->vaddr); 212 + goto out; 213 + } 214 + 215 + vaddr = cma_heap_do_vmap(buffer); 216 + if (IS_ERR(vaddr)) { 217 + ret = PTR_ERR(vaddr); 218 + goto out; 219 + } 220 + buffer->vaddr = vaddr; 221 + buffer->vmap_cnt++; 222 + dma_buf_map_set_vaddr(map, buffer->vaddr); 223 + out: 224 + mutex_unlock(&buffer->lock); 225 + 226 + return ret; 227 + } 228 + 229 + static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 230 + { 231 + struct cma_heap_buffer *buffer = dmabuf->priv; 232 + 233 + mutex_lock(&buffer->lock); 234 + if (!--buffer->vmap_cnt) { 235 + vunmap(buffer->vaddr); 236 + buffer->vaddr = NULL; 237 + } 238 + mutex_unlock(&buffer->lock); 239 + dma_buf_map_clear(map); 240 + } 241 + 242 + static void cma_heap_dma_buf_release(struct dma_buf *dmabuf) 243 + { 244 + struct cma_heap_buffer *buffer = dmabuf->priv; 245 + struct cma_heap *cma_heap = buffer->heap; 246 + 247 + if (buffer->vmap_cnt > 0) { 248 + WARN(1, "%s: buffer still mapped in the kernel\n", __func__); 249 + vunmap(buffer->vaddr); 250 + buffer->vaddr = NULL; 251 + } 252 + 253 + cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); 39 254 kfree(buffer); 40 255 } 41 256 42 - /* dmabuf heap CMA operations functions */ 257 + static const struct dma_buf_ops cma_heap_buf_ops = { 258 + .attach = cma_heap_attach, 259 + .detach = cma_heap_detach, 260 + .map_dma_buf = cma_heap_map_dma_buf, 261 + .unmap_dma_buf = cma_heap_unmap_dma_buf, 262 + .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access, 263 + .end_cpu_access = cma_heap_dma_buf_end_cpu_access, 264 + .mmap = cma_heap_mmap, 265 + .vmap = cma_heap_vmap, 266 + .vunmap = cma_heap_vunmap, 267 + .release = cma_heap_dma_buf_release, 268 + }; 269 + 43 270 static int 
cma_heap_allocate(struct dma_heap *heap, 44 - unsigned long len, 45 - unsigned long fd_flags, 46 - unsigned long heap_flags) 271 + unsigned long len, 272 + unsigned long fd_flags, 273 + unsigned long heap_flags) 47 274 { 48 275 struct cma_heap *cma_heap = dma_heap_get_drvdata(heap); 49 - struct heap_helper_buffer *helper_buffer; 50 - struct page *cma_pages; 276 + struct cma_heap_buffer *buffer; 277 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 51 278 size_t size = PAGE_ALIGN(len); 52 - unsigned long nr_pages = size >> PAGE_SHIFT; 279 + pgoff_t pagecount = size >> PAGE_SHIFT; 53 280 unsigned long align = get_order(size); 281 + struct page *cma_pages; 54 282 struct dma_buf *dmabuf; 55 283 int ret = -ENOMEM; 56 284 pgoff_t pg; 57 285 286 + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 287 + if (!buffer) 288 + return -ENOMEM; 289 + 290 + INIT_LIST_HEAD(&buffer->attachments); 291 + mutex_init(&buffer->lock); 292 + buffer->len = size; 293 + 58 294 if (align > CONFIG_CMA_ALIGNMENT) 59 295 align = CONFIG_CMA_ALIGNMENT; 60 296 61 - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); 62 - if (!helper_buffer) 63 - return -ENOMEM; 64 - 65 - init_heap_helper_buffer(helper_buffer, cma_heap_free); 66 - helper_buffer->heap = heap; 67 - helper_buffer->size = len; 68 - 69 - cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false); 297 + cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false); 70 298 if (!cma_pages) 71 - goto free_buf; 299 + goto free_buffer; 72 300 301 + /* Clear the cma pages */ 73 302 if (PageHighMem(cma_pages)) { 74 - unsigned long nr_clear_pages = nr_pages; 303 + unsigned long nr_clear_pages = pagecount; 75 304 struct page *page = cma_pages; 76 305 77 306 while (nr_clear_pages > 0) { ··· 314 85 */ 315 86 if (fatal_signal_pending(current)) 316 87 goto free_cma; 317 - 318 88 page++; 319 89 nr_clear_pages--; 320 90 } ··· 321 93 memset(page_address(cma_pages), 0, size); 322 94 } 323 95 324 - helper_buffer->pagecount = nr_pages; 325 - 
helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, 326 - sizeof(*helper_buffer->pages), 327 - GFP_KERNEL); 328 - if (!helper_buffer->pages) { 96 + buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL); 97 + if (!buffer->pages) { 329 98 ret = -ENOMEM; 330 99 goto free_cma; 331 100 } 332 101 333 - for (pg = 0; pg < helper_buffer->pagecount; pg++) 334 - helper_buffer->pages[pg] = &cma_pages[pg]; 102 + for (pg = 0; pg < pagecount; pg++) 103 + buffer->pages[pg] = &cma_pages[pg]; 104 + 105 + buffer->cma_pages = cma_pages; 106 + buffer->heap = cma_heap; 107 + buffer->pagecount = pagecount; 335 108 336 109 /* create the dmabuf */ 337 - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); 110 + exp_info.ops = &cma_heap_buf_ops; 111 + exp_info.size = buffer->len; 112 + exp_info.flags = fd_flags; 113 + exp_info.priv = buffer; 114 + dmabuf = dma_buf_export(&exp_info); 338 115 if (IS_ERR(dmabuf)) { 339 116 ret = PTR_ERR(dmabuf); 340 117 goto free_pages; 341 118 } 342 - 343 - helper_buffer->dmabuf = dmabuf; 344 - helper_buffer->priv_virt = cma_pages; 345 119 346 120 ret = dma_buf_fd(dmabuf, fd_flags); 347 121 if (ret < 0) { ··· 355 125 return ret; 356 126 357 127 free_pages: 358 - kfree(helper_buffer->pages); 128 + kfree(buffer->pages); 359 129 free_cma: 360 - cma_release(cma_heap->cma, cma_pages, nr_pages); 361 - free_buf: 362 - kfree(helper_buffer); 130 + cma_release(cma_heap->cma, cma_pages, pagecount); 131 + free_buffer: 132 + kfree(buffer); 133 + 363 134 return ret; 364 135 } 365 136
-274
drivers/dma-buf/heaps/heap-helpers.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - #include <linux/device.h> 3 - #include <linux/dma-buf.h> 4 - #include <linux/err.h> 5 - #include <linux/highmem.h> 6 - #include <linux/idr.h> 7 - #include <linux/list.h> 8 - #include <linux/slab.h> 9 - #include <linux/uaccess.h> 10 - #include <linux/vmalloc.h> 11 - #include <uapi/linux/dma-heap.h> 12 - 13 - #include "heap-helpers.h" 14 - 15 - void init_heap_helper_buffer(struct heap_helper_buffer *buffer, 16 - void (*free)(struct heap_helper_buffer *)) 17 - { 18 - buffer->priv_virt = NULL; 19 - mutex_init(&buffer->lock); 20 - buffer->vmap_cnt = 0; 21 - buffer->vaddr = NULL; 22 - buffer->pagecount = 0; 23 - buffer->pages = NULL; 24 - INIT_LIST_HEAD(&buffer->attachments); 25 - buffer->free = free; 26 - } 27 - 28 - struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, 29 - int fd_flags) 30 - { 31 - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 32 - 33 - exp_info.ops = &heap_helper_ops; 34 - exp_info.size = buffer->size; 35 - exp_info.flags = fd_flags; 36 - exp_info.priv = buffer; 37 - 38 - return dma_buf_export(&exp_info); 39 - } 40 - 41 - static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer) 42 - { 43 - void *vaddr; 44 - 45 - vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL); 46 - if (!vaddr) 47 - return ERR_PTR(-ENOMEM); 48 - 49 - return vaddr; 50 - } 51 - 52 - static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer) 53 - { 54 - if (buffer->vmap_cnt > 0) { 55 - WARN(1, "%s: buffer still mapped in the kernel\n", __func__); 56 - vunmap(buffer->vaddr); 57 - } 58 - 59 - buffer->free(buffer); 60 - } 61 - 62 - static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer) 63 - { 64 - void *vaddr; 65 - 66 - if (buffer->vmap_cnt) { 67 - buffer->vmap_cnt++; 68 - return buffer->vaddr; 69 - } 70 - vaddr = dma_heap_map_kernel(buffer); 71 - if (IS_ERR(vaddr)) 72 - return vaddr; 73 - buffer->vaddr = vaddr; 74 - buffer->vmap_cnt++; 75 - return vaddr; 76 - } 77 - 
78 - static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer) 79 - { 80 - if (!--buffer->vmap_cnt) { 81 - vunmap(buffer->vaddr); 82 - buffer->vaddr = NULL; 83 - } 84 - } 85 - 86 - struct dma_heaps_attachment { 87 - struct device *dev; 88 - struct sg_table table; 89 - struct list_head list; 90 - }; 91 - 92 - static int dma_heap_attach(struct dma_buf *dmabuf, 93 - struct dma_buf_attachment *attachment) 94 - { 95 - struct dma_heaps_attachment *a; 96 - struct heap_helper_buffer *buffer = dmabuf->priv; 97 - int ret; 98 - 99 - a = kzalloc(sizeof(*a), GFP_KERNEL); 100 - if (!a) 101 - return -ENOMEM; 102 - 103 - ret = sg_alloc_table_from_pages(&a->table, buffer->pages, 104 - buffer->pagecount, 0, 105 - buffer->pagecount << PAGE_SHIFT, 106 - GFP_KERNEL); 107 - if (ret) { 108 - kfree(a); 109 - return ret; 110 - } 111 - 112 - a->dev = attachment->dev; 113 - INIT_LIST_HEAD(&a->list); 114 - 115 - attachment->priv = a; 116 - 117 - mutex_lock(&buffer->lock); 118 - list_add(&a->list, &buffer->attachments); 119 - mutex_unlock(&buffer->lock); 120 - 121 - return 0; 122 - } 123 - 124 - static void dma_heap_detach(struct dma_buf *dmabuf, 125 - struct dma_buf_attachment *attachment) 126 - { 127 - struct dma_heaps_attachment *a = attachment->priv; 128 - struct heap_helper_buffer *buffer = dmabuf->priv; 129 - 130 - mutex_lock(&buffer->lock); 131 - list_del(&a->list); 132 - mutex_unlock(&buffer->lock); 133 - 134 - sg_free_table(&a->table); 135 - kfree(a); 136 - } 137 - 138 - static 139 - struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment, 140 - enum dma_data_direction direction) 141 - { 142 - struct dma_heaps_attachment *a = attachment->priv; 143 - struct sg_table *table = &a->table; 144 - int ret; 145 - 146 - ret = dma_map_sgtable(attachment->dev, table, direction, 0); 147 - if (ret) 148 - table = ERR_PTR(ret); 149 - return table; 150 - } 151 - 152 - static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, 153 - struct sg_table 
*table, 154 - enum dma_data_direction direction) 155 - { 156 - dma_unmap_sgtable(attachment->dev, table, direction, 0); 157 - } 158 - 159 - static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf) 160 - { 161 - struct vm_area_struct *vma = vmf->vma; 162 - struct heap_helper_buffer *buffer = vma->vm_private_data; 163 - 164 - if (vmf->pgoff > buffer->pagecount) 165 - return VM_FAULT_SIGBUS; 166 - 167 - vmf->page = buffer->pages[vmf->pgoff]; 168 - get_page(vmf->page); 169 - 170 - return 0; 171 - } 172 - 173 - static const struct vm_operations_struct dma_heap_vm_ops = { 174 - .fault = dma_heap_vm_fault, 175 - }; 176 - 177 - static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) 178 - { 179 - struct heap_helper_buffer *buffer = dmabuf->priv; 180 - 181 - if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) 182 - return -EINVAL; 183 - 184 - vma->vm_ops = &dma_heap_vm_ops; 185 - vma->vm_private_data = buffer; 186 - 187 - return 0; 188 - } 189 - 190 - static void dma_heap_dma_buf_release(struct dma_buf *dmabuf) 191 - { 192 - struct heap_helper_buffer *buffer = dmabuf->priv; 193 - 194 - dma_heap_buffer_destroy(buffer); 195 - } 196 - 197 - static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, 198 - enum dma_data_direction direction) 199 - { 200 - struct heap_helper_buffer *buffer = dmabuf->priv; 201 - struct dma_heaps_attachment *a; 202 - int ret = 0; 203 - 204 - mutex_lock(&buffer->lock); 205 - 206 - if (buffer->vmap_cnt) 207 - invalidate_kernel_vmap_range(buffer->vaddr, buffer->size); 208 - 209 - list_for_each_entry(a, &buffer->attachments, list) { 210 - dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents, 211 - direction); 212 - } 213 - mutex_unlock(&buffer->lock); 214 - 215 - return ret; 216 - } 217 - 218 - static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, 219 - enum dma_data_direction direction) 220 - { 221 - struct heap_helper_buffer *buffer = dmabuf->priv; 222 - struct dma_heaps_attachment *a; 223 - 224 - 
mutex_lock(&buffer->lock); 225 - 226 - if (buffer->vmap_cnt) 227 - flush_kernel_vmap_range(buffer->vaddr, buffer->size); 228 - 229 - list_for_each_entry(a, &buffer->attachments, list) { 230 - dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents, 231 - direction); 232 - } 233 - mutex_unlock(&buffer->lock); 234 - 235 - return 0; 236 - } 237 - 238 - static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 239 - { 240 - struct heap_helper_buffer *buffer = dmabuf->priv; 241 - void *vaddr; 242 - 243 - mutex_lock(&buffer->lock); 244 - vaddr = dma_heap_buffer_vmap_get(buffer); 245 - mutex_unlock(&buffer->lock); 246 - 247 - if (!vaddr) 248 - return -ENOMEM; 249 - dma_buf_map_set_vaddr(map, vaddr); 250 - 251 - return 0; 252 - } 253 - 254 - static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 255 - { 256 - struct heap_helper_buffer *buffer = dmabuf->priv; 257 - 258 - mutex_lock(&buffer->lock); 259 - dma_heap_buffer_vmap_put(buffer); 260 - mutex_unlock(&buffer->lock); 261 - } 262 - 263 - const struct dma_buf_ops heap_helper_ops = { 264 - .map_dma_buf = dma_heap_map_dma_buf, 265 - .unmap_dma_buf = dma_heap_unmap_dma_buf, 266 - .mmap = dma_heap_mmap, 267 - .release = dma_heap_dma_buf_release, 268 - .attach = dma_heap_attach, 269 - .detach = dma_heap_detach, 270 - .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access, 271 - .end_cpu_access = dma_heap_dma_buf_end_cpu_access, 272 - .vmap = dma_heap_dma_buf_vmap, 273 - .vunmap = dma_heap_dma_buf_vunmap, 274 - };
-53
drivers/dma-buf/heaps/heap-helpers.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * DMABUF Heaps helper code 4 - * 5 - * Copyright (C) 2011 Google, Inc. 6 - * Copyright (C) 2019 Linaro Ltd. 7 - */ 8 - 9 - #ifndef _HEAP_HELPERS_H 10 - #define _HEAP_HELPERS_H 11 - 12 - #include <linux/dma-heap.h> 13 - #include <linux/list.h> 14 - 15 - /** 16 - * struct heap_helper_buffer - helper buffer metadata 17 - * @heap: back pointer to the heap the buffer came from 18 - * @dmabuf: backing dma-buf for this buffer 19 - * @size: size of the buffer 20 - * @priv_virt pointer to heap specific private value 21 - * @lock mutext to protect the data in this structure 22 - * @vmap_cnt count of vmap references on the buffer 23 - * @vaddr vmap'ed virtual address 24 - * @pagecount number of pages in the buffer 25 - * @pages list of page pointers 26 - * @attachments list of device attachments 27 - * 28 - * @free heap callback to free the buffer 29 - */ 30 - struct heap_helper_buffer { 31 - struct dma_heap *heap; 32 - struct dma_buf *dmabuf; 33 - size_t size; 34 - 35 - void *priv_virt; 36 - struct mutex lock; 37 - int vmap_cnt; 38 - void *vaddr; 39 - pgoff_t pagecount; 40 - struct page **pages; 41 - struct list_head attachments; 42 - 43 - void (*free)(struct heap_helper_buffer *buffer); 44 - }; 45 - 46 - void init_heap_helper_buffer(struct heap_helper_buffer *buffer, 47 - void (*free)(struct heap_helper_buffer *)); 48 - 49 - struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer, 50 - int fd_flags); 51 - 52 - extern const struct dma_buf_ops heap_helper_ops; 53 - #endif /* _HEAP_HELPERS_H */
+367 -47
drivers/dma-buf/heaps/system_heap.c
··· 3 3 * DMABUF System heap exporter 4 4 * 5 5 * Copyright (C) 2011 Google, Inc. 6 - * Copyright (C) 2019 Linaro Ltd. 6 + * Copyright (C) 2019, 2020 Linaro Ltd. 7 + * 8 + * Portions based off of Andrew Davis' SRAM heap: 9 + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ 10 + * Andrew F. Davis <afd@ti.com> 7 11 */ 8 12 9 13 #include <linux/dma-buf.h> ··· 19 15 #include <linux/module.h> 20 16 #include <linux/scatterlist.h> 21 17 #include <linux/slab.h> 22 - #include <linux/sched/signal.h> 23 - #include <asm/page.h> 18 + #include <linux/vmalloc.h> 24 19 25 - #include "heap-helpers.h" 20 + static struct dma_heap *sys_heap; 26 21 27 - struct dma_heap *sys_heap; 22 + struct system_heap_buffer { 23 + struct dma_heap *heap; 24 + struct list_head attachments; 25 + struct mutex lock; 26 + unsigned long len; 27 + struct sg_table sg_table; 28 + int vmap_cnt; 29 + void *vaddr; 30 + }; 28 31 29 - static void system_heap_free(struct heap_helper_buffer *buffer) 32 + struct dma_heap_attachment { 33 + struct device *dev; 34 + struct sg_table *table; 35 + struct list_head list; 36 + bool mapped; 37 + }; 38 + 39 + #define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \ 40 + | __GFP_NORETRY) & ~__GFP_RECLAIM) \ 41 + | __GFP_COMP) 42 + #define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP) 43 + static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP}; 44 + /* 45 + * The selection of the orders used for allocation (1MB, 64K, 4K) is designed 46 + * to match with the sizes often found in IOMMUs. Using order 4 pages instead 47 + * of order 0 pages can significantly improve the performance of many IOMMUs 48 + * by reducing TLB pressure and time spent updating page tables. 
49 + */ 50 + static const unsigned int orders[] = {8, 4, 0}; 51 + #define NUM_ORDERS ARRAY_SIZE(orders) 52 + 53 + static struct sg_table *dup_sg_table(struct sg_table *table) 30 54 { 31 - pgoff_t pg; 55 + struct sg_table *new_table; 56 + int ret, i; 57 + struct scatterlist *sg, *new_sg; 32 58 33 - for (pg = 0; pg < buffer->pagecount; pg++) 34 - __free_page(buffer->pages[pg]); 35 - kfree(buffer->pages); 59 + new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); 60 + if (!new_table) 61 + return ERR_PTR(-ENOMEM); 62 + 63 + ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); 64 + if (ret) { 65 + kfree(new_table); 66 + return ERR_PTR(-ENOMEM); 67 + } 68 + 69 + new_sg = new_table->sgl; 70 + for_each_sgtable_sg(table, sg, i) { 71 + sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); 72 + new_sg = sg_next(new_sg); 73 + } 74 + 75 + return new_table; 76 + } 77 + 78 + static int system_heap_attach(struct dma_buf *dmabuf, 79 + struct dma_buf_attachment *attachment) 80 + { 81 + struct system_heap_buffer *buffer = dmabuf->priv; 82 + struct dma_heap_attachment *a; 83 + struct sg_table *table; 84 + 85 + a = kzalloc(sizeof(*a), GFP_KERNEL); 86 + if (!a) 87 + return -ENOMEM; 88 + 89 + table = dup_sg_table(&buffer->sg_table); 90 + if (IS_ERR(table)) { 91 + kfree(a); 92 + return -ENOMEM; 93 + } 94 + 95 + a->table = table; 96 + a->dev = attachment->dev; 97 + INIT_LIST_HEAD(&a->list); 98 + a->mapped = false; 99 + 100 + attachment->priv = a; 101 + 102 + mutex_lock(&buffer->lock); 103 + list_add(&a->list, &buffer->attachments); 104 + mutex_unlock(&buffer->lock); 105 + 106 + return 0; 107 + } 108 + 109 + static void system_heap_detach(struct dma_buf *dmabuf, 110 + struct dma_buf_attachment *attachment) 111 + { 112 + struct system_heap_buffer *buffer = dmabuf->priv; 113 + struct dma_heap_attachment *a = attachment->priv; 114 + 115 + mutex_lock(&buffer->lock); 116 + list_del(&a->list); 117 + mutex_unlock(&buffer->lock); 118 + 119 + sg_free_table(a->table); 120 + 
kfree(a->table); 121 + kfree(a); 122 + } 123 + 124 + static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment, 125 + enum dma_data_direction direction) 126 + { 127 + struct dma_heap_attachment *a = attachment->priv; 128 + struct sg_table *table = a->table; 129 + int ret; 130 + 131 + ret = dma_map_sgtable(attachment->dev, table, direction, 0); 132 + if (ret) 133 + return ERR_PTR(ret); 134 + 135 + a->mapped = true; 136 + return table; 137 + } 138 + 139 + static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, 140 + struct sg_table *table, 141 + enum dma_data_direction direction) 142 + { 143 + struct dma_heap_attachment *a = attachment->priv; 144 + 145 + a->mapped = false; 146 + dma_unmap_sgtable(attachment->dev, table, direction, 0); 147 + } 148 + 149 + static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, 150 + enum dma_data_direction direction) 151 + { 152 + struct system_heap_buffer *buffer = dmabuf->priv; 153 + struct dma_heap_attachment *a; 154 + 155 + mutex_lock(&buffer->lock); 156 + 157 + if (buffer->vmap_cnt) 158 + invalidate_kernel_vmap_range(buffer->vaddr, buffer->len); 159 + 160 + list_for_each_entry(a, &buffer->attachments, list) { 161 + if (!a->mapped) 162 + continue; 163 + dma_sync_sgtable_for_cpu(a->dev, a->table, direction); 164 + } 165 + mutex_unlock(&buffer->lock); 166 + 167 + return 0; 168 + } 169 + 170 + static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, 171 + enum dma_data_direction direction) 172 + { 173 + struct system_heap_buffer *buffer = dmabuf->priv; 174 + struct dma_heap_attachment *a; 175 + 176 + mutex_lock(&buffer->lock); 177 + 178 + if (buffer->vmap_cnt) 179 + flush_kernel_vmap_range(buffer->vaddr, buffer->len); 180 + 181 + list_for_each_entry(a, &buffer->attachments, list) { 182 + if (!a->mapped) 183 + continue; 184 + dma_sync_sgtable_for_device(a->dev, a->table, direction); 185 + } 186 + mutex_unlock(&buffer->lock); 187 + 188 + return 0; 189 + } 190 
+ 191 + static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) 192 + { 193 + struct system_heap_buffer *buffer = dmabuf->priv; 194 + struct sg_table *table = &buffer->sg_table; 195 + unsigned long addr = vma->vm_start; 196 + struct sg_page_iter piter; 197 + int ret; 198 + 199 + for_each_sgtable_page(table, &piter, vma->vm_pgoff) { 200 + struct page *page = sg_page_iter_page(&piter); 201 + 202 + ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE, 203 + vma->vm_page_prot); 204 + if (ret) 205 + return ret; 206 + addr += PAGE_SIZE; 207 + if (addr >= vma->vm_end) 208 + return 0; 209 + } 210 + return 0; 211 + } 212 + 213 + static void *system_heap_do_vmap(struct system_heap_buffer *buffer) 214 + { 215 + struct sg_table *table = &buffer->sg_table; 216 + int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE; 217 + struct page **pages = vmalloc(sizeof(struct page *) * npages); 218 + struct page **tmp = pages; 219 + struct sg_page_iter piter; 220 + void *vaddr; 221 + 222 + if (!pages) 223 + return ERR_PTR(-ENOMEM); 224 + 225 + for_each_sgtable_page(table, &piter, 0) { 226 + WARN_ON(tmp - pages >= npages); 227 + *tmp++ = sg_page_iter_page(&piter); 228 + } 229 + 230 + vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL); 231 + vfree(pages); 232 + 233 + if (!vaddr) 234 + return ERR_PTR(-ENOMEM); 235 + 236 + return vaddr; 237 + } 238 + 239 + static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 240 + { 241 + struct system_heap_buffer *buffer = dmabuf->priv; 242 + void *vaddr; 243 + int ret = 0; 244 + 245 + mutex_lock(&buffer->lock); 246 + if (buffer->vmap_cnt) { 247 + buffer->vmap_cnt++; 248 + dma_buf_map_set_vaddr(map, buffer->vaddr); 249 + goto out; 250 + } 251 + 252 + vaddr = system_heap_do_vmap(buffer); 253 + if (IS_ERR(vaddr)) { 254 + ret = PTR_ERR(vaddr); 255 + goto out; 256 + } 257 + 258 + buffer->vaddr = vaddr; 259 + buffer->vmap_cnt++; 260 + dma_buf_map_set_vaddr(map, buffer->vaddr); 261 + out: 262 + 
mutex_unlock(&buffer->lock); 263 + 264 + return ret; 265 + } 266 + 267 + static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map) 268 + { 269 + struct system_heap_buffer *buffer = dmabuf->priv; 270 + 271 + mutex_lock(&buffer->lock); 272 + if (!--buffer->vmap_cnt) { 273 + vunmap(buffer->vaddr); 274 + buffer->vaddr = NULL; 275 + } 276 + mutex_unlock(&buffer->lock); 277 + dma_buf_map_clear(map); 278 + } 279 + 280 + static void system_heap_dma_buf_release(struct dma_buf *dmabuf) 281 + { 282 + struct system_heap_buffer *buffer = dmabuf->priv; 283 + struct sg_table *table; 284 + struct scatterlist *sg; 285 + int i; 286 + 287 + table = &buffer->sg_table; 288 + for_each_sg(table->sgl, sg, table->nents, i) { 289 + struct page *page = sg_page(sg); 290 + 291 + __free_pages(page, compound_order(page)); 292 + } 293 + sg_free_table(table); 36 294 kfree(buffer); 295 + } 296 + 297 + static const struct dma_buf_ops system_heap_buf_ops = { 298 + .attach = system_heap_attach, 299 + .detach = system_heap_detach, 300 + .map_dma_buf = system_heap_map_dma_buf, 301 + .unmap_dma_buf = system_heap_unmap_dma_buf, 302 + .begin_cpu_access = system_heap_dma_buf_begin_cpu_access, 303 + .end_cpu_access = system_heap_dma_buf_end_cpu_access, 304 + .mmap = system_heap_mmap, 305 + .vmap = system_heap_vmap, 306 + .vunmap = system_heap_vunmap, 307 + .release = system_heap_dma_buf_release, 308 + }; 309 + 310 + static struct page *alloc_largest_available(unsigned long size, 311 + unsigned int max_order) 312 + { 313 + struct page *page; 314 + int i; 315 + 316 + for (i = 0; i < NUM_ORDERS; i++) { 317 + if (size < (PAGE_SIZE << orders[i])) 318 + continue; 319 + if (max_order < orders[i]) 320 + continue; 321 + 322 + page = alloc_pages(order_flags[i], orders[i]); 323 + if (!page) 324 + continue; 325 + return page; 326 + } 327 + return NULL; 37 328 } 38 329 39 330 static int system_heap_allocate(struct dma_heap *heap, ··· 336 37 unsigned long fd_flags, 337 38 unsigned long heap_flags) 
338 39 { 339 - struct heap_helper_buffer *helper_buffer; 40 + struct system_heap_buffer *buffer; 41 + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 42 + unsigned long size_remaining = len; 43 + unsigned int max_order = orders[0]; 340 44 struct dma_buf *dmabuf; 341 - int ret = -ENOMEM; 342 - pgoff_t pg; 45 + struct sg_table *table; 46 + struct scatterlist *sg; 47 + struct list_head pages; 48 + struct page *page, *tmp_page; 49 + int i, ret = -ENOMEM; 343 50 344 - helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL); 345 - if (!helper_buffer) 51 + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); 52 + if (!buffer) 346 53 return -ENOMEM; 347 54 348 - init_heap_helper_buffer(helper_buffer, system_heap_free); 349 - helper_buffer->heap = heap; 350 - helper_buffer->size = len; 55 + INIT_LIST_HEAD(&buffer->attachments); 56 + mutex_init(&buffer->lock); 57 + buffer->heap = heap; 58 + buffer->len = len; 351 59 352 - helper_buffer->pagecount = len / PAGE_SIZE; 353 - helper_buffer->pages = kmalloc_array(helper_buffer->pagecount, 354 - sizeof(*helper_buffer->pages), 355 - GFP_KERNEL); 356 - if (!helper_buffer->pages) { 357 - ret = -ENOMEM; 358 - goto err0; 359 - } 360 - 361 - for (pg = 0; pg < helper_buffer->pagecount; pg++) { 60 + INIT_LIST_HEAD(&pages); 61 + i = 0; 62 + while (size_remaining > 0) { 362 63 /* 363 64 * Avoid trying to allocate memory if the process 364 - * has been killed by by SIGKILL 65 + * has been killed by SIGKILL 365 66 */ 366 67 if (fatal_signal_pending(current)) 367 - goto err1; 68 + goto free_buffer; 368 69 369 - helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO); 370 - if (!helper_buffer->pages[pg]) 371 - goto err1; 70 + page = alloc_largest_available(size_remaining, max_order); 71 + if (!page) 72 + goto free_buffer; 73 + 74 + list_add_tail(&page->lru, &pages); 75 + size_remaining -= page_size(page); 76 + max_order = compound_order(page); 77 + i++; 78 + } 79 + 80 + table = &buffer->sg_table; 81 + if (sg_alloc_table(table, i, GFP_KERNEL)) 82 + 
goto free_buffer; 83 + 84 + sg = table->sgl; 85 + list_for_each_entry_safe(page, tmp_page, &pages, lru) { 86 + sg_set_page(sg, page, page_size(page), 0); 87 + sg = sg_next(sg); 88 + list_del(&page->lru); 372 89 } 373 90 374 91 /* create the dmabuf */ 375 - dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags); 92 + exp_info.ops = &system_heap_buf_ops; 93 + exp_info.size = buffer->len; 94 + exp_info.flags = fd_flags; 95 + exp_info.priv = buffer; 96 + dmabuf = dma_buf_export(&exp_info); 376 97 if (IS_ERR(dmabuf)) { 377 98 ret = PTR_ERR(dmabuf); 378 - goto err1; 99 + goto free_pages; 379 100 } 380 - 381 - helper_buffer->dmabuf = dmabuf; 382 101 383 102 ret = dma_buf_fd(dmabuf, fd_flags); 384 103 if (ret < 0) { ··· 404 87 /* just return, as put will call release and that will free */ 405 88 return ret; 406 89 } 407 - 408 90 return ret; 409 91 410 - err1: 411 - while (pg > 0) 412 - __free_page(helper_buffer->pages[--pg]); 413 - kfree(helper_buffer->pages); 414 - err0: 415 - kfree(helper_buffer); 92 + free_pages: 93 + for_each_sgtable_sg(table, sg, i) { 94 + struct page *p = sg_page(sg); 95 + 96 + __free_pages(p, compound_order(p)); 97 + } 98 + sg_free_table(table); 99 + free_buffer: 100 + list_for_each_entry_safe(page, tmp_page, &pages, lru) 101 + __free_pages(page, compound_order(page)); 102 + kfree(buffer); 416 103 417 104 return ret; 418 105 } ··· 428 107 static int system_heap_create(void) 429 108 { 430 109 struct dma_heap_export_info exp_info; 431 - int ret = 0; 432 110 433 111 exp_info.name = "system"; 434 112 exp_info.ops = &system_heap_ops; ··· 435 115 436 116 sys_heap = dma_heap_add(&exp_info); 437 117 if (IS_ERR(sys_heap)) 438 - ret = PTR_ERR(sys_heap); 118 + return PTR_ERR(sys_heap); 439 119 440 - return ret; 120 + return 0; 441 121 } 442 122 module_init(system_heap_create); 443 123 MODULE_LICENSE("GPL v2");
+2
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1280 1280 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); 1281 1281 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, 1282 1282 unsigned long arg); 1283 + int amdgpu_info_ioctl(struct drm_device *dev, void *data, 1284 + struct drm_file *filp); 1283 1285 1284 1286 /* 1285 1287 * functions used by amdgpu_encoder.c
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1533 1533 return 0; 1534 1534 } 1535 1535 1536 - int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1537 - 1538 1536 const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { 1539 1537 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 1540 1538 DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+23 -30
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 551 551 struct ttm_resource *old_mem = &bo->mem; 552 552 int r; 553 553 554 - if ((old_mem->mem_type == TTM_PL_SYSTEM && 555 - new_mem->mem_type == TTM_PL_VRAM) || 556 - (old_mem->mem_type == TTM_PL_VRAM && 557 - new_mem->mem_type == TTM_PL_SYSTEM)) { 558 - hop->fpfn = 0; 559 - hop->lpfn = 0; 560 - hop->mem_type = TTM_PL_TT; 561 - hop->flags = 0; 562 - return -EMULTIHOP; 563 - } 564 - 565 554 if (new_mem->mem_type == TTM_PL_TT) { 566 555 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); 567 556 if (r) 568 557 return r; 569 558 } 570 - 571 - amdgpu_bo_move_notify(bo, evict, new_mem); 572 559 573 560 /* Can't move a pinned BO */ 574 561 abo = ttm_to_amdgpu_bo(bo); ··· 566 579 567 580 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 568 581 ttm_bo_move_null(bo, new_mem); 569 - return 0; 582 + goto out; 570 583 } 571 584 if (old_mem->mem_type == TTM_PL_SYSTEM && 572 585 new_mem->mem_type == TTM_PL_TT) { 573 586 ttm_bo_move_null(bo, new_mem); 574 - return 0; 587 + goto out; 575 588 } 576 - 577 589 if (old_mem->mem_type == TTM_PL_TT && 578 590 new_mem->mem_type == TTM_PL_SYSTEM) { 579 591 r = ttm_bo_wait_ctx(bo, ctx); 580 592 if (r) 581 - goto fail; 593 + return r; 582 594 583 595 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); 584 596 ttm_resource_free(bo, &bo->mem); 585 597 ttm_bo_assign_mem(bo, new_mem); 586 - return 0; 598 + goto out; 587 599 } 588 600 589 601 if (old_mem->mem_type == AMDGPU_PL_GDS || ··· 593 607 new_mem->mem_type == AMDGPU_PL_OA) { 594 608 /* Nothing to save here */ 595 609 ttm_bo_move_null(bo, new_mem); 596 - return 0; 610 + goto out; 597 611 } 598 612 599 - if (!adev->mman.buffer_funcs_enabled) { 613 + if (adev->mman.buffer_funcs_enabled) { 614 + if (((old_mem->mem_type == TTM_PL_SYSTEM && 615 + new_mem->mem_type == TTM_PL_VRAM) || 616 + (old_mem->mem_type == TTM_PL_VRAM && 617 + new_mem->mem_type == TTM_PL_SYSTEM))) { 618 + hop->fpfn = 0; 619 + hop->lpfn = 0; 620 + hop->mem_type = TTM_PL_TT; 621 + hop->flags = 0; 622 + return 
-EMULTIHOP; 623 + } 624 + 625 + r = amdgpu_move_blit(bo, evict, new_mem, old_mem); 626 + } else { 600 627 r = -ENODEV; 601 - goto memcpy; 602 628 } 603 629 604 - r = amdgpu_move_blit(bo, evict, new_mem, old_mem); 605 630 if (r) { 606 - memcpy: 607 631 /* Check that all memory is CPU accessible */ 608 632 if (!amdgpu_mem_visible(adev, old_mem) || 609 633 !amdgpu_mem_visible(adev, new_mem)) { 610 634 pr_err("Move buffer fallback to memcpy unavailable\n"); 611 - goto fail; 635 + return r; 612 636 } 613 637 614 638 r = ttm_bo_move_memcpy(bo, ctx, new_mem); 615 639 if (r) 616 - goto fail; 640 + return r; 617 641 } 618 642 619 643 if (bo->type == ttm_bo_type_device && ··· 635 639 abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; 636 640 } 637 641 642 + out: 638 643 /* update statistics */ 639 644 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); 645 + amdgpu_bo_move_notify(bo, evict, new_mem); 640 646 return 0; 641 - fail: 642 - swap(*new_mem, bo->mem); 643 - amdgpu_bo_move_notify(bo, false, new_mem); 644 - swap(*new_mem, bo->mem); 645 - return r; 646 647 } 647 648 648 649 /*
+4 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 24 24 */ 25 25 26 26 #include <linux/version.h> 27 + #include <drm/drm_atomic.h> 27 28 #include <drm/drm_atomic_helper.h> 28 29 #include <drm/drm_dp_mst_helper.h> 29 30 #include <drm/drm_dp_helper.h> ··· 253 252 254 253 static struct drm_encoder * 255 254 dm_mst_atomic_best_encoder(struct drm_connector *connector, 256 - struct drm_connector_state *connector_state) 255 + struct drm_atomic_state *state) 257 256 { 257 + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, 258 + connector); 258 259 struct drm_device *dev = connector->dev; 259 260 struct amdgpu_device *adev = drm_to_adev(dev); 260 261 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
+4 -4
drivers/gpu/drm/drm_atomic_helper.c
··· 122 122 continue; 123 123 124 124 if (funcs->atomic_best_encoder) 125 - new_encoder = funcs->atomic_best_encoder(connector, new_conn_state); 125 + new_encoder = funcs->atomic_best_encoder(connector, 126 + state); 126 127 else if (funcs->best_encoder) 127 128 new_encoder = funcs->best_encoder(connector); 128 129 else ··· 346 345 funcs = connector->helper_private; 347 346 348 347 if (funcs->atomic_best_encoder) 349 - new_encoder = funcs->atomic_best_encoder(connector, 350 - new_connector_state); 348 + new_encoder = funcs->atomic_best_encoder(connector, state); 351 349 else if (funcs->best_encoder) 352 350 new_encoder = funcs->best_encoder(connector); 353 351 else ··· 1313 1313 1314 1314 if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) { 1315 1315 WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); 1316 - funcs->atomic_commit(connector, new_conn_state); 1316 + funcs->atomic_commit(connector, old_state); 1317 1317 } 1318 1318 } 1319 1319 }
+1 -1
drivers/gpu/drm/drm_blend.c
··· 196 196 * exposed and assumed to be black). 197 197 * 198 198 * SCALING_FILTER: 199 - * 200 199 * Indicates scaling filter to be used for plane scaler 201 200 * 202 201 * The value of this property can be one of the following: 202 + * 203 203 * Default: 204 204 * Driver's default scaling filter 205 205 * Nearest Neighbor:
+1
drivers/gpu/drm/drm_bufs.c
··· 77 77 if ((entry->map->offset & 0xffffffff) == 78 78 (map->offset & 0xffffffff)) 79 79 return entry; 80 + break; 80 81 default: /* Make gcc happy */ 81 82 ; 82 83 }
-4
drivers/gpu/drm/drm_client.c
··· 314 314 struct dma_buf_map *map = &buffer->map; 315 315 int ret; 316 316 317 - if (dma_buf_map_is_set(map)) 318 - goto out; 319 - 320 317 /* 321 318 * FIXME: The dependency on GEM here isn't required, we could 322 319 * convert the driver handle to a dma-buf instead and use the ··· 326 329 if (ret) 327 330 return ret; 328 331 329 - out: 330 332 *map_copy = *map; 331 333 332 334 return 0;
+6 -6
drivers/gpu/drm/drm_crtc.c
··· 230 230 * 231 231 * Setting MODE_ID to 0 will release reserved resources for the CRTC. 232 232 * SCALING_FILTER: 233 - * Atomic property for setting the scaling filter for CRTC scaler 233 + * Atomic property for setting the scaling filter for CRTC scaler 234 234 * 235 - * The value of this property can be one of the following: 236 - * Default: 237 - * Driver's default scaling filter 238 - * Nearest Neighbor: 239 - * Nearest Neighbor scaling filter 235 + * The value of this property can be one of the following: 240 236 * 237 + * Default: 238 + * Driver's default scaling filter 239 + * Nearest Neighbor: 240 + * Nearest Neighbor scaling filter 241 241 */ 242 242 243 243 /**
+105 -58
drivers/gpu/drm/drm_fb_helper.c
··· 371 371 console_unlock(); 372 372 } 373 373 374 - static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper, 375 - struct drm_clip_rect *clip, 376 - struct dma_buf_map *dst) 374 + static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, 375 + struct drm_clip_rect *clip, 376 + struct dma_buf_map *dst) 377 377 { 378 378 struct drm_framebuffer *fb = fb_helper->fb; 379 379 unsigned int cpp = fb->format->cpp[0]; ··· 391 391 } 392 392 } 393 393 394 - static void drm_fb_helper_dirty_work(struct work_struct *work) 394 + static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, 395 + struct drm_clip_rect *clip) 395 396 { 396 - struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, 397 - dirty_work); 398 - struct drm_clip_rect *clip = &helper->dirty_clip; 399 - struct drm_clip_rect clip_copy; 400 - unsigned long flags; 401 - struct dma_buf_map map; 397 + struct drm_client_buffer *buffer = fb_helper->buffer; 398 + struct dma_buf_map map, dst; 402 399 int ret; 403 400 404 - spin_lock_irqsave(&helper->dirty_lock, flags); 401 + /* 402 + * We have to pin the client buffer to its current location while 403 + * flushing the shadow buffer. In the general case, concurrent 404 + * modesetting operations could try to move the buffer and would 405 + * fail. The modeset has to be serialized by acquiring the reservation 406 + * object of the underlying BO here. 407 + * 408 + * For fbdev emulation, we only have to protect against fbdev modeset 409 + * operations. Nothing else will involve the client buffer's BO. So it 410 + * is sufficient to acquire struct drm_fb_helper.lock here. 
411 + */ 412 + mutex_lock(&fb_helper->lock); 413 + 414 + ret = drm_client_buffer_vmap(buffer, &map); 415 + if (ret) 416 + goto out; 417 + 418 + dst = map; 419 + drm_fb_helper_damage_blit_real(fb_helper, clip, &dst); 420 + 421 + drm_client_buffer_vunmap(buffer); 422 + 423 + out: 424 + mutex_unlock(&fb_helper->lock); 425 + 426 + return ret; 427 + } 428 + 429 + static void drm_fb_helper_damage_work(struct work_struct *work) 430 + { 431 + struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, 432 + damage_work); 433 + struct drm_device *dev = helper->dev; 434 + struct drm_clip_rect *clip = &helper->damage_clip; 435 + struct drm_clip_rect clip_copy; 436 + unsigned long flags; 437 + int ret; 438 + 439 + spin_lock_irqsave(&helper->damage_lock, flags); 405 440 clip_copy = *clip; 406 441 clip->x1 = clip->y1 = ~0; 407 442 clip->x2 = clip->y2 = 0; 408 - spin_unlock_irqrestore(&helper->dirty_lock, flags); 443 + spin_unlock_irqrestore(&helper->damage_lock, flags); 409 444 410 - /* call dirty callback only when it has been really touched */ 411 - if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { 445 + /* Call damage handlers only if necessary */ 446 + if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) 447 + return; 412 448 413 - /* Generic fbdev uses a shadow buffer */ 414 - if (helper->buffer) { 415 - ret = drm_client_buffer_vmap(helper->buffer, &map); 416 - if (ret) 417 - return; 418 - drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map); 419 - } 420 - 421 - if (helper->fb->funcs->dirty) 422 - helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, 423 - &clip_copy, 1); 424 - 425 - if (helper->buffer) 426 - drm_client_buffer_vunmap(helper->buffer); 449 + if (helper->buffer) { 450 + ret = drm_fb_helper_damage_blit(helper, &clip_copy); 451 + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) 452 + goto err; 427 453 } 454 + 455 + if (helper->fb->funcs->dirty) { 456 + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 
0, &clip_copy, 1); 457 + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) 458 + goto err; 459 + } 460 + 461 + return; 462 + 463 + err: 464 + /* 465 + * Restore damage clip rectangle on errors. The next run 466 + * of the damage worker will perform the update. 467 + */ 468 + spin_lock_irqsave(&helper->damage_lock, flags); 469 + clip->x1 = min_t(u32, clip->x1, clip_copy.x1); 470 + clip->y1 = min_t(u32, clip->y1, clip_copy.y1); 471 + clip->x2 = max_t(u32, clip->x2, clip_copy.x2); 472 + clip->y2 = max_t(u32, clip->y2, clip_copy.y2); 473 + spin_unlock_irqrestore(&helper->damage_lock, flags); 428 474 } 429 475 430 476 /** ··· 486 440 const struct drm_fb_helper_funcs *funcs) 487 441 { 488 442 INIT_LIST_HEAD(&helper->kernel_fb_list); 489 - spin_lock_init(&helper->dirty_lock); 443 + spin_lock_init(&helper->damage_lock); 490 444 INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker); 491 - INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work); 492 - helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0; 445 + INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work); 446 + helper->damage_clip.x1 = helper->damage_clip.y1 = ~0; 493 447 mutex_init(&helper->lock); 494 448 helper->funcs = funcs; 495 449 helper->dev = dev; ··· 625 579 return; 626 580 627 581 cancel_work_sync(&fb_helper->resume_work); 628 - cancel_work_sync(&fb_helper->dirty_work); 582 + cancel_work_sync(&fb_helper->damage_work); 629 583 630 584 info = fb_helper->fbdev; 631 585 if (info) { ··· 660 614 fb->funcs->dirty; 661 615 } 662 616 663 - static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, 664 - u32 width, u32 height) 617 + static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, 618 + u32 width, u32 height) 665 619 { 666 620 struct drm_fb_helper *helper = info->par; 667 - struct drm_clip_rect *clip = &helper->dirty_clip; 621 + struct drm_clip_rect *clip = &helper->damage_clip; 668 622 unsigned long flags; 669 623 670 624 if (!drm_fbdev_use_shadow_fb(helper)) 
671 625 return; 672 626 673 - spin_lock_irqsave(&helper->dirty_lock, flags); 627 + spin_lock_irqsave(&helper->damage_lock, flags); 674 628 clip->x1 = min_t(u32, clip->x1, x); 675 629 clip->y1 = min_t(u32, clip->y1, y); 676 630 clip->x2 = max_t(u32, clip->x2, x + width); 677 631 clip->y2 = max_t(u32, clip->y2, y + height); 678 - spin_unlock_irqrestore(&helper->dirty_lock, flags); 632 + spin_unlock_irqrestore(&helper->damage_lock, flags); 679 633 680 - schedule_work(&helper->dirty_work); 634 + schedule_work(&helper->damage_work); 681 635 } 682 636 683 637 /** 684 638 * drm_fb_helper_deferred_io() - fbdev deferred_io callback function 685 639 * @info: fb_info struct pointer 686 - * @pagelist: list of dirty mmap framebuffer pages 640 + * @pagelist: list of mmap framebuffer pages that have to be flushed 687 641 * 688 642 * This function is used as the &fb_deferred_io.deferred_io 689 643 * callback function for flushing the fbdev mmap writes. ··· 708 662 y1 = min / info->fix.line_length; 709 663 y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length), 710 664 info->var.yres); 711 - drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1); 665 + drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1); 712 666 } 713 667 } 714 668 EXPORT_SYMBOL(drm_fb_helper_deferred_io); ··· 745 699 746 700 ret = fb_sys_write(info, buf, count, ppos); 747 701 if (ret > 0) 748 - drm_fb_helper_dirty(info, 0, 0, info->var.xres, 749 - info->var.yres); 702 + drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres); 750 703 751 704 return ret; 752 705 } ··· 762 717 const struct fb_fillrect *rect) 763 718 { 764 719 sys_fillrect(info, rect); 765 - drm_fb_helper_dirty(info, rect->dx, rect->dy, 766 - rect->width, rect->height); 720 + drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 767 721 } 768 722 EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); 769 723 ··· 777 733 const struct fb_copyarea *area) 778 734 { 779 735 sys_copyarea(info, area); 780 - 
drm_fb_helper_dirty(info, area->dx, area->dy, 781 - area->width, area->height); 736 + drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 782 737 } 783 738 EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); 784 739 ··· 792 749 const struct fb_image *image) 793 750 { 794 751 sys_imageblit(info, image); 795 - drm_fb_helper_dirty(info, image->dx, image->dy, 796 - image->width, image->height); 752 + drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 797 753 } 798 754 EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); 799 755 ··· 807 765 const struct fb_fillrect *rect) 808 766 { 809 767 cfb_fillrect(info, rect); 810 - drm_fb_helper_dirty(info, rect->dx, rect->dy, 811 - rect->width, rect->height); 768 + drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 812 769 } 813 770 EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); 814 771 ··· 822 781 const struct fb_copyarea *area) 823 782 { 824 783 cfb_copyarea(info, area); 825 - drm_fb_helper_dirty(info, area->dx, area->dy, 826 - area->width, area->height); 784 + drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 827 785 } 828 786 EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); 829 787 ··· 837 797 const struct fb_image *image) 838 798 { 839 799 cfb_imageblit(info, image); 840 - drm_fb_helper_dirty(info, image->dx, image->dy, 841 - image->width, image->height); 800 + drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 842 801 } 843 802 EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); 844 803 ··· 2027 1988 if (!fb_helper->dev) 2028 1989 return; 2029 1990 2030 - if (fbi && fbi->fbdefio) { 2031 - fb_deferred_io_cleanup(fbi); 2032 - shadow = fbi->screen_buffer; 1991 + if (fbi) { 1992 + if (fbi->fbdefio) 1993 + fb_deferred_io_cleanup(fbi); 1994 + if (drm_fbdev_use_shadow_fb(fb_helper)) 1995 + shadow = fbi->screen_buffer; 2033 1996 } 2034 1997 2035 1998 drm_fb_helper_fini(fb_helper); 2036 1999 2037 - vfree(shadow); 2000 + if (shadow) 2001 + 
vfree(shadow); 2002 + else 2003 + drm_client_buffer_vunmap(fb_helper->buffer); 2038 2004 2039 2005 drm_client_framebuffer_delete(fb_helper->buffer); 2040 2006 } ··· 2232 2188 2233 2189 if (ret > 0) 2234 2190 *ppos += ret; 2191 + 2192 + if (ret > 0) 2193 + drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual); 2235 2194 2236 2195 return ret ? ret : err; 2237 2196 }
+8 -32
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 51 51 if (!obj) 52 52 return ERR_PTR(-ENOMEM); 53 53 54 + shmem = to_drm_gem_shmem_obj(obj); 55 + 54 56 if (!obj->funcs) 55 57 obj->funcs = &drm_gem_shmem_funcs; 56 58 57 - if (private) 59 + if (private) { 58 60 drm_gem_private_object_init(dev, obj, size); 59 - else 61 + shmem->map_wc = false; /* dma-buf mappings use always writecombine */ 62 + } else { 60 63 ret = drm_gem_object_init(dev, obj, size); 64 + } 61 65 if (ret) 62 66 goto err_free; 63 67 ··· 69 65 if (ret) 70 66 goto err_release; 71 67 72 - shmem = to_drm_gem_shmem_obj(obj); 73 68 mutex_init(&shmem->pages_lock); 74 69 mutex_init(&shmem->vmap_lock); 75 70 INIT_LIST_HEAD(&shmem->madv_list); ··· 287 284 if (ret) 288 285 goto err_zero_use; 289 286 290 - if (!shmem->map_cached) 287 + if (shmem->map_wc) 291 288 prot = pgprot_writecombine(prot); 292 289 shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, 293 290 VM_MAP, prot); ··· 480 477 EXPORT_SYMBOL(drm_gem_shmem_purge); 481 478 482 479 /** 483 - * drm_gem_shmem_create_object_cached - Create a shmem buffer object with 484 - * cached mappings 485 - * @dev: DRM device 486 - * @size: Size of the object to allocate 487 - * 488 - * By default, shmem buffer objects use writecombine mappings. This 489 - * function implements struct drm_driver.gem_create_object for shmem 490 - * buffer objects with cached mappings. 491 - * 492 - * Returns: 493 - * A struct drm_gem_shmem_object * on success or NULL negative on failure. 
494 - */ 495 - struct drm_gem_object * 496 - drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size) 497 - { 498 - struct drm_gem_shmem_object *shmem; 499 - 500 - shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); 501 - if (!shmem) 502 - return NULL; 503 - shmem->map_cached = true; 504 - 505 - return &shmem->base; 506 - } 507 - EXPORT_SYMBOL(drm_gem_shmem_create_object_cached); 508 - 509 - /** 510 480 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object 511 481 * @file: DRM file structure to create the dumb buffer for 512 482 * @dev: DRM device ··· 602 626 603 627 vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; 604 628 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 605 - if (!shmem->map_cached) 629 + if (shmem->map_wc) 606 630 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 607 631 vma->vm_ops = &drm_gem_shmem_vm_ops; 608 632
+1 -3
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 145 145 * address_space (so unmap_mapping_range does what we want, 146 146 * in particular in the case of mmap'd dmabufs) 147 147 */ 148 - fput(vma->vm_file); 149 - get_file(etnaviv_obj->base.filp); 150 148 vma->vm_pgoff = 0; 151 - vma->vm_file = etnaviv_obj->base.filp; 149 + vma_set_file(vma, etnaviv_obj->base.filp); 152 150 153 151 vma->vm_page_prot = vm_page_prot; 154 152 }
+5 -2
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 23 23 * 24 24 */ 25 25 26 + #include <drm/drm_atomic.h> 26 27 #include <drm/drm_atomic_helper.h> 27 28 #include <drm/drm_edid.h> 28 29 #include <drm/drm_probe_helper.h> ··· 720 719 } 721 720 722 721 static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, 723 - struct drm_connector_state *state) 722 + struct drm_atomic_state *state) 724 723 { 724 + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, 725 + connector); 725 726 struct intel_connector *intel_connector = to_intel_connector(connector); 726 727 struct intel_dp *intel_dp = intel_connector->mst_port; 727 - struct intel_crtc *crtc = to_intel_crtc(state->crtc); 728 + struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc); 728 729 729 730 return &intel_dp->mst_encoders[crtc->pipe]->base.base; 730 731 }
+1 -2
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
··· 114 114 if (ret) 115 115 return ret; 116 116 117 - fput(vma->vm_file); 118 - vma->vm_file = get_file(obj->base.filp); 117 + vma_set_file(vma, obj->base.filp); 119 118 120 119 return 0; 121 120 }
+3 -2
drivers/gpu/drm/i915/gem/i915_gem_mman.c
··· 893 893 * requires avoiding extraneous references to their filp, hence why 894 894 * we prefer to use an anonymous file for their mmaps. 895 895 */ 896 - fput(vma->vm_file); 897 - vma->vm_file = anon; 896 + vma_set_file(vma, anon); 897 + /* Drop the initial creation reference, the vma is now holding one. */ 898 + fput(anon); 898 899 899 900 switch (mmo->mmap_type) { 900 901 case I915_MMAP_TYPE_WC:
+3
drivers/gpu/drm/imx/dcss/dcss-dev.h
··· 7 7 #define __DCSS_PRV_H__ 8 8 9 9 #include <drm/drm_fourcc.h> 10 + #include <drm/drm_plane.h> 10 11 #include <linux/io.h> 11 12 #include <video/videomode.h> 12 13 ··· 166 165 /* SCALER */ 167 166 int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base); 168 167 void dcss_scaler_exit(struct dcss_scaler *scl); 168 + void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num, 169 + enum drm_scaling_filter scaling_filter); 169 170 void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, 170 171 const struct drm_format_info *format, 171 172 int src_xres, int src_yres, int dst_xres, int dst_yres,
+19 -5
drivers/gpu/drm/imx/dcss/dcss-plane.c
··· 103 103 bool mod_present, u64 modifier, 104 104 unsigned int rotation) 105 105 { 106 - bool linear_format = !mod_present || 107 - (mod_present && modifier == DRM_FORMAT_MOD_LINEAR); 106 + bool linear_format = !mod_present || modifier == DRM_FORMAT_MOD_LINEAR; 108 107 u32 supported_rotation = DRM_MODE_ROTATE_0; 109 108 110 109 if (!format->is_yuv && linear_format) 111 110 supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 112 111 DRM_MODE_REFLECT_MASK; 113 112 else if (!format->is_yuv && 114 - modifier == DRM_FORMAT_MOD_VIVANTE_TILED) 113 + (modifier == DRM_FORMAT_MOD_VIVANTE_TILED || 114 + modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)) 115 115 supported_rotation = DRM_MODE_ROTATE_MASK | 116 116 DRM_MODE_REFLECT_MASK; 117 117 else if (format->is_yuv && linear_format && ··· 257 257 state->src_h != old_state->src_h || 258 258 fb->format->format != old_fb->format->format || 259 259 fb->modifier != old_fb->modifier || 260 - state->rotation != old_state->rotation; 260 + state->rotation != old_state->rotation || 261 + state->scaling_filter != old_state->scaling_filter; 261 262 } 262 263 263 264 static void dcss_plane_atomic_update(struct drm_plane *plane, ··· 273 272 u32 src_w, src_h, dst_w, dst_h; 274 273 struct drm_rect src, dst; 275 274 bool enable = true; 275 + bool is_rotation_90_or_270; 276 276 277 277 if (!fb || !state->crtc || !state->visible) 278 278 return; ··· 311 309 312 310 dcss_plane_atomic_set_base(dcss_plane); 313 311 312 + is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 | 313 + DRM_MODE_ROTATE_270); 314 + 315 + dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num, 316 + state->scaling_filter); 317 + 314 318 dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num, 315 - state->fb->format, src_w, src_h, 319 + state->fb->format, 320 + is_rotation_90_or_270 ? src_h : src_w, 321 + is_rotation_90_or_270 ? 
src_w : src_h, 316 322 dst_w, dst_h, 317 323 drm_mode_vrefresh(&crtc_state->mode)); 318 324 ··· 397 387 ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos); 398 388 if (ret) 399 389 return ERR_PTR(ret); 390 + 391 + drm_plane_create_scaling_filter_property(&dcss_plane->base, 392 + BIT(DRM_SCALING_FILTER_DEFAULT) | 393 + BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); 400 394 401 395 drm_plane_create_rotation_property(&dcss_plane->base, 402 396 DRM_MODE_ROTATE_0,
+38 -9
drivers/gpu/drm/imx/dcss/dcss-scaler.c
··· 77 77 78 78 u32 c_vstart; 79 79 u32 c_hstart; 80 + 81 + bool use_nn_interpolation; 80 82 }; 81 83 82 84 struct dcss_scaler { ··· 245 243 } 246 244 } 247 245 246 + static void dcss_scaler_nearest_neighbor_filter(bool use_5_taps, 247 + int coef[][PSC_NUM_TAPS]) 248 + { 249 + int i, j; 250 + 251 + for (i = 0; i < PSC_STORED_PHASES; i++) 252 + for (j = 0; j < PSC_NUM_TAPS; j++) 253 + coef[i][j] = j == PSC_NUM_TAPS >> 1 ? 254 + (1 << PSC_COEFF_PRECISION) : 0; 255 + } 256 + 248 257 /** 249 258 * dcss_scaler_filter_design() - Compute filter coefficients using 250 259 * Gaussian filter. ··· 266 253 */ 267 254 static void dcss_scaler_filter_design(int src_length, int dst_length, 268 255 bool use_5_taps, bool phase0_identity, 269 - int coef[][PSC_NUM_TAPS]) 256 + int coef[][PSC_NUM_TAPS], 257 + bool nn_interpolation) 270 258 { 271 259 int fc_q; 272 260 ··· 277 263 else 278 264 fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES); 279 265 280 - /* compute gaussian filter coefficients */ 281 - dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef); 266 + if (nn_interpolation) 267 + dcss_scaler_nearest_neighbor_filter(use_5_taps, coef); 268 + else 269 + /* compute gaussian filter coefficients */ 270 + dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef); 282 271 } 283 272 284 273 static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs) ··· 670 653 671 654 /* horizontal luma */ 672 655 dcss_scaler_filter_design(src_xres, dst_xres, false, 673 - src_xres == dst_xres, coef); 656 + src_xres == dst_xres, coef, 657 + ch->use_nn_interpolation); 674 658 dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); 675 659 676 660 /* vertical luma */ 677 661 dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, 678 - src_yres == dst_yres, coef); 662 + src_yres == dst_yres, coef, 663 + ch->use_nn_interpolation); 679 664 680 665 if (program_5_taps) 681 666 dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); ··· 
697 678 /* horizontal chroma */ 698 679 dcss_scaler_filter_design(src_xres, dst_xres, false, 699 680 (src_xres == dst_xres) && (ch->c_hstart == 0), 700 - coef); 681 + coef, ch->use_nn_interpolation); 701 682 702 683 dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef); 703 684 704 685 /* vertical chroma */ 705 686 dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, 706 687 (src_yres == dst_yres) && (ch->c_vstart == 0), 707 - coef); 688 + coef, ch->use_nn_interpolation); 708 689 if (program_5_taps) 709 690 dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); 710 691 else ··· 719 700 720 701 /* horizontal RGB */ 721 702 dcss_scaler_filter_design(src_xres, dst_xres, false, 722 - src_xres == dst_xres, coef); 703 + src_xres == dst_xres, coef, 704 + ch->use_nn_interpolation); 723 705 dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); 724 706 725 707 /* vertical RGB */ 726 708 dcss_scaler_filter_design(src_yres, dst_yres, false, 727 - src_yres == dst_yres, coef); 709 + src_yres == dst_yres, coef, 710 + ch->use_nn_interpolation); 728 711 dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); 729 712 } 730 713 ··· 770 749 } 771 750 772 751 ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS; 752 + } 753 + 754 + void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num, 755 + enum drm_scaling_filter scaling_filter) 756 + { 757 + struct dcss_scaler_ch *ch = &scl->ch[ch_num]; 758 + 759 + ch->use_nn_interpolation = scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR; 773 760 } 774 761 775 762 void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
+1 -1
drivers/gpu/drm/lima/lima_gem.c
··· 225 225 226 226 mutex_init(&bo->lock); 227 227 INIT_LIST_HEAD(&bo->va); 228 - 228 + bo->base.map_wc = true; 229 229 bo->base.base.funcs = &lima_gem_funcs; 230 230 231 231 return &bo->base.base;
+1
drivers/gpu/drm/mcde/Kconfig
··· 4 4 depends on CMA 5 5 depends on ARM || COMPILE_TEST 6 6 depends on OF 7 + depends on COMMON_CLK 7 8 select MFD_SYSCON 8 9 select DRM_MIPI_DSI 9 10 select DRM_BRIDGE
+1 -1
drivers/gpu/drm/mcde/Makefile
··· 1 - mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_display.o 1 + mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_clk_div.o mcde_display.o 2 2 3 3 obj-$(CONFIG_DRM_MCDE) += mcde_drm.o
+192
drivers/gpu/drm/mcde/mcde_clk_div.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/clk-provider.h> 3 + #include <linux/regulator/consumer.h> 4 + 5 + #include "mcde_drm.h" 6 + #include "mcde_display_regs.h" 7 + 8 + /* The MCDE internal clock dividers for FIFO A and B */ 9 + struct mcde_clk_div { 10 + struct clk_hw hw; 11 + struct mcde *mcde; 12 + u32 cr; 13 + u32 cr_div; 14 + }; 15 + 16 + static int mcde_clk_div_enable(struct clk_hw *hw) 17 + { 18 + struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw); 19 + struct mcde *mcde = cdiv->mcde; 20 + u32 val; 21 + 22 + spin_lock(&mcde->fifo_crx1_lock); 23 + val = readl(mcde->regs + cdiv->cr); 24 + /* 25 + * Select the PLL72 (LCD) clock as parent 26 + * FIXME: implement other parents. 27 + */ 28 + val &= ~MCDE_CRX1_CLKSEL_MASK; 29 + val |= MCDE_CRX1_CLKSEL_CLKPLL72 << MCDE_CRX1_CLKSEL_SHIFT; 30 + /* Internal clock */ 31 + val |= MCDE_CRA1_CLKTYPE_TVXCLKSEL1; 32 + 33 + /* Clear then set the divider */ 34 + val &= ~(MCDE_CRX1_BCD | MCDE_CRX1_PCD_MASK); 35 + val |= cdiv->cr_div; 36 + 37 + writel(val, mcde->regs + cdiv->cr); 38 + spin_unlock(&mcde->fifo_crx1_lock); 39 + 40 + return 0; 41 + } 42 + 43 + static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, 44 + unsigned long *prate, bool set_parent) 45 + { 46 + int best_div = 1, div; 47 + struct clk_hw *parent = clk_hw_get_parent(hw); 48 + unsigned long best_prate = 0; 49 + unsigned long best_diff = ~0ul; 50 + int max_div = (1 << MCDE_CRX1_PCD_BITS) - 1; 51 + 52 + for (div = 1; div < max_div; div++) { 53 + unsigned long this_prate, div_rate, diff; 54 + 55 + if (set_parent) 56 + this_prate = clk_hw_round_rate(parent, rate * div); 57 + else 58 + this_prate = *prate; 59 + div_rate = DIV_ROUND_UP_ULL(this_prate, div); 60 + diff = abs(rate - div_rate); 61 + 62 + if (diff < best_diff) { 63 + best_div = div; 64 + best_diff = diff; 65 + best_prate = this_prate; 66 + } 67 + } 68 + 69 + *prate = best_prate; 70 + return best_div; 71 + } 72 + 73 + static long 
mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, 74 + unsigned long *prate) 75 + { 76 + int div = mcde_clk_div_choose_div(hw, rate, prate, true); 77 + 78 + return DIV_ROUND_UP_ULL(*prate, div); 79 + } 80 + 81 + static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw, 82 + unsigned long prate) 83 + { 84 + struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw); 85 + struct mcde *mcde = cdiv->mcde; 86 + u32 cr; 87 + int div; 88 + 89 + /* 90 + * If the MCDE is not powered we can't access registers. 91 + * It will come up with 0 in the divider register bits, which 92 + * means "divide by 2". 93 + */ 94 + if (!regulator_is_enabled(mcde->epod)) 95 + return DIV_ROUND_UP_ULL(prate, 2); 96 + 97 + cr = readl(mcde->regs + cdiv->cr); 98 + if (cr & MCDE_CRX1_BCD) 99 + return prate; 100 + 101 + /* 0 in the PCD means "divide by 2", 1 means "divide by 3" etc */ 102 + div = cr & MCDE_CRX1_PCD_MASK; 103 + div += 2; 104 + 105 + return DIV_ROUND_UP_ULL(prate, div); 106 + } 107 + 108 + static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, 109 + unsigned long prate) 110 + { 111 + struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw); 112 + int div = mcde_clk_div_choose_div(hw, rate, &prate, false); 113 + u32 cr = 0; 114 + 115 + /* 116 + * We cache the CR bits to set the divide in the state so that 117 + * we can call this before we can even write to the hardware. 
118 + */ 119 + if (div == 1) { 120 + /* Bypass clock divider */ 121 + cr |= MCDE_CRX1_BCD; 122 + } else { 123 + div -= 2; 124 + cr |= div & MCDE_CRX1_PCD_MASK; 125 + } 126 + cdiv->cr_div = cr; 127 + 128 + return 0; 129 + } 130 + 131 + static const struct clk_ops mcde_clk_div_ops = { 132 + .enable = mcde_clk_div_enable, 133 + .recalc_rate = mcde_clk_div_recalc_rate, 134 + .round_rate = mcde_clk_div_round_rate, 135 + .set_rate = mcde_clk_div_set_rate, 136 + }; 137 + 138 + int mcde_init_clock_divider(struct mcde *mcde) 139 + { 140 + struct device *dev = mcde->dev; 141 + struct mcde_clk_div *fifoa; 142 + struct mcde_clk_div *fifob; 143 + const char *parent_name; 144 + struct clk_init_data fifoa_init = { 145 + .name = "fifoa", 146 + .ops = &mcde_clk_div_ops, 147 + .parent_names = &parent_name, 148 + .num_parents = 1, 149 + .flags = CLK_SET_RATE_PARENT, 150 + }; 151 + struct clk_init_data fifob_init = { 152 + .name = "fifob", 153 + .ops = &mcde_clk_div_ops, 154 + .parent_names = &parent_name, 155 + .num_parents = 1, 156 + .flags = CLK_SET_RATE_PARENT, 157 + }; 158 + int ret; 159 + 160 + spin_lock_init(&mcde->fifo_crx1_lock); 161 + parent_name = __clk_get_name(mcde->lcd_clk); 162 + 163 + /* Allocate 2 clocks */ 164 + fifoa = devm_kzalloc(dev, sizeof(*fifoa), GFP_KERNEL); 165 + if (!fifoa) 166 + return -ENOMEM; 167 + fifob = devm_kzalloc(dev, sizeof(*fifob), GFP_KERNEL); 168 + if (!fifob) 169 + return -ENOMEM; 170 + 171 + fifoa->mcde = mcde; 172 + fifoa->cr = MCDE_CRA1; 173 + fifoa->hw.init = &fifoa_init; 174 + ret = devm_clk_hw_register(dev, &fifoa->hw); 175 + if (ret) { 176 + dev_err(dev, "error registering FIFO A clock divider\n"); 177 + return ret; 178 + } 179 + mcde->fifoa_clk = fifoa->hw.clk; 180 + 181 + fifob->mcde = mcde; 182 + fifob->cr = MCDE_CRB1; 183 + fifob->hw.init = &fifob_init; 184 + ret = devm_clk_hw_register(dev, &fifob->hw); 185 + if (ret) { 186 + dev_err(dev, "error registering FIFO B clock divider\n"); 187 + return ret; 188 + } 189 + mcde->fifob_clk = 
fifob->hw.clk; 190 + 191 + return 0; 192 + }
+352 -102
drivers/gpu/drm/mcde/mcde_display.c
··· 8 8 #include <linux/delay.h> 9 9 #include <linux/dma-buf.h> 10 10 #include <linux/regulator/consumer.h> 11 + #include <linux/media-bus-format.h> 11 12 12 13 #include <drm/drm_device.h> 13 14 #include <drm/drm_fb_cma_helper.h> ··· 17 16 #include <drm/drm_gem_framebuffer_helper.h> 18 17 #include <drm/drm_mipi_dsi.h> 19 18 #include <drm/drm_simple_kms_helper.h> 19 + #include <drm/drm_bridge.h> 20 20 #include <drm/drm_vblank.h> 21 21 #include <video/mipi_display.h> 22 22 ··· 59 57 MCDE_OVERLAY_5, 60 58 }; 61 59 62 - enum mcde_dsi_formatter { 60 + enum mcde_formatter { 63 61 MCDE_DSI_FORMATTER_0 = 0, 64 62 MCDE_DSI_FORMATTER_1, 65 63 MCDE_DSI_FORMATTER_2, 64 + MCDE_DSI_FORMATTER_3, 65 + MCDE_DSI_FORMATTER_4, 66 + MCDE_DSI_FORMATTER_5, 67 + MCDE_DPI_FORMATTER_0, 68 + MCDE_DPI_FORMATTER_1, 66 69 }; 67 70 68 71 void mcde_display_irq(struct mcde *mcde) ··· 88 81 * 89 82 * TODO: Currently only one DSI link is supported. 90 83 */ 91 - if (mcde_dsi_irq(mcde->mdsi)) { 84 + if (!mcde->dpi_output && mcde_dsi_irq(mcde->mdsi)) { 92 85 u32 val; 93 86 94 87 /* ··· 250 243 val = 0 << MCDE_EXTSRCXCONF_BUF_ID_SHIFT; 251 244 val |= 1 << MCDE_EXTSRCXCONF_BUF_NB_SHIFT; 252 245 val |= 0 << MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT; 253 - /* 254 - * MCDE has inverse semantics from DRM on RBG/BGR which is why 255 - * all the modes are inversed here. 
256 - */ 246 + 257 247 switch (format) { 258 248 case DRM_FORMAT_ARGB8888: 259 249 val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 << 260 250 MCDE_EXTSRCXCONF_BPP_SHIFT; 261 - val |= MCDE_EXTSRCXCONF_BGR; 262 251 break; 263 252 case DRM_FORMAT_ABGR8888: 264 253 val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 << 265 254 MCDE_EXTSRCXCONF_BPP_SHIFT; 255 + val |= MCDE_EXTSRCXCONF_BGR; 266 256 break; 267 257 case DRM_FORMAT_XRGB8888: 268 258 val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 << 269 259 MCDE_EXTSRCXCONF_BPP_SHIFT; 270 - val |= MCDE_EXTSRCXCONF_BGR; 271 260 break; 272 261 case DRM_FORMAT_XBGR8888: 273 262 val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 << 274 263 MCDE_EXTSRCXCONF_BPP_SHIFT; 264 + val |= MCDE_EXTSRCXCONF_BGR; 275 265 break; 276 266 case DRM_FORMAT_RGB888: 277 267 val |= MCDE_EXTSRCXCONF_BPP_RGB888 << 278 268 MCDE_EXTSRCXCONF_BPP_SHIFT; 279 - val |= MCDE_EXTSRCXCONF_BGR; 280 269 break; 281 270 case DRM_FORMAT_BGR888: 282 271 val |= MCDE_EXTSRCXCONF_BPP_RGB888 << 283 272 MCDE_EXTSRCXCONF_BPP_SHIFT; 273 + val |= MCDE_EXTSRCXCONF_BGR; 284 274 break; 285 275 case DRM_FORMAT_ARGB4444: 286 276 val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 << 287 277 MCDE_EXTSRCXCONF_BPP_SHIFT; 288 - val |= MCDE_EXTSRCXCONF_BGR; 289 278 break; 290 279 case DRM_FORMAT_ABGR4444: 291 280 val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 << 292 281 MCDE_EXTSRCXCONF_BPP_SHIFT; 282 + val |= MCDE_EXTSRCXCONF_BGR; 293 283 break; 294 284 case DRM_FORMAT_XRGB4444: 295 285 val |= MCDE_EXTSRCXCONF_BPP_RGB444 << 296 286 MCDE_EXTSRCXCONF_BPP_SHIFT; 297 - val |= MCDE_EXTSRCXCONF_BGR; 298 287 break; 299 288 case DRM_FORMAT_XBGR4444: 300 289 val |= MCDE_EXTSRCXCONF_BPP_RGB444 << 301 290 MCDE_EXTSRCXCONF_BPP_SHIFT; 291 + val |= MCDE_EXTSRCXCONF_BGR; 302 292 break; 303 293 case DRM_FORMAT_XRGB1555: 304 294 val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 << 305 295 MCDE_EXTSRCXCONF_BPP_SHIFT; 306 - val |= MCDE_EXTSRCXCONF_BGR; 307 296 break; 308 297 case DRM_FORMAT_XBGR1555: 309 298 val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 << 310 299 MCDE_EXTSRCXCONF_BPP_SHIFT; 300 
+ val |= MCDE_EXTSRCXCONF_BGR; 311 301 break; 312 302 case DRM_FORMAT_RGB565: 313 303 val |= MCDE_EXTSRCXCONF_BPP_RGB565 << 314 304 MCDE_EXTSRCXCONF_BPP_SHIFT; 315 - val |= MCDE_EXTSRCXCONF_BGR; 316 305 break; 317 306 case DRM_FORMAT_BGR565: 318 307 val |= MCDE_EXTSRCXCONF_BPP_RGB565 << 319 308 MCDE_EXTSRCXCONF_BPP_SHIFT; 309 + val |= MCDE_EXTSRCXCONF_BGR; 320 310 break; 321 311 case DRM_FORMAT_YUV422: 322 312 val |= MCDE_EXTSRCXCONF_BPP_YCBCR422 << ··· 560 556 << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT; 561 557 break; 562 558 case MCDE_VIDEO_FORMATTER_FLOW: 559 + case MCDE_DPI_FORMATTER_FLOW: 563 560 val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE 564 561 << MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT; 565 562 val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER ··· 569 564 default: 570 565 dev_err(mcde->dev, "unknown flow mode %d\n", 571 566 mcde->flow_mode); 572 - break; 567 + return; 573 568 } 574 569 575 570 writel(val, mcde->regs + sync); ··· 599 594 mcde->regs + mux); 600 595 break; 601 596 } 597 + 598 + /* 599 + * If using DPI configure the sync event. 600 + * TODO: this is for LCD only, it does not cover TV out. 
601 + */ 602 + if (mcde->dpi_output) { 603 + u32 stripwidth; 604 + 605 + stripwidth = 0xF000 / (mode->vdisplay * 4); 606 + dev_info(mcde->dev, "stripwidth: %d\n", stripwidth); 607 + 608 + val = MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO | 609 + (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_HWREQVCNT_SHIFT | 610 + MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO | 611 + (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_SWINTVCNT_SHIFT; 612 + 613 + switch (fifo) { 614 + case MCDE_FIFO_A: 615 + writel(val, mcde->regs + MCDE_SYNCHCONFA); 616 + break; 617 + case MCDE_FIFO_B: 618 + writel(val, mcde->regs + MCDE_SYNCHCONFB); 619 + break; 620 + } 621 + } 602 622 } 603 623 604 624 static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo, 605 - enum mcde_dsi_formatter fmt, 625 + enum mcde_formatter fmt, 606 626 int fifo_wtrmrk) 607 627 { 608 628 u32 val; ··· 648 618 } 649 619 650 620 val = fifo_wtrmrk << MCDE_CTRLX_FIFOWTRMRK_SHIFT; 651 - /* We only support DSI formatting for now */ 652 - val |= MCDE_CTRLX_FORMTYPE_DSI << 653 - MCDE_CTRLX_FORMTYPE_SHIFT; 654 621 655 - /* Select the formatter to use for this FIFO */ 656 - val |= fmt << MCDE_CTRLX_FORMID_SHIFT; 622 + /* 623 + * Select the formatter to use for this FIFO 624 + * 625 + * The register definitions imply that different IDs should be used 626 + * by the DSI formatters depending on if they are in VID or CMD 627 + * mode, and the manual says they are dedicated but identical. 628 + * The vendor code uses them as it seems fit. 
629 + */ 630 + switch (fmt) { 631 + case MCDE_DSI_FORMATTER_0: 632 + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; 633 + val |= MCDE_CTRLX_FORMID_DSI0VID << MCDE_CTRLX_FORMID_SHIFT; 634 + break; 635 + case MCDE_DSI_FORMATTER_1: 636 + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; 637 + val |= MCDE_CTRLX_FORMID_DSI0CMD << MCDE_CTRLX_FORMID_SHIFT; 638 + break; 639 + case MCDE_DSI_FORMATTER_2: 640 + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; 641 + val |= MCDE_CTRLX_FORMID_DSI1VID << MCDE_CTRLX_FORMID_SHIFT; 642 + break; 643 + case MCDE_DSI_FORMATTER_3: 644 + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; 645 + val |= MCDE_CTRLX_FORMID_DSI1CMD << MCDE_CTRLX_FORMID_SHIFT; 646 + break; 647 + case MCDE_DSI_FORMATTER_4: 648 + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; 649 + val |= MCDE_CTRLX_FORMID_DSI2VID << MCDE_CTRLX_FORMID_SHIFT; 650 + break; 651 + case MCDE_DSI_FORMATTER_5: 652 + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; 653 + val |= MCDE_CTRLX_FORMID_DSI2CMD << MCDE_CTRLX_FORMID_SHIFT; 654 + break; 655 + case MCDE_DPI_FORMATTER_0: 656 + val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT; 657 + val |= MCDE_CTRLX_FORMID_DPIA << MCDE_CTRLX_FORMID_SHIFT; 658 + break; 659 + case MCDE_DPI_FORMATTER_1: 660 + val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT; 661 + val |= MCDE_CTRLX_FORMID_DPIB << MCDE_CTRLX_FORMID_SHIFT; 662 + break; 663 + } 657 664 writel(val, mcde->regs + ctrl); 658 665 659 666 /* Blend source with Alpha 0xff on FIFO */ ··· 698 631 0xff << MCDE_CRX0_ALPHABLEND_SHIFT; 699 632 writel(val, mcde->regs + cr0); 700 633 701 - /* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */ 634 + spin_lock(&mcde->fifo_crx1_lock); 635 + val = readl(mcde->regs + cr1); 636 + /* 637 + * Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() 638 + * FIXME: a different clock needs to be selected for TV out. 
639 + */ 640 + if (mcde->dpi_output) { 641 + struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge); 642 + u32 bus_format; 702 643 703 - /* Use the MCDE clock for this FIFO */ 704 - val = MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT; 644 + /* Assume RGB888 24 bit if we have no further info */ 645 + if (!connector->display_info.num_bus_formats) { 646 + dev_info(mcde->dev, "panel does not specify bus format, assume RGB888\n"); 647 + bus_format = MEDIA_BUS_FMT_RGB888_1X24; 648 + } else { 649 + bus_format = connector->display_info.bus_formats[0]; 650 + } 705 651 706 - /* TODO: when adding DPI support add OUTBPP etc here */ 652 + /* 653 + * Set up the CDWIN and OUTBPP for the LCD 654 + * 655 + * FIXME: fill this in if you know the correspondance between the MIPI 656 + * DPI specification and the media bus formats. 657 + */ 658 + val &= ~MCDE_CRX1_CDWIN_MASK; 659 + val &= ~MCDE_CRX1_OUTBPP_MASK; 660 + switch (bus_format) { 661 + case MEDIA_BUS_FMT_RGB888_1X24: 662 + val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT; 663 + val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT; 664 + break; 665 + default: 666 + dev_err(mcde->dev, "unknown bus format, assume RGB888\n"); 667 + val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT; 668 + val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT; 669 + break; 670 + } 671 + } else { 672 + /* Use the MCDE clock for DSI */ 673 + val &= ~MCDE_CRX1_CLKSEL_MASK; 674 + val |= MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT; 675 + } 707 676 writel(val, mcde->regs + cr1); 677 + spin_unlock(&mcde->fifo_crx1_lock); 708 678 }; 709 679 710 680 static void mcde_configure_dsi_formatter(struct mcde *mcde, 711 - enum mcde_dsi_formatter fmt, 681 + enum mcde_formatter fmt, 712 682 u32 formatter_frame, 713 683 int pkt_size) 714 684 { ··· 785 681 delay0 = MCDE_DSIVID2DELAY0; 786 682 delay1 = MCDE_DSIVID2DELAY1; 787 683 break; 684 + default: 685 + dev_err(mcde->dev, "tried to configure a non-DSI formatter as 
DSI\n"); 686 + return; 788 687 } 789 688 790 689 /* ··· 807 700 MCDE_DSICONF0_PACKING_SHIFT; 808 701 break; 809 702 case MIPI_DSI_FMT_RGB666_PACKED: 810 - val |= MCDE_DSICONF0_PACKING_RGB666_PACKED << 703 + dev_err(mcde->dev, 704 + "we cannot handle the packed RGB666 format\n"); 705 + val |= MCDE_DSICONF0_PACKING_RGB666 << 811 706 MCDE_DSICONF0_PACKING_SHIFT; 812 707 break; 813 708 case MIPI_DSI_FMT_RGB565: ··· 969 860 return 1; 970 861 } 971 862 972 - static void mcde_display_enable(struct drm_simple_display_pipe *pipe, 973 - struct drm_crtc_state *cstate, 974 - struct drm_plane_state *plane_state) 863 + static void mcde_setup_dpi(struct mcde *mcde, const struct drm_display_mode *mode, 864 + int *fifo_wtrmrk_lvl) 975 865 { 976 - struct drm_crtc *crtc = &pipe->crtc; 977 - struct drm_plane *plane = &pipe->plane; 978 - struct drm_device *drm = crtc->dev; 979 - struct mcde *mcde = to_mcde(drm); 980 - const struct drm_display_mode *mode = &cstate->mode; 981 - struct drm_framebuffer *fb = plane->state->fb; 982 - u32 format = fb->format->format; 983 - u32 formatter_ppl = mode->hdisplay; /* pixels per line */ 984 - u32 formatter_lpf = mode->vdisplay; /* lines per frame */ 985 - int pkt_size, fifo_wtrmrk; 986 - int cpp = fb->format->cpp[0]; 987 - int formatter_cpp; 988 - struct drm_format_name_buf tmp; 989 - u32 formatter_frame; 990 - u32 pkt_div; 866 + struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge); 867 + u32 hsw, hfp, hbp; 868 + u32 vsw, vfp, vbp; 991 869 u32 val; 992 - int ret; 993 870 994 - /* This powers up the entire MCDE block and the DSI hardware */ 995 - ret = regulator_enable(mcde->epod); 996 - if (ret) { 997 - dev_err(drm->dev, "can't re-enable EPOD regulator\n"); 998 - return; 999 - } 871 + /* FIXME: we only support LCD, implement TV out */ 872 + hsw = mode->hsync_end - mode->hsync_start; 873 + hfp = mode->hsync_start - mode->hdisplay; 874 + hbp = mode->htotal - mode->hsync_end; 875 + vsw = mode->vsync_end - mode->vsync_start; 876 + 
vfp = mode->vsync_start - mode->vdisplay; 877 + vbp = mode->vtotal - mode->vsync_end; 1000 878 1001 - dev_info(drm->dev, "enable MCDE, %d x %d format %s\n", 1002 - mode->hdisplay, mode->vdisplay, 1003 - drm_get_format_name(format, &tmp)); 1004 - if (!mcde->mdsi) { 1005 - /* TODO: deal with this for non-DSI output */ 1006 - dev_err(drm->dev, "no DSI master attached!\n"); 1007 - return; 1008 - } 879 + dev_info(mcde->dev, "output on DPI LCD from channel A\n"); 880 + /* Display actual values */ 881 + dev_info(mcde->dev, "HSW: %d, HFP: %d, HBP: %d, VSW: %d, VFP: %d, VBP: %d\n", 882 + hsw, hfp, hbp, vsw, vfp, vbp); 883 + 884 + /* 885 + * The pixel fetcher is 128 64-bit words deep = 1024 bytes. 886 + * One overlay of 32bpp (4 cpp) assumed, fetch 160 pixels. 887 + * 160 * 4 = 640 bytes. 888 + */ 889 + *fifo_wtrmrk_lvl = 640; 1009 890 1010 891 /* Set up the main control, watermark level at 7 */ 1011 892 val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT; 1012 - /* 24 bits DPI: connect LSB Ch B to D[0:7] */ 1013 - val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT; 1014 - /* TV out: connect LSB Ch B to D[8:15] */ 1015 - val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT; 893 + 894 + /* 895 + * This sets up the internal silicon muxing of the DPI 896 + * lines. This is how the silicon connects out to the 897 + * external pins, then the pins need to be further 898 + * configured into "alternate functions" using pin control 899 + * to actually get the signals out. 900 + * 901 + * FIXME: this is hardcoded to the only setting found in 902 + * the wild. If we need to use different settings for 903 + * different DPI displays, make this parameterizable from 904 + * the device tree. 
905 + */ 906 + /* 24 bits DPI: connect Ch A LSB to D[0:7] */ 907 + val |= 0 << MCDE_CONF0_OUTMUX0_SHIFT; 908 + /* 24 bits DPI: connect Ch A MID to D[8:15] */ 909 + val |= 1 << MCDE_CONF0_OUTMUX1_SHIFT; 1016 910 /* Don't care about this muxing */ 1017 911 val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT; 1018 - /* 24 bits DPI: connect MID Ch B to D[24:31] */ 1019 - val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT; 1020 - /* 5: 24 bits DPI: connect MSB Ch B to D[32:39] */ 1021 - val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT; 1022 - /* Syncmux bits zero: DPI channel A and B on output pins A and B resp */ 912 + /* Don't care about this muxing */ 913 + val |= 0 << MCDE_CONF0_OUTMUX3_SHIFT; 914 + /* 24 bits DPI: connect Ch A MSB to D[32:39] */ 915 + val |= 2 << MCDE_CONF0_OUTMUX4_SHIFT; 916 + /* Syncmux bits zero: DPI channel A */ 1023 917 writel(val, mcde->regs + MCDE_CONF0); 1024 918 1025 - /* Clear any pending interrupts */ 1026 - mcde_display_disable_irqs(mcde); 1027 - writel(0, mcde->regs + MCDE_IMSCERR); 1028 - writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR); 919 + /* This hammers us into LCD mode */ 920 + writel(0, mcde->regs + MCDE_TVCRA); 1029 921 1030 - dev_info(drm->dev, "output in %s mode, format %dbpp\n", 922 + /* Front porch and sync width */ 923 + val = (vsw << MCDE_TVBL1_BEL1_SHIFT); 924 + val |= (vfp << MCDE_TVBL1_BSL1_SHIFT); 925 + writel(val, mcde->regs + MCDE_TVBL1A); 926 + /* The vendor driver sets the same value into TVBL2A */ 927 + writel(val, mcde->regs + MCDE_TVBL2A); 928 + 929 + /* Vertical back porch */ 930 + val = (vbp << MCDE_TVDVO_DVO1_SHIFT); 931 + /* The vendor drivers sets the same value into TVDVOA */ 932 + val |= (vbp << MCDE_TVDVO_DVO2_SHIFT); 933 + writel(val, mcde->regs + MCDE_TVDVOA); 934 + 935 + /* Horizontal back porch, as 0 = 1 cycle we need to subtract 1 */ 936 + writel((hbp - 1), mcde->regs + MCDE_TVTIM1A); 937 + 938 + /* Horizongal sync width and horizonal front porch, 0 = 1 cycle */ 939 + val = ((hsw - 1) << MCDE_TVLBALW_LBW_SHIFT); 940 + val |= ((hfp - 1) << 
MCDE_TVLBALW_ALW_SHIFT); 941 + writel(val, mcde->regs + MCDE_TVLBALWA); 942 + 943 + /* Blank some TV registers we don't use */ 944 + writel(0, mcde->regs + MCDE_TVISLA); 945 + writel(0, mcde->regs + MCDE_TVBLUA); 946 + 947 + /* Set up sync inversion etc */ 948 + val = 0; 949 + if (mode->flags & DRM_MODE_FLAG_NHSYNC) 950 + val |= MCDE_LCDTIM1B_IHS; 951 + if (mode->flags & DRM_MODE_FLAG_NVSYNC) 952 + val |= MCDE_LCDTIM1B_IVS; 953 + if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW) 954 + val |= MCDE_LCDTIM1B_IOE; 955 + if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) 956 + val |= MCDE_LCDTIM1B_IPC; 957 + writel(val, mcde->regs + MCDE_LCDTIM1A); 958 + } 959 + 960 + static void mcde_setup_dsi(struct mcde *mcde, const struct drm_display_mode *mode, 961 + int cpp, int *fifo_wtrmrk_lvl, int *dsi_formatter_frame, 962 + int *dsi_pkt_size) 963 + { 964 + u32 formatter_ppl = mode->hdisplay; /* pixels per line */ 965 + u32 formatter_lpf = mode->vdisplay; /* lines per frame */ 966 + int formatter_frame; 967 + int formatter_cpp; 968 + int fifo_wtrmrk; 969 + u32 pkt_div; 970 + int pkt_size; 971 + u32 val; 972 + 973 + dev_info(mcde->dev, "output in %s mode, format %dbpp\n", 1031 974 (mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? 1032 975 "VIDEO" : "CMD", 1033 976 mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format)); 1034 977 formatter_cpp = 1035 978 mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format) / 8; 1036 - dev_info(drm->dev, "overlay CPP %d bytes, DSI CPP %d bytes\n", 1037 - cpp, 1038 - formatter_cpp); 979 + dev_info(mcde->dev, "Overlay CPP: %d bytes, DSI formatter CPP %d bytes\n", 980 + cpp, formatter_cpp); 981 + 982 + /* Set up the main control, watermark level at 7 */ 983 + val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT; 984 + 985 + /* 986 + * This is the internal silicon muxing of the DPI 987 + * (parallell display) lines. 
Since we are not using 988 + * this at all (we are using DSI) these are just 989 + * dummy values from the vendor tree. 990 + */ 991 + val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT; 992 + val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT; 993 + val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT; 994 + val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT; 995 + val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT; 996 + writel(val, mcde->regs + MCDE_CONF0); 1039 997 1040 998 /* Calculations from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */ 1041 999 ··· 1124 948 /* The FIFO is 640 entries deep on this v3 hardware */ 1125 949 pkt_div = mcde_dsi_get_pkt_div(mode->hdisplay, 640); 1126 950 } 1127 - dev_dbg(drm->dev, "FIFO watermark after flooring: %d bytes\n", 951 + dev_dbg(mcde->dev, "FIFO watermark after flooring: %d bytes\n", 1128 952 fifo_wtrmrk); 1129 - dev_dbg(drm->dev, "Packet divisor: %d bytes\n", pkt_div); 953 + dev_dbg(mcde->dev, "Packet divisor: %d bytes\n", pkt_div); 1130 954 1131 955 /* NOTE: pkt_div is 1 for video mode */ 1132 956 pkt_size = (formatter_ppl * formatter_cpp) / pkt_div; ··· 1134 958 if (!(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)) 1135 959 pkt_size++; 1136 960 1137 - dev_dbg(drm->dev, "DSI packet size: %d * %d bytes per line\n", 961 + dev_dbg(mcde->dev, "DSI packet size: %d * %d bytes per line\n", 1138 962 pkt_size, pkt_div); 1139 - dev_dbg(drm->dev, "Overlay frame size: %u bytes\n", 963 + dev_dbg(mcde->dev, "Overlay frame size: %u bytes\n", 1140 964 mode->hdisplay * mode->vdisplay * cpp); 1141 - mcde->stride = mode->hdisplay * cpp; 1142 - dev_dbg(drm->dev, "Overlay line stride: %u bytes\n", 1143 - mcde->stride); 1144 965 /* NOTE: pkt_div is 1 for video mode */ 1145 966 formatter_frame = pkt_size * pkt_div * formatter_lpf; 1146 - dev_dbg(drm->dev, "Formatter frame size: %u bytes\n", formatter_frame); 967 + dev_dbg(mcde->dev, "Formatter frame size: %u bytes\n", formatter_frame); 968 + 969 + *fifo_wtrmrk_lvl = fifo_wtrmrk; 970 + *dsi_pkt_size = pkt_size; 971 + *dsi_formatter_frame = formatter_frame; 972 + } 
973 + 974 + static void mcde_display_enable(struct drm_simple_display_pipe *pipe, 975 + struct drm_crtc_state *cstate, 976 + struct drm_plane_state *plane_state) 977 + { 978 + struct drm_crtc *crtc = &pipe->crtc; 979 + struct drm_plane *plane = &pipe->plane; 980 + struct drm_device *drm = crtc->dev; 981 + struct mcde *mcde = to_mcde(drm); 982 + const struct drm_display_mode *mode = &cstate->mode; 983 + struct drm_framebuffer *fb = plane->state->fb; 984 + u32 format = fb->format->format; 985 + int dsi_pkt_size; 986 + int fifo_wtrmrk; 987 + int cpp = fb->format->cpp[0]; 988 + struct drm_format_name_buf tmp; 989 + u32 dsi_formatter_frame; 990 + u32 val; 991 + int ret; 992 + 993 + /* This powers up the entire MCDE block and the DSI hardware */ 994 + ret = regulator_enable(mcde->epod); 995 + if (ret) { 996 + dev_err(drm->dev, "can't re-enable EPOD regulator\n"); 997 + return; 998 + } 999 + 1000 + dev_info(drm->dev, "enable MCDE, %d x %d format %s\n", 1001 + mode->hdisplay, mode->vdisplay, 1002 + drm_get_format_name(format, &tmp)); 1003 + 1004 + 1005 + /* Clear any pending interrupts */ 1006 + mcde_display_disable_irqs(mcde); 1007 + writel(0, mcde->regs + MCDE_IMSCERR); 1008 + writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR); 1009 + 1010 + if (mcde->dpi_output) 1011 + mcde_setup_dpi(mcde, mode, &fifo_wtrmrk); 1012 + else 1013 + mcde_setup_dsi(mcde, mode, cpp, &fifo_wtrmrk, 1014 + &dsi_formatter_frame, &dsi_pkt_size); 1015 + 1016 + mcde->stride = mode->hdisplay * cpp; 1017 + dev_dbg(drm->dev, "Overlay line stride: %u bytes\n", 1018 + mcde->stride); 1147 1019 1148 1020 /* Drain the FIFO A + channel 0 pipe so we have a clean slate */ 1149 1021 mcde_drain_pipe(mcde, MCDE_FIFO_A, MCDE_CHANNEL_0); ··· 1219 995 */ 1220 996 mcde_configure_channel(mcde, MCDE_CHANNEL_0, MCDE_FIFO_A, mode); 1221 997 1222 - /* Configure FIFO A to use DSI formatter 0 */ 1223 - mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0, 1224 - fifo_wtrmrk); 998 + if (mcde->dpi_output) { 999 + unsigned 
long lcd_freq; 1225 1000 1226 - /* 1227 - * This brings up the DSI bridge which is tightly connected 1228 - * to the MCDE DSI formatter. 1229 - * 1230 - * FIXME: if we want to use another formatter, such as DPI, 1231 - * we need to be more elaborate here and select the appropriate 1232 - * bridge. 1233 - */ 1234 - mcde_dsi_enable(mcde->bridge); 1001 + /* Configure FIFO A to use DPI formatter 0 */ 1002 + mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DPI_FORMATTER_0, 1003 + fifo_wtrmrk); 1235 1004 1236 - /* Configure the DSI formatter 0 for the DSI panel output */ 1237 - mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0, 1238 - formatter_frame, pkt_size); 1005 + /* Set up and enable the LCD clock */ 1006 + lcd_freq = clk_round_rate(mcde->fifoa_clk, mode->clock * 1000); 1007 + ret = clk_set_rate(mcde->fifoa_clk, lcd_freq); 1008 + if (ret) 1009 + dev_err(mcde->dev, "failed to set LCD clock rate %lu Hz\n", 1010 + lcd_freq); 1011 + ret = clk_prepare_enable(mcde->fifoa_clk); 1012 + if (ret) { 1013 + dev_err(mcde->dev, "failed to enable FIFO A DPI clock\n"); 1014 + return; 1015 + } 1016 + dev_info(mcde->dev, "LCD FIFO A clk rate %lu Hz\n", 1017 + clk_get_rate(mcde->fifoa_clk)); 1018 + } else { 1019 + /* Configure FIFO A to use DSI formatter 0 */ 1020 + mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0, 1021 + fifo_wtrmrk); 1022 + 1023 + /* 1024 + * This brings up the DSI bridge which is tightly connected 1025 + * to the MCDE DSI formatter. 
1026 + */ 1027 + mcde_dsi_enable(mcde->bridge); 1028 + 1029 + /* Configure the DSI formatter 0 for the DSI panel output */ 1030 + mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0, 1031 + dsi_formatter_frame, dsi_pkt_size); 1032 + } 1239 1033 1240 1034 switch (mcde->flow_mode) { 1241 1035 case MCDE_COMMAND_TE_FLOW: 1242 1036 case MCDE_COMMAND_BTA_TE_FLOW: 1243 1037 case MCDE_VIDEO_TE_FLOW: 1244 - /* We are using TE in some comination */ 1038 + /* We are using TE in some combination */ 1245 1039 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 1246 1040 val = MCDE_VSCRC_VSPOL; 1247 1041 else ··· 1311 1069 /* Disable FIFO A flow */ 1312 1070 mcde_disable_fifo(mcde, MCDE_FIFO_A, true); 1313 1071 1314 - /* This disables the DSI bridge */ 1315 - mcde_dsi_disable(mcde->bridge); 1072 + if (mcde->dpi_output) { 1073 + clk_disable_unprepare(mcde->fifoa_clk); 1074 + } else { 1075 + /* This disables the DSI bridge */ 1076 + mcde_dsi_disable(mcde->bridge); 1077 + } 1316 1078 1317 1079 event = crtc->state->event; 1318 1080 if (event) { ··· 1506 1260 DRM_FORMAT_BGR565, 1507 1261 DRM_FORMAT_YUV422, 1508 1262 }; 1263 + 1264 + ret = mcde_init_clock_divider(mcde); 1265 + if (ret) 1266 + return ret; 1509 1267 1510 1268 ret = drm_simple_display_pipe_init(drm, &mcde->pipe, 1511 1269 &mcde_display_funcs,
+89 -2
drivers/gpu/drm/mcde/mcde_display_regs.h
··· 215 215 #define MCDE_OVLXCOMP_Z_SHIFT 27 216 216 #define MCDE_OVLXCOMP_Z_MASK 0x78000000 217 217 218 + /* DPI/TV configuration registers, channel A and B */ 219 + #define MCDE_TVCRA 0x00000838 220 + #define MCDE_TVCRB 0x00000A38 221 + #define MCDE_TVCR_MOD_TV BIT(0) /* 0 = LCD mode */ 222 + #define MCDE_TVCR_INTEREN BIT(1) 223 + #define MCDE_TVCR_IFIELD BIT(2) 224 + #define MCDE_TVCR_TVMODE_SDTV_656P (0 << 3) 225 + #define MCDE_TVCR_TVMODE_SDTV_656P_LE (3 << 3) 226 + #define MCDE_TVCR_TVMODE_SDTV_656P_BE (4 << 3) 227 + #define MCDE_TVCR_SDTVMODE_Y0CBY1CR (0 << 6) 228 + #define MCDE_TVCR_SDTVMODE_CBY0CRY1 (1 << 6) 229 + #define MCDE_TVCR_AVRGEN BIT(8) 230 + #define MCDE_TVCR_CKINV BIT(9) 231 + 232 + /* TV blanking control register 1, channel A and B */ 233 + #define MCDE_TVBL1A 0x0000083C 234 + #define MCDE_TVBL1B 0x00000A3C 235 + #define MCDE_TVBL1_BEL1_SHIFT 0 /* VFP vertical front porch 11 bits */ 236 + #define MCDE_TVBL1_BSL1_SHIFT 16 /* VSW vertical sync pulse width 11 bits */ 237 + 238 + /* Pixel processing TV start line, channel A and B */ 239 + #define MCDE_TVISLA 0x00000840 240 + #define MCDE_TVISLB 0x00000A40 241 + #define MCDE_TVISL_FSL1_SHIFT 0 /* Field 1 identification start line 11 bits */ 242 + #define MCDE_TVISL_FSL2_SHIFT 16 /* Field 2 identification start line 11 bits */ 243 + 244 + /* Pixel processing TV DVO offset */ 245 + #define MCDE_TVDVOA 0x00000844 246 + #define MCDE_TVDVOB 0x00000A44 247 + #define MCDE_TVDVO_DVO1_SHIFT 0 /* VBP vertical back porch 0 = 0 */ 248 + #define MCDE_TVDVO_DVO2_SHIFT 16 249 + 250 + /* 251 + * Pixel processing TV Timing 1 252 + * HBP horizontal back porch 11 bits horizontal offset 253 + * 0 = 1 pixel HBP, 255 = 256 pixels, so actual value - 1 254 + */ 255 + #define MCDE_TVTIM1A 0x0000084C 256 + #define MCDE_TVTIM1B 0x00000A4C 257 + 258 + /* Pixel processing TV LBALW */ 259 + /* 0 = 1 clock cycle, 255 = 256 clock cycles */ 260 + #define MCDE_TVLBALWA 0x00000850 261 + #define MCDE_TVLBALWB 0x00000A50 262 + #define 
MCDE_TVLBALW_LBW_SHIFT 0 /* HSW horizontal sync width, line blanking width 11 bits */ 263 + #define MCDE_TVLBALW_ALW_SHIFT 16 /* HFP horizontal front porch, active line width 11 bits */ 264 + 265 + /* TV blanking control register 2, channel A and B */ 266 + #define MCDE_TVBL2A 0x00000854 267 + #define MCDE_TVBL2B 0x00000A54 268 + #define MCDE_TVBL2_BEL2_SHIFT 0 /* Field 2 blanking end line 11 bits */ 269 + #define MCDE_TVBL2_BSL2_SHIFT 16 /* Field 2 blanking start line 11 bits */ 270 + 271 + /* Pixel processing TV background */ 272 + #define MCDE_TVBLUA 0x00000858 273 + #define MCDE_TVBLUB 0x00000A58 274 + #define MCDE_TVBLU_TVBLU_SHIFT 0 /* 8 bits luminance */ 275 + #define MCDE_TVBLU_TVBCB_SHIFT 8 /* 8 bits Cb chrominance */ 276 + #define MCDE_TVBLU_TVBCR_SHIFT 16 /* 8 bits Cr chrominance */ 277 + 278 + /* Pixel processing LCD timing 1 */ 279 + #define MCDE_LCDTIM1A 0x00000860 280 + #define MCDE_LCDTIM1B 0x00000A60 281 + /* inverted vertical sync pulse for HRTFT 0 = active low, 1 active high */ 282 + #define MCDE_LCDTIM1B_IVP BIT(19) 283 + /* inverted vertical sync, 0 = active high (the normal), 1 = active low */ 284 + #define MCDE_LCDTIM1B_IVS BIT(20) 285 + /* inverted horizontal sync, 0 = active high (the normal), 1 = active low */ 286 + #define MCDE_LCDTIM1B_IHS BIT(21) 287 + /* inverted panel clock 0 = rising edge data out, 1 = falling edge data out */ 288 + #define MCDE_LCDTIM1B_IPC BIT(22) 289 + /* invert output enable 0 = active high, 1 = active low */ 290 + #define MCDE_LCDTIM1B_IOE BIT(23) 291 + 218 292 #define MCDE_CRC 0x00000C00 219 293 #define MCDE_CRC_C1EN BIT(2) 220 294 #define MCDE_CRC_C2EN BIT(3) ··· 434 360 #define MCDE_CRB1 0x00000A04 435 361 #define MCDE_CRX1_PCD_SHIFT 0 436 362 #define MCDE_CRX1_PCD_MASK 0x000003FF 363 + #define MCDE_CRX1_PCD_BITS 10 437 364 #define MCDE_CRX1_CLKSEL_SHIFT 10 438 365 #define MCDE_CRX1_CLKSEL_MASK 0x00001C00 439 366 #define MCDE_CRX1_CLKSEL_CLKPLL72 0 ··· 496 421 #define MCDE_ROTACONF 0x0000087C 497 422 #define 
MCDE_ROTBCONF 0x00000A7C 498 423 424 + /* Synchronization event configuration */ 499 425 #define MCDE_SYNCHCONFA 0x00000880 500 426 #define MCDE_SYNCHCONFB 0x00000A80 427 + #define MCDE_SYNCHCONF_HWREQVEVENT_SHIFT 0 428 + #define MCDE_SYNCHCONF_HWREQVEVENT_VSYNC (0 << 0) 429 + #define MCDE_SYNCHCONF_HWREQVEVENT_BACK_PORCH (1 << 0) 430 + #define MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO (2 << 0) 431 + #define MCDE_SYNCHCONF_HWREQVEVENT_FRONT_PORCH (3 << 0) 432 + #define MCDE_SYNCHCONF_HWREQVCNT_SHIFT 2 /* 14 bits */ 433 + #define MCDE_SYNCHCONF_SWINTVEVENT_VSYNC (0 << 16) 434 + #define MCDE_SYNCHCONF_SWINTVEVENT_BACK_PORCH (1 << 16) 435 + #define MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO (2 << 16) 436 + #define MCDE_SYNCHCONF_SWINTVEVENT_FRONT_PORCH (3 << 16) 437 + #define MCDE_SYNCHCONF_SWINTVCNT_SHIFT 18 /* 14 bits */ 501 438 502 439 /* Channel A+B control registers */ 503 440 #define MCDE_CTRLA 0x00000884 ··· 552 465 #define MCDE_DSICONF0_PACKING_MASK 0x00700000 553 466 #define MCDE_DSICONF0_PACKING_RGB565 0 554 467 #define MCDE_DSICONF0_PACKING_RGB666 1 555 - #define MCDE_DSICONF0_PACKING_RGB666_PACKED 2 556 - #define MCDE_DSICONF0_PACKING_RGB888 3 468 + #define MCDE_DSICONF0_PACKING_RGB888 2 469 + #define MCDE_DSICONF0_PACKING_BGR888 3 557 470 #define MCDE_DSICONF0_PACKING_HDTV 4 558 471 559 472 #define MCDE_DSIVID0FRAME 0x00000E04
+10
drivers/gpu/drm/mcde/mcde_drm.h
··· 62 62 MCDE_VIDEO_TE_FLOW, 63 63 /* Video mode with the formatter itself as sync source */ 64 64 MCDE_VIDEO_FORMATTER_FLOW, 65 + /* DPI video with the formatter itself as sync source */ 66 + MCDE_DPI_FORMATTER_FLOW, 65 67 }; 66 68 67 69 struct mcde { ··· 74 72 struct drm_connector *connector; 75 73 struct drm_simple_display_pipe pipe; 76 74 struct mipi_dsi_device *mdsi; 75 + bool dpi_output; 77 76 s16 stride; 78 77 enum mcde_flow_mode flow_mode; 79 78 unsigned int flow_active; ··· 85 82 struct clk *mcde_clk; 86 83 struct clk *lcd_clk; 87 84 struct clk *hdmi_clk; 85 + /* Handles to the clock dividers for FIFO A and B */ 86 + struct clk *fifoa_clk; 87 + struct clk *fifob_clk; 88 + /* Locks the MCDE FIFO control register A and B */ 89 + spinlock_t fifo_crx1_lock; 88 90 89 91 struct regulator *epod; 90 92 struct regulator *vana; ··· 112 104 void mcde_display_irq(struct mcde *mcde); 113 105 void mcde_display_disable_irqs(struct mcde *mcde); 114 106 int mcde_display_init(struct drm_device *drm); 107 + 108 + int mcde_init_clock_divider(struct mcde *mcde); 115 109 116 110 #endif /* _MCDE_DRM_H_ */
+34 -12
drivers/gpu/drm/mcde/mcde_drv.c
··· 22 22 * The hardware has four display pipes, and the layout is a little 23 23 * bit like this:: 24 24 * 25 - * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI 26 - * External 0..5 0..3 A,B, 3 x DSI bridge 25 + * Memory -> Overlay -> Channel -> FIFO -> 8 formatters -> DSI/DPI 26 + * External 0..5 0..3 A,B, 6 x DSI bridge 27 27 * source 0..9 C0,C1 2 x DPI 28 28 * 29 29 * FIFOs A and B are for LCD and HDMI while FIFO CO/C1 are for 30 30 * panels with embedded buffer. 31 - * 3 of the formatters are for DSI. 31 + * 6 of the formatters are for DSI, 3 pairs for VID/CMD respectively. 32 32 * 2 of the formatters are for DPI. 33 33 * 34 34 * Behind the formatters are the DSI or DPI ports that route to ··· 130 130 struct mcde *mcde = to_mcde(drm); 131 131 int ret; 132 132 133 + /* 134 + * If no other bridge was found, check if we have a DPI panel or 135 + * any other bridge connected directly to the MCDE DPI output. 136 + * If a DSI bridge is found, DSI will take precedence. 137 + * 138 + * TODO: more elaborate bridge selection if we have more than one 139 + * thing attached to the system. 
140 + */ 133 141 if (!mcde->bridge) { 134 - dev_err(drm->dev, "no display output bridge yet\n"); 135 - return -EPROBE_DEFER; 142 + struct drm_panel *panel; 143 + struct drm_bridge *bridge; 144 + 145 + ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 146 + 0, 0, &panel, &bridge); 147 + if (ret) { 148 + dev_err(drm->dev, 149 + "Could not locate any output bridge or panel\n"); 150 + return ret; 151 + } 152 + if (panel) { 153 + bridge = drm_panel_bridge_add_typed(panel, 154 + DRM_MODE_CONNECTOR_DPI); 155 + if (IS_ERR(bridge)) { 156 + dev_err(drm->dev, 157 + "Could not connect panel bridge\n"); 158 + return PTR_ERR(bridge); 159 + } 160 + } 161 + mcde->dpi_output = true; 162 + mcde->bridge = bridge; 163 + mcde->flow_mode = MCDE_DPI_FORMATTER_FLOW; 136 164 } 137 165 138 166 mode_config = &drm->mode_config; ··· 184 156 return ret; 185 157 } 186 158 187 - /* 188 - * Attach the DSI bridge 189 - * 190 - * TODO: when adding support for the DPI bridge or several DSI bridges, 191 - * we selectively connect the bridge(s) here instead of this simple 192 - * attachment. 193 - */ 159 + /* Attach the bridge. */ 194 160 ret = drm_simple_display_pipe_attach_bridge(&mcde->pipe, 195 161 mcde->bridge); 196 162 if (ret) {
+35 -16
drivers/gpu/drm/meson/meson_dw_hdmi.c
··· 145 145 struct reset_control *hdmitx_apb; 146 146 struct reset_control *hdmitx_ctrl; 147 147 struct reset_control *hdmitx_phy; 148 - struct clk *hdmi_pclk; 149 - struct clk *venci_clk; 150 148 struct regulator *hdmi_supply; 151 149 u32 irq_stat; 152 150 struct dw_hdmi *hdmi; ··· 944 946 regulator_disable(data); 945 947 } 946 948 949 + static void meson_disable_clk(void *data) 950 + { 951 + clk_disable_unprepare(data); 952 + } 953 + 954 + static int meson_enable_clk(struct device *dev, char *name) 955 + { 956 + struct clk *clk; 957 + int ret; 958 + 959 + clk = devm_clk_get(dev, name); 960 + if (IS_ERR(clk)) { 961 + dev_err(dev, "Unable to get %s pclk\n", name); 962 + return PTR_ERR(clk); 963 + } 964 + 965 + ret = clk_prepare_enable(clk); 966 + if (!ret) 967 + ret = devm_add_action_or_reset(dev, meson_disable_clk, clk); 968 + 969 + return ret; 970 + } 971 + 947 972 static int meson_dw_hdmi_bind(struct device *dev, struct device *master, 948 973 void *data) 949 974 { ··· 1047 1026 if (IS_ERR(meson_dw_hdmi->hdmitx)) 1048 1027 return PTR_ERR(meson_dw_hdmi->hdmitx); 1049 1028 1050 - meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr"); 1051 - if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) { 1052 - dev_err(dev, "Unable to get HDMI pclk\n"); 1053 - return PTR_ERR(meson_dw_hdmi->hdmi_pclk); 1054 - } 1055 - clk_prepare_enable(meson_dw_hdmi->hdmi_pclk); 1029 + ret = meson_enable_clk(dev, "isfr"); 1030 + if (ret) 1031 + return ret; 1056 1032 1057 - meson_dw_hdmi->venci_clk = devm_clk_get(dev, "venci"); 1058 - if (IS_ERR(meson_dw_hdmi->venci_clk)) { 1059 - dev_err(dev, "Unable to get venci clk\n"); 1060 - return PTR_ERR(meson_dw_hdmi->venci_clk); 1061 - } 1062 - clk_prepare_enable(meson_dw_hdmi->venci_clk); 1033 + ret = meson_enable_clk(dev, "iahb"); 1034 + if (ret) 1035 + return ret; 1036 + 1037 + ret = meson_enable_clk(dev, "venci"); 1038 + if (ret) 1039 + return ret; 1063 1040 1064 1041 dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi, 1065 1042 
&meson_dw_hdmi_regmap_config); ··· 1090 1071 1091 1072 encoder->possible_crtcs = BIT(0); 1092 1073 1074 + meson_dw_hdmi_init(meson_dw_hdmi); 1075 + 1093 1076 DRM_DEBUG_DRIVER("encoder initialized\n"); 1094 1077 1095 1078 /* Bridge / Connector */ ··· 1115 1094 &meson_dw_hdmi->dw_plat_data); 1116 1095 if (IS_ERR(meson_dw_hdmi->hdmi)) 1117 1096 return PTR_ERR(meson_dw_hdmi->hdmi); 1118 - 1119 - meson_dw_hdmi_init(meson_dw_hdmi); 1120 1097 1121 1098 next_bridge = of_drm_find_bridge(pdev->dev.of_node); 1122 1099 if (next_bridge)
-1
drivers/gpu/drm/mgag200/mgag200_drv.c
··· 37 37 .major = DRIVER_MAJOR, 38 38 .minor = DRIVER_MINOR, 39 39 .patchlevel = DRIVER_PATCHLEVEL, 40 - .gem_create_object = drm_gem_shmem_create_object_cached, 41 40 DRM_GEM_SHMEM_DRIVER_OPS, 42 41 }; 43 42
+1 -3
drivers/gpu/drm/msm/msm_gem.c
··· 211 211 * address_space (so unmap_mapping_range does what we want, 212 212 * in particular in the case of mmap'd dmabufs) 213 213 */ 214 - fput(vma->vm_file); 215 - get_file(obj->filp); 216 214 vma->vm_pgoff = 0; 217 - vma->vm_file = obj->filp; 215 + vma_set_file(vma, obj->filp); 218 216 219 217 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 220 218 }
+4 -1
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 32 32 #include <linux/hdmi.h> 33 33 #include <linux/component.h> 34 34 35 + #include <drm/drm_atomic.h> 35 36 #include <drm/drm_atomic_helper.h> 36 37 #include <drm/drm_dp_helper.h> 37 38 #include <drm/drm_edid.h> ··· 1162 1161 1163 1162 static struct drm_encoder * 1164 1163 nv50_mstc_atomic_best_encoder(struct drm_connector *connector, 1165 - struct drm_connector_state *connector_state) 1164 + struct drm_atomic_state *state) 1166 1165 { 1166 + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, 1167 + connector); 1167 1168 struct nv50_mstc *mstc = nv50_mstc(connector); 1168 1169 struct drm_crtc *crtc = connector_state->crtc; 1169 1170
+1 -2
drivers/gpu/drm/omapdrm/omap_gem.c
··· 564 564 * address_space (so unmap_mapping_range does what we want, 565 565 * in particular in the case of mmap'd dmabufs) 566 566 */ 567 - fput(vma->vm_file); 568 567 vma->vm_pgoff = 0; 569 - vma->vm_file = get_file(obj->filp); 568 + vma_set_file(vma, obj->filp); 570 569 571 570 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 572 571 }
+275 -68
drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
··· 23 23 #include "panel-samsung-s6e63m0.h" 24 24 25 25 /* Manufacturer Command Set */ 26 - #define MCS_ELVSS_ON 0xb1 27 - #define MCS_MIECTL1 0xc0 28 - #define MCS_BCMODE 0xc1 26 + #define MCS_ELVSS_ON 0xb1 27 + #define MCS_TEMP_SWIRE 0xb2 28 + #define MCS_MIECTL1 0xc0 29 + #define MCS_BCMODE 0xc1 29 30 #define MCS_ERROR_CHECK 0xd5 30 31 #define MCS_READ_ID1 0xda 31 32 #define MCS_READ_ID2 0xdb 32 33 #define MCS_READ_ID3 0xdc 33 34 #define MCS_LEVEL_2_KEY 0xf0 34 35 #define MCS_MTP_KEY 0xf1 35 - #define MCS_DISCTL 0xf2 36 - #define MCS_SRCCTL 0xf6 37 - #define MCS_IFCTL 0xf7 38 - #define MCS_PANELCTL 0xF8 39 - #define MCS_PGAMMACTL 0xfa 36 + #define MCS_DISCTL 0xf2 37 + #define MCS_SRCCTL 0xf6 38 + #define MCS_IFCTL 0xf7 39 + #define MCS_PANELCTL 0xf8 40 + #define MCS_PGAMMACTL 0xfa 40 41 41 42 #define S6E63M0_LCD_ID_VALUE_M2 0xA4 42 43 #define S6E63M0_LCD_ID_VALUE_SM2 0xB4 43 44 #define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6 44 45 45 - #define NUM_GAMMA_LEVELS 11 46 - #define GAMMA_TABLE_COUNT 23 46 + #define NUM_GAMMA_LEVELS 28 47 + #define GAMMA_TABLE_COUNT 23 47 48 48 - #define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) 49 + #define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) 49 50 50 51 /* array of gamma tables for gamma value 2.2 */ 51 52 static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = { 52 - { MCS_PGAMMACTL, 0x00, 53 - 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8, 54 - 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7, 55 - 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 }, 56 - { MCS_PGAMMACTL, 0x00, 57 - 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0, 58 - 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF, 59 - 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 }, 60 - { MCS_PGAMMACTL, 0x00, 61 - 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF, 62 - 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC, 63 - 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D }, 64 - { MCS_PGAMMACTL, 0x00, 65 - 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC, 66 - 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9, 67 - 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 
0x9E }, 68 - { MCS_PGAMMACTL, 0x00, 69 - 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB, 70 - 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8, 71 - 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB }, 72 - { MCS_PGAMMACTL, 0x00, 73 - 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA, 74 - 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6, 75 - 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA }, 76 - { MCS_PGAMMACTL, 0x00, 77 - 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8, 78 - 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4, 79 - 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 }, 80 - { MCS_PGAMMACTL, 0x00, 81 - 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9, 82 - 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3, 83 - 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA }, 84 - { MCS_PGAMMACTL, 0x00, 85 - 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6, 86 - 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2, 87 - 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 }, 88 - { MCS_PGAMMACTL, 0x00, 89 - 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6, 90 - 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1, 91 - 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 }, 92 - { MCS_PGAMMACTL, 0x00, 93 - 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6, 94 - 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0, 95 - 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb }, 53 + /* 30 cd */ 54 + { MCS_PGAMMACTL, 0x02, 55 + 0x18, 0x08, 0x24, 0xA1, 0x51, 0x7B, 0xCE, 56 + 0xCB, 0xC2, 0xC7, 0xCB, 0xBC, 0xDA, 0xDD, 57 + 0xD3, 0x00, 0x53, 0x00, 0x52, 0x00, 0x6F, }, 58 + /* 40 cd */ 59 + { MCS_PGAMMACTL, 0x02, 60 + 0x18, 0x08, 0x24, 0x97, 0x58, 0x71, 0xCC, 61 + 0xCB, 0xC0, 0xC5, 0xC9, 0xBA, 0xD9, 0xDC, 62 + 0xD1, 0x00, 0x5B, 0x00, 0x5A, 0x00, 0x7A, }, 63 + /* 50 cd */ 64 + { MCS_PGAMMACTL, 0x02, 65 + 0x18, 0x08, 0x24, 0x96, 0x58, 0x72, 0xCB, 66 + 0xCA, 0xBF, 0xC6, 0xC9, 0xBA, 0xD6, 0xD9, 67 + 0xCD, 0x00, 0x61, 0x00, 0x61, 0x00, 0x83, }, 68 + /* 60 cd */ 69 + { MCS_PGAMMACTL, 0x02, 70 + 0x18, 0x08, 0x24, 0x91, 0x5E, 0x6E, 0xC9, 71 + 0xC9, 0xBD, 0xC4, 0xC9, 0xB8, 0xD3, 0xD7, 72 + 0xCA, 0x00, 0x69, 0x00, 0x67, 0x00, 0x8D, }, 73 + /* 70 cd */ 74 + { MCS_PGAMMACTL, 
0x02, 75 + 0x18, 0x08, 0x24, 0x8E, 0x62, 0x6B, 0xC7, 76 + 0xC9, 0xBB, 0xC3, 0xC7, 0xB7, 0xD3, 0xD7, 77 + 0xCA, 0x00, 0x6E, 0x00, 0x6C, 0x00, 0x94, }, 78 + /* 80 cd */ 79 + { MCS_PGAMMACTL, 0x02, 80 + 0x18, 0x08, 0x24, 0x89, 0x68, 0x65, 0xC9, 81 + 0xC9, 0xBC, 0xC1, 0xC5, 0xB6, 0xD2, 0xD5, 82 + 0xC9, 0x00, 0x73, 0x00, 0x72, 0x00, 0x9A, }, 83 + /* 90 cd */ 84 + { MCS_PGAMMACTL, 0x02, 85 + 0x18, 0x08, 0x24, 0x89, 0x69, 0x64, 0xC7, 86 + 0xC8, 0xBB, 0xC0, 0xC5, 0xB4, 0xD2, 0xD5, 87 + 0xC9, 0x00, 0x77, 0x00, 0x76, 0x00, 0xA0, }, 88 + /* 100 cd */ 89 + { MCS_PGAMMACTL, 0x02, 90 + 0x18, 0x08, 0x24, 0x86, 0x69, 0x60, 0xC6, 91 + 0xC8, 0xBA, 0xBF, 0xC4, 0xB4, 0xD0, 0xD4, 92 + 0xC6, 0x00, 0x7C, 0x00, 0x7A, 0x00, 0xA7, }, 93 + /* 110 cd */ 94 + { MCS_PGAMMACTL, 0x02, 95 + 0x18, 0x08, 0x24, 0x86, 0x6A, 0x60, 0xC5, 96 + 0xC7, 0xBA, 0xBD, 0xC3, 0xB2, 0xD0, 0xD4, 97 + 0xC5, 0x00, 0x80, 0x00, 0x7E, 0x00, 0xAD, }, 98 + /* 120 cd */ 99 + { MCS_PGAMMACTL, 0x02, 100 + 0x18, 0x08, 0x24, 0x82, 0x6B, 0x5E, 0xC4, 101 + 0xC8, 0xB9, 0xBD, 0xC2, 0xB1, 0xCE, 0xD2, 102 + 0xC4, 0x00, 0x85, 0x00, 0x82, 0x00, 0xB3, }, 103 + /* 130 cd */ 104 + { MCS_PGAMMACTL, 0x02, 105 + 0x18, 0x08, 0x24, 0x8C, 0x6C, 0x60, 0xC3, 106 + 0xC7, 0xB9, 0xBC, 0xC1, 0xAF, 0xCE, 0xD2, 107 + 0xC3, 0x00, 0x88, 0x00, 0x86, 0x00, 0xB8, }, 108 + /* 140 cd */ 109 + { MCS_PGAMMACTL, 0x02, 110 + 0x18, 0x08, 0x24, 0x80, 0x6C, 0x5F, 0xC1, 111 + 0xC6, 0xB7, 0xBC, 0xC1, 0xAE, 0xCD, 0xD0, 112 + 0xC2, 0x00, 0x8C, 0x00, 0x8A, 0x00, 0xBE, }, 113 + /* 150 cd */ 114 + { MCS_PGAMMACTL, 0x02, 115 + 0x18, 0x08, 0x24, 0x80, 0x6E, 0x5F, 0xC1, 116 + 0xC6, 0xB6, 0xBC, 0xC0, 0xAE, 0xCC, 0xD0, 117 + 0xC2, 0x00, 0x8F, 0x00, 0x8D, 0x00, 0xC2, }, 118 + /* 160 cd */ 119 + { MCS_PGAMMACTL, 0x02, 120 + 0x18, 0x08, 0x24, 0x7F, 0x6E, 0x5F, 0xC0, 121 + 0xC6, 0xB5, 0xBA, 0xBF, 0xAD, 0xCB, 0xCF, 122 + 0xC0, 0x00, 0x94, 0x00, 0x91, 0x00, 0xC8, }, 123 + /* 170 cd */ 124 + { MCS_PGAMMACTL, 0x02, 125 + 0x18, 0x08, 0x24, 0x7C, 0x6D, 0x5C, 0xC0, 126 + 0xC6, 0xB4, 
0xBB, 0xBE, 0xAD, 0xCA, 0xCF, 127 + 0xC0, 0x00, 0x96, 0x00, 0x94, 0x00, 0xCC, }, 128 + /* 180 cd */ 129 + { MCS_PGAMMACTL, 0x02, 130 + 0x18, 0x08, 0x24, 0x7B, 0x6D, 0x5B, 0xC0, 131 + 0xC5, 0xB3, 0xBA, 0xBE, 0xAD, 0xCA, 0xCE, 132 + 0xBF, 0x00, 0x99, 0x00, 0x97, 0x00, 0xD0, }, 133 + /* 190 cd */ 134 + { MCS_PGAMMACTL, 0x02, 135 + 0x18, 0x08, 0x24, 0x7A, 0x6D, 0x59, 0xC1, 136 + 0xC5, 0xB4, 0xB8, 0xBD, 0xAC, 0xC9, 0xCE, 137 + 0xBE, 0x00, 0x9D, 0x00, 0x9A, 0x00, 0xD5, }, 138 + /* 200 cd */ 139 + { MCS_PGAMMACTL, 0x02, 140 + 0x18, 0x08, 0x24, 0x79, 0x6D, 0x58, 0xC1, 141 + 0xC4, 0xB4, 0xB6, 0xBD, 0xAA, 0xCA, 0xCD, 142 + 0xBE, 0x00, 0x9F, 0x00, 0x9D, 0x00, 0xD9, }, 143 + /* 210 cd */ 144 + { MCS_PGAMMACTL, 0x02, 145 + 0x18, 0x08, 0x24, 0x79, 0x6D, 0x57, 0xC0, 146 + 0xC4, 0xB4, 0xB7, 0xBD, 0xAA, 0xC8, 0xCC, 147 + 0xBD, 0x00, 0xA2, 0x00, 0xA0, 0x00, 0xDD, }, 148 + /* 220 cd */ 149 + { MCS_PGAMMACTL, 0x02, 150 + 0x18, 0x08, 0x24, 0x78, 0x6F, 0x58, 0xBF, 151 + 0xC4, 0xB3, 0xB5, 0xBB, 0xA9, 0xC8, 0xCC, 152 + 0xBC, 0x00, 0xA6, 0x00, 0xA3, 0x00, 0xE2, }, 153 + /* 230 cd */ 154 + { MCS_PGAMMACTL, 0x02, 155 + 0x18, 0x08, 0x24, 0x75, 0x6F, 0x56, 0xBF, 156 + 0xC3, 0xB2, 0xB6, 0xBB, 0xA8, 0xC7, 0xCB, 157 + 0xBC, 0x00, 0xA8, 0x00, 0xA6, 0x00, 0xE6, }, 158 + /* 240 cd */ 159 + { MCS_PGAMMACTL, 0x02, 160 + 0x18, 0x08, 0x24, 0x76, 0x6F, 0x56, 0xC0, 161 + 0xC3, 0xB2, 0xB5, 0xBA, 0xA8, 0xC6, 0xCB, 162 + 0xBB, 0x00, 0xAA, 0x00, 0xA8, 0x00, 0xE9, }, 163 + /* 250 cd */ 164 + { MCS_PGAMMACTL, 0x02, 165 + 0x18, 0x08, 0x24, 0x74, 0x6D, 0x54, 0xBF, 166 + 0xC3, 0xB2, 0xB4, 0xBA, 0xA7, 0xC6, 0xCA, 167 + 0xBA, 0x00, 0xAD, 0x00, 0xAB, 0x00, 0xED, }, 168 + /* 260 cd */ 169 + { MCS_PGAMMACTL, 0x02, 170 + 0x18, 0x08, 0x24, 0x74, 0x6E, 0x54, 0xBD, 171 + 0xC2, 0xB0, 0xB5, 0xBA, 0xA7, 0xC5, 0xC9, 172 + 0xBA, 0x00, 0xB0, 0x00, 0xAE, 0x00, 0xF1, }, 173 + /* 270 cd */ 174 + { MCS_PGAMMACTL, 0x02, 175 + 0x18, 0x08, 0x24, 0x71, 0x6C, 0x50, 0xBD, 176 + 0xC3, 0xB0, 0xB4, 0xB8, 0xA6, 0xC6, 0xC9, 177 + 0xBB, 0x00, 
0xB2, 0x00, 0xB1, 0x00, 0xF4, }, 178 + /* 280 cd */ 179 + { MCS_PGAMMACTL, 0x02, 180 + 0x18, 0x08, 0x24, 0x6E, 0x6C, 0x4D, 0xBE, 181 + 0xC3, 0xB1, 0xB3, 0xB8, 0xA5, 0xC6, 0xC8, 182 + 0xBB, 0x00, 0xB4, 0x00, 0xB3, 0x00, 0xF7, }, 183 + /* 290 cd */ 184 + { MCS_PGAMMACTL, 0x02, 185 + 0x18, 0x08, 0x24, 0x71, 0x70, 0x50, 0xBD, 186 + 0xC1, 0xB0, 0xB2, 0xB8, 0xA4, 0xC6, 0xC7, 187 + 0xBB, 0x00, 0xB6, 0x00, 0xB6, 0x00, 0xFA, }, 188 + /* 300 cd */ 189 + { MCS_PGAMMACTL, 0x02, 190 + 0x18, 0x08, 0x24, 0x70, 0x6E, 0x4E, 0xBC, 191 + 0xC0, 0xAF, 0xB3, 0xB8, 0xA5, 0xC5, 0xC7, 192 + 0xBB, 0x00, 0xB9, 0x00, 0xB8, 0x00, 0xFC, }, 193 + }; 194 + 195 + #define NUM_ACL_LEVELS 7 196 + #define ACL_TABLE_COUNT 28 197 + 198 + static u8 const s6e63m0_acl[NUM_ACL_LEVELS][ACL_TABLE_COUNT] = { 199 + /* NULL ACL */ 200 + { MCS_BCMODE, 201 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 202 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 203 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 204 + 0x00, 0x00, 0x00 }, 205 + /* 40P ACL */ 206 + { MCS_BCMODE, 207 + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, 208 + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 209 + 0x01, 0x06, 0x0C, 0x11, 0x16, 0x1C, 0x21, 0x26, 210 + 0x2B, 0x31, 0x36 }, 211 + /* 43P ACL */ 212 + { MCS_BCMODE, 213 + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, 214 + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 215 + 0x01, 0x07, 0x0C, 0x12, 0x18, 0x1E, 0x23, 0x29, 216 + 0x2F, 0x34, 0x3A }, 217 + /* 45P ACL */ 218 + { MCS_BCMODE, 219 + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, 220 + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 221 + 0x01, 0x07, 0x0D, 0x13, 0x19, 0x1F, 0x25, 0x2B, 222 + 0x31, 0x37, 0x3D }, 223 + /* 47P ACL */ 224 + { MCS_BCMODE, 225 + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, 226 + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 227 + 0x01, 0x07, 0x0E, 0x14, 0x1B, 0x21, 0x27, 0x2E, 228 + 0x34, 0x3B, 0x41 }, 229 + /* 48P ACL */ 230 + { MCS_BCMODE, 231 + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 
0xDF, 0x00, 232 + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 233 + 0x01, 0x08, 0x0E, 0x15, 0x1B, 0x22, 0x29, 0x2F, 234 + 0x36, 0x3C, 0x43 }, 235 + /* 50P ACL */ 236 + { MCS_BCMODE, 237 + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, 238 + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, 239 + 0x01, 0x08, 0x0F, 0x16, 0x1D, 0x24, 0x2A, 0x31, 240 + 0x38, 0x3F, 0x46 }, 241 + }; 242 + 243 + /* This tells us which ACL level goes with which gamma */ 244 + static u8 const s6e63m0_acl_per_gamma[NUM_GAMMA_LEVELS] = { 245 + /* 30 - 60 cd: ACL off/NULL */ 246 + 0, 0, 0, 0, 247 + /* 70 - 250 cd: 40P ACL */ 248 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 249 + /* 260 - 300 cd: 50P ACL */ 250 + 6, 6, 6, 6, 6, 251 + }; 252 + 253 + /* The ELVSS backlight regulator has 5 levels */ 254 + #define S6E63M0_ELVSS_LEVELS 5 255 + 256 + static u8 const s6e63m0_elvss_offsets[S6E63M0_ELVSS_LEVELS] = { 257 + 0x00, /* not set */ 258 + 0x0D, /* 30 cd - 100 cd */ 259 + 0x09, /* 110 cd - 160 cd */ 260 + 0x07, /* 170 cd - 200 cd */ 261 + 0x00, /* 210 cd - 300 cd */ 262 + }; 263 + 264 + /* This tells us which ELVSS level goes with which gamma */ 265 + static u8 const s6e63m0_elvss_per_gamma[NUM_GAMMA_LEVELS] = { 266 + /* 30 - 100 cd */ 267 + 1, 1, 1, 1, 1, 1, 1, 1, 268 + /* 110 - 160 cd */ 269 + 2, 2, 2, 2, 2, 2, 270 + /* 170 - 200 cd */ 271 + 3, 3, 3, 3, 272 + /* 210 - 300 cd */ 273 + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 96 274 }; 97 275 98 276 struct s6e63m0 { ··· 280 102 struct drm_panel panel; 281 103 struct backlight_device *bl_dev; 282 104 u8 lcd_type; 105 + u8 elvss_pulse; 283 106 284 107 struct regulator_bulk_data supplies[2]; 285 108 struct gpio_desc *reset_gpio; ··· 366 187 367 188 dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); 368 189 369 - /* We attempt to detect what panel is mounted on the controller */ 190 + /* 191 + * We attempt to detect what panel is mounted on the controller. 
192 + * The third ID byte represents the desired ELVSS pulse for 193 + * some displays. 194 + */ 370 195 switch (id2) { 371 196 case S6E63M0_LCD_ID_VALUE_M2: 372 197 dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n"); 198 + ctx->elvss_pulse = id3; 373 199 break; 374 200 case S6E63M0_LCD_ID_VALUE_SM2: 375 201 case S6E63M0_LCD_ID_VALUE_SM2_1: 376 202 dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n"); 203 + ctx->elvss_pulse = id3; 377 204 break; 378 205 default: 379 206 dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2); 207 + /* Default ELVSS pulse level */ 208 + ctx->elvss_pulse = 0x16; 380 209 break; 381 210 } 382 211 ··· 397 210 { 398 211 s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL, 399 212 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f, 400 - 0x63, 0x86, 0x1a, 0x33, 0x0d, 0x00, 0x00); 213 + 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00); 401 214 402 215 s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL, 403 216 0x02, 0x03, 0x1c, 0x10, 0x10); ··· 413 226 0x01); 414 227 415 228 s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL, 416 - 0x00, 0x8c, 0x07); 417 - s6e63m0_dcs_write_seq_static(ctx, 0xb3, 418 - 0xc); 229 + 0x00, 0x8e, 0x07); 230 + s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c); 419 231 420 232 s6e63m0_dcs_write_seq_static(ctx, 0xb5, 421 233 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17, ··· 433 247 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b, 434 248 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a, 435 249 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23, 436 - 0x21, 0x20, 0x1e, 0x1e, 0x00, 0x00, 0x11, 437 - 0x22, 0x33, 0x44, 0x44, 0x44, 0x55, 0x55, 438 - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66); 250 + 0x21, 0x20, 0x1e, 0x1e); 251 + 252 + s6e63m0_dcs_write_seq_static(ctx, 0xb8, 253 + 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44, 254 + 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 255 + 0x66, 0x66); 439 256 440 257 s6e63m0_dcs_write_seq_static(ctx, 0xb9, 441 258 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17, ··· 458 269 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 459 270 0x09, 0x0d, 0x0f, 
0x12, 0x15, 0x18); 460 271 461 - s6e63m0_dcs_write_seq_static(ctx, 0xb2, 272 + s6e63m0_dcs_write_seq_static(ctx, MCS_TEMP_SWIRE, 462 273 0x10, 0x10, 0x0b, 0x05); 463 274 464 275 s6e63m0_dcs_write_seq_static(ctx, MCS_MIECTL1, ··· 636 447 static int s6e63m0_set_brightness(struct backlight_device *bd) 637 448 { 638 449 struct s6e63m0 *ctx = bl_get_data(bd); 639 - 640 450 int brightness = bd->props.brightness; 451 + u8 elvss_val; 452 + u8 elvss_cmd_set[5]; 453 + int i; 641 454 642 - /* disable and set new gamma */ 455 + /* Adjust ELVSS to candela level */ 456 + i = s6e63m0_elvss_per_gamma[brightness]; 457 + elvss_val = ctx->elvss_pulse + s6e63m0_elvss_offsets[i]; 458 + if (elvss_val > 0x1f) 459 + elvss_val = 0x1f; 460 + elvss_cmd_set[0] = MCS_TEMP_SWIRE; 461 + elvss_cmd_set[1] = elvss_val; 462 + elvss_cmd_set[2] = elvss_val; 463 + elvss_cmd_set[3] = elvss_val; 464 + elvss_cmd_set[4] = elvss_val; 465 + s6e63m0_dcs_write(ctx, elvss_cmd_set, 5); 466 + 467 + /* Update the ACL per gamma value */ 468 + i = s6e63m0_acl_per_gamma[brightness]; 469 + s6e63m0_dcs_write(ctx, s6e63m0_acl[i], 470 + ARRAY_SIZE(s6e63m0_acl[i])); 471 + 472 + /* Update gamma table */ 643 473 s6e63m0_dcs_write(ctx, s6e63m0_gamma_22[brightness], 644 474 ARRAY_SIZE(s6e63m0_gamma_22[brightness])); 475 + s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x03); 645 476 646 - /* update gamma table. */ 647 - s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x01); 648 477 649 478 return s6e63m0_clear_error(ctx); 650 479 }
+28
drivers/gpu/drm/panel/panel-simple.c
··· 2267 2267 }, 2268 2268 }; 2269 2269 2270 + static const struct drm_display_mode innolux_n125hce_gn1_mode = { 2271 + .clock = 162000, 2272 + .hdisplay = 1920, 2273 + .hsync_start = 1920 + 40, 2274 + .hsync_end = 1920 + 40 + 40, 2275 + .htotal = 1920 + 40 + 40 + 80, 2276 + .vdisplay = 1080, 2277 + .vsync_start = 1080 + 4, 2278 + .vsync_end = 1080 + 4 + 4, 2279 + .vtotal = 1080 + 4 + 4 + 24, 2280 + }; 2281 + 2282 + static const struct panel_desc innolux_n125hce_gn1 = { 2283 + .modes = &innolux_n125hce_gn1_mode, 2284 + .num_modes = 1, 2285 + .bpc = 8, 2286 + .size = { 2287 + .width = 276, 2288 + .height = 155, 2289 + }, 2290 + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 2291 + .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB, 2292 + .connector_type = DRM_MODE_CONNECTOR_eDP, 2293 + }; 2294 + 2270 2295 static const struct drm_display_mode innolux_n156bge_l21_mode = { 2271 2296 .clock = 69300, 2272 2297 .hdisplay = 1366, ··· 4147 4122 }, { 4148 4123 .compatible = "innolux,n116bge", 4149 4124 .data = &innolux_n116bge, 4125 + }, { 4126 + .compatible = "innolux,n125hce-gn1", 4127 + .data = &innolux_n125hce_gn1, 4150 4128 }, { 4151 4129 .compatible = "innolux,n156bge-l21", 4152 4130 .data = &innolux_n156bge_l21,
+1 -1
drivers/gpu/drm/panfrost/panfrost_gem.c
··· 228 228 INIT_LIST_HEAD(&obj->mappings.list); 229 229 mutex_init(&obj->mappings.lock); 230 230 obj->base.base.funcs = &panfrost_gem_funcs; 231 - obj->base.map_cached = pfdev->coherent; 231 + obj->base.map_wc = !pfdev->coherent; 232 232 233 233 return &obj->base.base; 234 234 }
+22
drivers/gpu/drm/ttm/ttm_pool.c
··· 63 63 static struct ttm_pool_type global_write_combined[MAX_ORDER]; 64 64 static struct ttm_pool_type global_uncached[MAX_ORDER]; 65 65 66 + static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER]; 67 + static struct ttm_pool_type global_dma32_uncached[MAX_ORDER]; 68 + 66 69 static spinlock_t shrinker_lock; 67 70 static struct list_head shrinker_list; 68 71 static struct shrinker mm_shrinker; ··· 293 290 #ifdef CONFIG_X86 294 291 switch (caching) { 295 292 case ttm_write_combined: 293 + if (pool->use_dma32) 294 + return &global_dma32_write_combined[order]; 295 + 296 296 return &global_write_combined[order]; 297 297 case ttm_uncached: 298 + if (pool->use_dma32) 299 + return &global_dma32_uncached[order]; 300 + 298 301 return &global_uncached[order]; 299 302 default: 300 303 break; ··· 579 570 seq_puts(m, "uc\t:"); 580 571 ttm_pool_debugfs_orders(global_uncached, m); 581 572 573 + seq_puts(m, "wc 32\t:"); 574 + ttm_pool_debugfs_orders(global_dma32_write_combined, m); 575 + seq_puts(m, "uc 32\t:"); 576 + ttm_pool_debugfs_orders(global_dma32_uncached, m); 577 + 582 578 for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { 583 579 seq_puts(m, "DMA "); 584 580 switch (i) { ··· 654 640 ttm_pool_type_init(&global_write_combined[i], NULL, 655 641 ttm_write_combined, i); 656 642 ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i); 643 + 644 + ttm_pool_type_init(&global_dma32_write_combined[i], NULL, 645 + ttm_write_combined, i); 646 + ttm_pool_type_init(&global_dma32_uncached[i], NULL, 647 + ttm_uncached, i); 657 648 } 658 649 659 650 mm_shrinker.count_objects = ttm_pool_shrinker_count; ··· 679 660 for (i = 0; i < MAX_ORDER; ++i) { 680 661 ttm_pool_type_fini(&global_write_combined[i]); 681 662 ttm_pool_type_fini(&global_uncached[i]); 663 + 664 + ttm_pool_type_fini(&global_dma32_write_combined[i]); 665 + ttm_pool_type_fini(&global_dma32_uncached[i]); 682 666 } 683 667 684 668 unregister_shrinker(&mm_shrinker);
-2
drivers/gpu/drm/udl/udl_drv.c
··· 38 38 .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, 39 39 40 40 /* GEM hooks */ 41 - .gem_create_object = drm_gem_shmem_create_object_cached, 42 - 43 41 .fops = &udl_driver_fops, 44 42 DRM_GEM_SHMEM_DRIVER_OPS, 45 43
+1 -1
drivers/gpu/drm/v3d/v3d_bo.c
··· 78 78 obj = &bo->base.base; 79 79 80 80 obj->funcs = &v3d_gem_funcs; 81 - 81 + bo->base.map_wc = true; 82 82 INIT_LIST_HEAD(&bo->unref_head); 83 83 84 84 return &bo->base.base;
+3 -1
drivers/gpu/drm/vc4/vc4_txp.c
··· 273 273 } 274 274 275 275 static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, 276 - struct drm_connector_state *conn_state) 276 + struct drm_atomic_state *state) 277 277 { 278 + struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state, 279 + conn); 278 280 struct vc4_txp *txp = connector_to_vc4_txp(conn); 279 281 struct drm_gem_cma_object *gem; 280 282 struct drm_display_mode *mode;
+1 -2
drivers/gpu/drm/vgem/vgem_drv.c
··· 403 403 if (ret) 404 404 return ret; 405 405 406 - fput(vma->vm_file); 407 - vma->vm_file = get_file(obj->filp); 406 + vma_set_file(vma, obj->filp); 408 407 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 409 408 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 410 409
+1
drivers/gpu/drm/via/via_irq.c
··· 364 364 irqwait->request.sequence += 365 365 atomic_read(&cur_irq->irq_received); 366 366 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; 367 + break; 367 368 case VIA_IRQ_ABSOLUTE: 368 369 break; 369 370 default:
+4 -3
drivers/gpu/drm/via/via_verifier.c
··· 1001 1001 state = via_check_vheader6(&buf, buf_end); 1002 1002 break; 1003 1003 case state_command: 1004 - if ((HALCYON_HEADER2 == (cmd = *buf)) && 1005 - supported_3d) 1004 + cmd = *buf; 1005 + if ((cmd == HALCYON_HEADER2) && supported_3d) 1006 1006 state = state_header2; 1007 1007 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1008 1008 state = state_header1; ··· 1064 1064 state = via_parse_vheader6(dev_priv, &buf, buf_end); 1065 1065 break; 1066 1066 case state_command: 1067 - if (HALCYON_HEADER2 == (cmd = *buf)) 1067 + cmd = *buf; 1068 + if (cmd == HALCYON_HEADER2) 1068 1069 state = state_header2; 1069 1070 else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) 1070 1071 state = state_header1;
+2 -2
drivers/gpu/drm/virtio/virtgpu_debugfs.c
··· 67 67 struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; 68 68 69 69 seq_printf(m, "fence %llu %lld\n", 70 - (u64)atomic64_read(&vgdev->fence_drv.last_seq), 71 - vgdev->fence_drv.sync_seq); 70 + (u64)atomic64_read(&vgdev->fence_drv.last_fence_id), 71 + vgdev->fence_drv.current_fence_id); 72 72 return 0; 73 73 } 74 74
+4 -4
drivers/gpu/drm/virtio/virtgpu_drv.h
··· 127 127 struct virtio_gpu_vbuffer *vbuf); 128 128 129 129 struct virtio_gpu_fence_driver { 130 - atomic64_t last_seq; 131 - uint64_t sync_seq; 130 + atomic64_t last_fence_id; 131 + uint64_t current_fence_id; 132 132 uint64_t context; 133 133 struct list_head fences; 134 134 spinlock_t lock; ··· 257 257 struct mutex context_lock; 258 258 }; 259 259 260 - /* virtio_ioctl.c */ 260 + /* virtgpu_ioctl.c */ 261 261 #define DRM_VIRTIO_NUM_IOCTLS 11 262 262 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; 263 263 void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); ··· 420 420 struct virtio_gpu_ctrl_hdr *cmd_hdr, 421 421 struct virtio_gpu_fence *fence); 422 422 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, 423 - u64 last_seq); 423 + u64 fence_id); 424 424 425 425 /* virtgpu_object.c */ 426 426 void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
+7 -6
drivers/gpu/drm/virtio/virtgpu_fence.c
··· 48 48 /* leaked fence outside driver before completing 49 49 * initialization with virtio_gpu_fence_emit */ 50 50 return false; 51 - if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno) 51 + if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno) 52 52 return true; 53 53 return false; 54 54 } ··· 62 62 { 63 63 struct virtio_gpu_fence *fence = to_virtio_fence(f); 64 64 65 - snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); 65 + snprintf(str, size, "%llu", 66 + (u64)atomic64_read(&fence->drv->last_fence_id)); 66 67 } 67 68 68 69 static const struct dma_fence_ops virtio_fence_ops = { ··· 101 100 unsigned long irq_flags; 102 101 103 102 spin_lock_irqsave(&drv->lock, irq_flags); 104 - fence->f.seqno = ++drv->sync_seq; 103 + fence->f.seqno = ++drv->current_fence_id; 105 104 dma_fence_get(&fence->f); 106 105 list_add_tail(&fence->node, &drv->fences); 107 106 spin_unlock_irqrestore(&drv->lock, irq_flags); ··· 113 112 } 114 113 115 114 void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, 116 - u64 last_seq) 115 + u64 fence_id) 117 116 { 118 117 struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; 119 118 struct virtio_gpu_fence *fence, *tmp; 120 119 unsigned long irq_flags; 121 120 122 121 spin_lock_irqsave(&drv->lock, irq_flags); 123 - atomic64_set(&vgdev->fence_drv.last_seq, last_seq); 122 + atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id); 124 123 list_for_each_entry_safe(fence, tmp, &drv->fences, node) { 125 - if (last_seq < fence->f.seqno) 124 + if (fence_id < fence->f.seqno) 126 125 continue; 127 126 dma_fence_signal_locked(&fence->f); 128 127 list_del(&fence->node);
+4 -3
drivers/gpu/drm/virtio/virtgpu_ioctl.c
··· 591 591 return 0; 592 592 } 593 593 594 - static int virtio_gpu_resource_create_blob(struct drm_device *dev, 595 - void *data, struct drm_file *file) 594 + static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev, 595 + void *data, 596 + struct drm_file *file) 596 597 { 597 598 int ret = 0; 598 599 uint32_t handle = 0; ··· 697 696 DRM_RENDER_ALLOW), 698 697 699 698 DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB, 700 - virtio_gpu_resource_create_blob, 699 + virtio_gpu_resource_create_blob_ioctl, 701 700 DRM_RENDER_ALLOW), 702 701 };
-1
drivers/gpu/drm/virtio/virtgpu_object.c
··· 144 144 145 145 dshmem = &shmem->base.base; 146 146 dshmem->base.funcs = &virtio_gpu_shmem_funcs; 147 - dshmem->map_cached = true; 148 147 return &dshmem->base; 149 148 } 150 149
-1
drivers/gpu/drm/vkms/vkms_drv.c
··· 82 82 .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, 83 83 .release = vkms_release, 84 84 .fops = &vkms_driver_fops, 85 - .gem_create_object = drm_gem_shmem_create_object_cached, 86 85 DRM_GEM_SHMEM_DRIVER_OPS, 87 86 88 87 .name = DRIVER_NAME,
+5 -2
drivers/gpu/drm/vkms/vkms_writeback.c
··· 2 2 3 3 #include <linux/dma-buf-map.h> 4 4 5 + #include <drm/drm_atomic.h> 5 6 #include <drm/drm_fourcc.h> 6 7 #include <drm/drm_writeback.h> 7 8 #include <drm/drm_probe_helper.h> ··· 106 105 } 107 106 108 107 static void vkms_wb_atomic_commit(struct drm_connector *conn, 109 - struct drm_connector_state *state) 108 + struct drm_atomic_state *state) 110 109 { 110 + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, 111 + conn); 111 112 struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev); 112 113 struct vkms_output *output = &vkmsdev->output; 113 114 struct drm_writeback_connector *wb_conn = &output->wb_connector; ··· 125 122 crtc_state->active_writeback = conn_state->writeback_job->priv; 126 123 crtc_state->wb_pending = true; 127 124 spin_unlock_irq(&output->composer_lock); 128 - drm_writeback_queue_job(wb_conn, state); 125 + drm_writeback_queue_job(wb_conn, connector_state); 129 126 } 130 127 131 128 static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
+3 -3
drivers/staging/android/ashmem.c
··· 450 450 vma_set_anonymous(vma); 451 451 } 452 452 453 - if (vma->vm_file) 454 - fput(vma->vm_file); 455 - vma->vm_file = asma->file; 453 + vma_set_file(vma, asma->file); 454 + /* XXX: merge this with the get_file() above if possible */ 455 + fput(asma->file); 456 456 457 457 out: 458 458 mutex_unlock(&ashmem_mutex);
+1
drivers/video/fbdev/geode/lxfb_ops.c
··· 682 682 case DC_DV_CTL: 683 683 /* set all ram to dirty */ 684 684 write_dc(par, i, par->dc[i] | DC_DV_CTL_CLEAR_DV_RAM); 685 + break; 685 686 686 687 case DC_RSVD_1: 687 688 case DC_RSVD_2:
+1
drivers/video/fbdev/pm2fb.c
··· 239 239 fallthrough; 240 240 case 16: 241 241 timing >>= 1; 242 + fallthrough; 242 243 case 32: 243 244 break; 244 245 }
+7 -7
include/drm/drm_fb_helper.h
··· 100 100 * @funcs: driver callbacks for fb helper 101 101 * @fbdev: emulated fbdev device info struct 102 102 * @pseudo_palette: fake palette of 16 colors 103 - * @dirty_clip: clip rectangle used with deferred_io to accumulate damage to 104 - * the screen buffer 105 - * @dirty_lock: spinlock protecting @dirty_clip 106 - * @dirty_work: worker used to flush the framebuffer 103 + * @damage_clip: clip rectangle used with deferred_io to accumulate damage to 104 + * the screen buffer 105 + * @damage_lock: spinlock protecting @damage_clip 106 + * @damage_work: worker used to flush the framebuffer 107 107 * @resume_work: worker used during resume if the console lock is already taken 108 108 * 109 109 * This is the main structure used by the fbdev helpers. Drivers supporting ··· 131 131 const struct drm_fb_helper_funcs *funcs; 132 132 struct fb_info *fbdev; 133 133 u32 pseudo_palette[17]; 134 - struct drm_clip_rect dirty_clip; 135 - spinlock_t dirty_lock; 136 - struct work_struct dirty_work; 134 + struct drm_clip_rect damage_clip; 135 + spinlock_t damage_lock; 136 + struct work_struct damage_work; 137 137 struct work_struct resume_work; 138 138 139 139 /**
+2 -5
include/drm/drm_gem_shmem_helper.h
··· 98 98 unsigned int vmap_use_count; 99 99 100 100 /** 101 - * @map_cached: map object cached (instead of using writecombine). 101 + * @map_wc: map object write-combined (instead of using shmem defaults). 102 102 */ 103 - bool map_cached; 103 + bool map_wc; 104 104 }; 105 105 106 106 #define to_drm_gem_shmem_obj(obj) \ ··· 132 132 drm_gem_shmem_create_with_handle(struct drm_file *file_priv, 133 133 struct drm_device *dev, size_t size, 134 134 uint32_t *handle); 135 - 136 - struct drm_gem_object * 137 - drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size); 138 135 139 136 int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev, 140 137 struct drm_mode_create_dumb *args);
+3
include/drm/drm_modes.h
··· 195 195 * @crtc_vsync_end: hardware mode vertical sync end 196 196 * @crtc_vtotal: hardware mode vertical total size 197 197 * 198 + * This is the kernel API display mode information structure. For the 199 + * user-space version see struct drm_mode_modeinfo. 200 + * 198 201 * The horizontal and vertical timings are defined per the following diagram. 199 202 * 200 203 * ::
+6 -7
include/drm/drm_modeset_helper_vtables.h
··· 1044 1044 * NOTE: 1045 1045 * 1046 1046 * This function is called in the check phase of an atomic update. The 1047 - * driver is not allowed to change anything outside of the free-standing 1048 - * state objects passed-in or assembled in the overall &drm_atomic_state 1049 - * update tracking structure. 1047 + * driver is not allowed to change anything outside of the 1048 + * &drm_atomic_state update tracking structure passed in. 1050 1049 * 1051 1050 * RETURNS: 1052 1051 * ··· 1055 1056 * for this. 1056 1057 */ 1057 1058 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector, 1058 - struct drm_connector_state *connector_state); 1059 + struct drm_atomic_state *state); 1059 1060 1060 1061 /** 1061 1062 * @atomic_check: ··· 1096 1097 * 1097 1098 * This hook is to be used by drivers implementing writeback connectors 1098 1099 * that need a point when to commit the writeback job to the hardware. 1099 - * The writeback_job to commit is available in 1100 - * &drm_connector_state.writeback_job. 1100 + * The writeback_job to commit is available in the new connector state, 1101 + * in &drm_connector_state.writeback_job. 1101 1102 * 1102 1103 * This hook is optional. 1103 1104 * 1104 1105 * This callback is used by the atomic modeset helpers. 1105 1106 */ 1106 1107 void (*atomic_commit)(struct drm_connector *connector, 1107 - struct drm_connector_state *state); 1108 + struct drm_atomic_state *state); 1108 1109 1109 1110 /** 1110 1111 * @prepare_writeback_job:
+2
include/linux/mm.h
··· 2719 2719 } 2720 2720 #endif 2721 2721 2722 + void vma_set_file(struct vm_area_struct *vma, struct file *file); 2723 + 2722 2724 #ifdef CONFIG_NUMA_BALANCING 2723 2725 unsigned long change_prot_numa(struct vm_area_struct *vma, 2724 2726 unsigned long start, unsigned long end);
+126 -39
include/uapi/drm/drm_mode.h
··· 218 218 #define DRM_MODE_CONTENT_PROTECTION_DESIRED 1 219 219 #define DRM_MODE_CONTENT_PROTECTION_ENABLED 2 220 220 221 + /** 222 + * struct drm_mode_modeinfo - Display mode information. 223 + * @clock: pixel clock in kHz 224 + * @hdisplay: horizontal display size 225 + * @hsync_start: horizontal sync start 226 + * @hsync_end: horizontal sync end 227 + * @htotal: horizontal total size 228 + * @hskew: horizontal skew 229 + * @vdisplay: vertical display size 230 + * @vsync_start: vertical sync start 231 + * @vsync_end: vertical sync end 232 + * @vtotal: vertical total size 233 + * @vscan: vertical scan 234 + * @vrefresh: approximate vertical refresh rate in Hz 235 + * @flags: bitmask of misc. flags, see DRM_MODE_FLAG_* defines 236 + * @type: bitmask of type flags, see DRM_MODE_TYPE_* defines 237 + * @name: string describing the mode resolution 238 + * 239 + * This is the user-space API display mode information structure. For the 240 + * kernel version see struct drm_display_mode. 241 + */ 221 242 struct drm_mode_modeinfo { 222 243 __u32 clock; 223 244 __u16 hdisplay; ··· 389 368 #define DRM_MODE_CONNECTOR_WRITEBACK 18 390 369 #define DRM_MODE_CONNECTOR_SPI 19 391 370 371 + /** 372 + * struct drm_mode_get_connector - Get connector metadata. 373 + * 374 + * User-space can perform a GETCONNECTOR ioctl to retrieve information about a 375 + * connector. User-space is expected to retrieve encoders, modes and properties 376 + * by performing this ioctl at least twice: the first time to retrieve the 377 + * number of elements, the second time to retrieve the elements themselves. 378 + * 379 + * To retrieve the number of elements, set @count_props and @count_encoders to 380 + * zero, set @count_modes to 1, and set @modes_ptr to a temporary struct 381 + * drm_mode_modeinfo element. 
382 + * 383 + * To retrieve the elements, allocate arrays for @encoders_ptr, @modes_ptr, 384 + * @props_ptr and @prop_values_ptr, then set @count_modes, @count_props and 385 + * @count_encoders to their capacity. 386 + * 387 + * Performing the ioctl only twice may be racy: the number of elements may have 388 + * changed with a hotplug event in-between the two ioctls. User-space is 389 + * expected to retry the last ioctl until the number of elements stabilizes. 390 + * The kernel won't fill any array which doesn't have the expected length. 391 + * 392 + * **Force-probing a connector** 393 + * 394 + * If the @count_modes field is set to zero, the kernel will perform a forced 395 + * probe on the connector to refresh the connector status, modes and EDID. 396 + * A forced-probe can be slow and the ioctl will block. A force-probe can cause 397 + * flickering and temporary freezes, so it should not be performed 398 + * automatically. 399 + * 400 + * User-space shouldn't need to force-probe connectors in general: the kernel 401 + * will automatically take care of probing connectors that don't support 402 + * hot-plug detection when appropriate. However, user-space may force-probe 403 + * connectors on user request (e.g. clicking a "Scan connectors" button, or 404 + * opening a UI to manage screens). 405 + */ 392 406 struct drm_mode_get_connector { 393 - 407 + /** @encoders_ptr: Pointer to ``__u32`` array of object IDs. */ 394 408 __u64 encoders_ptr; 409 + /** @modes_ptr: Pointer to struct drm_mode_modeinfo array. */ 395 410 __u64 modes_ptr; 411 + /** @props_ptr: Pointer to ``__u32`` array of property IDs. */ 396 412 __u64 props_ptr; 413 + /** @prop_values_ptr: Pointer to ``__u64`` array of property values. */ 397 414 __u64 prop_values_ptr; 398 415 416 + /** @count_modes: Number of modes. */ 399 417 __u32 count_modes; 418 + /** @count_props: Number of properties. */ 400 419 __u32 count_props; 420 + /** @count_encoders: Number of encoders. 
*/ 401 421 __u32 count_encoders; 402 422 403 - __u32 encoder_id; /**< Current Encoder */ 404 - __u32 connector_id; /**< Id */ 423 + /** @encoder_id: Object ID of the current encoder. */ 424 + __u32 encoder_id; 425 + /** @connector_id: Object ID of the connector. */ 426 + __u32 connector_id; 427 + /** 428 + * @connector_type: Type of the connector. 429 + * 430 + * See DRM_MODE_CONNECTOR_* defines. 431 + */ 405 432 __u32 connector_type; 433 + /** 434 + * @connector_type_id: Type-specific connector number. 435 + * 436 + * This is not an object ID. This is a per-type connector number. Each 437 + * (type, type_id) combination is unique across all connectors of a DRM 438 + * device. 439 + */ 406 440 __u32 connector_type_id; 407 441 442 + /** 443 + * @connection: Status of the connector. 444 + * 445 + * See enum drm_connector_status. 446 + */ 408 447 __u32 connection; 409 - __u32 mm_width; /**< width in millimeters */ 410 - __u32 mm_height; /**< height in millimeters */ 448 + /** @mm_width: Width of the connected sink in millimeters. */ 449 + __u32 mm_width; 450 + /** @mm_height: Height of the connected sink in millimeters. */ 451 + __u32 mm_height; 452 + /** 453 + * @subpixel: Subpixel order of the connected sink. 454 + * 455 + * See enum subpixel_order. 456 + */ 411 457 __u32 subpixel; 412 458 459 + /** @pad: Padding, must be zero. */ 413 460 __u32 pad; 414 461 }; 415 462 ··· 994 905 995 906 /** 996 907 * struct drm_mode_create_blob - Create New block property 997 - * @data: Pointer to data to copy. 998 - * @length: Length of data to copy. 999 - * @blob_id: new property ID. 908 + * 1000 909 * Create a new 'blob' data property, copying length bytes from data pointer, 1001 910 * and returning new blob ID. 1002 911 */ 1003 912 struct drm_mode_create_blob { 1004 - /** Pointer to data to copy. */ 913 + /** @data: Pointer to data to copy. */ 1005 914 __u64 data; 1006 - /** Length of data to copy. */ 915 + /** @length: Length of data to copy. 
*/ 1007 916 __u32 length; 1008 - /** Return: new property ID. */ 917 + /** @blob_id: Return: new property ID. */ 1009 918 __u32 blob_id; 1010 919 }; 1011 920 1012 921 /** 1013 922 * struct drm_mode_destroy_blob - Destroy user blob 1014 923 * @blob_id: blob_id to destroy 924 + * 1015 925 * Destroy a user-created blob property. 1016 926 * 1017 927 * User-space can release blobs as soon as they do not need to refer to them by ··· 1025 937 1026 938 /** 1027 939 * struct drm_mode_create_lease - Create lease 1028 - * @object_ids: Pointer to array of object ids. 1029 - * @object_count: Number of object ids. 1030 - * @flags: flags for new FD. 1031 - * @lessee_id: unique identifier for lessee. 1032 - * @fd: file descriptor to new drm_master file. 940 + * 1033 941 * Lease mode resources, creating another drm_master. 1034 942 */ 1035 943 struct drm_mode_create_lease { 1036 - /** Pointer to array of object ids (__u32) */ 944 + /** @object_ids: Pointer to array of object ids (__u32) */ 1037 945 __u64 object_ids; 1038 - /** Number of object ids */ 946 + /** @object_count: Number of object ids */ 1039 947 __u32 object_count; 1040 - /** flags for new FD (O_CLOEXEC, etc) */ 948 + /** @flags: flags for new FD (O_CLOEXEC, etc) */ 1041 949 __u32 flags; 1042 950 1043 - /** Return: unique identifier for lessee. */ 951 + /** @lessee_id: Return: unique identifier for lessee. */ 1044 952 __u32 lessee_id; 1045 - /** Return: file descriptor to new drm_master file */ 953 + /** @fd: Return: file descriptor to new drm_master file */ 1046 954 __u32 fd; 1047 955 }; 1048 956 1049 957 /** 1050 958 * struct drm_mode_list_lessees - List lessees 1051 - * @count_lessees: Number of lessees. 1052 - * @pad: pad. 1053 - * @lessees_ptr: Pointer to lessess. 1054 - * List lesses from a drm_master 959 + * 960 + * List lesses from a drm_master. 1055 961 */ 1056 962 struct drm_mode_list_lessees { 1057 - /** Number of lessees. 963 + /** 964 + * @count_lessees: Number of lessees. 
965 + * 1058 966 * On input, provides length of the array. 1059 967 * On output, provides total number. No 1060 968 * more than the input number will be written ··· 1058 974 * the size and then the data. 1059 975 */ 1060 976 __u32 count_lessees; 977 + /** @pad: Padding. */ 1061 978 __u32 pad; 1062 979 1063 - /** Pointer to lessees. 1064 - * pointer to __u64 array of lessee ids 980 + /** 981 + * @lessees_ptr: Pointer to lessees. 982 + * 983 + * Pointer to __u64 array of lessee ids 1065 984 */ 1066 985 __u64 lessees_ptr; 1067 986 }; 1068 987 1069 988 /** 1070 989 * struct drm_mode_get_lease - Get Lease 1071 - * @count_objects: Number of leased objects. 1072 - * @pad: pad. 1073 - * @objects_ptr: Pointer to objects. 1074 - * Get leased objects 990 + * 991 + * Get leased objects. 1075 992 */ 1076 993 struct drm_mode_get_lease { 1077 - /** Number of leased objects. 994 + /** 995 + * @count_objects: Number of leased objects. 996 + * 1078 997 * On input, provides length of the array. 1079 998 * On output, provides total number. No 1080 999 * more than the input number will be written ··· 1085 998 * the size and then the data. 1086 999 */ 1087 1000 __u32 count_objects; 1001 + /** @pad: Padding. */ 1088 1002 __u32 pad; 1089 1003 1090 - /** Pointer to objects. 1091 - * pointer to __u32 array of object ids 1004 + /** 1005 + * @objects_ptr: Pointer to objects. 1006 + * 1007 + * Pointer to __u32 array of object ids. 1092 1008 */ 1093 1009 __u64 objects_ptr; 1094 1010 }; 1095 1011 1096 1012 /** 1097 1013 * struct drm_mode_revoke_lease - Revoke lease 1098 - * @lessee_id: Unique ID of lessee. 1099 - * Revoke lease 1100 1014 */ 1101 1015 struct drm_mode_revoke_lease { 1102 - /** Unique ID of lessee 1103 - */ 1016 + /** @lessee_id: Unique ID of lessee */ 1104 1017 __u32 lessee_id; 1105 1018 }; 1106 1019
+4
include/uapi/linux/virtio_gpu.h
··· 115 115 116 116 enum virtio_gpu_shm_id { 117 117 VIRTIO_GPU_SHM_ID_UNDEFINED = 0, 118 + /* 119 + * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB 120 + * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB 121 + */ 118 122 VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1 119 123 }; 120 124
+1 -1
mm/mmap.c
··· 1897 1897 return addr; 1898 1898 1899 1899 unmap_and_free_vma: 1900 + fput(vma->vm_file); 1900 1901 vma->vm_file = NULL; 1901 - fput(file); 1902 1902 1903 1903 /* Undo any partial mapping done by a device driver. */ 1904 1904 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+12
mm/util.c
··· 311 311 return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); 312 312 } 313 313 314 + /* 315 + * Change backing file, only valid to use during initial VMA setup. 316 + */ 317 + void vma_set_file(struct vm_area_struct *vma, struct file *file) 318 + { 319 + /* Changing an anonymous vma with this is illegal */ 320 + get_file(file); 321 + swap(vma->vm_file, file); 322 + fput(file); 323 + } 324 + EXPORT_SYMBOL(vma_set_file); 325 + 314 326 #ifndef STACK_RND_MASK 315 327 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */ 316 328 #endif