Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-next-2025-07-10' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for 6.17:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:

Driver Changes:
- amdgpu: debugfs improvements
- ast: Improve hardware generations implementation
- dma-buf heaps:
- Give the CMA heap a stable name
- panthor: fix UAF in debugfs
- rockchip: Convert inno_hdmi to a bridge
- sti: Convert to devm_drm_bridge_alloc()
- vkms: Use faux_device

- bridge:
- Improve CEC handling code, conversions to devm_drm_bridge_alloc()

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250710-observant-elite-dingo-acfd6d@houat

+3761 -3169
+7 -4
Documentation/userspace-api/dma-buf-heaps.rst
··· 19 19 - The ``cma`` heap allocates physically contiguous, cacheable, 20 20 buffers. Only present if a CMA region is present. Such a region is 21 21 usually created either through the kernel commandline through the 22 - `cma` parameter, a memory region Device-Tree node with the 23 - `linux,cma-default` property set, or through the `CMA_SIZE_MBYTES` or 24 - `CMA_SIZE_PERCENTAGE` Kconfig options. Depending on the platform, it 25 - might be called ``reserved``, ``linux,cma``, or ``default-pool``. 22 + ``cma`` parameter, a memory region Device-Tree node with the 23 + ``linux,cma-default`` property set, or through the ``CMA_SIZE_MBYTES`` or 24 + ``CMA_SIZE_PERCENTAGE`` Kconfig options. The heap's name in devtmpfs is 25 + ``default_cma_region``. For backwards compatibility, when the 26 + ``DMABUF_HEAPS_CMA_LEGACY`` Kconfig option is set, a duplicate node is 27 + created following legacy naming conventions; the legacy name might be 28 + ``reserved``, ``linux,cma``, or ``default-pool``.
-2
drivers/Kconfig
··· 209 209 210 210 source "drivers/android/Kconfig" 211 211 212 - source "drivers/gpu/trace/Kconfig" 213 - 214 212 source "drivers/nvdimm/Kconfig" 215 213 216 214 source "drivers/dax/Kconfig"
-16
drivers/accel/drm_accel.c
··· 20 20 21 21 DEFINE_XARRAY_ALLOC(accel_minors_xa); 22 22 23 - static struct dentry *accel_debugfs_root; 24 - 25 23 static const struct device_type accel_sysfs_device_minor = { 26 24 .name = "accel_minor" 27 25 }; ··· 70 72 {"name", accel_name_info, 0} 71 73 }; 72 74 #define ACCEL_DEBUGFS_ENTRIES ARRAY_SIZE(accel_debugfs_list) 73 - 74 - /** 75 - * accel_debugfs_init() - Initialize debugfs for device 76 - * @dev: Pointer to the device instance. 77 - * 78 - * This function creates a root directory for the device in debugfs. 79 - */ 80 - void accel_debugfs_init(struct drm_device *dev) 81 - { 82 - drm_debugfs_dev_init(dev, accel_debugfs_root); 83 - } 84 75 85 76 /** 86 77 * accel_debugfs_register() - Register debugfs for device ··· 181 194 void accel_core_exit(void) 182 195 { 183 196 unregister_chrdev(ACCEL_MAJOR, "accel"); 184 - debugfs_remove(accel_debugfs_root); 185 197 accel_sysfs_destroy(); 186 198 WARN_ON(!xa_empty(&accel_minors_xa)); 187 199 } ··· 194 208 DRM_ERROR("Cannot create ACCEL class: %d\n", ret); 195 209 goto error; 196 210 } 197 - 198 - accel_debugfs_root = debugfs_create_dir("accel", NULL); 199 211 200 212 ret = register_chrdev(ACCEL_MAJOR, "accel", &accel_stub_fops); 201 213 if (ret < 0)
+10
drivers/dma-buf/heaps/Kconfig
··· 12 12 Choose this option to enable dma-buf CMA heap. This heap is backed 13 13 by the Contiguous Memory Allocator (CMA). If your system has these 14 14 regions, you should say Y here. 15 + 16 + config DMABUF_HEAPS_CMA_LEGACY 17 + bool "Legacy DMA-BUF CMA Heap" 18 + default y 19 + depends on DMABUF_HEAPS_CMA 20 + help 21 + Add a duplicate CMA-backed dma-buf heap with legacy naming derived 22 + from the CMA area's devicetree node, or "reserved" if the area is not 23 + defined in the devicetree. This uses the same underlying allocator as 24 + CONFIG_DMABUF_HEAPS_CMA.
+29 -7
drivers/dma-buf/heaps/cma_heap.c
··· 9 9 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ 10 10 * Andrew F. Davis <afd@ti.com> 11 11 */ 12 + 13 + #define pr_fmt(fmt) "cma_heap: " fmt 14 + 12 15 #include <linux/cma.h> 13 16 #include <linux/dma-buf.h> 14 17 #include <linux/dma-heap.h> ··· 25 22 #include <linux/slab.h> 26 23 #include <linux/vmalloc.h> 27 24 25 + #define DEFAULT_CMA_NAME "default_cma_region" 28 26 29 27 struct cma_heap { 30 28 struct dma_heap *heap; ··· 370 366 .allocate = cma_heap_allocate, 371 367 }; 372 368 373 - static int __init __add_cma_heap(struct cma *cma, void *data) 369 + static int __init __add_cma_heap(struct cma *cma, const char *name) 374 370 { 375 - struct cma_heap *cma_heap; 376 371 struct dma_heap_export_info exp_info; 372 + struct cma_heap *cma_heap; 377 373 378 374 cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL); 379 375 if (!cma_heap) 380 376 return -ENOMEM; 381 377 cma_heap->cma = cma; 382 378 383 - exp_info.name = cma_get_name(cma); 379 + exp_info.name = name; 384 380 exp_info.ops = &cma_heap_ops; 385 381 exp_info.priv = cma_heap; 386 382 ··· 398 394 static int __init add_default_cma_heap(void) 399 395 { 400 396 struct cma *default_cma = dev_get_cma_area(NULL); 401 - int ret = 0; 397 + const char *legacy_cma_name; 398 + int ret; 402 399 403 - if (default_cma) 404 - ret = __add_cma_heap(default_cma, NULL); 400 + if (!default_cma) 401 + return 0; 405 402 406 - return ret; 403 + ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME); 404 + if (ret) 405 + return ret; 406 + 407 + if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) { 408 + legacy_cma_name = cma_get_name(default_cma); 409 + if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) { 410 + pr_warn("legacy name and default name are the same, skipping legacy heap\n"); 411 + return 0; 412 + } 413 + 414 + ret = __add_cma_heap(default_cma, legacy_cma_name); 415 + if (ret) 416 + pr_warn("failed to add legacy heap: %pe\n", 417 + ERR_PTR(ret)); 418 + } 419 + 420 + return 0; 407 421 } 408 422 
module_init(add_default_cma_heap); 409 423 MODULE_DESCRIPTION("DMA-BUF CMA Heap");
+17 -26
drivers/dma-buf/heaps/system_heap.c
··· 33 33 34 34 struct dma_heap_attachment { 35 35 struct device *dev; 36 - struct sg_table *table; 36 + struct sg_table table; 37 37 struct list_head list; 38 38 bool mapped; 39 39 }; ··· 52 52 static const unsigned int orders[] = {8, 4, 0}; 53 53 #define NUM_ORDERS ARRAY_SIZE(orders) 54 54 55 - static struct sg_table *dup_sg_table(struct sg_table *table) 55 + static int dup_sg_table(struct sg_table *from, struct sg_table *to) 56 56 { 57 - struct sg_table *new_table; 58 - int ret, i; 59 57 struct scatterlist *sg, *new_sg; 58 + int ret, i; 60 59 61 - new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); 62 - if (!new_table) 63 - return ERR_PTR(-ENOMEM); 60 + ret = sg_alloc_table(to, from->orig_nents, GFP_KERNEL); 61 + if (ret) 62 + return ret; 64 63 65 - ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL); 66 - if (ret) { 67 - kfree(new_table); 68 - return ERR_PTR(-ENOMEM); 69 - } 70 - 71 - new_sg = new_table->sgl; 72 - for_each_sgtable_sg(table, sg, i) { 64 + new_sg = to->sgl; 65 + for_each_sgtable_sg(from, sg, i) { 73 66 sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset); 74 67 new_sg = sg_next(new_sg); 75 68 } 76 69 77 - return new_table; 70 + return 0; 78 71 } 79 72 80 73 static int system_heap_attach(struct dma_buf *dmabuf, ··· 75 82 { 76 83 struct system_heap_buffer *buffer = dmabuf->priv; 77 84 struct dma_heap_attachment *a; 78 - struct sg_table *table; 85 + int ret; 79 86 80 87 a = kzalloc(sizeof(*a), GFP_KERNEL); 81 88 if (!a) 82 89 return -ENOMEM; 83 90 84 - table = dup_sg_table(&buffer->sg_table); 85 - if (IS_ERR(table)) { 91 + ret = dup_sg_table(&buffer->sg_table, &a->table); 92 + if (ret) { 86 93 kfree(a); 87 - return -ENOMEM; 94 + return ret; 88 95 } 89 96 90 - a->table = table; 91 97 a->dev = attachment->dev; 92 98 INIT_LIST_HEAD(&a->list); 93 99 a->mapped = false; ··· 110 118 list_del(&a->list); 111 119 mutex_unlock(&buffer->lock); 112 120 113 - sg_free_table(a->table); 114 - kfree(a->table); 121 + sg_free_table(&a->table); 115 122 
kfree(a); 116 123 } 117 124 ··· 118 127 enum dma_data_direction direction) 119 128 { 120 129 struct dma_heap_attachment *a = attachment->priv; 121 - struct sg_table *table = a->table; 130 + struct sg_table *table = &a->table; 122 131 int ret; 123 132 124 133 ret = dma_map_sgtable(attachment->dev, table, direction, 0); ··· 153 162 list_for_each_entry(a, &buffer->attachments, list) { 154 163 if (!a->mapped) 155 164 continue; 156 - dma_sync_sgtable_for_cpu(a->dev, a->table, direction); 165 + dma_sync_sgtable_for_cpu(a->dev, &a->table, direction); 157 166 } 158 167 mutex_unlock(&buffer->lock); 159 168 ··· 174 183 list_for_each_entry(a, &buffer->attachments, list) { 175 184 if (!a->mapped) 176 185 continue; 177 - dma_sync_sgtable_for_device(a->dev, a->table, direction); 186 + dma_sync_sgtable_for_device(a->dev, &a->table, direction); 178 187 } 179 188 mutex_unlock(&buffer->lock); 180 189
+52
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 2131 2131 return 0; 2132 2132 } 2133 2133 2134 + static int amdgpu_pt_info_read(struct seq_file *m, void *unused) 2135 + { 2136 + struct drm_file *file; 2137 + struct amdgpu_fpriv *fpriv; 2138 + struct amdgpu_bo *root_bo; 2139 + int r; 2140 + 2141 + file = m->private; 2142 + if (!file) 2143 + return -EINVAL; 2144 + 2145 + fpriv = file->driver_priv; 2146 + if (!fpriv || !fpriv->vm.root.bo) 2147 + return -ENODEV; 2148 + 2149 + root_bo = amdgpu_bo_ref(fpriv->vm.root.bo); 2150 + r = amdgpu_bo_reserve(root_bo, true); 2151 + if (r) { 2152 + amdgpu_bo_unref(&root_bo); 2153 + return -EINVAL; 2154 + } 2155 + 2156 + seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo)); 2157 + 2158 + amdgpu_bo_unreserve(root_bo); 2159 + amdgpu_bo_unref(&root_bo); 2160 + 2161 + return 0; 2162 + } 2163 + 2164 + static int amdgpu_pt_info_open(struct inode *inode, struct file *file) 2165 + { 2166 + return single_open(file, amdgpu_pt_info_read, inode->i_private); 2167 + } 2168 + 2169 + static const struct file_operations amdgpu_pt_info_fops = { 2170 + .owner = THIS_MODULE, 2171 + .open = amdgpu_pt_info_open, 2172 + .read = seq_read, 2173 + .llseek = seq_lseek, 2174 + .release = single_release, 2175 + }; 2176 + 2177 + void amdgpu_debugfs_vm_init(struct drm_file *file) 2178 + { 2179 + debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file, 2180 + &amdgpu_pt_info_fops); 2181 + } 2182 + 2134 2183 #else 2135 2184 int amdgpu_debugfs_init(struct amdgpu_device *adev) 2136 2185 { ··· 2188 2139 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 2189 2140 { 2190 2141 return 0; 2142 + } 2143 + void amdgpu_debugfs_vm_init(struct drm_file *file) 2144 + { 2191 2145 } 2192 2146 #endif
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
··· 33 33 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); 34 34 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev); 35 35 void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev); 36 + void amdgpu_debugfs_vm_init(struct drm_file *file); 36 37
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 1395 1395 if (r) 1396 1396 goto error_pasid; 1397 1397 1398 + amdgpu_debugfs_vm_init(file_priv); 1399 + 1398 1400 r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id); 1399 1401 if (r) 1400 1402 goto error_pasid;
+55
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
··· 318 318 amdgpu_bo_unreserve(queue->db_obj.obj); 319 319 } 320 320 amdgpu_bo_unref(&queue->db_obj.obj); 321 + 322 + #if defined(CONFIG_DEBUG_FS) 323 + debugfs_remove_recursive(queue->debugfs_queue); 324 + #endif 321 325 r = amdgpu_userq_unmap_helper(uq_mgr, queue); 322 326 amdgpu_userq_cleanup(uq_mgr, queue, queue_id); 323 327 mutex_unlock(&uq_mgr->userq_mutex); ··· 347 343 return -EACCES; 348 344 } 349 345 346 + #if defined(CONFIG_DEBUG_FS) 347 + static int amdgpu_mqd_info_read(struct seq_file *m, void *unused) 348 + { 349 + struct amdgpu_usermode_queue *queue = m->private; 350 + struct amdgpu_bo *bo; 351 + int r; 352 + 353 + if (!queue || !queue->mqd.obj) 354 + return -EINVAL; 355 + 356 + bo = amdgpu_bo_ref(queue->mqd.obj); 357 + r = amdgpu_bo_reserve(bo, true); 358 + if (r) { 359 + amdgpu_bo_unref(&bo); 360 + return -EINVAL; 361 + } 362 + 363 + seq_printf(m, "queue_type %d\n", queue->queue_type); 364 + seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj)); 365 + 366 + amdgpu_bo_unreserve(bo); 367 + amdgpu_bo_unref(&bo); 368 + 369 + return 0; 370 + } 371 + 372 + static int amdgpu_mqd_info_open(struct inode *inode, struct file *file) 373 + { 374 + return single_open(file, amdgpu_mqd_info_read, inode->i_private); 375 + } 376 + 377 + static const struct file_operations amdgpu_mqd_info_fops = { 378 + .owner = THIS_MODULE, 379 + .open = amdgpu_mqd_info_open, 380 + .read = seq_read, 381 + .llseek = seq_lseek, 382 + .release = single_release, 383 + }; 384 + #endif 385 + 350 386 static int 351 387 amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) 352 388 { ··· 396 352 const struct amdgpu_userq_funcs *uq_funcs; 397 353 struct amdgpu_usermode_queue *queue; 398 354 struct amdgpu_db_info db_info; 355 + char *queue_name; 399 356 bool skip_map_queue; 400 357 uint64_t index; 401 358 int qid, r = 0; ··· 520 475 } 521 476 } 522 477 478 + queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid); 479 + if (!queue_name) 480 + return 
-ENOMEM; 481 + 482 + #if defined(CONFIG_DEBUG_FS) 483 + /* Queue dentry per client to hold MQD information */ 484 + queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client); 485 + debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops); 486 + #endif 487 + kfree(queue_name); 523 488 524 489 args->out.queue_id = qid; 525 490
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
··· 65 65 struct dma_fence *last_fence; 66 66 u32 xcp_id; 67 67 int priority; 68 + struct dentry *debugfs_queue; 68 69 }; 69 70 70 71 struct amdgpu_userq_funcs {
+5
drivers/gpu/drm/ast/Makefile
··· 4 4 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 5 5 6 6 ast-y := \ 7 + ast_2000.o \ 8 + ast_2100.o \ 9 + ast_2300.o \ 10 + ast_2500.o \ 11 + ast_2600.o \ 7 12 ast_cursor.o \ 8 13 ast_ddc.o \ 9 14 ast_dp501.o \
+149
drivers/gpu/drm/ast/ast_2000.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2012 Red Hat Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + * 21 + * The above copyright notice and this permission notice (including the 22 + * next paragraph) shall be included in all copies or substantial portions 23 + * of the Software. 
24 + */ 25 + /* 26 + * Authors: Dave Airlie <airlied@redhat.com> 27 + */ 28 + 29 + #include <linux/delay.h> 30 + 31 + #include "ast_drv.h" 32 + #include "ast_post.h" 33 + 34 + /* 35 + * POST 36 + */ 37 + 38 + void ast_2000_set_def_ext_reg(struct ast_device *ast) 39 + { 40 + static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; 41 + u8 i, index, reg; 42 + const u8 *ext_reg_info; 43 + 44 + /* reset scratch */ 45 + for (i = 0x81; i <= 0x9f; i++) 46 + ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00); 47 + 48 + ext_reg_info = extreginfo; 49 + index = 0xa0; 50 + while (*ext_reg_info != 0xff) { 51 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info); 52 + index++; 53 + ext_reg_info++; 54 + } 55 + 56 + /* disable standard IO/MEM decode if secondary */ 57 + /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */ 58 + 59 + /* Set Ext. Default */ 60 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01); 61 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00); 62 + 63 + /* Enable RAMDAC for A1 */ 64 + reg = 0x04; 65 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); 66 + } 67 + 68 + static const struct ast_dramstruct ast2000_dram_table_data[] = { 69 + { 0x0108, 0x00000000 }, 70 + { 0x0120, 0x00004a21 }, 71 + AST_DRAMSTRUCT_UDELAY(67u), 72 + { 0x0000, 0xFFFFFFFF }, 73 + AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000089), 74 + { 0x0008, 0x22331353 }, 75 + { 0x000C, 0x0d07000b }, 76 + { 0x0010, 0x11113333 }, 77 + { 0x0020, 0x00110350 }, 78 + { 0x0028, 0x1e0828f0 }, 79 + { 0x0024, 0x00000001 }, 80 + { 0x001C, 0x00000000 }, 81 + { 0x0014, 0x00000003 }, 82 + AST_DRAMSTRUCT_UDELAY(67u), 83 + { 0x0018, 0x00000131 }, 84 + { 0x0014, 0x00000001 }, 85 + AST_DRAMSTRUCT_UDELAY(67u), 86 + { 0x0018, 0x00000031 }, 87 + { 0x0014, 0x00000001 }, 88 + AST_DRAMSTRUCT_UDELAY(67u), 89 + { 0x0028, 0x1e0828f1 }, 90 + { 0x0024, 0x00000003 }, 91 + { 0x002C, 0x1f0f28fb }, 92 + { 0x0030, 0xFFFFFE01 }, 93 + AST_DRAMSTRUCT_INVALID, 94 + }; 95 + 96 + 
static void ast_post_chip_2000(struct ast_device *ast) 97 + { 98 + u8 j; 99 + u32 temp, i; 100 + const struct ast_dramstruct *dram_reg_info; 101 + 102 + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 103 + 104 + if ((j & 0x80) == 0) { /* VGA only */ 105 + dram_reg_info = ast2000_dram_table_data; 106 + ast_write32(ast, 0xf004, 0x1e6e0000); 107 + ast_write32(ast, 0xf000, 0x1); 108 + ast_write32(ast, 0x10100, 0xa8); 109 + 110 + do { 111 + ; 112 + } while (ast_read32(ast, 0x10100) != 0xa8); 113 + 114 + while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) { 115 + if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) { 116 + for (i = 0; i < 15; i++) 117 + udelay(dram_reg_info->data); 118 + } else { 119 + ast_write32(ast, 0x10000 + dram_reg_info->index, 120 + dram_reg_info->data); 121 + } 122 + dram_reg_info++; 123 + } 124 + 125 + temp = ast_read32(ast, 0x10140); 126 + ast_write32(ast, 0x10140, temp | 0x40); 127 + } 128 + 129 + /* wait ready */ 130 + do { 131 + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 132 + } while ((j & 0x40) == 0); 133 + } 134 + 135 + int ast_2000_post(struct ast_device *ast) 136 + { 137 + ast_2000_set_def_ext_reg(ast); 138 + 139 + if (ast->config_mode == ast_use_p2a) { 140 + ast_post_chip_2000(ast); 141 + } else { 142 + if (ast->tx_chip == AST_TX_SIL164) { 143 + /* Enable DVO */ 144 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); 145 + } 146 + } 147 + 148 + return 0; 149 + }
+348
drivers/gpu/drm/ast/ast_2100.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2012 Red Hat Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + * 21 + * The above copyright notice and this permission notice (including the 22 + * next paragraph) shall be included in all copies or substantial portions 23 + * of the Software. 
24 + */ 25 + /* 26 + * Authors: Dave Airlie <airlied@redhat.com> 27 + */ 28 + 29 + #include <linux/delay.h> 30 + 31 + #include "ast_drv.h" 32 + #include "ast_post.h" 33 + 34 + /* 35 + * POST 36 + */ 37 + 38 + static const struct ast_dramstruct ast1100_dram_table_data[] = { 39 + { 0x2000, 0x1688a8a8 }, 40 + { 0x2020, 0x000041f0 }, 41 + AST_DRAMSTRUCT_UDELAY(67u), 42 + { 0x0000, 0xfc600309 }, 43 + { 0x006C, 0x00909090 }, 44 + { 0x0064, 0x00050000 }, 45 + AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585), 46 + { 0x0008, 0x0011030f }, 47 + { 0x0010, 0x22201724 }, 48 + { 0x0018, 0x1e29011a }, 49 + { 0x0020, 0x00c82222 }, 50 + { 0x0014, 0x01001523 }, 51 + { 0x001C, 0x1024010d }, 52 + { 0x0024, 0x00cb2522 }, 53 + { 0x0038, 0xffffff82 }, 54 + { 0x003C, 0x00000000 }, 55 + { 0x0040, 0x00000000 }, 56 + { 0x0044, 0x00000000 }, 57 + { 0x0048, 0x00000000 }, 58 + { 0x004C, 0x00000000 }, 59 + { 0x0050, 0x00000000 }, 60 + { 0x0054, 0x00000000 }, 61 + { 0x0058, 0x00000000 }, 62 + { 0x005C, 0x00000000 }, 63 + { 0x0060, 0x032aa02a }, 64 + { 0x0064, 0x002d3000 }, 65 + { 0x0068, 0x00000000 }, 66 + { 0x0070, 0x00000000 }, 67 + { 0x0074, 0x00000000 }, 68 + { 0x0078, 0x00000000 }, 69 + { 0x007C, 0x00000000 }, 70 + { 0x0034, 0x00000001 }, 71 + AST_DRAMSTRUCT_UDELAY(67u), 72 + { 0x002C, 0x00000732 }, 73 + { 0x0030, 0x00000040 }, 74 + { 0x0028, 0x00000005 }, 75 + { 0x0028, 0x00000007 }, 76 + { 0x0028, 0x00000003 }, 77 + { 0x0028, 0x00000001 }, 78 + { 0x000C, 0x00005a08 }, 79 + { 0x002C, 0x00000632 }, 80 + { 0x0028, 0x00000001 }, 81 + { 0x0030, 0x000003c0 }, 82 + { 0x0028, 0x00000003 }, 83 + { 0x0030, 0x00000040 }, 84 + { 0x0028, 0x00000003 }, 85 + { 0x000C, 0x00005a21 }, 86 + { 0x0034, 0x00007c03 }, 87 + { 0x0120, 0x00004c41 }, 88 + AST_DRAMSTRUCT_INVALID, 89 + }; 90 + 91 + static const struct ast_dramstruct ast2100_dram_table_data[] = { 92 + { 0x2000, 0x1688a8a8 }, 93 + { 0x2020, 0x00004120 }, 94 + AST_DRAMSTRUCT_UDELAY(67u), 95 + { 0x0000, 0xfc600309 }, 96 + { 0x006C, 0x00909090 }, 97 + { 0x0064, 
0x00070000 }, 98 + AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000489), 99 + { 0x0008, 0x0011030f }, 100 + { 0x0010, 0x32302926 }, 101 + { 0x0018, 0x274c0122 }, 102 + { 0x0020, 0x00ce2222 }, 103 + { 0x0014, 0x01001523 }, 104 + { 0x001C, 0x1024010d }, 105 + { 0x0024, 0x00cb2522 }, 106 + { 0x0038, 0xffffff82 }, 107 + { 0x003C, 0x00000000 }, 108 + { 0x0040, 0x00000000 }, 109 + { 0x0044, 0x00000000 }, 110 + { 0x0048, 0x00000000 }, 111 + { 0x004C, 0x00000000 }, 112 + { 0x0050, 0x00000000 }, 113 + { 0x0054, 0x00000000 }, 114 + { 0x0058, 0x00000000 }, 115 + { 0x005C, 0x00000000 }, 116 + { 0x0060, 0x0f2aa02a }, 117 + { 0x0064, 0x003f3005 }, 118 + { 0x0068, 0x02020202 }, 119 + { 0x0070, 0x00000000 }, 120 + { 0x0074, 0x00000000 }, 121 + { 0x0078, 0x00000000 }, 122 + { 0x007C, 0x00000000 }, 123 + { 0x0034, 0x00000001 }, 124 + AST_DRAMSTRUCT_UDELAY(67u), 125 + { 0x002C, 0x00000942 }, 126 + { 0x0030, 0x00000040 }, 127 + { 0x0028, 0x00000005 }, 128 + { 0x0028, 0x00000007 }, 129 + { 0x0028, 0x00000003 }, 130 + { 0x0028, 0x00000001 }, 131 + { 0x000C, 0x00005a08 }, 132 + { 0x002C, 0x00000842 }, 133 + { 0x0028, 0x00000001 }, 134 + { 0x0030, 0x000003c0 }, 135 + { 0x0028, 0x00000003 }, 136 + { 0x0030, 0x00000040 }, 137 + { 0x0028, 0x00000003 }, 138 + { 0x000C, 0x00005a21 }, 139 + { 0x0034, 0x00007c03 }, 140 + { 0x0120, 0x00005061 }, 141 + AST_DRAMSTRUCT_INVALID, 142 + }; 143 + 144 + /* 145 + * AST2100/2150 DLL CBR Setting 146 + */ 147 + #define CBR_SIZE_AST2150 ((16 << 10) - 1) 148 + #define CBR_PASSNUM_AST2150 5 149 + #define CBR_THRESHOLD_AST2150 10 150 + #define CBR_THRESHOLD2_AST2150 10 151 + #define TIMEOUT_AST2150 5000000 152 + 153 + #define CBR_PATNUM_AST2150 8 154 + 155 + static const u32 pattern_AST2150[14] = { 156 + 0xFF00FF00, 157 + 0xCC33CC33, 158 + 0xAA55AA55, 159 + 0xFFFE0001, 160 + 0x683501FE, 161 + 0x0F1929B0, 162 + 0x2D0B4346, 163 + 0x60767F02, 164 + 0x6FBE36A6, 165 + 0x3A253035, 166 + 0x3019686D, 167 + 0x41C6167E, 168 + 0x620152BF, 169 + 0x20F050E0 170 + }; 171 + 172 + static 
u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen) 173 + { 174 + u32 data, timeout; 175 + 176 + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 177 + ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3)); 178 + timeout = 0; 179 + do { 180 + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; 181 + if (++timeout > TIMEOUT_AST2150) { 182 + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 183 + return 0xffffffff; 184 + } 185 + } while (!data); 186 + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 187 + ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3)); 188 + timeout = 0; 189 + do { 190 + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; 191 + if (++timeout > TIMEOUT_AST2150) { 192 + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 193 + return 0xffffffff; 194 + } 195 + } while (!data); 196 + data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; 197 + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 198 + return data; 199 + } 200 + 201 + static int cbrtest_ast2150(struct ast_device *ast) 202 + { 203 + int i; 204 + 205 + for (i = 0; i < 8; i++) 206 + if (mmctestburst2_ast2150(ast, i)) 207 + return 0; 208 + return 1; 209 + } 210 + 211 + static int cbrscan_ast2150(struct ast_device *ast, int busw) 212 + { 213 + u32 patcnt, loop; 214 + 215 + for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) { 216 + ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]); 217 + for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) { 218 + if (cbrtest_ast2150(ast)) 219 + break; 220 + } 221 + if (loop == CBR_PASSNUM_AST2150) 222 + return 0; 223 + } 224 + return 1; 225 + } 226 + 227 + static void cbrdlli_ast2150(struct ast_device *ast, int busw) 228 + { 229 + u32 dll_min[4], dll_max[4], dlli, data, passcnt; 230 + 231 + cbr_start: 232 + dll_min[0] = 0xff; 233 + dll_min[1] = 0xff; 234 + dll_min[2] = 0xff; 235 + dll_min[3] = 0xff; 236 + dll_max[0] = 0x00; 237 + dll_max[1] = 0x00; 238 + dll_max[2] = 0x00; 239 + dll_max[3] = 0x00; 240 + passcnt = 0; 241 + 242 + for (dlli = 0; dlli < 100; dlli++) { 243 + ast_moutdwm(ast, 
0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); 244 + data = cbrscan_ast2150(ast, busw); 245 + if (data != 0) { 246 + if (data & 0x1) { 247 + if (dll_min[0] > dlli) 248 + dll_min[0] = dlli; 249 + if (dll_max[0] < dlli) 250 + dll_max[0] = dlli; 251 + } 252 + passcnt++; 253 + } else if (passcnt >= CBR_THRESHOLD_AST2150) { 254 + goto cbr_start; 255 + } 256 + } 257 + if (dll_max[0] == 0 || (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150) 258 + goto cbr_start; 259 + 260 + dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4); 261 + ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); 262 + } 263 + 264 + static void ast_post_chip_2100(struct ast_device *ast) 265 + { 266 + u8 j; 267 + u32 data, temp, i; 268 + const struct ast_dramstruct *dram_reg_info; 269 + 270 + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 271 + 272 + if ((j & 0x80) == 0) { /* VGA only */ 273 + if (ast->chip == AST2100 || ast->chip == AST2200) 274 + dram_reg_info = ast2100_dram_table_data; 275 + else 276 + dram_reg_info = ast1100_dram_table_data; 277 + 278 + ast_write32(ast, 0xf004, 0x1e6e0000); 279 + ast_write32(ast, 0xf000, 0x1); 280 + ast_write32(ast, 0x12000, 0x1688A8A8); 281 + do { 282 + ; 283 + } while (ast_read32(ast, 0x12000) != 0x01); 284 + 285 + ast_write32(ast, 0x10000, 0xfc600309); 286 + do { 287 + ; 288 + } while (ast_read32(ast, 0x10000) != 0x01); 289 + 290 + while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) { 291 + if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) { 292 + for (i = 0; i < 15; i++) 293 + udelay(dram_reg_info->data); 294 + } else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) { 295 + data = dram_reg_info->data; 296 + if (ast->dram_type == AST_DRAM_1Gx16) 297 + data = 0x00000d89; 298 + else if (ast->dram_type == AST_DRAM_1Gx32) 299 + data = 0x00000c8d; 300 + 301 + temp = ast_read32(ast, 0x12070); 302 + temp &= 0xc; 303 + temp <<= 2; 304 + ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp); 305 + } else 
{ 306 + ast_write32(ast, 0x10000 + dram_reg_info->index, 307 + dram_reg_info->data); 308 + } 309 + dram_reg_info++; 310 + } 311 + 312 + /* AST 2100/2150 DRAM calibration */ 313 + data = ast_read32(ast, 0x10120); 314 + if (data == 0x5061) { /* 266Mhz */ 315 + data = ast_read32(ast, 0x10004); 316 + if (data & 0x40) 317 + cbrdlli_ast2150(ast, 16); /* 16 bits */ 318 + else 319 + cbrdlli_ast2150(ast, 32); /* 32 bits */ 320 + } 321 + 322 + temp = ast_read32(ast, 0x1200c); 323 + ast_write32(ast, 0x1200c, temp & 0xfffffffd); 324 + temp = ast_read32(ast, 0x12040); 325 + ast_write32(ast, 0x12040, temp | 0x40); 326 + } 327 + 328 + /* wait ready */ 329 + do { 330 + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 331 + } while ((j & 0x40) == 0); 332 + } 333 + 334 + int ast_2100_post(struct ast_device *ast) 335 + { 336 + ast_2000_set_def_ext_reg(ast); 337 + 338 + if (ast->config_mode == ast_use_p2a) { 339 + ast_post_chip_2100(ast); 340 + } else { 341 + if (ast->tx_chip == AST_TX_SIL164) { 342 + /* Enable DVO */ 343 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); 344 + } 345 + } 346 + 347 + return 0; 348 + }
+1328
drivers/gpu/drm/ast/ast_2300.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */

#include <linux/delay.h>

#include "ast_drv.h"
#include "ast_post.h"

/*
 * POST
 */

/*
 * Program the AST2300-generation extended-register defaults: clear the
 * VGA CRTC scratch registers 0x81..0x9f, unlock/initialize the extended
 * CRTC registers starting at 0xa0 from the extreginfo table (terminated
 * by 0xff), set the extended defaults in CRTC 0x8c/0xb7, and enable the
 * RAMDAC via CRTC 0xb6.
 */
void ast_2300_set_def_ext_reg(struct ast_device *ast)
{
	/* per-index init values for CRTC 0xa0..; 0xff terminates the table */
	static const u8 extreginfo[] = { 0x0f, 0x04, 0x1f, 0xff };
	u8 i, index, reg;
	const u8 *ext_reg_info;

	/* reset scratch */
	for (i = 0x81; i <= 0x9f; i++)
		ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00);

	ext_reg_info = extreginfo;
	index = 0xa0;
	while (*ext_reg_info != 0xff) {
		ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info);
		index++;
		ext_reg_info++;
	}

	/* disable standard IO/MEM decode if secondary */
	/* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */

	/* Set Ext. Default */
	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01);
	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00);

	/* Enable RAMDAC for A1 */
	reg = 0x04;
	reg |= 0x20;
	ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg);
}

/* AST 2300 DRAM settings */
#define AST_DDR3 0
#define AST_DDR2 1

/*
 * Per-frequency/per-chip DRAM controller parameter set, filled in by
 * get_ddr2_info()/get_ddr3_info() and consumed by ddr2_init()/ddr3_init().
 * The reg_* fields hold raw values for the memory-controller registers
 * they are named after (MRS/EMRS mode registers, AC timing, drive
 * strength, DQS input control, etc.).
 */
struct ast2300_dram_param {
	u32 dram_type;
	u32 dram_chipid;
	u32 dram_freq;
	u32 vram_size;
	u32 odt;
	u32 wodt;		/* write ODT enable */
	u32 rodt;		/* read ODT enable */
	u32 dram_config;
	u32 reg_PERIOD;
	u32 reg_MADJ;
	u32 reg_SADJ;
	u32 reg_MRS;
	u32 reg_EMRS;
	u32 reg_AC1;
	u32 reg_AC2;
	u32 reg_DQSIC;
	u32 reg_DRV;
	u32 reg_IOZ;
	u32 reg_DQIDLY;
	u32 reg_FREQ;
	u32 madj_max;		/* upper bound for MADJ stepping in *_init() */
	u32 dll2_finetune_step;
};

/*
 * DQSI DLL CBR Setting
 */
#define CBR_SIZE0 ((1 << 10) - 1)
#define CBR_SIZE1 ((4 << 10) - 1)
#define CBR_SIZE2 ((64 << 10) - 1)
#define CBR_PASSNUM 5
#define CBR_PASSNUM2 5
#define CBR_THRESHOLD 10
#define CBR_THRESHOLD2 10
#define TIMEOUT 5000000
#define CBR_PATNUM 8

/* test patterns written to 0x1e6e007c for the calibration (CBR) scans */
static const u32 pattern[8] = {
	0xFF00FF00,
	0xCC33CC33,
	0xAA55AA55,
	0x88778877,
	0x92CC4D6E,
	0x543D3CDE,
	0xF1E843C7,
	0x7C61D253
};

/*
 * Run one pass of the controller's built-in memory test engine: arm it
 * via 0x1e6e0070 with the given data-generation mode and test control
 * bits, poll the done bit (bit 12), and return the per-lane fail bits
 * from 0x1e6e0078 folded into 16 bits. Returns 0xffffffff if the engine
 * never signals completion within TIMEOUT polls.
 */
static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
	u32 data, timeout;

	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
	ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl);
	timeout = 0;
	do {
		data = ast_mindwm(ast, 0x1e6e0070) & 0x1000;
		if (++timeout > TIMEOUT) {
			/* engine hung: disarm and report total failure */
			ast_moutdwm(ast, 0x1e6e0070, 0x0);
			return 0xffffffff;
		}
	} while (!data);
	data = ast_mindwm(ast, 0x1e6e0078);
	/* fold upper/lower halves so each bit covers one lane pair */
	data = (data | (data >> 16)) & 0xffff;
	ast_moutdwm(ast, 0x1e6e0070, 0x00000000);
	return data;
}

/* burst-mode variant of mmc_test2 (test control 0x41) */
static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
{
	return mmc_test2(ast, datagen, 0x41);
}

/* single-access pass/fail test; mmc_test() is shared via ast_post.h */
static bool mmc_test_single(struct ast_device *ast, u32 datagen)
{
	return mmc_test(ast, datagen, 0xc5);
}

/* single-access variant of mmc_test2 (test control 0x05) */
static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
{
	return mmc_test2(ast, datagen, 0x05);
}

/*
 * One calibration test: single access plus the 8 burst data-gen modes.
 * Return value encodes which byte lane passed:
 *   3 = both lanes pass, 2 = low byte failed, 1 = high byte failed,
 *   0 = both failed (or timeout).
 */
static int cbr_test(struct ast_device *ast)
{
	u32 data;
	int i;

	data = mmc_test_single2(ast, 0);
	if ((data & 0xff) && (data & 0xff00))
		return 0;
	for (i = 0; i < 8; i++) {
		data = mmc_test_burst2(ast, i);
		if ((data & 0xff) && (data & 0xff00))
			return 0;
	}
	if (!data)
		return 3;
	else if (data & 0xff)
		return 2;
	return 1;
}

/*
 * Run cbr_test() over all CBR_PATNUM patterns, retrying each pattern up
 * to CBR_PASSNUM2 times. Returns the AND of the per-pattern lane masks
 * (see cbr_test()), or 0 as soon as no lane survives every pattern.
 */
static int cbr_scan(struct ast_device *ast)
{
	u32 data, data2, patcnt, loop;

	data2 = 3;
	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
		ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
			data = cbr_test(ast);
			if (data != 0) {
				data2 &= data;
				if (!data2)
					return 0;
				break;
			}
		}
		/* pattern never passed within the retry budget */
		if (loop == CBR_PASSNUM2)
			return 0;
	}
	return data2;
}

/*
 * Fine-grained calibration test: returns a 16-bit mask of lanes that
 * passed both the burst and single tests (bit set = pass), or 0 on
 * engine timeout.
 */
static u32 cbr_test2(struct ast_device *ast)
{
	u32 data;

	data = mmc_test_burst2(ast, 0);
	if (data == 0xffff)
		return 0;
	data |= mmc_test_single2(ast, 0);
	if (data == 0xffff)
		return 0;

	/* invert: fail bits from the engine become pass bits here */
	return ~data & 0xffff;
}

/*
 * Like cbr_scan() but at per-lane granularity: AND of cbr_test2() lane
 * masks across all patterns; 0 if any pattern kills every lane.
 */
static u32 cbr_scan2(struct ast_device *ast)
{
	u32 data, data2, patcnt, loop;

	data2 = 0xffff;
	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
		ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
		for (loop = 0; loop < CBR_PASSNUM2; loop++) {
			data = cbr_test2(ast);
			if (data != 0) {
				data2 &= data;
				if (!data2)
					return 0;
				break;
			}
		}
		if (loop == CBR_PASSNUM2)
			return 0;
	}
	return data2;
}

/* pass/fail only: burst then single test; mmc_test_burst() from ast_post.h */
static bool cbr_test3(struct ast_device *ast)
{
	if (!mmc_test_burst(ast, 0))
		return false;
	if (!mmc_test_single(ast, 0))
		return false;
	return true;
}

/*
 * Pass/fail scan over all patterns, allowing one retry per pattern;
 * false as soon as any pattern fails both attempts.
 */
static bool cbr_scan3(struct ast_device *ast)
{
	u32 patcnt, loop;

	for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) {
		ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]);
		for (loop = 0; loop < 2; loop++) {
			if (cbr_test3(ast))
				break;
		}
		if (loop == 2)
			return false;
	}
	return true;
}

/*
 * Fine-tune the per-lane DQ input delays. Sweeps the DLL setting
 * (0..75) via MCR68, records each lane's passing window [dllmin,dllmax]
 * from cbr_scan2(), averages the window starts into a "golden" value,
 * and programs per-lane delay adjustments into MCR80 (lanes 0-7) and
 * MCR84 (lanes 8-15). Retries the whole sweep until all 16 lanes have a
 * window of at least CBR_THRESHOLD2, giving up after 10 attempts.
 * Returns true when all lanes converged.
 */
static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
{
	u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
	bool status = false;
FINETUNE_START:
	for (cnt = 0; cnt < 16; cnt++) {
		dllmin[cnt] = 0xff;
		dllmax[cnt] = 0x0;
	}
	passcnt = 0;
	for (dlli = 0; dlli < 76; dlli++) {
		ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24));
		ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1);
		data = cbr_scan2(ast);
		if (data != 0) {
			mask = 0x00010001;
			for (cnt = 0; cnt < 16; cnt++) {
				if (data & mask) {
					if (dllmin[cnt] > dlli)
						dllmin[cnt] = dlli;
					if (dllmax[cnt] < dlli)
						dllmax[cnt] = dlli;
				}
				mask <<= 1;
			}
			passcnt++;
		} else if (passcnt >= CBR_THRESHOLD2) {
			/* past the passing region; stop sweeping early */
			break;
		}
	}
	gold_sadj[0] = 0x0;
	passcnt = 0;
	for (cnt = 0; cnt < 16; cnt++) {
		if ((dllmax[cnt] > dllmin[cnt]) &&
		    ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			gold_sadj[0] += dllmin[cnt];
			passcnt++;
		}
	}
	if (retry++ > 10)
		goto FINETUNE_DONE;
	if (passcnt != 16)
		goto FINETUNE_START;
	status = true;
FINETUNE_DONE:
	/* average of the 16 per-lane window starts */
	gold_sadj[0] = gold_sadj[0] >> 4;
	gold_sadj[1] = gold_sadj[0];

	data = 0;
	for (cnt = 0; cnt < 8; cnt++) {
		data >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) &&
		    ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			dlli = dllmin[cnt];
			if (gold_sadj[0] >= dlli) {
				/* scale the offset by 19/32, clamp to 3 */
				dlli = ((gold_sadj[0] - dlli) * 19) >> 5;
				if (dlli > 3)
					dlli = 3;
			} else {
				/* negative offset: 3-bit two's-complement-style encoding */
				dlli = ((dlli - gold_sadj[0]) * 19) >> 5;
				if (dlli > 4)
					dlli = 4;
				dlli = (8 - dlli) & 0x7;
			}
			data |= dlli << 21;
		}
	}
	ast_moutdwm(ast, 0x1E6E0080, data);

	data = 0;
	for (cnt = 8; cnt < 16; cnt++) {
		data >>= 3;
		if ((dllmax[cnt] > dllmin[cnt]) &&
		    ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) {
			dlli = dllmin[cnt];
			if (gold_sadj[1] >= dlli) {
				dlli = ((gold_sadj[1] - dlli) * 19) >> 5;
				if (dlli > 3)
					dlli = 3;
				else
					dlli = (dlli - 1) & 0x7;
			} else {
				dlli = ((dlli - gold_sadj[1]) * 19) >> 5;
				dlli += 1;
				if (dlli > 4)
					dlli = 4;
				dlli = (8 - dlli) & 0x7;
			}
			data |= dlli << 21;
		}
	}
	ast_moutdwm(ast, 0x1E6E0084, data);
	return status;
} /* finetuneDQI_L */

/*
 * Fine-tune the DQS input delay/phase. For each (dqidly 0..31,
 * dqsip 0..1) combination, sweeps the DLL setting 0..75 with cbr_scan3()
 * and records the passing window in pass[][][] plus a per-DLL tag map.
 * Then searches for the combination with the widest, best-centered
 * margin and programs it into MCR18 bits [20:16] (dqidly) and [23]
 * (dqsip). DQI CBR is disabled (MCR18 upper half cleared) for the
 * duration of the search.
 */
static void finetuneDQSI(struct ast_device *ast)
{
	u32 dlli, dqsip, dqidly;
	u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
	u32 g_dqidly, g_dqsip, g_margin, g_side;
	u16 pass[32][2][2];	/* [dqidly][dqsip][min,max] passing window */
	char tag[2][76];	/* 'P' per passing DLL step, per dqsip */

	/* Disable DQI CBR */
	reg_mcr0c = ast_mindwm(ast, 0x1E6E000C);
	reg_mcr18 = ast_mindwm(ast, 0x1E6E0018);
	reg_mcr18 &= 0x0000ffff;
	ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);

	for (dlli = 0; dlli < 76; dlli++) {
		tag[0][dlli] = 0x0;
		tag[1][dlli] = 0x0;
	}
	for (dqidly = 0; dqidly < 32; dqidly++) {
		pass[dqidly][0][0] = 0xff;
		pass[dqidly][0][1] = 0x0;
		pass[dqidly][1][0] = 0xff;
		pass[dqidly][1][1] = 0x0;
	}
	for (dqidly = 0; dqidly < 32; dqidly++) {
		passcnt[0] = 0;
		passcnt[1] = 0;
		for (dqsip = 0; dqsip < 2; dqsip++) {
			ast_moutdwm(ast, 0x1E6E000C, 0);
			ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23));
			ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c);
			for (dlli = 0; dlli < 76; dlli++) {
				ast_moutdwm(ast, 0x1E6E0068,
					    0x00001300 | (dlli << 16) | (dlli << 24));
				ast_moutdwm(ast, 0x1E6E0070, 0);
				ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0);
				if (cbr_scan3(ast)) {
					/* a pass at dlli==0 means the window wrapped; skip */
					if (dlli == 0)
						break;
					passcnt[dqsip]++;
					tag[dqsip][dlli] = 'P';
					if (dlli < pass[dqidly][dqsip][0])
						pass[dqidly][dqsip][0] = (u16)dlli;
					if (dlli > pass[dqidly][dqsip][1])
						pass[dqidly][dqsip][1] = (u16)dlli;
				} else if (passcnt[dqsip] >= 5) {
					break;
				} else {
					pass[dqidly][dqsip][0] = 0xff;
					pass[dqidly][dqsip][1] = 0x0;
				}
			}
		}
		/* nothing passed at either phase: skip the next dqidly too */
		if (passcnt[0] == 0 && passcnt[1] == 0)
			dqidly++;
	}
	/* Search margin */
	g_dqidly = 0;
	g_dqsip = 0;
	g_margin = 0;
	g_side = 0;

	for (dqidly = 0; dqidly < 32; dqidly++) {
		for (dqsip = 0; dqsip < 2; dqsip++) {
			if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1])
				continue;
			diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0];
			if ((diff + 2) < g_margin)
				continue;
			passcnt[0] = 0;
			passcnt[1] = 0;
			/* measure contiguous passing runs on both sides of the window */
			for (dlli = pass[dqidly][dqsip][0];
			     dlli > 0 && tag[dqsip][dlli] != 0;
			     dlli--, passcnt[0]++) {
			}
			for (dlli = pass[dqidly][dqsip][1];
			     dlli < 76 && tag[dqsip][dlli] != 0;
			     dlli++, passcnt[1]++) {
			}
			if (passcnt[0] > passcnt[1])
				passcnt[0] = passcnt[1];
			passcnt[1] = 0;
			if (passcnt[0] > g_side)
				passcnt[1] = passcnt[0] - g_side;
			if (diff > (g_margin + 1) && (passcnt[1] > 0 || passcnt[0] > 8)) {
				g_margin = diff;
				g_dqidly = dqidly;
				g_dqsip = dqsip;
				g_side = passcnt[0];
			} else if (passcnt[1] > 1 && g_side < 8) {
				if (diff > g_margin)
					g_margin = diff;
				g_dqidly = dqidly;
				g_dqsip = dqsip;
				g_side = passcnt[0];
			}
		}
	}
	reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23);
	ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
}

/*
 * Top-level DQS/DQ calibration. Runs finetuneDQSI() and finetuneDQI_L(),
 * then sweeps the DLL2 setting, requiring both byte lanes (bits 0/1 of
 * cbr_scan()) to show a window of at least CBR_THRESHOLD steps. The
 * midpoints of both windows are merged into MCR68 together with the
 * value saved in scratch 0x1E720058. Returns false if calibration never
 * converges within 10 retries (the caller re-runs the whole DRAM init).
 */
static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
{
	u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
	bool status = false;

	finetuneDQSI(ast);
	if (finetuneDQI_L(ast, param) == false)
		return status;

CBR_START2:
	dllmin[0] = 0xff;
	dllmin[1] = 0xff;
	dllmax[0] = 0x0;
	dllmax[1] = 0x0;
	passcnt = 0;
	for (dlli = 0; dlli < 76; dlli++) {
		ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24));
		ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2);
		data = cbr_scan(ast);
		if (data != 0) {
			if (data & 0x1) {
				if (dllmin[0] > dlli)
					dllmin[0] = dlli;
				if (dllmax[0] < dlli)
					dllmax[0] = dlli;
			}
			if (data & 0x2) {
				if (dllmin[1] > dlli)
					dllmin[1] = dlli;
				if (dllmax[1] < dlli)
					dllmax[1] = dlli;
			}
			passcnt++;
		} else if (passcnt >= CBR_THRESHOLD) {
			break;
		}
	}
	if (retry++ > 10)
		goto CBR_DONE2;
	if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD)
		goto CBR_START2;
	if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD)
		goto CBR_START2;
	status = true;
CBR_DONE2:
	/* window midpoints: lane 1 in bits [15:8], lane 0 in [7:0] */
	dlli = (dllmin[1] + dllmax[1]) >> 1;
	dlli <<= 8;
	dlli += (dllmin[0] + dllmax[0]) >> 1;
	ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16));
	return status;
} /* CBRDLL2 */

/*
 * Fill *param with the DDR3 controller settings for the strapped DRAM
 * frequency, chip size, and VRAM size. The trap bits read from SCU70
 * (0x1E6E2070 bits [26:25]) modulate the AC2 timing and MRS values.
 * All reg_* constants are board-tuned magic values taken over from the
 * vendor POST code.
 */
static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
	u32 trap, trap_AC2, trap_MRS;

	/* unlock the SCU (0x1E6E2000) with the magic key */
	ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);

	/* Ger trap info */
	trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
	trap_AC2 = 0x00020000 + (trap << 16);
	trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19);
	trap_MRS = 0x00000010 + (trap << 4);
	trap_MRS |= ((trap & 0x2) << 18);

	param->reg_MADJ = 0x00034C4C;
	param->reg_SADJ = 0x00001800;
	param->reg_DRV = 0x000000F0;
	param->reg_PERIOD = param->dram_freq;
	param->rodt = 0;

	switch (param->dram_freq) {
	case 336:
		ast_moutdwm(ast, 0x1E6E2020, 0x0190);
		param->wodt = 0;
		param->reg_AC1 = 0x22202725;
		param->reg_AC2 = 0xAA007613 | trap_AC2;
		param->reg_DQSIC = 0x000000BA;
		param->reg_MRS = 0x04001400 | trap_MRS;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000023;
		param->reg_DQIDLY = 0x00000074;
		param->reg_FREQ = 0x00004DC0;
		param->madj_max = 96;
		param->dll2_finetune_step = 3;
		switch (param->dram_chipid) {
		default:
		case AST_DRAM_512Mx16:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xAA007613 | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xAA00761C | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xAA007636 | trap_AC2;
			break;
		}
		break;
	default:
	case 396:
		ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
		param->wodt = 1;
		param->reg_AC1 = 0x33302825;
		param->reg_AC2 = 0xCC009617 | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x04001600 | trap_MRS;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000034;
		param->reg_DRV = 0x000000FA;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x00005040;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		switch (param->dram_chipid) {
		default:
		case AST_DRAM_512Mx16:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC009617 | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC009622 | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00963F | trap_AC2;
			break;
		}
		break;

	case 408:
		ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
		param->wodt = 1;
		param->reg_AC1 = 0x33302825;
		param->reg_AC2 = 0xCC009617 | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x04001600 | trap_MRS;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000023;
		param->reg_DRV = 0x000000FA;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x000050C0;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		switch (param->dram_chipid) {
		default:
		case AST_DRAM_512Mx16:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC009617 | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC009622 | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00963F | trap_AC2;
			break;
		}

		break;
	case 456:
		ast_moutdwm(ast, 0x1E6E2020, 0x0230);
		param->wodt = 0;
		param->reg_AC1 = 0x33302926;
		param->reg_AC2 = 0xCD44961A;
		param->reg_DQSIC = 0x000000FC;
		param->reg_MRS = 0x00081830;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x00000097;
		param->reg_FREQ = 0x000052C0;
		param->madj_max = 88;
		param->dll2_finetune_step = 4;
		break;
	case 504:
		ast_moutdwm(ast, 0x1E6E2020, 0x0270);
		param->wodt = 1;
		param->reg_AC1 = 0x33302926;
		param->reg_AC2 = 0xDE44A61D;
		param->reg_DQSIC = 0x00000117;
		param->reg_MRS = 0x00081A30;
		param->reg_EMRS = 0x00000000;
		param->reg_IOZ = 0x070000BB;
		param->reg_DQIDLY = 0x000000A0;
		param->reg_FREQ = 0x000054C0;
		param->madj_max = 79;
		param->dll2_finetune_step = 4;
		break;
	case 528:
		ast_moutdwm(ast, 0x1E6E2020, 0x0290);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302926;
		param->reg_AC2 = 0xEF44B61E;
		param->reg_DQSIC = 0x00000125;
		param->reg_MRS = 0x00081A30;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000023;
		param->reg_DQIDLY = 0x00000088;
		param->reg_FREQ = 0x000055C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	case 576:
		ast_moutdwm(ast, 0x1E6E2020, 0x0140);
		param->reg_MADJ = 0x00136868;
		param->reg_SADJ = 0x00004534;
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302A37;
		param->reg_AC2 = 0xEF56B61E;
		param->reg_DQSIC = 0x0000013F;
		param->reg_MRS = 0x00101A50;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000023;
		param->reg_DQIDLY = 0x00000078;
		param->reg_FREQ = 0x000057C0;
		param->madj_max = 136;
		param->dll2_finetune_step = 3;
		break;
	case 600:
		ast_moutdwm(ast, 0x1E6E2020, 0x02E1);
		param->reg_MADJ = 0x00136868;
		param->reg_SADJ = 0x00004534;
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x32302A37;
		param->reg_AC2 = 0xDF56B61F;
		param->reg_DQSIC = 0x0000014D;
		param->reg_MRS = 0x00101A50;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000023;
		param->reg_DQIDLY = 0x00000078;
		param->reg_FREQ = 0x000058C0;
		param->madj_max = 132;
		param->dll2_finetune_step = 3;
		break;
	case 624:
		ast_moutdwm(ast, 0x1E6E2020, 0x0160);
		param->reg_MADJ = 0x00136868;
		param->reg_SADJ = 0x00004534;
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x32302A37;
		param->reg_AC2 = 0xEF56B621;
		param->reg_DQSIC = 0x0000015A;
		param->reg_MRS = 0x02101A50;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000078;
		param->reg_FREQ = 0x000059C0;
		param->madj_max = 128;
		param->dll2_finetune_step = 3;
		break;
	} /* switch freq */

	switch (param->dram_chipid) {
	case AST_DRAM_512Mx16:
		param->dram_config = 0x130;
		break;
	default:
	case AST_DRAM_1Gx16:
		param->dram_config = 0x131;
		break;
	case AST_DRAM_2Gx16:
		param->dram_config = 0x132;
		break;
	case AST_DRAM_4Gx16:
		param->dram_config = 0x133;
		break;
	} /* switch size */

	switch (param->vram_size) {
	default:
	case SZ_8M:
		param->dram_config |= 0x00;
		break;
	case SZ_16M:
		param->dram_config |= 0x04;
		break;
	case SZ_32M:
		param->dram_config |= 0x08;
		break;
	case SZ_64M:
		param->dram_config |= 0x0c;
		break;
	}
}

/*
 * Program the memory controller for DDR3 using the values in *param:
 * reset the controller, program clock/timing/drive registers, wait for
 * the MCLK2X/MCLK lock (stepping MADJ up to param->madj_max until the
 * ratio is acceptable), issue the DDR3 mode-register sequence, enable
 * ODT, and run the DQS calibration. The entire sequence restarts (up to
 * 10 times) if cbr_dll2() fails to converge. Register write order and
 * the udelay()s are part of the hardware protocol — do not reorder.
 */
static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
	u32 data, data2, retry = 0;

ddr3_init_start:
	ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
	ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
	ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0034, 0x00000000);
	udelay(10);
	ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
	ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
	udelay(10);
	ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
	udelay(10);

	ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
	ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
	ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
	ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
	ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
	ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
	ast_moutdwm(ast, 0x1E6E0018, 0x4000A170);
	ast_moutdwm(ast, 0x1E6E0018, 0x00002370);
	ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0040, 0xFF444444);
	ast_moutdwm(ast, 0x1E6E0044, 0x22222222);
	ast_moutdwm(ast, 0x1E6E0048, 0x22222222);
	ast_moutdwm(ast, 0x1E6E004C, 0x00000002);
	ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
	ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0054, 0);
	ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
	ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
	ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
	ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
	/* Wait MCLK2X lock to MCLK */
	do {
		data = ast_mindwm(ast, 0x1E6E001C);
	} while (!(data & 0x08000000));
	data = ast_mindwm(ast, 0x1E6E001C);
	data = (data >> 8) & 0xff;
	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
		/* step MADJ by 4 and re-derive SADJ until the ratio is sane */
		data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
		if ((data2 & 0xff) > param->madj_max)
			break;
		ast_moutdwm(ast, 0x1E6E0064, data2);
		if (data2 & 0x00100000)
			data2 = ((data2 & 0xff) >> 3) + 3;
		else
			data2 = ((data2 & 0xff) >> 2) + 5;
		data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
		data2 += data & 0xff;
		data = data | (data2 << 8);
		ast_moutdwm(ast, 0x1E6E0068, data);
		udelay(10);
		ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
		udelay(10);
		data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
		ast_moutdwm(ast, 0x1E6E0018, data);
		data = data | 0x200;
		ast_moutdwm(ast, 0x1E6E0018, data);
		do {
			data = ast_mindwm(ast, 0x1E6E001C);
		} while (!(data & 0x08000000));

		data = ast_mindwm(ast, 0x1E6E001C);
		data = (data >> 8) & 0xff;
	}
	/* stash the final SADJ low half for cbr_dll2() */
	ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff);
	data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
	ast_moutdwm(ast, 0x1E6E0018, data);

	ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
	ast_moutdwm(ast, 0x1E6E000C, 0x00000040);
	udelay(50);
	/* Mode Register Setting */
	ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
	ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
	ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
	ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000001);

	ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
	data = 0;
	if (param->wodt)
		data = 0x300;
	if (param->rodt)
		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
	ast_moutdwm(ast, 0x1E6E0034, data | 0x3);

	/* Calibrate the DQSI delay */
	if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
		goto ddr3_init_start;

	ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);
	/* ECC Memory Initialization */
#ifdef ECC
	ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0070, 0x221);
	do {
		data = ast_mindwm(ast, 0x1E6E0070);
	} while (!(data & 0x00001000));
	ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
	ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
}

/*
 * DDR2 counterpart of get_ddr3_info(): fill *param from the strapped
 * frequency/chip-size/VRAM-size. Trap encoding for AC2/MRS differs from
 * the DDR3 variant; all constants are vendor-tuned magic values.
 */
static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
	u32 trap, trap_AC2, trap_MRS;

	/* unlock the SCU (0x1E6E2000) with the magic key */
	ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8);

	/* Ger trap info */
	trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3;
	trap_AC2 = (trap << 20) | (trap << 16);
	trap_AC2 += 0x00110000;
	trap_MRS = 0x00000040 | (trap << 4);

	param->reg_MADJ = 0x00034C4C;
	param->reg_SADJ = 0x00001800;
	param->reg_DRV = 0x000000F0;
	param->reg_PERIOD = param->dram_freq;
	param->rodt = 0;

	switch (param->dram_freq) {
	case 264:
		ast_moutdwm(ast, 0x1E6E2020, 0x0130);
		param->wodt = 0;
		param->reg_AC1 = 0x11101513;
		param->reg_AC2 = 0x78117011;
		param->reg_DQSIC = 0x00000092;
		param->reg_MRS = 0x00000842;
		param->reg_EMRS = 0x00000000;
		param->reg_DRV = 0x000000F0;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x0000005A;
		param->reg_FREQ = 0x00004AC0;
		param->madj_max = 138;
		param->dll2_finetune_step = 3;
		break;
	case 336:
		ast_moutdwm(ast, 0x1E6E2020, 0x0190);
		param->wodt = 1;
		param->reg_AC1 = 0x22202613;
		param->reg_AC2 = 0xAA009016 | trap_AC2;
		param->reg_DQSIC = 0x000000BA;
		param->reg_MRS = 0x00000A02 | trap_MRS;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000074;
		param->reg_FREQ = 0x00004DC0;
		param->madj_max = 96;
		param->dll2_finetune_step = 3;
		switch (param->dram_chipid) {
		default:
		case AST_DRAM_512Mx16:
			param->reg_AC2 = 0xAA009012 | trap_AC2;
			break;
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xAA009016 | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xAA009023 | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xAA00903B | trap_AC2;
			break;
		}
		break;
	default:
	case 396:
		ast_moutdwm(ast, 0x1E6E2020, 0x03F1);
		param->wodt = 1;
		param->rodt = 0;
		param->reg_AC1 = 0x33302714;
		param->reg_AC2 = 0xCC00B01B | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x00000C02 | trap_MRS;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x00005040;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		switch (param->dram_chipid) {
		case AST_DRAM_512Mx16:
			param->reg_AC2 = 0xCC00B016 | trap_AC2;
			break;
		default:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC00B01B | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC00B02B | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00B03F | trap_AC2;
			break;
		}

		break;

	case 408:
		ast_moutdwm(ast, 0x1E6E2020, 0x01F0);
		param->wodt = 1;
		param->rodt = 0;
		param->reg_AC1 = 0x33302714;
		param->reg_AC2 = 0xCC00B01B | trap_AC2;
		param->reg_DQSIC = 0x000000E2;
		param->reg_MRS = 0x00000C02 | trap_MRS;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x000000FA;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000089;
		param->reg_FREQ = 0x000050C0;
		param->madj_max = 96;
		param->dll2_finetune_step = 4;

		switch (param->dram_chipid) {
		case AST_DRAM_512Mx16:
			param->reg_AC2 = 0xCC00B016 | trap_AC2;
			break;
		default:
		case AST_DRAM_1Gx16:
			param->reg_AC2 = 0xCC00B01B | trap_AC2;
			break;
		case AST_DRAM_2Gx16:
			param->reg_AC2 = 0xCC00B02B | trap_AC2;
			break;
		case AST_DRAM_4Gx16:
			param->reg_AC2 = 0xCC00B03F | trap_AC2;
			break;
		}

		break;
	case 456:
		ast_moutdwm(ast, 0x1E6E2020, 0x0230);
		param->wodt = 0;
		param->reg_AC1 = 0x33302815;
		param->reg_AC2 = 0xCD44B01E;
		param->reg_DQSIC = 0x000000FC;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000000;
		param->reg_DRV = 0x00000000;
		param->reg_IOZ = 0x00000034;
		param->reg_DQIDLY = 0x00000097;
		param->reg_FREQ = 0x000052C0;
		param->madj_max = 88;
		param->dll2_finetune_step = 3;
		break;
	case 504:
		ast_moutdwm(ast, 0x1E6E2020, 0x0261);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302815;
		param->reg_AC2 = 0xDE44C022;
		param->reg_DQSIC = 0x00000117;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x0000000A;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000A0;
		param->reg_FREQ = 0x000054C0;
		param->madj_max = 79;
		param->dll2_finetune_step = 3;
		break;
	case 528:
		ast_moutdwm(ast, 0x1E6E2020, 0x0120);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x33302815;
		param->reg_AC2 = 0xEF44D024;
		param->reg_DQSIC = 0x00000125;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F9;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000A7;
		param->reg_FREQ = 0x000055C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	case 552:
		ast_moutdwm(ast, 0x1E6E2020, 0x02A1);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x43402915;
		param->reg_AC2 = 0xFF44E025;
		param->reg_DQSIC = 0x00000132;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000040;
		param->reg_DRV = 0x0000000A;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000AD;
		param->reg_FREQ = 0x000056C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	case 576:
		ast_moutdwm(ast, 0x1E6E2020, 0x0140);
		param->wodt = 1;
		param->rodt = 1;
		param->reg_AC1 = 0x43402915;
		param->reg_AC2 = 0xFF44E027;
		param->reg_DQSIC = 0x0000013F;
		param->reg_MRS = 0x00000E72;
		param->reg_EMRS = 0x00000004;
		param->reg_DRV = 0x000000F5;
		param->reg_IOZ = 0x00000045;
		param->reg_DQIDLY = 0x000000B3;
		param->reg_FREQ = 0x000057C0;
		param->madj_max = 76;
		param->dll2_finetune_step = 3;
		break;
	}

	switch (param->dram_chipid) {
	case AST_DRAM_512Mx16:
		param->dram_config = 0x100;
		break;
	default:
	case AST_DRAM_1Gx16:
		param->dram_config = 0x121;
		break;
	case AST_DRAM_2Gx16:
		param->dram_config = 0x122;
		break;
	case AST_DRAM_4Gx16:
		param->dram_config = 0x123;
		break;
	} /* switch size */

	switch (param->vram_size) {
	default:
	case SZ_8M:
		param->dram_config |= 0x00;
		break;
	case SZ_16M:
		param->dram_config |= 0x04;
		break;
	case SZ_32M:
		param->dram_config |= 0x08;
		break;
	case SZ_64M:
		param->dram_config |= 0x0c;
		break;
	}
}

/*
 * DDR2 counterpart of ddr3_init(): same overall shape (controller
 * reset, timing setup, MCLK2X lock loop, DDR2 MRS/EMRS sequence with
 * the OCD-default EMRS|0x380 step, ODT setup, DQS calibration with full
 * restart on failure). Write order and delays are part of the hardware
 * protocol — do not reorder.
 */
static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
	u32 data, data2, retry = 0;

ddr2_init_start:
	ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
	ast_moutdwm(ast, 0x1E6E0018, 0x00000100);
	ast_moutdwm(ast, 0x1E6E0024, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ);
	ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ);
	udelay(10);
	ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000);
	udelay(10);

	ast_moutdwm(ast, 0x1E6E0004, param->dram_config);
	ast_moutdwm(ast, 0x1E6E0008, 0x90040f);
	ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1);
	ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2);
	ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC);
	ast_moutdwm(ast, 0x1E6E0080, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0084, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY);
	ast_moutdwm(ast, 0x1E6E0018, 0x4000A130);
	ast_moutdwm(ast, 0x1E6E0018, 0x00002330);
	ast_moutdwm(ast, 0x1E6E0038, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0040, 0xFF808000);
	ast_moutdwm(ast, 0x1E6E0044, 0x88848466);
	ast_moutdwm(ast, 0x1E6E0048, 0x44440008);
	ast_moutdwm(ast, 0x1E6E004C, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
	ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0054, 0);
	ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV);
	ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ);
	ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0074, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0078, 0x00000000);
	ast_moutdwm(ast, 0x1E6E007C, 0x00000000);

	/* Wait MCLK2X lock to MCLK */
	do {
		data = ast_mindwm(ast, 0x1E6E001C);
	} while (!(data & 0x08000000));
	data = ast_mindwm(ast, 0x1E6E001C);
	data = (data >> 8) & 0xff;
	while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) {
		data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4;
		if ((data2 & 0xff) > param->madj_max)
			break;
		ast_moutdwm(ast, 0x1E6E0064, data2);
		if (data2 & 0x00100000)
			data2 = ((data2 & 0xff) >> 3) + 3;
		else
			data2 = ((data2 & 0xff) >> 2) + 5;
		data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff;
		data2 += data & 0xff;
		data = data | (data2 << 8);
		ast_moutdwm(ast, 0x1E6E0068, data);
		udelay(10);
		ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000);
		udelay(10);
		data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff;
		ast_moutdwm(ast, 0x1E6E0018, data);
		data = data | 0x200;
		ast_moutdwm(ast, 0x1E6E0018, data);
		do {
			data = ast_mindwm(ast, 0x1E6E001C);
		} while (!(data & 0x08000000));

		data = ast_mindwm(ast, 0x1E6E001C);
		data = (data >> 8) & 0xff;
	}
	/*
	 * NOTE(review): reads MCR08 here, whereas ddr3_init() reads MCR68
	 * for the same scratch write — presumably intentional per the
	 * vendor POST code; confirm against the datasheet before changing.
	 */
	ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff);
	data = ast_mindwm(ast, 0x1E6E0018) | 0xC00;
	ast_moutdwm(ast, 0x1E6E0018, data);

	ast_moutdwm(ast, 0x1E6E0034, 0x00000001);
	ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
	udelay(50);
	/* Mode Register Setting */
	ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100);
	ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000005);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000007);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000001);

	ast_moutdwm(ast, 0x1E6E000C, 0x00005C08);
	ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000001);
	ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000003);
	ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS);
	ast_moutdwm(ast, 0x1E6E0028, 0x00000003);

	ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01);
	data = 0;
	if (param->wodt)
		data = 0x500;
	if (param->rodt)
		data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
	ast_moutdwm(ast, 0x1E6E0034, data | 0x3);
	ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ);

	/* Calibrate the DQSI delay */
	if ((cbr_dll2(ast, param) == false) && (retry++ < 10))
		goto ddr2_init_start;

	/* ECC Memory Initialization */
#ifdef ECC
	ast_moutdwm(ast, 0x1E6E007C, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0070, 0x221);
	do {
		data = ast_mindwm(ast, 0x1E6E0070);
	} while (!(data & 0x00001000));
	ast_moutdwm(ast, 0x1E6E0070, 0x00000000);
	ast_moutdwm(ast, 0x1E6E0050, 0x80000000);
	ast_moutdwm(ast, 0x1E6E0050, 0x00000000);
#endif
}

/*
 * Chip-level POST for the AST2300 generation. Reads the scratch
 * register CRd0: when bit 7 is clear ("VGA only"), performs the warm
 * bring-up (unlock via 0xf004/0xf000, SCU key, controller reset, CPU/AHB
 * clock slow-down) and derives the DRAM type/size parameters from the
 * SCU strap register 0x1e6e2070 before running the appropriate DDR2/DDR3
 * init sequence. (Function continues beyond this chunk.)
 */
static void ast_post_chip_2300(struct ast_device *ast)
{
	struct ast2300_dram_param param;
	u32 temp;
	u8 reg;

	reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
	if ((reg & 0x80) == 0) {/* vga only */
		ast_write32(ast, 0xf004, 0x1e6e0000);
		ast_write32(ast, 0xf000, 0x1);
		ast_write32(ast, 0x12000, 0x1688a8a8);
		do {
			;
		} while (ast_read32(ast, 0x12000) != 0x1);

		ast_write32(ast, 0x10000, 0xfc600309);
		do {
			;
		} while (ast_read32(ast, 0x10000) != 0x1);

		/* Slow down CPU/AHB CLK in VGA only mode */
		temp = ast_read32(ast, 0x12008);
		temp |= 0x73;
		ast_write32(ast, 0x12008, temp);

		param.dram_freq = 396;
		param.dram_type = AST_DDR3;
		temp = ast_mindwm(ast, 0x1e6e2070);
		if (temp & 0x01000000)
			param.dram_type = AST_DDR2;
		switch (temp & 0x18000000) {
		case 0:
			param.dram_chipid = AST_DRAM_512Mx16;
			break;
		default:
		case 0x08000000:
			param.dram_chipid = AST_DRAM_1Gx16;
			break;
		case 0x10000000:
			param.dram_chipid = AST_DRAM_2Gx16;
			break;
		case 0x18000000:
			param.dram_chipid = AST_DRAM_4Gx16;
			break;
		}
		switch (temp & 0x0c) {
		default:
		case 0x00:
			param.vram_size = SZ_8M;
			break;
1284 + case 0x04: 1285 + param.vram_size = SZ_16M; 1286 + break; 1287 + case 0x08: 1288 + param.vram_size = SZ_32M; 1289 + break; 1290 + case 0x0c: 1291 + param.vram_size = SZ_64M; 1292 + break; 1293 + } 1294 + 1295 + if (param.dram_type == AST_DDR3) { 1296 + get_ddr3_info(ast, &param); 1297 + ddr3_init(ast, &param); 1298 + } else { 1299 + get_ddr2_info(ast, &param); 1300 + ddr2_init(ast, &param); 1301 + } 1302 + 1303 + temp = ast_mindwm(ast, 0x1e6e2040); 1304 + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); 1305 + } 1306 + 1307 + /* wait ready */ 1308 + do { 1309 + reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 1310 + } while ((reg & 0x40) == 0); 1311 + } 1312 + 1313 + int ast_2300_post(struct ast_device *ast) 1314 + { 1315 + ast_2300_set_def_ext_reg(ast); 1316 + 1317 + if (ast->config_mode == ast_use_p2a) { 1318 + ast_post_chip_2300(ast); 1319 + ast_init_3rdtx(ast); 1320 + } else { 1321 + if (ast->tx_chip == AST_TX_SIL164) { 1322 + /* Enable DVO */ 1323 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); 1324 + } 1325 + } 1326 + 1327 + return 0; 1328 + }
+569
drivers/gpu/drm/ast/ast_2500.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2012 Red Hat Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + * 21 + * The above copyright notice and this permission notice (including the 22 + * next paragraph) shall be included in all copies or substantial portions 23 + * of the Software. 
24 + */ 25 + /* 26 + * Authors: Dave Airlie <airlied@redhat.com> 27 + */ 28 + 29 + #include <linux/delay.h> 30 + 31 + #include <drm/drm_print.h> 32 + 33 + #include "ast_drv.h" 34 + #include "ast_post.h" 35 + 36 + /* 37 + * POST 38 + */ 39 + 40 + /* 41 + * AST2500 DRAM settings modules 42 + */ 43 + 44 + #define REGTBL_NUM 17 45 + #define REGIDX_010 0 46 + #define REGIDX_014 1 47 + #define REGIDX_018 2 48 + #define REGIDX_020 3 49 + #define REGIDX_024 4 50 + #define REGIDX_02C 5 51 + #define REGIDX_030 6 52 + #define REGIDX_214 7 53 + #define REGIDX_2E0 8 54 + #define REGIDX_2E4 9 55 + #define REGIDX_2E8 10 56 + #define REGIDX_2EC 11 57 + #define REGIDX_2F0 12 58 + #define REGIDX_2F4 13 59 + #define REGIDX_2F8 14 60 + #define REGIDX_RFC 15 61 + #define REGIDX_PLL 16 62 + 63 + static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { 64 + 0x64604D38, /* 0x010 */ 65 + 0x29690599, /* 0x014 */ 66 + 0x00000300, /* 0x018 */ 67 + 0x00000000, /* 0x020 */ 68 + 0x00000000, /* 0x024 */ 69 + 0x02181E70, /* 0x02C */ 70 + 0x00000040, /* 0x030 */ 71 + 0x00000024, /* 0x214 */ 72 + 0x02001300, /* 0x2E0 */ 73 + 0x0E0000A0, /* 0x2E4 */ 74 + 0x000E001B, /* 0x2E8 */ 75 + 0x35B8C105, /* 0x2EC */ 76 + 0x08090408, /* 0x2F0 */ 77 + 0x9B000800, /* 0x2F4 */ 78 + 0x0E400A00, /* 0x2F8 */ 79 + 0x9971452F, /* tRFC */ 80 + 0x000071C1 /* PLL */ 81 + }; 82 + 83 + static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { 84 + 0x63604E37, /* 0x010 */ 85 + 0xE97AFA99, /* 0x014 */ 86 + 0x00019000, /* 0x018 */ 87 + 0x08000000, /* 0x020 */ 88 + 0x00000400, /* 0x024 */ 89 + 0x00000410, /* 0x02C */ 90 + 0x00000101, /* 0x030 */ 91 + 0x00000024, /* 0x214 */ 92 + 0x03002900, /* 0x2E0 */ 93 + 0x0E0000A0, /* 0x2E4 */ 94 + 0x000E001C, /* 0x2E8 */ 95 + 0x35B8C106, /* 0x2EC */ 96 + 0x08080607, /* 0x2F0 */ 97 + 0x9B000900, /* 0x2F4 */ 98 + 0x0E400A00, /* 0x2F8 */ 99 + 0x99714545, /* tRFC */ 100 + 0x000071C1 /* PLL */ 101 + }; 102 + 103 + #define TIMEOUT 5000000 104 + 105 + void ast_2500_patch_ahb(void 
__iomem *regs) 106 + { 107 + u32 data; 108 + 109 + /* Clear bus lock condition */ 110 + __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03); 111 + __ast_moutdwm(regs, 0x1e600084, 0x00010000); 112 + __ast_moutdwm(regs, 0x1e600088, 0x00000000); 113 + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); 114 + 115 + data = __ast_mindwm(regs, 0x1e6e2070); 116 + if (data & 0x08000000) { /* check fast reset */ 117 + /* 118 + * If "Fast restet" is enabled for ARM-ICE debugger, 119 + * then WDT needs to enable, that 120 + * WDT04 is WDT#1 Reload reg. 121 + * WDT08 is WDT#1 counter restart reg to avoid system deadlock 122 + * WDT0C is WDT#1 control reg 123 + * [6:5]:= 01:Full chip 124 + * [4]:= 1:1MHz clock source 125 + * [1]:= 1:WDT will be cleeared and disabled after timeout occurs 126 + * [0]:= 1:WDT enable 127 + */ 128 + __ast_moutdwm(regs, 0x1E785004, 0x00000010); 129 + __ast_moutdwm(regs, 0x1E785008, 0x00004755); 130 + __ast_moutdwm(regs, 0x1E78500c, 0x00000033); 131 + udelay(1000); 132 + } 133 + 134 + do { 135 + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); 136 + data = __ast_mindwm(regs, 0x1e6e2000); 137 + } while (data != 1); 138 + 139 + __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ 140 + } 141 + 142 + static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen) 143 + { 144 + return mmc_test(ast, datagen, 0x85); 145 + } 146 + 147 + static bool cbr_test_2500(struct ast_device *ast) 148 + { 149 + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); 150 + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); 151 + if (!mmc_test_burst(ast, 0)) 152 + return false; 153 + if (!mmc_test_single_2500(ast, 0)) 154 + return false; 155 + return true; 156 + } 157 + 158 + static bool ddr_test_2500(struct ast_device *ast) 159 + { 160 + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); 161 + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); 162 + if (!mmc_test_burst(ast, 0)) 163 + return false; 164 + if (!mmc_test_burst(ast, 1)) 165 + return false; 166 + if (!mmc_test_burst(ast, 2)) 167 + return false; 168 
+ if (!mmc_test_burst(ast, 3)) 169 + return false; 170 + if (!mmc_test_single_2500(ast, 0)) 171 + return false; 172 + return true; 173 + } 174 + 175 + static void ddr_init_common_2500(struct ast_device *ast) 176 + { 177 + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); 178 + ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); 179 + ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); 180 + ast_moutdwm(ast, 0x1E6E0040, 0x88448844); 181 + ast_moutdwm(ast, 0x1E6E0044, 0x24422288); 182 + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); 183 + ast_moutdwm(ast, 0x1E6E004C, 0x22222222); 184 + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); 185 + ast_moutdwm(ast, 0x1E6E0208, 0x00000000); 186 + ast_moutdwm(ast, 0x1E6E0218, 0x00000000); 187 + ast_moutdwm(ast, 0x1E6E0220, 0x00000000); 188 + ast_moutdwm(ast, 0x1E6E0228, 0x00000000); 189 + ast_moutdwm(ast, 0x1E6E0230, 0x00000000); 190 + ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); 191 + ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); 192 + ast_moutdwm(ast, 0x1E6E0240, 0x86000000); 193 + ast_moutdwm(ast, 0x1E6E0244, 0x00008600); 194 + ast_moutdwm(ast, 0x1E6E0248, 0x80000000); 195 + ast_moutdwm(ast, 0x1E6E024C, 0x80808080); 196 + } 197 + 198 + static void ddr_phy_init_2500(struct ast_device *ast) 199 + { 200 + u32 data, pass, timecnt; 201 + 202 + pass = 0; 203 + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); 204 + while (!pass) { 205 + for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { 206 + data = ast_mindwm(ast, 0x1E6E0060) & 0x1; 207 + if (!data) 208 + break; 209 + } 210 + if (timecnt != TIMEOUT) { 211 + data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; 212 + if (!data) 213 + pass = 1; 214 + } 215 + if (!pass) { 216 + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); 217 + udelay(10); /* delay 10 us */ 218 + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); 219 + } 220 + } 221 + 222 + ast_moutdwm(ast, 0x1E6E0060, 0x00000006); 223 + } 224 + 225 + /* 226 + * Check DRAM Size 227 + * 1Gb : 0x80000000 ~ 0x87FFFFFF 228 + * 2Gb : 0x80000000 ~ 0x8FFFFFFF 229 + * 4Gb : 0x80000000 ~ 0x9FFFFFFF 230 + * 8Gb : 
0x80000000 ~ 0xBFFFFFFF 231 + */ 232 + static void check_dram_size_2500(struct ast_device *ast, u32 tRFC) 233 + { 234 + u32 reg_04, reg_14; 235 + 236 + reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; 237 + reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; 238 + 239 + ast_moutdwm(ast, 0xA0100000, 0x41424344); 240 + ast_moutdwm(ast, 0x90100000, 0x35363738); 241 + ast_moutdwm(ast, 0x88100000, 0x292A2B2C); 242 + ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); 243 + 244 + /* Check 8Gbit */ 245 + if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { 246 + reg_04 |= 0x03; 247 + reg_14 |= (tRFC >> 24) & 0xFF; 248 + /* Check 4Gbit */ 249 + } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { 250 + reg_04 |= 0x02; 251 + reg_14 |= (tRFC >> 16) & 0xFF; 252 + /* Check 2Gbit */ 253 + } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { 254 + reg_04 |= 0x01; 255 + reg_14 |= (tRFC >> 8) & 0xFF; 256 + } else { 257 + reg_14 |= tRFC & 0xFF; 258 + } 259 + ast_moutdwm(ast, 0x1E6E0004, reg_04); 260 + ast_moutdwm(ast, 0x1E6E0014, reg_14); 261 + } 262 + 263 + static void enable_cache_2500(struct ast_device *ast) 264 + { 265 + u32 reg_04, data; 266 + 267 + reg_04 = ast_mindwm(ast, 0x1E6E0004); 268 + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); 269 + 270 + do 271 + data = ast_mindwm(ast, 0x1E6E0004); 272 + while (!(data & 0x80000)); 273 + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); 274 + } 275 + 276 + static void set_mpll_2500(struct ast_device *ast) 277 + { 278 + u32 addr, data, param; 279 + 280 + /* Reset MMC */ 281 + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); 282 + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); 283 + for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { 284 + ast_moutdwm(ast, addr, 0x0); 285 + addr += 4; 286 + } 287 + ast_moutdwm(ast, 0x1E6E0034, 0x00020000); 288 + 289 + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); 290 + data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; 291 + if (data) { 292 + /* CLKIN = 25MHz */ 293 + param = 0x930023E0; 294 + ast_moutdwm(ast, 0x1E6E2160, 0x00011320); 
295 + } else { 296 + /* CLKIN = 24MHz */ 297 + param = 0x93002400; 298 + } 299 + ast_moutdwm(ast, 0x1E6E2020, param); 300 + udelay(100); 301 + } 302 + 303 + static void reset_mmc_2500(struct ast_device *ast) 304 + { 305 + ast_moutdwm(ast, 0x1E78505C, 0x00000004); 306 + ast_moutdwm(ast, 0x1E785044, 0x00000001); 307 + ast_moutdwm(ast, 0x1E785048, 0x00004755); 308 + ast_moutdwm(ast, 0x1E78504C, 0x00000013); 309 + mdelay(100); 310 + ast_moutdwm(ast, 0x1E785054, 0x00000077); 311 + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); 312 + } 313 + 314 + static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table) 315 + { 316 + ast_moutdwm(ast, 0x1E6E0004, 0x00000303); 317 + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); 318 + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); 319 + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); 320 + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ 321 + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ 322 + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ 323 + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ 324 + 325 + /* DDR PHY Setting */ 326 + ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); 327 + ast_moutdwm(ast, 0x1E6E0204, 0x00001001); 328 + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); 329 + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); 330 + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); 331 + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); 332 + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); 333 + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); 334 + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); 335 + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); 336 + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); 337 + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); 338 + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); 339 + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); 340 + 341 + /* Controller Setting */ 342 + 
ast_moutdwm(ast, 0x1E6E0034, 0x00020091); 343 + 344 + /* Wait DDR PHY init done */ 345 + ddr_phy_init_2500(ast); 346 + 347 + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); 348 + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); 349 + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); 350 + 351 + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); 352 + enable_cache_2500(ast); 353 + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); 354 + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); 355 + } 356 + 357 + static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table) 358 + { 359 + u32 data, data2, pass, retrycnt; 360 + u32 ddr_vref, phy_vref; 361 + u32 min_ddr_vref = 0, min_phy_vref = 0; 362 + u32 max_ddr_vref = 0, max_phy_vref = 0; 363 + 364 + ast_moutdwm(ast, 0x1E6E0004, 0x00000313); 365 + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); 366 + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); 367 + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); 368 + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ 369 + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ 370 + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ 371 + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ 372 + 373 + /* DDR PHY Setting */ 374 + ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); 375 + ast_moutdwm(ast, 0x1E6E0204, 0x09002000); 376 + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); 377 + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); 378 + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); 379 + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); 380 + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); 381 + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); 382 + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); 383 + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); 384 + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); 385 + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); 386 + ast_moutdwm(ast, 0x1E6E0290, 
0x00100008); 387 + ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C); 388 + ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); 389 + 390 + /* Controller Setting */ 391 + ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); 392 + 393 + /* Train PHY Vref first */ 394 + pass = 0; 395 + 396 + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { 397 + max_phy_vref = 0x0; 398 + pass = 0; 399 + ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06); 400 + for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { 401 + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); 402 + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); 403 + ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8)); 404 + /* Fire DFI Init */ 405 + ddr_phy_init_2500(ast); 406 + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); 407 + if (cbr_test_2500(ast)) { 408 + pass++; 409 + data = ast_mindwm(ast, 0x1E6E03D0); 410 + data2 = data >> 8; 411 + data = data & 0xff; 412 + if (data > data2) 413 + data = data2; 414 + if (max_phy_vref < data) { 415 + max_phy_vref = data; 416 + min_phy_vref = phy_vref; 417 + } 418 + } else if (pass > 0) { 419 + break; 420 + } 421 + } 422 + } 423 + ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); 424 + 425 + /* Train DDR Vref next */ 426 + pass = 0; 427 + 428 + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { 429 + min_ddr_vref = 0xFF; 430 + max_ddr_vref = 0x0; 431 + pass = 0; 432 + for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { 433 + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); 434 + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); 435 + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); 436 + /* Fire DFI Init */ 437 + ddr_phy_init_2500(ast); 438 + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); 439 + if (cbr_test_2500(ast)) { 440 + pass++; 441 + if (min_ddr_vref > ddr_vref) 442 + min_ddr_vref = ddr_vref; 443 + if (max_ddr_vref < ddr_vref) 444 + max_ddr_vref = ddr_vref; 445 + } else if (pass != 0) { 446 + break; 447 + } 448 + } 449 + } 450 + 451 + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); 452 + ast_moutdwm(ast, 
0x1E6E0060, 0x00000000); 453 + ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1; 454 + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); 455 + 456 + /* Wait DDR PHY init done */ 457 + ddr_phy_init_2500(ast); 458 + 459 + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); 460 + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); 461 + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); 462 + 463 + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); 464 + enable_cache_2500(ast); 465 + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); 466 + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); 467 + } 468 + 469 + static bool ast_dram_init_2500(struct ast_device *ast) 470 + { 471 + u32 data; 472 + u32 max_tries = 5; 473 + 474 + do { 475 + if (max_tries-- == 0) 476 + return false; 477 + set_mpll_2500(ast); 478 + reset_mmc_2500(ast); 479 + ddr_init_common_2500(ast); 480 + 481 + data = ast_mindwm(ast, 0x1E6E2070); 482 + if (data & 0x01000000) 483 + ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); 484 + else 485 + ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); 486 + } while (!ddr_test_2500(ast)); 487 + 488 + ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); 489 + 490 + /* Patch code */ 491 + data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; 492 + ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); 493 + 494 + return true; 495 + } 496 + 497 + static void ast_post_chip_2500(struct ast_device *ast) 498 + { 499 + struct drm_device *dev = &ast->base; 500 + u32 temp; 501 + u8 reg; 502 + 503 + reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 504 + if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ 505 + /* Clear bus lock condition */ 506 + ast_2500_patch_ahb(ast->regs); 507 + 508 + /* Disable watchdog */ 509 + ast_moutdwm(ast, 0x1E78502C, 0x00000000); 510 + ast_moutdwm(ast, 0x1E78504C, 0x00000000); 511 + 512 + /* 513 + * Reset USB port to patch USB unknown device issue 514 + * SCU90 is Multi-function Pin Control #5 515 + * [29]:= 1:Enable USB2.0 Host 
port#1 (that the mutually shared USB2.0 Hub 516 + * port). 517 + * SCU94 is Multi-function Pin Control #6 518 + * [14:13]:= 1x:USB2.0 Host2 controller 519 + * SCU70 is Hardware Strap reg 520 + * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by 521 + * [18]: 0(24)/1(48) MHz) 522 + * SCU7C is Write clear reg to SCU70 523 + * [23]:= write 1 and then SCU70[23] will be clear as 0b. 524 + */ 525 + ast_moutdwm(ast, 0x1E6E2090, 0x20000000); 526 + ast_moutdwm(ast, 0x1E6E2094, 0x00004000); 527 + if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { 528 + ast_moutdwm(ast, 0x1E6E207C, 0x00800000); 529 + mdelay(100); 530 + ast_moutdwm(ast, 0x1E6E2070, 0x00800000); 531 + } 532 + /* Modify eSPI reset pin */ 533 + temp = ast_mindwm(ast, 0x1E6E2070); 534 + if (temp & 0x02000000) 535 + ast_moutdwm(ast, 0x1E6E207C, 0x00004000); 536 + 537 + /* Slow down CPU/AHB CLK in VGA only mode */ 538 + temp = ast_read32(ast, 0x12008); 539 + temp |= 0x73; 540 + ast_write32(ast, 0x12008, temp); 541 + 542 + if (!ast_dram_init_2500(ast)) 543 + drm_err(dev, "DRAM init failed !\n"); 544 + 545 + temp = ast_mindwm(ast, 0x1e6e2040); 546 + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); 547 + } 548 + 549 + /* wait ready */ 550 + do { 551 + reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 552 + } while ((reg & 0x40) == 0); 553 + } 554 + 555 + int ast_2500_post(struct ast_device *ast) 556 + { 557 + ast_2300_set_def_ext_reg(ast); 558 + 559 + if (ast->config_mode == ast_use_p2a) { 560 + ast_post_chip_2500(ast); 561 + } else { 562 + if (ast->tx_chip == AST_TX_SIL164) { 563 + /* Enable DVO */ 564 + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); 565 + } 566 + } 567 + 568 + return 0; 569 + }
+44
drivers/gpu/drm/ast/ast_2600.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2012 Red Hat Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + * 21 + * The above copyright notice and this permission notice (including the 22 + * next paragraph) shall be included in all copies or substantial portions 23 + * of the Software. 24 + */ 25 + /* 26 + * Authors: Dave Airlie <airlied@redhat.com> 27 + */ 28 + 29 + #include "ast_drv.h" 30 + #include "ast_post.h" 31 + 32 + /* 33 + * POST 34 + */ 35 + 36 + int ast_2600_post(struct ast_device *ast) 37 + { 38 + ast_2300_set_def_ext_reg(ast); 39 + 40 + if (ast->tx_chip == AST_TX_ASTDP) 41 + return ast_dp_launch(ast); 42 + 43 + return 0; 44 + }
-207
drivers/gpu/drm/ast/ast_dram_tables.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef AST_DRAM_TABLES_H 3 - #define AST_DRAM_TABLES_H 4 - 5 - /* DRAM timing tables */ 6 - struct ast_dramstruct { 7 - u16 index; 8 - u32 data; 9 - }; 10 - 11 - static const struct ast_dramstruct ast2000_dram_table_data[] = { 12 - { 0x0108, 0x00000000 }, 13 - { 0x0120, 0x00004a21 }, 14 - { 0xFF00, 0x00000043 }, 15 - { 0x0000, 0xFFFFFFFF }, 16 - { 0x0004, 0x00000089 }, 17 - { 0x0008, 0x22331353 }, 18 - { 0x000C, 0x0d07000b }, 19 - { 0x0010, 0x11113333 }, 20 - { 0x0020, 0x00110350 }, 21 - { 0x0028, 0x1e0828f0 }, 22 - { 0x0024, 0x00000001 }, 23 - { 0x001C, 0x00000000 }, 24 - { 0x0014, 0x00000003 }, 25 - { 0xFF00, 0x00000043 }, 26 - { 0x0018, 0x00000131 }, 27 - { 0x0014, 0x00000001 }, 28 - { 0xFF00, 0x00000043 }, 29 - { 0x0018, 0x00000031 }, 30 - { 0x0014, 0x00000001 }, 31 - { 0xFF00, 0x00000043 }, 32 - { 0x0028, 0x1e0828f1 }, 33 - { 0x0024, 0x00000003 }, 34 - { 0x002C, 0x1f0f28fb }, 35 - { 0x0030, 0xFFFFFE01 }, 36 - { 0xFFFF, 0xFFFFFFFF } 37 - }; 38 - 39 - static const struct ast_dramstruct ast1100_dram_table_data[] = { 40 - { 0x2000, 0x1688a8a8 }, 41 - { 0x2020, 0x000041f0 }, 42 - { 0xFF00, 0x00000043 }, 43 - { 0x0000, 0xfc600309 }, 44 - { 0x006C, 0x00909090 }, 45 - { 0x0064, 0x00050000 }, 46 - { 0x0004, 0x00000585 }, 47 - { 0x0008, 0x0011030f }, 48 - { 0x0010, 0x22201724 }, 49 - { 0x0018, 0x1e29011a }, 50 - { 0x0020, 0x00c82222 }, 51 - { 0x0014, 0x01001523 }, 52 - { 0x001C, 0x1024010d }, 53 - { 0x0024, 0x00cb2522 }, 54 - { 0x0038, 0xffffff82 }, 55 - { 0x003C, 0x00000000 }, 56 - { 0x0040, 0x00000000 }, 57 - { 0x0044, 0x00000000 }, 58 - { 0x0048, 0x00000000 }, 59 - { 0x004C, 0x00000000 }, 60 - { 0x0050, 0x00000000 }, 61 - { 0x0054, 0x00000000 }, 62 - { 0x0058, 0x00000000 }, 63 - { 0x005C, 0x00000000 }, 64 - { 0x0060, 0x032aa02a }, 65 - { 0x0064, 0x002d3000 }, 66 - { 0x0068, 0x00000000 }, 67 - { 0x0070, 0x00000000 }, 68 - { 0x0074, 0x00000000 }, 69 - { 0x0078, 0x00000000 }, 70 - { 0x007C, 0x00000000 }, 71 - { 0x0034, 
0x00000001 }, 72 - { 0xFF00, 0x00000043 }, 73 - { 0x002C, 0x00000732 }, 74 - { 0x0030, 0x00000040 }, 75 - { 0x0028, 0x00000005 }, 76 - { 0x0028, 0x00000007 }, 77 - { 0x0028, 0x00000003 }, 78 - { 0x0028, 0x00000001 }, 79 - { 0x000C, 0x00005a08 }, 80 - { 0x002C, 0x00000632 }, 81 - { 0x0028, 0x00000001 }, 82 - { 0x0030, 0x000003c0 }, 83 - { 0x0028, 0x00000003 }, 84 - { 0x0030, 0x00000040 }, 85 - { 0x0028, 0x00000003 }, 86 - { 0x000C, 0x00005a21 }, 87 - { 0x0034, 0x00007c03 }, 88 - { 0x0120, 0x00004c41 }, 89 - { 0xffff, 0xffffffff }, 90 - }; 91 - 92 - static const struct ast_dramstruct ast2100_dram_table_data[] = { 93 - { 0x2000, 0x1688a8a8 }, 94 - { 0x2020, 0x00004120 }, 95 - { 0xFF00, 0x00000043 }, 96 - { 0x0000, 0xfc600309 }, 97 - { 0x006C, 0x00909090 }, 98 - { 0x0064, 0x00070000 }, 99 - { 0x0004, 0x00000489 }, 100 - { 0x0008, 0x0011030f }, 101 - { 0x0010, 0x32302926 }, 102 - { 0x0018, 0x274c0122 }, 103 - { 0x0020, 0x00ce2222 }, 104 - { 0x0014, 0x01001523 }, 105 - { 0x001C, 0x1024010d }, 106 - { 0x0024, 0x00cb2522 }, 107 - { 0x0038, 0xffffff82 }, 108 - { 0x003C, 0x00000000 }, 109 - { 0x0040, 0x00000000 }, 110 - { 0x0044, 0x00000000 }, 111 - { 0x0048, 0x00000000 }, 112 - { 0x004C, 0x00000000 }, 113 - { 0x0050, 0x00000000 }, 114 - { 0x0054, 0x00000000 }, 115 - { 0x0058, 0x00000000 }, 116 - { 0x005C, 0x00000000 }, 117 - { 0x0060, 0x0f2aa02a }, 118 - { 0x0064, 0x003f3005 }, 119 - { 0x0068, 0x02020202 }, 120 - { 0x0070, 0x00000000 }, 121 - { 0x0074, 0x00000000 }, 122 - { 0x0078, 0x00000000 }, 123 - { 0x007C, 0x00000000 }, 124 - { 0x0034, 0x00000001 }, 125 - { 0xFF00, 0x00000043 }, 126 - { 0x002C, 0x00000942 }, 127 - { 0x0030, 0x00000040 }, 128 - { 0x0028, 0x00000005 }, 129 - { 0x0028, 0x00000007 }, 130 - { 0x0028, 0x00000003 }, 131 - { 0x0028, 0x00000001 }, 132 - { 0x000C, 0x00005a08 }, 133 - { 0x002C, 0x00000842 }, 134 - { 0x0028, 0x00000001 }, 135 - { 0x0030, 0x000003c0 }, 136 - { 0x0028, 0x00000003 }, 137 - { 0x0030, 0x00000040 }, 138 - { 0x0028, 0x00000003 }, 139 - { 
0x000C, 0x00005a21 }, 140 - { 0x0034, 0x00007c03 }, 141 - { 0x0120, 0x00005061 }, 142 - { 0xffff, 0xffffffff }, 143 - }; 144 - 145 - /* 146 - * AST2500 DRAM settings modules 147 - */ 148 - #define REGTBL_NUM 17 149 - #define REGIDX_010 0 150 - #define REGIDX_014 1 151 - #define REGIDX_018 2 152 - #define REGIDX_020 3 153 - #define REGIDX_024 4 154 - #define REGIDX_02C 5 155 - #define REGIDX_030 6 156 - #define REGIDX_214 7 157 - #define REGIDX_2E0 8 158 - #define REGIDX_2E4 9 159 - #define REGIDX_2E8 10 160 - #define REGIDX_2EC 11 161 - #define REGIDX_2F0 12 162 - #define REGIDX_2F4 13 163 - #define REGIDX_2F8 14 164 - #define REGIDX_RFC 15 165 - #define REGIDX_PLL 16 166 - 167 - static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { 168 - 0x64604D38, /* 0x010 */ 169 - 0x29690599, /* 0x014 */ 170 - 0x00000300, /* 0x018 */ 171 - 0x00000000, /* 0x020 */ 172 - 0x00000000, /* 0x024 */ 173 - 0x02181E70, /* 0x02C */ 174 - 0x00000040, /* 0x030 */ 175 - 0x00000024, /* 0x214 */ 176 - 0x02001300, /* 0x2E0 */ 177 - 0x0E0000A0, /* 0x2E4 */ 178 - 0x000E001B, /* 0x2E8 */ 179 - 0x35B8C105, /* 0x2EC */ 180 - 0x08090408, /* 0x2F0 */ 181 - 0x9B000800, /* 0x2F4 */ 182 - 0x0E400A00, /* 0x2F8 */ 183 - 0x9971452F, /* tRFC */ 184 - 0x000071C1 /* PLL */ 185 - }; 186 - 187 - static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { 188 - 0x63604E37, /* 0x010 */ 189 - 0xE97AFA99, /* 0x014 */ 190 - 0x00019000, /* 0x018 */ 191 - 0x08000000, /* 0x020 */ 192 - 0x00000400, /* 0x024 */ 193 - 0x00000410, /* 0x02C */ 194 - 0x00000101, /* 0x030 */ 195 - 0x00000024, /* 0x214 */ 196 - 0x03002900, /* 0x2E0 */ 197 - 0x0E0000A0, /* 0x2E4 */ 198 - 0x000E001C, /* 0x2E8 */ 199 - 0x35B8C106, /* 0x2EC */ 200 - 0x08080607, /* 0x2F0 */ 201 - 0x9B000900, /* 0x2F4 */ 202 - 0x0E400A00, /* 0x2F8 */ 203 - 0x99714545, /* tRFC */ 204 - 0x000071C1 /* PLL */ 205 - }; 206 - 207 - #endif
+2 -2
drivers/gpu/drm/ast/ast_drv.c
··· 64 64 .minor = DRIVER_MINOR, 65 65 .patchlevel = DRIVER_PATCHLEVEL, 66 66 67 - DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT, 67 + DRM_GEM_SHMEM_DRIVER_OPS, 68 68 DRM_FBDEV_SHMEM_DRIVER_OPS, 69 69 }; 70 70 ··· 171 171 /* Patch AST2500/AST2510 */ 172 172 if ((pdev->revision & 0xf0) == 0x40) { 173 173 if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK)) 174 - ast_patch_ahb_2500(regs); 174 + ast_2500_patch_ahb(regs); 175 175 } 176 176 177 177 /* Double check that it's actually working */
+16 -1
drivers/gpu/drm/ast/ast_drv.h
··· 417 417 418 418 int ast_mm_init(struct ast_device *ast); 419 419 420 + /* ast_2000.c */ 421 + int ast_2000_post(struct ast_device *ast); 422 + 423 + /* ast_2100.c */ 424 + int ast_2100_post(struct ast_device *ast); 425 + 426 + /* ast_2300.c */ 427 + int ast_2300_post(struct ast_device *ast); 428 + 429 + /* ast_2500.c */ 430 + void ast_2500_patch_ahb(void __iomem *regs); 431 + int ast_2500_post(struct ast_device *ast); 432 + 433 + /* ast_2600.c */ 434 + int ast_2600_post(struct ast_device *ast); 435 + 420 436 /* ast post */ 421 437 int ast_post_gpu(struct ast_device *ast); 422 438 u32 ast_mindwm(struct ast_device *ast, u32 r); 423 439 void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); 424 - void ast_patch_ahb_2500(void __iomem *regs); 425 440 426 441 int ast_vga_output_init(struct ast_device *ast); 427 442 int ast_sil164_output_init(struct ast_device *ast);
+21 -2006
drivers/gpu/drm/ast/ast_post.c
··· 31 31 32 32 #include <drm/drm_print.h> 33 33 34 - #include "ast_dram_tables.h" 35 34 #include "ast_drv.h" 35 + #include "ast_post.h" 36 36 37 - static void ast_post_chip_2300(struct ast_device *ast); 38 - static void ast_post_chip_2500(struct ast_device *ast); 39 - 40 - static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; 41 - static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; 42 - 43 - static void ast_set_def_ext_reg(struct ast_device *ast) 44 - { 45 - u8 i, index, reg; 46 - const u8 *ext_reg_info; 47 - 48 - /* reset scratch */ 49 - for (i = 0x81; i <= 0x9f; i++) 50 - ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00); 51 - 52 - if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) 53 - ext_reg_info = extreginfo_ast2300; 54 - else 55 - ext_reg_info = extreginfo; 56 - 57 - index = 0xa0; 58 - while (*ext_reg_info != 0xff) { 59 - ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info); 60 - index++; 61 - ext_reg_info++; 62 - } 63 - 64 - /* disable standard IO/MEM decode if secondary */ 65 - /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */ 66 - 67 - /* Set Ext. 
Default */ 68 - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01); 69 - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00); 70 - 71 - /* Enable RAMDAC for A1 */ 72 - reg = 0x04; 73 - if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) 74 - reg |= 0x20; 75 - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); 76 - } 77 - 78 - static u32 __ast_mindwm(void __iomem *regs, u32 r) 37 + u32 __ast_mindwm(void __iomem *regs, u32 r) 79 38 { 80 39 u32 data; 81 40 ··· 48 89 return __ast_read32(regs, 0x10000 + (r & 0x0000ffff)); 49 90 } 50 91 51 - static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v) 92 + void __ast_moutdwm(void __iomem *regs, u32 r, u32 v) 52 93 { 53 94 u32 data; 54 95 ··· 72 113 __ast_moutdwm(ast->regs, r, v); 73 114 } 74 115 75 - /* 76 - * AST2100/2150 DLL CBR Setting 77 - */ 78 - #define CBR_SIZE_AST2150 ((16 << 10) - 1) 79 - #define CBR_PASSNUM_AST2150 5 80 - #define CBR_THRESHOLD_AST2150 10 81 - #define CBR_THRESHOLD2_AST2150 10 82 - #define TIMEOUT_AST2150 5000000 83 - 84 - #define CBR_PATNUM_AST2150 8 85 - 86 - static const u32 pattern_AST2150[14] = { 87 - 0xFF00FF00, 88 - 0xCC33CC33, 89 - 0xAA55AA55, 90 - 0xFFFE0001, 91 - 0x683501FE, 92 - 0x0F1929B0, 93 - 0x2D0B4346, 94 - 0x60767F02, 95 - 0x6FBE36A6, 96 - 0x3A253035, 97 - 0x3019686D, 98 - 0x41C6167E, 99 - 0x620152BF, 100 - 0x20F050E0 101 - }; 102 - 103 - static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen) 104 - { 105 - u32 data, timeout; 106 - 107 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 108 - ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3)); 109 - timeout = 0; 110 - do { 111 - data = ast_mindwm(ast, 0x1e6e0070) & 0x40; 112 - if (++timeout > TIMEOUT_AST2150) { 113 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 114 - return 0xffffffff; 115 - } 116 - } while (!data); 117 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 118 - ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3)); 119 - timeout = 0; 120 - do { 121 - data = 
ast_mindwm(ast, 0x1e6e0070) & 0x40; 122 - if (++timeout > TIMEOUT_AST2150) { 123 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 124 - return 0xffffffff; 125 - } 126 - } while (!data); 127 - data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; 128 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 129 - return data; 130 - } 131 - 132 - #if 0 /* unused in DDX driver - here for completeness */ 133 - static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen) 134 - { 135 - u32 data, timeout; 136 - 137 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 138 - ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3)); 139 - timeout = 0; 140 - do { 141 - data = ast_mindwm(ast, 0x1e6e0070) & 0x40; 142 - if (++timeout > TIMEOUT_AST2150) { 143 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 144 - return 0xffffffff; 145 - } 146 - } while (!data); 147 - data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; 148 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 149 - return data; 150 - } 151 - #endif 152 - 153 - static int cbrtest_ast2150(struct ast_device *ast) 154 - { 155 - int i; 156 - 157 - for (i = 0; i < 8; i++) 158 - if (mmctestburst2_ast2150(ast, i)) 159 - return 0; 160 - return 1; 161 - } 162 - 163 - static int cbrscan_ast2150(struct ast_device *ast, int busw) 164 - { 165 - u32 patcnt, loop; 166 - 167 - for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) { 168 - ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]); 169 - for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) { 170 - if (cbrtest_ast2150(ast)) 171 - break; 172 - } 173 - if (loop == CBR_PASSNUM_AST2150) 174 - return 0; 175 - } 176 - return 1; 177 - } 178 - 179 - 180 - static void cbrdlli_ast2150(struct ast_device *ast, int busw) 181 - { 182 - u32 dll_min[4], dll_max[4], dlli, data, passcnt; 183 - 184 - cbr_start: 185 - dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff; 186 - dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0; 187 - passcnt = 0; 188 - 189 - for (dlli = 0; dlli < 100; dlli++) { 190 - ast_moutdwm(ast, 
0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); 191 - data = cbrscan_ast2150(ast, busw); 192 - if (data != 0) { 193 - if (data & 0x1) { 194 - if (dll_min[0] > dlli) 195 - dll_min[0] = dlli; 196 - if (dll_max[0] < dlli) 197 - dll_max[0] = dlli; 198 - } 199 - passcnt++; 200 - } else if (passcnt >= CBR_THRESHOLD_AST2150) 201 - goto cbr_start; 202 - } 203 - if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150) 204 - goto cbr_start; 205 - 206 - dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4); 207 - ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); 208 - } 209 - 210 - 211 - 212 - static void ast_init_dram_reg(struct ast_device *ast) 213 - { 214 - u8 j; 215 - u32 data, temp, i; 216 - const struct ast_dramstruct *dram_reg_info; 217 - 218 - j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 219 - 220 - if ((j & 0x80) == 0) { /* VGA only */ 221 - if (IS_AST_GEN1(ast)) { 222 - dram_reg_info = ast2000_dram_table_data; 223 - ast_write32(ast, 0xf004, 0x1e6e0000); 224 - ast_write32(ast, 0xf000, 0x1); 225 - ast_write32(ast, 0x10100, 0xa8); 226 - 227 - do { 228 - ; 229 - } while (ast_read32(ast, 0x10100) != 0xa8); 230 - } else { /* GEN2/GEN3 */ 231 - if (ast->chip == AST2100 || ast->chip == AST2200) 232 - dram_reg_info = ast2100_dram_table_data; 233 - else 234 - dram_reg_info = ast1100_dram_table_data; 235 - 236 - ast_write32(ast, 0xf004, 0x1e6e0000); 237 - ast_write32(ast, 0xf000, 0x1); 238 - ast_write32(ast, 0x12000, 0x1688A8A8); 239 - do { 240 - ; 241 - } while (ast_read32(ast, 0x12000) != 0x01); 242 - 243 - ast_write32(ast, 0x10000, 0xfc600309); 244 - do { 245 - ; 246 - } while (ast_read32(ast, 0x10000) != 0x01); 247 - } 248 - 249 - while (dram_reg_info->index != 0xffff) { 250 - if (dram_reg_info->index == 0xff00) {/* delay fn */ 251 - for (i = 0; i < 15; i++) 252 - udelay(dram_reg_info->data); 253 - } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) { 254 - data = dram_reg_info->data; 
255 - if (ast->dram_type == AST_DRAM_1Gx16) 256 - data = 0x00000d89; 257 - else if (ast->dram_type == AST_DRAM_1Gx32) 258 - data = 0x00000c8d; 259 - 260 - temp = ast_read32(ast, 0x12070); 261 - temp &= 0xc; 262 - temp <<= 2; 263 - ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp); 264 - } else 265 - ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data); 266 - dram_reg_info++; 267 - } 268 - 269 - /* AST 2100/2150 DRAM calibration */ 270 - data = ast_read32(ast, 0x10120); 271 - if (data == 0x5061) { /* 266Mhz */ 272 - data = ast_read32(ast, 0x10004); 273 - if (data & 0x40) 274 - cbrdlli_ast2150(ast, 16); /* 16 bits */ 275 - else 276 - cbrdlli_ast2150(ast, 32); /* 32 bits */ 277 - } 278 - 279 - switch (AST_GEN(ast)) { 280 - case 1: 281 - temp = ast_read32(ast, 0x10140); 282 - ast_write32(ast, 0x10140, temp | 0x40); 283 - break; 284 - case 2: 285 - case 3: 286 - temp = ast_read32(ast, 0x1200c); 287 - ast_write32(ast, 0x1200c, temp & 0xfffffffd); 288 - temp = ast_read32(ast, 0x12040); 289 - ast_write32(ast, 0x12040, temp | 0x40); 290 - break; 291 - default: 292 - break; 293 - } 294 - } 295 - 296 - /* wait ready */ 297 - do { 298 - j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 299 - } while ((j & 0x40) == 0); 300 - } 301 - 302 116 int ast_post_gpu(struct ast_device *ast) 303 117 { 304 118 int ret; 305 119 306 - ast_set_def_ext_reg(ast); 307 - 308 120 if (AST_GEN(ast) >= 7) { 309 - if (ast->tx_chip == AST_TX_ASTDP) { 310 - ret = ast_dp_launch(ast); 311 - if (ret) 312 - return ret; 313 - } 121 + ret = ast_2600_post(ast); 122 + if (ret) 123 + return ret; 314 124 } else if (AST_GEN(ast) >= 6) { 315 - if (ast->config_mode == ast_use_p2a) { 316 - ast_post_chip_2500(ast); 317 - } else { 318 - if (ast->tx_chip == AST_TX_SIL164) { 319 - /* Enable DVO */ 320 - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); 321 - } 322 - } 125 + ret = ast_2500_post(ast); 126 + if (ret) 127 + return ret; 323 128 } else if (AST_GEN(ast) >= 4) { 
324 - if (ast->config_mode == ast_use_p2a) { 325 - ast_post_chip_2300(ast); 326 - ast_init_3rdtx(ast); 327 - } else { 328 - if (ast->tx_chip == AST_TX_SIL164) { 329 - /* Enable DVO */ 330 - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); 331 - } 332 - } 129 + ret = ast_2300_post(ast); 130 + if (ret) 131 + return ret; 132 + } else if (AST_GEN(ast) >= 2) { 133 + ret = ast_2100_post(ast); 134 + if (ret) 135 + return ret; 333 136 } else { 334 - if (ast->config_mode == ast_use_p2a) { 335 - ast_init_dram_reg(ast); 336 - } else { 337 - if (ast->tx_chip == AST_TX_SIL164) { 338 - /* Enable DVO */ 339 - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); 340 - } 341 - } 137 + ret = ast_2000_post(ast); 138 + if (ret) 139 + return ret; 342 140 } 343 141 344 142 return 0; 345 143 } 346 144 347 - /* AST 2300 DRAM settings */ 348 - #define AST_DDR3 0 349 - #define AST_DDR2 1 350 - 351 - struct ast2300_dram_param { 352 - u32 dram_type; 353 - u32 dram_chipid; 354 - u32 dram_freq; 355 - u32 vram_size; 356 - u32 odt; 357 - u32 wodt; 358 - u32 rodt; 359 - u32 dram_config; 360 - u32 reg_PERIOD; 361 - u32 reg_MADJ; 362 - u32 reg_SADJ; 363 - u32 reg_MRS; 364 - u32 reg_EMRS; 365 - u32 reg_AC1; 366 - u32 reg_AC2; 367 - u32 reg_DQSIC; 368 - u32 reg_DRV; 369 - u32 reg_IOZ; 370 - u32 reg_DQIDLY; 371 - u32 reg_FREQ; 372 - u32 madj_max; 373 - u32 dll2_finetune_step; 374 - }; 375 - 376 - /* 377 - * DQSI DLL CBR Setting 378 - */ 379 - #define CBR_SIZE0 ((1 << 10) - 1) 380 - #define CBR_SIZE1 ((4 << 10) - 1) 381 - #define CBR_SIZE2 ((64 << 10) - 1) 382 - #define CBR_PASSNUM 5 383 - #define CBR_PASSNUM2 5 384 - #define CBR_THRESHOLD 10 385 - #define CBR_THRESHOLD2 10 386 145 #define TIMEOUT 5000000 387 - #define CBR_PATNUM 8 388 146 389 - static const u32 pattern[8] = { 390 - 0xFF00FF00, 391 - 0xCC33CC33, 392 - 0xAA55AA55, 393 - 0x88778877, 394 - 0x92CC4D6E, 395 - 0x543D3CDE, 396 - 0xF1E843C7, 397 - 0x7C61D253 398 - }; 399 - 400 - static bool mmc_test(struct ast_device *ast, 
u32 datagen, u8 test_ctl) 147 + bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl) 401 148 { 402 149 u32 data, timeout; 403 150 ··· 123 458 return true; 124 459 } 125 460 126 - static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl) 127 - { 128 - u32 data, timeout; 129 - 130 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 131 - ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); 132 - timeout = 0; 133 - do { 134 - data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; 135 - if (++timeout > TIMEOUT) { 136 - ast_moutdwm(ast, 0x1e6e0070, 0x0); 137 - return 0xffffffff; 138 - } 139 - } while (!data); 140 - data = ast_mindwm(ast, 0x1e6e0078); 141 - data = (data | (data >> 16)) & 0xffff; 142 - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); 143 - return data; 144 - } 145 - 146 - 147 - static bool mmc_test_burst(struct ast_device *ast, u32 datagen) 461 + bool mmc_test_burst(struct ast_device *ast, u32 datagen) 148 462 { 149 463 return mmc_test(ast, datagen, 0xc1); 150 - } 151 - 152 - static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen) 153 - { 154 - return mmc_test2(ast, datagen, 0x41); 155 - } 156 - 157 - static bool mmc_test_single(struct ast_device *ast, u32 datagen) 158 - { 159 - return mmc_test(ast, datagen, 0xc5); 160 - } 161 - 162 - static u32 mmc_test_single2(struct ast_device *ast, u32 datagen) 163 - { 164 - return mmc_test2(ast, datagen, 0x05); 165 - } 166 - 167 - static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen) 168 - { 169 - return mmc_test(ast, datagen, 0x85); 170 - } 171 - 172 - static int cbr_test(struct ast_device *ast) 173 - { 174 - u32 data; 175 - int i; 176 - data = mmc_test_single2(ast, 0); 177 - if ((data & 0xff) && (data & 0xff00)) 178 - return 0; 179 - for (i = 0; i < 8; i++) { 180 - data = mmc_test_burst2(ast, i); 181 - if ((data & 0xff) && (data & 0xff00)) 182 - return 0; 183 - } 184 - if (!data) 185 - return 3; 186 - else if (data & 0xff) 187 - return 2; 188 - return 1; 189 - } 190 - 191 - static int 
cbr_scan(struct ast_device *ast) 192 - { 193 - u32 data, data2, patcnt, loop; 194 - 195 - data2 = 3; 196 - for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { 197 - ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); 198 - for (loop = 0; loop < CBR_PASSNUM2; loop++) { 199 - if ((data = cbr_test(ast)) != 0) { 200 - data2 &= data; 201 - if (!data2) 202 - return 0; 203 - break; 204 - } 205 - } 206 - if (loop == CBR_PASSNUM2) 207 - return 0; 208 - } 209 - return data2; 210 - } 211 - 212 - static u32 cbr_test2(struct ast_device *ast) 213 - { 214 - u32 data; 215 - 216 - data = mmc_test_burst2(ast, 0); 217 - if (data == 0xffff) 218 - return 0; 219 - data |= mmc_test_single2(ast, 0); 220 - if (data == 0xffff) 221 - return 0; 222 - 223 - return ~data & 0xffff; 224 - } 225 - 226 - static u32 cbr_scan2(struct ast_device *ast) 227 - { 228 - u32 data, data2, patcnt, loop; 229 - 230 - data2 = 0xffff; 231 - for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { 232 - ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); 233 - for (loop = 0; loop < CBR_PASSNUM2; loop++) { 234 - if ((data = cbr_test2(ast)) != 0) { 235 - data2 &= data; 236 - if (!data2) 237 - return 0; 238 - break; 239 - } 240 - } 241 - if (loop == CBR_PASSNUM2) 242 - return 0; 243 - } 244 - return data2; 245 - } 246 - 247 - static bool cbr_test3(struct ast_device *ast) 248 - { 249 - if (!mmc_test_burst(ast, 0)) 250 - return false; 251 - if (!mmc_test_single(ast, 0)) 252 - return false; 253 - return true; 254 - } 255 - 256 - static bool cbr_scan3(struct ast_device *ast) 257 - { 258 - u32 patcnt, loop; 259 - 260 - for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { 261 - ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); 262 - for (loop = 0; loop < 2; loop++) { 263 - if (cbr_test3(ast)) 264 - break; 265 - } 266 - if (loop == 2) 267 - return false; 268 - } 269 - return true; 270 - } 271 - 272 - static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param) 273 - { 274 - u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, 
data, cnt, mask, passcnt, retry = 0; 275 - bool status = false; 276 - FINETUNE_START: 277 - for (cnt = 0; cnt < 16; cnt++) { 278 - dllmin[cnt] = 0xff; 279 - dllmax[cnt] = 0x0; 280 - } 281 - passcnt = 0; 282 - for (dlli = 0; dlli < 76; dlli++) { 283 - ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24)); 284 - ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1); 285 - data = cbr_scan2(ast); 286 - if (data != 0) { 287 - mask = 0x00010001; 288 - for (cnt = 0; cnt < 16; cnt++) { 289 - if (data & mask) { 290 - if (dllmin[cnt] > dlli) { 291 - dllmin[cnt] = dlli; 292 - } 293 - if (dllmax[cnt] < dlli) { 294 - dllmax[cnt] = dlli; 295 - } 296 - } 297 - mask <<= 1; 298 - } 299 - passcnt++; 300 - } else if (passcnt >= CBR_THRESHOLD2) { 301 - break; 302 - } 303 - } 304 - gold_sadj[0] = 0x0; 305 - passcnt = 0; 306 - for (cnt = 0; cnt < 16; cnt++) { 307 - if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { 308 - gold_sadj[0] += dllmin[cnt]; 309 - passcnt++; 310 - } 311 - } 312 - if (retry++ > 10) 313 - goto FINETUNE_DONE; 314 - if (passcnt != 16) { 315 - goto FINETUNE_START; 316 - } 317 - status = true; 318 - FINETUNE_DONE: 319 - gold_sadj[0] = gold_sadj[0] >> 4; 320 - gold_sadj[1] = gold_sadj[0]; 321 - 322 - data = 0; 323 - for (cnt = 0; cnt < 8; cnt++) { 324 - data >>= 3; 325 - if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { 326 - dlli = dllmin[cnt]; 327 - if (gold_sadj[0] >= dlli) { 328 - dlli = ((gold_sadj[0] - dlli) * 19) >> 5; 329 - if (dlli > 3) { 330 - dlli = 3; 331 - } 332 - } else { 333 - dlli = ((dlli - gold_sadj[0]) * 19) >> 5; 334 - if (dlli > 4) { 335 - dlli = 4; 336 - } 337 - dlli = (8 - dlli) & 0x7; 338 - } 339 - data |= dlli << 21; 340 - } 341 - } 342 - ast_moutdwm(ast, 0x1E6E0080, data); 343 - 344 - data = 0; 345 - for (cnt = 8; cnt < 16; cnt++) { 346 - data >>= 3; 347 - if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { 348 - dlli = dllmin[cnt]; 349 
- if (gold_sadj[1] >= dlli) { 350 - dlli = ((gold_sadj[1] - dlli) * 19) >> 5; 351 - if (dlli > 3) { 352 - dlli = 3; 353 - } else { 354 - dlli = (dlli - 1) & 0x7; 355 - } 356 - } else { 357 - dlli = ((dlli - gold_sadj[1]) * 19) >> 5; 358 - dlli += 1; 359 - if (dlli > 4) { 360 - dlli = 4; 361 - } 362 - dlli = (8 - dlli) & 0x7; 363 - } 364 - data |= dlli << 21; 365 - } 366 - } 367 - ast_moutdwm(ast, 0x1E6E0084, data); 368 - return status; 369 - } /* finetuneDQI_L */ 370 - 371 - static void finetuneDQSI(struct ast_device *ast) 372 - { 373 - u32 dlli, dqsip, dqidly; 374 - u32 reg_mcr18, reg_mcr0c, passcnt[2], diff; 375 - u32 g_dqidly, g_dqsip, g_margin, g_side; 376 - u16 pass[32][2][2]; 377 - char tag[2][76]; 378 - 379 - /* Disable DQI CBR */ 380 - reg_mcr0c = ast_mindwm(ast, 0x1E6E000C); 381 - reg_mcr18 = ast_mindwm(ast, 0x1E6E0018); 382 - reg_mcr18 &= 0x0000ffff; 383 - ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); 384 - 385 - for (dlli = 0; dlli < 76; dlli++) { 386 - tag[0][dlli] = 0x0; 387 - tag[1][dlli] = 0x0; 388 - } 389 - for (dqidly = 0; dqidly < 32; dqidly++) { 390 - pass[dqidly][0][0] = 0xff; 391 - pass[dqidly][0][1] = 0x0; 392 - pass[dqidly][1][0] = 0xff; 393 - pass[dqidly][1][1] = 0x0; 394 - } 395 - for (dqidly = 0; dqidly < 32; dqidly++) { 396 - passcnt[0] = passcnt[1] = 0; 397 - for (dqsip = 0; dqsip < 2; dqsip++) { 398 - ast_moutdwm(ast, 0x1E6E000C, 0); 399 - ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23)); 400 - ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c); 401 - for (dlli = 0; dlli < 76; dlli++) { 402 - ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24)); 403 - ast_moutdwm(ast, 0x1E6E0070, 0); 404 - ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0); 405 - if (cbr_scan3(ast)) { 406 - if (dlli == 0) 407 - break; 408 - passcnt[dqsip]++; 409 - tag[dqsip][dlli] = 'P'; 410 - if (dlli < pass[dqidly][dqsip][0]) 411 - pass[dqidly][dqsip][0] = (u16) dlli; 412 - if (dlli > pass[dqidly][dqsip][1]) 413 - pass[dqidly][dqsip][1] = (u16) dlli; 
414 - } else if (passcnt[dqsip] >= 5) 415 - break; 416 - else { 417 - pass[dqidly][dqsip][0] = 0xff; 418 - pass[dqidly][dqsip][1] = 0x0; 419 - } 420 - } 421 - } 422 - if (passcnt[0] == 0 && passcnt[1] == 0) 423 - dqidly++; 424 - } 425 - /* Search margin */ 426 - g_dqidly = g_dqsip = g_margin = g_side = 0; 427 - 428 - for (dqidly = 0; dqidly < 32; dqidly++) { 429 - for (dqsip = 0; dqsip < 2; dqsip++) { 430 - if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1]) 431 - continue; 432 - diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0]; 433 - if ((diff+2) < g_margin) 434 - continue; 435 - passcnt[0] = passcnt[1] = 0; 436 - for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++); 437 - for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++); 438 - if (passcnt[0] > passcnt[1]) 439 - passcnt[0] = passcnt[1]; 440 - passcnt[1] = 0; 441 - if (passcnt[0] > g_side) 442 - passcnt[1] = passcnt[0] - g_side; 443 - if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) { 444 - g_margin = diff; 445 - g_dqidly = dqidly; 446 - g_dqsip = dqsip; 447 - g_side = passcnt[0]; 448 - } else if (passcnt[1] > 1 && g_side < 8) { 449 - if (diff > g_margin) 450 - g_margin = diff; 451 - g_dqidly = dqidly; 452 - g_dqsip = dqsip; 453 - g_side = passcnt[0]; 454 - } 455 - } 456 - } 457 - reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23); 458 - ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); 459 - 460 - } 461 - static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param) 462 - { 463 - u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0; 464 - bool status = false; 465 - 466 - finetuneDQSI(ast); 467 - if (finetuneDQI_L(ast, param) == false) 468 - return status; 469 - 470 - CBR_START2: 471 - dllmin[0] = dllmin[1] = 0xff; 472 - dllmax[0] = dllmax[1] = 0x0; 473 - passcnt = 0; 474 - for (dlli = 0; dlli < 76; dlli++) { 475 - ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24)); 476 - 
ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2); 477 - data = cbr_scan(ast); 478 - if (data != 0) { 479 - if (data & 0x1) { 480 - if (dllmin[0] > dlli) { 481 - dllmin[0] = dlli; 482 - } 483 - if (dllmax[0] < dlli) { 484 - dllmax[0] = dlli; 485 - } 486 - } 487 - if (data & 0x2) { 488 - if (dllmin[1] > dlli) { 489 - dllmin[1] = dlli; 490 - } 491 - if (dllmax[1] < dlli) { 492 - dllmax[1] = dlli; 493 - } 494 - } 495 - passcnt++; 496 - } else if (passcnt >= CBR_THRESHOLD) { 497 - break; 498 - } 499 - } 500 - if (retry++ > 10) 501 - goto CBR_DONE2; 502 - if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) { 503 - goto CBR_START2; 504 - } 505 - if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) { 506 - goto CBR_START2; 507 - } 508 - status = true; 509 - CBR_DONE2: 510 - dlli = (dllmin[1] + dllmax[1]) >> 1; 511 - dlli <<= 8; 512 - dlli += (dllmin[0] + dllmax[0]) >> 1; 513 - ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16)); 514 - return status; 515 - } /* CBRDLL2 */ 516 - 517 - static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param) 518 - { 519 - u32 trap, trap_AC2, trap_MRS; 520 - 521 - ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); 522 - 523 - /* Ger trap info */ 524 - trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; 525 - trap_AC2 = 0x00020000 + (trap << 16); 526 - trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19); 527 - trap_MRS = 0x00000010 + (trap << 4); 528 - trap_MRS |= ((trap & 0x2) << 18); 529 - 530 - param->reg_MADJ = 0x00034C4C; 531 - param->reg_SADJ = 0x00001800; 532 - param->reg_DRV = 0x000000F0; 533 - param->reg_PERIOD = param->dram_freq; 534 - param->rodt = 0; 535 - 536 - switch (param->dram_freq) { 537 - case 336: 538 - ast_moutdwm(ast, 0x1E6E2020, 0x0190); 539 - param->wodt = 0; 540 - param->reg_AC1 = 0x22202725; 541 - param->reg_AC2 = 0xAA007613 | trap_AC2; 542 - param->reg_DQSIC = 0x000000BA; 543 - param->reg_MRS = 0x04001400 | trap_MRS; 544 - param->reg_EMRS = 0x00000000; 545 - param->reg_IOZ = 
0x00000023; 546 - param->reg_DQIDLY = 0x00000074; 547 - param->reg_FREQ = 0x00004DC0; 548 - param->madj_max = 96; 549 - param->dll2_finetune_step = 3; 550 - switch (param->dram_chipid) { 551 - default: 552 - case AST_DRAM_512Mx16: 553 - case AST_DRAM_1Gx16: 554 - param->reg_AC2 = 0xAA007613 | trap_AC2; 555 - break; 556 - case AST_DRAM_2Gx16: 557 - param->reg_AC2 = 0xAA00761C | trap_AC2; 558 - break; 559 - case AST_DRAM_4Gx16: 560 - param->reg_AC2 = 0xAA007636 | trap_AC2; 561 - break; 562 - } 563 - break; 564 - default: 565 - case 396: 566 - ast_moutdwm(ast, 0x1E6E2020, 0x03F1); 567 - param->wodt = 1; 568 - param->reg_AC1 = 0x33302825; 569 - param->reg_AC2 = 0xCC009617 | trap_AC2; 570 - param->reg_DQSIC = 0x000000E2; 571 - param->reg_MRS = 0x04001600 | trap_MRS; 572 - param->reg_EMRS = 0x00000000; 573 - param->reg_IOZ = 0x00000034; 574 - param->reg_DRV = 0x000000FA; 575 - param->reg_DQIDLY = 0x00000089; 576 - param->reg_FREQ = 0x00005040; 577 - param->madj_max = 96; 578 - param->dll2_finetune_step = 4; 579 - 580 - switch (param->dram_chipid) { 581 - default: 582 - case AST_DRAM_512Mx16: 583 - case AST_DRAM_1Gx16: 584 - param->reg_AC2 = 0xCC009617 | trap_AC2; 585 - break; 586 - case AST_DRAM_2Gx16: 587 - param->reg_AC2 = 0xCC009622 | trap_AC2; 588 - break; 589 - case AST_DRAM_4Gx16: 590 - param->reg_AC2 = 0xCC00963F | trap_AC2; 591 - break; 592 - } 593 - break; 594 - 595 - case 408: 596 - ast_moutdwm(ast, 0x1E6E2020, 0x01F0); 597 - param->wodt = 1; 598 - param->reg_AC1 = 0x33302825; 599 - param->reg_AC2 = 0xCC009617 | trap_AC2; 600 - param->reg_DQSIC = 0x000000E2; 601 - param->reg_MRS = 0x04001600 | trap_MRS; 602 - param->reg_EMRS = 0x00000000; 603 - param->reg_IOZ = 0x00000023; 604 - param->reg_DRV = 0x000000FA; 605 - param->reg_DQIDLY = 0x00000089; 606 - param->reg_FREQ = 0x000050C0; 607 - param->madj_max = 96; 608 - param->dll2_finetune_step = 4; 609 - 610 - switch (param->dram_chipid) { 611 - default: 612 - case AST_DRAM_512Mx16: 613 - case AST_DRAM_1Gx16: 614 - 
param->reg_AC2 = 0xCC009617 | trap_AC2; 615 - break; 616 - case AST_DRAM_2Gx16: 617 - param->reg_AC2 = 0xCC009622 | trap_AC2; 618 - break; 619 - case AST_DRAM_4Gx16: 620 - param->reg_AC2 = 0xCC00963F | trap_AC2; 621 - break; 622 - } 623 - 624 - break; 625 - case 456: 626 - ast_moutdwm(ast, 0x1E6E2020, 0x0230); 627 - param->wodt = 0; 628 - param->reg_AC1 = 0x33302926; 629 - param->reg_AC2 = 0xCD44961A; 630 - param->reg_DQSIC = 0x000000FC; 631 - param->reg_MRS = 0x00081830; 632 - param->reg_EMRS = 0x00000000; 633 - param->reg_IOZ = 0x00000045; 634 - param->reg_DQIDLY = 0x00000097; 635 - param->reg_FREQ = 0x000052C0; 636 - param->madj_max = 88; 637 - param->dll2_finetune_step = 4; 638 - break; 639 - case 504: 640 - ast_moutdwm(ast, 0x1E6E2020, 0x0270); 641 - param->wodt = 1; 642 - param->reg_AC1 = 0x33302926; 643 - param->reg_AC2 = 0xDE44A61D; 644 - param->reg_DQSIC = 0x00000117; 645 - param->reg_MRS = 0x00081A30; 646 - param->reg_EMRS = 0x00000000; 647 - param->reg_IOZ = 0x070000BB; 648 - param->reg_DQIDLY = 0x000000A0; 649 - param->reg_FREQ = 0x000054C0; 650 - param->madj_max = 79; 651 - param->dll2_finetune_step = 4; 652 - break; 653 - case 528: 654 - ast_moutdwm(ast, 0x1E6E2020, 0x0290); 655 - param->wodt = 1; 656 - param->rodt = 1; 657 - param->reg_AC1 = 0x33302926; 658 - param->reg_AC2 = 0xEF44B61E; 659 - param->reg_DQSIC = 0x00000125; 660 - param->reg_MRS = 0x00081A30; 661 - param->reg_EMRS = 0x00000040; 662 - param->reg_DRV = 0x000000F5; 663 - param->reg_IOZ = 0x00000023; 664 - param->reg_DQIDLY = 0x00000088; 665 - param->reg_FREQ = 0x000055C0; 666 - param->madj_max = 76; 667 - param->dll2_finetune_step = 3; 668 - break; 669 - case 576: 670 - ast_moutdwm(ast, 0x1E6E2020, 0x0140); 671 - param->reg_MADJ = 0x00136868; 672 - param->reg_SADJ = 0x00004534; 673 - param->wodt = 1; 674 - param->rodt = 1; 675 - param->reg_AC1 = 0x33302A37; 676 - param->reg_AC2 = 0xEF56B61E; 677 - param->reg_DQSIC = 0x0000013F; 678 - param->reg_MRS = 0x00101A50; 679 - param->reg_EMRS = 
0x00000040; 680 - param->reg_DRV = 0x000000FA; 681 - param->reg_IOZ = 0x00000023; 682 - param->reg_DQIDLY = 0x00000078; 683 - param->reg_FREQ = 0x000057C0; 684 - param->madj_max = 136; 685 - param->dll2_finetune_step = 3; 686 - break; 687 - case 600: 688 - ast_moutdwm(ast, 0x1E6E2020, 0x02E1); 689 - param->reg_MADJ = 0x00136868; 690 - param->reg_SADJ = 0x00004534; 691 - param->wodt = 1; 692 - param->rodt = 1; 693 - param->reg_AC1 = 0x32302A37; 694 - param->reg_AC2 = 0xDF56B61F; 695 - param->reg_DQSIC = 0x0000014D; 696 - param->reg_MRS = 0x00101A50; 697 - param->reg_EMRS = 0x00000004; 698 - param->reg_DRV = 0x000000F5; 699 - param->reg_IOZ = 0x00000023; 700 - param->reg_DQIDLY = 0x00000078; 701 - param->reg_FREQ = 0x000058C0; 702 - param->madj_max = 132; 703 - param->dll2_finetune_step = 3; 704 - break; 705 - case 624: 706 - ast_moutdwm(ast, 0x1E6E2020, 0x0160); 707 - param->reg_MADJ = 0x00136868; 708 - param->reg_SADJ = 0x00004534; 709 - param->wodt = 1; 710 - param->rodt = 1; 711 - param->reg_AC1 = 0x32302A37; 712 - param->reg_AC2 = 0xEF56B621; 713 - param->reg_DQSIC = 0x0000015A; 714 - param->reg_MRS = 0x02101A50; 715 - param->reg_EMRS = 0x00000004; 716 - param->reg_DRV = 0x000000F5; 717 - param->reg_IOZ = 0x00000034; 718 - param->reg_DQIDLY = 0x00000078; 719 - param->reg_FREQ = 0x000059C0; 720 - param->madj_max = 128; 721 - param->dll2_finetune_step = 3; 722 - break; 723 - } /* switch freq */ 724 - 725 - switch (param->dram_chipid) { 726 - case AST_DRAM_512Mx16: 727 - param->dram_config = 0x130; 728 - break; 729 - default: 730 - case AST_DRAM_1Gx16: 731 - param->dram_config = 0x131; 732 - break; 733 - case AST_DRAM_2Gx16: 734 - param->dram_config = 0x132; 735 - break; 736 - case AST_DRAM_4Gx16: 737 - param->dram_config = 0x133; 738 - break; 739 - } /* switch size */ 740 - 741 - switch (param->vram_size) { 742 - default: 743 - case SZ_8M: 744 - param->dram_config |= 0x00; 745 - break; 746 - case SZ_16M: 747 - param->dram_config |= 0x04; 748 - break; 749 - case 
SZ_32M: 750 - param->dram_config |= 0x08; 751 - break; 752 - case SZ_64M: 753 - param->dram_config |= 0x0c; 754 - break; 755 - } 756 - 757 - } 758 - 759 - static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param) 760 - { 761 - u32 data, data2, retry = 0; 762 - 763 - ddr3_init_start: 764 - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); 765 - ast_moutdwm(ast, 0x1E6E0018, 0x00000100); 766 - ast_moutdwm(ast, 0x1E6E0024, 0x00000000); 767 - ast_moutdwm(ast, 0x1E6E0034, 0x00000000); 768 - udelay(10); 769 - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); 770 - ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); 771 - udelay(10); 772 - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); 773 - udelay(10); 774 - 775 - ast_moutdwm(ast, 0x1E6E0004, param->dram_config); 776 - ast_moutdwm(ast, 0x1E6E0008, 0x90040f); 777 - ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); 778 - ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); 779 - ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); 780 - ast_moutdwm(ast, 0x1E6E0080, 0x00000000); 781 - ast_moutdwm(ast, 0x1E6E0084, 0x00000000); 782 - ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); 783 - ast_moutdwm(ast, 0x1E6E0018, 0x4000A170); 784 - ast_moutdwm(ast, 0x1E6E0018, 0x00002370); 785 - ast_moutdwm(ast, 0x1E6E0038, 0x00000000); 786 - ast_moutdwm(ast, 0x1E6E0040, 0xFF444444); 787 - ast_moutdwm(ast, 0x1E6E0044, 0x22222222); 788 - ast_moutdwm(ast, 0x1E6E0048, 0x22222222); 789 - ast_moutdwm(ast, 0x1E6E004C, 0x00000002); 790 - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); 791 - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); 792 - ast_moutdwm(ast, 0x1E6E0054, 0); 793 - ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); 794 - ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); 795 - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); 796 - ast_moutdwm(ast, 0x1E6E0074, 0x00000000); 797 - ast_moutdwm(ast, 0x1E6E0078, 0x00000000); 798 - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); 799 - /* Wait MCLK2X lock to MCLK */ 800 - do { 801 - data = ast_mindwm(ast, 0x1E6E001C); 802 
- } while (!(data & 0x08000000)); 803 - data = ast_mindwm(ast, 0x1E6E001C); 804 - data = (data >> 8) & 0xff; 805 - while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { 806 - data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; 807 - if ((data2 & 0xff) > param->madj_max) { 808 - break; 809 - } 810 - ast_moutdwm(ast, 0x1E6E0064, data2); 811 - if (data2 & 0x00100000) { 812 - data2 = ((data2 & 0xff) >> 3) + 3; 813 - } else { 814 - data2 = ((data2 & 0xff) >> 2) + 5; 815 - } 816 - data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; 817 - data2 += data & 0xff; 818 - data = data | (data2 << 8); 819 - ast_moutdwm(ast, 0x1E6E0068, data); 820 - udelay(10); 821 - ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); 822 - udelay(10); 823 - data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; 824 - ast_moutdwm(ast, 0x1E6E0018, data); 825 - data = data | 0x200; 826 - ast_moutdwm(ast, 0x1E6E0018, data); 827 - do { 828 - data = ast_mindwm(ast, 0x1E6E001C); 829 - } while (!(data & 0x08000000)); 830 - 831 - data = ast_mindwm(ast, 0x1E6E001C); 832 - data = (data >> 8) & 0xff; 833 - } 834 - ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff); 835 - data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; 836 - ast_moutdwm(ast, 0x1E6E0018, data); 837 - 838 - ast_moutdwm(ast, 0x1E6E0034, 0x00000001); 839 - ast_moutdwm(ast, 0x1E6E000C, 0x00000040); 840 - udelay(50); 841 - /* Mode Register Setting */ 842 - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); 843 - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); 844 - ast_moutdwm(ast, 0x1E6E0028, 0x00000005); 845 - ast_moutdwm(ast, 0x1E6E0028, 0x00000007); 846 - ast_moutdwm(ast, 0x1E6E0028, 0x00000003); 847 - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); 848 - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); 849 - ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); 850 - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); 851 - 852 - ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); 853 - data = 0; 854 - if (param->wodt) { 855 - data = 0x300; 856 - } 857 
- if (param->rodt) { 858 - data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); 859 - } 860 - ast_moutdwm(ast, 0x1E6E0034, data | 0x3); 861 - 862 - /* Calibrate the DQSI delay */ 863 - if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) 864 - goto ddr3_init_start; 865 - 866 - ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); 867 - /* ECC Memory Initialization */ 868 - #ifdef ECC 869 - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); 870 - ast_moutdwm(ast, 0x1E6E0070, 0x221); 871 - do { 872 - data = ast_mindwm(ast, 0x1E6E0070); 873 - } while (!(data & 0x00001000)); 874 - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); 875 - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); 876 - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); 877 - #endif 878 - 879 - 880 - } 881 - 882 - static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param) 883 - { 884 - u32 trap, trap_AC2, trap_MRS; 885 - 886 - ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); 887 - 888 - /* Ger trap info */ 889 - trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; 890 - trap_AC2 = (trap << 20) | (trap << 16); 891 - trap_AC2 += 0x00110000; 892 - trap_MRS = 0x00000040 | (trap << 4); 893 - 894 - 895 - param->reg_MADJ = 0x00034C4C; 896 - param->reg_SADJ = 0x00001800; 897 - param->reg_DRV = 0x000000F0; 898 - param->reg_PERIOD = param->dram_freq; 899 - param->rodt = 0; 900 - 901 - switch (param->dram_freq) { 902 - case 264: 903 - ast_moutdwm(ast, 0x1E6E2020, 0x0130); 904 - param->wodt = 0; 905 - param->reg_AC1 = 0x11101513; 906 - param->reg_AC2 = 0x78117011; 907 - param->reg_DQSIC = 0x00000092; 908 - param->reg_MRS = 0x00000842; 909 - param->reg_EMRS = 0x00000000; 910 - param->reg_DRV = 0x000000F0; 911 - param->reg_IOZ = 0x00000034; 912 - param->reg_DQIDLY = 0x0000005A; 913 - param->reg_FREQ = 0x00004AC0; 914 - param->madj_max = 138; 915 - param->dll2_finetune_step = 3; 916 - break; 917 - case 336: 918 - ast_moutdwm(ast, 0x1E6E2020, 0x0190); 919 - param->wodt = 1; 920 - param->reg_AC1 = 0x22202613; 921 - param->reg_AC2 = 
0xAA009016 | trap_AC2; 922 - param->reg_DQSIC = 0x000000BA; 923 - param->reg_MRS = 0x00000A02 | trap_MRS; 924 - param->reg_EMRS = 0x00000040; 925 - param->reg_DRV = 0x000000FA; 926 - param->reg_IOZ = 0x00000034; 927 - param->reg_DQIDLY = 0x00000074; 928 - param->reg_FREQ = 0x00004DC0; 929 - param->madj_max = 96; 930 - param->dll2_finetune_step = 3; 931 - switch (param->dram_chipid) { 932 - default: 933 - case AST_DRAM_512Mx16: 934 - param->reg_AC2 = 0xAA009012 | trap_AC2; 935 - break; 936 - case AST_DRAM_1Gx16: 937 - param->reg_AC2 = 0xAA009016 | trap_AC2; 938 - break; 939 - case AST_DRAM_2Gx16: 940 - param->reg_AC2 = 0xAA009023 | trap_AC2; 941 - break; 942 - case AST_DRAM_4Gx16: 943 - param->reg_AC2 = 0xAA00903B | trap_AC2; 944 - break; 945 - } 946 - break; 947 - default: 948 - case 396: 949 - ast_moutdwm(ast, 0x1E6E2020, 0x03F1); 950 - param->wodt = 1; 951 - param->rodt = 0; 952 - param->reg_AC1 = 0x33302714; 953 - param->reg_AC2 = 0xCC00B01B | trap_AC2; 954 - param->reg_DQSIC = 0x000000E2; 955 - param->reg_MRS = 0x00000C02 | trap_MRS; 956 - param->reg_EMRS = 0x00000040; 957 - param->reg_DRV = 0x000000FA; 958 - param->reg_IOZ = 0x00000034; 959 - param->reg_DQIDLY = 0x00000089; 960 - param->reg_FREQ = 0x00005040; 961 - param->madj_max = 96; 962 - param->dll2_finetune_step = 4; 963 - 964 - switch (param->dram_chipid) { 965 - case AST_DRAM_512Mx16: 966 - param->reg_AC2 = 0xCC00B016 | trap_AC2; 967 - break; 968 - default: 969 - case AST_DRAM_1Gx16: 970 - param->reg_AC2 = 0xCC00B01B | trap_AC2; 971 - break; 972 - case AST_DRAM_2Gx16: 973 - param->reg_AC2 = 0xCC00B02B | trap_AC2; 974 - break; 975 - case AST_DRAM_4Gx16: 976 - param->reg_AC2 = 0xCC00B03F | trap_AC2; 977 - break; 978 - } 979 - 980 - break; 981 - 982 - case 408: 983 - ast_moutdwm(ast, 0x1E6E2020, 0x01F0); 984 - param->wodt = 1; 985 - param->rodt = 0; 986 - param->reg_AC1 = 0x33302714; 987 - param->reg_AC2 = 0xCC00B01B | trap_AC2; 988 - param->reg_DQSIC = 0x000000E2; 989 - param->reg_MRS = 0x00000C02 | 
trap_MRS; 990 - param->reg_EMRS = 0x00000040; 991 - param->reg_DRV = 0x000000FA; 992 - param->reg_IOZ = 0x00000034; 993 - param->reg_DQIDLY = 0x00000089; 994 - param->reg_FREQ = 0x000050C0; 995 - param->madj_max = 96; 996 - param->dll2_finetune_step = 4; 997 - 998 - switch (param->dram_chipid) { 999 - case AST_DRAM_512Mx16: 1000 - param->reg_AC2 = 0xCC00B016 | trap_AC2; 1001 - break; 1002 - default: 1003 - case AST_DRAM_1Gx16: 1004 - param->reg_AC2 = 0xCC00B01B | trap_AC2; 1005 - break; 1006 - case AST_DRAM_2Gx16: 1007 - param->reg_AC2 = 0xCC00B02B | trap_AC2; 1008 - break; 1009 - case AST_DRAM_4Gx16: 1010 - param->reg_AC2 = 0xCC00B03F | trap_AC2; 1011 - break; 1012 - } 1013 - 1014 - break; 1015 - case 456: 1016 - ast_moutdwm(ast, 0x1E6E2020, 0x0230); 1017 - param->wodt = 0; 1018 - param->reg_AC1 = 0x33302815; 1019 - param->reg_AC2 = 0xCD44B01E; 1020 - param->reg_DQSIC = 0x000000FC; 1021 - param->reg_MRS = 0x00000E72; 1022 - param->reg_EMRS = 0x00000000; 1023 - param->reg_DRV = 0x00000000; 1024 - param->reg_IOZ = 0x00000034; 1025 - param->reg_DQIDLY = 0x00000097; 1026 - param->reg_FREQ = 0x000052C0; 1027 - param->madj_max = 88; 1028 - param->dll2_finetune_step = 3; 1029 - break; 1030 - case 504: 1031 - ast_moutdwm(ast, 0x1E6E2020, 0x0261); 1032 - param->wodt = 1; 1033 - param->rodt = 1; 1034 - param->reg_AC1 = 0x33302815; 1035 - param->reg_AC2 = 0xDE44C022; 1036 - param->reg_DQSIC = 0x00000117; 1037 - param->reg_MRS = 0x00000E72; 1038 - param->reg_EMRS = 0x00000040; 1039 - param->reg_DRV = 0x0000000A; 1040 - param->reg_IOZ = 0x00000045; 1041 - param->reg_DQIDLY = 0x000000A0; 1042 - param->reg_FREQ = 0x000054C0; 1043 - param->madj_max = 79; 1044 - param->dll2_finetune_step = 3; 1045 - break; 1046 - case 528: 1047 - ast_moutdwm(ast, 0x1E6E2020, 0x0120); 1048 - param->wodt = 1; 1049 - param->rodt = 1; 1050 - param->reg_AC1 = 0x33302815; 1051 - param->reg_AC2 = 0xEF44D024; 1052 - param->reg_DQSIC = 0x00000125; 1053 - param->reg_MRS = 0x00000E72; 1054 - param->reg_EMRS 
= 0x00000004; 1055 - param->reg_DRV = 0x000000F9; 1056 - param->reg_IOZ = 0x00000045; 1057 - param->reg_DQIDLY = 0x000000A7; 1058 - param->reg_FREQ = 0x000055C0; 1059 - param->madj_max = 76; 1060 - param->dll2_finetune_step = 3; 1061 - break; 1062 - case 552: 1063 - ast_moutdwm(ast, 0x1E6E2020, 0x02A1); 1064 - param->wodt = 1; 1065 - param->rodt = 1; 1066 - param->reg_AC1 = 0x43402915; 1067 - param->reg_AC2 = 0xFF44E025; 1068 - param->reg_DQSIC = 0x00000132; 1069 - param->reg_MRS = 0x00000E72; 1070 - param->reg_EMRS = 0x00000040; 1071 - param->reg_DRV = 0x0000000A; 1072 - param->reg_IOZ = 0x00000045; 1073 - param->reg_DQIDLY = 0x000000AD; 1074 - param->reg_FREQ = 0x000056C0; 1075 - param->madj_max = 76; 1076 - param->dll2_finetune_step = 3; 1077 - break; 1078 - case 576: 1079 - ast_moutdwm(ast, 0x1E6E2020, 0x0140); 1080 - param->wodt = 1; 1081 - param->rodt = 1; 1082 - param->reg_AC1 = 0x43402915; 1083 - param->reg_AC2 = 0xFF44E027; 1084 - param->reg_DQSIC = 0x0000013F; 1085 - param->reg_MRS = 0x00000E72; 1086 - param->reg_EMRS = 0x00000004; 1087 - param->reg_DRV = 0x000000F5; 1088 - param->reg_IOZ = 0x00000045; 1089 - param->reg_DQIDLY = 0x000000B3; 1090 - param->reg_FREQ = 0x000057C0; 1091 - param->madj_max = 76; 1092 - param->dll2_finetune_step = 3; 1093 - break; 1094 - } 1095 - 1096 - switch (param->dram_chipid) { 1097 - case AST_DRAM_512Mx16: 1098 - param->dram_config = 0x100; 1099 - break; 1100 - default: 1101 - case AST_DRAM_1Gx16: 1102 - param->dram_config = 0x121; 1103 - break; 1104 - case AST_DRAM_2Gx16: 1105 - param->dram_config = 0x122; 1106 - break; 1107 - case AST_DRAM_4Gx16: 1108 - param->dram_config = 0x123; 1109 - break; 1110 - } /* switch size */ 1111 - 1112 - switch (param->vram_size) { 1113 - default: 1114 - case SZ_8M: 1115 - param->dram_config |= 0x00; 1116 - break; 1117 - case SZ_16M: 1118 - param->dram_config |= 0x04; 1119 - break; 1120 - case SZ_32M: 1121 - param->dram_config |= 0x08; 1122 - break; 1123 - case SZ_64M: 1124 - 
param->dram_config |= 0x0c; 1125 - break; 1126 - } 1127 - } 1128 - 1129 - static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param) 1130 - { 1131 - u32 data, data2, retry = 0; 1132 - 1133 - ddr2_init_start: 1134 - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); 1135 - ast_moutdwm(ast, 0x1E6E0018, 0x00000100); 1136 - ast_moutdwm(ast, 0x1E6E0024, 0x00000000); 1137 - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); 1138 - ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); 1139 - udelay(10); 1140 - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); 1141 - udelay(10); 1142 - 1143 - ast_moutdwm(ast, 0x1E6E0004, param->dram_config); 1144 - ast_moutdwm(ast, 0x1E6E0008, 0x90040f); 1145 - ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); 1146 - ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); 1147 - ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); 1148 - ast_moutdwm(ast, 0x1E6E0080, 0x00000000); 1149 - ast_moutdwm(ast, 0x1E6E0084, 0x00000000); 1150 - ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); 1151 - ast_moutdwm(ast, 0x1E6E0018, 0x4000A130); 1152 - ast_moutdwm(ast, 0x1E6E0018, 0x00002330); 1153 - ast_moutdwm(ast, 0x1E6E0038, 0x00000000); 1154 - ast_moutdwm(ast, 0x1E6E0040, 0xFF808000); 1155 - ast_moutdwm(ast, 0x1E6E0044, 0x88848466); 1156 - ast_moutdwm(ast, 0x1E6E0048, 0x44440008); 1157 - ast_moutdwm(ast, 0x1E6E004C, 0x00000000); 1158 - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); 1159 - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); 1160 - ast_moutdwm(ast, 0x1E6E0054, 0); 1161 - ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); 1162 - ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); 1163 - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); 1164 - ast_moutdwm(ast, 0x1E6E0074, 0x00000000); 1165 - ast_moutdwm(ast, 0x1E6E0078, 0x00000000); 1166 - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); 1167 - 1168 - /* Wait MCLK2X lock to MCLK */ 1169 - do { 1170 - data = ast_mindwm(ast, 0x1E6E001C); 1171 - } while (!(data & 0x08000000)); 1172 - data = ast_mindwm(ast, 0x1E6E001C); 1173 - data = (data >> 
8) & 0xff; 1174 - while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { 1175 - data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; 1176 - if ((data2 & 0xff) > param->madj_max) { 1177 - break; 1178 - } 1179 - ast_moutdwm(ast, 0x1E6E0064, data2); 1180 - if (data2 & 0x00100000) { 1181 - data2 = ((data2 & 0xff) >> 3) + 3; 1182 - } else { 1183 - data2 = ((data2 & 0xff) >> 2) + 5; 1184 - } 1185 - data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; 1186 - data2 += data & 0xff; 1187 - data = data | (data2 << 8); 1188 - ast_moutdwm(ast, 0x1E6E0068, data); 1189 - udelay(10); 1190 - ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); 1191 - udelay(10); 1192 - data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; 1193 - ast_moutdwm(ast, 0x1E6E0018, data); 1194 - data = data | 0x200; 1195 - ast_moutdwm(ast, 0x1E6E0018, data); 1196 - do { 1197 - data = ast_mindwm(ast, 0x1E6E001C); 1198 - } while (!(data & 0x08000000)); 1199 - 1200 - data = ast_mindwm(ast, 0x1E6E001C); 1201 - data = (data >> 8) & 0xff; 1202 - } 1203 - ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff); 1204 - data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; 1205 - ast_moutdwm(ast, 0x1E6E0018, data); 1206 - 1207 - ast_moutdwm(ast, 0x1E6E0034, 0x00000001); 1208 - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); 1209 - udelay(50); 1210 - /* Mode Register Setting */ 1211 - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); 1212 - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); 1213 - ast_moutdwm(ast, 0x1E6E0028, 0x00000005); 1214 - ast_moutdwm(ast, 0x1E6E0028, 0x00000007); 1215 - ast_moutdwm(ast, 0x1E6E0028, 0x00000003); 1216 - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); 1217 - 1218 - ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); 1219 - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); 1220 - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); 1221 - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380); 1222 - ast_moutdwm(ast, 0x1E6E0028, 0x00000003); 1223 - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); 1224 - 
ast_moutdwm(ast, 0x1E6E0028, 0x00000003); 1225 - 1226 - ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01); 1227 - data = 0; 1228 - if (param->wodt) { 1229 - data = 0x500; 1230 - } 1231 - if (param->rodt) { 1232 - data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); 1233 - } 1234 - ast_moutdwm(ast, 0x1E6E0034, data | 0x3); 1235 - ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); 1236 - 1237 - /* Calibrate the DQSI delay */ 1238 - if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) 1239 - goto ddr2_init_start; 1240 - 1241 - /* ECC Memory Initialization */ 1242 - #ifdef ECC 1243 - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); 1244 - ast_moutdwm(ast, 0x1E6E0070, 0x221); 1245 - do { 1246 - data = ast_mindwm(ast, 0x1E6E0070); 1247 - } while (!(data & 0x00001000)); 1248 - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); 1249 - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); 1250 - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); 1251 - #endif 1252 - 1253 - } 1254 - 1255 - static void ast_post_chip_2300(struct ast_device *ast) 1256 - { 1257 - struct ast2300_dram_param param; 1258 - u32 temp; 1259 - u8 reg; 1260 - 1261 - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 1262 - if ((reg & 0x80) == 0) {/* vga only */ 1263 - ast_write32(ast, 0xf004, 0x1e6e0000); 1264 - ast_write32(ast, 0xf000, 0x1); 1265 - ast_write32(ast, 0x12000, 0x1688a8a8); 1266 - do { 1267 - ; 1268 - } while (ast_read32(ast, 0x12000) != 0x1); 1269 - 1270 - ast_write32(ast, 0x10000, 0xfc600309); 1271 - do { 1272 - ; 1273 - } while (ast_read32(ast, 0x10000) != 0x1); 1274 - 1275 - /* Slow down CPU/AHB CLK in VGA only mode */ 1276 - temp = ast_read32(ast, 0x12008); 1277 - temp |= 0x73; 1278 - ast_write32(ast, 0x12008, temp); 1279 - 1280 - param.dram_freq = 396; 1281 - param.dram_type = AST_DDR3; 1282 - temp = ast_mindwm(ast, 0x1e6e2070); 1283 - if (temp & 0x01000000) 1284 - param.dram_type = AST_DDR2; 1285 - switch (temp & 0x18000000) { 1286 - case 0: 1287 - param.dram_chipid = AST_DRAM_512Mx16; 1288 - break; 1289 - default: 
1290 - case 0x08000000: 1291 - param.dram_chipid = AST_DRAM_1Gx16; 1292 - break; 1293 - case 0x10000000: 1294 - param.dram_chipid = AST_DRAM_2Gx16; 1295 - break; 1296 - case 0x18000000: 1297 - param.dram_chipid = AST_DRAM_4Gx16; 1298 - break; 1299 - } 1300 - switch (temp & 0x0c) { 1301 - default: 1302 - case 0x00: 1303 - param.vram_size = SZ_8M; 1304 - break; 1305 - 1306 - case 0x04: 1307 - param.vram_size = SZ_16M; 1308 - break; 1309 - 1310 - case 0x08: 1311 - param.vram_size = SZ_32M; 1312 - break; 1313 - 1314 - case 0x0c: 1315 - param.vram_size = SZ_64M; 1316 - break; 1317 - } 1318 - 1319 - if (param.dram_type == AST_DDR3) { 1320 - get_ddr3_info(ast, &param); 1321 - ddr3_init(ast, &param); 1322 - } else { 1323 - get_ddr2_info(ast, &param); 1324 - ddr2_init(ast, &param); 1325 - } 1326 - 1327 - temp = ast_mindwm(ast, 0x1e6e2040); 1328 - ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); 1329 - } 1330 - 1331 - /* wait ready */ 1332 - do { 1333 - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 1334 - } while ((reg & 0x40) == 0); 1335 - } 1336 - 1337 - static bool cbr_test_2500(struct ast_device *ast) 1338 - { 1339 - ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); 1340 - ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); 1341 - if (!mmc_test_burst(ast, 0)) 1342 - return false; 1343 - if (!mmc_test_single_2500(ast, 0)) 1344 - return false; 1345 - return true; 1346 - } 1347 - 1348 - static bool ddr_test_2500(struct ast_device *ast) 1349 - { 1350 - ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); 1351 - ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); 1352 - if (!mmc_test_burst(ast, 0)) 1353 - return false; 1354 - if (!mmc_test_burst(ast, 1)) 1355 - return false; 1356 - if (!mmc_test_burst(ast, 2)) 1357 - return false; 1358 - if (!mmc_test_burst(ast, 3)) 1359 - return false; 1360 - if (!mmc_test_single_2500(ast, 0)) 1361 - return false; 1362 - return true; 1363 - } 1364 - 1365 - static void ddr_init_common_2500(struct ast_device *ast) 1366 - { 1367 - ast_moutdwm(ast, 0x1E6E0034, 0x00020080); 
1368 - ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); 1369 - ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); 1370 - ast_moutdwm(ast, 0x1E6E0040, 0x88448844); 1371 - ast_moutdwm(ast, 0x1E6E0044, 0x24422288); 1372 - ast_moutdwm(ast, 0x1E6E0048, 0x22222222); 1373 - ast_moutdwm(ast, 0x1E6E004C, 0x22222222); 1374 - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); 1375 - ast_moutdwm(ast, 0x1E6E0208, 0x00000000); 1376 - ast_moutdwm(ast, 0x1E6E0218, 0x00000000); 1377 - ast_moutdwm(ast, 0x1E6E0220, 0x00000000); 1378 - ast_moutdwm(ast, 0x1E6E0228, 0x00000000); 1379 - ast_moutdwm(ast, 0x1E6E0230, 0x00000000); 1380 - ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); 1381 - ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); 1382 - ast_moutdwm(ast, 0x1E6E0240, 0x86000000); 1383 - ast_moutdwm(ast, 0x1E6E0244, 0x00008600); 1384 - ast_moutdwm(ast, 0x1E6E0248, 0x80000000); 1385 - ast_moutdwm(ast, 0x1E6E024C, 0x80808080); 1386 - } 1387 - 1388 - static void ddr_phy_init_2500(struct ast_device *ast) 1389 - { 1390 - u32 data, pass, timecnt; 1391 - 1392 - pass = 0; 1393 - ast_moutdwm(ast, 0x1E6E0060, 0x00000005); 1394 - while (!pass) { 1395 - for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { 1396 - data = ast_mindwm(ast, 0x1E6E0060) & 0x1; 1397 - if (!data) 1398 - break; 1399 - } 1400 - if (timecnt != TIMEOUT) { 1401 - data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; 1402 - if (!data) 1403 - pass = 1; 1404 - } 1405 - if (!pass) { 1406 - ast_moutdwm(ast, 0x1E6E0060, 0x00000000); 1407 - udelay(10); /* delay 10 us */ 1408 - ast_moutdwm(ast, 0x1E6E0060, 0x00000005); 1409 - } 1410 - } 1411 - 1412 - ast_moutdwm(ast, 0x1E6E0060, 0x00000006); 1413 - } 1414 - 1415 - /* 1416 - * Check DRAM Size 1417 - * 1Gb : 0x80000000 ~ 0x87FFFFFF 1418 - * 2Gb : 0x80000000 ~ 0x8FFFFFFF 1419 - * 4Gb : 0x80000000 ~ 0x9FFFFFFF 1420 - * 8Gb : 0x80000000 ~ 0xBFFFFFFF 1421 - */ 1422 - static void check_dram_size_2500(struct ast_device *ast, u32 tRFC) 1423 - { 1424 - u32 reg_04, reg_14; 1425 - 1426 - reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; 
1427 - reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; 1428 - 1429 - ast_moutdwm(ast, 0xA0100000, 0x41424344); 1430 - ast_moutdwm(ast, 0x90100000, 0x35363738); 1431 - ast_moutdwm(ast, 0x88100000, 0x292A2B2C); 1432 - ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); 1433 - 1434 - /* Check 8Gbit */ 1435 - if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { 1436 - reg_04 |= 0x03; 1437 - reg_14 |= (tRFC >> 24) & 0xFF; 1438 - /* Check 4Gbit */ 1439 - } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { 1440 - reg_04 |= 0x02; 1441 - reg_14 |= (tRFC >> 16) & 0xFF; 1442 - /* Check 2Gbit */ 1443 - } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { 1444 - reg_04 |= 0x01; 1445 - reg_14 |= (tRFC >> 8) & 0xFF; 1446 - } else { 1447 - reg_14 |= tRFC & 0xFF; 1448 - } 1449 - ast_moutdwm(ast, 0x1E6E0004, reg_04); 1450 - ast_moutdwm(ast, 0x1E6E0014, reg_14); 1451 - } 1452 - 1453 - static void enable_cache_2500(struct ast_device *ast) 1454 - { 1455 - u32 reg_04, data; 1456 - 1457 - reg_04 = ast_mindwm(ast, 0x1E6E0004); 1458 - ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); 1459 - 1460 - do 1461 - data = ast_mindwm(ast, 0x1E6E0004); 1462 - while (!(data & 0x80000)); 1463 - ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); 1464 - } 1465 - 1466 - static void set_mpll_2500(struct ast_device *ast) 1467 - { 1468 - u32 addr, data, param; 1469 - 1470 - /* Reset MMC */ 1471 - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); 1472 - ast_moutdwm(ast, 0x1E6E0034, 0x00020080); 1473 - for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { 1474 - ast_moutdwm(ast, addr, 0x0); 1475 - addr += 4; 1476 - } 1477 - ast_moutdwm(ast, 0x1E6E0034, 0x00020000); 1478 - 1479 - ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); 1480 - data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; 1481 - if (data) { 1482 - /* CLKIN = 25MHz */ 1483 - param = 0x930023E0; 1484 - ast_moutdwm(ast, 0x1E6E2160, 0x00011320); 1485 - } else { 1486 - /* CLKIN = 24MHz */ 1487 - param = 0x93002400; 1488 - } 1489 - ast_moutdwm(ast, 0x1E6E2020, param); 1490 - udelay(100); 
1491 - } 1492 - 1493 - static void reset_mmc_2500(struct ast_device *ast) 1494 - { 1495 - ast_moutdwm(ast, 0x1E78505C, 0x00000004); 1496 - ast_moutdwm(ast, 0x1E785044, 0x00000001); 1497 - ast_moutdwm(ast, 0x1E785048, 0x00004755); 1498 - ast_moutdwm(ast, 0x1E78504C, 0x00000013); 1499 - mdelay(100); 1500 - ast_moutdwm(ast, 0x1E785054, 0x00000077); 1501 - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); 1502 - } 1503 - 1504 - static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table) 1505 - { 1506 - 1507 - ast_moutdwm(ast, 0x1E6E0004, 0x00000303); 1508 - ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); 1509 - ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); 1510 - ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); 1511 - ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ 1512 - ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ 1513 - ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ 1514 - ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ 1515 - 1516 - /* DDR PHY Setting */ 1517 - ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); 1518 - ast_moutdwm(ast, 0x1E6E0204, 0x00001001); 1519 - ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); 1520 - ast_moutdwm(ast, 0x1E6E0210, 0x20000000); 1521 - ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); 1522 - ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); 1523 - ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); 1524 - ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); 1525 - ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); 1526 - ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); 1527 - ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); 1528 - ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); 1529 - ast_moutdwm(ast, 0x1E6E0290, 0x00100008); 1530 - ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); 1531 - 1532 - /* Controller Setting */ 1533 - ast_moutdwm(ast, 0x1E6E0034, 0x00020091); 1534 - 1535 - /* Wait DDR PHY init done */ 1536 - 
ddr_phy_init_2500(ast); 1537 - 1538 - ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); 1539 - ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); 1540 - ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); 1541 - 1542 - check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); 1543 - enable_cache_2500(ast); 1544 - ast_moutdwm(ast, 0x1E6E001C, 0x00000008); 1545 - ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); 1546 - } 1547 - 1548 - static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table) 1549 - { 1550 - u32 data, data2, pass, retrycnt; 1551 - u32 ddr_vref, phy_vref; 1552 - u32 min_ddr_vref = 0, min_phy_vref = 0; 1553 - u32 max_ddr_vref = 0, max_phy_vref = 0; 1554 - 1555 - ast_moutdwm(ast, 0x1E6E0004, 0x00000313); 1556 - ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); 1557 - ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); 1558 - ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); 1559 - ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ 1560 - ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ 1561 - ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ 1562 - ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ 1563 - 1564 - /* DDR PHY Setting */ 1565 - ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); 1566 - ast_moutdwm(ast, 0x1E6E0204, 0x09002000); 1567 - ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); 1568 - ast_moutdwm(ast, 0x1E6E0210, 0x20000000); 1569 - ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); 1570 - ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); 1571 - ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); 1572 - ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); 1573 - ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); 1574 - ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); 1575 - ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); 1576 - ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); 1577 - ast_moutdwm(ast, 0x1E6E0290, 0x00100008); 1578 - ast_moutdwm(ast, 0x1E6E02C4, 
0x3C183C3C); 1579 - ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); 1580 - 1581 - /* Controller Setting */ 1582 - ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); 1583 - 1584 - /* Train PHY Vref first */ 1585 - pass = 0; 1586 - 1587 - for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { 1588 - max_phy_vref = 0x0; 1589 - pass = 0; 1590 - ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06); 1591 - for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { 1592 - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); 1593 - ast_moutdwm(ast, 0x1E6E0060, 0x00000000); 1594 - ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8)); 1595 - /* Fire DFI Init */ 1596 - ddr_phy_init_2500(ast); 1597 - ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); 1598 - if (cbr_test_2500(ast)) { 1599 - pass++; 1600 - data = ast_mindwm(ast, 0x1E6E03D0); 1601 - data2 = data >> 8; 1602 - data = data & 0xff; 1603 - if (data > data2) 1604 - data = data2; 1605 - if (max_phy_vref < data) { 1606 - max_phy_vref = data; 1607 - min_phy_vref = phy_vref; 1608 - } 1609 - } else if (pass > 0) 1610 - break; 1611 - } 1612 - } 1613 - ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); 1614 - 1615 - /* Train DDR Vref next */ 1616 - pass = 0; 1617 - 1618 - for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { 1619 - min_ddr_vref = 0xFF; 1620 - max_ddr_vref = 0x0; 1621 - pass = 0; 1622 - for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { 1623 - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); 1624 - ast_moutdwm(ast, 0x1E6E0060, 0x00000000); 1625 - ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); 1626 - /* Fire DFI Init */ 1627 - ddr_phy_init_2500(ast); 1628 - ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); 1629 - if (cbr_test_2500(ast)) { 1630 - pass++; 1631 - if (min_ddr_vref > ddr_vref) 1632 - min_ddr_vref = ddr_vref; 1633 - if (max_ddr_vref < ddr_vref) 1634 - max_ddr_vref = ddr_vref; 1635 - } else if (pass != 0) 1636 - break; 1637 - } 1638 - } 1639 - 1640 - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); 1641 - ast_moutdwm(ast, 
0x1E6E0060, 0x00000000); 1642 - ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1; 1643 - ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); 1644 - 1645 - /* Wait DDR PHY init done */ 1646 - ddr_phy_init_2500(ast); 1647 - 1648 - ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); 1649 - ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); 1650 - ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); 1651 - 1652 - check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); 1653 - enable_cache_2500(ast); 1654 - ast_moutdwm(ast, 0x1E6E001C, 0x00000008); 1655 - ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); 1656 - } 1657 - 1658 - static bool ast_dram_init_2500(struct ast_device *ast) 1659 - { 1660 - u32 data; 1661 - u32 max_tries = 5; 1662 - 1663 - do { 1664 - if (max_tries-- == 0) 1665 - return false; 1666 - set_mpll_2500(ast); 1667 - reset_mmc_2500(ast); 1668 - ddr_init_common_2500(ast); 1669 - 1670 - data = ast_mindwm(ast, 0x1E6E2070); 1671 - if (data & 0x01000000) 1672 - ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); 1673 - else 1674 - ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); 1675 - } while (!ddr_test_2500(ast)); 1676 - 1677 - ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); 1678 - 1679 - /* Patch code */ 1680 - data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; 1681 - ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); 1682 - 1683 - return true; 1684 - } 1685 - 1686 - void ast_patch_ahb_2500(void __iomem *regs) 1687 - { 1688 - u32 data; 1689 - 1690 - /* Clear bus lock condition */ 1691 - __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03); 1692 - __ast_moutdwm(regs, 0x1e600084, 0x00010000); 1693 - __ast_moutdwm(regs, 0x1e600088, 0x00000000); 1694 - __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); 1695 - 1696 - data = __ast_mindwm(regs, 0x1e6e2070); 1697 - if (data & 0x08000000) { /* check fast reset */ 1698 - /* 1699 - * If "Fast restet" is enabled for ARM-ICE debugger, 1700 - * then WDT needs to enable, that 1701 - * WDT04 is WDT#1 Reload reg. 
1702 - * WDT08 is WDT#1 counter restart reg to avoid system deadlock 1703 - * WDT0C is WDT#1 control reg 1704 - * [6:5]:= 01:Full chip 1705 - * [4]:= 1:1MHz clock source 1706 - * [1]:= 1:WDT will be cleeared and disabled after timeout occurs 1707 - * [0]:= 1:WDT enable 1708 - */ 1709 - __ast_moutdwm(regs, 0x1E785004, 0x00000010); 1710 - __ast_moutdwm(regs, 0x1E785008, 0x00004755); 1711 - __ast_moutdwm(regs, 0x1E78500c, 0x00000033); 1712 - udelay(1000); 1713 - } 1714 - 1715 - do { 1716 - __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); 1717 - data = __ast_mindwm(regs, 0x1e6e2000); 1718 - } while (data != 1); 1719 - 1720 - __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ 1721 - } 1722 - 1723 - void ast_post_chip_2500(struct ast_device *ast) 1724 - { 1725 - struct drm_device *dev = &ast->base; 1726 - u32 temp; 1727 - u8 reg; 1728 - 1729 - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 1730 - if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ 1731 - /* Clear bus lock condition */ 1732 - ast_patch_ahb_2500(ast->regs); 1733 - 1734 - /* Disable watchdog */ 1735 - ast_moutdwm(ast, 0x1E78502C, 0x00000000); 1736 - ast_moutdwm(ast, 0x1E78504C, 0x00000000); 1737 - 1738 - /* 1739 - * Reset USB port to patch USB unknown device issue 1740 - * SCU90 is Multi-function Pin Control #5 1741 - * [29]:= 1:Enable USB2.0 Host port#1 (that the mutually shared USB2.0 Hub 1742 - * port). 1743 - * SCU94 is Multi-function Pin Control #6 1744 - * [14:13]:= 1x:USB2.0 Host2 controller 1745 - * SCU70 is Hardware Strap reg 1746 - * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by 1747 - * [18]: 0(24)/1(48) MHz) 1748 - * SCU7C is Write clear reg to SCU70 1749 - * [23]:= write 1 and then SCU70[23] will be clear as 0b. 
1750 - */ 1751 - ast_moutdwm(ast, 0x1E6E2090, 0x20000000); 1752 - ast_moutdwm(ast, 0x1E6E2094, 0x00004000); 1753 - if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { 1754 - ast_moutdwm(ast, 0x1E6E207C, 0x00800000); 1755 - mdelay(100); 1756 - ast_moutdwm(ast, 0x1E6E2070, 0x00800000); 1757 - } 1758 - /* Modify eSPI reset pin */ 1759 - temp = ast_mindwm(ast, 0x1E6E2070); 1760 - if (temp & 0x02000000) 1761 - ast_moutdwm(ast, 0x1E6E207C, 0x00004000); 1762 - 1763 - /* Slow down CPU/AHB CLK in VGA only mode */ 1764 - temp = ast_read32(ast, 0x12008); 1765 - temp |= 0x73; 1766 - ast_write32(ast, 0x12008, temp); 1767 - 1768 - if (!ast_dram_init_2500(ast)) 1769 - drm_err(dev, "DRAM init failed !\n"); 1770 - 1771 - temp = ast_mindwm(ast, 0x1e6e2040); 1772 - ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); 1773 - } 1774 - 1775 - /* wait ready */ 1776 - do { 1777 - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); 1778 - } while ((reg & 0x40) == 0); 1779 464 }
+50
drivers/gpu/drm/ast/ast_post.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + 3 + #ifndef AST_POST_H 4 + #define AST_POST_H 5 + 6 + #include <linux/limits.h> 7 + #include <linux/types.h> 8 + 9 + struct ast_device; 10 + 11 + /* DRAM timing tables */ 12 + struct ast_dramstruct { 13 + u16 index; 14 + u32 data; 15 + }; 16 + 17 + /* hardware fields */ 18 + #define __AST_DRAMSTRUCT_DRAM_TYPE 0x0004 19 + 20 + /* control commands */ 21 + #define __AST_DRAMSTRUCT_UDELAY 0xff00 22 + #define __AST_DRAMSTRUCT_INVALID 0xffff 23 + 24 + #define __AST_DRAMSTRUCT_INDEX(_name) \ 25 + (__AST_DRAMSTRUCT_ ## _name) 26 + 27 + #define AST_DRAMSTRUCT_INIT(_name, _value) \ 28 + { __AST_DRAMSTRUCT_INDEX(_name), (_value) } 29 + 30 + #define AST_DRAMSTRUCT_UDELAY(_usecs) \ 31 + AST_DRAMSTRUCT_INIT(UDELAY, _usecs) 32 + #define AST_DRAMSTRUCT_INVALID \ 33 + AST_DRAMSTRUCT_INIT(INVALID, U32_MAX) 34 + 35 + #define AST_DRAMSTRUCT_IS(_entry, _name) \ 36 + ((_entry)->index == __AST_DRAMSTRUCT_INDEX(_name)) 37 + 38 + u32 __ast_mindwm(void __iomem *regs, u32 r); 39 + void __ast_moutdwm(void __iomem *regs, u32 r, u32 v); 40 + 41 + bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl); 42 + bool mmc_test_burst(struct ast_device *ast, u32 datagen); 43 + 44 + /* ast_2000.c */ 45 + void ast_2000_set_def_ext_reg(struct ast_device *ast); 46 + 47 + /* ast_2300.c */ 48 + void ast_2300_set_def_ext_reg(struct ast_device *ast); 49 + 50 + #endif
+3 -3
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 1262 1262 1263 1263 adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | 1264 1264 DRM_BRIDGE_OP_EDID | 1265 - DRM_BRIDGE_OP_HDMI | 1266 - DRM_BRIDGE_OP_HDMI_AUDIO | 1267 - DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; 1265 + DRM_BRIDGE_OP_HDMI; 1268 1266 if (adv7511->i2c_main->irq) 1269 1267 adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD; 1270 1268 ··· 1270 1272 adv7511->bridge.product = adv7511->info->name; 1271 1273 1272 1274 #ifdef CONFIG_DRM_I2C_ADV7511_AUDIO 1275 + adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_AUDIO; 1273 1276 adv7511->bridge.hdmi_audio_dev = dev; 1274 1277 adv7511->bridge.hdmi_audio_max_i2s_playback_channels = 2; 1275 1278 adv7511->bridge.hdmi_audio_i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | ··· 1283 1284 #endif 1284 1285 1285 1286 #ifdef CONFIG_DRM_I2C_ADV7511_CEC 1287 + adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; 1286 1288 adv7511->bridge.hdmi_cec_dev = dev; 1287 1289 adv7511->bridge.hdmi_cec_adapter_name = dev_name(dev); 1288 1290 adv7511->bridge.hdmi_cec_available_las = ADV7511_MAX_ADDRS;
+11 -29
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
··· 1041 1041 struct drm_encoder *encoder, 1042 1042 enum drm_bridge_attach_flags flags) 1043 1043 { 1044 - struct analogix_dp_device *dp = bridge->driver_private; 1044 + struct analogix_dp_device *dp = to_dp(bridge); 1045 1045 struct drm_connector *connector = NULL; 1046 1046 int ret = 0; 1047 1047 ··· 1125 1125 static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, 1126 1126 struct drm_atomic_state *old_state) 1127 1127 { 1128 - struct analogix_dp_device *dp = bridge->driver_private; 1128 + struct analogix_dp_device *dp = to_dp(bridge); 1129 1129 struct drm_crtc *crtc; 1130 1130 struct drm_crtc_state *old_crtc_state; 1131 1131 ··· 1180 1180 static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, 1181 1181 struct drm_atomic_state *old_state) 1182 1182 { 1183 - struct analogix_dp_device *dp = bridge->driver_private; 1183 + struct analogix_dp_device *dp = to_dp(bridge); 1184 1184 struct drm_crtc *crtc; 1185 1185 struct drm_crtc_state *old_crtc_state; 1186 1186 int timeout_loop = 0; ··· 1217 1217 1218 1218 static void analogix_dp_bridge_disable(struct drm_bridge *bridge) 1219 1219 { 1220 - struct analogix_dp_device *dp = bridge->driver_private; 1220 + struct analogix_dp_device *dp = to_dp(bridge); 1221 1221 1222 1222 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1223 1223 return; ··· 1240 1240 static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, 1241 1241 struct drm_atomic_state *old_state) 1242 1242 { 1243 - struct analogix_dp_device *dp = bridge->driver_private; 1243 + struct analogix_dp_device *dp = to_dp(bridge); 1244 1244 struct drm_crtc *old_crtc, *new_crtc; 1245 1245 struct drm_crtc_state *old_crtc_state = NULL; 1246 1246 struct drm_crtc_state *new_crtc_state = NULL; ··· 1278 1278 static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, 1279 1279 struct drm_atomic_state *old_state) 1280 1280 { 1281 - struct analogix_dp_device *dp = bridge->driver_private; 1281 + struct analogix_dp_device 
*dp = to_dp(bridge); 1282 1282 struct drm_crtc *crtc; 1283 1283 struct drm_crtc_state *new_crtc_state; 1284 1284 int ret; ··· 1300 1300 const struct drm_display_mode *orig_mode, 1301 1301 const struct drm_display_mode *mode) 1302 1302 { 1303 - struct analogix_dp_device *dp = bridge->driver_private; 1303 + struct analogix_dp_device *dp = to_dp(bridge); 1304 1304 struct drm_display_info *display_info = &dp->connector.display_info; 1305 1305 struct video_info *video = &dp->video_info; 1306 1306 struct device_node *dp_node = dp->dev->of_node; ··· 1384 1384 .mode_set = analogix_dp_bridge_mode_set, 1385 1385 .attach = analogix_dp_bridge_attach, 1386 1386 }; 1387 - 1388 - static int analogix_dp_create_bridge(struct drm_device *drm_dev, 1389 - struct analogix_dp_device *dp) 1390 - { 1391 - struct drm_bridge *bridge; 1392 - 1393 - bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); 1394 - if (!bridge) { 1395 - DRM_ERROR("failed to allocate for drm bridge\n"); 1396 - return -ENOMEM; 1397 - } 1398 - 1399 - dp->bridge = bridge; 1400 - 1401 - bridge->driver_private = dp; 1402 - bridge->funcs = &analogix_dp_bridge_funcs; 1403 - 1404 - return drm_bridge_attach(dp->encoder, bridge, NULL, 0); 1405 - } 1406 1387 1407 1388 static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) 1408 1389 { ··· 1472 1491 return ERR_PTR(-EINVAL); 1473 1492 } 1474 1493 1475 - dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL); 1494 + dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge, 1495 + &analogix_dp_bridge_funcs); 1476 1496 if (!dp) 1477 1497 return ERR_PTR(-ENOMEM); 1478 1498 ··· 1625 1643 return ret; 1626 1644 } 1627 1645 1628 - ret = analogix_dp_create_bridge(drm_dev, dp); 1646 + ret = drm_bridge_attach(dp->encoder, &dp->bridge, NULL, 0); 1629 1647 if (ret) { 1630 1648 DRM_ERROR("failed to create bridge (%d)\n", ret); 1631 1649 goto err_unregister_aux; ··· 1642 1660 1643 1661 void analogix_dp_unbind(struct analogix_dp_device *dp) 1644 
1662 { 1645 - analogix_dp_bridge_disable(dp->bridge); 1663 + analogix_dp_bridge_disable(&dp->bridge); 1646 1664 dp->connector.funcs->destroy(&dp->connector); 1647 1665 1648 1666 drm_panel_unprepare(dp->plat_data->panel);
+2 -1
drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
··· 11 11 12 12 #include <drm/display/drm_dp_helper.h> 13 13 #include <drm/drm_crtc.h> 14 + #include <drm/drm_bridge.h> 14 15 15 16 #define DP_TIMEOUT_LOOP_COUNT 100 16 17 #define MAX_CR_LOOP 5 ··· 155 154 struct device *dev; 156 155 struct drm_device *drm_dev; 157 156 struct drm_connector connector; 158 - struct drm_bridge *bridge; 157 + struct drm_bridge bridge; 159 158 struct drm_dp_aux aux; 160 159 struct clk *clock; 161 160 unsigned int irq;
+1
drivers/gpu/drm/bridge/tc358767.c
··· 2422 2422 struct device_node *node = NULL; 2423 2423 2424 2424 for_each_endpoint_of_node(dev->of_node, node) { 2425 + of_graph_parse_endpoint(node, &endpoint); 2425 2426 if (endpoint.port == 2) { 2426 2427 of_property_read_u8_array(node, "toshiba,pre-emphasis", 2427 2428 tc->pre_emphasis,
+5
drivers/gpu/drm/clients/drm_client_setup.c
··· 4 4 5 5 #include <drm/clients/drm_client_setup.h> 6 6 #include <drm/drm_device.h> 7 + #include <drm/drm_drv.h> 7 8 #include <drm/drm_fourcc.h> 8 9 #include <drm/drm_print.h> 9 10 ··· 34 33 */ 35 34 void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format) 36 35 { 36 + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 37 + drm_dbg(dev, "driver does not support mode-setting, skipping DRM clients\n"); 38 + return; 39 + } 37 40 38 41 #ifdef CONFIG_DRM_FBDEV_EMULATION 39 42 if (!strcmp(drm_client_default, "fbdev")) {
+7 -7
drivers/gpu/drm/display/drm_dp_helper.c
··· 4245 4245 "%s: Failed to read backlight level: %d\n", 4246 4246 aux->name, ret); 4247 4247 return ret; 4248 - } 4248 + } 4249 4249 4250 - /* 4251 - * Incase luminance is set we want to send the value back in nits but since 4252 - * DP_EDP_PANEL_TARGET_LUMINANCE stores values in millinits we need to divide 4253 - * by 1000. 4254 - */ 4255 - return (buf[0] | buf[1] << 8 | buf[2] << 16) / 1000; 4250 + /* 4251 + * Incase luminance is set we want to send the value back in nits but 4252 + * since DP_EDP_PANEL_TARGET_LUMINANCE stores values in millinits we 4253 + * need to divide by 1000. 4254 + */ 4255 + return (buf[0] | buf[1] << 8 | buf[2] << 16) / 1000; 4256 4256 } else { 4257 4257 ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, 4258 4258 buf, size);
+1 -1
drivers/gpu/drm/display/drm_hdmi_cec_helper.c
··· 69 69 struct drm_connector *connector = res; 70 70 struct drm_connector_hdmi_cec_data *data = connector->cec.data; 71 71 72 - cec_delete_adapter(data->adapter); 72 + cec_unregister_adapter(data->adapter); 73 73 74 74 if (data->funcs->uninit) 75 75 data->funcs->uninit(connector);
+112 -6
drivers/gpu/drm/drm_debugfs.c
··· 44 44 #include "drm_crtc_internal.h" 45 45 #include "drm_internal.h" 46 46 47 + static struct dentry *accel_debugfs_root; 48 + static struct dentry *drm_debugfs_root; 49 + 47 50 /*************************************************** 48 51 * Initialization, etc. 49 52 **************************************************/ ··· 290 287 } 291 288 EXPORT_SYMBOL(drm_debugfs_remove_files); 292 289 290 + void drm_debugfs_bridge_params(void) 291 + { 292 + drm_bridge_debugfs_params(drm_debugfs_root); 293 + } 294 + 295 + void drm_debugfs_init_root(void) 296 + { 297 + drm_debugfs_root = debugfs_create_dir("dri", NULL); 298 + #if IS_ENABLED(CONFIG_DRM_ACCEL) 299 + accel_debugfs_root = debugfs_create_dir("accel", NULL); 300 + #endif 301 + } 302 + 303 + void drm_debugfs_remove_root(void) 304 + { 305 + #if IS_ENABLED(CONFIG_DRM_ACCEL) 306 + debugfs_remove(accel_debugfs_root); 307 + #endif 308 + debugfs_remove(drm_debugfs_root); 309 + } 310 + 311 + static int drm_debugfs_proc_info_show(struct seq_file *m, void *unused) 312 + { 313 + struct pid *pid; 314 + struct task_struct *task; 315 + struct drm_file *file = m->private; 316 + 317 + if (!file) 318 + return -EINVAL; 319 + 320 + rcu_read_lock(); 321 + pid = rcu_dereference(file->pid); 322 + task = pid_task(pid, PIDTYPE_TGID); 323 + 324 + seq_printf(m, "pid: %d\n", task ? task->pid : 0); 325 + seq_printf(m, "comm: %s\n", task ? 
task->comm : "Unset"); 326 + rcu_read_unlock(); 327 + return 0; 328 + } 329 + 330 + static int drm_debufs_proc_info_open(struct inode *inode, struct file *file) 331 + { 332 + return single_open(file, drm_debugfs_proc_info_show, inode->i_private); 333 + } 334 + 335 + static const struct file_operations drm_debugfs_proc_info_fops = { 336 + .owner = THIS_MODULE, 337 + .open = drm_debufs_proc_info_open, 338 + .read = seq_read, 339 + .llseek = seq_lseek, 340 + .release = single_release, 341 + }; 342 + 343 + /** 344 + * drm_debugfs_clients_add - Add a per client debugfs directory 345 + * @file: drm_file for a client 346 + * 347 + * Create the debugfs directory for each client. This will be used to populate 348 + * driver specific data for each client. 349 + * 350 + * Also add the process information debugfs file for each client to tag 351 + * which client belongs to which process. 352 + */ 353 + void drm_debugfs_clients_add(struct drm_file *file) 354 + { 355 + char *client; 356 + 357 + client = kasprintf(GFP_KERNEL, "client-%llu", file->client_id); 358 + if (!client) 359 + return; 360 + 361 + /* Create a debugfs directory for the client in root on drm debugfs */ 362 + file->debugfs_client = debugfs_create_dir(client, drm_debugfs_root); 363 + kfree(client); 364 + 365 + debugfs_create_file("proc_info", 0444, file->debugfs_client, file, 366 + &drm_debugfs_proc_info_fops); 367 + 368 + client = kasprintf(GFP_KERNEL, "../%s", file->minor->dev->unique); 369 + if (!client) 370 + return; 371 + 372 + /* Create a link from client_id to the drm device this client id belongs to */ 373 + debugfs_create_symlink("device", file->debugfs_client, client); 374 + kfree(client); 375 + } 376 + 377 + /** 378 + * drm_debugfs_clients_remove - removes all debugfs directories and files 379 + * @file: drm_file for a client 380 + * 381 + * Removes the debugfs directories recursively from the client directory. 
382 + * 383 + * There is also a possibility that debugfs files are open while the drm_file 384 + * is released. 385 + */ 386 + void drm_debugfs_clients_remove(struct drm_file *file) 387 + { 388 + debugfs_remove_recursive(file->debugfs_client); 389 + file->debugfs_client = NULL; 390 + } 391 + 293 392 /** 294 393 * drm_debugfs_dev_init - create debugfs directory for the device 295 394 * @dev: the device which we want to create the directory for 296 - * @root: the parent directory depending on the device type 297 395 * 298 396 * Creates the debugfs directory for the device under the given root directory. 299 397 */ 300 - void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) 398 + void drm_debugfs_dev_init(struct drm_device *dev) 301 399 { 302 - dev->debugfs_root = debugfs_create_dir(dev->unique, root); 400 + if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) 401 + dev->debugfs_root = debugfs_create_dir(dev->unique, accel_debugfs_root); 402 + else 403 + dev->debugfs_root = debugfs_create_dir(dev->unique, drm_debugfs_root); 303 404 } 304 405 305 406 /** ··· 430 323 drm_atomic_debugfs_init(dev); 431 324 } 432 325 433 - int drm_debugfs_register(struct drm_minor *minor, int minor_id, 434 - struct dentry *root) 326 + int drm_debugfs_register(struct drm_minor *minor, int minor_id) 435 327 { 436 328 struct drm_device *dev = minor->dev; 437 329 char name[64]; 438 330 439 331 sprintf(name, "%d", minor_id); 440 - minor->debugfs_symlink = debugfs_create_symlink(name, root, 332 + minor->debugfs_symlink = debugfs_create_symlink(name, drm_debugfs_root, 441 333 dev->unique); 442 334 443 335 /* TODO: Only for compatibility with drivers */
+5 -11
drivers/gpu/drm/drm_drv.c
··· 72 72 */ 73 73 static bool drm_core_init_complete; 74 74 75 - static struct dentry *drm_debugfs_root; 76 - 77 75 DEFINE_STATIC_SRCU(drm_unplug_srcu); 78 76 79 77 /* ··· 184 186 return 0; 185 187 186 188 if (minor->type != DRM_MINOR_ACCEL) { 187 - ret = drm_debugfs_register(minor, minor->index, 188 - drm_debugfs_root); 189 + ret = drm_debugfs_register(minor, minor->index); 189 190 if (ret) { 190 191 DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); 191 192 goto err_debugfs; ··· 784 787 goto err; 785 788 } 786 789 787 - if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) 788 - accel_debugfs_init(dev); 789 - else 790 - drm_debugfs_dev_init(dev, drm_debugfs_root); 790 + drm_debugfs_dev_init(dev); 791 791 792 792 return 0; 793 793 ··· 1224 1230 drm_panic_exit(); 1225 1231 accel_core_exit(); 1226 1232 unregister_chrdev(DRM_MAJOR, "drm"); 1227 - debugfs_remove(drm_debugfs_root); 1233 + drm_debugfs_remove_root(); 1228 1234 drm_sysfs_destroy(); 1229 1235 WARN_ON(!xa_empty(&drm_minors_xa)); 1230 1236 drm_connector_ida_destroy(); ··· 1243 1249 goto error; 1244 1250 } 1245 1251 1246 - drm_debugfs_root = debugfs_create_dir("dri", NULL); 1247 - drm_bridge_debugfs_params(drm_debugfs_root); 1252 + drm_debugfs_init_root(); 1253 + drm_debugfs_bridge_params(); 1248 1254 1249 1255 ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops); 1250 1256 if (ret < 0)
+11
drivers/gpu/drm/drm_file.c
··· 46 46 #include <drm/drm_file.h> 47 47 #include <drm/drm_gem.h> 48 48 #include <drm/drm_print.h> 49 + #include <drm/drm_debugfs.h> 49 50 50 51 #include "drm_crtc_internal.h" 51 52 #include "drm_internal.h" ··· 169 168 170 169 drm_prime_init_file_private(&file->prime); 171 170 171 + if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) 172 + drm_debugfs_clients_add(file); 173 + 172 174 if (dev->driver->open) { 173 175 ret = dev->driver->open(dev, file); 174 176 if (ret < 0) ··· 186 182 drm_syncobj_release(file); 187 183 if (drm_core_check_feature(dev, DRIVER_GEM)) 188 184 drm_gem_release(dev, file); 185 + 186 + if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) 187 + drm_debugfs_clients_remove(file); 188 + 189 189 put_pid(rcu_access_pointer(file->pid)); 190 190 kfree(file); 191 191 ··· 243 235 current->comm, task_pid_nr(current), 244 236 (long)old_encode_dev(file->minor->kdev->devt), 245 237 atomic_read(&dev->open_count)); 238 + 239 + if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) 240 + drm_debugfs_clients_remove(file); 246 241 247 242 drm_events_release(file); 248 243
+2 -4
drivers/gpu/drm/drm_internal.h
··· 182 182 #if defined(CONFIG_DEBUG_FS) 183 183 void drm_debugfs_dev_fini(struct drm_device *dev); 184 184 void drm_debugfs_dev_register(struct drm_device *dev); 185 - int drm_debugfs_register(struct drm_minor *minor, int minor_id, 186 - struct dentry *root); 185 + int drm_debugfs_register(struct drm_minor *minor, int minor_id); 187 186 void drm_debugfs_unregister(struct drm_minor *minor); 188 187 void drm_debugfs_connector_add(struct drm_connector *connector); 189 188 void drm_debugfs_connector_remove(struct drm_connector *connector); ··· 200 201 { 201 202 } 202 203 203 - static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id, 204 - struct dentry *root) 204 + static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id) 205 205 { 206 206 return 0; 207 207 }
+1 -1
drivers/gpu/drm/drm_panic_qr.rs
··· 27 27 //! * <https://github.com/erwanvivien/fast_qr> 28 28 //! * <https://github.com/bjguillot/qr> 29 29 30 - use kernel::{prelude::*, str::CStr}; 30 + use kernel::prelude::*; 31 31 32 32 #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] 33 33 struct Version(usize);
+58 -1
drivers/gpu/drm/imagination/pvr_power.c
··· 340 340 return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY; 341 341 } 342 342 343 + static int 344 + pvr_power_clear_error(struct pvr_device *pvr_dev) 345 + { 346 + struct device *dev = from_pvr_device(pvr_dev)->dev; 347 + int err; 348 + 349 + /* Ensure the device state is known and nothing is happening past this point */ 350 + pm_runtime_disable(dev); 351 + 352 + /* Attempt to clear the runtime PM error by setting the current state again */ 353 + if (pm_runtime_status_suspended(dev)) 354 + err = pm_runtime_set_suspended(dev); 355 + else 356 + err = pm_runtime_set_active(dev); 357 + 358 + if (err) { 359 + drm_err(from_pvr_device(pvr_dev), 360 + "%s: Failed to clear runtime PM error (new error %d)\n", 361 + __func__, err); 362 + } 363 + 364 + pm_runtime_enable(dev); 365 + 366 + return err; 367 + } 368 + 369 + /** 370 + * pvr_power_get_clear() - Acquire a power reference, correcting any errors 371 + * @pvr_dev: Device pointer 372 + * 373 + * Attempt to acquire a power reference on the device. If the runtime PM 374 + * is in error state, attempt to clear the error and retry. 375 + * 376 + * Returns: 377 + * * 0 on success, or 378 + * * Any error code returned by pvr_power_get() or the runtime PM API. 379 + */ 380 + static int 381 + pvr_power_get_clear(struct pvr_device *pvr_dev) 382 + { 383 + int err; 384 + 385 + err = pvr_power_get(pvr_dev); 386 + if (err == 0) 387 + return err; 388 + 389 + drm_warn(from_pvr_device(pvr_dev), 390 + "%s: pvr_power_get returned error %d, attempting recovery\n", 391 + __func__, err); 392 + 393 + err = pvr_power_clear_error(pvr_dev); 394 + if (err) 395 + return err; 396 + 397 + return pvr_power_get(pvr_dev); 398 + } 399 + 343 400 /** 344 401 * pvr_power_reset() - Reset the GPU 345 402 * @pvr_dev: Device pointer ··· 421 364 * Take a power reference during the reset. This should prevent any interference with the 422 365 * power state during reset. 
423 366 */ 424 - WARN_ON(pvr_power_get(pvr_dev)); 367 + WARN_ON(pvr_power_get_clear(pvr_dev)); 425 368 426 369 down_write(&pvr_dev->reset_sem); 427 370
+15 -16
drivers/gpu/drm/panthor/panthor_gem.c
··· 16 16 #include "panthor_mmu.h" 17 17 18 18 #ifdef CONFIG_DEBUG_FS 19 - static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, 20 - struct panthor_gem_object *bo) 19 + static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) 21 20 { 22 21 INIT_LIST_HEAD(&bo->debugfs.node); 22 + } 23 + 24 + static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo) 25 + { 26 + struct panthor_device *ptdev = container_of(bo->base.base.dev, 27 + struct panthor_device, base); 23 28 24 29 bo->debugfs.creator.tgid = current->group_leader->pid; 25 30 get_task_comm(bo->debugfs.creator.process_name, current->group_leader); ··· 49 44 50 45 static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) 51 46 { 52 - bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; 47 + bo->debugfs.flags = usage_flags; 48 + panthor_gem_debugfs_bo_add(bo); 53 49 } 54 50 #else 55 - static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, 56 - struct panthor_gem_object *bo) 57 - {} 58 51 static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {} 59 52 static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {} 53 + static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {} 60 54 #endif 61 55 62 56 static void panthor_gem_free_object(struct drm_gem_object *obj) ··· 250 246 drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock); 251 247 mutex_init(&obj->label.lock); 252 248 253 - panthor_gem_debugfs_bo_add(ptdev, obj); 249 + panthor_gem_debugfs_bo_init(obj); 254 250 255 251 return &obj->base.base; 256 252 } ··· 289 285 bo->base.base.resv = bo->exclusive_vm_root_gem->resv; 290 286 } 291 287 288 + panthor_gem_debugfs_set_usage_flags(bo, 0); 289 + 292 290 /* 293 291 * Allocate an id of idr table where the obj is registered 294 292 * and handle has the id what user can see. 
··· 301 295 302 296 /* drop reference from allocate - handle holds it now. */ 303 297 drm_gem_object_put(&shmem->base); 304 - 305 - /* 306 - * No explicit flags are needed in the call below, since the 307 - * function internally sets the INITIALIZED bit for us. 308 - */ 309 - panthor_gem_debugfs_set_usage_flags(bo, 0); 310 298 311 299 return ret; 312 300 } ··· 387 387 unsigned int refcount = kref_read(&bo->base.base.refcount); 388 388 char creator_info[32] = {}; 389 389 size_t resident_size; 390 - u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; 390 + u32 gem_usage_flags = bo->debugfs.flags; 391 391 u32 gem_state_flags = 0; 392 392 393 393 /* Skip BOs being destroyed. */ ··· 436 436 437 437 scoped_guard(mutex, &ptdev->gems.lock) { 438 438 list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) { 439 - if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED) 440 - panthor_gem_debugfs_bo_print(bo, m, &totals); 439 + panthor_gem_debugfs_bo_print(bo, m, &totals); 441 440 } 442 441 } 443 442
-3
drivers/gpu/drm/panthor/panthor_gem.h
··· 35 35 36 36 /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */ 37 37 PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT), 38 - 39 - /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */ 40 - PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31), 41 38 }; 42 39 43 40 /**
+108 -183
drivers/gpu/drm/rockchip/cdn-dp-core.c
··· 16 16 #include <sound/hdmi-codec.h> 17 17 18 18 #include <drm/display/drm_dp_helper.h> 19 + #include <drm/display/drm_hdmi_audio_helper.h> 19 20 #include <drm/drm_atomic_helper.h> 21 + #include <drm/drm_bridge_connector.h> 20 22 #include <drm/drm_edid.h> 21 23 #include <drm/drm_of.h> 22 24 #include <drm/drm_probe_helper.h> ··· 27 25 #include "cdn-dp-core.h" 28 26 #include "cdn-dp-reg.h" 29 27 30 - static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector) 28 + static inline struct cdn_dp_device *bridge_to_dp(struct drm_bridge *bridge) 31 29 { 32 - return container_of(connector, struct cdn_dp_device, connector); 30 + return container_of(bridge, struct cdn_dp_device, bridge); 33 31 } 34 32 35 33 static inline struct cdn_dp_device *encoder_to_dp(struct drm_encoder *encoder) ··· 233 231 } 234 232 235 233 static enum drm_connector_status 236 - cdn_dp_connector_detect(struct drm_connector *connector, bool force) 234 + cdn_dp_bridge_detect(struct drm_bridge *bridge) 237 235 { 238 - struct cdn_dp_device *dp = connector_to_dp(connector); 236 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 239 237 enum drm_connector_status status = connector_status_disconnected; 240 238 241 239 mutex_lock(&dp->lock); ··· 246 244 return status; 247 245 } 248 246 249 - static void cdn_dp_connector_destroy(struct drm_connector *connector) 247 + static const struct drm_edid * 248 + cdn_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) 250 249 { 251 - drm_connector_unregister(connector); 252 - drm_connector_cleanup(connector); 253 - } 254 - 255 - static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = { 256 - .detect = cdn_dp_connector_detect, 257 - .destroy = cdn_dp_connector_destroy, 258 - .fill_modes = drm_helper_probe_single_connector_modes, 259 - .reset = drm_atomic_helper_connector_reset, 260 - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 261 - .atomic_destroy_state = 
drm_atomic_helper_connector_destroy_state, 262 - }; 263 - 264 - static int cdn_dp_connector_get_modes(struct drm_connector *connector) 265 - { 266 - struct cdn_dp_device *dp = connector_to_dp(connector); 267 - int ret = 0; 250 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 251 + const struct drm_edid *drm_edid; 268 252 269 253 mutex_lock(&dp->lock); 270 - 271 - ret = drm_edid_connector_add_modes(connector); 272 - 254 + drm_edid = drm_edid_read_custom(connector, cdn_dp_get_edid_block, dp); 273 255 mutex_unlock(&dp->lock); 274 256 275 - return ret; 257 + return drm_edid; 276 258 } 277 259 278 260 static enum drm_mode_status 279 - cdn_dp_connector_mode_valid(struct drm_connector *connector, 280 - const struct drm_display_mode *mode) 261 + cdn_dp_bridge_mode_valid(struct drm_bridge *bridge, 262 + const struct drm_display_info *display_info, 263 + const struct drm_display_mode *mode) 281 264 { 282 - struct cdn_dp_device *dp = connector_to_dp(connector); 283 - struct drm_display_info *display_info = &dp->connector.display_info; 265 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 284 266 u32 requested, actual, rate, sink_max, source_max = 0; 285 267 u8 lanes, bpc; 286 268 ··· 309 323 return MODE_OK; 310 324 } 311 325 312 - static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = { 313 - .get_modes = cdn_dp_connector_get_modes, 314 - .mode_valid = cdn_dp_connector_mode_valid, 315 - }; 316 - 317 326 static int cdn_dp_firmware_init(struct cdn_dp_device *dp) 318 327 { 319 328 int ret; ··· 341 360 342 361 static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp) 343 362 { 344 - const struct drm_display_info *info = &dp->connector.display_info; 345 363 int ret; 346 364 347 365 if (!cdn_dp_check_sink_connection(dp)) ··· 352 372 DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret); 353 373 return ret; 354 374 } 355 - 356 - drm_edid_free(dp->drm_edid); 357 - dp->drm_edid = drm_edid_read_custom(&dp->connector, 358 - cdn_dp_get_edid_block, dp); 359 - 
drm_edid_connector_update(&dp->connector, dp->drm_edid); 360 - 361 - dp->sink_has_audio = info->has_audio; 362 - 363 - if (dp->drm_edid) 364 - DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n", 365 - info->width_mm / 10, info->height_mm / 10); 366 375 367 376 return 0; 368 377 } ··· 457 488 dp->active = false; 458 489 dp->max_lanes = 0; 459 490 dp->max_rate = 0; 460 - if (!dp->connected) { 461 - drm_edid_free(dp->drm_edid); 462 - dp->drm_edid = NULL; 463 - } 464 491 465 492 return 0; 466 493 } ··· 511 546 return ret; 512 547 } 513 548 514 - static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, 515 - struct drm_display_mode *mode, 516 - struct drm_display_mode *adjusted) 549 + static void cdn_dp_bridge_mode_set(struct drm_bridge *bridge, 550 + const struct drm_display_mode *mode, 551 + const struct drm_display_mode *adjusted) 517 552 { 518 - struct cdn_dp_device *dp = encoder_to_dp(encoder); 519 - struct drm_display_info *display_info = &dp->connector.display_info; 553 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 520 554 struct video_info *video = &dp->video_info; 521 - 522 - switch (display_info->bpc) { 523 - case 10: 524 - video->color_depth = 10; 525 - break; 526 - case 6: 527 - video->color_depth = 6; 528 - break; 529 - default: 530 - video->color_depth = 8; 531 - break; 532 - } 533 555 534 556 video->color_fmt = PXL_RGB; 535 557 video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); ··· 544 592 return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes)); 545 593 } 546 594 547 - static void cdn_dp_audio_handle_plugged_change(struct cdn_dp_device *dp, 548 - bool plugged) 595 + static void cdn_dp_display_info_update(struct cdn_dp_device *dp, 596 + struct drm_display_info *display_info) 549 597 { 550 - if (dp->codec_dev) 551 - dp->plugged_cb(dp->codec_dev, plugged); 598 + struct video_info *video = &dp->video_info; 599 + 600 + switch (display_info->bpc) { 601 + case 10: 602 + video->color_depth = 10; 603 + break; 
604 + case 6: 605 + video->color_depth = 6; 606 + break; 607 + default: 608 + video->color_depth = 8; 609 + break; 610 + } 552 611 } 553 612 554 - static void cdn_dp_encoder_enable(struct drm_encoder *encoder) 613 + static void cdn_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) 555 614 { 556 - struct cdn_dp_device *dp = encoder_to_dp(encoder); 615 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 616 + struct drm_connector *connector; 557 617 int ret, val; 558 618 559 - ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); 619 + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); 620 + if (!connector) 621 + return; 622 + 623 + cdn_dp_display_info_update(dp, &connector->display_info); 624 + 625 + ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, &dp->encoder.encoder); 560 626 if (ret < 0) { 561 627 DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); 562 628 return; ··· 595 625 596 626 ret = cdn_dp_enable(dp); 597 627 if (ret) { 598 - DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", 628 + DRM_DEV_ERROR(dp->dev, "Failed to enable bridge %d\n", 599 629 ret); 600 630 goto out; 601 631 } ··· 625 655 goto out; 626 656 } 627 657 628 - cdn_dp_audio_handle_plugged_change(dp, true); 629 - 630 658 out: 631 659 mutex_unlock(&dp->lock); 632 660 } 633 661 634 - static void cdn_dp_encoder_disable(struct drm_encoder *encoder) 662 + static void cdn_dp_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) 635 663 { 636 - struct cdn_dp_device *dp = encoder_to_dp(encoder); 664 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 637 665 int ret; 638 666 639 667 mutex_lock(&dp->lock); 640 - cdn_dp_audio_handle_plugged_change(dp, false); 641 668 642 669 if (dp->active) { 643 670 ret = cdn_dp_disable(dp); 644 671 if (ret) { 645 - DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n", 672 + DRM_DEV_ERROR(dp->dev, "Failed to disable bridge %d\n", 646 673 ret); 647 674 
} 648 675 } ··· 671 704 } 672 705 673 706 static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = { 674 - .mode_set = cdn_dp_encoder_mode_set, 675 - .enable = cdn_dp_encoder_enable, 676 - .disable = cdn_dp_encoder_disable, 677 707 .atomic_check = cdn_dp_encoder_atomic_check, 678 708 }; 679 709 ··· 743 779 return 0; 744 780 } 745 781 746 - static int cdn_dp_audio_hw_params(struct device *dev, void *data, 747 - struct hdmi_codec_daifmt *daifmt, 748 - struct hdmi_codec_params *params) 782 + static int cdn_dp_audio_prepare(struct drm_connector *connector, 783 + struct drm_bridge *bridge, 784 + struct hdmi_codec_daifmt *daifmt, 785 + struct hdmi_codec_params *params) 749 786 { 750 - struct cdn_dp_device *dp = dev_get_drvdata(dev); 787 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 751 788 struct audio_info audio = { 752 789 .sample_width = params->sample_width, 753 790 .sample_rate = params->sample_rate, ··· 770 805 audio.format = AFMT_SPDIF; 771 806 break; 772 807 default: 773 - DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt); 808 + drm_err(bridge->dev, "Invalid format %d\n", daifmt->fmt); 774 809 ret = -EINVAL; 775 810 goto out; 776 811 } ··· 784 819 return ret; 785 820 } 786 821 787 - static void cdn_dp_audio_shutdown(struct device *dev, void *data) 822 + static void cdn_dp_audio_shutdown(struct drm_connector *connector, 823 + struct drm_bridge *bridge) 788 824 { 789 - struct cdn_dp_device *dp = dev_get_drvdata(dev); 825 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 790 826 int ret; 791 827 792 828 mutex_lock(&dp->lock); ··· 801 835 mutex_unlock(&dp->lock); 802 836 } 803 837 804 - static int cdn_dp_audio_mute_stream(struct device *dev, void *data, 838 + static int cdn_dp_audio_mute_stream(struct drm_connector *connector, 839 + struct drm_bridge *bridge, 805 840 bool enable, int direction) 806 841 { 807 - struct cdn_dp_device *dp = dev_get_drvdata(dev); 842 + struct cdn_dp_device *dp = bridge_to_dp(bridge); 808 843 int ret; 809 844 
810 845 mutex_lock(&dp->lock); ··· 821 854 return ret; 822 855 } 823 856 824 - static int cdn_dp_audio_get_eld(struct device *dev, void *data, 825 - u8 *buf, size_t len) 826 - { 827 - struct cdn_dp_device *dp = dev_get_drvdata(dev); 857 + static const struct drm_bridge_funcs cdn_dp_bridge_funcs = { 858 + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, 859 + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, 860 + .atomic_reset = drm_atomic_helper_bridge_reset, 861 + .detect = cdn_dp_bridge_detect, 862 + .edid_read = cdn_dp_bridge_edid_read, 863 + .atomic_enable = cdn_dp_bridge_atomic_enable, 864 + .atomic_disable = cdn_dp_bridge_atomic_disable, 865 + .mode_valid = cdn_dp_bridge_mode_valid, 866 + .mode_set = cdn_dp_bridge_mode_set, 828 867 829 - memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); 830 - 831 - return 0; 832 - } 833 - 834 - static int cdn_dp_audio_hook_plugged_cb(struct device *dev, void *data, 835 - hdmi_codec_plugged_cb fn, 836 - struct device *codec_dev) 837 - { 838 - struct cdn_dp_device *dp = dev_get_drvdata(dev); 839 - 840 - mutex_lock(&dp->lock); 841 - dp->plugged_cb = fn; 842 - dp->codec_dev = codec_dev; 843 - cdn_dp_audio_handle_plugged_change(dp, dp->connected); 844 - mutex_unlock(&dp->lock); 845 - 846 - return 0; 847 - } 848 - 849 - static const struct hdmi_codec_ops audio_codec_ops = { 850 - .hw_params = cdn_dp_audio_hw_params, 851 - .audio_shutdown = cdn_dp_audio_shutdown, 852 - .mute_stream = cdn_dp_audio_mute_stream, 853 - .get_eld = cdn_dp_audio_get_eld, 854 - .hook_plugged_cb = cdn_dp_audio_hook_plugged_cb, 868 + .dp_audio_prepare = cdn_dp_audio_prepare, 869 + .dp_audio_mute_stream = cdn_dp_audio_mute_stream, 870 + .dp_audio_shutdown = cdn_dp_audio_shutdown, 855 871 }; 856 - 857 - static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, 858 - struct device *dev) 859 - { 860 - struct hdmi_codec_pdata codec_data = { 861 - .i2s = 1, 862 - .spdif = 1, 863 - .ops = &audio_codec_ops, 864 
- .max_i2s_channels = 8, 865 - .no_capture_mute = 1, 866 - }; 867 - 868 - dp->audio_pdev = platform_device_register_data( 869 - dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, 870 - &codec_data, sizeof(codec_data)); 871 - 872 - return PTR_ERR_OR_ZERO(dp->audio_pdev); 873 - } 874 872 875 873 static int cdn_dp_request_firmware(struct cdn_dp_device *dp) 876 874 { ··· 938 1006 939 1007 out: 940 1008 mutex_unlock(&dp->lock); 941 - drm_connector_helper_hpd_irq_event(&dp->connector); 1009 + drm_bridge_hpd_notify(&dp->bridge, 1010 + dp->connected ? connector_status_connected 1011 + : connector_status_disconnected); 942 1012 } 943 1013 944 1014 static int cdn_dp_pd_event(struct notifier_block *nb, ··· 996 1062 997 1063 drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs); 998 1064 999 - connector = &dp->connector; 1000 - connector->polled = DRM_CONNECTOR_POLL_HPD; 1001 - connector->dpms = DRM_MODE_DPMS_OFF; 1065 + dp->bridge.ops = 1066 + DRM_BRIDGE_OP_DETECT | 1067 + DRM_BRIDGE_OP_EDID | 1068 + DRM_BRIDGE_OP_HPD | 1069 + DRM_BRIDGE_OP_DP_AUDIO; 1070 + dp->bridge.of_node = dp->dev->of_node; 1071 + dp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; 1072 + dp->bridge.hdmi_audio_dev = dp->dev; 1073 + dp->bridge.hdmi_audio_max_i2s_playback_channels = 8; 1074 + dp->bridge.hdmi_audio_spdif_playback = 1; 1075 + dp->bridge.hdmi_audio_dai_port = -1; 1002 1076 1003 - ret = drm_connector_init(drm_dev, connector, 1004 - &cdn_dp_atomic_connector_funcs, 1005 - DRM_MODE_CONNECTOR_DisplayPort); 1006 - if (ret) { 1007 - DRM_ERROR("failed to initialize connector with drm\n"); 1008 - goto err_free_encoder; 1077 + ret = devm_drm_bridge_add(dev, &dp->bridge); 1078 + if (ret) 1079 + return ret; 1080 + 1081 + ret = drm_bridge_attach(encoder, &dp->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); 1082 + if (ret) 1083 + return ret; 1084 + 1085 + connector = drm_bridge_connector_init(drm_dev, encoder); 1086 + if (IS_ERR(connector)) { 1087 + ret = PTR_ERR(connector); 1088 + dev_err(dp->dev, 
"failed to init bridge connector: %d\n", ret); 1089 + return ret; 1009 1090 } 1010 1091 1011 - drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs); 1012 - 1013 - ret = drm_connector_attach_encoder(connector, encoder); 1014 - if (ret) { 1015 - DRM_ERROR("failed to attach connector and encoder\n"); 1016 - goto err_free_connector; 1017 - } 1092 + drm_connector_attach_encoder(connector, encoder); 1018 1093 1019 1094 for (i = 0; i < dp->ports; i++) { 1020 1095 port = dp->port[i]; ··· 1035 1092 if (ret) { 1036 1093 DRM_DEV_ERROR(dev, 1037 1094 "register EXTCON_DISP_DP notifier err\n"); 1038 - goto err_free_connector; 1095 + return ret; 1039 1096 } 1040 1097 } 1041 1098 ··· 1044 1101 schedule_work(&dp->event_work); 1045 1102 1046 1103 return 0; 1047 - 1048 - err_free_connector: 1049 - drm_connector_cleanup(connector); 1050 - err_free_encoder: 1051 - drm_encoder_cleanup(encoder); 1052 - return ret; 1053 1104 } 1054 1105 1055 1106 static void cdn_dp_unbind(struct device *dev, struct device *master, void *data) 1056 1107 { 1057 1108 struct cdn_dp_device *dp = dev_get_drvdata(dev); 1058 1109 struct drm_encoder *encoder = &dp->encoder.encoder; 1059 - struct drm_connector *connector = &dp->connector; 1060 1110 1061 1111 cancel_work_sync(&dp->event_work); 1062 - cdn_dp_encoder_disable(encoder); 1063 1112 encoder->funcs->destroy(encoder); 1064 - connector->funcs->destroy(connector); 1065 1113 1066 1114 pm_runtime_disable(dev); 1067 1115 if (dp->fw_loaded) 1068 1116 release_firmware(dp->fw); 1069 - drm_edid_free(dp->drm_edid); 1070 - dp->drm_edid = NULL; 1071 1117 } 1072 1118 1073 1119 static const struct component_ops cdn_dp_component_ops = { ··· 1103 1171 int ret; 1104 1172 int i; 1105 1173 1106 - dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 1107 - if (!dp) 1108 - return -ENOMEM; 1174 + dp = devm_drm_bridge_alloc(dev, struct cdn_dp_device, bridge, 1175 + &cdn_dp_bridge_funcs); 1176 + if (IS_ERR(dp)) 1177 + return PTR_ERR(dp); 1109 1178 dp->dev = dev; 1110 
1179 1111 1180 match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node); ··· 1142 1209 mutex_init(&dp->lock); 1143 1210 dev_set_drvdata(dev, dp); 1144 1211 1145 - ret = cdn_dp_audio_codec_init(dp, dev); 1212 + ret = component_add(dev, &cdn_dp_component_ops); 1146 1213 if (ret) 1147 1214 return ret; 1148 1215 1149 - ret = component_add(dev, &cdn_dp_component_ops); 1150 - if (ret) 1151 - goto err_audio_deinit; 1152 - 1153 1216 return 0; 1154 - 1155 - err_audio_deinit: 1156 - platform_device_unregister(dp->audio_pdev); 1157 - return ret; 1158 1217 } 1159 1218 1160 1219 static void cdn_dp_remove(struct platform_device *pdev)
+2 -6
drivers/gpu/drm/rockchip/cdn-dp-core.h
··· 8 8 #define _CDN_DP_CORE_H 9 9 10 10 #include <drm/display/drm_dp_helper.h> 11 + #include <drm/drm_bridge.h> 11 12 #include <drm/drm_panel.h> 12 13 #include <drm/drm_probe_helper.h> 13 14 #include <sound/hdmi-codec.h> ··· 66 65 struct cdn_dp_device { 67 66 struct device *dev; 68 67 struct drm_device *drm_dev; 69 - struct drm_connector connector; 68 + struct drm_bridge bridge; 70 69 struct rockchip_encoder encoder; 71 70 struct drm_display_mode mode; 72 71 struct platform_device *audio_pdev; 73 72 struct work_struct event_work; 74 - const struct drm_edid *drm_edid; 75 73 76 74 struct mutex lock; 77 75 bool connected; ··· 101 101 int active_port; 102 102 103 103 u8 dpcd[DP_RECEIVER_CAP_SIZE]; 104 - bool sink_has_audio; 105 - 106 - hdmi_codec_plugged_cb plugged_cb; 107 - struct device *codec_dev; 108 104 }; 109 105 #endif /* _CDN_DP_CORE_H */
+4 -12
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
··· 213 213 214 214 if (IS_ERR(hdmi->ref_clk)) { 215 215 ret = PTR_ERR(hdmi->ref_clk); 216 - if (ret != -EPROBE_DEFER) 217 - dev_err(hdmi->dev, "failed to get reference clock\n"); 218 - return ret; 216 + return dev_err_probe(hdmi->dev, ret, "failed to get reference clock\n"); 219 217 } 220 218 221 219 hdmi->grf_clk = devm_clk_get_optional(hdmi->dev, "grf"); 222 220 if (IS_ERR(hdmi->grf_clk)) { 223 221 ret = PTR_ERR(hdmi->grf_clk); 224 - if (ret != -EPROBE_DEFER) 225 - dev_err(hdmi->dev, "failed to get grf clock\n"); 226 - return ret; 222 + return dev_err_probe(hdmi->dev, ret, "failed to get grf clock\n"); 227 223 } 228 224 229 225 ret = devm_regulator_get_enable(hdmi->dev, "avdd-0v9"); ··· 569 573 570 574 ret = rockchip_hdmi_parse_dt(hdmi); 571 575 if (ret) { 572 - if (ret != -EPROBE_DEFER) 573 - dev_err(hdmi->dev, "Unable to parse OF data\n"); 574 - return ret; 576 + return dev_err_probe(hdmi->dev, ret, "Unable to parse OF data\n"); 575 577 } 576 578 577 579 hdmi->phy = devm_phy_optional_get(dev, "hdmi"); 578 580 if (IS_ERR(hdmi->phy)) { 579 581 ret = PTR_ERR(hdmi->phy); 580 - if (ret != -EPROBE_DEFER) 581 - dev_err(hdmi->dev, "failed to get phy\n"); 582 - return ret; 582 + return dev_err_probe(hdmi->dev, ret, "failed to get phy\n"); 583 583 } 584 584 585 585 if (hdmi->phy) {
+385 -67
drivers/gpu/drm/rockchip/inno_hdmi.c
··· 29 29 30 30 #include "rockchip_drm_drv.h" 31 31 32 - #include "inno_hdmi.h" 32 + #define INNO_HDMI_MIN_TMDS_CLOCK 25000000U 33 + 34 + #define DDC_SEGMENT_ADDR 0x30 35 + 36 + #define HDMI_SCL_RATE (100 * 1000) 37 + 38 + #define DDC_BUS_FREQ_L 0x4b 39 + #define DDC_BUS_FREQ_H 0x4c 40 + 41 + #define HDMI_SYS_CTRL 0x00 42 + #define m_RST_ANALOG BIT(6) 43 + #define v_RST_ANALOG (0 << 6) 44 + #define v_NOT_RST_ANALOG BIT(6) 45 + #define m_RST_DIGITAL BIT(5) 46 + #define v_RST_DIGITAL (0 << 5) 47 + #define v_NOT_RST_DIGITAL BIT(5) 48 + #define m_REG_CLK_INV BIT(4) 49 + #define v_REG_CLK_NOT_INV (0 << 4) 50 + #define v_REG_CLK_INV BIT(4) 51 + #define m_VCLK_INV BIT(3) 52 + #define v_VCLK_NOT_INV (0 << 3) 53 + #define v_VCLK_INV BIT(3) 54 + #define m_REG_CLK_SOURCE BIT(2) 55 + #define v_REG_CLK_SOURCE_TMDS (0 << 2) 56 + #define v_REG_CLK_SOURCE_SYS BIT(2) 57 + #define m_POWER BIT(1) 58 + #define v_PWR_ON (0 << 1) 59 + #define v_PWR_OFF BIT(1) 60 + #define m_INT_POL BIT(0) 61 + #define v_INT_POL_HIGH 1 62 + #define v_INT_POL_LOW 0 63 + 64 + #define HDMI_VIDEO_CONTRL1 0x01 65 + #define m_VIDEO_INPUT_FORMAT (7 << 1) 66 + #define m_DE_SOURCE BIT(0) 67 + #define v_VIDEO_INPUT_FORMAT(n) ((n) << 1) 68 + #define v_DE_EXTERNAL 1 69 + #define v_DE_INTERNAL 0 70 + enum { 71 + VIDEO_INPUT_SDR_RGB444 = 0, 72 + VIDEO_INPUT_DDR_RGB444 = 5, 73 + VIDEO_INPUT_DDR_YCBCR422 = 6 74 + }; 75 + 76 + #define HDMI_VIDEO_CONTRL2 0x02 77 + #define m_VIDEO_OUTPUT_COLOR (3 << 6) 78 + #define m_VIDEO_INPUT_BITS (3 << 4) 79 + #define m_VIDEO_INPUT_CSP BIT(0) 80 + #define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6) 81 + #define v_VIDEO_INPUT_BITS(n) ((n) << 4) 82 + #define v_VIDEO_INPUT_CSP(n) ((n) << 0) 83 + enum { 84 + VIDEO_INPUT_12BITS = 0, 85 + VIDEO_INPUT_10BITS = 1, 86 + VIDEO_INPUT_REVERT = 2, 87 + VIDEO_INPUT_8BITS = 3, 88 + }; 89 + 90 + #define HDMI_VIDEO_CONTRL 0x03 91 + #define m_VIDEO_AUTO_CSC BIT(7) 92 + #define v_VIDEO_AUTO_CSC(n) ((n) << 7) 93 + #define m_VIDEO_C0_C2_SWAP BIT(0) 94 + 
#define v_VIDEO_C0_C2_SWAP(n) ((n) << 0) 95 + enum { 96 + C0_C2_CHANGE_ENABLE = 0, 97 + C0_C2_CHANGE_DISABLE = 1, 98 + AUTO_CSC_DISABLE = 0, 99 + AUTO_CSC_ENABLE = 1, 100 + }; 101 + 102 + #define HDMI_VIDEO_CONTRL3 0x04 103 + #define m_COLOR_DEPTH_NOT_INDICATED BIT(4) 104 + #define m_SOF BIT(3) 105 + #define m_COLOR_RANGE BIT(2) 106 + #define m_CSC BIT(0) 107 + #define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4) 108 + #define v_SOF_ENABLE (0 << 3) 109 + #define v_SOF_DISABLE BIT(3) 110 + #define v_COLOR_RANGE_FULL BIT(2) 111 + #define v_COLOR_RANGE_LIMITED (0 << 2) 112 + #define v_CSC_ENABLE 1 113 + #define v_CSC_DISABLE 0 114 + 115 + #define HDMI_AV_MUTE 0x05 116 + #define m_AVMUTE_CLEAR BIT(7) 117 + #define m_AVMUTE_ENABLE BIT(6) 118 + #define m_AUDIO_MUTE BIT(1) 119 + #define m_VIDEO_BLACK BIT(0) 120 + #define v_AVMUTE_CLEAR(n) ((n) << 7) 121 + #define v_AVMUTE_ENABLE(n) ((n) << 6) 122 + #define v_AUDIO_MUTE(n) ((n) << 1) 123 + #define v_VIDEO_MUTE(n) ((n) << 0) 124 + 125 + #define HDMI_VIDEO_TIMING_CTL 0x08 126 + #define v_HSYNC_POLARITY(n) ((n) << 3) 127 + #define v_VSYNC_POLARITY(n) ((n) << 2) 128 + #define v_INETLACE(n) ((n) << 1) 129 + #define v_EXTERANL_VIDEO(n) ((n) << 0) 130 + 131 + #define HDMI_VIDEO_EXT_HTOTAL_L 0x09 132 + #define HDMI_VIDEO_EXT_HTOTAL_H 0x0a 133 + #define HDMI_VIDEO_EXT_HBLANK_L 0x0b 134 + #define HDMI_VIDEO_EXT_HBLANK_H 0x0c 135 + #define HDMI_VIDEO_EXT_HDELAY_L 0x0d 136 + #define HDMI_VIDEO_EXT_HDELAY_H 0x0e 137 + #define HDMI_VIDEO_EXT_HDURATION_L 0x0f 138 + #define HDMI_VIDEO_EXT_HDURATION_H 0x10 139 + #define HDMI_VIDEO_EXT_VTOTAL_L 0x11 140 + #define HDMI_VIDEO_EXT_VTOTAL_H 0x12 141 + #define HDMI_VIDEO_EXT_VBLANK 0x13 142 + #define HDMI_VIDEO_EXT_VDELAY 0x14 143 + #define HDMI_VIDEO_EXT_VDURATION 0x15 144 + 145 + #define HDMI_VIDEO_CSC_COEF 0x18 146 + 147 + #define HDMI_AUDIO_CTRL1 0x35 148 + enum { 149 + CTS_SOURCE_INTERNAL = 0, 150 + CTS_SOURCE_EXTERNAL = 1, 151 + }; 152 + 153 + #define v_CTS_SOURCE(n) ((n) << 7) 154 + 155 + 
enum { 156 + DOWNSAMPLE_DISABLE = 0, 157 + DOWNSAMPLE_1_2 = 1, 158 + DOWNSAMPLE_1_4 = 2, 159 + }; 160 + 161 + #define v_DOWN_SAMPLE(n) ((n) << 5) 162 + 163 + enum { 164 + AUDIO_SOURCE_IIS = 0, 165 + AUDIO_SOURCE_SPDIF = 1, 166 + }; 167 + 168 + #define v_AUDIO_SOURCE(n) ((n) << 3) 169 + 170 + #define v_MCLK_ENABLE(n) ((n) << 2) 171 + 172 + enum { 173 + MCLK_128FS = 0, 174 + MCLK_256FS = 1, 175 + MCLK_384FS = 2, 176 + MCLK_512FS = 3, 177 + }; 178 + 179 + #define v_MCLK_RATIO(n) (n) 180 + 181 + #define AUDIO_SAMPLE_RATE 0x37 182 + 183 + enum { 184 + AUDIO_32K = 0x3, 185 + AUDIO_441K = 0x0, 186 + AUDIO_48K = 0x2, 187 + AUDIO_882K = 0x8, 188 + AUDIO_96K = 0xa, 189 + AUDIO_1764K = 0xc, 190 + AUDIO_192K = 0xe, 191 + }; 192 + 193 + #define AUDIO_I2S_MODE 0x38 194 + 195 + enum { 196 + I2S_CHANNEL_1_2 = 1, 197 + I2S_CHANNEL_3_4 = 3, 198 + I2S_CHANNEL_5_6 = 7, 199 + I2S_CHANNEL_7_8 = 0xf 200 + }; 201 + 202 + #define v_I2S_CHANNEL(n) ((n) << 2) 203 + 204 + enum { 205 + I2S_STANDARD = 0, 206 + I2S_LEFT_JUSTIFIED = 1, 207 + I2S_RIGHT_JUSTIFIED = 2, 208 + }; 209 + 210 + #define v_I2S_MODE(n) (n) 211 + 212 + #define AUDIO_I2S_MAP 0x39 213 + #define AUDIO_I2S_SWAPS_SPDIF 0x3a 214 + #define v_SPIDF_FREQ(n) (n) 215 + 216 + #define N_32K 0x1000 217 + #define N_441K 0x1880 218 + #define N_882K 0x3100 219 + #define N_1764K 0x6200 220 + #define N_48K 0x1800 221 + #define N_96K 0x3000 222 + #define N_192K 0x6000 223 + 224 + #define HDMI_AUDIO_CHANNEL_STATUS 0x3e 225 + #define m_AUDIO_STATUS_NLPCM BIT(7) 226 + #define m_AUDIO_STATUS_USE BIT(6) 227 + #define m_AUDIO_STATUS_COPYRIGHT BIT(5) 228 + #define m_AUDIO_STATUS_ADDITION (3 << 2) 229 + #define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0) 230 + #define v_AUDIO_STATUS_NLPCM(n) (((n) & 1) << 7) 231 + #define AUDIO_N_H 0x3f 232 + #define AUDIO_N_M 0x40 233 + #define AUDIO_N_L 0x41 234 + 235 + #define HDMI_AUDIO_CTS_H 0x45 236 + #define HDMI_AUDIO_CTS_M 0x46 237 + #define HDMI_AUDIO_CTS_L 0x47 238 + 239 + #define HDMI_DDC_CLK_L 0x4b 240 + #define 
HDMI_DDC_CLK_H 0x4c 241 + 242 + #define HDMI_EDID_SEGMENT_POINTER 0x4d 243 + #define HDMI_EDID_WORD_ADDR 0x4e 244 + #define HDMI_EDID_FIFO_OFFSET 0x4f 245 + #define HDMI_EDID_FIFO_ADDR 0x50 246 + 247 + #define HDMI_PACKET_SEND_MANUAL 0x9c 248 + #define HDMI_PACKET_SEND_AUTO 0x9d 249 + #define m_PACKET_GCP_EN BIT(7) 250 + #define m_PACKET_MSI_EN BIT(6) 251 + #define m_PACKET_SDI_EN BIT(5) 252 + #define m_PACKET_VSI_EN BIT(4) 253 + #define v_PACKET_GCP_EN(n) (((n) & 1) << 7) 254 + #define v_PACKET_MSI_EN(n) (((n) & 1) << 6) 255 + #define v_PACKET_SDI_EN(n) (((n) & 1) << 5) 256 + #define v_PACKET_VSI_EN(n) (((n) & 1) << 4) 257 + 258 + #define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f 259 + 260 + enum { 261 + INFOFRAME_VSI = 0x05, 262 + INFOFRAME_AVI = 0x06, 263 + INFOFRAME_AAI = 0x08, 264 + }; 265 + 266 + #define HDMI_CONTROL_PACKET_ADDR 0xa0 267 + #define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11 268 + 269 + enum { 270 + AVI_COLOR_MODE_RGB = 0, 271 + AVI_COLOR_MODE_YCBCR422 = 1, 272 + AVI_COLOR_MODE_YCBCR444 = 2, 273 + AVI_COLORIMETRY_NO_DATA = 0, 274 + 275 + AVI_COLORIMETRY_SMPTE_170M = 1, 276 + AVI_COLORIMETRY_ITU709 = 2, 277 + AVI_COLORIMETRY_EXTENDED = 3, 278 + 279 + AVI_CODED_FRAME_ASPECT_NO_DATA = 0, 280 + AVI_CODED_FRAME_ASPECT_4_3 = 1, 281 + AVI_CODED_FRAME_ASPECT_16_9 = 2, 282 + 283 + ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08, 284 + ACTIVE_ASPECT_RATE_4_3 = 0x09, 285 + ACTIVE_ASPECT_RATE_16_9 = 0x0A, 286 + ACTIVE_ASPECT_RATE_14_9 = 0x0B, 287 + }; 288 + 289 + #define HDMI_HDCP_CTRL 0x52 290 + #define m_HDMI_DVI BIT(1) 291 + #define v_HDMI_DVI(n) ((n) << 1) 292 + 293 + #define HDMI_INTERRUPT_MASK1 0xc0 294 + #define HDMI_INTERRUPT_STATUS1 0xc1 295 + #define m_INT_ACTIVE_VSYNC BIT(5) 296 + #define m_INT_EDID_READY BIT(2) 297 + 298 + #define HDMI_INTERRUPT_MASK2 0xc2 299 + #define HDMI_INTERRUPT_STATUS2 0xc3 300 + #define m_INT_HDCP_ERR BIT(7) 301 + #define m_INT_BKSV_FLAG BIT(6) 302 + #define m_INT_HDCP_OK BIT(4) 303 + 304 + #define HDMI_STATUS 0xc8 305 + #define 
m_HOTPLUG BIT(7) 306 + #define m_MASK_INT_HOTPLUG BIT(5) 307 + #define m_INT_HOTPLUG BIT(1) 308 + #define v_MASK_INT_HOTPLUG(n) (((n) & 0x1) << 5) 309 + 310 + #define HDMI_COLORBAR 0xc9 311 + 312 + #define HDMI_PHY_SYNC 0xce 313 + #define HDMI_PHY_SYS_CTL 0xe0 314 + #define m_TMDS_CLK_SOURCE BIT(5) 315 + #define v_TMDS_FROM_PLL (0 << 5) 316 + #define v_TMDS_FROM_GEN BIT(5) 317 + #define m_PHASE_CLK BIT(4) 318 + #define v_DEFAULT_PHASE (0 << 4) 319 + #define v_SYNC_PHASE BIT(4) 320 + #define m_TMDS_CURRENT_PWR BIT(3) 321 + #define v_TURN_ON_CURRENT (0 << 3) 322 + #define v_CAT_OFF_CURRENT BIT(3) 323 + #define m_BANDGAP_PWR BIT(2) 324 + #define v_BANDGAP_PWR_UP (0 << 2) 325 + #define v_BANDGAP_PWR_DOWN BIT(2) 326 + #define m_PLL_PWR BIT(1) 327 + #define v_PLL_PWR_UP (0 << 1) 328 + #define v_PLL_PWR_DOWN BIT(1) 329 + #define m_TMDS_CHG_PWR BIT(0) 330 + #define v_TMDS_CHG_PWR_UP (0 << 0) 331 + #define v_TMDS_CHG_PWR_DOWN BIT(0) 332 + 333 + #define HDMI_PHY_CHG_PWR 0xe1 334 + #define v_CLK_CHG_PWR(n) (((n) & 1) << 3) 335 + #define v_DATA_CHG_PWR(n) (((n) & 7) << 0) 336 + 337 + #define HDMI_PHY_DRIVER 0xe2 338 + #define v_CLK_MAIN_DRIVER(n) ((n) << 4) 339 + #define v_DATA_MAIN_DRIVER(n) ((n) << 0) 340 + 341 + #define HDMI_PHY_PRE_EMPHASIS 0xe3 342 + #define v_PRE_EMPHASIS(n) (((n) & 7) << 4) 343 + #define v_CLK_PRE_DRIVER(n) (((n) & 3) << 2) 344 + #define v_DATA_PRE_DRIVER(n) (((n) & 3) << 0) 345 + 346 + #define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7 347 + #define v_FEEDBACK_DIV_LOW(n) ((n) & 0xff) 348 + #define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8 349 + #define v_FEEDBACK_DIV_HIGH(n) ((n) & 1) 350 + 351 + #define HDMI_PHY_PRE_DIV_RATIO 0xed 352 + #define v_PRE_DIV_RATIO(n) ((n) & 0x1f) 353 + 354 + #define HDMI_CEC_CTRL 0xd0 355 + #define m_ADJUST_FOR_HISENSE BIT(6) 356 + #define m_REJECT_RX_BROADCAST BIT(5) 357 + #define m_BUSFREETIME_ENABLE BIT(2) 358 + #define m_REJECT_RX BIT(1) 359 + #define m_START_TX BIT(0) 360 + 361 + #define HDMI_CEC_DATA 0xd1 362 + #define 
HDMI_CEC_TX_OFFSET 0xd2 363 + #define HDMI_CEC_RX_OFFSET 0xd3 364 + #define HDMI_CEC_CLK_H 0xd4 365 + #define HDMI_CEC_CLK_L 0xd5 366 + #define HDMI_CEC_TX_LENGTH 0xd6 367 + #define HDMI_CEC_RX_LENGTH 0xd7 368 + #define HDMI_CEC_TX_INT_MASK 0xd8 369 + #define m_TX_DONE BIT(3) 370 + #define m_TX_NOACK BIT(2) 371 + #define m_TX_BROADCAST_REJ BIT(1) 372 + #define m_TX_BUSNOTFREE BIT(0) 373 + 374 + #define HDMI_CEC_RX_INT_MASK 0xd9 375 + #define m_RX_LA_ERR BIT(4) 376 + #define m_RX_GLITCH BIT(3) 377 + #define m_RX_DONE BIT(0) 378 + 379 + #define HDMI_CEC_TX_INT 0xda 380 + #define HDMI_CEC_RX_INT 0xdb 381 + #define HDMI_CEC_BUSFREETIME_L 0xdc 382 + #define HDMI_CEC_BUSFREETIME_H 0xdd 383 + #define HDMI_CEC_LOGICADDR 0xde 33 384 34 385 #define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) 35 - 36 - #define INNO_HDMI_MIN_TMDS_CLOCK 25000000U 37 386 38 387 #define RK3036_GRF_SOC_CON2 0x148 39 388 #define RK3036_HDMI_PHSYNC BIT(4) ··· 604 255 inno_hdmi_sys_power(hdmi, true); 605 256 }; 606 257 607 - static void inno_hdmi_reset(struct inno_hdmi *hdmi) 258 + static void inno_hdmi_init_hw(struct inno_hdmi *hdmi) 608 259 { 609 260 u32 val; 610 261 u32 msk; 611 262 612 263 hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL); 613 - udelay(100); 264 + usleep_range(100, 150); 614 265 615 266 hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG); 616 - udelay(100); 267 + usleep_range(100, 150); 617 268 618 269 msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL; 619 270 val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH; 620 271 hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val); 621 272 622 273 inno_hdmi_standby(hdmi); 274 + 275 + /* 276 + * When the controller isn't configured to an accurate 277 + * video timing and there is no reference clock available, 278 + * then the TMDS clock source would be switched to PCLK_HDMI, 279 + * so we need to init the TMDS rate to PCLK rate, and 280 + * reconfigure the DDC clock. 
281 + */ 282 + if (hdmi->refclk) 283 + inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk)); 284 + else 285 + inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk)); 286 + 287 + /* Unmute hotplug interrupt */ 288 + hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1)); 623 289 } 624 290 625 291 static int inno_hdmi_disable_frame(struct drm_connector *connector, ··· 1139 775 * we assume that each word write to this i2c adapter 1140 776 * should be the offset of EDID word address. 1141 777 */ 1142 - if ((msgs->len != 1) || 1143 - ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR))) 778 + if (msgs->len != 1 || (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR)) 1144 779 return -EINVAL; 1145 780 1146 781 reinit_completion(&hdmi->i2c->cmp); ··· 1230 867 strscpy(adap->name, "Inno HDMI", sizeof(adap->name)); 1231 868 i2c_set_adapdata(adap, hdmi); 1232 869 1233 - ret = i2c_add_adapter(adap); 870 + ret = devm_i2c_add_adapter(hdmi->dev, adap); 1234 871 if (ret) { 1235 872 dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name); 1236 - devm_kfree(hdmi->dev, i2c); 1237 873 return ERR_PTR(ret); 1238 874 } 1239 875 ··· 1269 907 if (IS_ERR(hdmi->regs)) 1270 908 return PTR_ERR(hdmi->regs); 1271 909 1272 - hdmi->pclk = devm_clk_get(hdmi->dev, "pclk"); 910 + hdmi->pclk = devm_clk_get_enabled(hdmi->dev, "pclk"); 1273 911 if (IS_ERR(hdmi->pclk)) 1274 912 return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n"); 1275 913 1276 - ret = clk_prepare_enable(hdmi->pclk); 1277 - if (ret) 1278 - return dev_err_probe(dev, ret, "Cannot enable HDMI pclk: %d\n", ret); 1279 - 1280 - hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref"); 1281 - if (IS_ERR(hdmi->refclk)) { 1282 - ret = dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n"); 1283 - goto err_disable_pclk; 1284 - } 1285 - 1286 - ret = clk_prepare_enable(hdmi->refclk); 1287 - if (ret) { 1288 - ret = dev_err_probe(dev, ret, "Cannot enable HDMI refclk: %d\n", 
ret); 1289 - goto err_disable_pclk; 1290 - } 914 + hdmi->refclk = devm_clk_get_optional_enabled(hdmi->dev, "ref"); 915 + if (IS_ERR(hdmi->refclk)) 916 + return dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n"); 1291 917 1292 918 if (hdmi->variant->dev_type == RK3036_HDMI) { 1293 919 hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 1294 - if (IS_ERR(hdmi->grf)) { 1295 - ret = dev_err_probe(dev, PTR_ERR(hdmi->grf), 1296 - "Unable to get rockchip,grf\n"); 1297 - goto err_disable_clk; 1298 - } 920 + if (IS_ERR(hdmi->grf)) 921 + return dev_err_probe(dev, 922 + PTR_ERR(hdmi->grf), "Unable to get rockchip,grf\n"); 1299 923 } 1300 924 1301 925 irq = platform_get_irq(pdev, 0); 1302 - if (irq < 0) { 1303 - ret = irq; 1304 - goto err_disable_clk; 1305 - } 926 + if (irq < 0) 927 + return irq; 1306 928 1307 - inno_hdmi_reset(hdmi); 929 + inno_hdmi_init_hw(hdmi); 1308 930 1309 931 hdmi->ddc = inno_hdmi_i2c_adapter(hdmi); 1310 - if (IS_ERR(hdmi->ddc)) { 1311 - ret = PTR_ERR(hdmi->ddc); 1312 - hdmi->ddc = NULL; 1313 - goto err_disable_clk; 1314 - } 1315 - 1316 - /* 1317 - * When the controller isn't configured to an accurate 1318 - * video timing and there is no reference clock available, 1319 - * then the TMDS clock source would be switched to PCLK_HDMI, 1320 - * so we need to init the TMDS rate to PCLK rate, and 1321 - * reconfigure the DDC clock. 
1322 - */ 1323 - if (hdmi->refclk) 1324 - inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk)); 1325 - else 1326 - inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk)); 932 + if (IS_ERR(hdmi->ddc)) 933 + return PTR_ERR(hdmi->ddc); 1327 934 1328 935 ret = inno_hdmi_register(drm, hdmi); 1329 936 if (ret) 1330 - goto err_put_adapter; 937 + return ret; 1331 938 1332 939 dev_set_drvdata(dev, hdmi); 1333 - 1334 - /* Unmute hotplug interrupt */ 1335 - hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1)); 1336 940 1337 941 ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq, 1338 942 inno_hdmi_irq, IRQF_SHARED, ··· 1310 982 err_cleanup_hdmi: 1311 983 hdmi->connector.funcs->destroy(&hdmi->connector); 1312 984 hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder); 1313 - err_put_adapter: 1314 - i2c_put_adapter(hdmi->ddc); 1315 - err_disable_clk: 1316 - clk_disable_unprepare(hdmi->refclk); 1317 - err_disable_pclk: 1318 - clk_disable_unprepare(hdmi->pclk); 1319 985 return ret; 1320 986 } 1321 987 ··· 1320 998 1321 999 hdmi->connector.funcs->destroy(&hdmi->connector); 1322 1000 hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder); 1323 - 1324 - i2c_put_adapter(hdmi->ddc); 1325 - clk_disable_unprepare(hdmi->refclk); 1326 - clk_disable_unprepare(hdmi->pclk); 1327 1001 } 1328 1002 1329 1003 static const struct component_ops inno_hdmi_ops = {
-349
drivers/gpu/drm/rockchip/inno_hdmi.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0-only */ 2 - /* 3 - * Copyright (C) Rockchip Electronics Co., Ltd. 4 - * Zheng Yang <zhengyang@rock-chips.com> 5 - * Yakir Yang <ykk@rock-chips.com> 6 - */ 7 - 8 - #ifndef __INNO_HDMI_H__ 9 - #define __INNO_HDMI_H__ 10 - 11 - #define DDC_SEGMENT_ADDR 0x30 12 - 13 - #define HDMI_SCL_RATE (100*1000) 14 - #define DDC_BUS_FREQ_L 0x4b 15 - #define DDC_BUS_FREQ_H 0x4c 16 - 17 - #define HDMI_SYS_CTRL 0x00 18 - #define m_RST_ANALOG (1 << 6) 19 - #define v_RST_ANALOG (0 << 6) 20 - #define v_NOT_RST_ANALOG (1 << 6) 21 - #define m_RST_DIGITAL (1 << 5) 22 - #define v_RST_DIGITAL (0 << 5) 23 - #define v_NOT_RST_DIGITAL (1 << 5) 24 - #define m_REG_CLK_INV (1 << 4) 25 - #define v_REG_CLK_NOT_INV (0 << 4) 26 - #define v_REG_CLK_INV (1 << 4) 27 - #define m_VCLK_INV (1 << 3) 28 - #define v_VCLK_NOT_INV (0 << 3) 29 - #define v_VCLK_INV (1 << 3) 30 - #define m_REG_CLK_SOURCE (1 << 2) 31 - #define v_REG_CLK_SOURCE_TMDS (0 << 2) 32 - #define v_REG_CLK_SOURCE_SYS (1 << 2) 33 - #define m_POWER (1 << 1) 34 - #define v_PWR_ON (0 << 1) 35 - #define v_PWR_OFF (1 << 1) 36 - #define m_INT_POL (1 << 0) 37 - #define v_INT_POL_HIGH 1 38 - #define v_INT_POL_LOW 0 39 - 40 - #define HDMI_VIDEO_CONTRL1 0x01 41 - #define m_VIDEO_INPUT_FORMAT (7 << 1) 42 - #define m_DE_SOURCE (1 << 0) 43 - #define v_VIDEO_INPUT_FORMAT(n) (n << 1) 44 - #define v_DE_EXTERNAL 1 45 - #define v_DE_INTERNAL 0 46 - enum { 47 - VIDEO_INPUT_SDR_RGB444 = 0, 48 - VIDEO_INPUT_DDR_RGB444 = 5, 49 - VIDEO_INPUT_DDR_YCBCR422 = 6 50 - }; 51 - 52 - #define HDMI_VIDEO_CONTRL2 0x02 53 - #define m_VIDEO_OUTPUT_COLOR (3 << 6) 54 - #define m_VIDEO_INPUT_BITS (3 << 4) 55 - #define m_VIDEO_INPUT_CSP (1 << 0) 56 - #define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6) 57 - #define v_VIDEO_INPUT_BITS(n) (n << 4) 58 - #define v_VIDEO_INPUT_CSP(n) (n << 0) 59 - enum { 60 - VIDEO_INPUT_12BITS = 0, 61 - VIDEO_INPUT_10BITS = 1, 62 - VIDEO_INPUT_REVERT = 2, 63 - VIDEO_INPUT_8BITS = 3, 64 - }; 65 - 66 - #define 
HDMI_VIDEO_CONTRL 0x03 67 - #define m_VIDEO_AUTO_CSC (1 << 7) 68 - #define v_VIDEO_AUTO_CSC(n) (n << 7) 69 - #define m_VIDEO_C0_C2_SWAP (1 << 0) 70 - #define v_VIDEO_C0_C2_SWAP(n) (n << 0) 71 - enum { 72 - C0_C2_CHANGE_ENABLE = 0, 73 - C0_C2_CHANGE_DISABLE = 1, 74 - AUTO_CSC_DISABLE = 0, 75 - AUTO_CSC_ENABLE = 1, 76 - }; 77 - 78 - #define HDMI_VIDEO_CONTRL3 0x04 79 - #define m_COLOR_DEPTH_NOT_INDICATED (1 << 4) 80 - #define m_SOF (1 << 3) 81 - #define m_COLOR_RANGE (1 << 2) 82 - #define m_CSC (1 << 0) 83 - #define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4) 84 - #define v_SOF_ENABLE (0 << 3) 85 - #define v_SOF_DISABLE (1 << 3) 86 - #define v_COLOR_RANGE_FULL (1 << 2) 87 - #define v_COLOR_RANGE_LIMITED (0 << 2) 88 - #define v_CSC_ENABLE 1 89 - #define v_CSC_DISABLE 0 90 - 91 - #define HDMI_AV_MUTE 0x05 92 - #define m_AVMUTE_CLEAR (1 << 7) 93 - #define m_AVMUTE_ENABLE (1 << 6) 94 - #define m_AUDIO_MUTE (1 << 1) 95 - #define m_VIDEO_BLACK (1 << 0) 96 - #define v_AVMUTE_CLEAR(n) (n << 7) 97 - #define v_AVMUTE_ENABLE(n) (n << 6) 98 - #define v_AUDIO_MUTE(n) (n << 1) 99 - #define v_VIDEO_MUTE(n) (n << 0) 100 - 101 - #define HDMI_VIDEO_TIMING_CTL 0x08 102 - #define v_HSYNC_POLARITY(n) (n << 3) 103 - #define v_VSYNC_POLARITY(n) (n << 2) 104 - #define v_INETLACE(n) (n << 1) 105 - #define v_EXTERANL_VIDEO(n) (n << 0) 106 - 107 - #define HDMI_VIDEO_EXT_HTOTAL_L 0x09 108 - #define HDMI_VIDEO_EXT_HTOTAL_H 0x0a 109 - #define HDMI_VIDEO_EXT_HBLANK_L 0x0b 110 - #define HDMI_VIDEO_EXT_HBLANK_H 0x0c 111 - #define HDMI_VIDEO_EXT_HDELAY_L 0x0d 112 - #define HDMI_VIDEO_EXT_HDELAY_H 0x0e 113 - #define HDMI_VIDEO_EXT_HDURATION_L 0x0f 114 - #define HDMI_VIDEO_EXT_HDURATION_H 0x10 115 - #define HDMI_VIDEO_EXT_VTOTAL_L 0x11 116 - #define HDMI_VIDEO_EXT_VTOTAL_H 0x12 117 - #define HDMI_VIDEO_EXT_VBLANK 0x13 118 - #define HDMI_VIDEO_EXT_VDELAY 0x14 119 - #define HDMI_VIDEO_EXT_VDURATION 0x15 120 - 121 - #define HDMI_VIDEO_CSC_COEF 0x18 122 - 123 - #define HDMI_AUDIO_CTRL1 0x35 124 - enum { 125 
- CTS_SOURCE_INTERNAL = 0, 126 - CTS_SOURCE_EXTERNAL = 1, 127 - }; 128 - #define v_CTS_SOURCE(n) (n << 7) 129 - 130 - enum { 131 - DOWNSAMPLE_DISABLE = 0, 132 - DOWNSAMPLE_1_2 = 1, 133 - DOWNSAMPLE_1_4 = 2, 134 - }; 135 - #define v_DOWN_SAMPLE(n) (n << 5) 136 - 137 - enum { 138 - AUDIO_SOURCE_IIS = 0, 139 - AUDIO_SOURCE_SPDIF = 1, 140 - }; 141 - #define v_AUDIO_SOURCE(n) (n << 3) 142 - 143 - #define v_MCLK_ENABLE(n) (n << 2) 144 - enum { 145 - MCLK_128FS = 0, 146 - MCLK_256FS = 1, 147 - MCLK_384FS = 2, 148 - MCLK_512FS = 3, 149 - }; 150 - #define v_MCLK_RATIO(n) (n) 151 - 152 - #define AUDIO_SAMPLE_RATE 0x37 153 - enum { 154 - AUDIO_32K = 0x3, 155 - AUDIO_441K = 0x0, 156 - AUDIO_48K = 0x2, 157 - AUDIO_882K = 0x8, 158 - AUDIO_96K = 0xa, 159 - AUDIO_1764K = 0xc, 160 - AUDIO_192K = 0xe, 161 - }; 162 - 163 - #define AUDIO_I2S_MODE 0x38 164 - enum { 165 - I2S_CHANNEL_1_2 = 1, 166 - I2S_CHANNEL_3_4 = 3, 167 - I2S_CHANNEL_5_6 = 7, 168 - I2S_CHANNEL_7_8 = 0xf 169 - }; 170 - #define v_I2S_CHANNEL(n) ((n) << 2) 171 - enum { 172 - I2S_STANDARD = 0, 173 - I2S_LEFT_JUSTIFIED = 1, 174 - I2S_RIGHT_JUSTIFIED = 2, 175 - }; 176 - #define v_I2S_MODE(n) (n) 177 - 178 - #define AUDIO_I2S_MAP 0x39 179 - #define AUDIO_I2S_SWAPS_SPDIF 0x3a 180 - #define v_SPIDF_FREQ(n) (n) 181 - 182 - #define N_32K 0x1000 183 - #define N_441K 0x1880 184 - #define N_882K 0x3100 185 - #define N_1764K 0x6200 186 - #define N_48K 0x1800 187 - #define N_96K 0x3000 188 - #define N_192K 0x6000 189 - 190 - #define HDMI_AUDIO_CHANNEL_STATUS 0x3e 191 - #define m_AUDIO_STATUS_NLPCM (1 << 7) 192 - #define m_AUDIO_STATUS_USE (1 << 6) 193 - #define m_AUDIO_STATUS_COPYRIGHT (1 << 5) 194 - #define m_AUDIO_STATUS_ADDITION (3 << 2) 195 - #define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0) 196 - #define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7) 197 - #define AUDIO_N_H 0x3f 198 - #define AUDIO_N_M 0x40 199 - #define AUDIO_N_L 0x41 200 - 201 - #define HDMI_AUDIO_CTS_H 0x45 202 - #define HDMI_AUDIO_CTS_M 0x46 203 - #define 
HDMI_AUDIO_CTS_L 0x47 204 - 205 - #define HDMI_DDC_CLK_L 0x4b 206 - #define HDMI_DDC_CLK_H 0x4c 207 - 208 - #define HDMI_EDID_SEGMENT_POINTER 0x4d 209 - #define HDMI_EDID_WORD_ADDR 0x4e 210 - #define HDMI_EDID_FIFO_OFFSET 0x4f 211 - #define HDMI_EDID_FIFO_ADDR 0x50 212 - 213 - #define HDMI_PACKET_SEND_MANUAL 0x9c 214 - #define HDMI_PACKET_SEND_AUTO 0x9d 215 - #define m_PACKET_GCP_EN (1 << 7) 216 - #define m_PACKET_MSI_EN (1 << 6) 217 - #define m_PACKET_SDI_EN (1 << 5) 218 - #define m_PACKET_VSI_EN (1 << 4) 219 - #define v_PACKET_GCP_EN(n) ((n & 1) << 7) 220 - #define v_PACKET_MSI_EN(n) ((n & 1) << 6) 221 - #define v_PACKET_SDI_EN(n) ((n & 1) << 5) 222 - #define v_PACKET_VSI_EN(n) ((n & 1) << 4) 223 - 224 - #define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f 225 - enum { 226 - INFOFRAME_VSI = 0x05, 227 - INFOFRAME_AVI = 0x06, 228 - INFOFRAME_AAI = 0x08, 229 - }; 230 - 231 - #define HDMI_CONTROL_PACKET_ADDR 0xa0 232 - #define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11 233 - enum { 234 - AVI_COLOR_MODE_RGB = 0, 235 - AVI_COLOR_MODE_YCBCR422 = 1, 236 - AVI_COLOR_MODE_YCBCR444 = 2, 237 - AVI_COLORIMETRY_NO_DATA = 0, 238 - 239 - AVI_COLORIMETRY_SMPTE_170M = 1, 240 - AVI_COLORIMETRY_ITU709 = 2, 241 - AVI_COLORIMETRY_EXTENDED = 3, 242 - 243 - AVI_CODED_FRAME_ASPECT_NO_DATA = 0, 244 - AVI_CODED_FRAME_ASPECT_4_3 = 1, 245 - AVI_CODED_FRAME_ASPECT_16_9 = 2, 246 - 247 - ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08, 248 - ACTIVE_ASPECT_RATE_4_3 = 0x09, 249 - ACTIVE_ASPECT_RATE_16_9 = 0x0A, 250 - ACTIVE_ASPECT_RATE_14_9 = 0x0B, 251 - }; 252 - 253 - #define HDMI_HDCP_CTRL 0x52 254 - #define m_HDMI_DVI (1 << 1) 255 - #define v_HDMI_DVI(n) (n << 1) 256 - 257 - #define HDMI_INTERRUPT_MASK1 0xc0 258 - #define HDMI_INTERRUPT_STATUS1 0xc1 259 - #define m_INT_ACTIVE_VSYNC (1 << 5) 260 - #define m_INT_EDID_READY (1 << 2) 261 - 262 - #define HDMI_INTERRUPT_MASK2 0xc2 263 - #define HDMI_INTERRUPT_STATUS2 0xc3 264 - #define m_INT_HDCP_ERR (1 << 7) 265 - #define m_INT_BKSV_FLAG (1 << 6) 266 - #define 
m_INT_HDCP_OK (1 << 4) 267 - 268 - #define HDMI_STATUS 0xc8 269 - #define m_HOTPLUG (1 << 7) 270 - #define m_MASK_INT_HOTPLUG (1 << 5) 271 - #define m_INT_HOTPLUG (1 << 1) 272 - #define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5) 273 - 274 - #define HDMI_COLORBAR 0xc9 275 - 276 - #define HDMI_PHY_SYNC 0xce 277 - #define HDMI_PHY_SYS_CTL 0xe0 278 - #define m_TMDS_CLK_SOURCE (1 << 5) 279 - #define v_TMDS_FROM_PLL (0 << 5) 280 - #define v_TMDS_FROM_GEN (1 << 5) 281 - #define m_PHASE_CLK (1 << 4) 282 - #define v_DEFAULT_PHASE (0 << 4) 283 - #define v_SYNC_PHASE (1 << 4) 284 - #define m_TMDS_CURRENT_PWR (1 << 3) 285 - #define v_TURN_ON_CURRENT (0 << 3) 286 - #define v_CAT_OFF_CURRENT (1 << 3) 287 - #define m_BANDGAP_PWR (1 << 2) 288 - #define v_BANDGAP_PWR_UP (0 << 2) 289 - #define v_BANDGAP_PWR_DOWN (1 << 2) 290 - #define m_PLL_PWR (1 << 1) 291 - #define v_PLL_PWR_UP (0 << 1) 292 - #define v_PLL_PWR_DOWN (1 << 1) 293 - #define m_TMDS_CHG_PWR (1 << 0) 294 - #define v_TMDS_CHG_PWR_UP (0 << 0) 295 - #define v_TMDS_CHG_PWR_DOWN (1 << 0) 296 - 297 - #define HDMI_PHY_CHG_PWR 0xe1 298 - #define v_CLK_CHG_PWR(n) ((n & 1) << 3) 299 - #define v_DATA_CHG_PWR(n) ((n & 7) << 0) 300 - 301 - #define HDMI_PHY_DRIVER 0xe2 302 - #define v_CLK_MAIN_DRIVER(n) (n << 4) 303 - #define v_DATA_MAIN_DRIVER(n) (n << 0) 304 - 305 - #define HDMI_PHY_PRE_EMPHASIS 0xe3 306 - #define v_PRE_EMPHASIS(n) ((n & 7) << 4) 307 - #define v_CLK_PRE_DRIVER(n) ((n & 3) << 2) 308 - #define v_DATA_PRE_DRIVER(n) ((n & 3) << 0) 309 - 310 - #define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7 311 - #define v_FEEDBACK_DIV_LOW(n) (n & 0xff) 312 - #define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8 313 - #define v_FEEDBACK_DIV_HIGH(n) (n & 1) 314 - 315 - #define HDMI_PHY_PRE_DIV_RATIO 0xed 316 - #define v_PRE_DIV_RATIO(n) (n & 0x1f) 317 - 318 - #define HDMI_CEC_CTRL 0xd0 319 - #define m_ADJUST_FOR_HISENSE (1 << 6) 320 - #define m_REJECT_RX_BROADCAST (1 << 5) 321 - #define m_BUSFREETIME_ENABLE (1 << 2) 322 - #define m_REJECT_RX (1 << 1) 
323 - #define m_START_TX (1 << 0) 324 - 325 - #define HDMI_CEC_DATA 0xd1 326 - #define HDMI_CEC_TX_OFFSET 0xd2 327 - #define HDMI_CEC_RX_OFFSET 0xd3 328 - #define HDMI_CEC_CLK_H 0xd4 329 - #define HDMI_CEC_CLK_L 0xd5 330 - #define HDMI_CEC_TX_LENGTH 0xd6 331 - #define HDMI_CEC_RX_LENGTH 0xd7 332 - #define HDMI_CEC_TX_INT_MASK 0xd8 333 - #define m_TX_DONE (1 << 3) 334 - #define m_TX_NOACK (1 << 2) 335 - #define m_TX_BROADCAST_REJ (1 << 1) 336 - #define m_TX_BUSNOTFREE (1 << 0) 337 - 338 - #define HDMI_CEC_RX_INT_MASK 0xd9 339 - #define m_RX_LA_ERR (1 << 4) 340 - #define m_RX_GLITCH (1 << 3) 341 - #define m_RX_DONE (1 << 0) 342 - 343 - #define HDMI_CEC_TX_INT 0xda 344 - #define HDMI_CEC_RX_INT 0xdb 345 - #define HDMI_CEC_BUSFREETIME_L 0xdc 346 - #define HDMI_CEC_BUSFREETIME_H 0xdd 347 - #define HDMI_CEC_LOGICADDR 0xde 348 - 349 - #endif /* __INNO_HDMI_H__ */
+10 -19
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
··· 146 146 mutex_unlock(&vop2->vop2_lock); 147 147 } 148 148 149 - /* 150 - * Note: 151 - * The write mask function is documented but missing on rk3566/8, writes 152 - * to these bits have no effect. For newer soc(rk3588 and following) the 153 - * write mask is needed for register writes. 154 - * 155 - * GLB_CFG_DONE_EN has no write mask bit. 156 - * 157 - */ 158 - static void vop2_cfg_done(struct vop2_video_port *vp) 159 - { 160 - struct vop2 *vop2 = vp->vop2; 161 - u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN; 162 - 163 - val |= BIT(vp->id) | (BIT(vp->id) << 16); 164 - 165 - regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val); 166 - } 167 - 168 149 static void vop2_win_disable(struct vop2_win *win) 169 150 { 170 151 vop2_win_write(win, VOP2_WIN_ENABLE, 0); ··· 834 853 835 854 if (vop2->version == VOP_VERSION_RK3588) 836 855 rk3588_vop2_power_domain_enable_all(vop2); 856 + 857 + if (vop2->version <= VOP_VERSION_RK3588) { 858 + vop2->old_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); 859 + vop2->old_port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); 860 + } 837 861 838 862 vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN); 839 863 ··· 2408 2422 break; 2409 2423 } 2410 2424 } 2425 + 2426 + if (!vp->primary_plane) 2427 + return dev_err_probe(drm->dev, -ENOENT, 2428 + "no primary plane for vp %d\n", i); 2411 2429 } 2412 2430 2413 2431 /* Register all unused window as overlay plane */ ··· 2714 2724 return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n"); 2715 2725 2716 2726 mutex_init(&vop2->vop2_lock); 2727 + mutex_init(&vop2->ovl_lock); 2717 2728 2718 2729 ret = devm_request_irq(dev, vop2->irq, vop2_isr, IRQF_SHARED, dev_name(dev), vop2); 2719 2730 if (ret)
+33
drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
··· 334 334 /* optional internal rgb encoder */ 335 335 struct rockchip_rgb *rgb; 336 336 337 + /* 338 + * Used to record layer selection configuration on rk356x/rk3588 339 + * as register RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL are 340 + * shared for all the Video Ports. 341 + */ 342 + u32 old_layer_sel; 343 + u32 old_port_sel; 344 + /* 345 + * Ensure that the updates to these two registers(RKK3568_OVL_LAYER_SEL/RK3568_OVL_PORT_SEL) 346 + * take effect in sequence. 347 + */ 348 + struct mutex ovl_lock; 349 + 337 350 /* must be put at the end of the struct */ 338 351 struct vop2_win win[]; 339 352 }; ··· 740 727 #define RK3588_OVL_PORT_SEL__CLUSTER2 GENMASK(21, 20) 741 728 #define RK3568_OVL_PORT_SEL__CLUSTER1 GENMASK(19, 18) 742 729 #define RK3568_OVL_PORT_SEL__CLUSTER0 GENMASK(17, 16) 730 + #define RK3588_OVL_PORT_SET__PORT3_MUX GENMASK(15, 12) 743 731 #define RK3568_OVL_PORT_SET__PORT2_MUX GENMASK(11, 8) 744 732 #define RK3568_OVL_PORT_SET__PORT1_MUX GENMASK(7, 4) 745 733 #define RK3568_OVL_PORT_SET__PORT0_MUX GENMASK(3, 0) ··· 843 829 static inline struct vop2_win *to_vop2_win(struct drm_plane *p) 844 830 { 845 831 return container_of(p, struct vop2_win, base); 832 + } 833 + 834 + /* 835 + * Note: 836 + * The write mask function is documented but missing on rk3566/8, writes 837 + * to these bits have no effect. For newer soc(rk3588 and following) the 838 + * write mask is needed for register writes. 839 + * 840 + * GLB_CFG_DONE_EN has no write mask bit. 841 + * 842 + */ 843 + static inline void vop2_cfg_done(struct vop2_video_port *vp) 844 + { 845 + struct vop2 *vop2 = vp->vop2; 846 + u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN; 847 + 848 + val |= BIT(vp->id) | (BIT(vp->id) << 16); 849 + 850 + regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val); 846 851 } 847 852 848 853 #endif /* _ROCKCHIP_DRM_VOP2_H */
+29 -39
drivers/gpu/drm/rockchip/rockchip_lvds.c
··· 56 56 struct drm_device *drm_dev; 57 57 struct drm_panel *panel; 58 58 struct drm_bridge *bridge; 59 - struct drm_connector connector; 60 59 struct rockchip_encoder encoder; 61 60 struct dev_pin_info *pins; 62 61 }; 63 62 64 - static inline struct rockchip_lvds *connector_to_lvds(struct drm_connector *connector) 63 + static inline struct rockchip_lvds *brige_to_lvds(struct drm_bridge *bridge) 65 64 { 66 - return container_of(connector, struct rockchip_lvds, connector); 65 + return (struct rockchip_lvds *)bridge->driver_private; 67 66 } 68 67 69 68 static inline struct rockchip_lvds *encoder_to_lvds(struct drm_encoder *encoder) ··· 105 106 return -EINVAL; 106 107 } 107 108 108 - static const struct drm_connector_funcs rockchip_lvds_connector_funcs = { 109 - .fill_modes = drm_helper_probe_single_connector_modes, 110 - .destroy = drm_connector_cleanup, 111 - .reset = drm_atomic_helper_connector_reset, 112 - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 113 - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 114 - }; 115 - 116 - static int rockchip_lvds_connector_get_modes(struct drm_connector *connector) 109 + static int 110 + rockchip_lvds_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) 117 111 { 118 - struct rockchip_lvds *lvds = connector_to_lvds(connector); 112 + struct rockchip_lvds *lvds = brige_to_lvds(bridge); 119 113 struct drm_panel *panel = lvds->panel; 120 114 121 115 return drm_panel_get_modes(panel, connector); 122 116 } 123 117 124 118 static const 125 - struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = { 126 - .get_modes = rockchip_lvds_connector_get_modes, 119 + struct drm_bridge_funcs rockchip_lvds_bridge_funcs = { 120 + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, 121 + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, 122 + .atomic_reset = drm_atomic_helper_bridge_reset, 123 + .get_modes = 
rockchip_lvds_bridge_get_modes, 127 124 }; 128 125 129 126 static int ··· 601 606 } 602 607 603 608 drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs); 604 - connector = &lvds->connector; 605 609 606 610 if (lvds->panel) { 607 - connector->dpms = DRM_MODE_DPMS_OFF; 608 - ret = drm_connector_init(drm_dev, connector, 609 - &rockchip_lvds_connector_funcs, 610 - DRM_MODE_CONNECTOR_LVDS); 611 - if (ret < 0) { 612 - drm_err(drm_dev, 613 - "failed to initialize connector: %d\n", ret); 611 + lvds->bridge = drm_panel_bridge_add_typed(lvds->panel, DRM_MODE_CONNECTOR_LVDS); 612 + if (IS_ERR(lvds->bridge)) { 613 + ret = PTR_ERR(lvds->bridge); 614 614 goto err_free_encoder; 615 615 } 616 + } 616 617 617 - drm_connector_helper_add(connector, 618 - &rockchip_lvds_connector_helper_funcs); 619 - } else { 620 - ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 621 - DRM_BRIDGE_ATTACH_NO_CONNECTOR); 618 + if (lvds->bridge) { 619 + lvds->bridge->driver_private = lvds; 620 + lvds->bridge->ops = DRM_BRIDGE_OP_MODES; 621 + lvds->bridge->funcs = &rockchip_lvds_bridge_funcs; 622 + 623 + ret = drm_bridge_attach(encoder, lvds->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); 622 624 if (ret) 623 - goto err_free_encoder; 625 + goto err_free_bridge; 624 626 625 627 connector = drm_bridge_connector_init(lvds->drm_dev, encoder); 626 628 if (IS_ERR(connector)) { ··· 625 633 "failed to initialize bridge connector: %pe\n", 626 634 connector); 627 635 ret = PTR_ERR(connector); 628 - goto err_free_encoder; 636 + goto err_free_bridge; 629 637 } 630 - } 631 638 632 - ret = drm_connector_attach_encoder(connector, encoder); 633 - if (ret < 0) { 634 - drm_err(drm_dev, "failed to attach encoder: %d\n", ret); 635 - goto err_free_connector; 639 + ret = drm_connector_attach_encoder(connector, encoder); 640 + if (ret < 0) { 641 + drm_err(drm_dev, "failed to attach encoder: %d\n", ret); 642 + goto err_free_bridge; 643 + } 636 644 } 637 645 638 646 pm_runtime_enable(dev); ··· 641 649 642 650 return 
0; 643 651 644 - err_free_connector: 645 - drm_connector_cleanup(connector); 652 + err_free_bridge: 653 + drm_panel_bridge_remove(lvds->bridge); 646 654 err_free_encoder: 647 655 drm_encoder_cleanup(encoder); 648 656 err_put_remote: ··· 662 670 encoder_funcs = lvds->soc_data->helper_funcs; 663 671 encoder_funcs->disable(&lvds->encoder.encoder); 664 672 pm_runtime_disable(dev); 665 - drm_connector_cleanup(&lvds->connector); 666 - drm_encoder_cleanup(&lvds->encoder.encoder); 667 673 } 668 674 669 675 static const struct component_ops rockchip_lvds_component_ops = {
+83 -6
drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
··· 2052 2052 } 2053 2053 } 2054 2054 2055 + static u32 rk3568_vop2_read_port_mux(struct vop2 *vop2) 2056 + { 2057 + return vop2_readl(vop2, RK3568_OVL_PORT_SEL); 2058 + } 2059 + 2060 + static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2) 2061 + { 2062 + u32 port_mux_sel; 2063 + int ret; 2064 + 2065 + /* 2066 + * Spin until the previous port_mux figuration is done. 2067 + */ 2068 + ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel, 2069 + port_mux_sel == vop2->old_port_sel, 0, 50 * 1000); 2070 + if (ret) 2071 + DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n", 2072 + port_mux_sel, vop2->old_port_sel); 2073 + } 2074 + 2075 + static u32 rk3568_vop2_read_layer_cfg(struct vop2 *vop2) 2076 + { 2077 + return vop2_readl(vop2, RK3568_OVL_LAYER_SEL); 2078 + } 2079 + 2080 + static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg) 2081 + { 2082 + u32 atv_layer_cfg; 2083 + int ret; 2084 + 2085 + /* 2086 + * Spin until the previous layer configuration is done. 
2087 + */ 2088 + ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg, 2089 + atv_layer_cfg == cfg, 0, 50 * 1000); 2090 + if (ret) 2091 + DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n", 2092 + atv_layer_cfg, cfg); 2093 + } 2094 + 2055 2095 static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) 2056 2096 { 2057 2097 struct vop2 *vop2 = vp->vop2; 2058 2098 struct drm_plane *plane; 2059 2099 u32 layer_sel = 0; 2060 2100 u32 port_sel; 2101 + u32 old_layer_sel = 0; 2102 + u32 atv_layer_sel = 0; 2103 + u32 old_port_sel = 0; 2061 2104 u8 layer_id; 2062 2105 u8 old_layer_id; 2063 2106 u8 layer_sel_id; ··· 2112 2069 struct vop2_video_port *vp2 = &vop2->vps[2]; 2113 2070 struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); 2114 2071 2072 + mutex_lock(&vop2->ovl_lock); 2115 2073 ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL); 2116 2074 ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; 2117 2075 ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL; 2118 - ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id); 2119 2076 2120 2077 if (vcstate->yuv_overlay) 2121 2078 ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id); 2122 2079 else 2123 2080 ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id); 2124 2081 2125 - vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); 2126 - 2127 - port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); 2082 + old_port_sel = vop2->old_port_sel; 2083 + port_sel = old_port_sel; 2128 2084 port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT; 2129 2085 2130 2086 if (vp0->nlayers) ··· 2144 2102 else 2145 2103 port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8); 2146 2104 2147 - layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); 2105 + /* Fixed value for rk3588 */ 2106 + if (vop2->version == VOP_VERSION_RK3588) 2107 + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SET__PORT3_MUX, 7); 2108 + 2109 + atv_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); 2110 + old_layer_sel = 
vop2->old_layer_sel; 2111 + layer_sel = old_layer_sel; 2148 2112 2149 2113 ofs = 0; 2150 2114 for (i = 0; i < vp->id; i++) ··· 2234 2186 old_win->data->layer_sel_id[vp->id]); 2235 2187 } 2236 2188 2189 + vop2->old_layer_sel = layer_sel; 2190 + vop2->old_port_sel = port_sel; 2191 + /* 2192 + * As the RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL are shared by all Video Ports, 2193 + * and the configuration take effect by one Video Port's vsync. 2194 + * When performing layer migration or change the zpos of layers, there are two things 2195 + * to be observed and followed: 2196 + * 1. When a layer is migrated from one VP to another, the configuration of the layer 2197 + * can only take effect after the Port mux configuration is enabled. 2198 + * 2199 + * 2. When we change the zpos of layers, we must ensure that the change for the previous 2200 + * VP takes effect before we proceed to change the next VP. Otherwise, the new 2201 + * configuration might overwrite the previous one for the previous VP, or it could 2202 + * lead to the configuration of the previous VP being take effect along with the VSYNC 2203 + * of the new VP. 2204 + */ 2205 + if (layer_sel != old_layer_sel || port_sel != old_port_sel) 2206 + ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id); 2207 + vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); 2208 + 2209 + if (port_sel != old_port_sel) { 2210 + vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); 2211 + vop2_cfg_done(vp); 2212 + rk3568_vop2_wait_for_port_mux_done(vop2); 2213 + } 2214 + 2215 + if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel) 2216 + rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel); 2217 + 2237 2218 vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); 2238 - vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); 2219 + mutex_unlock(&vop2->ovl_lock); 2239 2220 } 2240 2221 2241 2222 static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp)
+40 -41
drivers/gpu/drm/scheduler/sched_main.c
··· 84 84 #define CREATE_TRACE_POINTS 85 85 #include "gpu_scheduler_trace.h" 86 86 87 - #ifdef CONFIG_LOCKDEP 88 - static struct lockdep_map drm_sched_lockdep_map = { 89 - .name = "drm_sched_lockdep_map" 90 - }; 91 - #endif 92 - 93 87 int drm_sched_policy = DRM_SCHED_POLICY_FIFO; 94 88 95 89 /** ··· 263 269 entity = rq->current_entity; 264 270 if (entity) { 265 271 list_for_each_entry_continue(entity, &rq->entities, list) { 266 - if (drm_sched_entity_is_ready(entity)) { 267 - /* If we can't queue yet, preserve the current 268 - * entity in terms of fairness. 269 - */ 270 - if (!drm_sched_can_queue(sched, entity)) { 271 - spin_unlock(&rq->lock); 272 - return ERR_PTR(-ENOSPC); 273 - } 274 - 275 - rq->current_entity = entity; 276 - reinit_completion(&entity->entity_idle); 277 - spin_unlock(&rq->lock); 278 - return entity; 279 - } 272 + if (drm_sched_entity_is_ready(entity)) 273 + goto found; 280 274 } 281 275 } 282 276 283 277 list_for_each_entry(entity, &rq->entities, list) { 284 - if (drm_sched_entity_is_ready(entity)) { 285 - /* If we can't queue yet, preserve the current entity in 286 - * terms of fairness. 287 - */ 288 - if (!drm_sched_can_queue(sched, entity)) { 289 - spin_unlock(&rq->lock); 290 - return ERR_PTR(-ENOSPC); 291 - } 292 - 293 - rq->current_entity = entity; 294 - reinit_completion(&entity->entity_idle); 295 - spin_unlock(&rq->lock); 296 - return entity; 297 - } 278 + if (drm_sched_entity_is_ready(entity)) 279 + goto found; 298 280 299 281 if (entity == rq->current_entity) 300 282 break; ··· 279 309 spin_unlock(&rq->lock); 280 310 281 311 return NULL; 312 + 313 + found: 314 + if (!drm_sched_can_queue(sched, entity)) { 315 + /* 316 + * If scheduler cannot take more jobs signal the caller to not 317 + * consider lower priority queues. 
318 + */ 319 + entity = ERR_PTR(-ENOSPC); 320 + } else { 321 + rq->current_entity = entity; 322 + reinit_completion(&entity->entity_idle); 323 + } 324 + 325 + spin_unlock(&rq->lock); 326 + 327 + return entity; 282 328 } 283 329 284 330 /** ··· 1247 1261 drm_sched_run_job_queue(sched); 1248 1262 } 1249 1263 1264 + static struct workqueue_struct *drm_sched_alloc_wq(const char *name) 1265 + { 1266 + #if (IS_ENABLED(CONFIG_LOCKDEP)) 1267 + static struct lockdep_map map = { 1268 + .name = "drm_sched_lockdep_map" 1269 + }; 1270 + 1271 + /* 1272 + * Avoid leaking a lockdep map on each drm sched creation and 1273 + * destruction by using a single lockdep map for all drm sched 1274 + * allocated submit_wq. 1275 + */ 1276 + 1277 + return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map); 1278 + #else 1279 + return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); 1280 + #endif 1281 + } 1282 + 1250 1283 /** 1251 1284 * drm_sched_init - Init a gpu scheduler instance 1252 1285 * ··· 1306 1301 sched->submit_wq = args->submit_wq; 1307 1302 sched->own_submit_wq = false; 1308 1303 } else { 1309 - #ifdef CONFIG_LOCKDEP 1310 - sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name, 1311 - WQ_MEM_RECLAIM, 1312 - &drm_sched_lockdep_map); 1313 - #else 1314 - sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM); 1315 - #endif 1304 + sched->submit_wq = drm_sched_alloc_wq(args->name); 1316 1305 if (!sched->submit_wq) 1317 1306 return -ENOMEM; 1318 1307
+13 -14
drivers/gpu/drm/sti/sti_hda.c
··· 246 246 struct device dev; 247 247 struct drm_device *drm_dev; 248 248 struct drm_display_mode mode; 249 + struct drm_bridge bridge; 249 250 void __iomem *regs; 250 251 void __iomem *video_dacs_ctrl; 251 252 struct clk *clk_pix; ··· 262 261 263 262 #define to_sti_hda_connector(x) \ 264 263 container_of(x, struct sti_hda_connector, drm_connector) 264 + 265 + static struct sti_hda *drm_bridge_to_sti_hda(struct drm_bridge *bridge) 266 + { 267 + return container_of(bridge, struct sti_hda, bridge); 268 + } 265 269 266 270 static u32 hda_read(struct sti_hda *hda, int offset) 267 271 { ··· 407 401 408 402 static void sti_hda_disable(struct drm_bridge *bridge) 409 403 { 410 - struct sti_hda *hda = bridge->driver_private; 404 + struct sti_hda *hda = drm_bridge_to_sti_hda(bridge); 411 405 u32 val; 412 406 413 407 if (!hda->enabled) ··· 432 426 433 427 static void sti_hda_pre_enable(struct drm_bridge *bridge) 434 428 { 435 - struct sti_hda *hda = bridge->driver_private; 429 + struct sti_hda *hda = drm_bridge_to_sti_hda(bridge); 436 430 u32 val, i, mode_idx; 437 431 u32 src_filter_y, src_filter_c; 438 432 u32 *coef_y, *coef_c; ··· 523 517 const struct drm_display_mode *mode, 524 518 const struct drm_display_mode *adjusted_mode) 525 519 { 526 - struct sti_hda *hda = bridge->driver_private; 520 + struct sti_hda *hda = drm_bridge_to_sti_hda(bridge); 527 521 u32 mode_idx; 528 522 int hddac_rate; 529 523 int ret; ··· 683 677 struct drm_encoder *encoder; 684 678 struct sti_hda_connector *connector; 685 679 struct drm_connector *drm_connector; 686 - struct drm_bridge *bridge; 687 680 int err; 688 681 689 682 /* Set the drm device handle */ ··· 698 693 699 694 connector->hda = hda; 700 695 701 - bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); 702 - if (!bridge) 703 - return -ENOMEM; 704 - 705 - bridge->driver_private = hda; 706 - bridge->funcs = &sti_hda_bridge_funcs; 707 - drm_bridge_attach(encoder, bridge, NULL, 0); 696 + drm_bridge_attach(encoder, &hda->bridge, NULL, 
0); 708 697 709 698 connector->encoder = encoder; 710 699 ··· 744 745 745 746 DRM_INFO("%s\n", __func__); 746 747 747 - hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL); 748 - if (!hda) 749 - return -ENOMEM; 748 + hda = devm_drm_bridge_alloc(dev, struct sti_hda, bridge, &sti_hda_bridge_funcs); 749 + if (IS_ERR(hda)) 750 + return PTR_ERR(hda); 750 751 751 752 hda->dev = pdev->dev; 752 753 hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg");
+12 -14
drivers/gpu/drm/sti/sti_hdmi.c
··· 168 168 #define to_sti_hdmi_connector(x) \ 169 169 container_of(x, struct sti_hdmi_connector, drm_connector) 170 170 171 + static struct sti_hdmi *drm_bridge_to_sti_hdmi(struct drm_bridge *bridge) 172 + { 173 + return container_of(bridge, struct sti_hdmi, bridge); 174 + } 175 + 171 176 static const struct drm_prop_enum_list colorspace_mode_names[] = { 172 177 { HDMI_COLORSPACE_RGB, "rgb" }, 173 178 { HDMI_COLORSPACE_YUV422, "yuv422" }, ··· 754 749 755 750 static void sti_hdmi_disable(struct drm_bridge *bridge) 756 751 { 757 - struct sti_hdmi *hdmi = bridge->driver_private; 752 + struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge); 758 753 759 754 u32 val = hdmi_read(hdmi, HDMI_CFG); 760 755 ··· 886 881 887 882 static void sti_hdmi_pre_enable(struct drm_bridge *bridge) 888 883 { 889 - struct sti_hdmi *hdmi = bridge->driver_private; 884 + struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge); 890 885 891 886 DRM_DEBUG_DRIVER("\n"); 892 887 ··· 941 936 const struct drm_display_mode *mode, 942 937 const struct drm_display_mode *adjusted_mode) 943 938 { 944 - struct sti_hdmi *hdmi = bridge->driver_private; 939 + struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge); 945 940 int ret; 946 941 947 942 DRM_DEBUG_DRIVER("\n"); ··· 1278 1273 struct sti_hdmi_connector *connector; 1279 1274 struct cec_connector_info conn_info; 1280 1275 struct drm_connector *drm_connector; 1281 - struct drm_bridge *bridge; 1282 1276 int err; 1283 1277 1284 1278 /* Set the drm device handle */ ··· 1293 1289 1294 1290 connector->hdmi = hdmi; 1295 1291 1296 - bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); 1297 - if (!bridge) 1298 - return -EINVAL; 1299 - 1300 - bridge->driver_private = hdmi; 1301 - bridge->funcs = &sti_hdmi_bridge_funcs; 1302 - drm_bridge_attach(encoder, bridge, NULL, 0); 1292 + drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0); 1303 1293 1304 1294 connector->encoder = encoder; 1305 1295 ··· 1383 1385 1384 1386 DRM_INFO("%s\n", __func__); 1385 1387 1386 - hdmi 
= devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); 1387 - if (!hdmi) 1388 - return -ENOMEM; 1388 + hdmi = devm_drm_bridge_alloc(dev, struct sti_hdmi, bridge, &sti_hdmi_bridge_funcs); 1389 + if (IS_ERR(hdmi)) 1390 + return PTR_ERR(hdmi); 1389 1391 1390 1392 ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0); 1391 1393 if (ddc) {
+2
drivers/gpu/drm/sti/sti_hdmi.h
··· 12 12 13 13 #include <media/cec-notifier.h> 14 14 15 + #include <drm/drm_bridge.h> 15 16 #include <drm/drm_modes.h> 16 17 #include <drm/drm_property.h> 17 18 ··· 87 86 struct hdmi_audio_params audio; 88 87 struct drm_connector *drm_connector; 89 88 struct cec_notifier *notifier; 89 + struct drm_bridge bridge; 90 90 }; 91 91 92 92 u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
+2 -2
drivers/gpu/drm/tegra/gem.c
··· 523 523 if (tegra->domain) { 524 524 tegra_bo_iommu_unmap(tegra, bo); 525 525 526 - if (gem->import_attach) { 526 + if (drm_gem_is_imported(gem)) { 527 527 dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, 528 528 DMA_TO_DEVICE); 529 - dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach); 529 + dma_buf_detach(gem->dma_buf, gem->import_attach); 530 530 } 531 531 } 532 532
+1 -1
drivers/gpu/drm/udl/udl_drv.c
··· 57 57 58 58 /* GEM hooks */ 59 59 .fops = &udl_driver_fops, 60 - DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT, 60 + DRM_GEM_SHMEM_DRIVER_OPS, 61 61 DRM_FBDEV_SHMEM_DRIVER_OPS, 62 62 63 63 .name = DRIVER_NAME,
+15 -15
drivers/gpu/drm/vgem/vgem_drv.c
··· 32 32 33 33 #include <linux/dma-buf.h> 34 34 #include <linux/module.h> 35 - #include <linux/platform_device.h> 35 + #include <linux/device/faux.h> 36 36 #include <linux/shmem_fs.h> 37 37 #include <linux/vmalloc.h> 38 38 ··· 52 52 53 53 static struct vgem_device { 54 54 struct drm_device drm; 55 - struct platform_device *platform; 55 + struct faux_device *faux_dev; 56 56 } *vgem_device; 57 57 58 58 static int vgem_open(struct drm_device *dev, struct drm_file *file) ··· 127 127 static int __init vgem_init(void) 128 128 { 129 129 int ret; 130 - struct platform_device *pdev; 130 + struct faux_device *fdev; 131 131 132 - pdev = platform_device_register_simple("vgem", -1, NULL, 0); 133 - if (IS_ERR(pdev)) 134 - return PTR_ERR(pdev); 132 + fdev = faux_device_create("vgem", NULL, NULL); 133 + if (!fdev) 134 + return -ENODEV; 135 135 136 - if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { 136 + if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) { 137 137 ret = -ENOMEM; 138 138 goto out_unregister; 139 139 } 140 140 141 - dma_coerce_mask_and_coherent(&pdev->dev, 141 + dma_coerce_mask_and_coherent(&fdev->dev, 142 142 DMA_BIT_MASK(64)); 143 143 144 - vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver, 144 + vgem_device = devm_drm_dev_alloc(&fdev->dev, &vgem_driver, 145 145 struct vgem_device, drm); 146 146 if (IS_ERR(vgem_device)) { 147 147 ret = PTR_ERR(vgem_device); 148 148 goto out_devres; 149 149 } 150 - vgem_device->platform = pdev; 150 + vgem_device->faux_dev = fdev; 151 151 152 152 /* Final step: expose the device/driver to userspace */ 153 153 ret = drm_dev_register(&vgem_device->drm, 0); ··· 157 157 return 0; 158 158 159 159 out_devres: 160 - devres_release_group(&pdev->dev, NULL); 160 + devres_release_group(&fdev->dev, NULL); 161 161 out_unregister: 162 - platform_device_unregister(pdev); 162 + faux_device_destroy(fdev); 163 163 return ret; 164 164 } 165 165 166 166 static void __exit vgem_exit(void) 167 167 { 168 - struct platform_device *pdev = 
vgem_device->platform; 168 + struct faux_device *fdev = vgem_device->faux_dev; 169 169 170 170 drm_dev_unregister(&vgem_device->drm); 171 - devres_release_group(&pdev->dev, NULL); 172 - platform_device_unregister(pdev); 171 + devres_release_group(&fdev->dev, NULL); 172 + faux_device_destroy(fdev); 173 173 } 174 174 175 175 module_init(vgem_init);
-2
drivers/gpu/drm/vkms/vkms_crtc.c
··· 302 302 vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0); 303 303 if (IS_ERR(vkms_out->composer_workq)) 304 304 return ERR_CAST(vkms_out->composer_workq); 305 - if (!vkms_out->composer_workq) 306 - return ERR_PTR(-ENOMEM); 307 305 308 306 return vkms_out; 309 307 }
+14 -14
drivers/gpu/drm/vkms/vkms_drv.c
··· 10 10 */ 11 11 12 12 #include <linux/module.h> 13 - #include <linux/platform_device.h> 13 + #include <linux/device/faux.h> 14 14 #include <linux/dma-mapping.h> 15 15 16 16 #include <drm/clients/drm_client_setup.h> ··· 149 149 static int vkms_create(struct vkms_config *config) 150 150 { 151 151 int ret; 152 - struct platform_device *pdev; 152 + struct faux_device *fdev; 153 153 struct vkms_device *vkms_device; 154 154 const char *dev_name; 155 155 156 156 dev_name = vkms_config_get_device_name(config); 157 - pdev = platform_device_register_simple(dev_name, -1, NULL, 0); 158 - if (IS_ERR(pdev)) 159 - return PTR_ERR(pdev); 157 + fdev = faux_device_create(dev_name, NULL, NULL); 158 + if (!fdev) 159 + return -ENODEV; 160 160 161 - if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { 161 + if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) { 162 162 ret = -ENOMEM; 163 163 goto out_unregister; 164 164 } 165 165 166 - vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver, 166 + vkms_device = devm_drm_dev_alloc(&fdev->dev, &vkms_driver, 167 167 struct vkms_device, drm); 168 168 if (IS_ERR(vkms_device)) { 169 169 ret = PTR_ERR(vkms_device); 170 170 goto out_devres; 171 171 } 172 - vkms_device->platform = pdev; 172 + vkms_device->faux_dev = fdev; 173 173 vkms_device->config = config; 174 174 config->dev = vkms_device; 175 175 ··· 203 203 return 0; 204 204 205 205 out_devres: 206 - devres_release_group(&pdev->dev, NULL); 206 + devres_release_group(&fdev->dev, NULL); 207 207 out_unregister: 208 - platform_device_unregister(pdev); 208 + faux_device_destroy(fdev); 209 209 return ret; 210 210 } 211 211 ··· 231 231 232 232 static void vkms_destroy(struct vkms_config *config) 233 233 { 234 - struct platform_device *pdev; 234 + struct faux_device *fdev; 235 235 236 236 if (!config->dev) { 237 237 DRM_INFO("vkms_device is NULL.\n"); 238 238 return; 239 239 } 240 240 241 - pdev = config->dev->platform; 241 + fdev = config->dev->faux_dev; 242 242 243 243 
drm_dev_unregister(&config->dev->drm); 244 244 drm_atomic_helper_shutdown(&config->dev->drm); 245 - devres_release_group(&pdev->dev, NULL); 246 - platform_device_unregister(pdev); 245 + devres_release_group(&fdev->dev, NULL); 246 + faux_device_destroy(fdev); 247 247 248 248 config->dev = NULL; 249 249 }
+2 -2
drivers/gpu/drm/vkms/vkms_drv.h
··· 232 232 * struct vkms_device - Description of a VKMS device 233 233 * 234 234 * @drm - Base device in DRM 235 - * @platform - Associated platform device 235 + * @faux_dev - Associated faux device 236 236 * @output - Configuration and sub-components of the VKMS device 237 237 * @config: Configuration used in this VKMS device 238 238 */ 239 239 struct vkms_device { 240 240 struct drm_device drm; 241 - struct platform_device *platform; 241 + struct faux_device *faux_dev; 242 242 const struct vkms_config *config; 243 243 }; 244 244
+10 -1
drivers/gpu/trace/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 3 3 config TRACE_GPU_MEM 4 - bool 4 + bool "Enable GPU memory usage tracepoints" 5 + default n 6 + help 7 + Choose this option to enable tracepoints for tracking 8 + global and per-process GPU memory usage. Intended for 9 + performance profiling and required for Android. 10 + 11 + Tracepoint availability varies by GPU driver. 12 + 13 + If in doubt, say "N".
+2
drivers/video/Kconfig
··· 87 87 88 88 endif 89 89 90 + source "drivers/gpu/trace/Kconfig" 91 + 90 92 endmenu
+2 -2
drivers/video/fbdev/core/fbcon.c
··· 953 953 int rows, cols; 954 954 955 955 /* 956 - * If num_registered_fb is zero, this is a call for the dummy part. 956 + * If fbcon_num_registered_fb is zero, this is a call for the dummy part. 957 957 * The frame buffer devices weren't initialized yet. 958 958 */ 959 959 if (!fbcon_num_registered_fb || info_idx == -1) 960 960 return display_desc; 961 961 /* 962 - * Instead of blindly using registered_fb[0], we use info_idx, set by 962 + * Instead of blindly using fbcon_registered_fb[0], we use info_idx, set by 963 963 * fbcon_fb_registered(); 964 964 */ 965 965 info = fbcon_registered_fb[info_idx];
-5
include/drm/drm_accel.h
··· 58 58 int accel_core_init(void); 59 59 void accel_set_device_instance_params(struct device *kdev, int index); 60 60 int accel_open(struct inode *inode, struct file *filp); 61 - void accel_debugfs_init(struct drm_device *dev); 62 61 void accel_debugfs_register(struct drm_device *dev); 63 62 64 63 #else ··· 73 74 } 74 75 75 76 static inline void accel_set_device_instance_params(struct device *kdev, int index) 76 - { 77 - } 78 - 79 - static inline void accel_debugfs_init(struct drm_device *dev) 80 77 { 81 78 } 82 79
+1 -1
include/drm/drm_bridge.h
··· 1051 1051 */ 1052 1052 DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER = BIT(7), 1053 1053 /** 1054 - * @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER: The bridge requires CEC notifier 1054 + * @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER: The bridge requires CEC adapter 1055 1055 * to be present. 1056 1056 */ 1057 1057 DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8),
+11
include/drm/drm_debugfs.h
··· 153 153 154 154 int drm_debugfs_gpuva_info(struct seq_file *m, 155 155 struct drm_gpuvm *gpuvm); 156 + 157 + void drm_debugfs_clients_add(struct drm_file *file); 158 + void drm_debugfs_clients_remove(struct drm_file *file); 156 159 #else 157 160 static inline void drm_debugfs_create_files(const struct drm_info_list *files, 158 161 int count, struct dentry *root, ··· 183 180 struct drm_gpuvm *gpuvm) 184 181 { 185 182 return 0; 183 + } 184 + 185 + static inline void drm_debugfs_clients_add(struct drm_file *file) 186 + { 187 + } 188 + 189 + static inline void drm_debugfs_clients_remove(struct drm_file *file) 190 + { 186 191 } 187 192 #endif 188 193
+17 -2
include/drm/drm_drv.h
··· 572 572 } 573 573 574 574 #if defined(CONFIG_DEBUG_FS) 575 - void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root); 575 + void drm_debugfs_dev_init(struct drm_device *dev); 576 + void drm_debugfs_init_root(void); 577 + void drm_debugfs_remove_root(void); 578 + void drm_debugfs_bridge_params(void); 576 579 #else 577 - static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) 580 + static inline void drm_debugfs_dev_init(struct drm_device *dev) 581 + { 582 + } 583 + 584 + static inline void drm_debugfs_init_root(void) 585 + { 586 + } 587 + 588 + static inline void drm_debugfs_remove_root(void) 589 + { 590 + } 591 + 592 + static inline void drm_debugfs_bridge_params(void) 578 593 { 579 594 } 580 595 #endif
+7
include/drm/drm_file.h
··· 400 400 * @client_name_lock: Protects @client_name. 401 401 */ 402 402 struct mutex client_name_lock; 403 + 404 + /** 405 + * @debugfs_client: 406 + * 407 + * debugfs directory for each client under a drm node. 408 + */ 409 + struct dentry *debugfs_client; 403 410 }; 404 411 405 412 /**
+3 -15
include/drm/drm_gem_shmem_helper.h
··· 293 293 /** 294 294 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations 295 295 * 296 - * This macro provides a shortcut for setting the shmem GEM operations in 297 - * the &drm_driver structure. 296 + * This macro provides a shortcut for setting the shmem GEM operations 297 + * in the &drm_driver structure. Drivers that do not require an s/g table 298 + * for imported buffers should use this. 298 299 */ 299 300 #define DRM_GEM_SHMEM_DRIVER_OPS \ 300 - .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \ 301 - .dumb_create = drm_gem_shmem_dumb_create 302 - 303 - /** 304 - * DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT - shmem GEM operations 305 - * without mapping sg_table on 306 - * imported buffer. 307 - * 308 - * This macro provides a shortcut for setting the shmem GEM operations in 309 - * the &drm_driver structure for drivers that do not require a sg_table on 310 - * imported buffers. 311 - */ 312 - #define DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT \ 313 301 .gem_prime_import = drm_gem_shmem_prime_import_no_map, \ 314 302 .dumb_create = drm_gem_shmem_dumb_create 315 303
+13 -2
include/drm/drm_managed.h
··· 129 129 130 130 void __drmm_workqueue_release(struct drm_device *device, void *wq); 131 131 132 + /** 133 + * drmm_alloc_ordered_workqueue - &drm_device managed alloc_ordered_workqueue() 134 + * @dev: DRM device 135 + * @fmt: printf format for the name of the workqueue 136 + * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) 137 + * @args: args for @fmt 138 + * 139 + * This is a &drm_device-managed version of alloc_ordered_workqueue(). The 140 + * allocated workqueue is automatically destroyed on the final drm_dev_put(). 141 + * 142 + * Returns: workqueue on success, negative ERR_PTR otherwise. 143 + */ 132 144 #define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...) \ 133 145 ({ \ 134 146 struct workqueue_struct *wq = alloc_ordered_workqueue(fmt, flags, ##args); \ 135 147 wq ? ({ \ 136 148 int ret = drmm_add_action_or_reset(dev, __drmm_workqueue_release, wq); \ 137 149 ret ? ERR_PTR(ret) : wq; \ 138 - }) : \ 139 - wq; \ 150 + }) : ERR_PTR(-ENOMEM); \ 140 151 }) 141 152 142 153 #endif
-1
rust/kernel/drm/driver.rs
··· 10 10 drm, 11 11 error::{to_result, Result}, 12 12 prelude::*, 13 - str::CStr, 14 13 types::ARef, 15 14 }; 16 15 use macros::vtable;