Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'topic/drm-header-rework-2014-09-12' of git://anongit.freedesktop.org/drm-intel into drm-next

So here's the header cleanup, rebased on top of drm-next. Two new header
files are created here:

- drivers/gpu/drm/drm_internal.h for non-legacy drm.ko private
declarations.

- include/drm/drm_legacy.h for legacy interfaces used by non-kms drivers.

And of course lots of stuff gets shuffled into the already existing
drivers/gpu/drm/drm_legacy.h for drm.ko internal stuff.

topic branch smoke-tested in drm-intel-nightly for a bit. And the 0day
tester also worked through it (and found a few places I didn't add a
static to functions).

* tag 'topic/drm-header-rework-2014-09-12' of git://anongit.freedesktop.org/drm-intel:
drm: Move DRM_MAGIC_HASH_ORDER into drm_drv.c
drm: Move drm_class to drm_internal.h
drm: Move LOCK_TEST_WITH_RETURN to <drm/drm_legacy.h>
drm: Move legacy buffer structures to <drm/drm_legacy.h>
drm: Move drm_memory.c map support declarations to <drm/drm_legacy.h>
drm: Purge ioctl forward declarations from drmP.h
drm: unexport drm_global_mutex
drm: Move piles of functions from drmP.h to drm_internal.h
drm: Move vblank related module options into drm_irq.c
drm: Drop drm_sysfs_class from drmP.h
drm: Move __drm_pci_free to drm_legacy.h
drm: Create drm legacy driver header
drm: Move drm_legacy_vma_flush into drm_legacy.h
drm: Move sg functions into drm_legacy.h
drm: Move dma functions into drm_legacy.h

+588 -501
+1
drivers/gpu/drm/drm_auth.c
··· 34 34 */ 35 35 36 36 #include <drm/drmP.h> 37 + #include "drm_internal.h" 37 38 38 39 struct drm_magic_entry { 39 40 struct list_head head;
+2 -2
drivers/gpu/drm/drm_bufs.c
··· 473 473 dmah.vaddr = map->handle; 474 474 dmah.busaddr = map->offset; 475 475 dmah.size = map->size; 476 - __drm_pci_free(dev, &dmah); 476 + __drm_legacy_pci_free(dev, &dmah); 477 477 break; 478 478 } 479 479 kfree(map); ··· 1338 1338 task_pid_nr(current)); 1339 1339 return -EINVAL; 1340 1340 } 1341 - drm_free_buffer(dev, buf); 1341 + drm_legacy_free_buffer(dev, buf); 1342 1342 } 1343 1343 1344 1344 return 0;
+1
drivers/gpu/drm/drm_crtc.c
··· 40 40 #include <drm/drm_modeset_lock.h> 41 41 42 42 #include "drm_crtc_internal.h" 43 + #include "drm_internal.h" 43 44 44 45 static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, 45 46 struct drm_mode_fb_cmd2 *r,
+1
drivers/gpu/drm/drm_debugfs.c
··· 36 36 #include <linux/export.h> 37 37 #include <drm/drmP.h> 38 38 #include <drm/drm_edid.h> 39 + #include "drm_internal.h" 39 40 40 41 #if defined(CONFIG_DEBUG_FS) 41 42
+5 -6
drivers/gpu/drm/drm_dma.c
··· 35 35 36 36 #include <linux/export.h> 37 37 #include <drm/drmP.h> 38 + #include "drm_legacy.h" 38 39 39 40 /** 40 41 * Initialize the DMA data. ··· 125 124 * 126 125 * Resets the fields of \p buf. 127 126 */ 128 - void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf) 127 + void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf) 129 128 { 130 129 if (!buf) 131 130 return; ··· 143 142 * 144 143 * Frees each buffer associated with \p file_priv not already on the hardware. 145 144 */ 146 - void drm_core_reclaim_buffers(struct drm_device *dev, 147 - struct drm_file *file_priv) 145 + void drm_legacy_reclaim_buffers(struct drm_device *dev, 146 + struct drm_file *file_priv) 148 147 { 149 148 struct drm_device_dma *dma = dev->dma; 150 149 int i; ··· 155 154 if (dma->buflist[i]->file_priv == file_priv) { 156 155 switch (dma->buflist[i]->list) { 157 156 case DRM_LIST_NONE: 158 - drm_free_buffer(dev, dma->buflist[i]); 157 + drm_legacy_free_buffer(dev, dma->buflist[i]); 159 158 break; 160 159 case DRM_LIST_WAIT: 161 160 dma->buflist[i]->list = DRM_LIST_RECLAIM; ··· 167 166 } 168 167 } 169 168 } 170 - 171 - EXPORT_SYMBOL(drm_core_reclaim_buffers);
+3 -13
drivers/gpu/drm/drm_drv.c
··· 35 35 #include <drm/drmP.h> 36 36 #include <drm/drm_core.h> 37 37 #include "drm_legacy.h" 38 + #include "drm_internal.h" 38 39 39 40 unsigned int drm_debug = 0; /* 1 to enable debug output */ 40 41 EXPORT_SYMBOL(drm_debug); 41 - 42 - int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 43 - 44 - unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ 45 - 46 - /* 47 - * Default to use monotonic timestamps for wait-for-vblank and page-flip 48 - * complete events. 49 - */ 50 - unsigned int drm_timestamp_monotonic = 1; 51 42 52 43 MODULE_AUTHOR(CORE_AUTHOR); 53 44 MODULE_DESCRIPTION(CORE_DESC); ··· 49 58 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); 50 59 51 60 module_param_named(debug, drm_debug, int, 0600); 52 - module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 53 - module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 54 - module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 55 61 56 62 static DEFINE_SPINLOCK(drm_minor_lock); 57 63 static struct idr drm_minors_idr; ··· 89 101 va_end(args); 90 102 } 91 103 EXPORT_SYMBOL(drm_ut_debug_printk); 104 + 105 + #define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ 92 106 93 107 struct drm_master *drm_master_create(struct drm_minor *minor) 94 108 {
+2 -2
drivers/gpu/drm/drm_fops.c
··· 39 39 #include <linux/slab.h> 40 40 #include <linux/module.h> 41 41 #include "drm_legacy.h" 42 + #include "drm_internal.h" 42 43 43 44 /* from BKL pushdown */ 44 45 DEFINE_MUTEX(drm_global_mutex); 45 - EXPORT_SYMBOL(drm_global_mutex); 46 46 47 47 static int drm_open_helper(struct file *filp, struct drm_minor *minor); 48 48 ··· 404 404 drm_master_release(dev, filp); 405 405 406 406 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 407 - drm_core_reclaim_buffers(dev, file_priv); 407 + drm_legacy_reclaim_buffers(dev, file_priv); 408 408 409 409 drm_events_release(file_priv); 410 410
+1
drivers/gpu/drm/drm_gem.c
··· 38 38 #include <linux/dma-buf.h> 39 39 #include <drm/drmP.h> 40 40 #include <drm/drm_vma_manager.h> 41 + #include "drm_internal.h" 41 42 42 43 /** @file drm_gem.c 43 44 *
+1
drivers/gpu/drm/drm_info.c
··· 35 35 36 36 #include <linux/seq_file.h> 37 37 #include <drm/drmP.h> 38 + #include "drm_legacy.h" 38 39 39 40 /** 40 41 * Called when "/proc/dri/.../name" is read.
+95
drivers/gpu/drm/drm_internal.h
··· 1 + /* 2 + * Copyright © 2014 Intel Corporation 3 + * Daniel Vetter <daniel.vetter@ffwll.ch> 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 
22 + */ 23 + 24 + /* drm_irq.c */ 25 + extern unsigned int drm_timestamp_monotonic; 26 + 27 + /* drm_fops.c */ 28 + extern struct mutex drm_global_mutex; 29 + int drm_lastclose(struct drm_device *dev); 30 + 31 + /* drm_pci.c */ 32 + int drm_pci_set_unique(struct drm_device *dev, 33 + struct drm_master *master, 34 + struct drm_unique *u); 35 + int drm_irq_by_busid(struct drm_device *dev, void *data, 36 + struct drm_file *file_priv); 37 + 38 + /* drm_vm.c */ 39 + int drm_vma_info(struct seq_file *m, void *data); 40 + int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 41 + void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma); 42 + 43 + /* drm_prime.c */ 44 + int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, 45 + struct drm_file *file_priv); 46 + int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, 47 + struct drm_file *file_priv); 48 + 49 + void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); 50 + void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); 51 + void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, 52 + struct dma_buf *dma_buf); 53 + 54 + /* drm_info.c */ 55 + int drm_name_info(struct seq_file *m, void *data); 56 + int drm_vm_info(struct seq_file *m, void *data); 57 + int drm_bufs_info(struct seq_file *m, void *data); 58 + int drm_vblank_info(struct seq_file *m, void *data); 59 + int drm_clients_info(struct seq_file *m, void* data); 60 + int drm_gem_name_info(struct seq_file *m, void *data); 61 + 62 + /* drm_irq.c */ 63 + int drm_control(struct drm_device *dev, void *data, 64 + struct drm_file *file_priv); 65 + 66 + /* drm_auth.c */ 67 + int drm_getmagic(struct drm_device *dev, void *data, 68 + struct drm_file *file_priv); 69 + int drm_authmagic(struct drm_device *dev, void *data, 70 + struct drm_file *file_priv); 71 + int drm_remove_magic(struct drm_master *master, drm_magic_t magic); 72 + 73 + /* 
drm_sysfs.c */ 74 + extern struct class *drm_class; 75 + 76 + struct class *drm_sysfs_create(struct module *owner, char *name); 77 + void drm_sysfs_destroy(void); 78 + struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); 79 + int drm_sysfs_connector_add(struct drm_connector *connector); 80 + void drm_sysfs_connector_remove(struct drm_connector *connector); 81 + 82 + /* drm_gem.c */ 83 + int drm_gem_init(struct drm_device *dev); 84 + void drm_gem_destroy(struct drm_device *dev); 85 + int drm_gem_handle_create_tail(struct drm_file *file_priv, 86 + struct drm_gem_object *obj, 87 + u32 *handlep); 88 + int drm_gem_close_ioctl(struct drm_device *dev, void *data, 89 + struct drm_file *file_priv); 90 + int drm_gem_flink_ioctl(struct drm_device *dev, void *data, 91 + struct drm_file *file_priv); 92 + int drm_gem_open_ioctl(struct drm_device *dev, void *data, 93 + struct drm_file *file_priv); 94 + void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 95 + void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+124 -123
drivers/gpu/drm/drm_ioctl.c
··· 31 31 #include <drm/drmP.h> 32 32 #include <drm/drm_core.h> 33 33 #include "drm_legacy.h" 34 + #include "drm_internal.h" 34 35 35 36 #include <linux/pci.h> 36 37 #include <linux/export.h> ··· 41 40 42 41 static int drm_version(struct drm_device *dev, void *data, 43 42 struct drm_file *file_priv); 44 - 45 - #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ 46 - [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} 47 - 48 - /** Ioctl table */ 49 - static const struct drm_ioctl_desc drm_ioctls[] = { 50 - DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), 51 - DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 52 - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 53 - DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 54 - DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), 55 - DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 56 - DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), 57 - DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), 58 - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), 59 - DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 60 - 61 - DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 62 - DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 63 - DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 64 - DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), 65 - 66 - DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 67 - DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), 68 - 69 - DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 70 - DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), 71 - 72 - 
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), 73 - DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), 74 - 75 - DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), 76 - DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 77 - DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 78 - DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH), 79 - DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 80 - DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 81 - DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH), 82 - 83 - DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 84 - DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 85 - 86 - DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH), 87 - DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH), 88 - 89 - DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), 90 - 91 - DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 92 - DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 93 - DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH), 94 - DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH), 95 - DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH), 96 - DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH), 97 - 98 - DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 99 - 100 - #if __OS_HAS_AGP 101 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 102 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 103 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 104 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), 105 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 106 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 107 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 108 - DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 109 - #endif 110 - 111 - DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 112 - DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 113 - 114 - DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), 115 - 116 - DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), 117 - 118 - DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 119 - 120 - DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 121 - DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), 122 - DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), 123 - 124 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 125 - 126 - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 127 - DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 128 - 129 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 130 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 131 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 132 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 133 - 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 134 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 135 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), 136 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), 137 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 138 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 139 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 140 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 141 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 142 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 143 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 144 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 145 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 146 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 147 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 148 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 149 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 150 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 151 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 152 - 
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 153 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 154 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 155 - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 156 - }; 157 - 158 - #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 159 43 160 44 /** 161 45 * Get the bus id. ··· 53 167 * 54 168 * Copies the bus id from drm_device::unique into user space. 55 169 */ 56 - int drm_getunique(struct drm_device *dev, void *data, 170 + static int drm_getunique(struct drm_device *dev, void *data, 57 171 struct drm_file *file_priv) 58 172 { 59 173 struct drm_unique *u = data; ··· 92 206 * version 1.1 or greater. Also note that KMS is all version 1.1 and later and 93 207 * UMS was only ever supported on pci devices. 94 208 */ 95 - int drm_setunique(struct drm_device *dev, void *data, 209 + static int drm_setunique(struct drm_device *dev, void *data, 96 210 struct drm_file *file_priv) 97 211 { 98 212 struct drm_unique *u = data; ··· 164 278 * Searches for the mapping with the specified offset and copies its information 165 279 * into userspace 166 280 */ 167 - int drm_getmap(struct drm_device *dev, void *data, 281 + static int drm_getmap(struct drm_device *dev, void *data, 168 282 struct drm_file *file_priv) 169 283 { 170 284 struct drm_map *map = data; ··· 225 339 * Searches for the client with the specified index and copies its information 226 340 * into userspace 227 341 */ 228 - int drm_getclient(struct drm_device *dev, void *data, 342 + static int drm_getclient(struct drm_device *dev, void *data, 229 343 struct drm_file *file_priv) 230 344 { 231 345 struct drm_client *client = data; ··· 265 379 * 266 380 * \return zero on success or a negative number on failure. 
267 381 */ 268 - int drm_getstats(struct drm_device *dev, void *data, 382 + static int drm_getstats(struct drm_device *dev, void *data, 269 383 struct drm_file *file_priv) 270 384 { 271 385 struct drm_stats *stats = data; ··· 279 393 /** 280 394 * Get device/driver capabilities 281 395 */ 282 - int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) 396 + static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv) 283 397 { 284 398 struct drm_get_cap *req = data; 285 399 ··· 329 443 /** 330 444 * Set device/driver capabilities 331 445 */ 332 - int 446 + static int 333 447 drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv) 334 448 { 335 449 struct drm_set_client_cap *req = data; ··· 363 477 * 364 478 * Sets the requested interface version 365 479 */ 366 - int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) 480 + static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) 367 481 { 368 482 struct drm_set_version *sv = data; 369 483 int if_version, retcode = 0; ··· 508 622 509 623 return 0; 510 624 } 625 + 626 + #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ 627 + [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} 628 + 629 + /** Ioctl table */ 630 + static const struct drm_ioctl_desc drm_ioctls[] = { 631 + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW), 632 + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 633 + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 634 + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 635 + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED), 636 + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 637 + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), 638 + DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), 639 + 
DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), 640 + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 641 + 642 + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 643 + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 644 + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 645 + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), 646 + 647 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 648 + DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), 649 + 650 + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 651 + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), 652 + 653 + DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), 654 + DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), 655 + 656 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), 657 + DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 658 + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 659 + DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH), 660 + DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 661 + DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 662 + DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH), 663 + 664 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 665 + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 666 + 667 + DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH), 668 + DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH), 669 + 670 + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), 671 + 
672 + DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 673 + DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 674 + DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH), 675 + DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH), 676 + DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH), 677 + DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH), 678 + 679 + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 680 + 681 + #if __OS_HAS_AGP 682 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 683 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 684 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 685 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), 686 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 687 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 688 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 689 + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 690 + #endif 691 + 692 + DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 693 + DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 694 + 695 + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), 696 + 697 + DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), 698 + 699 + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 700 + 701 + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 702 + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, 
DRM_AUTH|DRM_UNLOCKED), 703 + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), 704 + 705 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 706 + 707 + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 708 + DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 709 + 710 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 711 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 712 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 713 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 714 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 715 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 716 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED), 717 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), 718 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 719 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 720 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 721 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 722 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 723 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 724 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, 
drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 725 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 726 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 727 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 728 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 729 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 730 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 731 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 732 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 733 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 734 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), 735 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 736 + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), 737 + }; 738 + 739 + #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 511 740 512 741 /** 513 742 * Called whenever a process performs an ioctl on /dev/drm.
+15
drivers/gpu/drm/drm_irq.c
··· 34 34 35 35 #include <drm/drmP.h> 36 36 #include "drm_trace.h" 37 + #include "drm_internal.h" 37 38 38 39 #include <linux/interrupt.h> /* For task queue support */ 39 40 #include <linux/slab.h> ··· 59 58 static bool 60 59 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, 61 60 struct timeval *tvblank, unsigned flags); 61 + 62 + static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ 63 + 64 + /* 65 + * Default to use monotonic timestamps for wait-for-vblank and page-flip 66 + * complete events. 67 + */ 68 + unsigned int drm_timestamp_monotonic = 1; 69 + 70 + static int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ 71 + 72 + module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 73 + module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 74 + module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 62 75 63 76 /** 64 77 * drm_update_vblank_count - update the master vblank counter
+18
drivers/gpu/drm/drm_legacy.h
··· 29 29 * drivers use them, and removing them are API breaks. 30 30 */ 31 31 #include <linux/list.h> 32 + #include <drm/drm_legacy.h> 32 33 33 34 struct agp_memory; 34 35 struct drm_device; ··· 72 71 int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f); 73 72 int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f); 74 73 74 + void drm_legacy_vma_flush(struct drm_device *d); 75 + 75 76 /* 76 77 * AGP Support 77 78 */ ··· 94 91 int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); 95 92 int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); 96 93 int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx); 94 + 95 + /* DMA support */ 96 + int drm_legacy_dma_setup(struct drm_device *dev); 97 + void drm_legacy_dma_takedown(struct drm_device *dev); 98 + void drm_legacy_free_buffer(struct drm_device *dev, 99 + struct drm_buf * buf); 100 + void drm_legacy_reclaim_buffers(struct drm_device *dev, 101 + struct drm_file *filp); 102 + 103 + /* Scatter Gather Support */ 104 + void drm_legacy_sg_cleanup(struct drm_device *dev); 105 + int drm_legacy_sg_alloc(struct drm_device *dev, void *data, 106 + struct drm_file *file_priv); 107 + int drm_legacy_sg_free(struct drm_device *dev, void *data, 108 + struct drm_file *file_priv); 97 109 98 110 #endif /* __DRM_LEGACY_H__ */
+1
drivers/gpu/drm/drm_lock.c
··· 36 36 #include <linux/export.h> 37 37 #include <drm/drmP.h> 38 38 #include "drm_legacy.h" 39 + #include "drm_internal.h" 39 40 40 41 static int drm_notifier(void *priv); 41 42
+6 -6
drivers/gpu/drm/drm_memory.c
··· 120 120 121 121 #endif /* agp */ 122 122 123 - void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) 123 + void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev) 124 124 { 125 125 if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) 126 126 map->handle = agp_remap(map->offset, map->size, dev); 127 127 else 128 128 map->handle = ioremap(map->offset, map->size); 129 129 } 130 - EXPORT_SYMBOL(drm_core_ioremap); 130 + EXPORT_SYMBOL(drm_legacy_ioremap); 131 131 132 - void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) 132 + void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) 133 133 { 134 134 if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) 135 135 map->handle = agp_remap(map->offset, map->size, dev); 136 136 else 137 137 map->handle = ioremap_wc(map->offset, map->size); 138 138 } 139 - EXPORT_SYMBOL(drm_core_ioremap_wc); 139 + EXPORT_SYMBOL(drm_legacy_ioremap_wc); 140 140 141 - void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev) 141 + void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) 142 142 { 143 143 if (!map->handle || !map->size) 144 144 return; ··· 148 148 else 149 149 iounmap(map->handle); 150 150 } 151 - EXPORT_SYMBOL(drm_core_ioremapfree); 151 + EXPORT_SYMBOL(drm_legacy_ioremapfree);
+3 -2
drivers/gpu/drm/drm_pci.c
··· 27 27 #include <linux/dma-mapping.h> 28 28 #include <linux/export.h> 29 29 #include <drm/drmP.h> 30 + #include "drm_legacy.h" 30 31 31 32 /** 32 33 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA. ··· 82 81 * 83 82 * This function is for internal use in the Linux-specific DRM core code. 84 83 */ 85 - void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 84 + void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 86 85 { 87 86 unsigned long addr; 88 87 size_t sz; ··· 106 105 */ 107 106 void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) 108 107 { 109 - __drm_pci_free(dev, dmah); 108 + __drm_legacy_pci_free(dev, dmah); 110 109 kfree(dmah); 111 110 } 112 111
+1
drivers/gpu/drm/drm_prime.c
··· 29 29 #include <linux/export.h> 30 30 #include <linux/dma-buf.h> 31 31 #include <drm/drmP.h> 32 + #include "drm_internal.h" 32 33 33 34 /* 34 35 * DMA-BUF/GEM Object references and lifetime overview:
+5 -4
drivers/gpu/drm/drm_scatter.c
··· 34 34 #include <linux/vmalloc.h> 35 35 #include <linux/slab.h> 36 36 #include <drm/drmP.h> 37 + #include "drm_legacy.h" 37 38 38 39 #define DEBUG_SCATTER 0 39 40 ··· 79 78 # define ScatterHandle(x) (unsigned int)(x) 80 79 #endif 81 80 82 - int drm_sg_alloc(struct drm_device *dev, void *data, 83 - struct drm_file *file_priv) 81 + int drm_legacy_sg_alloc(struct drm_device *dev, void *data, 82 + struct drm_file *file_priv) 84 83 { 85 84 struct drm_scatter_gather *request = data; 86 85 struct drm_sg_mem *entry; ··· 195 194 return -ENOMEM; 196 195 } 197 196 198 - int drm_sg_free(struct drm_device *dev, void *data, 199 - struct drm_file *file_priv) 197 + int drm_legacy_sg_free(struct drm_device *dev, void *data, 198 + struct drm_file *file_priv) 200 199 { 201 200 struct drm_scatter_gather *request = data; 202 201 struct drm_sg_mem *entry;
+1
drivers/gpu/drm/drm_sysfs.c
··· 21 21 #include <drm/drm_sysfs.h> 22 22 #include <drm/drm_core.h> 23 23 #include <drm/drmP.h> 24 + #include "drm_internal.h" 24 25 25 26 #define to_drm_minor(d) dev_get_drvdata(d) 26 27 #define to_drm_connector(d) dev_get_drvdata(d)
+1 -1
drivers/gpu/drm/drm_vm.c
··· 272 272 dmah.vaddr = map->handle; 273 273 dmah.busaddr = map->offset; 274 274 dmah.size = map->size; 275 - __drm_pci_free(dev, &dmah); 275 + __drm_legacy_pci_free(dev, &dmah); 276 276 break; 277 277 } 278 278 kfree(map);
+6 -6
drivers/gpu/drm/i810/i810_dma.c
··· 213 213 (drm_i810_private_t *) dev->dev_private; 214 214 215 215 if (dev_priv->ring.virtual_start) 216 - drm_core_ioremapfree(&dev_priv->ring.map, dev); 216 + drm_legacy_ioremapfree(&dev_priv->ring.map, dev); 217 217 if (dev_priv->hw_status_page) { 218 218 pci_free_consistent(dev->pdev, PAGE_SIZE, 219 219 dev_priv->hw_status_page, ··· 227 227 drm_i810_buf_priv_t *buf_priv = buf->dev_private; 228 228 229 229 if (buf_priv->kernel_virtual && buf->total) 230 - drm_core_ioremapfree(&buf_priv->map, dev); 230 + drm_legacy_ioremapfree(&buf_priv->map, dev); 231 231 } 232 232 } 233 233 return 0; ··· 306 306 buf_priv->map.flags = 0; 307 307 buf_priv->map.mtrr = 0; 308 308 309 - drm_core_ioremap(&buf_priv->map, dev); 309 + drm_legacy_ioremap(&buf_priv->map, dev); 310 310 buf_priv->kernel_virtual = buf_priv->map.handle; 311 311 312 312 } ··· 334 334 DRM_ERROR("can not find sarea!\n"); 335 335 return -EINVAL; 336 336 } 337 - dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); 337 + dev_priv->mmio_map = drm_legacy_findmap(dev, init->mmio_offset); 338 338 if (!dev_priv->mmio_map) { 339 339 dev->dev_private = (void *)dev_priv; 340 340 i810_dma_cleanup(dev); ··· 342 342 return -EINVAL; 343 343 } 344 344 dev->agp_buffer_token = init->buffers_offset; 345 - dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 345 + dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); 346 346 if (!dev->agp_buffer_map) { 347 347 dev->dev_private = (void *)dev_priv; 348 348 i810_dma_cleanup(dev); ··· 363 363 dev_priv->ring.map.flags = 0; 364 364 dev_priv->ring.map.mtrr = 0; 365 365 366 - drm_core_ioremap(&dev_priv->ring.map, dev); 366 + drm_legacy_ioremap(&dev_priv->ring.map, dev); 367 367 368 368 if (dev_priv->ring.map.handle == NULL) { 369 369 dev->dev_private = (void *)dev_priv;
+2
drivers/gpu/drm/i810/i810_drv.h
··· 32 32 #ifndef _I810_DRV_H_ 33 33 #define _I810_DRV_H_ 34 34 35 + #include <drm/drm_legacy.h> 36 + 35 37 /* General customization: 36 38 */ 37 39
+1
drivers/gpu/drm/i915/i915_dma.c
··· 31 31 #include <drm/drmP.h> 32 32 #include <drm/drm_crtc_helper.h> 33 33 #include <drm/drm_fb_helper.h> 34 + #include <drm/drm_legacy.h> 34 35 #include "intel_drv.h" 35 36 #include <drm/i915_drm.h> 36 37 #include "i915_drv.h"
+6 -3
drivers/gpu/drm/i915/i915_drv.h
··· 41 41 #include <linux/i2c.h> 42 42 #include <linux/i2c-algo-bit.h> 43 43 #include <drm/intel-gtt.h> 44 + #include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 44 45 #include <linux/backlight.h> 45 46 #include <linux/hashtable.h> 46 47 #include <linux/intel-iommu.h> ··· 286 285 struct intel_overlay; 287 286 struct intel_overlay_error_state; 288 287 288 + struct drm_local_map; 289 + 289 290 struct drm_i915_master_private { 290 - drm_local_map_t *sarea; 291 + struct drm_local_map *sarea; 291 292 struct _drm_i915_sarea *sarea_priv; 292 293 }; 293 294 #define I915_FENCE_REG_NONE -1 ··· 1450 1447 struct drm_i915_gem_object *semaphore_obj; 1451 1448 uint32_t last_seqno, next_seqno; 1452 1449 1453 - drm_dma_handle_t *status_page_dmah; 1450 + struct drm_dma_handle *status_page_dmah; 1454 1451 struct resource mch_res; 1455 1452 1456 1453 /* protects the irq masks */ ··· 1837 1834 struct drm_file *pin_filp; 1838 1835 1839 1836 /** for phy allocated objects */ 1840 - drm_dma_handle_t *phys_handle; 1837 + struct drm_dma_handle *phys_handle; 1841 1838 1842 1839 union { 1843 1840 struct i915_gem_userptr {
+14 -14
drivers/gpu/drm/mga/mga_dma.c
··· 566 566 return err; 567 567 } 568 568 569 - drm_core_ioremap(dev_priv->warp, dev); 570 - drm_core_ioremap(dev_priv->primary, dev); 571 - drm_core_ioremap(dev->agp_buffer_map, dev); 569 + drm_legacy_ioremap(dev_priv->warp, dev); 570 + drm_legacy_ioremap(dev_priv->primary, dev); 571 + drm_legacy_ioremap(dev->agp_buffer_map, dev); 572 572 573 573 if (!dev_priv->warp->handle || 574 574 !dev_priv->primary->handle || !dev->agp_buffer_map->handle) { ··· 821 821 dev_priv->dma_access = MGA_PAGPXFER; 822 822 dev_priv->wagp_enable = MGA_WAGP_ENABLE; 823 823 824 - dev_priv->status = drm_core_findmap(dev, init->status_offset); 824 + dev_priv->status = drm_legacy_findmap(dev, init->status_offset); 825 825 if (!dev_priv->status) { 826 826 DRM_ERROR("failed to find status page!\n"); 827 827 return -EINVAL; 828 828 } 829 - dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 829 + dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset); 830 830 if (!dev_priv->mmio) { 831 831 DRM_ERROR("failed to find mmio region!\n"); 832 832 return -EINVAL; 833 833 } 834 - dev_priv->warp = drm_core_findmap(dev, init->warp_offset); 834 + dev_priv->warp = drm_legacy_findmap(dev, init->warp_offset); 835 835 if (!dev_priv->warp) { 836 836 DRM_ERROR("failed to find warp microcode region!\n"); 837 837 return -EINVAL; 838 838 } 839 - dev_priv->primary = drm_core_findmap(dev, init->primary_offset); 839 + dev_priv->primary = drm_legacy_findmap(dev, init->primary_offset); 840 840 if (!dev_priv->primary) { 841 841 DRM_ERROR("failed to find primary dma region!\n"); 842 842 return -EINVAL; 843 843 } 844 844 dev->agp_buffer_token = init->buffers_offset; 845 845 dev->agp_buffer_map = 846 - drm_core_findmap(dev, init->buffers_offset); 846 + drm_legacy_findmap(dev, init->buffers_offset); 847 847 if (!dev->agp_buffer_map) { 848 848 DRM_ERROR("failed to find dma buffer region!\n"); 849 849 return -EINVAL; 850 850 } 851 851 852 - drm_core_ioremap(dev_priv->warp, dev); 853 - 
drm_core_ioremap(dev_priv->primary, dev); 854 - drm_core_ioremap(dev->agp_buffer_map, dev); 852 + drm_legacy_ioremap(dev_priv->warp, dev); 853 + drm_legacy_ioremap(dev_priv->primary, dev); 854 + drm_legacy_ioremap(dev->agp_buffer_map, dev); 855 855 } 856 856 857 857 dev_priv->sarea_priv = ··· 937 937 938 938 if ((dev_priv->warp != NULL) 939 939 && (dev_priv->warp->type != _DRM_CONSISTENT)) 940 - drm_core_ioremapfree(dev_priv->warp, dev); 940 + drm_legacy_ioremapfree(dev_priv->warp, dev); 941 941 942 942 if ((dev_priv->primary != NULL) 943 943 && (dev_priv->primary->type != _DRM_CONSISTENT)) 944 - drm_core_ioremapfree(dev_priv->primary, dev); 944 + drm_legacy_ioremapfree(dev_priv->primary, dev); 945 945 946 946 if (dev->agp_buffer_map != NULL) 947 - drm_core_ioremapfree(dev->agp_buffer_map, dev); 947 + drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 948 948 949 949 if (dev_priv->used_new_dma_init) { 950 950 #if __OS_HAS_AGP
+2
drivers/gpu/drm/mga/mga_drv.h
··· 31 31 #ifndef __MGA_DRV_H__ 32 32 #define __MGA_DRV_H__ 33 33 34 + #include <drm/drm_legacy.h> 35 + 34 36 /* General customization: 35 37 */ 36 38
-2
drivers/gpu/drm/mgag200/mgag200_drv.h
··· 190 190 resource_size_t rmmio_size; 191 191 void __iomem *rmmio; 192 192 193 - drm_local_map_t *framebuffer; 194 - 195 193 struct mga_mc mc; 196 194 struct mga_mode_info mode_info; 197 195
+11 -11
drivers/gpu/drm/r128/r128_cce.c
··· 460 460 return -EINVAL; 461 461 } 462 462 463 - dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 463 + dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset); 464 464 if (!dev_priv->mmio) { 465 465 DRM_ERROR("could not find mmio region!\n"); 466 466 dev->dev_private = (void *)dev_priv; 467 467 r128_do_cleanup_cce(dev); 468 468 return -EINVAL; 469 469 } 470 - dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset); 470 + dev_priv->cce_ring = drm_legacy_findmap(dev, init->ring_offset); 471 471 if (!dev_priv->cce_ring) { 472 472 DRM_ERROR("could not find cce ring region!\n"); 473 473 dev->dev_private = (void *)dev_priv; 474 474 r128_do_cleanup_cce(dev); 475 475 return -EINVAL; 476 476 } 477 - dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 477 + dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); 478 478 if (!dev_priv->ring_rptr) { 479 479 DRM_ERROR("could not find ring read pointer!\n"); 480 480 dev->dev_private = (void *)dev_priv; ··· 482 482 return -EINVAL; 483 483 } 484 484 dev->agp_buffer_token = init->buffers_offset; 485 - dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 485 + dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); 486 486 if (!dev->agp_buffer_map) { 487 487 DRM_ERROR("could not find dma buffer region!\n"); 488 488 dev->dev_private = (void *)dev_priv; ··· 492 492 493 493 if (!dev_priv->is_pci) { 494 494 dev_priv->agp_textures = 495 - drm_core_findmap(dev, init->agp_textures_offset); 495 + drm_legacy_findmap(dev, init->agp_textures_offset); 496 496 if (!dev_priv->agp_textures) { 497 497 DRM_ERROR("could not find agp texture region!\n"); 498 498 dev->dev_private = (void *)dev_priv; ··· 507 507 508 508 #if __OS_HAS_AGP 509 509 if (!dev_priv->is_pci) { 510 - drm_core_ioremap_wc(dev_priv->cce_ring, dev); 511 - drm_core_ioremap_wc(dev_priv->ring_rptr, dev); 512 - drm_core_ioremap_wc(dev->agp_buffer_map, dev); 510 + drm_legacy_ioremap_wc(dev_priv->cce_ring, 
dev); 511 + drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); 512 + drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); 513 513 if (!dev_priv->cce_ring->handle || 514 514 !dev_priv->ring_rptr->handle || 515 515 !dev->agp_buffer_map->handle) { ··· 603 603 #if __OS_HAS_AGP 604 604 if (!dev_priv->is_pci) { 605 605 if (dev_priv->cce_ring != NULL) 606 - drm_core_ioremapfree(dev_priv->cce_ring, dev); 606 + drm_legacy_ioremapfree(dev_priv->cce_ring, dev); 607 607 if (dev_priv->ring_rptr != NULL) 608 - drm_core_ioremapfree(dev_priv->ring_rptr, dev); 608 + drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); 609 609 if (dev->agp_buffer_map != NULL) { 610 - drm_core_ioremapfree(dev->agp_buffer_map, dev); 610 + drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 611 611 dev->agp_buffer_map = NULL; 612 612 } 613 613 } else
+2
drivers/gpu/drm/r128/r128_drv.h
··· 36 36 #define __R128_DRV_H__ 37 37 38 38 #include <drm/ati_pcigart.h> 39 + #include <drm/drm_legacy.h> 40 + 39 41 /* General customization: 40 42 */ 41 43 #define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
+12 -12
drivers/gpu/drm/radeon/r600_cp.c
··· 1949 1949 #if __OS_HAS_AGP 1950 1950 if (dev_priv->flags & RADEON_IS_AGP) { 1951 1951 if (dev_priv->cp_ring != NULL) { 1952 - drm_core_ioremapfree(dev_priv->cp_ring, dev); 1952 + drm_legacy_ioremapfree(dev_priv->cp_ring, dev); 1953 1953 dev_priv->cp_ring = NULL; 1954 1954 } 1955 1955 if (dev_priv->ring_rptr != NULL) { 1956 - drm_core_ioremapfree(dev_priv->ring_rptr, dev); 1956 + drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); 1957 1957 dev_priv->ring_rptr = NULL; 1958 1958 } 1959 1959 if (dev->agp_buffer_map != NULL) { 1960 - drm_core_ioremapfree(dev->agp_buffer_map, dev); 1960 + drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 1961 1961 dev->agp_buffer_map = NULL; 1962 1962 } 1963 1963 } else ··· 1968 1968 r600_page_table_cleanup(dev, &dev_priv->gart_info); 1969 1969 1970 1970 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { 1971 - drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); 1971 + drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev); 1972 1972 dev_priv->gart_info.addr = NULL; 1973 1973 } 1974 1974 } ··· 2059 2059 return -EINVAL; 2060 2060 } 2061 2061 2062 - dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 2062 + dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset); 2063 2063 if (!dev_priv->cp_ring) { 2064 2064 DRM_ERROR("could not find cp ring region!\n"); 2065 2065 r600_do_cleanup_cp(dev); 2066 2066 return -EINVAL; 2067 2067 } 2068 - dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 2068 + dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); 2069 2069 if (!dev_priv->ring_rptr) { 2070 2070 DRM_ERROR("could not find ring read pointer!\n"); 2071 2071 r600_do_cleanup_cp(dev); 2072 2072 return -EINVAL; 2073 2073 } 2074 2074 dev->agp_buffer_token = init->buffers_offset; 2075 - dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 2075 + dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); 2076 2076 if (!dev->agp_buffer_map) { 2077 2077 
DRM_ERROR("could not find dma buffer region!\n"); 2078 2078 r600_do_cleanup_cp(dev); ··· 2081 2081 2082 2082 if (init->gart_textures_offset) { 2083 2083 dev_priv->gart_textures = 2084 - drm_core_findmap(dev, init->gart_textures_offset); 2084 + drm_legacy_findmap(dev, init->gart_textures_offset); 2085 2085 if (!dev_priv->gart_textures) { 2086 2086 DRM_ERROR("could not find GART texture region!\n"); 2087 2087 r600_do_cleanup_cp(dev); ··· 2092 2092 #if __OS_HAS_AGP 2093 2093 /* XXX */ 2094 2094 if (dev_priv->flags & RADEON_IS_AGP) { 2095 - drm_core_ioremap_wc(dev_priv->cp_ring, dev); 2096 - drm_core_ioremap_wc(dev_priv->ring_rptr, dev); 2097 - drm_core_ioremap_wc(dev->agp_buffer_map, dev); 2095 + drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); 2096 + drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); 2097 + drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); 2098 2098 if (!dev_priv->cp_ring->handle || 2099 2099 !dev_priv->ring_rptr->handle || 2100 2100 !dev->agp_buffer_map->handle) { ··· 2235 2235 dev_priv->gart_info.mapping.size = 2236 2236 dev_priv->gart_info.table_size; 2237 2237 2238 - drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); 2238 + drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev); 2239 2239 if (!dev_priv->gart_info.mapping.handle) { 2240 2240 DRM_ERROR("ioremap failed.\n"); 2241 2241 r600_do_cleanup_cp(dev);
+12 -12
drivers/gpu/drm/radeon/radeon_cp.c
··· 1305 1305 return -EINVAL; 1306 1306 } 1307 1307 1308 - dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset); 1308 + dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset); 1309 1309 if (!dev_priv->cp_ring) { 1310 1310 DRM_ERROR("could not find cp ring region!\n"); 1311 1311 radeon_do_cleanup_cp(dev); 1312 1312 return -EINVAL; 1313 1313 } 1314 - dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset); 1314 + dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); 1315 1315 if (!dev_priv->ring_rptr) { 1316 1316 DRM_ERROR("could not find ring read pointer!\n"); 1317 1317 radeon_do_cleanup_cp(dev); 1318 1318 return -EINVAL; 1319 1319 } 1320 1320 dev->agp_buffer_token = init->buffers_offset; 1321 - dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); 1321 + dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); 1322 1322 if (!dev->agp_buffer_map) { 1323 1323 DRM_ERROR("could not find dma buffer region!\n"); 1324 1324 radeon_do_cleanup_cp(dev); ··· 1327 1327 1328 1328 if (init->gart_textures_offset) { 1329 1329 dev_priv->gart_textures = 1330 - drm_core_findmap(dev, init->gart_textures_offset); 1330 + drm_legacy_findmap(dev, init->gart_textures_offset); 1331 1331 if (!dev_priv->gart_textures) { 1332 1332 DRM_ERROR("could not find GART texture region!\n"); 1333 1333 radeon_do_cleanup_cp(dev); ··· 1337 1337 1338 1338 #if __OS_HAS_AGP 1339 1339 if (dev_priv->flags & RADEON_IS_AGP) { 1340 - drm_core_ioremap_wc(dev_priv->cp_ring, dev); 1341 - drm_core_ioremap_wc(dev_priv->ring_rptr, dev); 1342 - drm_core_ioremap_wc(dev->agp_buffer_map, dev); 1340 + drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); 1341 + drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); 1342 + drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); 1343 1343 if (!dev_priv->cp_ring->handle || 1344 1344 !dev_priv->ring_rptr->handle || 1345 1345 !dev->agp_buffer_map->handle) { ··· 1475 1475 dev_priv->gart_info.mapping.size = 1476 1476 
dev_priv->gart_info.table_size; 1477 1477 1478 - drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev); 1478 + drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev); 1479 1479 dev_priv->gart_info.addr = 1480 1480 dev_priv->gart_info.mapping.handle; 1481 1481 ··· 1569 1569 #if __OS_HAS_AGP 1570 1570 if (dev_priv->flags & RADEON_IS_AGP) { 1571 1571 if (dev_priv->cp_ring != NULL) { 1572 - drm_core_ioremapfree(dev_priv->cp_ring, dev); 1572 + drm_legacy_ioremapfree(dev_priv->cp_ring, dev); 1573 1573 dev_priv->cp_ring = NULL; 1574 1574 } 1575 1575 if (dev_priv->ring_rptr != NULL) { 1576 - drm_core_ioremapfree(dev_priv->ring_rptr, dev); 1576 + drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); 1577 1577 dev_priv->ring_rptr = NULL; 1578 1578 } 1579 1579 if (dev->agp_buffer_map != NULL) { 1580 - drm_core_ioremapfree(dev->agp_buffer_map, dev); 1580 + drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 1581 1581 dev->agp_buffer_map = NULL; 1582 1582 } 1583 1583 } else ··· 1597 1597 1598 1598 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) 1599 1599 { 1600 - drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev); 1600 + drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev); 1601 1601 dev_priv->gart_info.addr = NULL; 1602 1602 } 1603 1603 }
+1
drivers/gpu/drm/radeon/radeon_drv.h
··· 33 33 34 34 #include <linux/firmware.h> 35 35 #include <linux/platform_device.h> 36 + #include <drm/drm_legacy.h> 36 37 37 38 #include <drm/ati_pcigart.h> 38 39 #include "radeon_family.h"
+8 -8
drivers/gpu/drm/savage/savage_bci.c
··· 706 706 return -EINVAL; 707 707 } 708 708 if (init->status_offset != 0) { 709 - dev_priv->status = drm_core_findmap(dev, init->status_offset); 709 + dev_priv->status = drm_legacy_findmap(dev, init->status_offset); 710 710 if (!dev_priv->status) { 711 711 DRM_ERROR("could not find shadow status region!\n"); 712 712 savage_do_cleanup_bci(dev); ··· 717 717 } 718 718 if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { 719 719 dev->agp_buffer_token = init->buffers_offset; 720 - dev->agp_buffer_map = drm_core_findmap(dev, 720 + dev->agp_buffer_map = drm_legacy_findmap(dev, 721 721 init->buffers_offset); 722 722 if (!dev->agp_buffer_map) { 723 723 DRM_ERROR("could not find DMA buffer region!\n"); 724 724 savage_do_cleanup_bci(dev); 725 725 return -EINVAL; 726 726 } 727 - drm_core_ioremap(dev->agp_buffer_map, dev); 727 + drm_legacy_ioremap(dev->agp_buffer_map, dev); 728 728 if (!dev->agp_buffer_map->handle) { 729 729 DRM_ERROR("failed to ioremap DMA buffer region!\n"); 730 730 savage_do_cleanup_bci(dev); ··· 733 733 } 734 734 if (init->agp_textures_offset) { 735 735 dev_priv->agp_textures = 736 - drm_core_findmap(dev, init->agp_textures_offset); 736 + drm_legacy_findmap(dev, init->agp_textures_offset); 737 737 if (!dev_priv->agp_textures) { 738 738 DRM_ERROR("could not find agp texture region!\n"); 739 739 savage_do_cleanup_bci(dev); ··· 756 756 savage_do_cleanup_bci(dev); 757 757 return -EINVAL; 758 758 } 759 - dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); 759 + dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset); 760 760 if (!dev_priv->cmd_dma) { 761 761 DRM_ERROR("could not find command DMA region!\n"); 762 762 savage_do_cleanup_bci(dev); ··· 769 769 savage_do_cleanup_bci(dev); 770 770 return -EINVAL; 771 771 } 772 - drm_core_ioremap(dev_priv->cmd_dma, dev); 772 + drm_legacy_ioremap(dev_priv->cmd_dma, dev); 773 773 if (!dev_priv->cmd_dma->handle) { 774 774 DRM_ERROR("failed to ioremap command " 775 775 "DMA 
region!\n"); ··· 895 895 } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && 896 896 dev_priv->cmd_dma->type == _DRM_AGP && 897 897 dev_priv->dma_type == SAVAGE_DMA_AGP) 898 - drm_core_ioremapfree(dev_priv->cmd_dma, dev); 898 + drm_legacy_ioremapfree(dev_priv->cmd_dma, dev); 899 899 900 900 if (dev_priv->dma_type == SAVAGE_DMA_AGP && 901 901 dev->agp_buffer_map && dev->agp_buffer_map->handle) { 902 - drm_core_ioremapfree(dev->agp_buffer_map, dev); 902 + drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 903 903 /* make sure the next instance (which may be running 904 904 * in PCI mode) doesn't try to use an old 905 905 * agp_buffer_map. */
+2
drivers/gpu/drm/savage/savage_drv.h
··· 26 26 #ifndef __SAVAGE_DRV_H__ 27 27 #define __SAVAGE_DRV_H__ 28 28 29 + #include <drm/drm_legacy.h> 30 + 29 31 #define DRIVER_AUTHOR "Felix Kuehling" 30 32 31 33 #define DRIVER_NAME "savage"
+2
drivers/gpu/drm/sis/sis_drv.h
··· 28 28 #ifndef _SIS_DRV_H_ 29 29 #define _SIS_DRV_H_ 30 30 31 + #include <drm/drm_legacy.h> 32 + 31 33 /* General customization: 32 34 */ 33 35
+2 -2
drivers/gpu/drm/via/via_dma.c
··· 161 161 if (dev_priv->ring.virtual_start) { 162 162 via_cmdbuf_reset(dev_priv); 163 163 164 - drm_core_ioremapfree(&dev_priv->ring.map, dev); 164 + drm_legacy_ioremapfree(&dev_priv->ring.map, dev); 165 165 dev_priv->ring.virtual_start = NULL; 166 166 } 167 167 ··· 200 200 dev_priv->ring.map.flags = 0; 201 201 dev_priv->ring.map.mtrr = 0; 202 202 203 - drm_core_ioremap(&dev_priv->ring.map, dev); 203 + drm_legacy_ioremap(&dev_priv->ring.map, dev); 204 204 205 205 if (dev_priv->ring.map.handle == NULL) { 206 206 via_dma_cleanup(dev);
+2
drivers/gpu/drm/via/via_drv.h
··· 25 25 #define _VIA_DRV_H_ 26 26 27 27 #include <drm/drm_mm.h> 28 + #include <drm/drm_legacy.h> 29 + 28 30 #define DRIVER_AUTHOR "Various" 29 31 30 32 #define DRIVER_NAME "via"
+2 -2
drivers/gpu/drm/via/via_map.c
··· 39 39 return -EINVAL; 40 40 } 41 41 42 - dev_priv->fb = drm_core_findmap(dev, init->fb_offset); 42 + dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset); 43 43 if (!dev_priv->fb) { 44 44 DRM_ERROR("could not find framebuffer!\n"); 45 45 dev->dev_private = (void *)dev_priv; 46 46 via_do_cleanup_map(dev); 47 47 return -EINVAL; 48 48 } 49 - dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset); 49 + dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset); 50 50 if (!dev_priv->mmio) { 51 51 DRM_ERROR("could not find mmio region!\n"); 52 52 dev->dev_private = (void *)dev_priv;
+1
drivers/gpu/drm/via/via_verifier.c
··· 31 31 #include "via_3d_reg.h" 32 32 #include <drm/drmP.h> 33 33 #include <drm/via_drm.h> 34 + #include <drm/drm_legacy.h> 34 35 #include "via_verifier.h" 35 36 #include "via_drv.h" 36 37
+2
include/drm/ati_pcigart.h
··· 1 1 #ifndef DRM_ATI_PCIGART_H 2 2 #define DRM_ATI_PCIGART_H 3 3 4 + #include <drm/drm_legacy.h> 5 + 4 6 /* location of GART table */ 5 7 #define DRM_ATI_GART_MAIN 1 6 8 #define DRM_ATI_GART_FB 2
+9 -270
include/drm/drmP.h
··· 79 79 struct drm_file; 80 80 struct drm_device; 81 81 struct drm_agp_head; 82 + struct drm_local_map; 83 + struct drm_device_dma; 84 + struct drm_dma_handle; 82 85 83 86 struct device_node; 84 87 struct videomode; ··· 141 138 #define DRIVER_MODESET 0x2000 142 139 #define DRIVER_PRIME 0x4000 143 140 #define DRIVER_RENDER 0x8000 144 - 145 - /***********************************************************************/ 146 - /** \name Begin the DRM... */ 147 - /*@{*/ 148 - 149 - #define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ 150 - 151 - /*@}*/ 152 141 153 142 /***********************************************************************/ 154 143 /** \name Macros to make printk easier */ ··· 214 219 #define DRM_IF_VERSION(maj, min) (maj << 16 | min) 215 220 216 221 /** 217 - * Test that the hardware lock is held by the caller, returning otherwise. 218 - * 219 - * \param dev DRM device. 220 - * \param filp file pointer of the caller. 221 - */ 222 - #define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ 223 - do { \ 224 - if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ 225 - _file_priv->master->lock.file_priv != _file_priv) { \ 226 - DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ 227 - __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ 228 - _file_priv->master->lock.file_priv, _file_priv); \ 229 - return -EINVAL; \ 230 - } \ 231 - } while (0) 232 - 233 - /** 234 222 * Ioctl function type. 235 223 * 236 224 * \param inode device inode. ··· 252 274 253 275 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ 254 276 [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} 255 - 256 - /** 257 - * DMA buffer. 
258 - */ 259 - struct drm_buf { 260 - int idx; /**< Index into master buflist */ 261 - int total; /**< Buffer size */ 262 - int order; /**< log-base-2(total) */ 263 - int used; /**< Amount of buffer in use (for DMA) */ 264 - unsigned long offset; /**< Byte offset (used internally) */ 265 - void *address; /**< Address of buffer */ 266 - unsigned long bus_address; /**< Bus address of buffer */ 267 - struct drm_buf *next; /**< Kernel-only: used for free list */ 268 - __volatile__ int waiting; /**< On kernel DMA queue */ 269 - __volatile__ int pending; /**< On hardware DMA queue */ 270 - struct drm_file *file_priv; /**< Private of holding file descr */ 271 - int context; /**< Kernel queue for this buffer */ 272 - int while_locked; /**< Dispatch this buffer while locked */ 273 - enum { 274 - DRM_LIST_NONE = 0, 275 - DRM_LIST_FREE = 1, 276 - DRM_LIST_WAIT = 2, 277 - DRM_LIST_PEND = 3, 278 - DRM_LIST_PRIO = 4, 279 - DRM_LIST_RECLAIM = 5 280 - } list; /**< Which list we're on */ 281 - 282 - int dev_priv_size; /**< Size of buffer private storage */ 283 - void *dev_private; /**< Per-buffer private storage */ 284 - }; 285 - 286 - typedef struct drm_dma_handle { 287 - dma_addr_t busaddr; 288 - void *vaddr; 289 - size_t size; 290 - } drm_dma_handle_t; 291 - 292 - /** 293 - * Buffer entry. There is one of this for each buffer size order. 294 - */ 295 - struct drm_buf_entry { 296 - int buf_size; /**< size */ 297 - int buf_count; /**< number of buffers */ 298 - struct drm_buf *buflist; /**< buffer list */ 299 - int seg_count; 300 - int page_order; 301 - struct drm_dma_handle **seglist; 302 - 303 - int low_mark; /**< Low water mark */ 304 - int high_mark; /**< High water mark */ 305 - }; 306 277 307 278 /* Event queued up for userspace to read */ 308 279 struct drm_pending_event { ··· 329 402 uint32_t user_waiters; 330 403 int idle_has_lock; 331 404 }; 332 - 333 - /** 334 - * DMA data. 
335 - */ 336 - struct drm_device_dma { 337 - 338 - struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ 339 - int buf_count; /**< total number of buffers */ 340 - struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ 341 - int seg_count; 342 - int page_count; /**< number of pages */ 343 - unsigned long *pagelist; /**< page list */ 344 - unsigned long byte_count; 345 - enum { 346 - _DRM_DMA_USE_AGP = 0x01, 347 - _DRM_DMA_USE_SG = 0x02, 348 - _DRM_DMA_USE_FB = 0x04, 349 - _DRM_DMA_USE_PCI_RO = 0x08 350 - } flags; 351 - 352 - }; 353 - 354 - /** 355 - * Scatter-gather memory. 356 - */ 357 - struct drm_sg_mem { 358 - unsigned long handle; 359 - void *virtual; 360 - int pages; 361 - struct page **pagelist; 362 - dma_addr_t *busaddr; 363 - }; 364 - 365 - /** 366 - * Kernel side of a mapping 367 - */ 368 - struct drm_local_map { 369 - resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ 370 - unsigned long size; /**< Requested physical size (bytes) */ 371 - enum drm_map_type type; /**< Type of memory to map */ 372 - enum drm_map_flags flags; /**< Flags */ 373 - void *handle; /**< User-space: "Handle" to pass to mmap() */ 374 - /**< Kernel-space: kernel-virtual address */ 375 - int mtrr; /**< MTRR slot used */ 376 - }; 377 - 378 - typedef struct drm_local_map drm_local_map_t; 379 - 380 - /** 381 - * Mappings list 382 - */ 383 - struct drm_map_list { 384 - struct list_head head; /**< list head */ 385 - struct drm_hash_item hash; 386 - struct drm_local_map *map; /**< mapping */ 387 - uint64_t user_token; 388 - struct drm_master *master; 389 - }; 390 - 391 405 392 406 /** 393 407 * This structure defines the drm_mm memory object, which will be used by the ··· 952 1084 unsigned int cmd, unsigned long arg); 953 1085 extern long drm_compat_ioctl(struct file *filp, 954 1086 unsigned int cmd, unsigned long arg); 955 - extern int drm_lastclose(struct drm_device *dev); 956 1087 extern bool 
drm_ioctl_flags(unsigned int nr, unsigned int *flags); 957 1088 958 1089 /* Device support (drm_fops.h) */ 959 - extern struct mutex drm_global_mutex; 960 1090 extern int drm_open(struct inode *inode, struct file *filp); 961 1091 extern ssize_t drm_read(struct file *filp, char __user *buffer, 962 1092 size_t count, loff_t *offset); ··· 962 1096 963 1097 /* Mapping support (drm_vm.h) */ 964 1098 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); 965 - extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 966 1099 extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma); 967 - extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma); 968 1100 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 969 1101 970 - /* Misc. IOCTL support (drm_ioctl.h) */ 971 - extern int drm_irq_by_busid(struct drm_device *dev, void *data, 972 - struct drm_file *file_priv); 973 - extern int drm_getunique(struct drm_device *dev, void *data, 974 - struct drm_file *file_priv); 975 - extern int drm_setunique(struct drm_device *dev, void *data, 976 - struct drm_file *file_priv); 977 - extern int drm_getmap(struct drm_device *dev, void *data, 978 - struct drm_file *file_priv); 979 - extern int drm_getclient(struct drm_device *dev, void *data, 980 - struct drm_file *file_priv); 981 - extern int drm_getstats(struct drm_device *dev, void *data, 982 - struct drm_file *file_priv); 983 - extern int drm_getcap(struct drm_device *dev, void *data, 984 - struct drm_file *file_priv); 985 - extern int drm_setclientcap(struct drm_device *dev, void *data, 986 - struct drm_file *file_priv); 987 - extern int drm_setversion(struct drm_device *dev, void *data, 988 - struct drm_file *file_priv); 989 - extern int drm_noop(struct drm_device *dev, void *data, 990 - struct drm_file *file_priv); 991 - 992 - /* Authentication IOCTL support (drm_auth.h) */ 993 - extern int drm_getmagic(struct drm_device 
*dev, void *data, 994 - struct drm_file *file_priv); 995 - extern int drm_authmagic(struct drm_device *dev, void *data, 996 - struct drm_file *file_priv); 997 - extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic); 1102 + /* Misc. IOCTL support (drm_ioctl.c) */ 1103 + int drm_noop(struct drm_device *dev, void *data, 1104 + struct drm_file *file_priv); 998 1105 999 1106 /* Cache management (drm_cache.c) */ 1000 1107 void drm_clflush_pages(struct page *pages[], unsigned long num_pages); ··· 979 1140 * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 980 1141 */ 981 1142 982 - /* DMA support (drm_dma.h) */ 983 - extern int drm_legacy_dma_setup(struct drm_device *dev); 984 - extern void drm_legacy_dma_takedown(struct drm_device *dev); 985 - extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); 986 - extern void drm_core_reclaim_buffers(struct drm_device *dev, 987 - struct drm_file *filp); 988 - 989 1143 /* IRQ support (drm_irq.h) */ 990 - extern int drm_control(struct drm_device *dev, void *data, 991 - struct drm_file *file_priv); 992 1144 extern int drm_irq_install(struct drm_device *dev, int irq); 993 1145 extern int drm_irq_uninstall(struct drm_device *dev); 994 1146 ··· 1044 1214 extern void drm_unplug_dev(struct drm_device *dev); 1045 1215 extern unsigned int drm_debug; 1046 1216 1047 - extern int drm_vblank_offdelay; 1048 - extern unsigned int drm_timestamp_precision; 1049 - extern unsigned int drm_timestamp_monotonic; 1050 - 1051 - extern struct class *drm_class; 1052 - 1053 1217 /* Debugfs support */ 1054 1218 #if defined(CONFIG_DEBUG_FS) 1055 1219 extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, ··· 1091 1267 1092 1268 #endif 1093 1269 1094 - /* Info file support */ 1095 - extern int drm_name_info(struct seq_file *m, void *data); 1096 - extern int drm_vm_info(struct seq_file *m, void *data); 1097 - extern int drm_bufs_info(struct seq_file *m, void *data); 1098 - extern int 
drm_vblank_info(struct seq_file *m, void *data); 1099 - extern int drm_clients_info(struct seq_file *m, void* data); 1100 - extern int drm_gem_name_info(struct seq_file *m, void *data); 1101 - 1102 - 1103 1270 extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, 1104 1271 struct drm_gem_object *obj, int flags); 1105 1272 extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, ··· 1102 1287 struct drm_file *file_priv, int prime_fd, uint32_t *handle); 1103 1288 extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); 1104 1289 1105 - extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, 1106 - struct drm_file *file_priv); 1107 - extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, 1108 - struct drm_file *file_priv); 1109 - 1110 1290 extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, 1111 1291 dma_addr_t *addrs, int max_pages); 1112 1292 extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages); ··· 1111 1301 struct drm_device *dev, 1112 1302 uint32_t handle); 1113 1303 1114 - void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); 1115 - void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); 1116 - void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); 1117 1304 1118 - extern int drm_vma_info(struct seq_file *m, void *data); 1119 - 1120 - /* Scatter Gather Support (drm_scatter.h) */ 1121 - extern void drm_legacy_sg_cleanup(struct drm_device *dev); 1122 - extern int drm_sg_alloc(struct drm_device *dev, void *data, 1123 - struct drm_file *file_priv); 1124 - extern int drm_sg_free(struct drm_device *dev, void *data, 1125 - struct drm_file *file_priv); 1126 - 1127 - 1128 - extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, 1129 - size_t align); 1130 - extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t 
* dmah); 1131 - extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 1132 - extern int drm_pci_set_unique(struct drm_device *dev, 1133 - struct drm_master *master, 1134 - struct drm_unique *u); 1135 - 1136 - /* Legacy Support */ 1137 - 1138 - int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, 1139 - unsigned int size, enum drm_map_type type, 1140 - enum drm_map_flags flags, struct drm_local_map **map_p); 1141 - int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); 1142 - int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); 1143 - struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); 1144 - 1145 - int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); 1146 - int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); 1147 - 1148 - void drm_legacy_vma_flush(struct drm_device *d); 1149 - 1150 - void drm_legacy_idlelock_take(struct drm_lock_data *lock); 1151 - void drm_legacy_idlelock_release(struct drm_lock_data *lock); 1305 + extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, 1306 + size_t align); 1307 + extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); 1152 1308 1153 1309 /* sysfs support (drm_sysfs.c) */ 1154 - struct drm_sysfs_class; 1155 - extern struct class *drm_sysfs_create(struct module *owner, char *name); 1156 - extern void drm_sysfs_destroy(void); 1157 - extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); 1158 1310 extern void drm_sysfs_hotplug_event(struct drm_device *dev); 1159 - extern int drm_sysfs_connector_add(struct drm_connector *connector); 1160 - extern void drm_sysfs_connector_remove(struct drm_connector *connector); 1161 1311 1162 1312 /* Graphics Execution Manager library functions (drm_gem.c) */ 1163 - int drm_gem_init(struct drm_device *dev); 1164 - void drm_gem_destroy(struct drm_device *dev); 1165 1313 void 
drm_gem_object_release(struct drm_gem_object *obj); 1166 1314 void drm_gem_object_free(struct kref *kref); 1167 1315 int drm_gem_object_init(struct drm_device *dev, ··· 1158 1390 } 1159 1391 } 1160 1392 1161 - int drm_gem_handle_create_tail(struct drm_file *file_priv, 1162 - struct drm_gem_object *obj, 1163 - u32 *handlep); 1164 1393 int drm_gem_handle_create(struct drm_file *file_priv, 1165 1394 struct drm_gem_object *obj, 1166 1395 u32 *handlep); ··· 1175 1410 struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, 1176 1411 struct drm_file *filp, 1177 1412 u32 handle); 1178 - int drm_gem_close_ioctl(struct drm_device *dev, void *data, 1179 - struct drm_file *file_priv); 1180 - int drm_gem_flink_ioctl(struct drm_device *dev, void *data, 1181 - struct drm_file *file_priv); 1182 - int drm_gem_open_ioctl(struct drm_device *dev, void *data, 1183 - struct drm_file *file_priv); 1184 - void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 1185 - void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 1186 - 1187 - extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev); 1188 - extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); 1189 - extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); 1190 - 1191 - static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, 1192 - unsigned int token) 1193 - { 1194 - struct drm_map_list *_entry; 1195 - list_for_each_entry(_entry, &dev->maplist, head) 1196 - if (_entry->user_token == token) 1197 - return _entry->map; 1198 - return NULL; 1199 - } 1200 - 1201 - static __inline__ void drm_core_dropmap(struct drm_local_map *map) 1202 - { 1203 - } 1204 1413 1205 1414 struct drm_device *drm_dev_alloc(struct drm_driver *driver, 1206 1415 struct device *parent);
+202
include/drm/drm_legacy.h
··· 1 + #ifndef __DRM_DRM_LEGACY_H__ 2 + #define __DRM_DRM_LEGACY_H__ 3 + 4 + /* 5 + * Legacy driver interfaces for the Direct Rendering Manager 6 + * 7 + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. 8 + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. 9 + * Copyright (c) 2009-2010, Code Aurora Forum. 10 + * All rights reserved. 11 + * Copyright © 2014 Intel Corporation 12 + * Daniel Vetter <daniel.vetter@ffwll.ch> 13 + * 14 + * Author: Rickard E. (Rik) Faith <faith@valinux.com> 15 + * Author: Gareth Hughes <gareth@valinux.com> 16 + * 17 + * Permission is hereby granted, free of charge, to any person obtaining a 18 + * copy of this software and associated documentation files (the "Software"), 19 + * to deal in the Software without restriction, including without limitation 20 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 21 + * and/or sell copies of the Software, and to permit persons to whom the 22 + * Software is furnished to do so, subject to the following conditions: 23 + * 24 + * The above copyright notice and this permission notice (including the next 25 + * paragraph) shall be included in all copies or substantial portions of the 26 + * Software. 27 + * 28 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 29 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 30 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 31 + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 32 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 33 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 34 + * OTHER DEALINGS IN THE SOFTWARE. 35 + */ 36 + 37 + 38 + /* 39 + * Legacy Support for paleontologic DRM drivers 40 + * 41 + * If you add a new driver and it uses any of these functions or structures, 42 + * you're doing it terribly wrong. 
43 + */ 44 + 45 + /** 46 + * DMA buffer. 47 + */ 48 + struct drm_buf { 49 + int idx; /**< Index into master buflist */ 50 + int total; /**< Buffer size */ 51 + int order; /**< log-base-2(total) */ 52 + int used; /**< Amount of buffer in use (for DMA) */ 53 + unsigned long offset; /**< Byte offset (used internally) */ 54 + void *address; /**< Address of buffer */ 55 + unsigned long bus_address; /**< Bus address of buffer */ 56 + struct drm_buf *next; /**< Kernel-only: used for free list */ 57 + __volatile__ int waiting; /**< On kernel DMA queue */ 58 + __volatile__ int pending; /**< On hardware DMA queue */ 59 + struct drm_file *file_priv; /**< Private of holding file descr */ 60 + int context; /**< Kernel queue for this buffer */ 61 + int while_locked; /**< Dispatch this buffer while locked */ 62 + enum { 63 + DRM_LIST_NONE = 0, 64 + DRM_LIST_FREE = 1, 65 + DRM_LIST_WAIT = 2, 66 + DRM_LIST_PEND = 3, 67 + DRM_LIST_PRIO = 4, 68 + DRM_LIST_RECLAIM = 5 69 + } list; /**< Which list we're on */ 70 + 71 + int dev_priv_size; /**< Size of buffer private storage */ 72 + void *dev_private; /**< Per-buffer private storage */ 73 + }; 74 + 75 + typedef struct drm_dma_handle { 76 + dma_addr_t busaddr; 77 + void *vaddr; 78 + size_t size; 79 + } drm_dma_handle_t; 80 + 81 + /** 82 + * Buffer entry. There is one of this for each buffer size order. 83 + */ 84 + struct drm_buf_entry { 85 + int buf_size; /**< size */ 86 + int buf_count; /**< number of buffers */ 87 + struct drm_buf *buflist; /**< buffer list */ 88 + int seg_count; 89 + int page_order; 90 + struct drm_dma_handle **seglist; 91 + 92 + int low_mark; /**< Low water mark */ 93 + int high_mark; /**< High water mark */ 94 + }; 95 + 96 + /** 97 + * DMA data. 
98 + */ 99 + struct drm_device_dma { 100 + 101 + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ 102 + int buf_count; /**< total number of buffers */ 103 + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ 104 + int seg_count; 105 + int page_count; /**< number of pages */ 106 + unsigned long *pagelist; /**< page list */ 107 + unsigned long byte_count; 108 + enum { 109 + _DRM_DMA_USE_AGP = 0x01, 110 + _DRM_DMA_USE_SG = 0x02, 111 + _DRM_DMA_USE_FB = 0x04, 112 + _DRM_DMA_USE_PCI_RO = 0x08 113 + } flags; 114 + 115 + }; 116 + 117 + /** 118 + * Scatter-gather memory. 119 + */ 120 + struct drm_sg_mem { 121 + unsigned long handle; 122 + void *virtual; 123 + int pages; 124 + struct page **pagelist; 125 + dma_addr_t *busaddr; 126 + }; 127 + 128 + /** 129 + * Kernel side of a mapping 130 + */ 131 + struct drm_local_map { 132 + resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ 133 + unsigned long size; /**< Requested physical size (bytes) */ 134 + enum drm_map_type type; /**< Type of memory to map */ 135 + enum drm_map_flags flags; /**< Flags */ 136 + void *handle; /**< User-space: "Handle" to pass to mmap() */ 137 + /**< Kernel-space: kernel-virtual address */ 138 + int mtrr; /**< MTRR slot used */ 139 + }; 140 + 141 + typedef struct drm_local_map drm_local_map_t; 142 + 143 + /** 144 + * Mappings list 145 + */ 146 + struct drm_map_list { 147 + struct list_head head; /**< list head */ 148 + struct drm_hash_item hash; 149 + struct drm_local_map *map; /**< mapping */ 150 + uint64_t user_token; 151 + struct drm_master *master; 152 + }; 153 + 154 + int drm_legacy_addmap(struct drm_device *d, resource_size_t offset, 155 + unsigned int size, enum drm_map_type type, 156 + enum drm_map_flags flags, struct drm_local_map **map_p); 157 + int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); 158 + int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); 159 + 
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev); 160 + 161 + int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req); 162 + int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req); 163 + 164 + /** 165 + * Test that the hardware lock is held by the caller, returning otherwise. 166 + * 167 + * \param dev DRM device. 168 + * \param filp file pointer of the caller. 169 + */ 170 + #define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ 171 + do { \ 172 + if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ 173 + _file_priv->master->lock.file_priv != _file_priv) { \ 174 + DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ 175 + __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ 176 + _file_priv->master->lock.file_priv, _file_priv); \ 177 + return -EINVAL; \ 178 + } \ 179 + } while (0) 180 + 181 + void drm_legacy_idlelock_take(struct drm_lock_data *lock); 182 + void drm_legacy_idlelock_release(struct drm_lock_data *lock); 183 + 184 + /* drm_pci.c dma alloc wrappers */ 185 + void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); 186 + 187 + /* drm_memory.c */ 188 + void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); 189 + void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); 190 + void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); 191 + 192 + static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, 193 + unsigned int token) 194 + { 195 + struct drm_map_list *_entry; 196 + list_for_each_entry(_entry, &dev->maplist, head) 197 + if (_entry->user_token == token) 198 + return _entry->map; 199 + return NULL; 200 + } 201 + 202 + #endif /* __DRM_DRM_LEGACY_H__ */