Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: android: ion: Remove ion_handle and ion_client

ion_handle was introduced as an abstraction to represent a reference to
a buffer via an ion_client. As frameworks outside of Ion evolved, the dmabuf
emerged as the preferred standard for use in the kernel. This has made
the ion_handle an unnecessary abstraction and prone to race
conditions. ion_client is also now only used internally. We have enough
mechanisms for race conditions and leaks already so just drop ion_handle
and ion_client. This also includes ripping out most of the debugfs
infrastructure since much of that was tied to clients and handles.
The debugfs infrastructure was prone to give confusing data (orphaned
allocations) so it can be replaced with something better if people
actually want it.

Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by: Laura Abbott
Committed by: Greg Kroah-Hartman
Commit hashes (as rendered by the page, unlabeled): 15c6098c e3b914bc

+51 -805
+7 -46
drivers/staging/android/ion/ion-ioctl.c
··· 21 21 #include "ion.h" 22 22 23 23 union ion_ioctl_arg { 24 - struct ion_fd_data fd; 25 24 struct ion_allocation_data allocation; 26 - struct ion_handle_data handle; 27 25 struct ion_heap_query query; 28 26 }; 29 27 ··· 46 48 static unsigned int ion_ioctl_dir(unsigned int cmd) 47 49 { 48 50 switch (cmd) { 49 - case ION_IOC_FREE: 50 - return _IOC_WRITE; 51 51 default: 52 52 return _IOC_DIR(cmd); 53 53 } ··· 53 57 54 58 long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 55 59 { 56 - struct ion_client *client = filp->private_data; 57 - struct ion_handle *cleanup_handle = NULL; 58 60 int ret = 0; 59 61 unsigned int dir; 60 62 union ion_ioctl_arg data; ··· 80 86 switch (cmd) { 81 87 case ION_IOC_ALLOC: 82 88 { 83 - struct ion_handle *handle; 89 + int fd; 84 90 85 - handle = ion_alloc(client, data.allocation.len, 91 + fd = ion_alloc(data.allocation.len, 86 92 data.allocation.heap_id_mask, 87 93 data.allocation.flags); 88 - if (IS_ERR(handle)) 89 - return PTR_ERR(handle); 94 + if (fd < 0) 95 + return fd; 90 96 91 - data.allocation.handle = handle->id; 97 + data.allocation.fd = fd; 92 98 93 - cleanup_handle = handle; 94 - break; 95 - } 96 - case ION_IOC_FREE: 97 - { 98 - struct ion_handle *handle; 99 - 100 - mutex_lock(&client->lock); 101 - handle = ion_handle_get_by_id_nolock(client, 102 - data.handle.handle); 103 - if (IS_ERR(handle)) { 104 - mutex_unlock(&client->lock); 105 - return PTR_ERR(handle); 106 - } 107 - ion_free_nolock(client, handle); 108 - ion_handle_put_nolock(handle); 109 - mutex_unlock(&client->lock); 110 - break; 111 - } 112 - case ION_IOC_SHARE: 113 - { 114 - struct ion_handle *handle; 115 - 116 - handle = ion_handle_get_by_id(client, data.handle.handle); 117 - if (IS_ERR(handle)) 118 - return PTR_ERR(handle); 119 - data.fd.fd = ion_share_dma_buf_fd(client, handle); 120 - ion_handle_put(handle); 121 - if (data.fd.fd < 0) 122 - ret = data.fd.fd; 123 99 break; 124 100 } 125 101 case ION_IOC_HEAP_QUERY: 126 - ret = 
ion_query_heaps(client, &data.query); 102 + ret = ion_query_heaps(&data.query); 127 103 break; 128 104 default: 129 105 return -ENOTTY; 130 106 } 131 107 132 108 if (dir & _IOC_READ) { 133 - if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { 134 - if (cleanup_handle) 135 - ion_free(client, cleanup_handle); 109 + if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) 136 110 return -EFAULT; 137 - } 138 111 } 139 112 return ret; 140 113 }
+41 -660
drivers/staging/android/ion/ion.c
··· 90 90 91 91 buffer->heap = heap; 92 92 buffer->flags = flags; 93 - kref_init(&buffer->ref); 94 93 95 94 ret = heap->ops->allocate(heap, buffer, len, flags); 96 95 ··· 139 140 kfree(buffer); 140 141 } 141 142 142 - static void _ion_buffer_destroy(struct kref *kref) 143 + static void _ion_buffer_destroy(struct ion_buffer *buffer) 143 144 { 144 - struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); 145 145 struct ion_heap *heap = buffer->heap; 146 146 struct ion_device *dev = buffer->dev; 147 147 ··· 153 155 else 154 156 ion_buffer_destroy(buffer); 155 157 } 156 - 157 - static void ion_buffer_get(struct ion_buffer *buffer) 158 - { 159 - kref_get(&buffer->ref); 160 - } 161 - 162 - static int ion_buffer_put(struct ion_buffer *buffer) 163 - { 164 - return kref_put(&buffer->ref, _ion_buffer_destroy); 165 - } 166 - 167 - static void ion_buffer_add_to_handle(struct ion_buffer *buffer) 168 - { 169 - mutex_lock(&buffer->lock); 170 - buffer->handle_count++; 171 - mutex_unlock(&buffer->lock); 172 - } 173 - 174 - static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) 175 - { 176 - /* 177 - * when a buffer is removed from a handle, if it is not in 178 - * any other handles, copy the taskcomm and the pid of the 179 - * process it's being removed from into the buffer. At this 180 - * point there will be no way to track what processes this buffer is 181 - * being used by, it only exists as a dma_buf file descriptor. 
182 - * The taskcomm and pid can provide a debug hint as to where this fd 183 - * is in the system 184 - */ 185 - mutex_lock(&buffer->lock); 186 - buffer->handle_count--; 187 - BUG_ON(buffer->handle_count < 0); 188 - if (!buffer->handle_count) { 189 - struct task_struct *task; 190 - 191 - task = current->group_leader; 192 - get_task_comm(buffer->task_comm, task); 193 - buffer->pid = task_pid_nr(task); 194 - } 195 - mutex_unlock(&buffer->lock); 196 - } 197 - 198 - static struct ion_handle *ion_handle_create(struct ion_client *client, 199 - struct ion_buffer *buffer) 200 - { 201 - struct ion_handle *handle; 202 - 203 - handle = kzalloc(sizeof(*handle), GFP_KERNEL); 204 - if (!handle) 205 - return ERR_PTR(-ENOMEM); 206 - kref_init(&handle->ref); 207 - RB_CLEAR_NODE(&handle->node); 208 - handle->client = client; 209 - ion_buffer_get(buffer); 210 - ion_buffer_add_to_handle(buffer); 211 - handle->buffer = buffer; 212 - 213 - return handle; 214 - } 215 - 216 - static void ion_handle_kmap_put(struct ion_handle *); 217 - 218 - static void ion_handle_destroy(struct kref *kref) 219 - { 220 - struct ion_handle *handle = container_of(kref, struct ion_handle, ref); 221 - struct ion_client *client = handle->client; 222 - struct ion_buffer *buffer = handle->buffer; 223 - 224 - mutex_lock(&buffer->lock); 225 - while (handle->kmap_cnt) 226 - ion_handle_kmap_put(handle); 227 - mutex_unlock(&buffer->lock); 228 - 229 - idr_remove(&client->idr, handle->id); 230 - if (!RB_EMPTY_NODE(&handle->node)) 231 - rb_erase(&handle->node, &client->handles); 232 - 233 - ion_buffer_remove_from_handle(buffer); 234 - ion_buffer_put(buffer); 235 - 236 - kfree(handle); 237 - } 238 - 239 - static void ion_handle_get(struct ion_handle *handle) 240 - { 241 - kref_get(&handle->ref); 242 - } 243 - 244 - int ion_handle_put_nolock(struct ion_handle *handle) 245 - { 246 - return kref_put(&handle->ref, ion_handle_destroy); 247 - } 248 - 249 - int ion_handle_put(struct ion_handle *handle) 250 - { 251 - struct 
ion_client *client = handle->client; 252 - int ret; 253 - 254 - mutex_lock(&client->lock); 255 - ret = ion_handle_put_nolock(handle); 256 - mutex_unlock(&client->lock); 257 - 258 - return ret; 259 - } 260 - 261 - struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, 262 - int id) 263 - { 264 - struct ion_handle *handle; 265 - 266 - handle = idr_find(&client->idr, id); 267 - if (handle) 268 - ion_handle_get(handle); 269 - 270 - return handle ? handle : ERR_PTR(-EINVAL); 271 - } 272 - 273 - struct ion_handle *ion_handle_get_by_id(struct ion_client *client, 274 - int id) 275 - { 276 - struct ion_handle *handle; 277 - 278 - mutex_lock(&client->lock); 279 - handle = ion_handle_get_by_id_nolock(client, id); 280 - mutex_unlock(&client->lock); 281 - 282 - return handle; 283 - } 284 - 285 - static bool ion_handle_validate(struct ion_client *client, 286 - struct ion_handle *handle) 287 - { 288 - WARN_ON(!mutex_is_locked(&client->lock)); 289 - return idr_find(&client->idr, handle->id) == handle; 290 - } 291 - 292 - static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) 293 - { 294 - int id; 295 - struct rb_node **p = &client->handles.rb_node; 296 - struct rb_node *parent = NULL; 297 - struct ion_handle *entry; 298 - 299 - id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); 300 - if (id < 0) 301 - return id; 302 - 303 - handle->id = id; 304 - 305 - while (*p) { 306 - parent = *p; 307 - entry = rb_entry(parent, struct ion_handle, node); 308 - 309 - if (handle->buffer < entry->buffer) 310 - p = &(*p)->rb_left; 311 - else if (handle->buffer > entry->buffer) 312 - p = &(*p)->rb_right; 313 - else 314 - WARN(1, "%s: buffer already found.", __func__); 315 - } 316 - 317 - rb_link_node(&handle->node, parent, p); 318 - rb_insert_color(&handle->node, &client->handles); 319 - 320 - return 0; 321 - } 322 - 323 - struct ion_handle *ion_alloc(struct ion_client *client, size_t len, 324 - unsigned int heap_id_mask, 325 - unsigned int flags) 326 
- { 327 - struct ion_handle *handle; 328 - struct ion_device *dev = client->dev; 329 - struct ion_buffer *buffer = NULL; 330 - struct ion_heap *heap; 331 - int ret; 332 - 333 - pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__, 334 - len, heap_id_mask, flags); 335 - /* 336 - * traverse the list of heaps available in this system in priority 337 - * order. If the heap type is supported by the client, and matches the 338 - * request of the caller allocate from it. Repeat until allocate has 339 - * succeeded or all heaps have been tried 340 - */ 341 - len = PAGE_ALIGN(len); 342 - 343 - if (!len) 344 - return ERR_PTR(-EINVAL); 345 - 346 - down_read(&dev->lock); 347 - plist_for_each_entry(heap, &dev->heaps, node) { 348 - /* if the caller didn't specify this heap id */ 349 - if (!((1 << heap->id) & heap_id_mask)) 350 - continue; 351 - buffer = ion_buffer_create(heap, dev, len, flags); 352 - if (!IS_ERR(buffer)) 353 - break; 354 - } 355 - up_read(&dev->lock); 356 - 357 - if (buffer == NULL) 358 - return ERR_PTR(-ENODEV); 359 - 360 - if (IS_ERR(buffer)) 361 - return ERR_CAST(buffer); 362 - 363 - handle = ion_handle_create(client, buffer); 364 - 365 - /* 366 - * ion_buffer_create will create a buffer with a ref_cnt of 1, 367 - * and ion_handle_create will take a second reference, drop one here 368 - */ 369 - ion_buffer_put(buffer); 370 - 371 - if (IS_ERR(handle)) 372 - return handle; 373 - 374 - mutex_lock(&client->lock); 375 - ret = ion_handle_add(client, handle); 376 - mutex_unlock(&client->lock); 377 - if (ret) { 378 - ion_handle_put(handle); 379 - handle = ERR_PTR(ret); 380 - } 381 - 382 - return handle; 383 - } 384 - EXPORT_SYMBOL(ion_alloc); 385 - 386 - void ion_free_nolock(struct ion_client *client, 387 - struct ion_handle *handle) 388 - { 389 - if (!ion_handle_validate(client, handle)) { 390 - WARN(1, "%s: invalid handle passed to free.\n", __func__); 391 - return; 392 - } 393 - ion_handle_put_nolock(handle); 394 - } 395 - 396 - void ion_free(struct 
ion_client *client, struct ion_handle *handle) 397 - { 398 - BUG_ON(client != handle->client); 399 - 400 - mutex_lock(&client->lock); 401 - ion_free_nolock(client, handle); 402 - mutex_unlock(&client->lock); 403 - } 404 - EXPORT_SYMBOL(ion_free); 405 158 406 159 static void *ion_buffer_kmap_get(struct ion_buffer *buffer) 407 160 { ··· 181 432 buffer->vaddr = NULL; 182 433 } 183 434 } 184 - 185 - static void ion_handle_kmap_put(struct ion_handle *handle) 186 - { 187 - struct ion_buffer *buffer = handle->buffer; 188 - 189 - if (!handle->kmap_cnt) { 190 - WARN(1, "%s: Double unmap detected! bailing...\n", __func__); 191 - return; 192 - } 193 - handle->kmap_cnt--; 194 - if (!handle->kmap_cnt) 195 - ion_buffer_kmap_put(buffer); 196 - } 197 - 198 - static struct mutex debugfs_mutex; 199 - static struct rb_root *ion_root_client; 200 - static int is_client_alive(struct ion_client *client) 201 - { 202 - struct rb_node *node; 203 - struct ion_client *tmp; 204 - struct ion_device *dev; 205 - 206 - node = ion_root_client->rb_node; 207 - dev = container_of(ion_root_client, struct ion_device, clients); 208 - 209 - down_read(&dev->lock); 210 - while (node) { 211 - tmp = rb_entry(node, struct ion_client, node); 212 - if (client < tmp) { 213 - node = node->rb_left; 214 - } else if (client > tmp) { 215 - node = node->rb_right; 216 - } else { 217 - up_read(&dev->lock); 218 - return 1; 219 - } 220 - } 221 - 222 - up_read(&dev->lock); 223 - return 0; 224 - } 225 - 226 - static int ion_debug_client_show(struct seq_file *s, void *unused) 227 - { 228 - struct ion_client *client = s->private; 229 - struct rb_node *n; 230 - size_t sizes[ION_NUM_HEAP_IDS] = {0}; 231 - const char *names[ION_NUM_HEAP_IDS] = {NULL}; 232 - int i; 233 - 234 - mutex_lock(&debugfs_mutex); 235 - if (!is_client_alive(client)) { 236 - seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n", 237 - client); 238 - mutex_unlock(&debugfs_mutex); 239 - return 0; 240 - } 241 - 242 - mutex_lock(&client->lock); 243 - 
for (n = rb_first(&client->handles); n; n = rb_next(n)) { 244 - struct ion_handle *handle = rb_entry(n, struct ion_handle, 245 - node); 246 - unsigned int id = handle->buffer->heap->id; 247 - 248 - if (!names[id]) 249 - names[id] = handle->buffer->heap->name; 250 - sizes[id] += handle->buffer->size; 251 - } 252 - mutex_unlock(&client->lock); 253 - mutex_unlock(&debugfs_mutex); 254 - 255 - seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); 256 - for (i = 0; i < ION_NUM_HEAP_IDS; i++) { 257 - if (!names[i]) 258 - continue; 259 - seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); 260 - } 261 - return 0; 262 - } 263 - 264 - static int ion_debug_client_open(struct inode *inode, struct file *file) 265 - { 266 - return single_open(file, ion_debug_client_show, inode->i_private); 267 - } 268 - 269 - static const struct file_operations debug_client_fops = { 270 - .open = ion_debug_client_open, 271 - .read = seq_read, 272 - .llseek = seq_lseek, 273 - .release = single_release, 274 - }; 275 - 276 - static int ion_get_client_serial(const struct rb_root *root, 277 - const unsigned char *name) 278 - { 279 - int serial = -1; 280 - struct rb_node *node; 281 - 282 - for (node = rb_first(root); node; node = rb_next(node)) { 283 - struct ion_client *client = rb_entry(node, struct ion_client, 284 - node); 285 - 286 - if (strcmp(client->name, name)) 287 - continue; 288 - serial = max(serial, client->display_serial); 289 - } 290 - return serial + 1; 291 - } 292 - 293 - struct ion_client *ion_client_create(struct ion_device *dev, 294 - const char *name) 295 - { 296 - struct ion_client *client; 297 - struct task_struct *task; 298 - struct rb_node **p; 299 - struct rb_node *parent = NULL; 300 - struct ion_client *entry; 301 - pid_t pid; 302 - 303 - if (!name) { 304 - pr_err("%s: Name cannot be null\n", __func__); 305 - return ERR_PTR(-EINVAL); 306 - } 307 - 308 - get_task_struct(current->group_leader); 309 - task_lock(current->group_leader); 310 - pid = 
task_pid_nr(current->group_leader); 311 - /* 312 - * don't bother to store task struct for kernel threads, 313 - * they can't be killed anyway 314 - */ 315 - if (current->group_leader->flags & PF_KTHREAD) { 316 - put_task_struct(current->group_leader); 317 - task = NULL; 318 - } else { 319 - task = current->group_leader; 320 - } 321 - task_unlock(current->group_leader); 322 - 323 - client = kzalloc(sizeof(*client), GFP_KERNEL); 324 - if (!client) 325 - goto err_put_task_struct; 326 - 327 - client->dev = dev; 328 - client->handles = RB_ROOT; 329 - idr_init(&client->idr); 330 - mutex_init(&client->lock); 331 - client->task = task; 332 - client->pid = pid; 333 - client->name = kstrdup(name, GFP_KERNEL); 334 - if (!client->name) 335 - goto err_free_client; 336 - 337 - down_write(&dev->lock); 338 - client->display_serial = ion_get_client_serial(&dev->clients, name); 339 - client->display_name = kasprintf( 340 - GFP_KERNEL, "%s-%d", name, client->display_serial); 341 - if (!client->display_name) { 342 - up_write(&dev->lock); 343 - goto err_free_client_name; 344 - } 345 - p = &dev->clients.rb_node; 346 - while (*p) { 347 - parent = *p; 348 - entry = rb_entry(parent, struct ion_client, node); 349 - 350 - if (client < entry) 351 - p = &(*p)->rb_left; 352 - else if (client > entry) 353 - p = &(*p)->rb_right; 354 - } 355 - rb_link_node(&client->node, parent, p); 356 - rb_insert_color(&client->node, &dev->clients); 357 - 358 - client->debug_root = debugfs_create_file(client->display_name, 0664, 359 - dev->clients_debug_root, 360 - client, &debug_client_fops); 361 - if (!client->debug_root) { 362 - char buf[256], *path; 363 - 364 - path = dentry_path(dev->clients_debug_root, buf, 256); 365 - pr_err("Failed to create client debugfs at %s/%s\n", 366 - path, client->display_name); 367 - } 368 - 369 - up_write(&dev->lock); 370 - 371 - return client; 372 - 373 - err_free_client_name: 374 - kfree(client->name); 375 - err_free_client: 376 - kfree(client); 377 - err_put_task_struct: 
378 - if (task) 379 - put_task_struct(current->group_leader); 380 - return ERR_PTR(-ENOMEM); 381 - } 382 - EXPORT_SYMBOL(ion_client_create); 383 - 384 - void ion_client_destroy(struct ion_client *client) 385 - { 386 - struct ion_device *dev = client->dev; 387 - struct rb_node *n; 388 - 389 - pr_debug("%s: %d\n", __func__, __LINE__); 390 - mutex_lock(&debugfs_mutex); 391 - while ((n = rb_first(&client->handles))) { 392 - struct ion_handle *handle = rb_entry(n, struct ion_handle, 393 - node); 394 - ion_handle_destroy(&handle->ref); 395 - } 396 - 397 - idr_destroy(&client->idr); 398 - 399 - down_write(&dev->lock); 400 - if (client->task) 401 - put_task_struct(client->task); 402 - rb_erase(&client->node, &dev->clients); 403 - debugfs_remove_recursive(client->debug_root); 404 - up_write(&dev->lock); 405 - 406 - kfree(client->display_name); 407 - kfree(client->name); 408 - kfree(client); 409 - mutex_unlock(&debugfs_mutex); 410 - } 411 - EXPORT_SYMBOL(ion_client_destroy); 412 435 413 436 static struct sg_table *dup_sg_table(struct sg_table *table) 414 437 { ··· 323 802 { 324 803 struct ion_buffer *buffer = dmabuf->priv; 325 804 326 - ion_buffer_put(buffer); 805 + _ion_buffer_destroy(buffer); 327 806 } 328 807 329 808 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) ··· 402 881 .kunmap = ion_dma_buf_kunmap, 403 882 }; 404 883 405 - struct dma_buf *ion_share_dma_buf(struct ion_client *client, 406 - struct ion_handle *handle) 884 + int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags) 407 885 { 886 + struct ion_device *dev = internal_dev; 887 + struct ion_buffer *buffer = NULL; 888 + struct ion_heap *heap; 408 889 DEFINE_DMA_BUF_EXPORT_INFO(exp_info); 409 - struct ion_buffer *buffer; 890 + int fd; 410 891 struct dma_buf *dmabuf; 411 - bool valid_handle; 412 892 413 - mutex_lock(&client->lock); 414 - valid_handle = ion_handle_validate(client, handle); 415 - if (!valid_handle) { 416 - WARN(1, "%s: invalid handle passed to 
share.\n", __func__); 417 - mutex_unlock(&client->lock); 418 - return ERR_PTR(-EINVAL); 893 + pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__, 894 + len, heap_id_mask, flags); 895 + /* 896 + * traverse the list of heaps available in this system in priority 897 + * order. If the heap type is supported by the client, and matches the 898 + * request of the caller allocate from it. Repeat until allocate has 899 + * succeeded or all heaps have been tried 900 + */ 901 + len = PAGE_ALIGN(len); 902 + 903 + if (!len) 904 + return -EINVAL; 905 + 906 + down_read(&dev->lock); 907 + plist_for_each_entry(heap, &dev->heaps, node) { 908 + /* if the caller didn't specify this heap id */ 909 + if (!((1 << heap->id) & heap_id_mask)) 910 + continue; 911 + buffer = ion_buffer_create(heap, dev, len, flags); 912 + if (!IS_ERR(buffer)) 913 + break; 419 914 } 420 - buffer = handle->buffer; 421 - ion_buffer_get(buffer); 422 - mutex_unlock(&client->lock); 915 + up_read(&dev->lock); 916 + 917 + if (buffer == NULL) 918 + return -ENODEV; 919 + 920 + if (IS_ERR(buffer)) 921 + return PTR_ERR(buffer); 423 922 424 923 exp_info.ops = &dma_buf_ops; 425 924 exp_info.size = buffer->size; ··· 448 907 449 908 dmabuf = dma_buf_export(&exp_info); 450 909 if (IS_ERR(dmabuf)) { 451 - ion_buffer_put(buffer); 452 - return dmabuf; 453 - } 454 - 455 - return dmabuf; 456 - } 457 - EXPORT_SYMBOL(ion_share_dma_buf); 458 - 459 - int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) 460 - { 461 - struct dma_buf *dmabuf; 462 - int fd; 463 - 464 - dmabuf = ion_share_dma_buf(client, handle); 465 - if (IS_ERR(dmabuf)) 910 + _ion_buffer_destroy(buffer); 466 911 return PTR_ERR(dmabuf); 912 + } 467 913 468 914 fd = dma_buf_fd(dmabuf, O_CLOEXEC); 469 915 if (fd < 0) ··· 458 930 459 931 return fd; 460 932 } 461 - EXPORT_SYMBOL(ion_share_dma_buf_fd); 462 933 463 - int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query) 934 + int ion_query_heaps(struct ion_heap_query 
*query) 464 935 { 465 - struct ion_device *dev = client->dev; 936 + struct ion_device *dev = internal_dev; 466 937 struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps); 467 938 int ret = -EINVAL, cnt = 0, max_cnt; 468 939 struct ion_heap *heap; ··· 503 976 return ret; 504 977 } 505 978 506 - static int ion_release(struct inode *inode, struct file *file) 507 - { 508 - struct ion_client *client = file->private_data; 509 - 510 - pr_debug("%s: %d\n", __func__, __LINE__); 511 - ion_client_destroy(client); 512 - return 0; 513 - } 514 - 515 - static int ion_open(struct inode *inode, struct file *file) 516 - { 517 - struct miscdevice *miscdev = file->private_data; 518 - struct ion_device *dev = container_of(miscdev, struct ion_device, dev); 519 - struct ion_client *client; 520 - char debug_name[64]; 521 - 522 - pr_debug("%s: %d\n", __func__, __LINE__); 523 - snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); 524 - client = ion_client_create(dev, debug_name); 525 - if (IS_ERR(client)) 526 - return PTR_ERR(client); 527 - file->private_data = client; 528 - 529 - return 0; 530 - } 531 - 532 979 static const struct file_operations ion_fops = { 533 980 .owner = THIS_MODULE, 534 - .open = ion_open, 535 - .release = ion_release, 536 981 .unlocked_ioctl = ion_ioctl, 537 982 #ifdef CONFIG_COMPAT 538 983 .compat_ioctl = ion_ioctl, 539 984 #endif 540 - }; 541 - 542 - static size_t ion_debug_heap_total(struct ion_client *client, 543 - unsigned int id) 544 - { 545 - size_t size = 0; 546 - struct rb_node *n; 547 - 548 - mutex_lock(&client->lock); 549 - for (n = rb_first(&client->handles); n; n = rb_next(n)) { 550 - struct ion_handle *handle = rb_entry(n, 551 - struct ion_handle, 552 - node); 553 - if (handle->buffer->heap->id == id) 554 - size += handle->buffer->size; 555 - } 556 - mutex_unlock(&client->lock); 557 - return size; 558 - } 559 - 560 - static int ion_debug_heap_show(struct seq_file *s, void *unused) 561 - { 562 - struct ion_heap *heap = 
s->private; 563 - struct ion_device *dev = heap->dev; 564 - struct rb_node *n; 565 - size_t total_size = 0; 566 - size_t total_orphaned_size = 0; 567 - 568 - seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); 569 - seq_puts(s, "----------------------------------------------------\n"); 570 - 571 - mutex_lock(&debugfs_mutex); 572 - for (n = rb_first(&dev->clients); n; n = rb_next(n)) { 573 - struct ion_client *client = rb_entry(n, struct ion_client, 574 - node); 575 - size_t size = ion_debug_heap_total(client, heap->id); 576 - 577 - if (!size) 578 - continue; 579 - if (client->task) { 580 - char task_comm[TASK_COMM_LEN]; 581 - 582 - get_task_comm(task_comm, client->task); 583 - seq_printf(s, "%16s %16u %16zu\n", task_comm, 584 - client->pid, size); 585 - } else { 586 - seq_printf(s, "%16s %16u %16zu\n", client->name, 587 - client->pid, size); 588 - } 589 - } 590 - mutex_unlock(&debugfs_mutex); 591 - 592 - seq_puts(s, "----------------------------------------------------\n"); 593 - seq_puts(s, "orphaned allocations (info is from last known client):\n"); 594 - mutex_lock(&dev->buffer_lock); 595 - for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { 596 - struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, 597 - node); 598 - if (buffer->heap->id != heap->id) 599 - continue; 600 - total_size += buffer->size; 601 - if (!buffer->handle_count) { 602 - seq_printf(s, "%16s %16u %16zu %d %d\n", 603 - buffer->task_comm, buffer->pid, 604 - buffer->size, buffer->kmap_cnt, 605 - kref_read(&buffer->ref)); 606 - total_orphaned_size += buffer->size; 607 - } 608 - } 609 - mutex_unlock(&dev->buffer_lock); 610 - seq_puts(s, "----------------------------------------------------\n"); 611 - seq_printf(s, "%16s %16zu\n", "total orphaned", 612 - total_orphaned_size); 613 - seq_printf(s, "%16s %16zu\n", "total ", total_size); 614 - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) 615 - seq_printf(s, "%16s %16zu\n", "deferred free", 616 - heap->free_list_size); 617 - seq_puts(s, 
"----------------------------------------------------\n"); 618 - 619 - if (heap->debug_show) 620 - heap->debug_show(heap, s, unused); 621 - 622 - return 0; 623 - } 624 - 625 - static int ion_debug_heap_open(struct inode *inode, struct file *file) 626 - { 627 - return single_open(file, ion_debug_heap_show, inode->i_private); 628 - } 629 - 630 - static const struct file_operations debug_heap_fops = { 631 - .open = ion_debug_heap_open, 632 - .read = seq_read, 633 - .llseek = seq_lseek, 634 - .release = single_release, 635 985 }; 636 986 637 987 static int debug_shrink_set(void *data, u64 val) ··· 573 1169 */ 574 1170 plist_node_init(&heap->node, -heap->id); 575 1171 plist_add(&heap->node, &dev->heaps); 576 - debug_file = debugfs_create_file(heap->name, 0664, 577 - dev->heaps_debug_root, heap, 578 - &debug_heap_fops); 579 - 580 - if (!debug_file) { 581 - char buf[256], *path; 582 - 583 - path = dentry_path(dev->heaps_debug_root, buf, 256); 584 - pr_err("Failed to create heap debugfs at %s/%s\n", 585 - path, heap->name); 586 - } 587 1172 588 1173 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) { 589 1174 char debug_name[64]; 590 1175 591 1176 snprintf(debug_name, 64, "%s_shrink", heap->name); 592 1177 debug_file = debugfs_create_file( 593 - debug_name, 0644, dev->heaps_debug_root, heap, 1178 + debug_name, 0644, dev->debug_root, heap, 594 1179 &debug_shrink_fops); 595 1180 if (!debug_file) { 596 1181 char buf[256], *path; 597 1182 598 - path = dentry_path(dev->heaps_debug_root, buf, 256); 1183 + path = dentry_path(dev->debug_root, buf, 256); 599 1184 pr_err("Failed to create heap shrinker debugfs at %s/%s\n", 600 1185 path, debug_name); 601 1186 } ··· 620 1227 pr_err("ion: failed to create debugfs root directory.\n"); 621 1228 goto debugfs_done; 622 1229 } 623 - idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); 624 - if (!idev->heaps_debug_root) { 625 - pr_err("ion: failed to create debugfs heaps directory.\n"); 626 - goto 
debugfs_done; 627 - } 628 - idev->clients_debug_root = debugfs_create_dir("clients", 629 - idev->debug_root); 630 - if (!idev->clients_debug_root) 631 - pr_err("ion: failed to create debugfs clients directory.\n"); 632 1230 633 1231 debugfs_done: 634 1232 idev->buffers = RB_ROOT; 635 1233 mutex_init(&idev->buffer_lock); 636 1234 init_rwsem(&idev->lock); 637 1235 plist_head_init(&idev->heaps); 638 - idev->clients = RB_ROOT; 639 - ion_root_client = &idev->clients; 640 - mutex_init(&debugfs_mutex); 641 1236 internal_dev = idev; 642 1237 return 0; 643 1238 }
+2 -75
drivers/staging/android/ion/ion.h
··· 78 78 * handle, used for debugging 79 79 */ 80 80 struct ion_buffer { 81 - struct kref ref; 82 81 union { 83 82 struct rb_node node; 84 83 struct list_head list; ··· 108 109 * @buffers: an rb tree of all the existing buffers 109 110 * @buffer_lock: lock protecting the tree of buffers 110 111 * @lock: rwsem protecting the tree of heaps and clients 111 - * @heaps: list of all the heaps in the system 112 - * @user_clients: list of all the clients created from userspace 113 112 */ 114 113 struct ion_device { 115 114 struct miscdevice dev; ··· 115 118 struct mutex buffer_lock; 116 119 struct rw_semaphore lock; 117 120 struct plist_head heaps; 118 - struct rb_root clients; 119 121 struct dentry *debug_root; 120 - struct dentry *heaps_debug_root; 121 - struct dentry *clients_debug_root; 122 122 int heap_cnt; 123 - }; 124 - 125 - /** 126 - * struct ion_client - a process/hw block local address space 127 - * @node: node in the tree of all clients 128 - * @dev: backpointer to ion device 129 - * @handles: an rb tree of all the handles in this client 130 - * @idr: an idr space for allocating handle ids 131 - * @lock: lock protecting the tree of handles 132 - * @name: used for debugging 133 - * @display_name: used for debugging (unique version of @name) 134 - * @display_serial: used for debugging (to make display_name unique) 135 - * @task: used for debugging 136 - * 137 - * A client represents a list of buffers this client may access. 138 - * The mutex stored here is used to protect both handles tree 139 - * as well as the handles themselves, and should be held while modifying either. 
140 - */ 141 - struct ion_client { 142 - struct rb_node node; 143 - struct ion_device *dev; 144 - struct rb_root handles; 145 - struct idr idr; 146 - struct mutex lock; 147 - const char *name; 148 - char *display_name; 149 - int display_serial; 150 - struct task_struct *task; 151 - pid_t pid; 152 - struct dentry *debug_root; 153 - }; 154 - 155 - /** 156 - * ion_handle - a client local reference to a buffer 157 - * @ref: reference count 158 - * @client: back pointer to the client the buffer resides in 159 - * @buffer: pointer to the buffer 160 - * @node: node in the client's handle rbtree 161 - * @kmap_cnt: count of times this client has mapped to kernel 162 - * @id: client-unique id allocated by client->idr 163 - * 164 - * Modifications to node, map_cnt or mapping should be protected by the 165 - * lock in the client. Other fields are never changed after initialization. 166 - */ 167 - struct ion_handle { 168 - struct kref ref; 169 - struct ion_client *client; 170 - struct ion_buffer *buffer; 171 - struct rb_node node; 172 - unsigned int kmap_cnt; 173 - int id; 174 123 }; 175 124 176 125 /** ··· 239 296 int ion_heap_buffer_zero(struct ion_buffer *buffer); 240 297 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); 241 298 242 - struct ion_handle *ion_alloc(struct ion_client *client, size_t len, 299 + int ion_alloc(size_t len, 243 300 unsigned int heap_id_mask, 244 301 unsigned int flags); 245 - 246 - void ion_free(struct ion_client *client, struct ion_handle *handle); 247 - 248 - int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); 249 302 250 303 /** 251 304 * ion_heap_init_shrinker ··· 370 431 371 432 long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 372 433 373 - struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, 374 - int id); 375 - 376 - void ion_free_nolock(struct ion_client *client, struct ion_handle *handle); 377 - 378 - int ion_handle_put_nolock(struct ion_handle 
*handle); 379 - 380 - struct ion_handle *ion_handle_get_by_id(struct ion_client *client, 381 - int id); 382 - 383 - int ion_handle_put(struct ion_handle *handle); 384 - 385 - int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query); 434 + int ion_query_heaps(struct ion_heap_query *query); 386 435 387 436 #endif /* _ION_H */
+1 -24
drivers/staging/android/uapi/ion.h
··· 85 85 __u64 len; 86 86 __u32 heap_id_mask; 87 87 __u32 flags; 88 - __u32 handle; 89 - __u32 unused; 90 - }; 91 - 92 - /** 93 - * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair 94 - * @handle: a handle 95 - * @fd: a file descriptor representing that handle 96 - * 97 - * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with 98 - * the handle returned from ion alloc, and the kernel returns the file 99 - * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace 100 - * provides the file descriptor and the kernel returns the handle. 101 - */ 102 - struct ion_fd_data { 103 - __u32 handle; 104 88 __u32 fd; 105 - }; 106 - 107 - /** 108 - * struct ion_handle_data - a handle passed to/from the kernel 109 - * @handle: a handle 110 - */ 111 - struct ion_handle_data { 112 - __u32 handle; 89 + __u32 unused; 113 90 }; 114 91 115 92 #define MAX_HEAP_NAME 32