Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

gpu: ion: Add ION Memory Manager

Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: Squished in Colin Cross' move to staging change,
also disables ION from the build, as it won't compile till
the end of the patchset]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Rebecca Schultz Zavin and committed by
Greg Kroah-Hartman
c30707be 530376bf

+2378
+2
drivers/staging/android/Kconfig
··· 100 100 *WARNING* improper use of this can result in deadlocking kernel 101 101 drivers from userspace. 102 102 103 + source "drivers/staging/android/ion/Kconfig" 104 + 103 105 endif # if ANDROID 104 106 105 107 endmenu
+3
drivers/staging/android/Makefile
··· 1 1 ccflags-y += -I$(src) # needed for trace events 2 2 3 + # ION doesn't build just yet, so disable it from the build 4 + #obj-y += ion/ 5 + 3 6 obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o 4 7 obj-$(CONFIG_ASHMEM) += ashmem.o 5 8 obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+12
drivers/staging/android/ion/Kconfig
··· 1 + menuconfig ION 2 + tristate "Ion Memory Manager" 3 + select GENERIC_ALLOCATOR 4 + help 5 + Choose this option to enable the ION Memory Manager. 6 + 7 + config ION_TEGRA 8 + tristate "Ion for Tegra" 9 + depends on ARCH_TEGRA && ION 10 + help 11 + Choose this option if you wish to use ion on an NVIDIA Tegra. 12 +
+2
drivers/staging/android/ion/Makefile
··· 1 + obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o 2 + obj-$(CONFIG_ION_TEGRA) += tegra/
+1187
drivers/staging/android/ion/ion.c
··· 1 + /* 2 + * drivers/staging/android/ion/ion.c 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #include <linux/device.h> 18 + #include <linux/file.h> 19 + #include <linux/fs.h> 20 + #include <linux/anon_inodes.h> 21 + #include <linux/list.h> 22 + #include <linux/miscdevice.h> 23 + #include <linux/export.h> 24 + #include <linux/mm.h> 25 + #include <linux/mm_types.h> 26 + #include <linux/rbtree.h> 27 + #include <linux/sched.h> 28 + #include <linux/slab.h> 29 + #include <linux/seq_file.h> 30 + #include <linux/uaccess.h> 31 + #include <linux/debugfs.h> 32 + 33 + #include "ion.h" 34 + #include "ion_priv.h" 35 + #define DEBUG 36 + 37 + /** 38 + * struct ion_device - the metadata of the ion device node 39 + * @dev: the actual misc device 40 + * @buffers: an rb tree of all the existing buffers 41 + * @lock: lock protecting the buffers & heaps trees 42 + * @heaps: list of all the heaps in the system 43 + * @user_clients: list of all the clients created from userspace 44 + */ 45 + struct ion_device { 46 + struct miscdevice dev; 47 + struct rb_root buffers; 48 + struct mutex lock; 49 + struct rb_root heaps; 50 + long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, 51 + unsigned long arg); 52 + struct rb_root user_clients; 53 + struct rb_root kernel_clients; 54 + struct dentry *debug_root; 55 + }; 56 + 57 + /** 58 + * struct ion_client - a process/hw block local address space 59 + * @ref: for reference counting the client 60 + * @node: node in the tree of all 
clients 61 + * @dev: backpointer to ion device 62 + * @handles: an rb tree of all the handles in this client 63 + * @lock: lock protecting the tree of handles 64 + * @heap_mask: mask of all supported heaps 65 + * @name: used for debugging 66 + * @task: used for debugging 67 + * 68 + * A client represents a list of buffers this client may access. 69 + * The mutex stored here is used to protect both handles tree 70 + * as well as the handles themselves, and should be held while modifying either. 71 + */ 72 + struct ion_client { 73 + struct kref ref; 74 + struct rb_node node; 75 + struct ion_device *dev; 76 + struct rb_root handles; 77 + struct mutex lock; 78 + unsigned int heap_mask; 79 + const char *name; 80 + struct task_struct *task; 81 + pid_t pid; 82 + struct dentry *debug_root; 83 + }; 84 + 85 + /** 86 + * ion_handle - a client local reference to a buffer 87 + * @ref: reference count 88 + * @client: back pointer to the client the buffer resides in 89 + * @buffer: pointer to the buffer 90 + * @node: node in the client's handle rbtree 91 + * @kmap_cnt: count of times this client has mapped to kernel 92 + * @dmap_cnt: count of times this client has mapped for dma 93 + * @usermap_cnt: count of times this client has mapped for userspace 94 + * 95 + * Modifications to node, map_cnt or mapping should be protected by the 96 + * lock in the client. Other fields are never changed after initialization. 
97 + */ 98 + struct ion_handle { 99 + struct kref ref; 100 + struct ion_client *client; 101 + struct ion_buffer *buffer; 102 + struct rb_node node; 103 + unsigned int kmap_cnt; 104 + unsigned int dmap_cnt; 105 + unsigned int usermap_cnt; 106 + }; 107 + 108 + /* this function should only be called while dev->lock is held */ 109 + static void ion_buffer_add(struct ion_device *dev, 110 + struct ion_buffer *buffer) 111 + { 112 + struct rb_node **p = &dev->buffers.rb_node; 113 + struct rb_node *parent = NULL; 114 + struct ion_buffer *entry; 115 + 116 + while (*p) { 117 + parent = *p; 118 + entry = rb_entry(parent, struct ion_buffer, node); 119 + 120 + if (buffer < entry) { 121 + p = &(*p)->rb_left; 122 + } else if (buffer > entry) { 123 + p = &(*p)->rb_right; 124 + } else { 125 + pr_err("%s: buffer already found.", __func__); 126 + BUG(); 127 + } 128 + } 129 + 130 + rb_link_node(&buffer->node, parent, p); 131 + rb_insert_color(&buffer->node, &dev->buffers); 132 + } 133 + 134 + /* this function should only be called while dev->lock is held */ 135 + static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, 136 + struct ion_device *dev, 137 + unsigned long len, 138 + unsigned long align, 139 + unsigned long flags) 140 + { 141 + struct ion_buffer *buffer; 142 + int ret; 143 + 144 + buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); 145 + if (!buffer) 146 + return ERR_PTR(-ENOMEM); 147 + 148 + buffer->heap = heap; 149 + kref_init(&buffer->ref); 150 + 151 + ret = heap->ops->allocate(heap, buffer, len, align, flags); 152 + if (ret) { 153 + kfree(buffer); 154 + return ERR_PTR(ret); 155 + } 156 + buffer->dev = dev; 157 + buffer->size = len; 158 + mutex_init(&buffer->lock); 159 + ion_buffer_add(dev, buffer); 160 + return buffer; 161 + } 162 + 163 + static void ion_buffer_destroy(struct kref *kref) 164 + { 165 + struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); 166 + struct ion_device *dev = buffer->dev; 167 + 168 + 
buffer->heap->ops->free(buffer); 169 + mutex_lock(&dev->lock); 170 + rb_erase(&buffer->node, &dev->buffers); 171 + mutex_unlock(&dev->lock); 172 + kfree(buffer); 173 + } 174 + 175 + static void ion_buffer_get(struct ion_buffer *buffer) 176 + { 177 + kref_get(&buffer->ref); 178 + } 179 + 180 + static int ion_buffer_put(struct ion_buffer *buffer) 181 + { 182 + return kref_put(&buffer->ref, ion_buffer_destroy); 183 + } 184 + 185 + static struct ion_handle *ion_handle_create(struct ion_client *client, 186 + struct ion_buffer *buffer) 187 + { 188 + struct ion_handle *handle; 189 + 190 + handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); 191 + if (!handle) 192 + return ERR_PTR(-ENOMEM); 193 + kref_init(&handle->ref); 194 + RB_CLEAR_NODE(&handle->node); 195 + handle->client = client; 196 + ion_buffer_get(buffer); 197 + handle->buffer = buffer; 198 + 199 + return handle; 200 + } 201 + 202 + static void ion_handle_destroy(struct kref *kref) 203 + { 204 + struct ion_handle *handle = container_of(kref, struct ion_handle, ref); 205 + /* XXX Can a handle be destroyed while it's map count is non-zero?: 206 + if (handle->map_cnt) unmap 207 + */ 208 + ion_buffer_put(handle->buffer); 209 + mutex_lock(&handle->client->lock); 210 + if (!RB_EMPTY_NODE(&handle->node)) 211 + rb_erase(&handle->node, &handle->client->handles); 212 + mutex_unlock(&handle->client->lock); 213 + kfree(handle); 214 + } 215 + 216 + struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) 217 + { 218 + return handle->buffer; 219 + } 220 + 221 + static void ion_handle_get(struct ion_handle *handle) 222 + { 223 + kref_get(&handle->ref); 224 + } 225 + 226 + static int ion_handle_put(struct ion_handle *handle) 227 + { 228 + return kref_put(&handle->ref, ion_handle_destroy); 229 + } 230 + 231 + static struct ion_handle *ion_handle_lookup(struct ion_client *client, 232 + struct ion_buffer *buffer) 233 + { 234 + struct rb_node *n; 235 + 236 + for (n = rb_first(&client->handles); n; n = rb_next(n)) { 237 + 
struct ion_handle *handle = rb_entry(n, struct ion_handle, 238 + node); 239 + if (handle->buffer == buffer) 240 + return handle; 241 + } 242 + return NULL; 243 + } 244 + 245 + static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) 246 + { 247 + struct rb_node *n = client->handles.rb_node; 248 + 249 + while (n) { 250 + struct ion_handle *handle_node = rb_entry(n, struct ion_handle, 251 + node); 252 + if (handle < handle_node) 253 + n = n->rb_left; 254 + else if (handle > handle_node) 255 + n = n->rb_right; 256 + else 257 + return true; 258 + } 259 + return false; 260 + } 261 + 262 + static void ion_handle_add(struct ion_client *client, struct ion_handle *handle) 263 + { 264 + struct rb_node **p = &client->handles.rb_node; 265 + struct rb_node *parent = NULL; 266 + struct ion_handle *entry; 267 + 268 + while (*p) { 269 + parent = *p; 270 + entry = rb_entry(parent, struct ion_handle, node); 271 + 272 + if (handle < entry) 273 + p = &(*p)->rb_left; 274 + else if (handle > entry) 275 + p = &(*p)->rb_right; 276 + else 277 + WARN(1, "%s: buffer already found.", __func__); 278 + } 279 + 280 + rb_link_node(&handle->node, parent, p); 281 + rb_insert_color(&handle->node, &client->handles); 282 + } 283 + 284 + struct ion_handle *ion_alloc(struct ion_client *client, size_t len, 285 + size_t align, unsigned int flags) 286 + { 287 + struct rb_node *n; 288 + struct ion_handle *handle; 289 + struct ion_device *dev = client->dev; 290 + struct ion_buffer *buffer = NULL; 291 + 292 + /* 293 + * traverse the list of heaps available in this system in priority 294 + * order. If the heap type is supported by the client, and matches the 295 + * request of the caller allocate from it. 
Repeat until allocate has 296 + * succeeded or all heaps have been tried 297 + */ 298 + mutex_lock(&dev->lock); 299 + for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { 300 + struct ion_heap *heap = rb_entry(n, struct ion_heap, node); 301 + /* if the client doesn't support this heap type */ 302 + if (!((1 << heap->type) & client->heap_mask)) 303 + continue; 304 + /* if the caller didn't specify this heap type */ 305 + if (!((1 << heap->id) & flags)) 306 + continue; 307 + buffer = ion_buffer_create(heap, dev, len, align, flags); 308 + if (!IS_ERR_OR_NULL(buffer)) 309 + break; 310 + } 311 + mutex_unlock(&dev->lock); 312 + 313 + if (IS_ERR_OR_NULL(buffer)) 314 + return ERR_PTR(PTR_ERR(buffer)); 315 + 316 + handle = ion_handle_create(client, buffer); 317 + 318 + if (IS_ERR_OR_NULL(handle)) 319 + goto end; 320 + 321 + /* 322 + * ion_buffer_create will create a buffer with a ref_cnt of 1, 323 + * and ion_handle_create will take a second reference, drop one here 324 + */ 325 + ion_buffer_put(buffer); 326 + 327 + mutex_lock(&client->lock); 328 + ion_handle_add(client, handle); 329 + mutex_unlock(&client->lock); 330 + return handle; 331 + 332 + end: 333 + ion_buffer_put(buffer); 334 + return handle; 335 + } 336 + 337 + void ion_free(struct ion_client *client, struct ion_handle *handle) 338 + { 339 + bool valid_handle; 340 + 341 + BUG_ON(client != handle->client); 342 + 343 + mutex_lock(&client->lock); 344 + valid_handle = ion_handle_validate(client, handle); 345 + mutex_unlock(&client->lock); 346 + 347 + if (!valid_handle) { 348 + WARN("%s: invalid handle passed to free.\n", __func__); 349 + return; 350 + } 351 + ion_handle_put(handle); 352 + } 353 + 354 + static void ion_client_get(struct ion_client *client); 355 + static int ion_client_put(struct ion_client *client); 356 + 357 + static bool _ion_map(int *buffer_cnt, int *handle_cnt) 358 + { 359 + bool map; 360 + 361 + BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0); 362 + 363 + if (*buffer_cnt) 364 + map = false; 
365 + else 366 + map = true; 367 + if (*handle_cnt == 0) 368 + (*buffer_cnt)++; 369 + (*handle_cnt)++; 370 + return map; 371 + } 372 + 373 + static bool _ion_unmap(int *buffer_cnt, int *handle_cnt) 374 + { 375 + BUG_ON(*handle_cnt == 0); 376 + (*handle_cnt)--; 377 + if (*handle_cnt != 0) 378 + return false; 379 + BUG_ON(*buffer_cnt == 0); 380 + (*buffer_cnt)--; 381 + if (*buffer_cnt == 0) 382 + return true; 383 + return false; 384 + } 385 + 386 + int ion_phys(struct ion_client *client, struct ion_handle *handle, 387 + ion_phys_addr_t *addr, size_t *len) 388 + { 389 + struct ion_buffer *buffer; 390 + int ret; 391 + 392 + mutex_lock(&client->lock); 393 + if (!ion_handle_validate(client, handle)) { 394 + mutex_unlock(&client->lock); 395 + return -EINVAL; 396 + } 397 + 398 + buffer = handle->buffer; 399 + 400 + if (!buffer->heap->ops->phys) { 401 + pr_err("%s: ion_phys is not implemented by this heap.\n", 402 + __func__); 403 + mutex_unlock(&client->lock); 404 + return -ENODEV; 405 + } 406 + mutex_unlock(&client->lock); 407 + ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); 408 + return ret; 409 + } 410 + 411 + void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) 412 + { 413 + struct ion_buffer *buffer; 414 + void *vaddr; 415 + 416 + mutex_lock(&client->lock); 417 + if (!ion_handle_validate(client, handle)) { 418 + pr_err("%s: invalid handle passed to map_kernel.\n", 419 + __func__); 420 + mutex_unlock(&client->lock); 421 + return ERR_PTR(-EINVAL); 422 + } 423 + 424 + buffer = handle->buffer; 425 + mutex_lock(&buffer->lock); 426 + 427 + if (!handle->buffer->heap->ops->map_kernel) { 428 + pr_err("%s: map_kernel is not implemented by this heap.\n", 429 + __func__); 430 + mutex_unlock(&buffer->lock); 431 + mutex_unlock(&client->lock); 432 + return ERR_PTR(-ENODEV); 433 + } 434 + 435 + if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) { 436 + vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); 437 + if (IS_ERR_OR_NULL(vaddr)) 
438 + _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt); 439 + buffer->vaddr = vaddr; 440 + } else { 441 + vaddr = buffer->vaddr; 442 + } 443 + mutex_unlock(&buffer->lock); 444 + mutex_unlock(&client->lock); 445 + return vaddr; 446 + } 447 + 448 + struct scatterlist *ion_map_dma(struct ion_client *client, 449 + struct ion_handle *handle) 450 + { 451 + struct ion_buffer *buffer; 452 + struct scatterlist *sglist; 453 + 454 + mutex_lock(&client->lock); 455 + if (!ion_handle_validate(client, handle)) { 456 + pr_err("%s: invalid handle passed to map_dma.\n", 457 + __func__); 458 + mutex_unlock(&client->lock); 459 + return ERR_PTR(-EINVAL); 460 + } 461 + buffer = handle->buffer; 462 + mutex_lock(&buffer->lock); 463 + 464 + if (!handle->buffer->heap->ops->map_dma) { 465 + pr_err("%s: map_kernel is not implemented by this heap.\n", 466 + __func__); 467 + mutex_unlock(&buffer->lock); 468 + mutex_unlock(&client->lock); 469 + return ERR_PTR(-ENODEV); 470 + } 471 + if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) { 472 + sglist = buffer->heap->ops->map_dma(buffer->heap, buffer); 473 + if (IS_ERR_OR_NULL(sglist)) 474 + _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt); 475 + buffer->sglist = sglist; 476 + } else { 477 + sglist = buffer->sglist; 478 + } 479 + mutex_unlock(&buffer->lock); 480 + mutex_unlock(&client->lock); 481 + return sglist; 482 + } 483 + 484 + void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) 485 + { 486 + struct ion_buffer *buffer; 487 + 488 + mutex_lock(&client->lock); 489 + buffer = handle->buffer; 490 + mutex_lock(&buffer->lock); 491 + if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) { 492 + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); 493 + buffer->vaddr = NULL; 494 + } 495 + mutex_unlock(&buffer->lock); 496 + mutex_unlock(&client->lock); 497 + } 498 + 499 + void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle) 500 + { 501 + struct ion_buffer *buffer; 502 + 503 + mutex_lock(&client->lock); 504 + 
buffer = handle->buffer; 505 + mutex_lock(&buffer->lock); 506 + if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) { 507 + buffer->heap->ops->unmap_dma(buffer->heap, buffer); 508 + buffer->sglist = NULL; 509 + } 510 + mutex_unlock(&buffer->lock); 511 + mutex_unlock(&client->lock); 512 + } 513 + 514 + 515 + struct ion_buffer *ion_share(struct ion_client *client, 516 + struct ion_handle *handle) 517 + { 518 + bool valid_handle; 519 + 520 + mutex_lock(&client->lock); 521 + valid_handle = ion_handle_validate(client, handle); 522 + mutex_unlock(&client->lock); 523 + if (!valid_handle) { 524 + WARN("%s: invalid handle passed to share.\n", __func__); 525 + return ERR_PTR(-EINVAL); 526 + } 527 + 528 + /* do not take an extra reference here, the burden is on the caller 529 + * to make sure the buffer doesn't go away while it's passing it 530 + * to another client -- ion_free should not be called on this handle 531 + * until the buffer has been imported into the other client 532 + */ 533 + return handle->buffer; 534 + } 535 + 536 + struct ion_handle *ion_import(struct ion_client *client, 537 + struct ion_buffer *buffer) 538 + { 539 + struct ion_handle *handle = NULL; 540 + 541 + mutex_lock(&client->lock); 542 + /* if a handle exists for this buffer just take a reference to it */ 543 + handle = ion_handle_lookup(client, buffer); 544 + if (!IS_ERR_OR_NULL(handle)) { 545 + ion_handle_get(handle); 546 + goto end; 547 + } 548 + handle = ion_handle_create(client, buffer); 549 + if (IS_ERR_OR_NULL(handle)) 550 + goto end; 551 + ion_handle_add(client, handle); 552 + end: 553 + mutex_unlock(&client->lock); 554 + return handle; 555 + } 556 + 557 + static const struct file_operations ion_share_fops; 558 + 559 + struct ion_handle *ion_import_fd(struct ion_client *client, int fd) 560 + { 561 + struct file *file = fget(fd); 562 + struct ion_handle *handle; 563 + 564 + if (!file) { 565 + pr_err("%s: imported fd not found in file table.\n", __func__); 566 + return ERR_PTR(-EINVAL); 567 + 
} 568 + if (file->f_op != &ion_share_fops) { 569 + pr_err("%s: imported file is not a shared ion file.\n", 570 + __func__); 571 + handle = ERR_PTR(-EINVAL); 572 + goto end; 573 + } 574 + handle = ion_import(client, file->private_data); 575 + end: 576 + fput(file); 577 + return handle; 578 + } 579 + 580 + static int ion_debug_client_show(struct seq_file *s, void *unused) 581 + { 582 + struct ion_client *client = s->private; 583 + struct rb_node *n; 584 + size_t sizes[ION_NUM_HEAPS] = {0}; 585 + const char *names[ION_NUM_HEAPS] = {0}; 586 + int i; 587 + 588 + mutex_lock(&client->lock); 589 + for (n = rb_first(&client->handles); n; n = rb_next(n)) { 590 + struct ion_handle *handle = rb_entry(n, struct ion_handle, 591 + node); 592 + enum ion_heap_type type = handle->buffer->heap->type; 593 + 594 + if (!names[type]) 595 + names[type] = handle->buffer->heap->name; 596 + sizes[type] += handle->buffer->size; 597 + } 598 + mutex_unlock(&client->lock); 599 + 600 + seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); 601 + for (i = 0; i < ION_NUM_HEAPS; i++) { 602 + if (!names[i]) 603 + continue; 604 + seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i], 605 + atomic_read(&client->ref.refcount)); 606 + } 607 + return 0; 608 + } 609 + 610 + static int ion_debug_client_open(struct inode *inode, struct file *file) 611 + { 612 + return single_open(file, ion_debug_client_show, inode->i_private); 613 + } 614 + 615 + static const struct file_operations debug_client_fops = { 616 + .open = ion_debug_client_open, 617 + .read = seq_read, 618 + .llseek = seq_lseek, 619 + .release = single_release, 620 + }; 621 + 622 + static struct ion_client *ion_client_lookup(struct ion_device *dev, 623 + struct task_struct *task) 624 + { 625 + struct rb_node *n = dev->user_clients.rb_node; 626 + struct ion_client *client; 627 + 628 + mutex_lock(&dev->lock); 629 + while (n) { 630 + client = rb_entry(n, struct ion_client, node); 631 + if (task == client->task) { 632 + 
ion_client_get(client); 633 + mutex_unlock(&dev->lock); 634 + return client; 635 + } else if (task < client->task) { 636 + n = n->rb_left; 637 + } else if (task > client->task) { 638 + n = n->rb_right; 639 + } 640 + } 641 + mutex_unlock(&dev->lock); 642 + return NULL; 643 + } 644 + 645 + struct ion_client *ion_client_create(struct ion_device *dev, 646 + unsigned int heap_mask, 647 + const char *name) 648 + { 649 + struct ion_client *client; 650 + struct task_struct *task; 651 + struct rb_node **p; 652 + struct rb_node *parent = NULL; 653 + struct ion_client *entry; 654 + char debug_name[64]; 655 + pid_t pid; 656 + 657 + get_task_struct(current->group_leader); 658 + task_lock(current->group_leader); 659 + pid = task_pid_nr(current->group_leader); 660 + /* don't bother to store task struct for kernel threads, 661 + they can't be killed anyway */ 662 + if (current->group_leader->flags & PF_KTHREAD) { 663 + put_task_struct(current->group_leader); 664 + task = NULL; 665 + } else { 666 + task = current->group_leader; 667 + } 668 + task_unlock(current->group_leader); 669 + 670 + /* if this isn't a kernel thread, see if a client already 671 + exists */ 672 + if (task) { 673 + client = ion_client_lookup(dev, task); 674 + if (!IS_ERR_OR_NULL(client)) { 675 + put_task_struct(current->group_leader); 676 + return client; 677 + } 678 + } 679 + 680 + client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); 681 + if (!client) { 682 + put_task_struct(current->group_leader); 683 + return ERR_PTR(-ENOMEM); 684 + } 685 + 686 + client->dev = dev; 687 + client->handles = RB_ROOT; 688 + mutex_init(&client->lock); 689 + client->name = name; 690 + client->heap_mask = heap_mask; 691 + client->task = task; 692 + client->pid = pid; 693 + kref_init(&client->ref); 694 + 695 + mutex_lock(&dev->lock); 696 + if (task) { 697 + p = &dev->user_clients.rb_node; 698 + while (*p) { 699 + parent = *p; 700 + entry = rb_entry(parent, struct ion_client, node); 701 + 702 + if (task < entry->task) 703 + p = 
&(*p)->rb_left; 704 + else if (task > entry->task) 705 + p = &(*p)->rb_right; 706 + } 707 + rb_link_node(&client->node, parent, p); 708 + rb_insert_color(&client->node, &dev->user_clients); 709 + } else { 710 + p = &dev->kernel_clients.rb_node; 711 + while (*p) { 712 + parent = *p; 713 + entry = rb_entry(parent, struct ion_client, node); 714 + 715 + if (client < entry) 716 + p = &(*p)->rb_left; 717 + else if (client > entry) 718 + p = &(*p)->rb_right; 719 + } 720 + rb_link_node(&client->node, parent, p); 721 + rb_insert_color(&client->node, &dev->kernel_clients); 722 + } 723 + 724 + snprintf(debug_name, 64, "%u", client->pid); 725 + client->debug_root = debugfs_create_file(debug_name, 0664, 726 + dev->debug_root, client, 727 + &debug_client_fops); 728 + mutex_unlock(&dev->lock); 729 + 730 + return client; 731 + } 732 + 733 + static void _ion_client_destroy(struct kref *kref) 734 + { 735 + struct ion_client *client = container_of(kref, struct ion_client, ref); 736 + struct ion_device *dev = client->dev; 737 + struct rb_node *n; 738 + 739 + pr_debug("%s: %d\n", __func__, __LINE__); 740 + while ((n = rb_first(&client->handles))) { 741 + struct ion_handle *handle = rb_entry(n, struct ion_handle, 742 + node); 743 + ion_handle_destroy(&handle->ref); 744 + } 745 + mutex_lock(&dev->lock); 746 + if (client->task) { 747 + rb_erase(&client->node, &dev->user_clients); 748 + put_task_struct(client->task); 749 + } else { 750 + rb_erase(&client->node, &dev->kernel_clients); 751 + } 752 + debugfs_remove_recursive(client->debug_root); 753 + mutex_unlock(&dev->lock); 754 + 755 + kfree(client); 756 + } 757 + 758 + static void ion_client_get(struct ion_client *client) 759 + { 760 + kref_get(&client->ref); 761 + } 762 + 763 + static int ion_client_put(struct ion_client *client) 764 + { 765 + return kref_put(&client->ref, _ion_client_destroy); 766 + } 767 + 768 + void ion_client_destroy(struct ion_client *client) 769 + { 770 + ion_client_put(client); 771 + } 772 + 773 + static int 
ion_share_release(struct inode *inode, struct file* file) 774 + { 775 + struct ion_buffer *buffer = file->private_data; 776 + 777 + pr_debug("%s: %d\n", __func__, __LINE__); 778 + /* drop the reference to the buffer -- this prevents the 779 + buffer from going away because the client holding it exited 780 + while it was being passed */ 781 + ion_buffer_put(buffer); 782 + return 0; 783 + } 784 + 785 + static void ion_vma_open(struct vm_area_struct *vma) 786 + { 787 + 788 + struct ion_buffer *buffer = vma->vm_file->private_data; 789 + struct ion_handle *handle = vma->vm_private_data; 790 + struct ion_client *client; 791 + 792 + pr_debug("%s: %d\n", __func__, __LINE__); 793 + /* check that the client still exists and take a reference so 794 + it can't go away until this vma is closed */ 795 + client = ion_client_lookup(buffer->dev, current->group_leader); 796 + if (IS_ERR_OR_NULL(client)) { 797 + vma->vm_private_data = NULL; 798 + return; 799 + } 800 + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", 801 + __func__, __LINE__, 802 + atomic_read(&client->ref.refcount), 803 + atomic_read(&handle->ref.refcount), 804 + atomic_read(&buffer->ref.refcount)); 805 + } 806 + 807 + static void ion_vma_close(struct vm_area_struct *vma) 808 + { 809 + struct ion_handle *handle = vma->vm_private_data; 810 + struct ion_buffer *buffer = vma->vm_file->private_data; 811 + struct ion_client *client; 812 + 813 + pr_debug("%s: %d\n", __func__, __LINE__); 814 + /* this indicates the client is gone, nothing to do here */ 815 + if (!handle) 816 + return; 817 + client = handle->client; 818 + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", 819 + __func__, __LINE__, 820 + atomic_read(&client->ref.refcount), 821 + atomic_read(&handle->ref.refcount), 822 + atomic_read(&buffer->ref.refcount)); 823 + ion_handle_put(handle); 824 + ion_client_put(client); 825 + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", 826 + __func__, __LINE__, 827 + 
atomic_read(&client->ref.refcount), 828 + atomic_read(&handle->ref.refcount), 829 + atomic_read(&buffer->ref.refcount)); 830 + } 831 + 832 + static struct vm_operations_struct ion_vm_ops = { 833 + .open = ion_vma_open, 834 + .close = ion_vma_close, 835 + }; 836 + 837 + static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) 838 + { 839 + struct ion_buffer *buffer = file->private_data; 840 + unsigned long size = vma->vm_end - vma->vm_start; 841 + struct ion_client *client; 842 + struct ion_handle *handle; 843 + int ret; 844 + 845 + pr_debug("%s: %d\n", __func__, __LINE__); 846 + /* make sure the client still exists, it's possible for the client to 847 + have gone away but the map/share fd still to be around, take 848 + a reference to it so it can't go away while this mapping exists */ 849 + client = ion_client_lookup(buffer->dev, current->group_leader); 850 + if (IS_ERR_OR_NULL(client)) { 851 + pr_err("%s: trying to mmap an ion handle in a process with no " 852 + "ion client\n", __func__); 853 + return -EINVAL; 854 + } 855 + 856 + if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > 857 + buffer->size)) { 858 + pr_err("%s: trying to map larger area than handle has available" 859 + "\n", __func__); 860 + ret = -EINVAL; 861 + goto err; 862 + } 863 + 864 + /* find the handle and take a reference to it */ 865 + handle = ion_import(client, buffer); 866 + if (IS_ERR_OR_NULL(handle)) { 867 + ret = -EINVAL; 868 + goto err; 869 + } 870 + 871 + if (!handle->buffer->heap->ops->map_user) { 872 + pr_err("%s: this heap does not define a method for mapping " 873 + "to userspace\n", __func__); 874 + ret = -EINVAL; 875 + goto err1; 876 + } 877 + 878 + mutex_lock(&buffer->lock); 879 + /* now map it to userspace */ 880 + ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); 881 + mutex_unlock(&buffer->lock); 882 + if (ret) { 883 + pr_err("%s: failure mapping buffer to userspace\n", 884 + __func__); 885 + goto err1; 886 + } 887 + 888 + 
vma->vm_ops = &ion_vm_ops; 889 + /* move the handle into the vm_private_data so we can access it from 890 + vma_open/close */ 891 + vma->vm_private_data = handle; 892 + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", 893 + __func__, __LINE__, 894 + atomic_read(&client->ref.refcount), 895 + atomic_read(&handle->ref.refcount), 896 + atomic_read(&buffer->ref.refcount)); 897 + return 0; 898 + 899 + err1: 900 + /* drop the reference to the handle */ 901 + ion_handle_put(handle); 902 + err: 903 + /* drop the reference to the client */ 904 + ion_client_put(client); 905 + return ret; 906 + } 907 + 908 + static const struct file_operations ion_share_fops = { 909 + .owner = THIS_MODULE, 910 + .release = ion_share_release, 911 + .mmap = ion_share_mmap, 912 + }; 913 + 914 + static int ion_ioctl_share(struct file *parent, struct ion_client *client, 915 + struct ion_handle *handle) 916 + { 917 + int fd = get_unused_fd(); 918 + struct file *file; 919 + 920 + if (fd < 0) 921 + return -ENFILE; 922 + 923 + file = anon_inode_getfile("ion_share_fd", &ion_share_fops, 924 + handle->buffer, O_RDWR); 925 + if (IS_ERR_OR_NULL(file)) 926 + goto err; 927 + ion_buffer_get(handle->buffer); 928 + fd_install(fd, file); 929 + 930 + return fd; 931 + 932 + err: 933 + put_unused_fd(fd); 934 + return -ENFILE; 935 + } 936 + 937 + static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 938 + { 939 + struct ion_client *client = filp->private_data; 940 + 941 + switch (cmd) { 942 + case ION_IOC_ALLOC: 943 + { 944 + struct ion_allocation_data data; 945 + 946 + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) 947 + return -EFAULT; 948 + data.handle = ion_alloc(client, data.len, data.align, 949 + data.flags); 950 + if (copy_to_user((void __user *)arg, &data, sizeof(data))) 951 + return -EFAULT; 952 + break; 953 + } 954 + case ION_IOC_FREE: 955 + { 956 + struct ion_handle_data data; 957 + bool valid; 958 + 959 + if (copy_from_user(&data, (void __user *)arg, 960 
+ sizeof(struct ion_handle_data))) 961 + return -EFAULT; 962 + mutex_lock(&client->lock); 963 + valid = ion_handle_validate(client, data.handle); 964 + mutex_unlock(&client->lock); 965 + if (!valid) 966 + return -EINVAL; 967 + ion_free(client, data.handle); 968 + break; 969 + } 970 + case ION_IOC_MAP: 971 + case ION_IOC_SHARE: 972 + { 973 + struct ion_fd_data data; 974 + 975 + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) 976 + return -EFAULT; 977 + mutex_lock(&client->lock); 978 + if (!ion_handle_validate(client, data.handle)) { 979 + pr_err("%s: invalid handle passed to share ioctl.\n", 980 + __func__); 981 + mutex_unlock(&client->lock); 982 + return -EINVAL; 983 + } 984 + data.fd = ion_ioctl_share(filp, client, data.handle); 985 + mutex_unlock(&client->lock); 986 + if (copy_to_user((void __user *)arg, &data, sizeof(data))) 987 + return -EFAULT; 988 + break; 989 + } 990 + case ION_IOC_IMPORT: 991 + { 992 + struct ion_fd_data data; 993 + if (copy_from_user(&data, (void __user *)arg, 994 + sizeof(struct ion_fd_data))) 995 + return -EFAULT; 996 + 997 + data.handle = ion_import_fd(client, data.fd); 998 + if (IS_ERR(data.handle)) 999 + data.handle = NULL; 1000 + if (copy_to_user((void __user *)arg, &data, 1001 + sizeof(struct ion_fd_data))) 1002 + return -EFAULT; 1003 + break; 1004 + } 1005 + case ION_IOC_CUSTOM: 1006 + { 1007 + struct ion_device *dev = client->dev; 1008 + struct ion_custom_data data; 1009 + 1010 + if (!dev->custom_ioctl) 1011 + return -ENOTTY; 1012 + if (copy_from_user(&data, (void __user *)arg, 1013 + sizeof(struct ion_custom_data))) 1014 + return -EFAULT; 1015 + return dev->custom_ioctl(client, data.cmd, data.arg); 1016 + } 1017 + default: 1018 + return -ENOTTY; 1019 + } 1020 + return 0; 1021 + } 1022 + 1023 + static int ion_release(struct inode *inode, struct file *file) 1024 + { 1025 + struct ion_client *client = file->private_data; 1026 + 1027 + pr_debug("%s: %d\n", __func__, __LINE__); 1028 + ion_client_put(client); 1029 + return 
0; 1030 + } 1031 + 1032 + static int ion_open(struct inode *inode, struct file *file) 1033 + { 1034 + struct miscdevice *miscdev = file->private_data; 1035 + struct ion_device *dev = container_of(miscdev, struct ion_device, dev); 1036 + struct ion_client *client; 1037 + 1038 + pr_debug("%s: %d\n", __func__, __LINE__); 1039 + client = ion_client_create(dev, -1, "user"); 1040 + if (IS_ERR_OR_NULL(client)) 1041 + return PTR_ERR(client); 1042 + file->private_data = client; 1043 + 1044 + return 0; 1045 + } 1046 + 1047 + static const struct file_operations ion_fops = { 1048 + .owner = THIS_MODULE, 1049 + .open = ion_open, 1050 + .release = ion_release, 1051 + .unlocked_ioctl = ion_ioctl, 1052 + }; 1053 + 1054 + static size_t ion_debug_heap_total(struct ion_client *client, 1055 + enum ion_heap_type type) 1056 + { 1057 + size_t size = 0; 1058 + struct rb_node *n; 1059 + 1060 + mutex_lock(&client->lock); 1061 + for (n = rb_first(&client->handles); n; n = rb_next(n)) { 1062 + struct ion_handle *handle = rb_entry(n, 1063 + struct ion_handle, 1064 + node); 1065 + if (handle->buffer->heap->type == type) 1066 + size += handle->buffer->size; 1067 + } 1068 + mutex_unlock(&client->lock); 1069 + return size; 1070 + } 1071 + 1072 + static int ion_debug_heap_show(struct seq_file *s, void *unused) 1073 + { 1074 + struct ion_heap *heap = s->private; 1075 + struct ion_device *dev = heap->dev; 1076 + struct rb_node *n; 1077 + 1078 + seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); 1079 + for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { 1080 + struct ion_client *client = rb_entry(n, struct ion_client, 1081 + node); 1082 + char task_comm[TASK_COMM_LEN]; 1083 + size_t size = ion_debug_heap_total(client, heap->type); 1084 + if (!size) 1085 + continue; 1086 + 1087 + get_task_comm(task_comm, client->task); 1088 + seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid, 1089 + size); 1090 + } 1091 + 1092 + for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { 
1093 + struct ion_client *client = rb_entry(n, struct ion_client, 1094 + node); 1095 + size_t size = ion_debug_heap_total(client, heap->type); 1096 + if (!size) 1097 + continue; 1098 + seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid, 1099 + size); 1100 + } 1101 + return 0; 1102 + } 1103 + 1104 + static int ion_debug_heap_open(struct inode *inode, struct file *file) 1105 + { 1106 + return single_open(file, ion_debug_heap_show, inode->i_private); 1107 + } 1108 + 1109 + static const struct file_operations debug_heap_fops = { 1110 + .open = ion_debug_heap_open, 1111 + .read = seq_read, 1112 + .llseek = seq_lseek, 1113 + .release = single_release, 1114 + }; 1115 + 1116 + void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) 1117 + { 1118 + struct rb_node **p = &dev->heaps.rb_node; 1119 + struct rb_node *parent = NULL; 1120 + struct ion_heap *entry; 1121 + 1122 + heap->dev = dev; 1123 + mutex_lock(&dev->lock); 1124 + while (*p) { 1125 + parent = *p; 1126 + entry = rb_entry(parent, struct ion_heap, node); 1127 + 1128 + if (heap->id < entry->id) { 1129 + p = &(*p)->rb_left; 1130 + } else if (heap->id > entry->id ) { 1131 + p = &(*p)->rb_right; 1132 + } else { 1133 + pr_err("%s: can not insert multiple heaps with " 1134 + "id %d\n", __func__, heap->id); 1135 + goto end; 1136 + } 1137 + } 1138 + 1139 + rb_link_node(&heap->node, parent, p); 1140 + rb_insert_color(&heap->node, &dev->heaps); 1141 + debugfs_create_file(heap->name, 0664, dev->debug_root, heap, 1142 + &debug_heap_fops); 1143 + end: 1144 + mutex_unlock(&dev->lock); 1145 + } 1146 + 1147 + struct ion_device *ion_device_create(long (*custom_ioctl) 1148 + (struct ion_client *client, 1149 + unsigned int cmd, 1150 + unsigned long arg)) 1151 + { 1152 + struct ion_device *idev; 1153 + int ret; 1154 + 1155 + idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); 1156 + if (!idev) 1157 + return ERR_PTR(-ENOMEM); 1158 + 1159 + idev->dev.minor = MISC_DYNAMIC_MINOR; 1160 + idev->dev.name = "ion"; 
1161 + idev->dev.fops = &ion_fops; 1162 + idev->dev.parent = NULL; 1163 + ret = misc_register(&idev->dev); 1164 + if (ret) { 1165 + pr_err("ion: failed to register misc device.\n"); 1166 + return ERR_PTR(ret); 1167 + } 1168 + 1169 + idev->debug_root = debugfs_create_dir("ion", NULL); 1170 + if (IS_ERR_OR_NULL(idev->debug_root)) 1171 + pr_err("ion: failed to create debug files.\n"); 1172 + 1173 + idev->custom_ioctl = custom_ioctl; 1174 + idev->buffers = RB_ROOT; 1175 + mutex_init(&idev->lock); 1176 + idev->heaps = RB_ROOT; 1177 + idev->user_clients = RB_ROOT; 1178 + idev->kernel_clients = RB_ROOT; 1179 + return idev; 1180 + } 1181 + 1182 + void ion_device_destroy(struct ion_device *dev) 1183 + { 1184 + misc_deregister(&dev->dev); 1185 + /* XXX need to free the heaps and clients ? */ 1186 + kfree(dev); 1187 + }
+344
drivers/staging/android/ion/ion.h
··· 1 + /* 2 + * drivers/staging/android/ion/ion.h 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #ifndef _LINUX_ION_H 18 + #define _LINUX_ION_H 19 + 20 + #include <linux/types.h> 21 + 22 + struct ion_handle; 23 + /** 24 + * enum ion_heap_types - list of all possible types of heaps 25 + * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc 26 + * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc 27 + * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved 28 + * carveout heap, allocations are physically 29 + * contiguous 30 + * @ION_HEAP_END: helper for iterating over heaps 31 + */ 32 + enum ion_heap_type { 33 + ION_HEAP_TYPE_SYSTEM, 34 + ION_HEAP_TYPE_SYSTEM_CONTIG, 35 + ION_HEAP_TYPE_CARVEOUT, 36 + ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always 37 + are at the end of this enum */ 38 + ION_NUM_HEAPS, 39 + }; 40 + 41 + #define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM) 42 + #define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG) 43 + #define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT) 44 + 45 + #ifdef __KERNEL__ 46 + struct ion_device; 47 + struct ion_heap; 48 + struct ion_mapper; 49 + struct ion_client; 50 + struct ion_buffer; 51 + 52 + /* This should be removed some day when phys_addr_t's are fully 53 + plumbed in the kernel, and all instances of ion_phys_addr_t should 54 + be converted to phys_addr_t. 
For the time being many kernel interfaces 55 + do not accept phys_addr_t's that would have to */ 56 + #define ion_phys_addr_t unsigned long 57 + 58 + /** 59 + * struct ion_platform_heap - defines a heap in the given platform 60 + * @type: type of the heap from ion_heap_type enum 61 + * @id: unique identifier for heap. When allocating (lower numbers 62 + * will be allocated from first) 63 + * @name: used for debug purposes 64 + * @base: base address of heap in physical memory if applicable 65 + * @size: size of the heap in bytes if applicable 66 + * 67 + * Provided by the board file. 68 + */ 69 + struct ion_platform_heap { 70 + enum ion_heap_type type; 71 + unsigned int id; 72 + const char *name; 73 + ion_phys_addr_t base; 74 + size_t size; 75 + }; 76 + 77 + /** 78 + * struct ion_platform_data - array of platform heaps passed from board file 79 + * @nr: number of structures in the array 80 + * @heaps: array of platform_heap structions 81 + * 82 + * Provided by the board file in the form of platform data to a platform device. 83 + */ 84 + struct ion_platform_data { 85 + int nr; 86 + struct ion_platform_heap heaps[]; 87 + }; 88 + 89 + /** 90 + * ion_client_create() - allocate a client and returns it 91 + * @dev: the global ion device 92 + * @heap_mask: mask of heaps this client can allocate from 93 + * @name: used for debugging 94 + */ 95 + struct ion_client *ion_client_create(struct ion_device *dev, 96 + unsigned int heap_mask, const char *name); 97 + 98 + /** 99 + * ion_client_destroy() - free's a client and all it's handles 100 + * @client: the client 101 + * 102 + * Free the provided client and all it's resources including 103 + * any handles it is holding. 
104 + */ 105 + void ion_client_destroy(struct ion_client *client); 106 + 107 + /** 108 + * ion_alloc - allocate ion memory 109 + * @client: the client 110 + * @len: size of the allocation 111 + * @align: requested allocation alignment, lots of hardware blocks have 112 + * alignment requirements of some kind 113 + * @flags: mask of heaps to allocate from, if multiple bits are set 114 + * heaps will be tried in order from lowest to highest order bit 115 + * 116 + * Allocate memory in one of the heaps provided in heap mask and return 117 + * an opaque handle to it. 118 + */ 119 + struct ion_handle *ion_alloc(struct ion_client *client, size_t len, 120 + size_t align, unsigned int flags); 121 + 122 + /** 123 + * ion_free - free a handle 124 + * @client: the client 125 + * @handle: the handle to free 126 + * 127 + * Free the provided handle. 128 + */ 129 + void ion_free(struct ion_client *client, struct ion_handle *handle); 130 + 131 + /** 132 + * ion_phys - returns the physical address and len of a handle 133 + * @client: the client 134 + * @handle: the handle 135 + * @addr: a pointer to put the address in 136 + * @len: a pointer to put the length in 137 + * 138 + * This function queries the heap for a particular handle to get the 139 + * handle's physical address. It't output is only correct if 140 + * a heap returns physically contiguous memory -- in other cases 141 + * this api should not be implemented -- ion_map_dma should be used 142 + * instead. Returns -EINVAL if the handle is invalid. This has 143 + * no implications on the reference counting of the handle -- 144 + * the returned value may not be valid if the caller is not 145 + * holding a reference. 
146 + */ 147 + int ion_phys(struct ion_client *client, struct ion_handle *handle, 148 + ion_phys_addr_t *addr, size_t *len); 149 + 150 + /** 151 + * ion_map_kernel - create mapping for the given handle 152 + * @client: the client 153 + * @handle: handle to map 154 + * 155 + * Map the given handle into the kernel and return a kernel address that 156 + * can be used to access this address. 157 + */ 158 + void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); 159 + 160 + /** 161 + * ion_unmap_kernel() - destroy a kernel mapping for a handle 162 + * @client: the client 163 + * @handle: handle to unmap 164 + */ 165 + void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); 166 + 167 + /** 168 + * ion_map_dma - create a dma mapping for a given handle 169 + * @client: the client 170 + * @handle: handle to map 171 + * 172 + * Return an sglist describing the given handle 173 + */ 174 + struct scatterlist *ion_map_dma(struct ion_client *client, 175 + struct ion_handle *handle); 176 + 177 + /** 178 + * ion_unmap_dma() - destroy a dma mapping for a handle 179 + * @client: the client 180 + * @handle: handle to unmap 181 + */ 182 + void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle); 183 + 184 + /** 185 + * ion_share() - given a handle, obtain a buffer to pass to other clients 186 + * @client: the client 187 + * @handle: the handle to share 188 + * 189 + * Given a handle, return a buffer, which exists in a global name 190 + * space, and can be passed to other clients. Should be passed into ion_import 191 + * to obtain a new handle for this buffer. 192 + * 193 + * NOTE: This function does do not an extra reference. The burden is on the 194 + * caller to make sure the buffer doesn't go away while it's being passed to 195 + * another client. That is, ion_free should not be called on this handle until 196 + * the buffer has been imported into the other client. 
197 + */ 198 + struct ion_buffer *ion_share(struct ion_client *client, 199 + struct ion_handle *handle); 200 + 201 + /** 202 + * ion_import() - given an buffer in another client, import it 203 + * @client: this blocks client 204 + * @buffer: the buffer to import (as obtained from ion_share) 205 + * 206 + * Given a buffer, add it to the client and return the handle to use to refer 207 + * to it further. This is called to share a handle from one kernel client to 208 + * another. 209 + */ 210 + struct ion_handle *ion_import(struct ion_client *client, 211 + struct ion_buffer *buffer); 212 + 213 + /** 214 + * ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it 215 + * @client: this blocks client 216 + * @fd: the fd 217 + * 218 + * A helper function for drivers that will be recieving ion buffers shared 219 + * with them from userspace. These buffers are represented by a file 220 + * descriptor obtained as the return from the ION_IOC_SHARE ioctl. 221 + * This function coverts that fd into the underlying buffer, and returns 222 + * the handle to use to refer to it further. 
223 + */ 224 + struct ion_handle *ion_import_fd(struct ion_client *client, int fd); 225 + #endif /* __KERNEL__ */ 226 + 227 + /** 228 + * DOC: Ion Userspace API 229 + * 230 + * create a client by opening /dev/ion 231 + * most operations handled via following ioctls 232 + * 233 + */ 234 + 235 + /** 236 + * struct ion_allocation_data - metadata passed from userspace for allocations 237 + * @len: size of the allocation 238 + * @align: required alignment of the allocation 239 + * @flags: flags passed to heap 240 + * @handle: pointer that will be populated with a cookie to use to refer 241 + * to this allocation 242 + * 243 + * Provided by userspace as an argument to the ioctl 244 + */ 245 + struct ion_allocation_data { 246 + size_t len; 247 + size_t align; 248 + unsigned int flags; 249 + struct ion_handle *handle; 250 + }; 251 + 252 + /** 253 + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair 254 + * @handle: a handle 255 + * @fd: a file descriptor representing that handle 256 + * 257 + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with 258 + * the handle returned from ion alloc, and the kernel returns the file 259 + * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace 260 + * provides the file descriptor and the kernel returns the handle. 261 + */ 262 + struct ion_fd_data { 263 + struct ion_handle *handle; 264 + int fd; 265 + }; 266 + 267 + /** 268 + * struct ion_handle_data - a handle passed to/from the kernel 269 + * @handle: a handle 270 + */ 271 + struct ion_handle_data { 272 + struct ion_handle *handle; 273 + }; 274 + 275 + /** 276 + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl 277 + * @cmd: the custom ioctl function to call 278 + * @arg: additional data to pass to the custom ioctl, typically a user 279 + * pointer to a predefined structure 280 + * 281 + * This works just like the regular cmd and arg fields of an ioctl. 
282 + */ 283 + struct ion_custom_data { 284 + unsigned int cmd; 285 + unsigned long arg; 286 + }; 287 + 288 + #define ION_IOC_MAGIC 'I' 289 + 290 + /** 291 + * DOC: ION_IOC_ALLOC - allocate memory 292 + * 293 + * Takes an ion_allocation_data struct and returns it with the handle field 294 + * populated with the opaque handle for the allocation. 295 + */ 296 + #define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ 297 + struct ion_allocation_data) 298 + 299 + /** 300 + * DOC: ION_IOC_FREE - free memory 301 + * 302 + * Takes an ion_handle_data struct and frees the handle. 303 + */ 304 + #define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) 305 + 306 + /** 307 + * DOC: ION_IOC_MAP - get a file descriptor to mmap 308 + * 309 + * Takes an ion_fd_data struct with the handle field populated with a valid 310 + * opaque handle. Returns the struct with the fd field set to a file 311 + * descriptor open in the current address space. This file descriptor 312 + * can then be used as an argument to mmap. 313 + */ 314 + #define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) 315 + 316 + /** 317 + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation 318 + * 319 + * Takes an ion_fd_data struct with the handle field populated with a valid 320 + * opaque handle. Returns the struct with the fd field set to a file 321 + * descriptor open in the current address space. This file descriptor 322 + * can then be passed to another process. The corresponding opaque handle can 323 + * be retrieved via ION_IOC_IMPORT. 324 + */ 325 + #define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) 326 + 327 + /** 328 + * DOC: ION_IOC_IMPORT - imports a shared file descriptor 329 + * 330 + * Takes an ion_fd_data struct with the fd field populated with a valid file 331 + * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle 332 + * filed set to the corresponding opaque handle. 
333 + */ 334 + #define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, int) 335 + 336 + /** 337 + * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl 338 + * 339 + * Takes the argument of the architecture specific ioctl to call and 340 + * passes appropriate userdata for that ioctl 341 + */ 342 + #define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) 343 + 344 + #endif /* _LINUX_ION_H */
+162
drivers/staging/android/ion/ion_carveout_heap.c
··· 1 + /* 2 + * drivers/staging/android/ion/ion_carveout_heap.c 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + #include <linux/spinlock.h> 17 + 18 + #include <linux/err.h> 19 + #include <linux/genalloc.h> 20 + #include <linux/io.h> 21 + #include <linux/mm.h> 22 + #include <linux/scatterlist.h> 23 + #include <linux/slab.h> 24 + #include <linux/vmalloc.h> 25 + #include "ion.h" 26 + #include "ion_priv.h" 27 + 28 + #include <asm/mach/map.h> 29 + 30 + struct ion_carveout_heap { 31 + struct ion_heap heap; 32 + struct gen_pool *pool; 33 + ion_phys_addr_t base; 34 + }; 35 + 36 + ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, 37 + unsigned long size, 38 + unsigned long align) 39 + { 40 + struct ion_carveout_heap *carveout_heap = 41 + container_of(heap, struct ion_carveout_heap, heap); 42 + unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); 43 + 44 + if (!offset) 45 + return ION_CARVEOUT_ALLOCATE_FAIL; 46 + 47 + return offset; 48 + } 49 + 50 + void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, 51 + unsigned long size) 52 + { 53 + struct ion_carveout_heap *carveout_heap = 54 + container_of(heap, struct ion_carveout_heap, heap); 55 + 56 + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) 57 + return; 58 + gen_pool_free(carveout_heap->pool, addr, size); 59 + } 60 + 61 + static int ion_carveout_heap_phys(struct ion_heap *heap, 62 + struct ion_buffer *buffer, 63 + ion_phys_addr_t *addr, size_t *len) 64 + { 65 + *addr = buffer->priv_phys; 66 + *len 
= buffer->size; 67 + return 0; 68 + } 69 + 70 + static int ion_carveout_heap_allocate(struct ion_heap *heap, 71 + struct ion_buffer *buffer, 72 + unsigned long size, unsigned long align, 73 + unsigned long flags) 74 + { 75 + buffer->priv_phys = ion_carveout_allocate(heap, size, align); 76 + return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0; 77 + } 78 + 79 + static void ion_carveout_heap_free(struct ion_buffer *buffer) 80 + { 81 + struct ion_heap *heap = buffer->heap; 82 + 83 + ion_carveout_free(heap, buffer->priv_phys, buffer->size); 84 + buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL; 85 + } 86 + 87 + struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap, 88 + struct ion_buffer *buffer) 89 + { 90 + return ERR_PTR(-EINVAL); 91 + } 92 + 93 + void ion_carveout_heap_unmap_dma(struct ion_heap *heap, 94 + struct ion_buffer *buffer) 95 + { 96 + return; 97 + } 98 + 99 + void *ion_carveout_heap_map_kernel(struct ion_heap *heap, 100 + struct ion_buffer *buffer) 101 + { 102 + return __arch_ioremap(buffer->priv_phys, buffer->size, 103 + MT_MEMORY_NONCACHED); 104 + } 105 + 106 + void ion_carveout_heap_unmap_kernel(struct ion_heap *heap, 107 + struct ion_buffer *buffer) 108 + { 109 + __arch_iounmap(buffer->vaddr); 110 + buffer->vaddr = NULL; 111 + return; 112 + } 113 + 114 + int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, 115 + struct vm_area_struct *vma) 116 + { 117 + return remap_pfn_range(vma, vma->vm_start, 118 + __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, 119 + buffer->size, 120 + pgprot_noncached(vma->vm_page_prot)); 121 + } 122 + 123 + static struct ion_heap_ops carveout_heap_ops = { 124 + .allocate = ion_carveout_heap_allocate, 125 + .free = ion_carveout_heap_free, 126 + .phys = ion_carveout_heap_phys, 127 + .map_user = ion_carveout_heap_map_user, 128 + .map_kernel = ion_carveout_heap_map_kernel, 129 + .unmap_kernel = ion_carveout_heap_unmap_kernel, 130 + }; 131 + 132 + struct ion_heap 
*ion_carveout_heap_create(struct ion_platform_heap *heap_data) 133 + { 134 + struct ion_carveout_heap *carveout_heap; 135 + 136 + carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); 137 + if (!carveout_heap) 138 + return ERR_PTR(-ENOMEM); 139 + 140 + carveout_heap->pool = gen_pool_create(12, -1); 141 + if (!carveout_heap->pool) { 142 + kfree(carveout_heap); 143 + return ERR_PTR(-ENOMEM); 144 + } 145 + carveout_heap->base = heap_data->base; 146 + gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, 147 + -1); 148 + carveout_heap->heap.ops = &carveout_heap_ops; 149 + carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; 150 + 151 + return &carveout_heap->heap; 152 + } 153 + 154 + void ion_carveout_heap_destroy(struct ion_heap *heap) 155 + { 156 + struct ion_carveout_heap *carveout_heap = 157 + container_of(heap, struct ion_carveout_heap, heap); 158 + 159 + gen_pool_destroy(carveout_heap->pool); 160 + kfree(carveout_heap); 161 + carveout_heap = NULL; 162 + }
+72
drivers/staging/android/ion/ion_heap.c
··· 1 + /* 2 + * drivers/staging/android/ion/ion_heap.c 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #include <linux/err.h> 18 + #include "ion.h" 19 + #include "ion_priv.h" 20 + 21 + struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) 22 + { 23 + struct ion_heap *heap = NULL; 24 + 25 + switch (heap_data->type) { 26 + case ION_HEAP_TYPE_SYSTEM_CONTIG: 27 + heap = ion_system_contig_heap_create(heap_data); 28 + break; 29 + case ION_HEAP_TYPE_SYSTEM: 30 + heap = ion_system_heap_create(heap_data); 31 + break; 32 + case ION_HEAP_TYPE_CARVEOUT: 33 + heap = ion_carveout_heap_create(heap_data); 34 + break; 35 + default: 36 + pr_err("%s: Invalid heap type %d\n", __func__, 37 + heap_data->type); 38 + return ERR_PTR(-EINVAL); 39 + } 40 + 41 + if (IS_ERR_OR_NULL(heap)) { 42 + pr_err("%s: error creating heap %s type %d base %lu size %u\n", 43 + __func__, heap_data->name, heap_data->type, 44 + heap_data->base, heap_data->size); 45 + return ERR_PTR(-EINVAL); 46 + } 47 + 48 + heap->name = heap_data->name; 49 + heap->id = heap_data->id; 50 + return heap; 51 + } 52 + 53 + void ion_heap_destroy(struct ion_heap *heap) 54 + { 55 + if (!heap) 56 + return; 57 + 58 + switch (heap->type) { 59 + case ION_HEAP_TYPE_SYSTEM_CONTIG: 60 + ion_system_contig_heap_destroy(heap); 61 + break; 62 + case ION_HEAP_TYPE_SYSTEM: 63 + ion_system_heap_destroy(heap); 64 + break; 65 + case ION_HEAP_TYPE_CARVEOUT: 66 + ion_carveout_heap_destroy(heap); 67 + break; 68 + default: 69 + 
pr_err("%s: Invalid heap type %d\n", __func__, 70 + heap->type); 71 + } 72 + }
+185
drivers/staging/android/ion/ion_priv.h
··· 1 + /* 2 + * drivers/staging/android/ion/ion_priv.h 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #ifndef _ION_PRIV_H 18 + #define _ION_PRIV_H 19 + 20 + #include <linux/kref.h> 21 + #include <linux/mm_types.h> 22 + #include <linux/mutex.h> 23 + #include <linux/rbtree.h> 24 + 25 + #include "ion.h" 26 + 27 + struct ion_mapping; 28 + 29 + struct ion_dma_mapping { 30 + struct kref ref; 31 + struct scatterlist *sglist; 32 + }; 33 + 34 + struct ion_kernel_mapping { 35 + struct kref ref; 36 + void *vaddr; 37 + }; 38 + 39 + struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); 40 + 41 + /** 42 + * struct ion_buffer - metadata for a particular buffer 43 + * @ref: refernce count 44 + * @node: node in the ion_device buffers tree 45 + * @dev: back pointer to the ion_device 46 + * @heap: back pointer to the heap the buffer came from 47 + * @flags: buffer specific flags 48 + * @size: size of the buffer 49 + * @priv_virt: private data to the buffer representable as 50 + * a void * 51 + * @priv_phys: private data to the buffer representable as 52 + * an ion_phys_addr_t (and someday a phys_addr_t) 53 + * @lock: protects the buffers cnt fields 54 + * @kmap_cnt: number of times the buffer is mapped to the kernel 55 + * @vaddr: the kenrel mapping if kmap_cnt is not zero 56 + * @dmap_cnt: number of times the buffer is mapped for dma 57 + * @sglist: the scatterlist for the buffer is dmap_cnt is not zero 58 + */ 59 + struct ion_buffer { 60 + struct kref ref; 61 + struct 
rb_node node; 62 + struct ion_device *dev; 63 + struct ion_heap *heap; 64 + unsigned long flags; 65 + size_t size; 66 + union { 67 + void *priv_virt; 68 + ion_phys_addr_t priv_phys; 69 + }; 70 + struct mutex lock; 71 + int kmap_cnt; 72 + void *vaddr; 73 + int dmap_cnt; 74 + struct scatterlist *sglist; 75 + }; 76 + 77 + /** 78 + * struct ion_heap_ops - ops to operate on a given heap 79 + * @allocate: allocate memory 80 + * @free: free memory 81 + * @phys get physical address of a buffer (only define on 82 + * physically contiguous heaps) 83 + * @map_dma map the memory for dma to a scatterlist 84 + * @unmap_dma unmap the memory for dma 85 + * @map_kernel map memory to the kernel 86 + * @unmap_kernel unmap memory to the kernel 87 + * @map_user map memory to userspace 88 + */ 89 + struct ion_heap_ops { 90 + int (*allocate) (struct ion_heap *heap, 91 + struct ion_buffer *buffer, unsigned long len, 92 + unsigned long align, unsigned long flags); 93 + void (*free) (struct ion_buffer *buffer); 94 + int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer, 95 + ion_phys_addr_t *addr, size_t *len); 96 + struct scatterlist *(*map_dma) (struct ion_heap *heap, 97 + struct ion_buffer *buffer); 98 + void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer); 99 + void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); 100 + void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); 101 + int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer, 102 + struct vm_area_struct *vma); 103 + }; 104 + 105 + /** 106 + * struct ion_heap - represents a heap in the system 107 + * @node: rb node to put the heap on the device's tree of heaps 108 + * @dev: back pointer to the ion_device 109 + * @type: type of heap 110 + * @ops: ops struct as above 111 + * @id: id of heap, also indicates priority of this heap when 112 + * allocating. 
These are specified by platform data and 113 + * MUST be unique 114 + * @name: used for debugging 115 + * 116 + * Represents a pool of memory from which buffers can be made. In some 117 + * systems the only heap is regular system memory allocated via vmalloc. 118 + * On others, some blocks might require large physically contiguous buffers 119 + * that are allocated from a specially reserved heap. 120 + */ 121 + struct ion_heap { 122 + struct rb_node node; 123 + struct ion_device *dev; 124 + enum ion_heap_type type; 125 + struct ion_heap_ops *ops; 126 + int id; 127 + const char *name; 128 + }; 129 + 130 + /** 131 + * ion_device_create - allocates and returns an ion device 132 + * @custom_ioctl: arch specific ioctl function if applicable 133 + * 134 + * returns a valid device or -PTR_ERR 135 + */ 136 + struct ion_device *ion_device_create(long (*custom_ioctl) 137 + (struct ion_client *client, 138 + unsigned int cmd, 139 + unsigned long arg)); 140 + 141 + /** 142 + * ion_device_destroy - free and device and it's resource 143 + * @dev: the device 144 + */ 145 + void ion_device_destroy(struct ion_device *dev); 146 + 147 + /** 148 + * ion_device_add_heap - adds a heap to the ion device 149 + * @dev: the device 150 + * @heap: the heap to add 151 + */ 152 + void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); 153 + 154 + /** 155 + * functions for creating and destroying the built in ion heaps. 156 + * architectures can add their own custom architecture specific 157 + * heaps as appropriate. 
158 + */ 159 + 160 + struct ion_heap *ion_heap_create(struct ion_platform_heap *); 161 + void ion_heap_destroy(struct ion_heap *); 162 + 163 + struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); 164 + void ion_system_heap_destroy(struct ion_heap *); 165 + 166 + struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); 167 + void ion_system_contig_heap_destroy(struct ion_heap *); 168 + 169 + struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); 170 + void ion_carveout_heap_destroy(struct ion_heap *); 171 + /** 172 + * kernel api to allocate/free from carveout -- used when carveout is 173 + * used to back an architecture specific custom heap 174 + */ 175 + ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, 176 + unsigned long align); 177 + void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, 178 + unsigned long size); 179 + /** 180 + * The carveout heap returns physical addresses, since 0 may be a valid 181 + * physical address, this is used to indicate allocation failed 182 + */ 183 + #define ION_CARVEOUT_ALLOCATE_FAIL -1 184 + 185 + #endif /* _ION_PRIV_H */
+198
drivers/staging/android/ion/ion_system_heap.c
··· 1 + /* 2 + * drivers/staging/android/ion/ion_system_heap.c 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #include <linux/err.h> 18 + #include <linux/mm.h> 19 + #include <linux/scatterlist.h> 20 + #include <linux/slab.h> 21 + #include <linux/vmalloc.h> 22 + #include "ion.h" 23 + #include "ion_priv.h" 24 + 25 + static int ion_system_heap_allocate(struct ion_heap *heap, 26 + struct ion_buffer *buffer, 27 + unsigned long size, unsigned long align, 28 + unsigned long flags) 29 + { 30 + buffer->priv_virt = vmalloc_user(size); 31 + if (!buffer->priv_virt) 32 + return -ENOMEM; 33 + return 0; 34 + } 35 + 36 + void ion_system_heap_free(struct ion_buffer *buffer) 37 + { 38 + vfree(buffer->priv_virt); 39 + } 40 + 41 + struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap, 42 + struct ion_buffer *buffer) 43 + { 44 + struct scatterlist *sglist; 45 + struct page *page; 46 + int i; 47 + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; 48 + void *vaddr = buffer->priv_virt; 49 + 50 + sglist = vmalloc(npages * sizeof(struct scatterlist)); 51 + if (!sglist) 52 + return ERR_PTR(-ENOMEM); 53 + memset(sglist, 0, npages * sizeof(struct scatterlist)); 54 + sg_init_table(sglist, npages); 55 + for (i = 0; i < npages; i++) { 56 + page = vmalloc_to_page(vaddr); 57 + if (!page) 58 + goto end; 59 + sg_set_page(&sglist[i], page, PAGE_SIZE, 0); 60 + vaddr += PAGE_SIZE; 61 + } 62 + /* XXX do cache maintenance for dma? 
*/ 63 + return sglist; 64 + end: 65 + vfree(sglist); 66 + return NULL; 67 + } 68 + 69 + void ion_system_heap_unmap_dma(struct ion_heap *heap, 70 + struct ion_buffer *buffer) 71 + { 72 + /* XXX undo cache maintenance for dma? */ 73 + if (buffer->sglist) 74 + vfree(buffer->sglist); 75 + } 76 + 77 + void *ion_system_heap_map_kernel(struct ion_heap *heap, 78 + struct ion_buffer *buffer) 79 + { 80 + return buffer->priv_virt; 81 + } 82 + 83 + void ion_system_heap_unmap_kernel(struct ion_heap *heap, 84 + struct ion_buffer *buffer) 85 + { 86 + } 87 + 88 + int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, 89 + struct vm_area_struct *vma) 90 + { 91 + return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff); 92 + } 93 + 94 + static struct ion_heap_ops vmalloc_ops = { 95 + .allocate = ion_system_heap_allocate, 96 + .free = ion_system_heap_free, 97 + .map_dma = ion_system_heap_map_dma, 98 + .unmap_dma = ion_system_heap_unmap_dma, 99 + .map_kernel = ion_system_heap_map_kernel, 100 + .unmap_kernel = ion_system_heap_unmap_kernel, 101 + .map_user = ion_system_heap_map_user, 102 + }; 103 + 104 + struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) 105 + { 106 + struct ion_heap *heap; 107 + 108 + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); 109 + if (!heap) 110 + return ERR_PTR(-ENOMEM); 111 + heap->ops = &vmalloc_ops; 112 + heap->type = ION_HEAP_TYPE_SYSTEM; 113 + return heap; 114 + } 115 + 116 + void ion_system_heap_destroy(struct ion_heap *heap) 117 + { 118 + kfree(heap); 119 + } 120 + 121 + static int ion_system_contig_heap_allocate(struct ion_heap *heap, 122 + struct ion_buffer *buffer, 123 + unsigned long len, 124 + unsigned long align, 125 + unsigned long flags) 126 + { 127 + buffer->priv_virt = kzalloc(len, GFP_KERNEL); 128 + if (!buffer->priv_virt) 129 + return -ENOMEM; 130 + return 0; 131 + } 132 + 133 + void ion_system_contig_heap_free(struct ion_buffer *buffer) 134 + { 135 + kfree(buffer->priv_virt); 136 + 
} 137 + 138 + static int ion_system_contig_heap_phys(struct ion_heap *heap, 139 + struct ion_buffer *buffer, 140 + ion_phys_addr_t *addr, size_t *len) 141 + { 142 + *addr = virt_to_phys(buffer->priv_virt); 143 + *len = buffer->size; 144 + return 0; 145 + } 146 + 147 + struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap, 148 + struct ion_buffer *buffer) 149 + { 150 + struct scatterlist *sglist; 151 + 152 + sglist = vmalloc(sizeof(struct scatterlist)); 153 + if (!sglist) 154 + return ERR_PTR(-ENOMEM); 155 + sg_init_table(sglist, 1); 156 + sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0); 157 + return sglist; 158 + } 159 + 160 + int ion_system_contig_heap_map_user(struct ion_heap *heap, 161 + struct ion_buffer *buffer, 162 + struct vm_area_struct *vma) 163 + { 164 + unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt)); 165 + return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, 166 + vma->vm_end - vma->vm_start, 167 + vma->vm_page_prot); 168 + 169 + } 170 + 171 + static struct ion_heap_ops kmalloc_ops = { 172 + .allocate = ion_system_contig_heap_allocate, 173 + .free = ion_system_contig_heap_free, 174 + .phys = ion_system_contig_heap_phys, 175 + .map_dma = ion_system_contig_heap_map_dma, 176 + .unmap_dma = ion_system_heap_unmap_dma, 177 + .map_kernel = ion_system_heap_map_kernel, 178 + .unmap_kernel = ion_system_heap_unmap_kernel, 179 + .map_user = ion_system_contig_heap_map_user, 180 + }; 181 + 182 + struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) 183 + { 184 + struct ion_heap *heap; 185 + 186 + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); 187 + if (!heap) 188 + return ERR_PTR(-ENOMEM); 189 + heap->ops = &kmalloc_ops; 190 + heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; 191 + return heap; 192 + } 193 + 194 + void ion_system_contig_heap_destroy(struct ion_heap *heap) 195 + { 196 + kfree(heap); 197 + } 198 +
+114
drivers/staging/android/ion/ion_system_mapper.c
··· 1 + /* 2 + * drivers/staging/android/ion/ion_system_mapper.c 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #include <linux/err.h> 18 + #include <linux/memory.h> 19 + #include <linux/mm.h> 20 + #include <linux/slab.h> 21 + #include <linux/vmalloc.h> 22 + #include "ion.h" 23 + #include "ion_priv.h" 24 + /* 25 + * This mapper is valid for any heap that allocates memory that already has 26 + * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory, 27 + * pages obtained via io_remap, etc. 28 + */ 29 + static void *ion_kernel_mapper_map(struct ion_mapper *mapper, 30 + struct ion_buffer *buffer, 31 + struct ion_mapping **mapping) 32 + { 33 + if (!((1 << buffer->heap->type) & mapper->heap_mask)) { 34 + pr_err("%s: attempting to map an unsupported heap\n", __func__); 35 + return ERR_PTR(-EINVAL); 36 + } 37 + /* XXX REVISIT ME!!! 
*/ 38 + *((unsigned long *)mapping) = (unsigned long)buffer->priv; 39 + return buffer->priv; 40 + } 41 + 42 + static void ion_kernel_mapper_unmap(struct ion_mapper *mapper, 43 + struct ion_buffer *buffer, 44 + struct ion_mapping *mapping) 45 + { 46 + if (!((1 << buffer->heap->type) & mapper->heap_mask)) 47 + pr_err("%s: attempting to unmap an unsupported heap\n", 48 + __func__); 49 + } 50 + 51 + static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper, 52 + struct ion_buffer *buffer, 53 + struct ion_mapping *mapping) 54 + { 55 + if (!((1 << buffer->heap->type) & mapper->heap_mask)) { 56 + pr_err("%s: attempting to unmap an unsupported heap\n", 57 + __func__); 58 + return ERR_PTR(-EINVAL); 59 + } 60 + return buffer->priv; 61 + } 62 + 63 + static int ion_kernel_mapper_map_user(struct ion_mapper *mapper, 64 + struct ion_buffer *buffer, 65 + struct vm_area_struct *vma, 66 + struct ion_mapping *mapping) 67 + { 68 + int ret; 69 + 70 + switch (buffer->heap->type) { 71 + case ION_HEAP_KMALLOC: 72 + { 73 + unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); 74 + ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, 75 + vma->vm_end - vma->vm_start, 76 + vma->vm_page_prot); 77 + break; 78 + } 79 + case ION_HEAP_VMALLOC: 80 + ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff); 81 + break; 82 + default: 83 + pr_err("%s: attempting to map unsupported heap to userspace\n", 84 + __func__); 85 + return -EINVAL; 86 + } 87 + 88 + return ret; 89 + } 90 + 91 + static struct ion_mapper_ops ops = { 92 + .map = ion_kernel_mapper_map, 93 + .map_kernel = ion_kernel_mapper_map_kernel, 94 + .map_user = ion_kernel_mapper_map_user, 95 + .unmap = ion_kernel_mapper_unmap, 96 + }; 97 + 98 + struct ion_mapper *ion_system_mapper_create(void) 99 + { 100 + struct ion_mapper *mapper; 101 + mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL); 102 + if (!mapper) 103 + return ERR_PTR(-ENOMEM); 104 + mapper->type = ION_SYSTEM_MAPPER; 105 + mapper->ops = &ops; 
106 + mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC); 107 + return mapper; 108 + } 109 + 110 + void ion_system_mapper_destroy(struct ion_mapper *mapper) 111 + { 112 + kfree(mapper); 113 + } 114 +
+1
drivers/staging/android/ion/tegra/Makefile
# Built only when the parent Makefile descends into tegra/
# (gated there by obj-$(CONFIG_ION_TEGRA) += tegra/).
obj-y += tegra_ion.o
+96
drivers/staging/android/ion/tegra/tegra_ion.c
··· 1 + /* 2 + * drivers/gpu/tegra/tegra_ion.c 3 + * 4 + * Copyright (C) 2011 Google, Inc. 5 + * 6 + * This software is licensed under the terms of the GNU General Public 7 + * License version 2, as published by the Free Software Foundation, and 8 + * may be copied, distributed, and modified under those terms. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + */ 16 + 17 + #include <linux/err.h> 18 + #include <linux/platform_device.h> 19 + #include <linux/slab.h> 20 + #include "../ion.h" 21 + #include "../ion_priv.h" 22 + 23 + struct ion_device *idev; 24 + struct ion_mapper *tegra_user_mapper; 25 + int num_heaps; 26 + struct ion_heap **heaps; 27 + 28 + int tegra_ion_probe(struct platform_device *pdev) 29 + { 30 + struct ion_platform_data *pdata = pdev->dev.platform_data; 31 + int err; 32 + int i; 33 + 34 + num_heaps = pdata->nr; 35 + 36 + heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); 37 + 38 + idev = ion_device_create(NULL); 39 + if (IS_ERR_OR_NULL(idev)) { 40 + kfree(heaps); 41 + return PTR_ERR(idev); 42 + } 43 + 44 + /* create the heaps as specified in the board file */ 45 + for (i = 0; i < num_heaps; i++) { 46 + struct ion_platform_heap *heap_data = &pdata->heaps[i]; 47 + 48 + heaps[i] = ion_heap_create(heap_data); 49 + if (IS_ERR_OR_NULL(heaps[i])) { 50 + err = PTR_ERR(heaps[i]); 51 + goto err; 52 + } 53 + ion_device_add_heap(idev, heaps[i]); 54 + } 55 + platform_set_drvdata(pdev, idev); 56 + return 0; 57 + err: 58 + for (i = 0; i < num_heaps; i++) { 59 + if (heaps[i]) 60 + ion_heap_destroy(heaps[i]); 61 + } 62 + kfree(heaps); 63 + return err; 64 + } 65 + 66 + int tegra_ion_remove(struct platform_device *pdev) 67 + { 68 + struct ion_device *idev = platform_get_drvdata(pdev); 69 + int i; 70 + 71 + 
ion_device_destroy(idev); 72 + for (i = 0; i < num_heaps; i++) 73 + ion_heap_destroy(heaps[i]); 74 + kfree(heaps); 75 + return 0; 76 + } 77 + 78 + static struct platform_driver ion_driver = { 79 + .probe = tegra_ion_probe, 80 + .remove = tegra_ion_remove, 81 + .driver = { .name = "ion-tegra" } 82 + }; 83 + 84 + static int __init ion_init(void) 85 + { 86 + return platform_driver_register(&ion_driver); 87 + } 88 + 89 + static void __exit ion_exit(void) 90 + { 91 + platform_driver_unregister(&ion_driver); 92 + } 93 + 94 + module_init(ion_init); 95 + module_exit(ion_exit); 96 +