Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

soc: qcom: Make qcom_smem_get() return a pointer

Passing a void ** almost always requires a cast at the call site.
Instead of littering the code with casts every time this function
is called, have qcom_smem_get() return a void pointer to the
location of the smem item. This frees the caller from having to
cast the pointer.

Cc: Bjorn Andersson <bjorn.andersson@sonymobile.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Reviewed-by: Bjorn Andersson <bjorn.andersson@sonymobile.com>
Signed-off-by: Andy Gross <agross@codeaurora.org>

Authored by Stephen Boyd and committed by Andy Gross.
1a03964d 7d0c8bee

+48 -56
+15 -15
drivers/soc/qcom/smd.c
··· 989 989 spin_lock_init(&channel->recv_lock); 990 990 init_waitqueue_head(&channel->fblockread_event); 991 991 992 - ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info, 993 - &info_size); 994 - if (ret) 992 + info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); 993 + if (IS_ERR(info)) { 994 + ret = PTR_ERR(info); 995 995 goto free_name_and_channel; 996 + } 996 997 997 998 /* 998 999 * Use the size of the item to figure out which channel info struct to ··· 1012 1011 goto free_name_and_channel; 1013 1012 } 1014 1013 1015 - ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base, 1016 - &fifo_size); 1017 - if (ret) 1014 + fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); 1015 + if (IS_ERR(fifo_base)) { 1016 + ret = PTR_ERR(fifo_base); 1018 1017 goto free_name_and_channel; 1018 + } 1019 1019 1020 1020 /* The channel consist of a rx and tx fifo of equal size */ 1021 1021 fifo_size /= 2; ··· 1053 1051 unsigned long flags; 1054 1052 unsigned fifo_id; 1055 1053 unsigned info_id; 1056 - int ret; 1057 1054 int tbl; 1058 1055 int i; 1059 1056 1060 1057 for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { 1061 - ret = qcom_smem_get(edge->remote_pid, 1062 - smem_items[tbl].alloc_tbl_id, 1063 - (void **)&alloc_tbl, 1064 - NULL); 1065 - if (ret < 0) 1058 + alloc_tbl = qcom_smem_get(edge->remote_pid, 1059 + smem_items[tbl].alloc_tbl_id, NULL); 1060 + if (IS_ERR(alloc_tbl)) 1066 1061 continue; 1067 1062 1068 1063 for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { ··· 1237 1238 int num_edges; 1238 1239 int ret; 1239 1240 int i = 0; 1241 + void *p; 1240 1242 1241 1243 /* Wait for smem */ 1242 - ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL); 1243 - if (ret == -EPROBE_DEFER) 1244 - return ret; 1244 + p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL); 1245 + if (PTR_ERR(p) == -EPROBE_DEFER) 1246 + return PTR_ERR(p); 1245 1247 1246 1248 num_edges = 
of_get_available_child_count(pdev->dev.of_node); 1247 1249 array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
+32 -40
drivers/soc/qcom/smem.c
··· 378 378 } 379 379 EXPORT_SYMBOL(qcom_smem_alloc); 380 380 381 - static int qcom_smem_get_global(struct qcom_smem *smem, 382 - unsigned item, 383 - void **ptr, 384 - size_t *size) 381 + static void *qcom_smem_get_global(struct qcom_smem *smem, 382 + unsigned item, 383 + size_t *size) 385 384 { 386 385 struct smem_header *header; 387 386 struct smem_region *area; ··· 389 390 unsigned i; 390 391 391 392 if (WARN_ON(item >= SMEM_ITEM_COUNT)) 392 - return -EINVAL; 393 + return ERR_PTR(-EINVAL); 393 394 394 395 header = smem->regions[0].virt_base; 395 396 entry = &header->toc[item]; 396 397 if (!entry->allocated) 397 - return -ENXIO; 398 + return ERR_PTR(-ENXIO); 398 399 399 - if (ptr != NULL) { 400 - aux_base = entry->aux_base & AUX_BASE_MASK; 400 + aux_base = entry->aux_base & AUX_BASE_MASK; 401 401 402 - for (i = 0; i < smem->num_regions; i++) { 403 - area = &smem->regions[i]; 402 + for (i = 0; i < smem->num_regions; i++) { 403 + area = &smem->regions[i]; 404 404 405 - if (area->aux_base == aux_base || !aux_base) { 406 - *ptr = area->virt_base + entry->offset; 407 - break; 408 - } 405 + if (area->aux_base == aux_base || !aux_base) { 406 + if (size != NULL) 407 + *size = entry->size; 408 + return area->virt_base + entry->offset; 409 409 } 410 410 } 411 - if (size != NULL) 412 - *size = entry->size; 413 411 414 - return 0; 412 + return ERR_PTR(-ENOENT); 415 413 } 416 414 417 - static int qcom_smem_get_private(struct qcom_smem *smem, 418 - unsigned host, 419 - unsigned item, 420 - void **ptr, 421 - size_t *size) 415 + static void *qcom_smem_get_private(struct qcom_smem *smem, 416 + unsigned host, 417 + unsigned item, 418 + size_t *size) 422 419 { 423 420 struct smem_partition_header *phdr; 424 421 struct smem_private_entry *hdr; ··· 430 435 dev_err(smem->dev, 431 436 "Found invalid canary in host %d partition\n", 432 437 host); 433 - return -EINVAL; 438 + return ERR_PTR(-EINVAL); 434 439 } 435 440 436 441 if (hdr->item == item) { 437 - if (ptr != NULL) 438 - *ptr = p 
+ sizeof(*hdr) + hdr->padding_hdr; 439 - 440 442 if (size != NULL) 441 443 *size = hdr->size - hdr->padding_data; 442 444 443 - return 0; 445 + return p + sizeof(*hdr) + hdr->padding_hdr; 444 446 } 445 447 446 448 p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 447 449 } 448 450 449 - return -ENOENT; 451 + return ERR_PTR(-ENOENT); 450 452 } 451 453 452 454 /** 453 455 * qcom_smem_get() - resolve ptr of size of a smem item 454 456 * @host: the remote processor, or -1 455 457 * @item: smem item handle 456 - * @ptr: pointer to be filled out with address of the item 457 458 * @size: pointer to be filled out with size of the item 458 459 * 459 - * Looks up pointer and size of a smem item. 460 + * Looks up smem item and returns pointer to it. Size of smem 461 + * item is returned in @size. 460 462 */ 461 - int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size) 463 + void *qcom_smem_get(unsigned host, unsigned item, size_t *size) 462 464 { 463 465 unsigned long flags; 464 466 int ret; 467 + void *ptr = ERR_PTR(-EPROBE_DEFER); 465 468 466 469 if (!__smem) 467 - return -EPROBE_DEFER; 470 + return ptr; 468 471 469 472 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, 470 473 HWSPINLOCK_TIMEOUT, 471 474 &flags); 472 475 if (ret) 473 - return ret; 476 + return ERR_PTR(ret); 474 477 475 478 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) 476 - ret = qcom_smem_get_private(__smem, host, item, ptr, size); 479 + ptr = qcom_smem_get_private(__smem, host, item, size); 477 480 else 478 - ret = qcom_smem_get_global(__smem, item, ptr, size); 481 + ptr = qcom_smem_get_global(__smem, item, size); 479 482 480 483 hwspin_unlock_irqrestore(__smem->hwlock, &flags); 481 - return ret; 484 + 485 + return ptr; 482 486 483 487 } 484 488 EXPORT_SYMBOL(qcom_smem_get); ··· 514 520 { 515 521 unsigned *versions; 516 522 size_t size; 517 - int ret; 518 523 519 - ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, 520 - (void **)&versions, &size); 521 - if (ret < 0) { 524 + 
versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size); 525 + if (IS_ERR(versions)) { 522 526 dev_err(smem->dev, "Unable to read the version item\n"); 523 527 return -ENOENT; 524 528 }
+1 -1
include/linux/soc/qcom/smem.h
··· 4 4 #define QCOM_SMEM_HOST_ANY -1 5 5 6 6 int qcom_smem_alloc(unsigned host, unsigned item, size_t size); 7 - int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size); 7 + void *qcom_smem_get(unsigned host, unsigned item, size_t *size); 8 8 9 9 int qcom_smem_get_free_space(unsigned host); 10 10