Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

soc: qcom: Switch to EXPORT_SYMBOL_GPL()

Switch to GPL version of EXPORT_SYMBOL for Qualcomm SoC drivers.

Signed-off-by: Unnathi Chalicheemala <quic_uchalich@quicinc.com>
Reviewed-by: Trilok Soni <quic_tsoni@quicinc.com>
Link: https://lore.kernel.org/r/20230922184817.5183-1-quic_uchalich@quicinc.com
Signed-off-by: Bjorn Andersson <andersson@kernel.org>

Authored by Unnathi Chalicheemala and committed by Bjorn Andersson (commit hashes from the page header: 9b09c0f2, 433ce46a).

12 files changed: +55 -55
+4 -4
drivers/soc/qcom/cmd-db.c
··· 133 133 134 134 return 0; 135 135 } 136 - EXPORT_SYMBOL(cmd_db_ready); 136 + EXPORT_SYMBOL_GPL(cmd_db_ready); 137 137 138 138 static int cmd_db_get_header(const char *id, const struct entry_header **eh, 139 139 const struct rsc_hdr **rh) ··· 193 193 194 194 return ret < 0 ? 0 : le32_to_cpu(ent->addr); 195 195 } 196 - EXPORT_SYMBOL(cmd_db_read_addr); 196 + EXPORT_SYMBOL_GPL(cmd_db_read_addr); 197 197 198 198 /** 199 199 * cmd_db_read_aux_data() - Query command db for aux data. ··· 218 218 219 219 return rsc_offset(rsc_hdr, ent); 220 220 } 221 - EXPORT_SYMBOL(cmd_db_read_aux_data); 221 + EXPORT_SYMBOL_GPL(cmd_db_read_aux_data); 222 222 223 223 /** 224 224 * cmd_db_read_slave_id - Get the slave ID for a given resource address ··· 240 240 addr = le32_to_cpu(ent->addr); 241 241 return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK; 242 242 } 243 - EXPORT_SYMBOL(cmd_db_read_slave_id); 243 + EXPORT_SYMBOL_GPL(cmd_db_read_slave_id); 244 244 245 245 #ifdef CONFIG_DEBUG_FS 246 246 static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
+2 -2
drivers/soc/qcom/kryo-l2-accessors.c
··· 32 32 isb(); 33 33 raw_spin_unlock_irqrestore(&l2_access_lock, flags); 34 34 } 35 - EXPORT_SYMBOL(kryo_l2_set_indirect_reg); 35 + EXPORT_SYMBOL_GPL(kryo_l2_set_indirect_reg); 36 36 37 37 /** 38 38 * kryo_l2_get_indirect_reg() - read an L2 register value ··· 54 54 55 55 return val; 56 56 } 57 - EXPORT_SYMBOL(kryo_l2_get_indirect_reg); 57 + EXPORT_SYMBOL_GPL(kryo_l2_get_indirect_reg);
+3 -3
drivers/soc/qcom/ocmem.c
··· 211 211 } 212 212 return ocmem; 213 213 } 214 - EXPORT_SYMBOL(of_get_ocmem); 214 + EXPORT_SYMBOL_GPL(of_get_ocmem); 215 215 216 216 struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, 217 217 unsigned long size) ··· 267 267 268 268 return ERR_PTR(ret); 269 269 } 270 - EXPORT_SYMBOL(ocmem_allocate); 270 + EXPORT_SYMBOL_GPL(ocmem_allocate); 271 271 272 272 void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, 273 273 struct ocmem_buf *buf) ··· 294 294 295 295 clear_bit_unlock(BIT(client), &ocmem->active_allocations); 296 296 } 297 - EXPORT_SYMBOL(ocmem_free); 297 + EXPORT_SYMBOL_GPL(ocmem_free); 298 298 299 299 static int ocmem_dev_probe(struct platform_device *pdev) 300 300 {
+4 -4
drivers/soc/qcom/pdr_interface.c
··· 554 554 kfree(pds); 555 555 return ERR_PTR(ret); 556 556 } 557 - EXPORT_SYMBOL(pdr_add_lookup); 557 + EXPORT_SYMBOL_GPL(pdr_add_lookup); 558 558 559 559 /** 560 560 * pdr_restart_pd() - restart PD ··· 634 634 635 635 return 0; 636 636 } 637 - EXPORT_SYMBOL(pdr_restart_pd); 637 + EXPORT_SYMBOL_GPL(pdr_restart_pd); 638 638 639 639 /** 640 640 * pdr_handle_alloc() - initialize the PDR client handle ··· 715 715 716 716 return ERR_PTR(ret); 717 717 } 718 - EXPORT_SYMBOL(pdr_handle_alloc); 718 + EXPORT_SYMBOL_GPL(pdr_handle_alloc); 719 719 720 720 /** 721 721 * pdr_handle_release() - release the PDR client handle ··· 749 749 750 750 kfree(pdr); 751 751 } 752 - EXPORT_SYMBOL(pdr_handle_release); 752 + EXPORT_SYMBOL_GPL(pdr_handle_release); 753 753 754 754 MODULE_LICENSE("GPL v2"); 755 755 MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
+19 -19
drivers/soc/qcom/qcom-geni-se.c
··· 199 199 200 200 return readl_relaxed(wrapper->base + QUP_HW_VER_REG); 201 201 } 202 - EXPORT_SYMBOL(geni_se_get_qup_hw_version); 202 + EXPORT_SYMBOL_GPL(geni_se_get_qup_hw_version); 203 203 204 204 static void geni_se_io_set_mode(void __iomem *base) 205 205 { ··· 272 272 val |= S_COMMON_GENI_S_IRQ_EN; 273 273 writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN); 274 274 } 275 - EXPORT_SYMBOL(geni_se_init); 275 + EXPORT_SYMBOL_GPL(geni_se_init); 276 276 277 277 static void geni_se_select_fifo_mode(struct geni_se *se) 278 278 { ··· 364 364 break; 365 365 } 366 366 } 367 - EXPORT_SYMBOL(geni_se_select_mode); 367 + EXPORT_SYMBOL_GPL(geni_se_select_mode); 368 368 369 369 /** 370 370 * DOC: Overview ··· 481 481 if (pack_words || bpw == 32) 482 482 writel_relaxed(bpw / 16, se->base + SE_GENI_BYTE_GRAN); 483 483 } 484 - EXPORT_SYMBOL(geni_se_config_packing); 484 + EXPORT_SYMBOL_GPL(geni_se_config_packing); 485 485 486 486 static void geni_se_clks_off(struct geni_se *se) 487 487 { ··· 512 512 geni_se_clks_off(se); 513 513 return 0; 514 514 } 515 - EXPORT_SYMBOL(geni_se_resources_off); 515 + EXPORT_SYMBOL_GPL(geni_se_resources_off); 516 516 517 517 static int geni_se_clks_on(struct geni_se *se) 518 518 { ··· 553 553 554 554 return ret; 555 555 } 556 - EXPORT_SYMBOL(geni_se_resources_on); 556 + EXPORT_SYMBOL_GPL(geni_se_resources_on); 557 557 558 558 /** 559 559 * geni_se_clk_tbl_get() - Get the clock table to program DFS ··· 594 594 *tbl = se->clk_perf_tbl; 595 595 return se->num_clk_levels; 596 596 } 597 - EXPORT_SYMBOL(geni_se_clk_tbl_get); 597 + EXPORT_SYMBOL_GPL(geni_se_clk_tbl_get); 598 598 599 599 /** 600 600 * geni_se_clk_freq_match() - Get the matching or closest SE clock frequency ··· 656 656 657 657 return 0; 658 658 } 659 - EXPORT_SYMBOL(geni_se_clk_freq_match); 659 + EXPORT_SYMBOL_GPL(geni_se_clk_freq_match); 660 660 661 661 #define GENI_SE_DMA_DONE_EN BIT(0) 662 662 #define GENI_SE_DMA_EOT_EN BIT(1) ··· 684 684 writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR); 685 685 writel(len, se->base + SE_DMA_TX_LEN); 686 686 } 687 - EXPORT_SYMBOL(geni_se_tx_init_dma); 687 + EXPORT_SYMBOL_GPL(geni_se_tx_init_dma); 688 688 689 689 /** 690 690 * geni_se_tx_dma_prep() - Prepare the serial engine for TX DMA transfer ··· 712 712 geni_se_tx_init_dma(se, *iova, len); 713 713 return 0; 714 714 } 715 - EXPORT_SYMBOL(geni_se_tx_dma_prep); 715 + EXPORT_SYMBOL_GPL(geni_se_tx_dma_prep); 716 716 717 717 /** 718 718 * geni_se_rx_init_dma() - Initiate RX DMA transfer on the serial engine ··· 736 736 writel_relaxed(0, se->base + SE_DMA_RX_ATTR); 737 737 writel(len, se->base + SE_DMA_RX_LEN); 738 738 } 739 - EXPORT_SYMBOL(geni_se_rx_init_dma); 739 + EXPORT_SYMBOL_GPL(geni_se_rx_init_dma); 740 740 741 741 /** 742 742 * geni_se_rx_dma_prep() - Prepare the serial engine for RX DMA transfer ··· 764 764 geni_se_rx_init_dma(se, *iova, len); 765 765 return 0; 766 766 } 767 - EXPORT_SYMBOL(geni_se_rx_dma_prep); 767 + EXPORT_SYMBOL_GPL(geni_se_rx_dma_prep); 768 768 769 769 /** 770 770 * geni_se_tx_dma_unprep() - Unprepare the serial engine after TX DMA transfer ··· 781 781 if (!dma_mapping_error(wrapper->dev, iova)) 782 782 dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE); 783 783 } 784 - EXPORT_SYMBOL(geni_se_tx_dma_unprep); 784 + EXPORT_SYMBOL_GPL(geni_se_tx_dma_unprep); 785 785 786 786 /** 787 787 * geni_se_rx_dma_unprep() - Unprepare the serial engine after RX DMA transfer ··· 798 798 if (!dma_mapping_error(wrapper->dev, iova)) 799 799 dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE); 800 800 } 801 - EXPORT_SYMBOL(geni_se_rx_dma_unprep); 801 + EXPORT_SYMBOL_GPL(geni_se_rx_dma_unprep); 802 802 803 803 int geni_icc_get(struct geni_se *se, const char *icc_ddr) 804 804 { ··· 827 827 return err; 828 828 829 829 } 830 - EXPORT_SYMBOL(geni_icc_get); 830 + EXPORT_SYMBOL_GPL(geni_icc_get); 831 831 832 832 int geni_icc_set_bw(struct geni_se *se) 833 833 { ··· 845 845 846 846 return 0; 847 847 } 848 - EXPORT_SYMBOL(geni_icc_set_bw); 848 + EXPORT_SYMBOL_GPL(geni_icc_set_bw); 849 849 850 850 void geni_icc_set_tag(struct geni_se *se, u32 tag) 851 851 { ··· 854 854 for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) 855 855 icc_set_tag(se->icc_paths[i].path, tag); 856 856 } 857 - EXPORT_SYMBOL(geni_icc_set_tag); 857 + EXPORT_SYMBOL_GPL(geni_icc_set_tag); 858 858 859 859 /* To do: Replace this by icc_bulk_enable once it's implemented in ICC core */ 860 860 int geni_icc_enable(struct geni_se *se) ··· 872 872 873 873 return 0; 874 874 } 875 - EXPORT_SYMBOL(geni_icc_enable); 875 + EXPORT_SYMBOL_GPL(geni_icc_enable); 876 876 877 877 int geni_icc_disable(struct geni_se *se) 878 878 { ··· 889 889 890 890 return 0; 891 891 } 892 - EXPORT_SYMBOL(geni_icc_disable); 892 + EXPORT_SYMBOL_GPL(geni_icc_disable); 893 893 894 894 static int geni_se_probe(struct platform_device *pdev) 895 895 {
+3 -3
drivers/soc/qcom/qcom_aoss.c
··· 260 260 261 261 return ret; 262 262 } 263 - EXPORT_SYMBOL(qmp_send); 263 + EXPORT_SYMBOL_GPL(qmp_send); 264 264 265 265 static int qmp_qdss_clk_prepare(struct clk_hw *hw) 266 266 { ··· 458 458 } 459 459 return qmp; 460 460 } 461 - EXPORT_SYMBOL(qmp_get); 461 + EXPORT_SYMBOL_GPL(qmp_get); 462 462 463 463 /** 464 464 * qmp_put() - release a qmp handle ··· 473 473 if (!IS_ERR_OR_NULL(qmp)) 474 474 put_device(qmp->dev); 475 475 } 476 - EXPORT_SYMBOL(qmp_put); 476 + EXPORT_SYMBOL_GPL(qmp_put); 477 477 478 478 static int qmp_probe(struct platform_device *pdev) 479 479 {
+3 -3
drivers/soc/qcom/qmi_encdec.c
··· 754 754 755 755 return msg; 756 756 } 757 - EXPORT_SYMBOL(qmi_encode_message); 757 + EXPORT_SYMBOL_GPL(qmi_encode_message); 758 758 759 759 /** 760 760 * qmi_decode_message() - Decode QMI encoded message to C structure ··· 778 778 return qmi_decode(ei, c_struct, buf + sizeof(struct qmi_header), 779 779 len - sizeof(struct qmi_header), 1); 780 780 } 781 - EXPORT_SYMBOL(qmi_decode_message); 781 + EXPORT_SYMBOL_GPL(qmi_decode_message); 782 782 783 783 /* Common header in all QMI responses */ 784 784 const struct qmi_elem_info qmi_response_type_v01_ei[] = { ··· 810 810 .ei_array = NULL, 811 811 }, 812 812 }; 813 - EXPORT_SYMBOL(qmi_response_type_v01_ei); 813 + EXPORT_SYMBOL_GPL(qmi_response_type_v01_ei); 814 814 815 815 MODULE_DESCRIPTION("QMI encoder/decoder helper"); 816 816 MODULE_LICENSE("GPL v2");
+10 -10
drivers/soc/qcom/qmi_interface.c
··· 223 223 224 224 return 0; 225 225 } 226 - EXPORT_SYMBOL(qmi_add_lookup); 226 + EXPORT_SYMBOL_GPL(qmi_add_lookup); 227 227 228 228 static void qmi_send_new_server(struct qmi_handle *qmi, struct qmi_service *svc) 229 229 { ··· 287 287 288 288 return 0; 289 289 } 290 - EXPORT_SYMBOL(qmi_add_server); 290 + EXPORT_SYMBOL_GPL(qmi_add_server); 291 291 292 292 /** 293 293 * qmi_txn_init() - allocate transaction id within the given QMI handle ··· 328 328 329 329 return ret; 330 330 } 331 - EXPORT_SYMBOL(qmi_txn_init); 331 + EXPORT_SYMBOL_GPL(qmi_txn_init); 332 332 333 333 /** 334 334 * qmi_txn_wait() - wait for a response on a transaction ··· 359 359 else 360 360 return txn->result; 361 361 } 362 - EXPORT_SYMBOL(qmi_txn_wait); 362 + EXPORT_SYMBOL_GPL(qmi_txn_wait); 363 363 364 364 /** 365 365 * qmi_txn_cancel() - cancel an ongoing transaction ··· 375 375 mutex_unlock(&txn->lock); 376 376 mutex_unlock(&qmi->txn_lock); 377 377 } 378 - EXPORT_SYMBOL(qmi_txn_cancel); 378 + EXPORT_SYMBOL_GPL(qmi_txn_cancel); 379 379 380 380 /** 381 381 * qmi_invoke_handler() - find and invoke a handler for a message ··· 676 676 677 677 return ret; 678 678 } 679 - EXPORT_SYMBOL(qmi_handle_init); 679 + EXPORT_SYMBOL_GPL(qmi_handle_init); 680 680 681 681 /** 682 682 * qmi_handle_release() - release the QMI client handle ··· 717 717 kfree(svc); 718 718 } 719 719 } 720 - EXPORT_SYMBOL(qmi_handle_release); 720 + EXPORT_SYMBOL_GPL(qmi_handle_release); 721 721 722 722 /** 723 723 * qmi_send_message() - send a QMI message ··· 796 796 return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei, 797 797 c_struct); 798 798 } 799 - EXPORT_SYMBOL(qmi_send_request); 799 + EXPORT_SYMBOL_GPL(qmi_send_request); 800 800 801 801 /** 802 802 * qmi_send_response() - send a response QMI message ··· 817 817 return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei, 818 818 c_struct); 819 819 } 820 - EXPORT_SYMBOL(qmi_send_response); 820 + EXPORT_SYMBOL_GPL(qmi_send_response); 821 821 822 822 /** 823 823 * qmi_send_indication() - send an indication QMI message ··· 851 851 852 852 return rval; 853 853 } 854 - EXPORT_SYMBOL(qmi_send_indication); 854 + EXPORT_SYMBOL_GPL(qmi_send_indication);
+4 -4
drivers/soc/qcom/rpmh.c
··· 239 239 240 240 return __rpmh_write(dev, state, rpm_msg); 241 241 } 242 - EXPORT_SYMBOL(rpmh_write_async); 242 + EXPORT_SYMBOL_GPL(rpmh_write_async); 243 243 244 244 /** 245 245 * rpmh_write: Write a set of RPMH commands and block until response ··· 270 270 WARN_ON(!ret); 271 271 return (ret > 0) ? 0 : -ETIMEDOUT; 272 272 } 273 - EXPORT_SYMBOL(rpmh_write); 273 + EXPORT_SYMBOL_GPL(rpmh_write); 274 274 275 275 static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req) 276 276 { ··· 395 395 396 396 return ret; 397 397 } 398 - EXPORT_SYMBOL(rpmh_write_batch); 398 + EXPORT_SYMBOL_GPL(rpmh_write_batch); 399 399 400 400 static int is_req_valid(struct cache_req *req) 401 401 { ··· 500 500 ctrlr->dirty = true; 501 501 spin_unlock_irqrestore(&ctrlr->cache_lock, flags); 502 502 } 503 - EXPORT_SYMBOL(rpmh_invalidate); 503 + EXPORT_SYMBOL_GPL(rpmh_invalidate);
+1 -1
drivers/soc/qcom/smd-rpm.c
··· 142 142 mutex_unlock(&rpm->lock); 143 143 return ret; 144 144 } 145 - EXPORT_SYMBOL(qcom_rpm_smd_write); 145 + EXPORT_SYMBOL_GPL(qcom_rpm_smd_write); 146 146 147 147 static int qcom_smd_rpm_callback(struct rpmsg_device *rpdev, 148 148 void *data,
+1 -1
drivers/soc/qcom/smem.c
··· 368 368 { 369 369 return !!__smem; 370 370 } 371 - EXPORT_SYMBOL(qcom_smem_is_available); 371 + EXPORT_SYMBOL_GPL(qcom_smem_is_available); 372 372 373 373 static int qcom_smem_alloc_private(struct qcom_smem *smem, 374 374 struct smem_partition *part,
+1 -1
drivers/soc/qcom/wcnss_ctrl.c
··· 287 287 288 288 return rpmsg_create_ept(_wcnss->channel->rpdev, cb, priv, chinfo); 289 289 } 290 - EXPORT_SYMBOL(qcom_wcnss_open_channel); 290 + EXPORT_SYMBOL_GPL(qcom_wcnss_open_channel); 291 291 292 292 static void wcnss_async_probe(struct work_struct *work) 293 293 {