Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'qcom-soc-for-4.3' into v4.2-rc2

Qualcomm ARM Based SoC Updates for 4.3

* Add SMEM driver
* Add SMD driver
* Add RPM over SMD driver
* Select QCOM_SCM by default

+2660
+117
Documentation/devicetree/bindings/soc/qcom,smd-rpm.txt
··· 1 + Qualcomm Resource Power Manager (RPM) over SMD 2 + 3 + This driver is used to interface with the Resource Power Manager (RPM) found in 4 + various Qualcomm platforms. The RPM allows each component in the system to vote 5 + for state of the system resources, such as clocks, regulators and bus 6 + frequencies. 7 + 8 + - compatible: 9 + Usage: required 10 + Value type: <string> 11 + Definition: must be one of: 12 + "qcom,rpm-msm8974" 13 + 14 + - qcom,smd-channels: 15 + Usage: required 16 + Value type: <stringlist> 17 + Definition: Shared Memory channel used for communication with the RPM 18 + 19 + = SUBDEVICES 20 + 21 + The RPM exposes resources to its subnodes. The below bindings specify the set 22 + of valid subnodes that can operate on these resources. 23 + 24 + == Regulators 25 + 26 + Regulator nodes are identified by their compatible: 27 + 28 + - compatible: 29 + Usage: required 30 + Value type: <string> 31 + Definition: must be one of: 32 + "qcom,rpm-pm8841-regulators" 33 + "qcom,rpm-pm8941-regulators" 34 + 35 + - vdd_s1-supply: 36 + - vdd_s2-supply: 37 + - vdd_s3-supply: 38 + - vdd_s4-supply: 39 + - vdd_s5-supply: 40 + - vdd_s6-supply: 41 + - vdd_s7-supply: 42 + - vdd_s8-supply: 43 + Usage: optional (pm8841 only) 44 + Value type: <phandle> 45 + Definition: reference to regulator supplying the input pin, as 46 + described in the data sheet 47 + 48 + - vdd_s1-supply: 49 + - vdd_s2-supply: 50 + - vdd_s3-supply: 51 + - vdd_l1_l3-supply: 52 + - vdd_l2_lvs1_2_3-supply: 53 + - vdd_l4_l11-supply: 54 + - vdd_l5_l7-supply: 55 + - vdd_l6_l12_l14_l15-supply: 56 + - vdd_l8_l16_l18_l19-supply: 57 + - vdd_l9_l10_l17_l22-supply: 58 + - vdd_l13_l20_l23_l24-supply: 59 + - vdd_l21-supply: 60 + - vin_5vs-supply: 61 + Usage: optional (pm8941 only) 62 + Value type: <phandle> 63 + Definition: reference to regulator supplying the input pin, as 64 + described in the data sheet 65 + 66 + The regulator node houses sub-nodes for each regulator within the device. 
Each 67 + sub-node is identified using the node's name, with valid values listed for each 68 + of the pmics below. 69 + 70 + pm8841: 71 + s1, s2, s3, s4, s5, s6, s7, s8 72 + 73 + pm8941: 74 + s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, 75 + l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2, 76 + lvs3, 5vs1, 5vs2 77 + 78 + The content of each sub-node is defined by the standard binding for regulators - 79 + see regulator.txt. 80 + 81 + = EXAMPLE 82 + 83 + smd { 84 + compatible = "qcom,smd"; 85 + 86 + rpm { 87 + interrupts = <0 168 1>; 88 + qcom,ipc = <&apcs 8 0>; 89 + qcom,smd-edge = <15>; 90 + 91 + rpm_requests { 92 + compatible = "qcom,rpm-msm8974"; 93 + qcom,smd-channels = "rpm_requests"; 94 + 95 + pm8941-regulators { 96 + compatible = "qcom,rpm-pm8941-regulators"; 97 + vdd_l13_l20_l23_l24-supply = <&pm8941_boost>; 98 + 99 + pm8941_s3: s3 { 100 + regulator-min-microvolt = <1800000>; 101 + regulator-max-microvolt = <1800000>; 102 + }; 103 + 104 + pm8941_boost: s4 { 105 + regulator-min-microvolt = <5000000>; 106 + regulator-max-microvolt = <5000000>; 107 + }; 108 + 109 + pm8941_l20: l20 { 110 + regulator-min-microvolt = <2950000>; 111 + regulator-max-microvolt = <2950000>; 112 + }; 113 + }; 114 + }; 115 + }; 116 + }; 117 +
+79
Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
··· 1 + Qualcomm Shared Memory Driver (SMD) binding 2 + 3 + This binding describes the Qualcomm Shared Memory Driver, a fifo based 4 + communication channel for sending data between the various subsystems in 5 + Qualcomm platforms. 6 + 7 + - compatible: 8 + Usage: required 9 + Value type: <stringlist> 10 + Definition: must be "qcom,smd" 11 + 12 + = EDGES 13 + 14 + Each subnode of the SMD node represents a remote subsystem or a remote 15 + processor of some sort - or in SMD language an "edge". The name of the edges 16 + are not important. 17 + The edge is described by the following properties: 18 + 19 + - interrupts: 20 + Usage: required 21 + Value type: <prop-encoded-array> 22 + Definition: should specify the IRQ used by the remote processor to 23 + signal this processor about communication related updates 24 + 25 + - qcom,ipc: 26 + Usage: required 27 + Value type: <prop-encoded-array> 28 + Definition: three entries specifying the outgoing ipc bit used for 29 + signaling the remote processor: 30 + - phandle to a syscon node representing the apcs registers 31 + - u32 representing offset to the register within the syscon 32 + - u32 representing the ipc bit within the register 33 + 34 + - qcom,smd-edge: 35 + Usage: required 36 + Value type: <u32> 37 + Definition: the identifier of the remote processor in the smd channel 38 + allocation table 39 + 40 + = SMD DEVICES 41 + 42 + In turn, subnodes of the "edges" represent devices tied to SMD channels on that 43 + "edge". The names of the devices are not important. The properties of these 44 + nodes are defined by the individual bindings for the SMD devices - but must 45 + contain the following property: 46 + 47 + - qcom,smd-channels: 48 + Usage: required 49 + Value type: <stringlist> 50 + Definition: a list of channels tied to this device, used for matching 51 + the device to channels 52 + 53 + = EXAMPLE 54 + 55 + The following example represents a smd node, with one edge representing the 56 + "rpm" subsystem. 
For the "rpm" subsystem we have a device tied to the 57 + "rpm_request" channel. 58 + 59 + apcs: syscon@f9011000 { 60 + compatible = "syscon"; 61 + reg = <0xf9011000 0x1000>; 62 + }; 63 + 64 + smd { 65 + compatible = "qcom,smd"; 66 + 67 + rpm { 68 + interrupts = <0 168 1>; 69 + qcom,ipc = <&apcs 8 0>; 70 + qcom,smd-edge = <15>; 71 + 72 + rpm_requests { 73 + compatible = "qcom,rpm-msm8974"; 74 + qcom,smd-channels = "rpm_requests"; 75 + 76 + ... 77 + }; 78 + }; 79 + };
+31
drivers/soc/qcom/Kconfig
··· 13 13 config QCOM_PM 14 14 bool "Qualcomm Power Management" 15 15 depends on ARCH_QCOM && !ARM64 16 + select QCOM_SCM 16 17 help 17 18 QCOM Platform specific power driver to manage cores and L2 low power 18 19 modes. It interface with various system drivers to put the cores in 19 20 low power modes. 21 + 22 + config QCOM_SMD 23 + tristate "Qualcomm Shared Memory Driver (SMD)" 24 + depends on QCOM_SMEM 25 + help 26 + Say y here to enable support for the Qualcomm Shared Memory Driver 27 + providing communication channels to remote processors in Qualcomm 28 + platforms. 29 + 30 + config QCOM_SMD_RPM 31 + tristate "Qualcomm Resource Power Manager (RPM) over SMD" 32 + depends on QCOM_SMD && OF 33 + help 34 + If you say yes to this option, support will be included for the 35 + Resource Power Manager system found in the Qualcomm 8974 based 36 + devices. 37 + 38 + This is required to access many regulators, clocks and bus 39 + frequencies controlled by the RPM on these devices. 40 + 41 + Say M here if you want to include support for the Qualcomm RPM as a 42 + module. This will build a module called "qcom-smd-rpm". 43 + 44 + config QCOM_SMEM 45 + tristate "Qualcomm Shared Memory Manager (SMEM)" 46 + depends on ARCH_QCOM 47 + help 48 + Say y here to enable support for the Qualcomm Shared Memory Manager. 49 + The driver provides an interface to items in a heap shared among all 50 + processors in a Qualcomm platform.
+3
drivers/soc/qcom/Makefile
··· 1 1 obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o 2 2 obj-$(CONFIG_QCOM_PM) += spm.o 3 + obj-$(CONFIG_QCOM_SMD) += smd.o 4 + obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o 5 + obj-$(CONFIG_QCOM_SMEM) += smem.o
+244
drivers/soc/qcom/smd-rpm.c
··· 1 + /* 2 + * Copyright (c) 2015, Sony Mobile Communications AB. 3 + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 and 7 + * only version 2 as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + */ 14 + 15 + #include <linux/module.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/of_platform.h> 18 + #include <linux/io.h> 19 + #include <linux/interrupt.h> 20 + 21 + #include <linux/soc/qcom/smd.h> 22 + #include <linux/soc/qcom/smd-rpm.h> 23 + 24 + #define RPM_REQUEST_TIMEOUT (5 * HZ) 25 + 26 + /** 27 + * struct qcom_smd_rpm - state of the rpm device driver 28 + * @rpm_channel: reference to the smd channel 29 + * @ack: completion for acks 30 + * @lock: mutual exclusion around the send/complete pair 31 + * @ack_status: result of the rpm request 32 + */ 33 + struct qcom_smd_rpm { 34 + struct qcom_smd_channel *rpm_channel; 35 + 36 + struct completion ack; 37 + struct mutex lock; 38 + int ack_status; 39 + }; 40 + 41 + /** 42 + * struct qcom_rpm_header - header for all rpm requests and responses 43 + * @service_type: identifier of the service 44 + * @length: length of the payload 45 + */ 46 + struct qcom_rpm_header { 47 + u32 service_type; 48 + u32 length; 49 + }; 50 + 51 + /** 52 + * struct qcom_rpm_request - request message to the rpm 53 + * @msg_id: identifier of the outgoing message 54 + * @flags: active/sleep state flags 55 + * @type: resource type 56 + * @id: resource id 57 + * @data_len: length of the payload following this header 58 + */ 59 + struct qcom_rpm_request { 60 + u32 msg_id; 61 + u32 flags; 62 + u32 type; 63 + 
u32 id; 64 + u32 data_len; 65 + }; 66 + 67 + /** 68 + * struct qcom_rpm_message - response message from the rpm 69 + * @msg_type: indicator of the type of message 70 + * @length: the size of this message, including the message header 71 + * @msg_id: message id 72 + * @message: textual message from the rpm 73 + * 74 + * Multiple of these messages can be stacked in an rpm message. 75 + */ 76 + struct qcom_rpm_message { 77 + u32 msg_type; 78 + u32 length; 79 + union { 80 + u32 msg_id; 81 + u8 message[0]; 82 + }; 83 + }; 84 + 85 + #define RPM_SERVICE_TYPE_REQUEST 0x00716572 /* "req\0" */ 86 + 87 + #define RPM_MSG_TYPE_ERR 0x00727265 /* "err\0" */ 88 + #define RPM_MSG_TYPE_MSG_ID 0x2367736d /* "msg#" */ 89 + 90 + /** 91 + * qcom_rpm_smd_write - write @buf to @type:@id 92 + * @rpm: rpm handle 93 + * @type: resource type 94 + * @id: resource identifier 95 + * @buf: the data to be written 96 + * @count: number of bytes in @buf 97 + */ 98 + int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, 99 + int state, 100 + u32 type, u32 id, 101 + void *buf, 102 + size_t count) 103 + { 104 + static unsigned msg_id = 1; 105 + int left; 106 + int ret; 107 + 108 + struct { 109 + struct qcom_rpm_header hdr; 110 + struct qcom_rpm_request req; 111 + u8 payload[count]; 112 + } pkt; 113 + 114 + /* SMD packets to the RPM may not exceed 256 bytes */ 115 + if (WARN_ON(sizeof(pkt) >= 256)) 116 + return -EINVAL; 117 + 118 + mutex_lock(&rpm->lock); 119 + 120 + pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST; 121 + pkt.hdr.length = sizeof(struct qcom_rpm_request) + count; 122 + 123 + pkt.req.msg_id = msg_id++; 124 + pkt.req.flags = BIT(state); 125 + pkt.req.type = type; 126 + pkt.req.id = id; 127 + pkt.req.data_len = count; 128 + memcpy(pkt.payload, buf, count); 129 + 130 + ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt)); 131 + if (ret) 132 + goto out; 133 + 134 + left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT); 135 + if (!left) 136 + ret = -ETIMEDOUT; 137 + else 138 + ret = 
rpm->ack_status; 139 + 140 + out: 141 + mutex_unlock(&rpm->lock); 142 + return ret; 143 + } 144 + EXPORT_SYMBOL(qcom_rpm_smd_write); 145 + 146 + static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, 147 + const void *data, 148 + size_t count) 149 + { 150 + const struct qcom_rpm_header *hdr = data; 151 + const struct qcom_rpm_message *msg; 152 + struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); 153 + const u8 *buf = data + sizeof(struct qcom_rpm_header); 154 + const u8 *end = buf + hdr->length; 155 + char msgbuf[32]; 156 + int status = 0; 157 + u32 len; 158 + 159 + if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST || 160 + hdr->length < sizeof(struct qcom_rpm_message)) { 161 + dev_err(&qsdev->dev, "invalid request\n"); 162 + return 0; 163 + } 164 + 165 + while (buf < end) { 166 + msg = (struct qcom_rpm_message *)buf; 167 + switch (msg->msg_type) { 168 + case RPM_MSG_TYPE_MSG_ID: 169 + break; 170 + case RPM_MSG_TYPE_ERR: 171 + len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf)); 172 + memcpy_fromio(msgbuf, msg->message, len); 173 + msgbuf[len - 1] = 0; 174 + 175 + if (!strcmp(msgbuf, "resource does not exist")) 176 + status = -ENXIO; 177 + else 178 + status = -EINVAL; 179 + break; 180 + } 181 + 182 + buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4); 183 + } 184 + 185 + rpm->ack_status = status; 186 + complete(&rpm->ack); 187 + return 0; 188 + } 189 + 190 + static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev) 191 + { 192 + struct qcom_smd_rpm *rpm; 193 + 194 + rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL); 195 + if (!rpm) 196 + return -ENOMEM; 197 + 198 + mutex_init(&rpm->lock); 199 + init_completion(&rpm->ack); 200 + 201 + rpm->rpm_channel = sdev->channel; 202 + 203 + dev_set_drvdata(&sdev->dev, rpm); 204 + 205 + return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev); 206 + } 207 + 208 + static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev) 209 + { 210 + of_platform_depopulate(&sdev->dev); 211 + } 
212 + 213 + static const struct of_device_id qcom_smd_rpm_of_match[] = { 214 + { .compatible = "qcom,rpm-msm8974" }, 215 + {} 216 + }; 217 + MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match); 218 + 219 + static struct qcom_smd_driver qcom_smd_rpm_driver = { 220 + .probe = qcom_smd_rpm_probe, 221 + .remove = qcom_smd_rpm_remove, 222 + .callback = qcom_smd_rpm_callback, 223 + .driver = { 224 + .name = "qcom_smd_rpm", 225 + .owner = THIS_MODULE, 226 + .of_match_table = qcom_smd_rpm_of_match, 227 + }, 228 + }; 229 + 230 + static int __init qcom_smd_rpm_init(void) 231 + { 232 + return qcom_smd_driver_register(&qcom_smd_rpm_driver); 233 + } 234 + arch_initcall(qcom_smd_rpm_init); 235 + 236 + static void __exit qcom_smd_rpm_exit(void) 237 + { 238 + qcom_smd_driver_unregister(&qcom_smd_rpm_driver); 239 + } 240 + module_exit(qcom_smd_rpm_exit); 241 + 242 + MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); 243 + MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver"); 244 + MODULE_LICENSE("GPL v2");
+1319
drivers/soc/qcom/smd.c
··· 1 + /* 2 + * Copyright (c) 2015, Sony Mobile Communications AB. 3 + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 and 7 + * only version 2 as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + */ 14 + 15 + #include <linux/interrupt.h> 16 + #include <linux/io.h> 17 + #include <linux/mfd/syscon.h> 18 + #include <linux/module.h> 19 + #include <linux/of_irq.h> 20 + #include <linux/of_platform.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/regmap.h> 23 + #include <linux/sched.h> 24 + #include <linux/slab.h> 25 + #include <linux/soc/qcom/smd.h> 26 + #include <linux/soc/qcom/smem.h> 27 + #include <linux/wait.h> 28 + 29 + /* 30 + * The Qualcomm Shared Memory communication solution provides point-to-point 31 + * channels for clients to send and receive streaming or packet based data. 32 + * 33 + * Each channel consists of a control item (channel info) and a ring buffer 34 + * pair. The channel info carry information related to channel state, flow 35 + * control and the offsets within the ring buffer. 36 + * 37 + * All allocated channels are listed in an allocation table, identifying the 38 + * pair of items by name, type and remote processor. 39 + * 40 + * Upon creating a new channel the remote processor allocates channel info and 41 + * ring buffer items from the smem heap and populate the allocation table. An 42 + * interrupt is sent to the other end of the channel and a scan for new 43 + * channels should be done. A channel never goes away, it will only change 44 + * state. 
45 + * 46 + * The remote processor signals it intent for bring up the communication 47 + * channel by setting the state of its end of the channel to "opening" and 48 + * sends out an interrupt. We detect this change and register a smd device to 49 + * consume the channel. Upon finding a consumer we finish the handshake and the 50 + * channel is up. 51 + * 52 + * Upon closing a channel, the remote processor will update the state of its 53 + * end of the channel and signal us, we will then unregister any attached 54 + * device and close our end of the channel. 55 + * 56 + * Devices attached to a channel can use the qcom_smd_send function to push 57 + * data to the channel, this is done by copying the data into the tx ring 58 + * buffer, updating the pointers in the channel info and signaling the remote 59 + * processor. 60 + * 61 + * The remote processor does the equivalent when it transfer data and upon 62 + * receiving the interrupt we check the channel info for new data and delivers 63 + * this to the attached device. If the device is not ready to receive the data 64 + * we leave it in the ring buffer for now. 65 + */ 66 + 67 + struct smd_channel_info; 68 + struct smd_channel_info_word; 69 + 70 + #define SMD_ALLOC_TBL_COUNT 2 71 + #define SMD_ALLOC_TBL_SIZE 64 72 + 73 + /* 74 + * This lists the various smem heap items relevant for the allocation table and 75 + * smd channel entries. 
76 + */ 77 + static const struct { 78 + unsigned alloc_tbl_id; 79 + unsigned info_base_id; 80 + unsigned fifo_base_id; 81 + } smem_items[SMD_ALLOC_TBL_COUNT] = { 82 + { 83 + .alloc_tbl_id = 13, 84 + .info_base_id = 14, 85 + .fifo_base_id = 338 86 + }, 87 + { 88 + .alloc_tbl_id = 14, 89 + .info_base_id = 266, 90 + .fifo_base_id = 202, 91 + }, 92 + }; 93 + 94 + /** 95 + * struct qcom_smd_edge - representing a remote processor 96 + * @smd: handle to qcom_smd 97 + * @of_node: of_node handle for information related to this edge 98 + * @edge_id: identifier of this edge 99 + * @irq: interrupt for signals on this edge 100 + * @ipc_regmap: regmap handle holding the outgoing ipc register 101 + * @ipc_offset: offset within @ipc_regmap of the register for ipc 102 + * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap 103 + * @channels: list of all channels detected on this edge 104 + * @channels_lock: guard for modifications of @channels 105 + * @allocated: array of bitmaps representing already allocated channels 106 + * @need_rescan: flag that the @work needs to scan smem for new channels 107 + * @smem_available: last available amount of smem triggering a channel scan 108 + * @work: work item for edge house keeping 109 + */ 110 + struct qcom_smd_edge { 111 + struct qcom_smd *smd; 112 + struct device_node *of_node; 113 + unsigned edge_id; 114 + 115 + int irq; 116 + 117 + struct regmap *ipc_regmap; 118 + int ipc_offset; 119 + int ipc_bit; 120 + 121 + struct list_head channels; 122 + spinlock_t channels_lock; 123 + 124 + DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE); 125 + 126 + bool need_rescan; 127 + unsigned smem_available; 128 + 129 + struct work_struct work; 130 + }; 131 + 132 + /* 133 + * SMD channel states. 
134 + */ 135 + enum smd_channel_state { 136 + SMD_CHANNEL_CLOSED, 137 + SMD_CHANNEL_OPENING, 138 + SMD_CHANNEL_OPENED, 139 + SMD_CHANNEL_FLUSHING, 140 + SMD_CHANNEL_CLOSING, 141 + SMD_CHANNEL_RESET, 142 + SMD_CHANNEL_RESET_OPENING 143 + }; 144 + 145 + /** 146 + * struct qcom_smd_channel - smd channel struct 147 + * @edge: qcom_smd_edge this channel is living on 148 + * @qsdev: reference to a associated smd client device 149 + * @name: name of the channel 150 + * @state: local state of the channel 151 + * @remote_state: remote state of the channel 152 + * @tx_info: byte aligned outgoing channel info 153 + * @rx_info: byte aligned incoming channel info 154 + * @tx_info_word: word aligned outgoing channel info 155 + * @rx_info_word: word aligned incoming channel info 156 + * @tx_lock: lock to make writes to the channel mutually exclusive 157 + * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR 158 + * @tx_fifo: pointer to the outgoing ring buffer 159 + * @rx_fifo: pointer to the incoming ring buffer 160 + * @fifo_size: size of each ring buffer 161 + * @bounce_buffer: bounce buffer for reading wrapped packets 162 + * @cb: callback function registered for this channel 163 + * @recv_lock: guard for rx info modifications and cb pointer 164 + * @pkt_size: size of the currently handled packet 165 + * @list: lite entry for @channels in qcom_smd_edge 166 + */ 167 + struct qcom_smd_channel { 168 + struct qcom_smd_edge *edge; 169 + 170 + struct qcom_smd_device *qsdev; 171 + 172 + char *name; 173 + enum smd_channel_state state; 174 + enum smd_channel_state remote_state; 175 + 176 + struct smd_channel_info *tx_info; 177 + struct smd_channel_info *rx_info; 178 + 179 + struct smd_channel_info_word *tx_info_word; 180 + struct smd_channel_info_word *rx_info_word; 181 + 182 + struct mutex tx_lock; 183 + wait_queue_head_t fblockread_event; 184 + 185 + void *tx_fifo; 186 + void *rx_fifo; 187 + int fifo_size; 188 + 189 + void *bounce_buffer; 190 + int (*cb)(struct 
qcom_smd_device *, const void *, size_t); 191 + 192 + spinlock_t recv_lock; 193 + 194 + int pkt_size; 195 + 196 + struct list_head list; 197 + }; 198 + 199 + /** 200 + * struct qcom_smd - smd struct 201 + * @dev: device struct 202 + * @num_edges: number of entries in @edges 203 + * @edges: array of edges to be handled 204 + */ 205 + struct qcom_smd { 206 + struct device *dev; 207 + 208 + unsigned num_edges; 209 + struct qcom_smd_edge edges[0]; 210 + }; 211 + 212 + /* 213 + * Format of the smd_info smem items, for byte aligned channels. 214 + */ 215 + struct smd_channel_info { 216 + u32 state; 217 + u8 fDSR; 218 + u8 fCTS; 219 + u8 fCD; 220 + u8 fRI; 221 + u8 fHEAD; 222 + u8 fTAIL; 223 + u8 fSTATE; 224 + u8 fBLOCKREADINTR; 225 + u32 tail; 226 + u32 head; 227 + }; 228 + 229 + /* 230 + * Format of the smd_info smem items, for word aligned channels. 231 + */ 232 + struct smd_channel_info_word { 233 + u32 state; 234 + u32 fDSR; 235 + u32 fCTS; 236 + u32 fCD; 237 + u32 fRI; 238 + u32 fHEAD; 239 + u32 fTAIL; 240 + u32 fSTATE; 241 + u32 fBLOCKREADINTR; 242 + u32 tail; 243 + u32 head; 244 + }; 245 + 246 + #define GET_RX_CHANNEL_INFO(channel, param) \ 247 + (channel->rx_info_word ? \ 248 + channel->rx_info_word->param : \ 249 + channel->rx_info->param) 250 + 251 + #define SET_RX_CHANNEL_INFO(channel, param, value) \ 252 + (channel->rx_info_word ? \ 253 + (channel->rx_info_word->param = value) : \ 254 + (channel->rx_info->param = value)) 255 + 256 + #define GET_TX_CHANNEL_INFO(channel, param) \ 257 + (channel->tx_info_word ? \ 258 + channel->tx_info_word->param : \ 259 + channel->tx_info->param) 260 + 261 + #define SET_TX_CHANNEL_INFO(channel, param, value) \ 262 + (channel->tx_info_word ? 
\ 263 + (channel->tx_info_word->param = value) : \ 264 + (channel->tx_info->param = value)) 265 + 266 + /** 267 + * struct qcom_smd_alloc_entry - channel allocation entry 268 + * @name: channel name 269 + * @cid: channel index 270 + * @flags: channel flags and edge id 271 + * @ref_count: reference count of the channel 272 + */ 273 + struct qcom_smd_alloc_entry { 274 + u8 name[20]; 275 + u32 cid; 276 + u32 flags; 277 + u32 ref_count; 278 + } __packed; 279 + 280 + #define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff 281 + #define SMD_CHANNEL_FLAGS_STREAM BIT(8) 282 + #define SMD_CHANNEL_FLAGS_PACKET BIT(9) 283 + 284 + /* 285 + * Each smd packet contains a 20 byte header, with the first 4 being the length 286 + * of the packet. 287 + */ 288 + #define SMD_PACKET_HEADER_LEN 20 289 + 290 + /* 291 + * Signal the remote processor associated with 'channel'. 292 + */ 293 + static void qcom_smd_signal_channel(struct qcom_smd_channel *channel) 294 + { 295 + struct qcom_smd_edge *edge = channel->edge; 296 + 297 + regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit)); 298 + } 299 + 300 + /* 301 + * Initialize the tx channel info 302 + */ 303 + static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) 304 + { 305 + SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); 306 + SET_TX_CHANNEL_INFO(channel, fDSR, 0); 307 + SET_TX_CHANNEL_INFO(channel, fCTS, 0); 308 + SET_TX_CHANNEL_INFO(channel, fCD, 0); 309 + SET_TX_CHANNEL_INFO(channel, fRI, 0); 310 + SET_TX_CHANNEL_INFO(channel, fHEAD, 0); 311 + SET_TX_CHANNEL_INFO(channel, fTAIL, 0); 312 + SET_TX_CHANNEL_INFO(channel, fSTATE, 1); 313 + SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0); 314 + SET_TX_CHANNEL_INFO(channel, head, 0); 315 + SET_TX_CHANNEL_INFO(channel, tail, 0); 316 + 317 + qcom_smd_signal_channel(channel); 318 + 319 + channel->state = SMD_CHANNEL_CLOSED; 320 + channel->pkt_size = 0; 321 + } 322 + 323 + /* 324 + * Calculate the amount of data available in the rx fifo 325 + */ 326 + static size_t 
qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel) 327 + { 328 + unsigned head; 329 + unsigned tail; 330 + 331 + head = GET_RX_CHANNEL_INFO(channel, head); 332 + tail = GET_RX_CHANNEL_INFO(channel, tail); 333 + 334 + return (head - tail) & (channel->fifo_size - 1); 335 + } 336 + 337 + /* 338 + * Set tx channel state and inform the remote processor 339 + */ 340 + static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, 341 + int state) 342 + { 343 + struct qcom_smd_edge *edge = channel->edge; 344 + bool is_open = state == SMD_CHANNEL_OPENED; 345 + 346 + if (channel->state == state) 347 + return; 348 + 349 + dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state); 350 + 351 + SET_TX_CHANNEL_INFO(channel, fDSR, is_open); 352 + SET_TX_CHANNEL_INFO(channel, fCTS, is_open); 353 + SET_TX_CHANNEL_INFO(channel, fCD, is_open); 354 + 355 + SET_TX_CHANNEL_INFO(channel, state, state); 356 + SET_TX_CHANNEL_INFO(channel, fSTATE, 1); 357 + 358 + channel->state = state; 359 + qcom_smd_signal_channel(channel); 360 + } 361 + 362 + /* 363 + * Copy count bytes of data using 32bit accesses, if that's required. 364 + */ 365 + static void smd_copy_to_fifo(void __iomem *_dst, 366 + const void *_src, 367 + size_t count, 368 + bool word_aligned) 369 + { 370 + u32 *dst = (u32 *)_dst; 371 + u32 *src = (u32 *)_src; 372 + 373 + if (word_aligned) { 374 + count /= sizeof(u32); 375 + while (count--) 376 + writel_relaxed(*src++, dst++); 377 + } else { 378 + memcpy_toio(_dst, _src, count); 379 + } 380 + } 381 + 382 + /* 383 + * Copy count bytes of data using 32bit accesses, if that is required. 
384 + */ 385 + static void smd_copy_from_fifo(void *_dst, 386 + const void __iomem *_src, 387 + size_t count, 388 + bool word_aligned) 389 + { 390 + u32 *dst = (u32 *)_dst; 391 + u32 *src = (u32 *)_src; 392 + 393 + if (word_aligned) { 394 + count /= sizeof(u32); 395 + while (count--) 396 + *dst++ = readl_relaxed(src++); 397 + } else { 398 + memcpy_fromio(_dst, _src, count); 399 + } 400 + } 401 + 402 + /* 403 + * Read count bytes of data from the rx fifo into buf, but don't advance the 404 + * tail. 405 + */ 406 + static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel, 407 + void *buf, size_t count) 408 + { 409 + bool word_aligned; 410 + unsigned tail; 411 + size_t len; 412 + 413 + word_aligned = channel->rx_info_word != NULL; 414 + tail = GET_RX_CHANNEL_INFO(channel, tail); 415 + 416 + len = min_t(size_t, count, channel->fifo_size - tail); 417 + if (len) { 418 + smd_copy_from_fifo(buf, 419 + channel->rx_fifo + tail, 420 + len, 421 + word_aligned); 422 + } 423 + 424 + if (len != count) { 425 + smd_copy_from_fifo(buf + len, 426 + channel->rx_fifo, 427 + count - len, 428 + word_aligned); 429 + } 430 + 431 + return count; 432 + } 433 + 434 + /* 435 + * Advance the rx tail by count bytes. 
436 + */ 437 + static void qcom_smd_channel_advance(struct qcom_smd_channel *channel, 438 + size_t count) 439 + { 440 + unsigned tail; 441 + 442 + tail = GET_RX_CHANNEL_INFO(channel, tail); 443 + tail += count; 444 + tail &= (channel->fifo_size - 1); 445 + SET_RX_CHANNEL_INFO(channel, tail, tail); 446 + } 447 + 448 + /* 449 + * Read out a single packet from the rx fifo and deliver it to the device 450 + */ 451 + static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel) 452 + { 453 + struct qcom_smd_device *qsdev = channel->qsdev; 454 + unsigned tail; 455 + size_t len; 456 + void *ptr; 457 + int ret; 458 + 459 + if (!channel->cb) 460 + return 0; 461 + 462 + tail = GET_RX_CHANNEL_INFO(channel, tail); 463 + 464 + /* Use bounce buffer if the data wraps */ 465 + if (tail + channel->pkt_size >= channel->fifo_size) { 466 + ptr = channel->bounce_buffer; 467 + len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size); 468 + } else { 469 + ptr = channel->rx_fifo + tail; 470 + len = channel->pkt_size; 471 + } 472 + 473 + ret = channel->cb(qsdev, ptr, len); 474 + if (ret < 0) 475 + return ret; 476 + 477 + /* Only forward the tail if the client consumed the data */ 478 + qcom_smd_channel_advance(channel, len); 479 + 480 + channel->pkt_size = 0; 481 + 482 + return 0; 483 + } 484 + 485 + /* 486 + * Per channel interrupt handling 487 + */ 488 + static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) 489 + { 490 + bool need_state_scan = false; 491 + int remote_state; 492 + u32 pktlen; 493 + int avail; 494 + int ret; 495 + 496 + /* Handle state changes */ 497 + remote_state = GET_RX_CHANNEL_INFO(channel, state); 498 + if (remote_state != channel->remote_state) { 499 + channel->remote_state = remote_state; 500 + need_state_scan = true; 501 + } 502 + /* Indicate that we have seen any state change */ 503 + SET_RX_CHANNEL_INFO(channel, fSTATE, 0); 504 + 505 + /* Signal waiting qcom_smd_send() about the interrupt */ 506 + if (!GET_TX_CHANNEL_INFO(channel, 
fBLOCKREADINTR)) 507 + wake_up_interruptible(&channel->fblockread_event); 508 + 509 + /* Don't consume any data until we've opened the channel */ 510 + if (channel->state != SMD_CHANNEL_OPENED) 511 + goto out; 512 + 513 + /* Indicate that we've seen the new data */ 514 + SET_RX_CHANNEL_INFO(channel, fHEAD, 0); 515 + 516 + /* Consume data */ 517 + for (;;) { 518 + avail = qcom_smd_channel_get_rx_avail(channel); 519 + 520 + if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { 521 + qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); 522 + qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); 523 + channel->pkt_size = pktlen; 524 + } else if (channel->pkt_size && avail >= channel->pkt_size) { 525 + ret = qcom_smd_channel_recv_single(channel); 526 + if (ret) 527 + break; 528 + } else { 529 + break; 530 + } 531 + } 532 + 533 + /* Indicate that we have seen and updated tail */ 534 + SET_RX_CHANNEL_INFO(channel, fTAIL, 1); 535 + 536 + /* Signal the remote that we've consumed the data (if requested) */ 537 + if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) { 538 + /* Ensure ordering of channel info updates */ 539 + wmb(); 540 + 541 + qcom_smd_signal_channel(channel); 542 + } 543 + 544 + out: 545 + return need_state_scan; 546 + } 547 + 548 + /* 549 + * The edge interrupts are triggered by the remote processor on state changes, 550 + * channel info updates or when new channels are created. 
551 + */ 552 + static irqreturn_t qcom_smd_edge_intr(int irq, void *data) 553 + { 554 + struct qcom_smd_edge *edge = data; 555 + struct qcom_smd_channel *channel; 556 + unsigned available; 557 + bool kick_worker = false; 558 + 559 + /* 560 + * Handle state changes or data on each of the channels on this edge 561 + */ 562 + spin_lock(&edge->channels_lock); 563 + list_for_each_entry(channel, &edge->channels, list) { 564 + spin_lock(&channel->recv_lock); 565 + kick_worker |= qcom_smd_channel_intr(channel); 566 + spin_unlock(&channel->recv_lock); 567 + } 568 + spin_unlock(&edge->channels_lock); 569 + 570 + /* 571 + * Creating a new channel requires allocating an smem entry, so we only 572 + * have to scan if the amount of available space in smem have changed 573 + * since last scan. 574 + */ 575 + available = qcom_smem_get_free_space(edge->edge_id); 576 + if (available != edge->smem_available) { 577 + edge->smem_available = available; 578 + edge->need_rescan = true; 579 + kick_worker = true; 580 + } 581 + 582 + if (kick_worker) 583 + schedule_work(&edge->work); 584 + 585 + return IRQ_HANDLED; 586 + } 587 + 588 + /* 589 + * Delivers any outstanding packets in the rx fifo, can be used after probe of 590 + * the clients to deliver any packets that wasn't delivered before the client 591 + * was setup. 592 + */ 593 + static void qcom_smd_channel_resume(struct qcom_smd_channel *channel) 594 + { 595 + unsigned long flags; 596 + 597 + spin_lock_irqsave(&channel->recv_lock, flags); 598 + qcom_smd_channel_intr(channel); 599 + spin_unlock_irqrestore(&channel->recv_lock, flags); 600 + } 601 + 602 + /* 603 + * Calculate how much space is available in the tx fifo. 
604 + */ 605 + static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel) 606 + { 607 + unsigned head; 608 + unsigned tail; 609 + unsigned mask = channel->fifo_size - 1; 610 + 611 + head = GET_TX_CHANNEL_INFO(channel, head); 612 + tail = GET_TX_CHANNEL_INFO(channel, tail); 613 + 614 + return mask - ((head - tail) & mask); 615 + } 616 + 617 + /* 618 + * Write count bytes of data into channel, possibly wrapping in the ring buffer 619 + */ 620 + static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, 621 + const void *data, 622 + size_t count) 623 + { 624 + bool word_aligned; 625 + unsigned head; 626 + size_t len; 627 + 628 + word_aligned = channel->tx_info_word != NULL; 629 + head = GET_TX_CHANNEL_INFO(channel, head); 630 + 631 + len = min_t(size_t, count, channel->fifo_size - head); 632 + if (len) { 633 + smd_copy_to_fifo(channel->tx_fifo + head, 634 + data, 635 + len, 636 + word_aligned); 637 + } 638 + 639 + if (len != count) { 640 + smd_copy_to_fifo(channel->tx_fifo, 641 + data + len, 642 + count - len, 643 + word_aligned); 644 + } 645 + 646 + head += count; 647 + head &= (channel->fifo_size - 1); 648 + SET_TX_CHANNEL_INFO(channel, head, head); 649 + 650 + return count; 651 + } 652 + 653 + /** 654 + * qcom_smd_send - write data to smd channel 655 + * @channel: channel handle 656 + * @data: buffer of data to write 657 + * @len: number of bytes to write 658 + * 659 + * This is a blocking write of len bytes into the channel's tx ring buffer and 660 + * signal the remote end. It will sleep until there is enough space available 661 + * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid 662 + * polling. 
663 + */ 664 + int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) 665 + { 666 + u32 hdr[5] = {len,}; 667 + int tlen = sizeof(hdr) + len; 668 + int ret; 669 + 670 + /* Word aligned channels only accept word size aligned data */ 671 + if (channel->rx_info_word != NULL && len % 4) 672 + return -EINVAL; 673 + 674 + ret = mutex_lock_interruptible(&channel->tx_lock); 675 + if (ret) 676 + return ret; 677 + 678 + while (qcom_smd_get_tx_avail(channel) < tlen) { 679 + if (channel->state != SMD_CHANNEL_OPENED) { 680 + ret = -EPIPE; 681 + goto out; 682 + } 683 + 684 + SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); 685 + 686 + ret = wait_event_interruptible(channel->fblockread_event, 687 + qcom_smd_get_tx_avail(channel) >= tlen || 688 + channel->state != SMD_CHANNEL_OPENED); 689 + if (ret) 690 + goto out; 691 + 692 + SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0); 693 + } 694 + 695 + SET_TX_CHANNEL_INFO(channel, fTAIL, 0); 696 + 697 + qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); 698 + qcom_smd_write_fifo(channel, data, len); 699 + 700 + SET_TX_CHANNEL_INFO(channel, fHEAD, 1); 701 + 702 + /* Ensure ordering of channel info updates */ 703 + wmb(); 704 + 705 + qcom_smd_signal_channel(channel); 706 + 707 + out: 708 + mutex_unlock(&channel->tx_lock); 709 + 710 + return ret; 711 + } 712 + EXPORT_SYMBOL(qcom_smd_send); 713 + 714 + static struct qcom_smd_device *to_smd_device(struct device *dev) 715 + { 716 + return container_of(dev, struct qcom_smd_device, dev); 717 + } 718 + 719 + static struct qcom_smd_driver *to_smd_driver(struct device *dev) 720 + { 721 + struct qcom_smd_device *qsdev = to_smd_device(dev); 722 + 723 + return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver); 724 + } 725 + 726 + static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) 727 + { 728 + return of_driver_match_device(dev, drv); 729 + } 730 + 731 + /* 732 + * Probe the smd client. 
733 + * 734 + * The remote side have indicated that it want the channel to be opened, so 735 + * complete the state handshake and probe our client driver. 736 + */ 737 + static int qcom_smd_dev_probe(struct device *dev) 738 + { 739 + struct qcom_smd_device *qsdev = to_smd_device(dev); 740 + struct qcom_smd_driver *qsdrv = to_smd_driver(dev); 741 + struct qcom_smd_channel *channel = qsdev->channel; 742 + size_t bb_size; 743 + int ret; 744 + 745 + /* 746 + * Packets are maximum 4k, but reduce if the fifo is smaller 747 + */ 748 + bb_size = min(channel->fifo_size, SZ_4K); 749 + channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); 750 + if (!channel->bounce_buffer) 751 + return -ENOMEM; 752 + 753 + channel->cb = qsdrv->callback; 754 + 755 + qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING); 756 + 757 + qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED); 758 + 759 + ret = qsdrv->probe(qsdev); 760 + if (ret) 761 + goto err; 762 + 763 + qcom_smd_channel_resume(channel); 764 + 765 + return 0; 766 + 767 + err: 768 + dev_err(&qsdev->dev, "probe failed\n"); 769 + 770 + channel->cb = NULL; 771 + kfree(channel->bounce_buffer); 772 + channel->bounce_buffer = NULL; 773 + 774 + qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); 775 + return ret; 776 + } 777 + 778 + /* 779 + * Remove the smd client. 780 + * 781 + * The channel is going away, for some reason, so remove the smd client and 782 + * reset the channel state. 783 + */ 784 + static int qcom_smd_dev_remove(struct device *dev) 785 + { 786 + struct qcom_smd_device *qsdev = to_smd_device(dev); 787 + struct qcom_smd_driver *qsdrv = to_smd_driver(dev); 788 + struct qcom_smd_channel *channel = qsdev->channel; 789 + unsigned long flags; 790 + 791 + qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); 792 + 793 + /* 794 + * Make sure we don't race with the code receiving data. 
795 + */ 796 + spin_lock_irqsave(&channel->recv_lock, flags); 797 + channel->cb = NULL; 798 + spin_unlock_irqrestore(&channel->recv_lock, flags); 799 + 800 + /* Wake up any sleepers in qcom_smd_send() */ 801 + wake_up_interruptible(&channel->fblockread_event); 802 + 803 + /* 804 + * We expect that the client might block in remove() waiting for any 805 + * outstanding calls to qcom_smd_send() to wake up and finish. 806 + */ 807 + if (qsdrv->remove) 808 + qsdrv->remove(qsdev); 809 + 810 + /* 811 + * The client is now gone, cleanup and reset the channel state. 812 + */ 813 + channel->qsdev = NULL; 814 + kfree(channel->bounce_buffer); 815 + channel->bounce_buffer = NULL; 816 + 817 + qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED); 818 + 819 + qcom_smd_channel_reset(channel); 820 + 821 + return 0; 822 + } 823 + 824 + static struct bus_type qcom_smd_bus = { 825 + .name = "qcom_smd", 826 + .match = qcom_smd_dev_match, 827 + .probe = qcom_smd_dev_probe, 828 + .remove = qcom_smd_dev_remove, 829 + }; 830 + 831 + /* 832 + * Release function for the qcom_smd_device object. 833 + */ 834 + static void qcom_smd_release_device(struct device *dev) 835 + { 836 + struct qcom_smd_device *qsdev = to_smd_device(dev); 837 + 838 + kfree(qsdev); 839 + } 840 + 841 + /* 842 + * Finds the device_node for the smd child interested in this channel. 843 + */ 844 + static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, 845 + const char *channel) 846 + { 847 + struct device_node *child; 848 + const char *name; 849 + const char *key; 850 + int ret; 851 + 852 + for_each_available_child_of_node(edge_node, child) { 853 + key = "qcom,smd-channels"; 854 + ret = of_property_read_string(child, key, &name); 855 + if (ret) { 856 + of_node_put(child); 857 + continue; 858 + } 859 + 860 + if (strcmp(name, channel) == 0) 861 + return child; 862 + } 863 + 864 + return NULL; 865 + } 866 + 867 + /* 868 + * Create a smd client device for channel that is being opened. 
869 + */ 870 + static int qcom_smd_create_device(struct qcom_smd_channel *channel) 871 + { 872 + struct qcom_smd_device *qsdev; 873 + struct qcom_smd_edge *edge = channel->edge; 874 + struct device_node *node; 875 + struct qcom_smd *smd = edge->smd; 876 + int ret; 877 + 878 + if (channel->qsdev) 879 + return -EEXIST; 880 + 881 + node = qcom_smd_match_channel(edge->of_node, channel->name); 882 + if (!node) { 883 + dev_dbg(smd->dev, "no match for '%s'\n", channel->name); 884 + return -ENXIO; 885 + } 886 + 887 + dev_dbg(smd->dev, "registering '%s'\n", channel->name); 888 + 889 + qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); 890 + if (!qsdev) 891 + return -ENOMEM; 892 + 893 + dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name); 894 + qsdev->dev.parent = smd->dev; 895 + qsdev->dev.bus = &qcom_smd_bus; 896 + qsdev->dev.release = qcom_smd_release_device; 897 + qsdev->dev.of_node = node; 898 + 899 + qsdev->channel = channel; 900 + 901 + channel->qsdev = qsdev; 902 + 903 + ret = device_register(&qsdev->dev); 904 + if (ret) { 905 + dev_err(smd->dev, "device_register failed: %d\n", ret); 906 + put_device(&qsdev->dev); 907 + } 908 + 909 + return ret; 910 + } 911 + 912 + /* 913 + * Destroy a smd client device for a channel that's going away. 
914 + */ 915 + static void qcom_smd_destroy_device(struct qcom_smd_channel *channel) 916 + { 917 + struct device *dev; 918 + 919 + BUG_ON(!channel->qsdev); 920 + 921 + dev = &channel->qsdev->dev; 922 + 923 + device_unregister(dev); 924 + of_node_put(dev->of_node); 925 + put_device(dev); 926 + } 927 + 928 + /** 929 + * qcom_smd_driver_register - register a smd driver 930 + * @qsdrv: qcom_smd_driver struct 931 + */ 932 + int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv) 933 + { 934 + qsdrv->driver.bus = &qcom_smd_bus; 935 + return driver_register(&qsdrv->driver); 936 + } 937 + EXPORT_SYMBOL(qcom_smd_driver_register); 938 + 939 + /** 940 + * qcom_smd_driver_unregister - unregister a smd driver 941 + * @qsdrv: qcom_smd_driver struct 942 + */ 943 + void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv) 944 + { 945 + driver_unregister(&qsdrv->driver); 946 + } 947 + EXPORT_SYMBOL(qcom_smd_driver_unregister); 948 + 949 + /* 950 + * Allocate the qcom_smd_channel object for a newly found smd channel, 951 + * retrieving and validating the smem items involved. 
952 + */ 953 + static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge, 954 + unsigned smem_info_item, 955 + unsigned smem_fifo_item, 956 + char *name) 957 + { 958 + struct qcom_smd_channel *channel; 959 + struct qcom_smd *smd = edge->smd; 960 + size_t fifo_size; 961 + size_t info_size; 962 + void *fifo_base; 963 + void *info; 964 + int ret; 965 + 966 + channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL); 967 + if (!channel) 968 + return ERR_PTR(-ENOMEM); 969 + 970 + channel->edge = edge; 971 + channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL); 972 + if (!channel->name) 973 + return ERR_PTR(-ENOMEM); 974 + 975 + mutex_init(&channel->tx_lock); 976 + spin_lock_init(&channel->recv_lock); 977 + init_waitqueue_head(&channel->fblockread_event); 978 + 979 + ret = qcom_smem_get(edge->edge_id, smem_info_item, (void **)&info, &info_size); 980 + if (ret) 981 + goto free_name_and_channel; 982 + 983 + /* 984 + * Use the size of the item to figure out which channel info struct to 985 + * use. 
986 + */ 987 + if (info_size == 2 * sizeof(struct smd_channel_info_word)) { 988 + channel->tx_info_word = info; 989 + channel->rx_info_word = info + sizeof(struct smd_channel_info_word); 990 + } else if (info_size == 2 * sizeof(struct smd_channel_info)) { 991 + channel->tx_info = info; 992 + channel->rx_info = info + sizeof(struct smd_channel_info); 993 + } else { 994 + dev_err(smd->dev, 995 + "channel info of size %zu not supported\n", info_size); 996 + ret = -EINVAL; 997 + goto free_name_and_channel; 998 + } 999 + 1000 + ret = qcom_smem_get(edge->edge_id, smem_fifo_item, &fifo_base, &fifo_size); 1001 + if (ret) 1002 + goto free_name_and_channel; 1003 + 1004 + /* The channel consist of a rx and tx fifo of equal size */ 1005 + fifo_size /= 2; 1006 + 1007 + dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n", 1008 + name, info_size, fifo_size); 1009 + 1010 + channel->tx_fifo = fifo_base; 1011 + channel->rx_fifo = fifo_base + fifo_size; 1012 + channel->fifo_size = fifo_size; 1013 + 1014 + qcom_smd_channel_reset(channel); 1015 + 1016 + return channel; 1017 + 1018 + free_name_and_channel: 1019 + devm_kfree(smd->dev, channel->name); 1020 + devm_kfree(smd->dev, channel); 1021 + 1022 + return ERR_PTR(ret); 1023 + } 1024 + 1025 + /* 1026 + * Scans the allocation table for any newly allocated channels, calls 1027 + * qcom_smd_create_channel() to create representations of these and add 1028 + * them to the edge's list of channels. 
1029 + */ 1030 + static void qcom_discover_channels(struct qcom_smd_edge *edge) 1031 + { 1032 + struct qcom_smd_alloc_entry *alloc_tbl; 1033 + struct qcom_smd_alloc_entry *entry; 1034 + struct qcom_smd_channel *channel; 1035 + struct qcom_smd *smd = edge->smd; 1036 + unsigned long flags; 1037 + unsigned fifo_id; 1038 + unsigned info_id; 1039 + int ret; 1040 + int tbl; 1041 + int i; 1042 + 1043 + for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { 1044 + ret = qcom_smem_get(edge->edge_id, 1045 + smem_items[tbl].alloc_tbl_id, 1046 + (void **)&alloc_tbl, 1047 + NULL); 1048 + if (ret < 0) 1049 + continue; 1050 + 1051 + for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { 1052 + entry = &alloc_tbl[i]; 1053 + if (test_bit(i, edge->allocated[tbl])) 1054 + continue; 1055 + 1056 + if (entry->ref_count == 0) 1057 + continue; 1058 + 1059 + if (!entry->name[0]) 1060 + continue; 1061 + 1062 + if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET)) 1063 + continue; 1064 + 1065 + if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) 1066 + continue; 1067 + 1068 + info_id = smem_items[tbl].info_base_id + entry->cid; 1069 + fifo_id = smem_items[tbl].fifo_base_id + entry->cid; 1070 + 1071 + channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); 1072 + if (IS_ERR(channel)) 1073 + continue; 1074 + 1075 + spin_lock_irqsave(&edge->channels_lock, flags); 1076 + list_add(&channel->list, &edge->channels); 1077 + spin_unlock_irqrestore(&edge->channels_lock, flags); 1078 + 1079 + dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name); 1080 + set_bit(i, edge->allocated[tbl]); 1081 + } 1082 + } 1083 + 1084 + schedule_work(&edge->work); 1085 + } 1086 + 1087 + /* 1088 + * This per edge worker scans smem for any new channels and register these. It 1089 + * then scans all registered channels for state changes that should be handled 1090 + * by creating or destroying smd client devices for the registered channels. 
1091 + * 1092 + * LOCKING: edge->channels_lock is not needed to be held during the traversal 1093 + * of the channels list as it's done synchronously with the only writer. 1094 + */ 1095 + static void qcom_channel_state_worker(struct work_struct *work) 1096 + { 1097 + struct qcom_smd_channel *channel; 1098 + struct qcom_smd_edge *edge = container_of(work, 1099 + struct qcom_smd_edge, 1100 + work); 1101 + unsigned remote_state; 1102 + 1103 + /* 1104 + * Rescan smem if we have reason to belive that there are new channels. 1105 + */ 1106 + if (edge->need_rescan) { 1107 + edge->need_rescan = false; 1108 + qcom_discover_channels(edge); 1109 + } 1110 + 1111 + /* 1112 + * Register a device for any closed channel where the remote processor 1113 + * is showing interest in opening the channel. 1114 + */ 1115 + list_for_each_entry(channel, &edge->channels, list) { 1116 + if (channel->state != SMD_CHANNEL_CLOSED) 1117 + continue; 1118 + 1119 + remote_state = GET_RX_CHANNEL_INFO(channel, state); 1120 + if (remote_state != SMD_CHANNEL_OPENING && 1121 + remote_state != SMD_CHANNEL_OPENED) 1122 + continue; 1123 + 1124 + qcom_smd_create_device(channel); 1125 + } 1126 + 1127 + /* 1128 + * Unregister the device for any channel that is opened where the 1129 + * remote processor is closing the channel. 1130 + */ 1131 + list_for_each_entry(channel, &edge->channels, list) { 1132 + if (channel->state != SMD_CHANNEL_OPENING && 1133 + channel->state != SMD_CHANNEL_OPENED) 1134 + continue; 1135 + 1136 + remote_state = GET_RX_CHANNEL_INFO(channel, state); 1137 + if (remote_state == SMD_CHANNEL_OPENING || 1138 + remote_state == SMD_CHANNEL_OPENED) 1139 + continue; 1140 + 1141 + qcom_smd_destroy_device(channel); 1142 + } 1143 + } 1144 + 1145 + /* 1146 + * Parses an of_node describing an edge. 
1147 + */ 1148 + static int qcom_smd_parse_edge(struct device *dev, 1149 + struct device_node *node, 1150 + struct qcom_smd_edge *edge) 1151 + { 1152 + struct device_node *syscon_np; 1153 + const char *key; 1154 + int irq; 1155 + int ret; 1156 + 1157 + INIT_LIST_HEAD(&edge->channels); 1158 + spin_lock_init(&edge->channels_lock); 1159 + 1160 + INIT_WORK(&edge->work, qcom_channel_state_worker); 1161 + 1162 + edge->of_node = of_node_get(node); 1163 + 1164 + irq = irq_of_parse_and_map(node, 0); 1165 + if (irq < 0) { 1166 + dev_err(dev, "required smd interrupt missing\n"); 1167 + return -EINVAL; 1168 + } 1169 + 1170 + ret = devm_request_irq(dev, irq, 1171 + qcom_smd_edge_intr, IRQF_TRIGGER_RISING, 1172 + node->name, edge); 1173 + if (ret) { 1174 + dev_err(dev, "failed to request smd irq\n"); 1175 + return ret; 1176 + } 1177 + 1178 + edge->irq = irq; 1179 + 1180 + key = "qcom,smd-edge"; 1181 + ret = of_property_read_u32(node, key, &edge->edge_id); 1182 + if (ret) { 1183 + dev_err(dev, "edge missing %s property\n", key); 1184 + return -EINVAL; 1185 + } 1186 + 1187 + syscon_np = of_parse_phandle(node, "qcom,ipc", 0); 1188 + if (!syscon_np) { 1189 + dev_err(dev, "no qcom,ipc node\n"); 1190 + return -ENODEV; 1191 + } 1192 + 1193 + edge->ipc_regmap = syscon_node_to_regmap(syscon_np); 1194 + if (IS_ERR(edge->ipc_regmap)) 1195 + return PTR_ERR(edge->ipc_regmap); 1196 + 1197 + key = "qcom,ipc"; 1198 + ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); 1199 + if (ret < 0) { 1200 + dev_err(dev, "no offset in %s\n", key); 1201 + return -EINVAL; 1202 + } 1203 + 1204 + ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); 1205 + if (ret < 0) { 1206 + dev_err(dev, "no bit in %s\n", key); 1207 + return -EINVAL; 1208 + } 1209 + 1210 + return 0; 1211 + } 1212 + 1213 + static int qcom_smd_probe(struct platform_device *pdev) 1214 + { 1215 + struct qcom_smd_edge *edge; 1216 + struct device_node *node; 1217 + struct qcom_smd *smd; 1218 + size_t array_size; 1219 + 
int num_edges; 1220 + int ret; 1221 + int i = 0; 1222 + 1223 + /* Wait for smem */ 1224 + ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL); 1225 + if (ret == -EPROBE_DEFER) 1226 + return ret; 1227 + 1228 + num_edges = of_get_available_child_count(pdev->dev.of_node); 1229 + array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge); 1230 + smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL); 1231 + if (!smd) 1232 + return -ENOMEM; 1233 + smd->dev = &pdev->dev; 1234 + 1235 + smd->num_edges = num_edges; 1236 + for_each_available_child_of_node(pdev->dev.of_node, node) { 1237 + edge = &smd->edges[i++]; 1238 + edge->smd = smd; 1239 + 1240 + ret = qcom_smd_parse_edge(&pdev->dev, node, edge); 1241 + if (ret) 1242 + continue; 1243 + 1244 + edge->need_rescan = true; 1245 + schedule_work(&edge->work); 1246 + } 1247 + 1248 + platform_set_drvdata(pdev, smd); 1249 + 1250 + return 0; 1251 + } 1252 + 1253 + /* 1254 + * Shut down all smd clients by making sure that each edge stops processing 1255 + * events and scanning for new channels, then call destroy on the devices. 
1256 + */ 1257 + static int qcom_smd_remove(struct platform_device *pdev) 1258 + { 1259 + struct qcom_smd_channel *channel; 1260 + struct qcom_smd_edge *edge; 1261 + struct qcom_smd *smd = platform_get_drvdata(pdev); 1262 + int i; 1263 + 1264 + for (i = 0; i < smd->num_edges; i++) { 1265 + edge = &smd->edges[i]; 1266 + 1267 + disable_irq(edge->irq); 1268 + cancel_work_sync(&edge->work); 1269 + 1270 + list_for_each_entry(channel, &edge->channels, list) { 1271 + if (!channel->qsdev) 1272 + continue; 1273 + 1274 + qcom_smd_destroy_device(channel); 1275 + } 1276 + } 1277 + 1278 + return 0; 1279 + } 1280 + 1281 + static const struct of_device_id qcom_smd_of_match[] = { 1282 + { .compatible = "qcom,smd" }, 1283 + {} 1284 + }; 1285 + MODULE_DEVICE_TABLE(of, qcom_smd_of_match); 1286 + 1287 + static struct platform_driver qcom_smd_driver = { 1288 + .probe = qcom_smd_probe, 1289 + .remove = qcom_smd_remove, 1290 + .driver = { 1291 + .name = "qcom-smd", 1292 + .of_match_table = qcom_smd_of_match, 1293 + }, 1294 + }; 1295 + 1296 + static int __init qcom_smd_init(void) 1297 + { 1298 + int ret; 1299 + 1300 + ret = bus_register(&qcom_smd_bus); 1301 + if (ret) { 1302 + pr_err("failed to register smd bus: %d\n", ret); 1303 + return ret; 1304 + } 1305 + 1306 + return platform_driver_register(&qcom_smd_driver); 1307 + } 1308 + postcore_initcall(qcom_smd_init); 1309 + 1310 + static void __exit qcom_smd_exit(void) 1311 + { 1312 + platform_driver_unregister(&qcom_smd_driver); 1313 + bus_unregister(&qcom_smd_bus); 1314 + } 1315 + module_exit(qcom_smd_exit); 1316 + 1317 + MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); 1318 + MODULE_DESCRIPTION("Qualcomm Shared Memory Driver"); 1319 + MODULE_LICENSE("GPL v2");
+775
drivers/soc/qcom/smem.c
··· 1 + /* 2 + * Copyright (c) 2015, Sony Mobile Communications AB. 3 + * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 and 7 + * only version 2 as published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + */ 14 + 15 + #include <linux/hwspinlock.h> 16 + #include <linux/io.h> 17 + #include <linux/module.h> 18 + #include <linux/of.h> 19 + #include <linux/of_address.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/slab.h> 22 + #include <linux/soc/qcom/smem.h> 23 + 24 + /* 25 + * The Qualcomm shared memory system is a allocate only heap structure that 26 + * consists of one of more memory areas that can be accessed by the processors 27 + * in the SoC. 28 + * 29 + * All systems contains a global heap, accessible by all processors in the SoC, 30 + * with a table of contents data structure (@smem_header) at the beginning of 31 + * the main shared memory block. 32 + * 33 + * The global header contains meta data for allocations as well as a fixed list 34 + * of 512 entries (@smem_global_entry) that can be initialized to reference 35 + * parts of the shared memory space. 36 + * 37 + * 38 + * In addition to this global heap a set of "private" heaps can be set up at 39 + * boot time with access restrictions so that only certain processor pairs can 40 + * access the data. 41 + * 42 + * These partitions are referenced from an optional partition table 43 + * (@smem_ptable), that is found 4kB from the end of the main smem region. 
The 44 + * partition table entries (@smem_ptable_entry) lists the involved processors 45 + * (or hosts) and their location in the main shared memory region. 46 + * 47 + * Each partition starts with a header (@smem_partition_header) that identifies 48 + * the partition and holds properties for the two internal memory regions. The 49 + * two regions are cached and non-cached memory respectively. Each region 50 + * contain a link list of allocation headers (@smem_private_entry) followed by 51 + * their data. 52 + * 53 + * Items in the non-cached region are allocated from the start of the partition 54 + * while items in the cached region are allocated from the end. The free area 55 + * is hence the region between the cached and non-cached offsets. 56 + * 57 + * 58 + * To synchronize allocations in the shared memory heaps a remote spinlock must 59 + * be held - currently lock number 3 of the sfpb or tcsr is used for this on all 60 + * platforms. 61 + * 62 + */ 63 + 64 + /* 65 + * Item 3 of the global heap contains an array of versions for the various 66 + * software components in the SoC. We verify that the boot loader version is 67 + * what the expected version (SMEM_EXPECTED_VERSION) as a sanity check. 68 + */ 69 + #define SMEM_ITEM_VERSION 3 70 + #define SMEM_MASTER_SBL_VERSION_INDEX 7 71 + #define SMEM_EXPECTED_VERSION 11 72 + 73 + /* 74 + * The first 8 items are only to be allocated by the boot loader while 75 + * initializing the heap. 
76 + */ 77 + #define SMEM_ITEM_LAST_FIXED 8 78 + 79 + /* Highest accepted item number, for both global and private heaps */ 80 + #define SMEM_ITEM_COUNT 512 81 + 82 + /* Processor/host identifier for the application processor */ 83 + #define SMEM_HOST_APPS 0 84 + 85 + /* Max number of processors/hosts in a system */ 86 + #define SMEM_HOST_COUNT 9 87 + 88 + /** 89 + * struct smem_proc_comm - proc_comm communication struct (legacy) 90 + * @command: current command to be executed 91 + * @status: status of the currently requested command 92 + * @params: parameters to the command 93 + */ 94 + struct smem_proc_comm { 95 + u32 command; 96 + u32 status; 97 + u32 params[2]; 98 + }; 99 + 100 + /** 101 + * struct smem_global_entry - entry to reference smem items on the heap 102 + * @allocated: boolean to indicate if this entry is used 103 + * @offset: offset to the allocated space 104 + * @size: size of the allocated space, 8 byte aligned 105 + * @aux_base: base address for the memory region used by this unit, or 0 for 106 + * the default region. 
bits 0,1 are reserved 107 + */ 108 + struct smem_global_entry { 109 + u32 allocated; 110 + u32 offset; 111 + u32 size; 112 + u32 aux_base; /* bits 1:0 reserved */ 113 + }; 114 + #define AUX_BASE_MASK 0xfffffffc 115 + 116 + /** 117 + * struct smem_header - header found in beginning of primary smem region 118 + * @proc_comm: proc_comm communication interface (legacy) 119 + * @version: array of versions for the various subsystems 120 + * @initialized: boolean to indicate that smem is initialized 121 + * @free_offset: index of the first unallocated byte in smem 122 + * @available: number of bytes available for allocation 123 + * @reserved: reserved field, must be 0 124 + * toc: array of references to items 125 + */ 126 + struct smem_header { 127 + struct smem_proc_comm proc_comm[4]; 128 + u32 version[32]; 129 + u32 initialized; 130 + u32 free_offset; 131 + u32 available; 132 + u32 reserved; 133 + struct smem_global_entry toc[SMEM_ITEM_COUNT]; 134 + }; 135 + 136 + /** 137 + * struct smem_ptable_entry - one entry in the @smem_ptable list 138 + * @offset: offset, within the main shared memory region, of the partition 139 + * @size: size of the partition 140 + * @flags: flags for the partition (currently unused) 141 + * @host0: first processor/host with access to this partition 142 + * @host1: second processor/host with access to this partition 143 + * @reserved: reserved entries for later use 144 + */ 145 + struct smem_ptable_entry { 146 + u32 offset; 147 + u32 size; 148 + u32 flags; 149 + u16 host0; 150 + u16 host1; 151 + u32 reserved[8]; 152 + }; 153 + 154 + /** 155 + * struct smem_ptable - partition table for the private partitions 156 + * @magic: magic number, must be SMEM_PTABLE_MAGIC 157 + * @version: version of the partition table 158 + * @num_entries: number of partitions in the table 159 + * @reserved: for now reserved entries 160 + * @entry: list of @smem_ptable_entry for the @num_entries partitions 161 + */ 162 + struct smem_ptable { 163 + u32 magic; 164 + u32 
version; 165 + u32 num_entries; 166 + u32 reserved[5]; 167 + struct smem_ptable_entry entry[]; 168 + }; 169 + #define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */ 170 + 171 + /** 172 + * struct smem_partition_header - header of the partitions 173 + * @magic: magic number, must be SMEM_PART_MAGIC 174 + * @host0: first processor/host with access to this partition 175 + * @host1: second processor/host with access to this partition 176 + * @size: size of the partition 177 + * @offset_free_uncached: offset to the first free byte of uncached memory in 178 + * this partition 179 + * @offset_free_cached: offset to the first free byte of cached memory in this 180 + * partition 181 + * @reserved: for now reserved entries 182 + */ 183 + struct smem_partition_header { 184 + u32 magic; 185 + u16 host0; 186 + u16 host1; 187 + u32 size; 188 + u32 offset_free_uncached; 189 + u32 offset_free_cached; 190 + u32 reserved[3]; 191 + }; 192 + #define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */ 193 + 194 + /** 195 + * struct smem_private_entry - header of each item in the private partition 196 + * @canary: magic number, must be SMEM_PRIVATE_CANARY 197 + * @item: identifying number of the smem item 198 + * @size: size of the data, including padding bytes 199 + * @padding_data: number of bytes of padding of data 200 + * @padding_hdr: number of bytes of padding between the header and the data 201 + * @reserved: for now reserved entry 202 + */ 203 + struct smem_private_entry { 204 + u16 canary; 205 + u16 item; 206 + u32 size; /* includes padding bytes */ 207 + u16 padding_data; 208 + u16 padding_hdr; 209 + u32 reserved; 210 + }; 211 + #define SMEM_PRIVATE_CANARY 0xa5a5 212 + 213 + /** 214 + * struct smem_region - representation of a chunk of memory used for smem 215 + * @aux_base: identifier of aux_mem base 216 + * @virt_base: virtual base address of memory with this aux_mem identifier 217 + * @size: size of the memory region 218 + */ 219 + struct smem_region { 220 + u32 aux_base; 221 + void __iomem 
*virt_base; 222 + size_t size; 223 + }; 224 + 225 + /** 226 + * struct qcom_smem - device data for the smem device 227 + * @dev: device pointer 228 + * @hwlock: reference to a hwspinlock 229 + * @partitions: list of pointers to partitions affecting the current 230 + * processor/host 231 + * @num_regions: number of @regions 232 + * @regions: list of the memory regions defining the shared memory 233 + */ 234 + struct qcom_smem { 235 + struct device *dev; 236 + 237 + struct hwspinlock *hwlock; 238 + 239 + struct smem_partition_header *partitions[SMEM_HOST_COUNT]; 240 + 241 + unsigned num_regions; 242 + struct smem_region regions[0]; 243 + }; 244 + 245 + /* Pointer to the one and only smem handle */ 246 + static struct qcom_smem *__smem; 247 + 248 + /* Timeout (ms) for the trylock of remote spinlocks */ 249 + #define HWSPINLOCK_TIMEOUT 1000 250 + 251 + static int qcom_smem_alloc_private(struct qcom_smem *smem, 252 + unsigned host, 253 + unsigned item, 254 + size_t size) 255 + { 256 + struct smem_partition_header *phdr; 257 + struct smem_private_entry *hdr; 258 + size_t alloc_size; 259 + void *p; 260 + 261 + /* We're not going to find it if there's no matching partition */ 262 + if (host >= SMEM_HOST_COUNT || !smem->partitions[host]) 263 + return -ENOENT; 264 + 265 + phdr = smem->partitions[host]; 266 + 267 + p = (void *)phdr + sizeof(*phdr); 268 + while (p < (void *)phdr + phdr->offset_free_uncached) { 269 + hdr = p; 270 + 271 + if (hdr->canary != SMEM_PRIVATE_CANARY) { 272 + dev_err(smem->dev, 273 + "Found invalid canary in host %d partition\n", 274 + host); 275 + return -EINVAL; 276 + } 277 + 278 + if (hdr->item == item) 279 + return -EEXIST; 280 + 281 + p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 282 + } 283 + 284 + /* Check that we don't grow into the cached region */ 285 + alloc_size = sizeof(*hdr) + ALIGN(size, 8); 286 + if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) { 287 + dev_err(smem->dev, "Out of memory\n"); 288 + return -ENOSPC; 289 + 
} 290 + 291 + hdr = p; 292 + hdr->canary = SMEM_PRIVATE_CANARY; 293 + hdr->item = item; 294 + hdr->size = ALIGN(size, 8); 295 + hdr->padding_data = hdr->size - size; 296 + hdr->padding_hdr = 0; 297 + 298 + /* 299 + * Ensure the header is written before we advance the free offset, so 300 + * that remote processors that does not take the remote spinlock still 301 + * gets a consistent view of the linked list. 302 + */ 303 + wmb(); 304 + phdr->offset_free_uncached += alloc_size; 305 + 306 + return 0; 307 + } 308 + 309 + static int qcom_smem_alloc_global(struct qcom_smem *smem, 310 + unsigned item, 311 + size_t size) 312 + { 313 + struct smem_header *header; 314 + struct smem_global_entry *entry; 315 + 316 + if (WARN_ON(item >= SMEM_ITEM_COUNT)) 317 + return -EINVAL; 318 + 319 + header = smem->regions[0].virt_base; 320 + entry = &header->toc[item]; 321 + if (entry->allocated) 322 + return -EEXIST; 323 + 324 + size = ALIGN(size, 8); 325 + if (WARN_ON(size > header->available)) 326 + return -ENOMEM; 327 + 328 + entry->offset = header->free_offset; 329 + entry->size = size; 330 + 331 + /* 332 + * Ensure the header is consistent before we mark the item allocated, 333 + * so that remote processors will get a consistent view of the item 334 + * even though they do not take the spinlock on read. 335 + */ 336 + wmb(); 337 + entry->allocated = 1; 338 + 339 + header->free_offset += size; 340 + header->available -= size; 341 + 342 + return 0; 343 + } 344 + 345 + /** 346 + * qcom_smem_alloc() - allocate space for a smem item 347 + * @host: remote processor id, or -1 348 + * @item: smem item handle 349 + * @size: number of bytes to be allocated 350 + * 351 + * Allocate space for a given smem item of size @size, given that the item is 352 + * not yet allocated. 
353 + */ 354 + int qcom_smem_alloc(unsigned host, unsigned item, size_t size) 355 + { 356 + unsigned long flags; 357 + int ret; 358 + 359 + if (!__smem) 360 + return -EPROBE_DEFER; 361 + 362 + if (item < SMEM_ITEM_LAST_FIXED) { 363 + dev_err(__smem->dev, 364 + "Rejecting allocation of static entry %d\n", item); 365 + return -EINVAL; 366 + } 367 + 368 + ret = hwspin_lock_timeout_irqsave(__smem->hwlock, 369 + HWSPINLOCK_TIMEOUT, 370 + &flags); 371 + if (ret) 372 + return ret; 373 + 374 + ret = qcom_smem_alloc_private(__smem, host, item, size); 375 + if (ret == -ENOENT) 376 + ret = qcom_smem_alloc_global(__smem, item, size); 377 + 378 + hwspin_unlock_irqrestore(__smem->hwlock, &flags); 379 + 380 + return ret; 381 + } 382 + EXPORT_SYMBOL(qcom_smem_alloc); 383 + 384 + static int qcom_smem_get_global(struct qcom_smem *smem, 385 + unsigned item, 386 + void **ptr, 387 + size_t *size) 388 + { 389 + struct smem_header *header; 390 + struct smem_region *area; 391 + struct smem_global_entry *entry; 392 + u32 aux_base; 393 + unsigned i; 394 + 395 + if (WARN_ON(item >= SMEM_ITEM_COUNT)) 396 + return -EINVAL; 397 + 398 + header = smem->regions[0].virt_base; 399 + entry = &header->toc[item]; 400 + if (!entry->allocated) 401 + return -ENXIO; 402 + 403 + if (ptr != NULL) { 404 + aux_base = entry->aux_base & AUX_BASE_MASK; 405 + 406 + for (i = 0; i < smem->num_regions; i++) { 407 + area = &smem->regions[i]; 408 + 409 + if (area->aux_base == aux_base || !aux_base) { 410 + *ptr = area->virt_base + entry->offset; 411 + break; 412 + } 413 + } 414 + } 415 + if (size != NULL) 416 + *size = entry->size; 417 + 418 + return 0; 419 + } 420 + 421 + static int qcom_smem_get_private(struct qcom_smem *smem, 422 + unsigned host, 423 + unsigned item, 424 + void **ptr, 425 + size_t *size) 426 + { 427 + struct smem_partition_header *phdr; 428 + struct smem_private_entry *hdr; 429 + void *p; 430 + 431 + /* We're not going to find it if there's no matching partition */ 432 + if (host >= SMEM_HOST_COUNT 
|| !smem->partitions[host]) 433 + return -ENOENT; 434 + 435 + phdr = smem->partitions[host]; 436 + 437 + p = (void *)phdr + sizeof(*phdr); 438 + while (p < (void *)phdr + phdr->offset_free_uncached) { 439 + hdr = p; 440 + 441 + if (hdr->canary != SMEM_PRIVATE_CANARY) { 442 + dev_err(smem->dev, 443 + "Found invalid canary in host %d partition\n", 444 + host); 445 + return -EINVAL; 446 + } 447 + 448 + if (hdr->item == item) { 449 + if (ptr != NULL) 450 + *ptr = p + sizeof(*hdr) + hdr->padding_hdr; 451 + 452 + if (size != NULL) 453 + *size = hdr->size - hdr->padding_data; 454 + 455 + return 0; 456 + } 457 + 458 + p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 459 + } 460 + 461 + return -ENOENT; 462 + } 463 + 464 + /** 465 + * qcom_smem_get() - resolve ptr of size of a smem item 466 + * @host: the remote processor, or -1 467 + * @item: smem item handle 468 + * @ptr: pointer to be filled out with address of the item 469 + * @size: pointer to be filled out with size of the item 470 + * 471 + * Looks up pointer and size of a smem item. 472 + */ 473 + int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size) 474 + { 475 + unsigned long flags; 476 + int ret; 477 + 478 + if (!__smem) 479 + return -EPROBE_DEFER; 480 + 481 + ret = hwspin_lock_timeout_irqsave(__smem->hwlock, 482 + HWSPINLOCK_TIMEOUT, 483 + &flags); 484 + if (ret) 485 + return ret; 486 + 487 + ret = qcom_smem_get_private(__smem, host, item, ptr, size); 488 + if (ret == -ENOENT) 489 + ret = qcom_smem_get_global(__smem, item, ptr, size); 490 + 491 + hwspin_unlock_irqrestore(__smem->hwlock, &flags); 492 + return ret; 493 + 494 + } 495 + EXPORT_SYMBOL(qcom_smem_get); 496 + 497 + /** 498 + * qcom_smem_get_free_space() - retrieve amount of free space in a partition 499 + * @host: the remote processor identifying a partition, or -1 500 + * 501 + * To be used by smem clients as a quick way to determine if any new 502 + * allocations has been made. 
503 + */ 504 + int qcom_smem_get_free_space(unsigned host) 505 + { 506 + struct smem_partition_header *phdr; 507 + struct smem_header *header; 508 + unsigned ret; 509 + 510 + if (!__smem) 511 + return -EPROBE_DEFER; 512 + 513 + if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { 514 + phdr = __smem->partitions[host]; 515 + ret = phdr->offset_free_cached - phdr->offset_free_uncached; 516 + } else { 517 + header = __smem->regions[0].virt_base; 518 + ret = header->available; 519 + } 520 + 521 + return ret; 522 + } 523 + EXPORT_SYMBOL(qcom_smem_get_free_space); 524 + 525 + static int qcom_smem_get_sbl_version(struct qcom_smem *smem) 526 + { 527 + unsigned *versions; 528 + size_t size; 529 + int ret; 530 + 531 + ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, 532 + (void **)&versions, &size); 533 + if (ret < 0) { 534 + dev_err(smem->dev, "Unable to read the version item\n"); 535 + return -ENOENT; 536 + } 537 + 538 + if (size < sizeof(unsigned) * SMEM_MASTER_SBL_VERSION_INDEX) { 539 + dev_err(smem->dev, "Version item is too small\n"); 540 + return -EINVAL; 541 + } 542 + 543 + return versions[SMEM_MASTER_SBL_VERSION_INDEX]; 544 + } 545 + 546 + static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, 547 + unsigned local_host) 548 + { 549 + struct smem_partition_header *header; 550 + struct smem_ptable_entry *entry; 551 + struct smem_ptable *ptable; 552 + unsigned remote_host; 553 + int i; 554 + 555 + ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; 556 + if (ptable->magic != SMEM_PTABLE_MAGIC) 557 + return 0; 558 + 559 + if (ptable->version != 1) { 560 + dev_err(smem->dev, 561 + "Unsupported partition header version %d\n", 562 + ptable->version); 563 + return -EINVAL; 564 + } 565 + 566 + for (i = 0; i < ptable->num_entries; i++) { 567 + entry = &ptable->entry[i]; 568 + 569 + if (entry->host0 != local_host && entry->host1 != local_host) 570 + continue; 571 + 572 + if (!entry->offset) 573 + continue; 574 + 575 + if (!entry->size) 576 + 
continue; 577 + 578 + if (entry->host0 == local_host) 579 + remote_host = entry->host1; 580 + else 581 + remote_host = entry->host0; 582 + 583 + if (remote_host >= SMEM_HOST_COUNT) { 584 + dev_err(smem->dev, 585 + "Invalid remote host %d\n", 586 + remote_host); 587 + return -EINVAL; 588 + } 589 + 590 + if (smem->partitions[remote_host]) { 591 + dev_err(smem->dev, 592 + "Already found a partition for host %d\n", 593 + remote_host); 594 + return -EINVAL; 595 + } 596 + 597 + header = smem->regions[0].virt_base + entry->offset; 598 + 599 + if (header->magic != SMEM_PART_MAGIC) { 600 + dev_err(smem->dev, 601 + "Partition %d has invalid magic\n", i); 602 + return -EINVAL; 603 + } 604 + 605 + if (header->host0 != local_host && header->host1 != local_host) { 606 + dev_err(smem->dev, 607 + "Partition %d hosts are invalid\n", i); 608 + return -EINVAL; 609 + } 610 + 611 + if (header->host0 != remote_host && header->host1 != remote_host) { 612 + dev_err(smem->dev, 613 + "Partition %d hosts are invalid\n", i); 614 + return -EINVAL; 615 + } 616 + 617 + if (header->size != entry->size) { 618 + dev_err(smem->dev, 619 + "Partition %d has invalid size\n", i); 620 + return -EINVAL; 621 + } 622 + 623 + if (header->offset_free_uncached > header->size) { 624 + dev_err(smem->dev, 625 + "Partition %d has invalid free pointer\n", i); 626 + return -EINVAL; 627 + } 628 + 629 + smem->partitions[remote_host] = header; 630 + } 631 + 632 + return 0; 633 + } 634 + 635 + static int qcom_smem_count_mem_regions(struct platform_device *pdev) 636 + { 637 + struct resource *res; 638 + int num_regions = 0; 639 + int i; 640 + 641 + for (i = 0; i < pdev->num_resources; i++) { 642 + res = &pdev->resource[i]; 643 + 644 + if (resource_type(res) == IORESOURCE_MEM) 645 + num_regions++; 646 + } 647 + 648 + return num_regions; 649 + } 650 + 651 + static int qcom_smem_probe(struct platform_device *pdev) 652 + { 653 + struct smem_header *header; 654 + struct device_node *np; 655 + struct qcom_smem *smem; 656 + 
struct resource *res; 657 + struct resource r; 658 + size_t array_size; 659 + int num_regions = 0; 660 + int hwlock_id; 661 + u32 version; 662 + int ret; 663 + int i; 664 + 665 + num_regions = qcom_smem_count_mem_regions(pdev) + 1; 666 + 667 + array_size = num_regions * sizeof(struct smem_region); 668 + smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL); 669 + if (!smem) 670 + return -ENOMEM; 671 + 672 + smem->dev = &pdev->dev; 673 + smem->num_regions = num_regions; 674 + 675 + np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); 676 + if (!np) { 677 + dev_err(&pdev->dev, "No memory-region specified\n"); 678 + return -EINVAL; 679 + } 680 + 681 + ret = of_address_to_resource(np, 0, &r); 682 + of_node_put(np); 683 + if (ret) 684 + return ret; 685 + 686 + smem->regions[0].aux_base = (u32)r.start; 687 + smem->regions[0].size = resource_size(&r); 688 + smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev, 689 + r.start, 690 + resource_size(&r)); 691 + if (!smem->regions[0].virt_base) 692 + return -ENOMEM; 693 + 694 + for (i = 1; i < num_regions; i++) { 695 + res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1); 696 + 697 + smem->regions[i].aux_base = (u32)res->start; 698 + smem->regions[i].size = resource_size(res); 699 + smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev, 700 + res->start, 701 + resource_size(res)); 702 + if (!smem->regions[i].virt_base) 703 + return -ENOMEM; 704 + } 705 + 706 + header = smem->regions[0].virt_base; 707 + if (header->initialized != 1 || header->reserved) { 708 + dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); 709 + return -EINVAL; 710 + } 711 + 712 + version = qcom_smem_get_sbl_version(smem); 713 + if (version >> 16 != SMEM_EXPECTED_VERSION) { 714 + dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); 715 + return -EINVAL; 716 + } 717 + 718 + ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS); 719 + if (ret < 0) 720 + return ret; 721 + 722 + hwlock_id = 
of_hwspin_lock_get_id(pdev->dev.of_node, 0); 723 + if (hwlock_id < 0) { 724 + dev_err(&pdev->dev, "failed to retrieve hwlock\n"); 725 + return hwlock_id; 726 + } 727 + 728 + smem->hwlock = hwspin_lock_request_specific(hwlock_id); 729 + if (!smem->hwlock) 730 + return -ENXIO; 731 + 732 + __smem = smem; 733 + 734 + return 0; 735 + } 736 + 737 + static int qcom_smem_remove(struct platform_device *pdev) 738 + { 739 + __smem = NULL; 740 + hwspin_lock_free(__smem->hwlock); 741 + 742 + return 0; 743 + } 744 + 745 + static const struct of_device_id qcom_smem_of_match[] = { 746 + { .compatible = "qcom,smem" }, 747 + {} 748 + }; 749 + MODULE_DEVICE_TABLE(of, qcom_smem_of_match); 750 + 751 + static struct platform_driver qcom_smem_driver = { 752 + .probe = qcom_smem_probe, 753 + .remove = qcom_smem_remove, 754 + .driver = { 755 + .name = "qcom-smem", 756 + .of_match_table = qcom_smem_of_match, 757 + .suppress_bind_attrs = true, 758 + }, 759 + }; 760 + 761 + static int __init qcom_smem_init(void) 762 + { 763 + return platform_driver_register(&qcom_smem_driver); 764 + } 765 + arch_initcall(qcom_smem_init); 766 + 767 + static void __exit qcom_smem_exit(void) 768 + { 769 + platform_driver_unregister(&qcom_smem_driver); 770 + } 771 + module_exit(qcom_smem_exit) 772 + 773 + MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>"); 774 + MODULE_DESCRIPTION("Qualcomm Shared Memory Manager"); 775 + MODULE_LICENSE("GPL v2");
+35
include/linux/soc/qcom/smd-rpm.h
#ifndef __QCOM_SMD_RPM_H__
#define __QCOM_SMD_RPM_H__

/* Opaque handle to the SMD-RPM driver instance; defined by the driver */
struct qcom_smd_rpm;

/* RPM state contexts that a resource vote can be issued against */
#define QCOM_SMD_RPM_ACTIVE_STATE        0
#define QCOM_SMD_RPM_SLEEP_STATE         1

/*
 * Constants used for addressing resources in the RPM.
 *
 * The values are four ASCII characters packed little-endian, spelling out
 * the resource type name in memory order (e.g. 0x616f646c is "ldoa",
 * 0x61706d73 is "smpa").
 */
#define QCOM_SMD_RPM_BOOST	0x61747362
#define QCOM_SMD_RPM_BUS_CLK	0x316b6c63
#define QCOM_SMD_RPM_BUS_MASTER	0x73616d62
#define QCOM_SMD_RPM_BUS_SLAVE	0x766c7362
#define QCOM_SMD_RPM_CLK_BUF_A	0x616B6C63
#define QCOM_SMD_RPM_LDOA	0x616f646c
#define QCOM_SMD_RPM_LDOB	0x626F646C
#define QCOM_SMD_RPM_MEM_CLK	0x326b6c63
#define QCOM_SMD_RPM_MISC_CLK	0x306b6c63
#define QCOM_SMD_RPM_NCPA	0x6170636E
#define QCOM_SMD_RPM_NCPB	0x6270636E
#define QCOM_SMD_RPM_OCMEM_PWR	0x706d636f
#define QCOM_SMD_RPM_QPIC_CLK	0x63697071
#define QCOM_SMD_RPM_SMPA	0x61706d73
#define QCOM_SMD_RPM_SMPB	0x62706d73
#define QCOM_SMD_RPM_SPDM	0x63707362
#define QCOM_SMD_RPM_VSA	0x00617376

/*
 * Write a request of @count bytes from @buf to the RPM, addressing the
 * resource identified by (@resource_type, @resource_id) in the given @state
 * context (one of the QCOM_SMD_RPM_*_STATE values above).
 *
 * NOTE(review): implementation not in view — presumably returns 0 on success
 * or a negative errno; confirm against the smd-rpm driver.
 */
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
		       int state,
		       u32 resource_type, u32 resource_id,
		       void *buf, size_t count);

#endif
+46
include/linux/soc/qcom/smd.h
#ifndef __QCOM_SMD_H__
#define __QCOM_SMD_H__

#include <linux/device.h>
#include <linux/mod_devicetable.h>

/* Opaque handles defined by the SMD driver implementation */
struct qcom_smd;
struct qcom_smd_channel;
struct qcom_smd_lookup;

/**
 * struct qcom_smd_device - smd device struct
 * @dev:	the device struct
 * @channel:	handle to the smd channel for this device
 */
struct qcom_smd_device {
	struct device dev;
	struct qcom_smd_channel *channel;
};

/**
 * struct qcom_smd_driver - smd driver struct
 * @driver:	underlying device driver
 * @probe:	invoked when the smd channel is found
 * @remove:	invoked when the smd channel is closed
 * @callback:	invoked when an inbound message is received on the channel,
 *		should return 0 on success or -EBUSY if the data cannot be
 *		consumed at this time
 */
struct qcom_smd_driver {
	struct device_driver driver;
	int (*probe)(struct qcom_smd_device *dev);
	void (*remove)(struct qcom_smd_device *dev);
	int (*callback)(struct qcom_smd_device *, const void *, size_t);
};

/* Register/unregister an smd driver on the smd bus */
int qcom_smd_driver_register(struct qcom_smd_driver *drv);
void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);

/*
 * Convenience macro expanding to module init/exit that register and
 * unregister the given qcom_smd_driver.
 */
#define module_qcom_smd_driver(__smd_driver) \
	module_driver(__smd_driver, qcom_smd_driver_register, \
		      qcom_smd_driver_unregister)

/*
 * Send @len bytes from @data over @channel.
 *
 * NOTE(review): implementation not in view — presumably returns 0 on success
 * or a negative errno; confirm against the SMD driver.
 */
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);

#endif
+11
include/linux/soc/qcom/smem.h
#ifndef __QCOM_SMEM_H__
#define __QCOM_SMEM_H__

/* Pass as @host to address the global smem heap rather than a host partition */
#define QCOM_SMEM_HOST_ANY -1

/*
 * Allocate @size bytes for smem item @item, in the partition shared with
 * @host or in the global heap. Returns 0 on success, -EPROBE_DEFER if the
 * smem driver has not probed yet, or another negative errno on failure.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size);

/*
 * Resolve address and size of smem item @item; either of @ptr and @size may
 * be NULL if not wanted. Returns 0 on success, -EPROBE_DEFER if the smem
 * driver has not probed yet, or another negative errno on failure.
 */
int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);

/*
 * Number of free bytes in the partition shared with @host (or the global
 * heap); useful as a cheap check for whether new allocations have been made.
 * Returns -EPROBE_DEFER if the smem driver has not probed yet.
 */
int qcom_smem_get_free_space(unsigned host);

#endif