Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

pds_fwctl: add rpc and query support

The pds_fwctl driver doesn't know what RPC operations are available
in the firmware, so it also doesn't know what scope they might have. The
userland utility supplies the firmware "endpoint" and "operation" id values
and this driver queries the firmware for endpoints and their available
operations. The operation descriptions include the scope information
which the driver uses for scope testing.

Link: https://patch.msgid.link/r/20250320194412.67983-6-shannon.nelson@amd.com
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Authored by Brett Creeley and committed by Jason Gunthorpe
92c66ee8 4d09dd11

+593 -1
+369 -1
drivers/fwctl/pds/main.c
··· 5 5 #include <linux/auxiliary_bus.h> 6 6 #include <linux/pci.h> 7 7 #include <linux/vmalloc.h> 8 + #include <linux/bitfield.h> 8 9 9 10 #include <uapi/fwctl/fwctl.h> 10 11 #include <uapi/fwctl/pds.h> ··· 21 20 u32 uctx_caps; 22 21 }; 23 22 23 + struct pdsfc_rpc_endpoint_info { 24 + u32 endpoint; 25 + dma_addr_t operations_pa; 26 + struct pds_fwctl_query_data *operations; 27 + struct mutex lock; /* lock for endpoint info management */ 28 + }; 29 + 24 30 struct pdsfc_dev { 25 31 struct fwctl_device fwctl; 26 32 struct pds_auxiliary_dev *padev; 27 33 u32 caps; 28 34 struct pds_fwctl_ident ident; 35 + dma_addr_t endpoints_pa; 36 + struct pds_fwctl_query_data *endpoints; 37 + struct pdsfc_rpc_endpoint_info *endpoint_info; 29 38 }; 30 39 31 40 static int pdsfc_open_uctx(struct fwctl_uctx *uctx) ··· 102 91 return err; 103 92 } 104 93 94 + static void pdsfc_free_endpoints(struct pdsfc_dev *pdsfc) 95 + { 96 + struct device *dev = &pdsfc->fwctl.dev; 97 + int i; 98 + 99 + if (!pdsfc->endpoints) 100 + return; 101 + 102 + for (i = 0; pdsfc->endpoint_info && i < pdsfc->endpoints->num_entries; i++) 103 + mutex_destroy(&pdsfc->endpoint_info[i].lock); 104 + vfree(pdsfc->endpoint_info); 105 + pdsfc->endpoint_info = NULL; 106 + dma_free_coherent(dev->parent, PAGE_SIZE, 107 + pdsfc->endpoints, pdsfc->endpoints_pa); 108 + pdsfc->endpoints = NULL; 109 + pdsfc->endpoints_pa = DMA_MAPPING_ERROR; 110 + } 111 + 112 + static void pdsfc_free_operations(struct pdsfc_dev *pdsfc) 113 + { 114 + struct device *dev = &pdsfc->fwctl.dev; 115 + u32 num_endpoints; 116 + int i; 117 + 118 + num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries); 119 + for (i = 0; i < num_endpoints; i++) { 120 + struct pdsfc_rpc_endpoint_info *ei = &pdsfc->endpoint_info[i]; 121 + 122 + if (ei->operations) { 123 + dma_free_coherent(dev->parent, PAGE_SIZE, 124 + ei->operations, ei->operations_pa); 125 + ei->operations = NULL; 126 + ei->operations_pa = DMA_MAPPING_ERROR; 127 + } 128 + } 129 + } 130 + 131 + static 
struct pds_fwctl_query_data *pdsfc_get_endpoints(struct pdsfc_dev *pdsfc, 132 + dma_addr_t *pa) 133 + { 134 + struct device *dev = &pdsfc->fwctl.dev; 135 + union pds_core_adminq_comp comp = {0}; 136 + struct pds_fwctl_query_data *data; 137 + union pds_core_adminq_cmd cmd; 138 + dma_addr_t data_pa; 139 + int err; 140 + 141 + data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL); 142 + if (!data) { 143 + dev_err(dev, "Failed to map endpoint list\n"); 144 + return ERR_PTR(-ENOMEM); 145 + } 146 + 147 + cmd = (union pds_core_adminq_cmd) { 148 + .fwctl_query = { 149 + .opcode = PDS_FWCTL_CMD_QUERY, 150 + .entity = PDS_FWCTL_RPC_ROOT, 151 + .version = 0, 152 + .query_data_buf_len = cpu_to_le32(PAGE_SIZE), 153 + .query_data_buf_pa = cpu_to_le64(data_pa), 154 + } 155 + }; 156 + 157 + err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0); 158 + if (err) { 159 + dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n", 160 + cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err); 161 + dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa); 162 + return ERR_PTR(err); 163 + } 164 + 165 + *pa = data_pa; 166 + 167 + return data; 168 + } 169 + 170 + static int pdsfc_init_endpoints(struct pdsfc_dev *pdsfc) 171 + { 172 + struct pds_fwctl_query_data_endpoint *ep_entry; 173 + u32 num_endpoints; 174 + int i; 175 + 176 + pdsfc->endpoints = pdsfc_get_endpoints(pdsfc, &pdsfc->endpoints_pa); 177 + if (IS_ERR(pdsfc->endpoints)) 178 + return PTR_ERR(pdsfc->endpoints); 179 + 180 + num_endpoints = le32_to_cpu(pdsfc->endpoints->num_entries); 181 + pdsfc->endpoint_info = vcalloc(num_endpoints, 182 + sizeof(*pdsfc->endpoint_info)); 183 + if (!pdsfc->endpoint_info) { 184 + pdsfc_free_endpoints(pdsfc); 185 + return -ENOMEM; 186 + } 187 + 188 + ep_entry = (struct pds_fwctl_query_data_endpoint *)pdsfc->endpoints->entries; 189 + for (i = 0; i < num_endpoints; i++) { 190 + mutex_init(&pdsfc->endpoint_info[i].lock); 191 + 
pdsfc->endpoint_info[i].endpoint = ep_entry[i].id; 192 + } 193 + 194 + return 0; 195 + } 196 + 197 + static struct pds_fwctl_query_data *pdsfc_get_operations(struct pdsfc_dev *pdsfc, 198 + dma_addr_t *pa, u32 ep) 199 + { 200 + struct pds_fwctl_query_data_operation *entries; 201 + struct device *dev = &pdsfc->fwctl.dev; 202 + union pds_core_adminq_comp comp = {0}; 203 + struct pds_fwctl_query_data *data; 204 + union pds_core_adminq_cmd cmd; 205 + dma_addr_t data_pa; 206 + int err; 207 + int i; 208 + 209 + /* Query the operations list for the given endpoint */ 210 + data = dma_alloc_coherent(dev->parent, PAGE_SIZE, &data_pa, GFP_KERNEL); 211 + if (!data) { 212 + dev_err(dev, "Failed to map operations list\n"); 213 + return ERR_PTR(-ENOMEM); 214 + } 215 + 216 + cmd = (union pds_core_adminq_cmd) { 217 + .fwctl_query = { 218 + .opcode = PDS_FWCTL_CMD_QUERY, 219 + .entity = PDS_FWCTL_RPC_ENDPOINT, 220 + .version = 0, 221 + .query_data_buf_len = cpu_to_le32(PAGE_SIZE), 222 + .query_data_buf_pa = cpu_to_le64(data_pa), 223 + .ep = cpu_to_le32(ep), 224 + } 225 + }; 226 + 227 + err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0); 228 + if (err) { 229 + dev_err(dev, "Failed to send adminq cmd opcode: %u entity: %u err: %d\n", 230 + cmd.fwctl_query.opcode, cmd.fwctl_query.entity, err); 231 + dma_free_coherent(dev->parent, PAGE_SIZE, data, data_pa); 232 + return ERR_PTR(err); 233 + } 234 + 235 + *pa = data_pa; 236 + 237 + entries = (struct pds_fwctl_query_data_operation *)data->entries; 238 + dev_dbg(dev, "num_entries %d\n", data->num_entries); 239 + for (i = 0; i < data->num_entries; i++) { 240 + 241 + /* Translate FW command attribute to fwctl scope */ 242 + switch (entries[i].scope) { 243 + case PDSFC_FW_CMD_ATTR_READ: 244 + case PDSFC_FW_CMD_ATTR_WRITE: 245 + case PDSFC_FW_CMD_ATTR_SYNC: 246 + entries[i].scope = FWCTL_RPC_CONFIGURATION; 247 + break; 248 + case PDSFC_FW_CMD_ATTR_DEBUG_READ: 249 + entries[i].scope = FWCTL_RPC_DEBUG_READ_ONLY; 250 + break; 
251 + case PDSFC_FW_CMD_ATTR_DEBUG_WRITE: 252 + entries[i].scope = FWCTL_RPC_DEBUG_WRITE; 253 + break; 254 + default: 255 + entries[i].scope = FWCTL_RPC_DEBUG_WRITE_FULL; 256 + break; 257 + } 258 + dev_dbg(dev, "endpoint %d operation: id %x scope %d\n", 259 + ep, entries[i].id, entries[i].scope); 260 + } 261 + 262 + return data; 263 + } 264 + 265 + static int pdsfc_validate_rpc(struct pdsfc_dev *pdsfc, 266 + struct fwctl_rpc_pds *rpc, 267 + enum fwctl_rpc_scope scope) 268 + { 269 + struct pds_fwctl_query_data_operation *op_entry; 270 + struct pdsfc_rpc_endpoint_info *ep_info = NULL; 271 + struct device *dev = &pdsfc->fwctl.dev; 272 + int i; 273 + 274 + /* validate rpc in_len & out_len based 275 + * on ident.max_req_sz & max_resp_sz 276 + */ 277 + if (rpc->in.len > pdsfc->ident.max_req_sz) { 278 + dev_dbg(dev, "Invalid request size %u, max %u\n", 279 + rpc->in.len, pdsfc->ident.max_req_sz); 280 + return -EINVAL; 281 + } 282 + 283 + if (rpc->out.len > pdsfc->ident.max_resp_sz) { 284 + dev_dbg(dev, "Invalid response size %u, max %u\n", 285 + rpc->out.len, pdsfc->ident.max_resp_sz); 286 + return -EINVAL; 287 + } 288 + 289 + for (i = 0; i < pdsfc->endpoints->num_entries; i++) { 290 + if (pdsfc->endpoint_info[i].endpoint == rpc->in.ep) { 291 + ep_info = &pdsfc->endpoint_info[i]; 292 + break; 293 + } 294 + } 295 + if (!ep_info) { 296 + dev_dbg(dev, "Invalid endpoint %d\n", rpc->in.ep); 297 + return -EINVAL; 298 + } 299 + 300 + /* query and cache this endpoint's operations */ 301 + mutex_lock(&ep_info->lock); 302 + if (!ep_info->operations) { 303 + struct pds_fwctl_query_data *operations; 304 + 305 + operations = pdsfc_get_operations(pdsfc, 306 + &ep_info->operations_pa, 307 + rpc->in.ep); 308 + if (IS_ERR(operations)) { 309 + mutex_unlock(&ep_info->lock); 310 + return -ENOMEM; 311 + } 312 + ep_info->operations = operations; 313 + } 314 + mutex_unlock(&ep_info->lock); 315 + 316 + /* reject unsupported and/or out of scope commands */ 317 + op_entry = (struct 
pds_fwctl_query_data_operation *)ep_info->operations->entries; 318 + for (i = 0; i < ep_info->operations->num_entries; i++) { 319 + if (PDS_FWCTL_RPC_OPCODE_CMP(rpc->in.op, op_entry[i].id)) { 320 + if (scope < op_entry[i].scope) 321 + return -EPERM; 322 + return 0; 323 + } 324 + } 325 + 326 + dev_dbg(dev, "Invalid operation %d for endpoint %d\n", rpc->in.op, rpc->in.ep); 327 + 328 + return -EINVAL; 329 + } 330 + 105 331 static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope, 106 332 void *in, size_t in_len, size_t *out_len) 107 333 { 108 - return NULL; 334 + struct pdsfc_dev *pdsfc = container_of(uctx->fwctl, struct pdsfc_dev, fwctl); 335 + struct device *dev = &uctx->fwctl->dev; 336 + union pds_core_adminq_comp comp = {0}; 337 + dma_addr_t out_payload_dma_addr = 0; 338 + dma_addr_t in_payload_dma_addr = 0; 339 + struct fwctl_rpc_pds *rpc = in; 340 + union pds_core_adminq_cmd cmd; 341 + void *out_payload = NULL; 342 + void *in_payload = NULL; 343 + void *out = NULL; 344 + int err; 345 + 346 + err = pdsfc_validate_rpc(pdsfc, rpc, scope); 347 + if (err) 348 + return ERR_PTR(err); 349 + 350 + if (rpc->in.len > 0) { 351 + in_payload = kzalloc(rpc->in.len, GFP_KERNEL); 352 + if (!in_payload) { 353 + dev_err(dev, "Failed to allocate in_payload\n"); 354 + err = -ENOMEM; 355 + goto err_out; 356 + } 357 + 358 + if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload), 359 + rpc->in.len)) { 360 + dev_dbg(dev, "Failed to copy in_payload from user\n"); 361 + err = -EFAULT; 362 + goto err_in_payload; 363 + } 364 + 365 + in_payload_dma_addr = dma_map_single(dev->parent, in_payload, 366 + rpc->in.len, DMA_TO_DEVICE); 367 + err = dma_mapping_error(dev->parent, in_payload_dma_addr); 368 + if (err) { 369 + dev_dbg(dev, "Failed to map in_payload\n"); 370 + goto err_in_payload; 371 + } 372 + } 373 + 374 + if (rpc->out.len > 0) { 375 + out_payload = kzalloc(rpc->out.len, GFP_KERNEL); 376 + if (!out_payload) { 377 + dev_dbg(dev, "Failed to allocate 
out_payload\n"); 378 + err = -ENOMEM; 379 + goto err_out_payload; 380 + } 381 + 382 + out_payload_dma_addr = dma_map_single(dev->parent, out_payload, 383 + rpc->out.len, DMA_FROM_DEVICE); 384 + err = dma_mapping_error(dev->parent, out_payload_dma_addr); 385 + if (err) { 386 + dev_dbg(dev, "Failed to map out_payload\n"); 387 + goto err_out_payload; 388 + } 389 + } 390 + 391 + cmd = (union pds_core_adminq_cmd) { 392 + .fwctl_rpc = { 393 + .opcode = PDS_FWCTL_CMD_RPC, 394 + .flags = PDS_FWCTL_RPC_IND_REQ | PDS_FWCTL_RPC_IND_RESP, 395 + .ep = cpu_to_le32(rpc->in.ep), 396 + .op = cpu_to_le32(rpc->in.op), 397 + .req_pa = cpu_to_le64(in_payload_dma_addr), 398 + .req_sz = cpu_to_le32(rpc->in.len), 399 + .resp_pa = cpu_to_le64(out_payload_dma_addr), 400 + .resp_sz = cpu_to_le32(rpc->out.len), 401 + } 402 + }; 403 + 404 + err = pds_client_adminq_cmd(pdsfc->padev, &cmd, sizeof(cmd), &comp, 0); 405 + if (err) { 406 + dev_dbg(dev, "%s: ep %d op %x req_pa %llx req_sz %d req_sg %d resp_pa %llx resp_sz %d resp_sg %d err %d\n", 407 + __func__, rpc->in.ep, rpc->in.op, 408 + cmd.fwctl_rpc.req_pa, cmd.fwctl_rpc.req_sz, cmd.fwctl_rpc.req_sg_elems, 409 + cmd.fwctl_rpc.resp_pa, cmd.fwctl_rpc.resp_sz, cmd.fwctl_rpc.resp_sg_elems, 410 + err); 411 + goto done; 412 + } 413 + 414 + dynamic_hex_dump("out ", DUMP_PREFIX_OFFSET, 16, 1, out_payload, rpc->out.len, true); 415 + 416 + if (copy_to_user(u64_to_user_ptr(rpc->out.payload), out_payload, rpc->out.len)) { 417 + dev_dbg(dev, "Failed to copy out_payload to user\n"); 418 + out = ERR_PTR(-EFAULT); 419 + goto done; 420 + } 421 + 422 + rpc->out.retval = le32_to_cpu(comp.fwctl_rpc.err); 423 + *out_len = in_len; 424 + out = in; 425 + 426 + done: 427 + if (out_payload_dma_addr) 428 + dma_unmap_single(dev->parent, out_payload_dma_addr, 429 + rpc->out.len, DMA_FROM_DEVICE); 430 + err_out_payload: 431 + kfree(out_payload); 432 + 433 + if (in_payload_dma_addr) 434 + dma_unmap_single(dev->parent, in_payload_dma_addr, 435 + rpc->in.len, DMA_TO_DEVICE); 
436 + err_in_payload: 437 + kfree(in_payload); 438 + err_out: 439 + if (err) 440 + return ERR_PTR(err); 441 + 442 + return out; 109 443 } 110 444 111 445 static const struct fwctl_ops pdsfc_ops = { ··· 483 127 return dev_err_probe(dev, err, "Failed to identify device\n"); 484 128 } 485 129 130 + err = pdsfc_init_endpoints(pdsfc); 131 + if (err) { 132 + fwctl_put(&pdsfc->fwctl); 133 + return dev_err_probe(dev, err, "Failed to init endpoints\n"); 134 + } 135 + 136 + pdsfc->caps = PDS_FWCTL_QUERY_CAP | PDS_FWCTL_SEND_CAP; 137 + 486 138 err = fwctl_register(&pdsfc->fwctl); 487 139 if (err) { 140 + pdsfc_free_endpoints(pdsfc); 488 141 fwctl_put(&pdsfc->fwctl); 489 142 return dev_err_probe(dev, err, "Failed to register device\n"); 490 143 } ··· 508 143 struct pdsfc_dev *pdsfc = auxiliary_get_drvdata(adev); 509 144 510 145 fwctl_unregister(&pdsfc->fwctl); 146 + pdsfc_free_operations(pdsfc); 147 + pdsfc_free_endpoints(pdsfc); 148 + 511 149 fwctl_put(&pdsfc->fwctl); 512 150 } 513 151
+194
include/linux/pds/pds_adminq.h
··· 1181 1181 1182 1182 enum pds_fwctl_cmd_opcode { 1183 1183 PDS_FWCTL_CMD_IDENT = 70, 1184 + PDS_FWCTL_CMD_RPC = 71, 1185 + PDS_FWCTL_CMD_QUERY = 72, 1184 1186 }; 1185 1187 1186 1188 /** ··· 1259 1257 u8 max_resp_sg_elems; 1260 1258 } __packed; 1261 1259 1260 + enum pds_fwctl_query_entity { 1261 + PDS_FWCTL_RPC_ROOT = 0, 1262 + PDS_FWCTL_RPC_ENDPOINT = 1, 1263 + PDS_FWCTL_RPC_OPERATION = 2, 1264 + }; 1265 + 1266 + #define PDS_FWCTL_RPC_OPCODE_CMD_SHIFT 0 1267 + #define PDS_FWCTL_RPC_OPCODE_CMD_MASK GENMASK(15, PDS_FWCTL_RPC_OPCODE_CMD_SHIFT) 1268 + #define PDS_FWCTL_RPC_OPCODE_VER_SHIFT 16 1269 + #define PDS_FWCTL_RPC_OPCODE_VER_MASK GENMASK(23, PDS_FWCTL_RPC_OPCODE_VER_SHIFT) 1270 + 1271 + #define PDS_FWCTL_RPC_OPCODE_GET_CMD(op) FIELD_GET(PDS_FWCTL_RPC_OPCODE_CMD_MASK, op) 1272 + #define PDS_FWCTL_RPC_OPCODE_GET_VER(op) FIELD_GET(PDS_FWCTL_RPC_OPCODE_VER_MASK, op) 1273 + 1274 + #define PDS_FWCTL_RPC_OPCODE_CMP(op1, op2) \ 1275 + (PDS_FWCTL_RPC_OPCODE_GET_CMD(op1) == PDS_FWCTL_RPC_OPCODE_GET_CMD(op2) && \ 1276 + PDS_FWCTL_RPC_OPCODE_GET_VER(op1) <= PDS_FWCTL_RPC_OPCODE_GET_VER(op2)) 1277 + 1278 + /* 1279 + * FW command attributes that map to the FWCTL scope values 1280 + */ 1281 + #define PDSFC_FW_CMD_ATTR_READ 0x00 1282 + #define PDSFC_FW_CMD_ATTR_DEBUG_READ 0x02 1283 + #define PDSFC_FW_CMD_ATTR_WRITE 0x04 1284 + #define PDSFC_FW_CMD_ATTR_DEBUG_WRITE 0x08 1285 + #define PDSFC_FW_CMD_ATTR_SYNC 0x10 1286 + 1287 + /** 1288 + * struct pds_fwctl_query_cmd - Firmware control query command structure 1289 + * @opcode: Operation code for the command 1290 + * @entity: Entity type to query (enum pds_fwctl_query_entity) 1291 + * @version: Version of the query data structure supported by the driver 1292 + * @rsvd: Reserved 1293 + * @query_data_buf_len: Length of the query data buffer 1294 + * @query_data_buf_pa: Physical address of the query data buffer 1295 + * @ep: Endpoint identifier to query (when entity is PDS_FWCTL_RPC_ENDPOINT) 1296 + * @op: Operation identifier to 
query (when entity is PDS_FWCTL_RPC_OPERATION) 1297 + * 1298 + * This structure is used to send a query command to the firmware control 1299 + * interface. The structure is packed to ensure there is no padding between 1300 + * the fields. 1301 + */ 1302 + struct pds_fwctl_query_cmd { 1303 + u8 opcode; 1304 + u8 entity; 1305 + u8 version; 1306 + u8 rsvd; 1307 + __le32 query_data_buf_len; 1308 + __le64 query_data_buf_pa; 1309 + union { 1310 + __le32 ep; 1311 + __le32 op; 1312 + }; 1313 + } __packed; 1314 + 1315 + /** 1316 + * struct pds_fwctl_query_comp - Firmware control query completion structure 1317 + * @status: Status of the query command 1318 + * @rsvd: Reserved 1319 + * @comp_index: Completion index in little-endian format 1320 + * @version: Version of the query data structure returned by firmware. This 1321 + * should be less than or equal to the version supported by the driver 1322 + * @rsvd2: Reserved 1323 + * @color: Color bit indicating the state of the completion 1324 + */ 1325 + struct pds_fwctl_query_comp { 1326 + u8 status; 1327 + u8 rsvd; 1328 + __le16 comp_index; 1329 + u8 version; 1330 + u8 rsvd2[2]; 1331 + u8 color; 1332 + } __packed; 1333 + 1334 + /** 1335 + * struct pds_fwctl_query_data_endpoint - query data for entity PDS_FWCTL_RPC_ROOT 1336 + * @id: The identifier for the data endpoint 1337 + */ 1338 + struct pds_fwctl_query_data_endpoint { 1339 + __le32 id; 1340 + } __packed; 1341 + 1342 + /** 1343 + * struct pds_fwctl_query_data_operation - query data for entity PDS_FWCTL_RPC_ENDPOINT 1344 + * @id: Operation identifier 1345 + * @scope: Scope of the operation (enum fwctl_rpc_scope) 1346 + * @rsvd: Reserved 1347 + */ 1348 + struct pds_fwctl_query_data_operation { 1349 + __le32 id; 1350 + u8 scope; 1351 + u8 rsvd[3]; 1352 + } __packed; 1353 + 1354 + /** 1355 + * struct pds_fwctl_query_data - query data structure 1356 + * @version: Version of the query data structure 1357 + * @rsvd: Reserved 1358 + * @num_entries: Number of entries in the union 
1359 + * @entries: Array of query data entries, depending on the entity type 1360 + */ 1361 + struct pds_fwctl_query_data { 1362 + u8 version; 1363 + u8 rsvd[3]; 1364 + __le32 num_entries; 1365 + u8 entries[] __counted_by_le(num_entries); 1366 + } __packed; 1367 + 1368 + /** 1369 + * struct pds_fwctl_rpc_cmd - Firmware control RPC command 1370 + * @opcode: opcode PDS_FWCTL_CMD_RPC 1371 + * @rsvd: Reserved 1372 + * @flags: Indicates indirect request and/or response handling 1373 + * @ep: Endpoint identifier 1374 + * @op: Operation identifier 1375 + * @inline_req0: Buffer for inline request 1376 + * @inline_req1: Buffer for inline request 1377 + * @req_pa: Physical address of request data 1378 + * @req_sz: Size of the request 1379 + * @req_sg_elems: Number of request SGs 1380 + * @req_rsvd: Reserved 1381 + * @inline_req2: Buffer for inline request 1382 + * @resp_pa: Physical address of response data 1383 + * @resp_sz: Size of the response 1384 + * @resp_sg_elems: Number of response SGs 1385 + * @resp_rsvd: Reserved 1386 + */ 1387 + struct pds_fwctl_rpc_cmd { 1388 + u8 opcode; 1389 + u8 rsvd; 1390 + __le16 flags; 1391 + #define PDS_FWCTL_RPC_IND_REQ 0x1 1392 + #define PDS_FWCTL_RPC_IND_RESP 0x2 1393 + __le32 ep; 1394 + __le32 op; 1395 + u8 inline_req0[16]; 1396 + union { 1397 + u8 inline_req1[16]; 1398 + struct { 1399 + __le64 req_pa; 1400 + __le32 req_sz; 1401 + u8 req_sg_elems; 1402 + u8 req_rsvd[3]; 1403 + }; 1404 + }; 1405 + union { 1406 + u8 inline_req2[16]; 1407 + struct { 1408 + __le64 resp_pa; 1409 + __le32 resp_sz; 1410 + u8 resp_sg_elems; 1411 + u8 resp_rsvd[3]; 1412 + }; 1413 + }; 1414 + } __packed; 1415 + 1416 + /** 1417 + * struct pds_sg_elem - Transmit scatter-gather (SG) descriptor element 1418 + * @addr: DMA address of SG element data buffer 1419 + * @len: Length of SG element data buffer, in bytes 1420 + * @rsvd: Reserved 1421 + */ 1422 + struct pds_sg_elem { 1423 + __le64 addr; 1424 + __le32 len; 1425 + u8 rsvd[4]; 1426 + } __packed; 1427 + 1428 + 
/** 1429 + * struct pds_fwctl_rpc_comp - Completion of a firmware control RPC 1430 + * @status: Status of the command 1431 + * @rsvd: Reserved 1432 + * @comp_index: Completion index of the command 1433 + * @err: Error code, if any, from the RPC 1434 + * @resp_sz: Size of the response 1435 + * @rsvd2: Reserved 1436 + * @color: Color bit indicating the state of the completion 1437 + */ 1438 + struct pds_fwctl_rpc_comp { 1439 + u8 status; 1440 + u8 rsvd; 1441 + __le16 comp_index; 1442 + __le32 err; 1443 + __le32 resp_sz; 1444 + u8 rsvd2[3]; 1445 + u8 color; 1446 + } __packed; 1447 + 1262 1448 union pds_core_adminq_cmd { 1263 1449 u8 opcode; 1264 1450 u8 bytes[64]; ··· 1487 1297 1488 1298 struct pds_fwctl_cmd fwctl; 1489 1299 struct pds_fwctl_ident_cmd fwctl_ident; 1300 + struct pds_fwctl_rpc_cmd fwctl_rpc; 1301 + struct pds_fwctl_query_cmd fwctl_query; 1490 1302 }; 1491 1303 1492 1304 union pds_core_adminq_comp { ··· 1518 1326 struct pds_lm_dirty_status_comp lm_dirty_status; 1519 1327 1520 1328 struct pds_fwctl_comp fwctl; 1329 + struct pds_fwctl_rpc_comp fwctl_rpc; 1330 + struct pds_fwctl_query_comp fwctl_query; 1521 1331 }; 1522 1332 1523 1333 #ifndef __CHECKER__
+30
include/uapi/fwctl/pds.h
··· 29 29 PDS_FWCTL_QUERY_CAP = 0, 30 30 PDS_FWCTL_SEND_CAP, 31 31 }; 32 + 33 + /** 34 + * struct fwctl_rpc_pds 35 + * @in.op: requested operation code 36 + * @in.ep: firmware endpoint to operate on 37 + * @in.rsvd: reserved 38 + * @in.len: length of payload data 39 + * @in.payload: address of payload buffer 40 + * @in: rpc in parameters 41 + * @out.retval: operation result value 42 + * @out.rsvd: reserved 43 + * @out.len: length of result data buffer 44 + * @out.payload: address of payload data buffer 45 + * @out: rpc out parameters 46 + */ 47 + struct fwctl_rpc_pds { 48 + struct { 49 + __u32 op; 50 + __u32 ep; 51 + __u32 rsvd; 52 + __u32 len; 53 + __aligned_u64 payload; 54 + } in; 55 + struct { 56 + __u32 retval; 57 + __u32 rsvd[2]; 58 + __u32 len; 59 + __aligned_u64 payload; 60 + } out; 61 + }; 32 62 #endif /* _UAPI_FWCTL_PDS_H_ */