Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: iosm: protocol operations

1) Update UL/DL transfer descriptors in message ring.
2) Define message set for pipe/sleep protocol.

Signed-off-by: M Chetan Kumar <m.chetan.kumar@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

M Chetan Kumar and committed by
David S. Miller
64516f63 faed4c6f

+996
+552
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2020-21 Intel Corporation. 4 + */ 5 + 6 + #include "iosm_ipc_protocol.h" 7 + #include "iosm_ipc_protocol_ops.h" 8 + 9 + /* Get the next free message element.*/ 10 + static union ipc_mem_msg_entry * 11 + ipc_protocol_free_msg_get(struct iosm_protocol *ipc_protocol, int *index) 12 + { 13 + u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head); 14 + u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES; 15 + union ipc_mem_msg_entry *msg; 16 + 17 + if (new_head == le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)) { 18 + dev_err(ipc_protocol->dev, "message ring is full"); 19 + return NULL; 20 + } 21 + 22 + /* Get the pointer to the next free message element, 23 + * reset the fields and mark is as invalid. 24 + */ 25 + msg = &ipc_protocol->p_ap_shm->msg_ring[head]; 26 + memset(msg, 0, sizeof(*msg)); 27 + 28 + /* return index in message ring */ 29 + *index = head; 30 + 31 + return msg; 32 + } 33 + 34 + /* Updates the message ring Head pointer */ 35 + void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem) 36 + { 37 + struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol; 38 + u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head); 39 + u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES; 40 + 41 + /* Update head pointer and fire doorbell. */ 42 + ipc_protocol->p_ap_shm->msg_head = cpu_to_le32(new_head); 43 + ipc_protocol->old_msg_tail = 44 + le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail); 45 + 46 + ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, IPC_HP_MR, false); 47 + } 48 + 49 + /* Allocate and prepare a OPEN_PIPE message. 50 + * This also allocates the memory for the new TDR structure and 51 + * updates the pipe structure referenced in the preparation arguments. 
52 + */ 53 + static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol, 54 + union ipc_msg_prep_args *args) 55 + { 56 + int index; 57 + union ipc_mem_msg_entry *msg = 58 + ipc_protocol_free_msg_get(ipc_protocol, &index); 59 + struct ipc_pipe *pipe = args->pipe_open.pipe; 60 + struct ipc_protocol_td *tdr; 61 + struct sk_buff **skbr; 62 + 63 + if (!msg) { 64 + dev_err(ipc_protocol->dev, "failed to get free message"); 65 + return -EIO; 66 + } 67 + 68 + /* Allocate the skbuf elements for the skbuf which are on the way. 69 + * SKB ring is internal memory allocation for driver. No need to 70 + * re-calculate the start and end addresses. 71 + */ 72 + skbr = kcalloc(pipe->nr_of_entries, sizeof(*skbr), GFP_ATOMIC); 73 + if (!skbr) 74 + return -ENOMEM; 75 + 76 + /* Allocate the transfer descriptors for the pipe. */ 77 + tdr = pci_alloc_consistent(ipc_protocol->pcie->pci, 78 + pipe->nr_of_entries * sizeof(*tdr), 79 + &pipe->phy_tdr_start); 80 + if (!tdr) { 81 + kfree(skbr); 82 + dev_err(ipc_protocol->dev, "tdr alloc error"); 83 + return -ENOMEM; 84 + } 85 + 86 + pipe->max_nr_of_queued_entries = pipe->nr_of_entries - 1; 87 + pipe->nr_of_queued_entries = 0; 88 + pipe->tdr_start = tdr; 89 + pipe->skbr_start = skbr; 90 + pipe->old_tail = 0; 91 + 92 + ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0; 93 + 94 + msg->open_pipe.type_of_message = IPC_MEM_MSG_OPEN_PIPE; 95 + msg->open_pipe.pipe_nr = pipe->pipe_nr; 96 + msg->open_pipe.tdr_addr = cpu_to_le64(pipe->phy_tdr_start); 97 + msg->open_pipe.tdr_entries = cpu_to_le16(pipe->nr_of_entries); 98 + msg->open_pipe.accumulation_backoff = 99 + cpu_to_le32(pipe->accumulation_backoff); 100 + msg->open_pipe.irq_vector = cpu_to_le32(pipe->irq); 101 + 102 + return index; 103 + } 104 + 105 + static int ipc_protocol_msg_prepipe_close(struct iosm_protocol *ipc_protocol, 106 + union ipc_msg_prep_args *args) 107 + { 108 + int index = -1; 109 + union ipc_mem_msg_entry *msg = 110 + ipc_protocol_free_msg_get(ipc_protocol, 
&index); 111 + struct ipc_pipe *pipe = args->pipe_close.pipe; 112 + 113 + if (!msg) 114 + return -EIO; 115 + 116 + msg->close_pipe.type_of_message = IPC_MEM_MSG_CLOSE_PIPE; 117 + msg->close_pipe.pipe_nr = pipe->pipe_nr; 118 + 119 + dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_CLOSE_PIPE(pipe_nr=%d)", 120 + msg->close_pipe.pipe_nr); 121 + 122 + return index; 123 + } 124 + 125 + static int ipc_protocol_msg_prep_sleep(struct iosm_protocol *ipc_protocol, 126 + union ipc_msg_prep_args *args) 127 + { 128 + int index = -1; 129 + union ipc_mem_msg_entry *msg = 130 + ipc_protocol_free_msg_get(ipc_protocol, &index); 131 + 132 + if (!msg) { 133 + dev_err(ipc_protocol->dev, "failed to get free message"); 134 + return -EIO; 135 + } 136 + 137 + /* Prepare and send the host sleep message to CP to enter or exit D3. */ 138 + msg->host_sleep.type_of_message = IPC_MEM_MSG_SLEEP; 139 + msg->host_sleep.target = args->sleep.target; /* 0=host, 1=device */ 140 + 141 + /* state; 0=enter, 1=exit 2=enter w/o protocol */ 142 + msg->host_sleep.state = args->sleep.state; 143 + 144 + dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_SLEEP(target=%d; state=%d)", 145 + msg->host_sleep.target, msg->host_sleep.state); 146 + 147 + return index; 148 + } 149 + 150 + static int ipc_protocol_msg_prep_feature_set(struct iosm_protocol *ipc_protocol, 151 + union ipc_msg_prep_args *args) 152 + { 153 + int index = -1; 154 + union ipc_mem_msg_entry *msg = 155 + ipc_protocol_free_msg_get(ipc_protocol, &index); 156 + 157 + if (!msg) { 158 + dev_err(ipc_protocol->dev, "failed to get free message"); 159 + return -EIO; 160 + } 161 + 162 + msg->feature_set.type_of_message = IPC_MEM_MSG_FEATURE_SET; 163 + msg->feature_set.reset_enable = args->feature_set.reset_enable << 164 + RESET_BIT; 165 + 166 + dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_FEATURE_SET(reset_enable=%d)", 167 + msg->feature_set.reset_enable >> RESET_BIT); 168 + 169 + return index; 170 + } 171 + 172 + /* Processes the message consumed by CP. 
*/ 173 + bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq) 174 + { 175 + struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol; 176 + struct ipc_rsp **rsp_ring = ipc_protocol->rsp_ring; 177 + bool msg_processed = false; 178 + u32 i; 179 + 180 + if (le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail) >= 181 + IPC_MEM_MSG_ENTRIES) { 182 + dev_err(ipc_protocol->dev, "msg_tail out of range: %d", 183 + le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)); 184 + return msg_processed; 185 + } 186 + 187 + if (irq != IMEM_IRQ_DONT_CARE && 188 + irq != ipc_protocol->p_ap_shm->ci.msg_irq_vector) 189 + return msg_processed; 190 + 191 + for (i = ipc_protocol->old_msg_tail; 192 + i != le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail); 193 + i = (i + 1) % IPC_MEM_MSG_ENTRIES) { 194 + union ipc_mem_msg_entry *msg = 195 + &ipc_protocol->p_ap_shm->msg_ring[i]; 196 + 197 + dev_dbg(ipc_protocol->dev, "msg[%d]: type=%u status=%d", i, 198 + msg->common.type_of_message, 199 + msg->common.completion_status); 200 + 201 + /* Update response with status and wake up waiting requestor */ 202 + if (rsp_ring[i]) { 203 + rsp_ring[i]->status = 204 + le32_to_cpu(msg->common.completion_status); 205 + complete(&rsp_ring[i]->completion); 206 + rsp_ring[i] = NULL; 207 + } 208 + msg_processed = true; 209 + } 210 + 211 + ipc_protocol->old_msg_tail = i; 212 + return msg_processed; 213 + } 214 + 215 + /* Sends data from UL list to CP for the provided pipe by updating the Head 216 + * pointer of given pipe. 
217 + */ 218 + bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol, 219 + struct ipc_pipe *pipe, 220 + struct sk_buff_head *p_ul_list) 221 + { 222 + struct ipc_protocol_td *td; 223 + bool hpda_pending = false; 224 + struct sk_buff *skb; 225 + s32 free_elements; 226 + u32 head; 227 + u32 tail; 228 + 229 + if (!ipc_protocol->p_ap_shm) { 230 + dev_err(ipc_protocol->dev, "driver is not initialized"); 231 + return false; 232 + } 233 + 234 + /* Get head and tail of the td list and calculate 235 + * the number of free elements. 236 + */ 237 + head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]); 238 + tail = pipe->old_tail; 239 + 240 + while (!skb_queue_empty(p_ul_list)) { 241 + if (head < tail) 242 + free_elements = tail - head - 1; 243 + else 244 + free_elements = 245 + pipe->nr_of_entries - head + ((s32)tail - 1); 246 + 247 + if (free_elements <= 0) { 248 + dev_dbg(ipc_protocol->dev, 249 + "no free td elements for UL pipe %d", 250 + pipe->pipe_nr); 251 + break; 252 + } 253 + 254 + /* Get the td address. */ 255 + td = &pipe->tdr_start[head]; 256 + 257 + /* Take the first element of the uplink list and add it 258 + * to the td list. 259 + */ 260 + skb = skb_dequeue(p_ul_list); 261 + if (WARN_ON(!skb)) 262 + break; 263 + 264 + /* Save the reference to the uplink skbuf. */ 265 + pipe->skbr_start[head] = skb; 266 + 267 + td->buffer.address = IPC_CB(skb)->mapping; 268 + td->scs = cpu_to_le32(skb->len) & cpu_to_le32(SIZE_MASK); 269 + td->next = 0; 270 + 271 + pipe->nr_of_queued_entries++; 272 + 273 + /* Calculate the new head and save it. */ 274 + head++; 275 + if (head >= pipe->nr_of_entries) 276 + head = 0; 277 + 278 + ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 279 + cpu_to_le32(head); 280 + } 281 + 282 + if (pipe->old_head != head) { 283 + dev_dbg(ipc_protocol->dev, "New UL TDs Pipe:%d", pipe->pipe_nr); 284 + 285 + pipe->old_head = head; 286 + /* Trigger doorbell because of pending UL packets. 
*/ 287 + hpda_pending = true; 288 + } 289 + 290 + return hpda_pending; 291 + } 292 + 293 + /* Checks for Tail pointer update from CP and returns the data as SKB. */ 294 + struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol, 295 + struct ipc_pipe *pipe) 296 + { 297 + struct ipc_protocol_td *p_td = &pipe->tdr_start[pipe->old_tail]; 298 + struct sk_buff *skb = pipe->skbr_start[pipe->old_tail]; 299 + 300 + pipe->nr_of_queued_entries--; 301 + pipe->old_tail++; 302 + if (pipe->old_tail >= pipe->nr_of_entries) 303 + pipe->old_tail = 0; 304 + 305 + if (!p_td->buffer.address) { 306 + dev_err(ipc_protocol->dev, "Td buffer address is NULL"); 307 + return NULL; 308 + } 309 + 310 + if (p_td->buffer.address != IPC_CB(skb)->mapping) { 311 + dev_err(ipc_protocol->dev, 312 + "pipe %d: invalid buf_addr or skb_data", 313 + pipe->pipe_nr); 314 + return NULL; 315 + } 316 + 317 + return skb; 318 + } 319 + 320 + /* Allocates an SKB for CP to send data and updates the Head Pointer 321 + * of the given Pipe#. 322 + */ 323 + bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol, 324 + struct ipc_pipe *pipe) 325 + { 326 + struct ipc_protocol_td *td; 327 + dma_addr_t mapping = 0; 328 + u32 head, new_head; 329 + struct sk_buff *skb; 330 + u32 tail; 331 + 332 + /* Get head and tail of the td list and calculate 333 + * the number of free elements. 334 + */ 335 + head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]); 336 + tail = le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]); 337 + 338 + new_head = head + 1; 339 + if (new_head >= pipe->nr_of_entries) 340 + new_head = 0; 341 + 342 + if (new_head == tail) 343 + return false; 344 + 345 + /* Get the td address. */ 346 + td = &pipe->tdr_start[head]; 347 + 348 + /* Allocate the skbuf for the descriptor. 
*/ 349 + skb = ipc_pcie_alloc_skb(ipc_protocol->pcie, pipe->buf_size, GFP_ATOMIC, 350 + &mapping, DMA_FROM_DEVICE, 351 + IPC_MEM_DL_ETH_OFFSET); 352 + if (!skb) 353 + return false; 354 + 355 + td->buffer.address = mapping; 356 + td->scs = cpu_to_le32(pipe->buf_size) & cpu_to_le32(SIZE_MASK); 357 + td->next = 0; 358 + 359 + /* store the new head value. */ 360 + ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 361 + cpu_to_le32(new_head); 362 + 363 + /* Save the reference to the skbuf. */ 364 + pipe->skbr_start[head] = skb; 365 + 366 + pipe->nr_of_queued_entries++; 367 + 368 + return true; 369 + } 370 + 371 + /* Processes DL TD's */ 372 + struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol, 373 + struct ipc_pipe *pipe) 374 + { 375 + u32 tail = 376 + le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]); 377 + struct ipc_protocol_td *p_td; 378 + struct sk_buff *skb; 379 + 380 + if (!pipe->tdr_start) 381 + return NULL; 382 + 383 + /* Copy the reference to the downlink buffer. */ 384 + p_td = &pipe->tdr_start[pipe->old_tail]; 385 + skb = pipe->skbr_start[pipe->old_tail]; 386 + 387 + /* Reset the ring elements. 
*/ 388 + pipe->skbr_start[pipe->old_tail] = NULL; 389 + 390 + pipe->nr_of_queued_entries--; 391 + 392 + pipe->old_tail++; 393 + if (pipe->old_tail >= pipe->nr_of_entries) 394 + pipe->old_tail = 0; 395 + 396 + if (!skb) { 397 + dev_err(ipc_protocol->dev, "skb is null"); 398 + goto ret; 399 + } else if (!p_td->buffer.address) { 400 + dev_err(ipc_protocol->dev, "td/buffer address is null"); 401 + ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); 402 + skb = NULL; 403 + goto ret; 404 + } 405 + 406 + if (!IPC_CB(skb)) { 407 + dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL", 408 + pipe->pipe_nr, tail); 409 + ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); 410 + skb = NULL; 411 + goto ret; 412 + } 413 + 414 + if (p_td->buffer.address != IPC_CB(skb)->mapping) { 415 + dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p", 416 + (void *)p_td->buffer.address, skb->data); 417 + ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); 418 + skb = NULL; 419 + goto ret; 420 + } else if ((le32_to_cpu(p_td->scs) & SIZE_MASK) > pipe->buf_size) { 421 + dev_err(ipc_protocol->dev, "invalid buffer size %d > %d", 422 + le32_to_cpu(p_td->scs) & SIZE_MASK, 423 + pipe->buf_size); 424 + ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); 425 + skb = NULL; 426 + goto ret; 427 + } else if (le32_to_cpu(p_td->scs) >> COMPLETION_STATUS == 428 + IPC_MEM_TD_CS_ABORT) { 429 + /* Discard aborted buffers. */ 430 + dev_dbg(ipc_protocol->dev, "discard 'aborted' buffers"); 431 + ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); 432 + skb = NULL; 433 + goto ret; 434 + } 435 + 436 + /* Set the length field in skbuf. 
*/ 437 + skb_put(skb, le32_to_cpu(p_td->scs) & SIZE_MASK); 438 + 439 + ret: 440 + return skb; 441 + } 442 + 443 + void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol, 444 + struct ipc_pipe *pipe, u32 *head, 445 + u32 *tail) 446 + { 447 + struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm; 448 + 449 + if (head) 450 + *head = le32_to_cpu(ipc_ap_shm->head_array[pipe->pipe_nr]); 451 + 452 + if (tail) 453 + *tail = le32_to_cpu(ipc_ap_shm->tail_array[pipe->pipe_nr]); 454 + } 455 + 456 + /* Frees the TDs given to CP. */ 457 + void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol, 458 + struct ipc_pipe *pipe) 459 + { 460 + struct sk_buff *skb; 461 + u32 head; 462 + u32 tail; 463 + 464 + /* Get the start and the end of the buffer list. */ 465 + head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]); 466 + tail = pipe->old_tail; 467 + 468 + /* Reset tail and head to 0. */ 469 + ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr] = 0; 470 + ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0; 471 + 472 + /* Free pending uplink and downlink buffers. */ 473 + if (pipe->skbr_start) { 474 + while (head != tail) { 475 + /* Get the reference to the skbuf, 476 + * which is on the way and free it. 477 + */ 478 + skb = pipe->skbr_start[tail]; 479 + if (skb) 480 + ipc_pcie_kfree_skb(ipc_protocol->pcie, skb); 481 + 482 + tail++; 483 + if (tail >= pipe->nr_of_entries) 484 + tail = 0; 485 + } 486 + 487 + kfree(pipe->skbr_start); 488 + pipe->skbr_start = NULL; 489 + } 490 + 491 + pipe->old_tail = 0; 492 + 493 + /* Free and reset the td and skbuf circular buffers. kfree is save! 
*/ 494 + if (pipe->tdr_start) { 495 + pci_free_consistent(ipc_protocol->pcie->pci, 496 + sizeof(*pipe->tdr_start) * 497 + pipe->nr_of_entries, 498 + pipe->tdr_start, pipe->phy_tdr_start); 499 + 500 + pipe->tdr_start = NULL; 501 + } 502 + } 503 + 504 + enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol 505 + *ipc_protocol) 506 + { 507 + return (enum ipc_mem_device_ipc_state) 508 + le32_to_cpu(ipc_protocol->p_ap_shm->device_info.ipc_status); 509 + } 510 + 511 + enum ipc_mem_exec_stage 512 + ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol) 513 + { 514 + return le32_to_cpu(ipc_protocol->p_ap_shm->device_info.execution_stage); 515 + } 516 + 517 + int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem, 518 + enum ipc_msg_prep_type msg_type, 519 + union ipc_msg_prep_args *args) 520 + { 521 + struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol; 522 + 523 + switch (msg_type) { 524 + case IPC_MSG_PREP_SLEEP: 525 + return ipc_protocol_msg_prep_sleep(ipc_protocol, args); 526 + 527 + case IPC_MSG_PREP_PIPE_OPEN: 528 + return ipc_protocol_msg_prepipe_open(ipc_protocol, args); 529 + 530 + case IPC_MSG_PREP_PIPE_CLOSE: 531 + return ipc_protocol_msg_prepipe_close(ipc_protocol, args); 532 + 533 + case IPC_MSG_PREP_FEATURE_SET: 534 + return ipc_protocol_msg_prep_feature_set(ipc_protocol, args); 535 + 536 + /* Unsupported messages in protocol */ 537 + case IPC_MSG_PREP_MAP: 538 + case IPC_MSG_PREP_UNMAP: 539 + default: 540 + dev_err(ipc_protocol->dev, 541 + "unsupported message type: %d in protocol", msg_type); 542 + return -EINVAL; 543 + } 544 + } 545 + 546 + u32 547 + ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol *ipc_protocol) 548 + { 549 + struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm; 550 + 551 + return le32_to_cpu(ipc_ap_shm->device_info.device_sleep_notification); 552 + }
+444
drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (C) 2020-21 Intel Corporation.
 */

#ifndef IOSM_IPC_PROTOCOL_OPS_H
#define IOSM_IPC_PROTOCOL_OPS_H

/* Bits 0..23 of a TD's scs word carry the buffer size. */
#define SIZE_MASK 0x00FFFFFF
/* Shift to reach the completion-status byte (bits 24..31) of scs. */
#define COMPLETION_STATUS 24
/* Bit position of the reset-enable flag in the FEATURE_SET message. */
#define RESET_BIT 7

/**
 * enum ipc_mem_td_cs - Completion status of a TD
 * @IPC_MEM_TD_CS_INVALID:		Initial status - td not yet used.
 * @IPC_MEM_TD_CS_PARTIAL_TRANSFER:	More data pending -> next TD used for this
 * @IPC_MEM_TD_CS_END_TRANSFER:		IO transfer is complete.
 * @IPC_MEM_TD_CS_OVERFLOW:		IO transfer too small for the buff to write
 * @IPC_MEM_TD_CS_ABORT:		TD marked as abort and shall be discarded
 *					by AP.
 * @IPC_MEM_TD_CS_ERROR:		General error.
 */
enum ipc_mem_td_cs {
	IPC_MEM_TD_CS_INVALID,
	IPC_MEM_TD_CS_PARTIAL_TRANSFER,
	IPC_MEM_TD_CS_END_TRANSFER,
	IPC_MEM_TD_CS_OVERFLOW,
	IPC_MEM_TD_CS_ABORT,
	IPC_MEM_TD_CS_ERROR,
};

/**
 * enum ipc_mem_msg_cs - Completion status of IPC Message
 * @IPC_MEM_MSG_CS_INVALID:	Initial status.
 * @IPC_MEM_MSG_CS_SUCCESS:	IPC Message completion success.
 * @IPC_MEM_MSG_CS_ERROR:	Message send error.
 */
enum ipc_mem_msg_cs {
	IPC_MEM_MSG_CS_INVALID,
	IPC_MEM_MSG_CS_SUCCESS,
	IPC_MEM_MSG_CS_ERROR,
};

/**
 * struct ipc_msg_prep_args_pipe - struct for pipe args for message preparation
 * @pipe:	Pipe to open/close
 */
struct ipc_msg_prep_args_pipe {
	struct ipc_pipe *pipe;
};

/**
 * struct ipc_msg_prep_args_sleep - struct for sleep args for message
 *				    preparation
 * @target:	0=host, 1=device
 * @state:	0=enter sleep, 1=exit sleep
 */
struct ipc_msg_prep_args_sleep {
	unsigned int target;
	unsigned int state;
};

/**
 * struct ipc_msg_prep_feature_set - struct for feature set argument for
 *				     message preparation
 * @reset_enable:	0=out-of-band, 1=in-band-crash notification
 */
struct ipc_msg_prep_feature_set {
	u8 reset_enable;
};

/**
 * struct ipc_msg_prep_map - struct for map argument for message preparation
 * @region_id:	Region to map
 * @addr:	Pcie addr of region to map
 * @size:	Size of the region to map
 */
struct ipc_msg_prep_map {
	unsigned int region_id;
	unsigned long addr;
	size_t size;
};

/**
 * struct ipc_msg_prep_unmap - struct for unmap argument for message preparation
 * @region_id:	Region to unmap
 */
struct ipc_msg_prep_unmap {
	unsigned int region_id;
};

/**
 * struct ipc_msg_prep_args - Union to handle different message types
 * @pipe_open:		Pipe open message preparation struct
 * @pipe_close:		Pipe close message preparation struct
 * @sleep:		Sleep message preparation struct
 * @feature_set:	Feature set message preparation struct
 * @map:		Memory map message preparation struct
 * @unmap:		Memory unmap message preparation struct
 */
union ipc_msg_prep_args {
	struct ipc_msg_prep_args_pipe pipe_open;
	struct ipc_msg_prep_args_pipe pipe_close;
	struct ipc_msg_prep_args_sleep sleep;
	struct ipc_msg_prep_feature_set feature_set;
	struct ipc_msg_prep_map map;
	struct ipc_msg_prep_unmap unmap;
};

/**
 * enum ipc_msg_prep_type - Enum for message prepare actions
 * @IPC_MSG_PREP_SLEEP:		Sleep message preparation type
 * @IPC_MSG_PREP_PIPE_OPEN:	Pipe open message preparation type
 * @IPC_MSG_PREP_PIPE_CLOSE:	Pipe close message preparation type
 * @IPC_MSG_PREP_FEATURE_SET:	Feature set message preparation type
 * @IPC_MSG_PREP_MAP:		Memory map message preparation type
 * @IPC_MSG_PREP_UNMAP:		Memory unmap message preparation type
 */
enum ipc_msg_prep_type {
	IPC_MSG_PREP_SLEEP,
	IPC_MSG_PREP_PIPE_OPEN,
	IPC_MSG_PREP_PIPE_CLOSE,
	IPC_MSG_PREP_FEATURE_SET,
	IPC_MSG_PREP_MAP,
	IPC_MSG_PREP_UNMAP,
};

/**
 * struct ipc_rsp - Response to sent message
 * @completion:	For waking up requestor
 * @status:	Completion status
 */
struct ipc_rsp {
	struct completion completion;
	enum ipc_mem_msg_cs status;
};

/**
 * enum ipc_mem_msg - Type-definition of the messages.
 * @IPC_MEM_MSG_OPEN_PIPE:	AP ->CP: Open a pipe
 * @IPC_MEM_MSG_CLOSE_PIPE:	AP ->CP: Close a pipe
 * @IPC_MEM_MSG_ABORT_PIPE:	AP ->CP: wait for completion of the
 *				running transfer and abort all pending
 *				IO-transfers for the pipe
 * @IPC_MEM_MSG_SLEEP:		AP ->CP: host enter or exit sleep
 * @IPC_MEM_MSG_FEATURE_SET:	AP ->CP: Intel feature configuration
 */
enum ipc_mem_msg {
	IPC_MEM_MSG_OPEN_PIPE = 0x01,
	IPC_MEM_MSG_CLOSE_PIPE = 0x02,
	IPC_MEM_MSG_ABORT_PIPE = 0x03,
	IPC_MEM_MSG_SLEEP = 0x04,
	IPC_MEM_MSG_FEATURE_SET = 0xF0,
};

/**
 * struct ipc_mem_msg_open_pipe - Message structure for open pipe
 * @tdr_addr:			Tdr address
 * @tdr_entries:		Tdr entries
 * @pipe_nr:			Pipe number
 * @type_of_message:		Message type
 * @irq_vector:			MSI vector number
 * @accumulation_backoff:	Time in usec for data accumulation
 * @completion_status:		Message Completion Status
 */
struct ipc_mem_msg_open_pipe {
	__le64 tdr_addr;
	__le16 tdr_entries;
	u8 pipe_nr;
	u8 type_of_message;
	__le32 irq_vector;
	__le32 accumulation_backoff;
	__le32 completion_status;
};

/**
 * struct ipc_mem_msg_close_pipe - Message structure for close pipe
 * @reserved1:		Reserved
 * @reserved2:		Reserved
 * @pipe_nr:		Pipe number
 * @type_of_message:	Message type
 * @reserved3:		Reserved
 * @reserved4:		Reserved
 * @completion_status:	Message Completion Status
 */
struct ipc_mem_msg_close_pipe {
	__le32 reserved1[2];
	__le16 reserved2;
	u8 pipe_nr;
	u8 type_of_message;
	__le32 reserved3;
	__le32 reserved4;
	__le32 completion_status;
};

/**
 * struct ipc_mem_msg_abort_pipe - Message structure for abort pipe
 * @reserved1:		Reserved
 * @reserved2:		Reserved
 * @pipe_nr:		Pipe number
 * @type_of_message:	Message type
 * @reserved3:		Reserved
 * @reserved4:		Reserved
 * @completion_status:	Message Completion Status
 */
struct ipc_mem_msg_abort_pipe {
	__le32 reserved1[2];
	__le16 reserved2;
	u8 pipe_nr;
	u8 type_of_message;
	__le32 reserved3;
	__le32 reserved4;
	__le32 completion_status;
};

/**
 * struct ipc_mem_msg_host_sleep - Message structure for sleep message.
 * @reserved1:		Reserved
 * @target:		0=host, 1=device, host or EP device
 *			is the message target
 * @state:		0=enter sleep, 1=exit sleep,
 *			2=enter sleep no protocol
 * @reserved2:		Reserved
 * @type_of_message:	Message type
 * @reserved3:		Reserved
 * @reserved4:		Reserved
 * @completion_status:	Message Completion Status
 */
struct ipc_mem_msg_host_sleep {
	__le32 reserved1[2];
	u8 target;
	u8 state;
	u8 reserved2;
	u8 type_of_message;
	__le32 reserved3;
	__le32 reserved4;
	__le32 completion_status;
};

/**
 * struct ipc_mem_msg_feature_set - Message structure for feature_set message
 * @reserved1:		Reserved
 * @reserved2:		Reserved
 * @reset_enable:	0=out-of-band, 1=in-band-crash notification
 * @type_of_message:	Message type
 * @reserved3:		Reserved
 * @reserved4:		Reserved
 * @completion_status:	Message Completion Status
 */
struct ipc_mem_msg_feature_set {
	__le32 reserved1[2];
	__le16 reserved2;
	u8 reset_enable;
	u8 type_of_message;
	__le32 reserved3;
	__le32 reserved4;
	__le32 completion_status;
};

/**
 * struct ipc_mem_msg_common - Message structure for completion status update.
 * @reserved1:		Reserved
 * @reserved2:		Reserved
 * @type_of_message:	Message type
 * @reserved3:		Reserved
 * @reserved4:		Reserved
 * @completion_status:	Message Completion Status
 */
struct ipc_mem_msg_common {
	__le32 reserved1[2];
	u8 reserved2[3];
	u8 type_of_message;
	__le32 reserved3;
	__le32 reserved4;
	__le32 completion_status;
};

/**
 * union ipc_mem_msg_entry - Union with all possible messages.
 * @open_pipe:		Open pipe message struct
 * @close_pipe:		Close pipe message struct
 * @abort_pipe:		Abort pipe message struct
 * @host_sleep:		Host sleep message struct
 * @feature_set:	Feature set message struct
 * @common:		Used to access msg_type and to set the completion status
 */
union ipc_mem_msg_entry {
	struct ipc_mem_msg_open_pipe open_pipe;
	struct ipc_mem_msg_close_pipe close_pipe;
	struct ipc_mem_msg_abort_pipe abort_pipe;
	struct ipc_mem_msg_host_sleep host_sleep;
	struct ipc_mem_msg_feature_set feature_set;
	struct ipc_mem_msg_common common;
};

/* Transfer descriptor definition. */
struct ipc_protocol_td {
	union {
		/*   0 :  63 - 64-bit address of a buffer in host memory. */
		dma_addr_t address;
		struct {
			/*   0 :  31 - 32 bit address */
			__le32 address;
			/*  32 :  63 - corresponding descriptor */
			__le32 desc;
		} __packed shm;
	} buffer;

	/*	0 - 2nd byte - Size of the buffer.
	 *	The host provides the size of the buffer queued.
	 *	The EP device reads this value and shall update
	 *	it for downlink transfers to indicate the
	 *	amount of data written in buffer.
	 *	3rd byte - This field provides the completion status
	 *	of the TD. When queuing the TD, the host sets
	 *	the status to 0. The EP device updates this
	 *	field when completing the TD.
	 */
	__le32 scs;

	/*	0th - nr of following descriptors
	 *	1 - 3rd byte - reserved
	 */
	__le32 next;
} __packed;

/**
 * ipc_protocol_msg_prep - Prepare message based upon message type
 * @ipc_imem:	iosm_protocol instance
 * @msg_type:	message prepare type
 * @args:	message arguments
 *
 * Return: 0 on success and failure value on error
 */
int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
			  enum ipc_msg_prep_type msg_type,
			  union ipc_msg_prep_args *args);

/**
 * ipc_protocol_msg_hp_update - Function for head pointer update
 *				of message ring
 * @ipc_imem:	iosm_protocol instance
 */
void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem);

/**
 * ipc_protocol_msg_process - Function for processing responses
 *			      to IPC messages
 * @ipc_imem:	iosm_protocol instance
 * @irq:	IRQ vector
 *
 * Return:	True on success, false if error
 */
bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq);

/**
 * ipc_protocol_ul_td_send - Function for sending the data to CP
 * @ipc_protocol:	iosm_protocol instance
 * @pipe:		Pipe instance
 * @p_ul_list:		uplink sk_buff list
 *
 * Return: true in success, false in case of error
 */
bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
			     struct ipc_pipe *pipe,
			     struct sk_buff_head *p_ul_list);

/**
 * ipc_protocol_ul_td_process - Function for processing the sent data
 * @ipc_protocol:	iosm_protocol instance
 * @pipe:		Pipe instance
 *
 * Return: sk_buff instance
 */
struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
					   struct ipc_pipe *pipe);

/**
 * ipc_protocol_dl_td_prepare - Function for providing DL TDs to CP
 * @ipc_protocol:	iosm_protocol instance
 * @pipe:		Pipe instance
 *
 * Return: true in success, false in case of error
 */
bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
				struct ipc_pipe *pipe);

/**
 * ipc_protocol_dl_td_process - Function for processing the DL data
 * @ipc_protocol:	iosm_protocol instance
 * @pipe:		Pipe instance
 *
 * Return: sk_buff instance
 */
struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
					   struct ipc_pipe *pipe);

/**
 * ipc_protocol_get_head_tail_index - Function for getting Head and Tail
 *				      pointer index of given pipe
 * @ipc_protocol:	iosm_protocol instance
 * @pipe:		Pipe Instance
 * @head:		head pointer index of the given pipe
 * @tail:		tail pointer index of the given pipe
 */
void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
				      struct ipc_pipe *pipe, u32 *head,
				      u32 *tail);
/**
 * ipc_protocol_get_ipc_status - Function for getting the IPC Status
 * @ipc_protocol:	iosm_protocol instance
 *
 * Return: Returns IPC State
 */
enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
							  *ipc_protocol);

/**
 * ipc_protocol_pipe_cleanup - Function to cleanup pipe resources
 * @ipc_protocol:	iosm_protocol instance
 * @pipe:		Pipe instance
 */
void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
			       struct ipc_pipe *pipe);

/**
 * ipc_protocol_get_ap_exec_stage - Function for getting AP Exec Stage
 * @ipc_protocol:	pointer to struct iosm protocol
 *
 * Return: returns BOOT Stages
 */
enum ipc_mem_exec_stage
ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol);

/**
 * ipc_protocol_pm_dev_get_sleep_notification - Function for getting Dev Sleep
 *						notification
 * @ipc_protocol:	iosm_protocol instance
 *
 * Return: Returns dev PM State
 */
u32 ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol
					       *ipc_protocol);
#endif