Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v5.17-rc3 330 lines 8.8 kB view raw
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Interrupt bottom half (BH).
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/gpio/consumer.h>
#include <net/mac80211.h>

#include "bh.h"
#include "wfx.h"
#include "hwio.h"
#include "traces.h"
#include "hif_rx.h"
#include "hif_api_cmd.h"

/* Wake the chip up by asserting the wake-up GPIO. No-op if no wake-up GPIO is
 * configured or if the GPIO is already asserted. On API >= 1.4, wake-up is
 * acknowledged by the chip through an IRQ (signalled via hif.ctrl_ready); on
 * older API versions we can only wait a fixed delay.
 */
static void device_wakeup(struct wfx_dev *wdev)
{
	int max_retry = 3;

	if (!wdev->pdata.gpio_wakeup)
		return;
	if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0)
		return;

	if (wfx_api_older_than(wdev, 1, 4)) {
		gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
		/* No wake-up IRQ on old firmware: if no control event is
		 * already pending, just give the chip time to wake up.
		 */
		if (!completion_done(&wdev->hif.ctrl_ready))
			usleep_range(2000, 2500);
		return;
	}
	for (;;) {
		gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
		/* completion.h does not provide any function to wait for a
		 * completion without consuming it (a kind of
		 * wait_for_completion_done_timeout()). So we have to emulate
		 * it: consume with a timeout, then re-complete on success so
		 * the event is still visible to the BH.
		 */
		if (wait_for_completion_timeout(&wdev->hif.ctrl_ready,
						msecs_to_jiffies(2))) {
			complete(&wdev->hif.ctrl_ready);
			return;
		} else if (max_retry-- > 0) {
			/* Older firmwares have a race in the sleep/wake-up
			 * process. Redoing the process (deassert, wait,
			 * reassert) is sufficient to unfreeze the chip.
			 */
			dev_err(wdev->dev, "timeout while wake up chip\n");
			gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
			usleep_range(2000, 2500);
		} else {
			dev_err(wdev->dev, "max wake-up retries reached\n");
			return;
		}
	}
}

/* Allow the chip to go back to sleep by deasserting the wake-up GPIO. */
static void device_release(struct wfx_dev *wdev)
{
	if (!wdev->pdata.gpio_wakeup)
		return;

	gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
}

/* Read one HIF message of @read_len bytes from the chip and dispatch it.
 *
 * The bus read is over-sized by 2 bytes so the chip can piggyback the value
 * of the control register after the message; this avoids a separate register
 * read to learn about the next pending message.
 *
 * For confirmation messages, *@is_cnf is incremented and the matching number
 * of chip-side input buffers is released (tx_buffers_used bookkeeping).
 *
 * Returns the piggybacked control-register value on success, -ENOMEM on
 * allocation failure or -EIO on bus/validation error.
 */
static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
{
	struct sk_buff *skb;
	struct hif_msg *hif;
	size_t alloc_len;
	size_t computed_len;
	int release_count;
	int piggyback = 0;

	WARN(read_len > round_down(0xFFF, 2) * sizeof(u16),
	     "%s: request exceed the chip capability", __func__);

	/* Add 2 to take into account piggyback size */
	alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
	skb = dev_alloc_skb(alloc_len);
	if (!skb)
		return -ENOMEM;

	if (wfx_data_read(wdev, skb->data, alloc_len))
		goto err;

	/* The piggybacked control word lives in the last 2 bytes of the
	 * aligned transfer.
	 */
	piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2));
	_trace_piggyback(piggyback, false);

	hif = (struct hif_msg *)skb->data;
	WARN(hif->encrypted & 0x3, "encryption is unsupported");
	if (WARN(read_len < sizeof(struct hif_msg), "corrupted read"))
		goto err;
	/* The length advertised in the HIF header (rounded up to a 16-bit
	 * word) must match what the control register told us to read.
	 */
	computed_len = le16_to_cpu(hif->len);
	computed_len = round_up(computed_len, 2);
	if (computed_len != read_len) {
		dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n",
			computed_len, read_len);
		print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1,
			       hif, read_len, true);
		goto err;
	}

	if (!(hif->id & HIF_ID_IS_INDICATION)) {
		/* Confirmation: release the chip input buffer(s) this message
		 * acknowledges. A multi-transmit confirmation acknowledges
		 * several TX requests at once.
		 */
		(*is_cnf)++;
		if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT)
			release_count = ((struct hif_cnf_multi_transmit *)hif->body)->num_tx_confs;
		else
			release_count = 1;
		WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter");
		wdev->hif.tx_buffers_used -= release_count;
	}

	_trace_hif_recv(hif, wdev->hif.tx_buffers_used);

	/* Exception/error indications do not follow the normal sequence
	 * numbering, so skip the check for them.
	 */
	if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) {
		if (hif->seqnum != wdev->hif.rx_seqnum)
			dev_warn(wdev->dev, "wrong message sequence: %d != %d\n",
				 hif->seqnum, wdev->hif.rx_seqnum);
		wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1);
	}

	skb_put(skb, le16_to_cpu(hif->len));
	/* wfx_handle_rx takes care of the SKB lifetime */
	wfx_handle_rx(wdev, skb);
	if (!wdev->hif.tx_buffers_used)
		wake_up(&wdev->hif.tx_buffers_empty);

	return piggyback;

err:
	if (skb)
		dev_kfree_skb(skb);
	return -EIO;
}

/* Receive up to @max_msg pending messages from the chip.
 *
 * The next message length is taken from the piggybacked control word of the
 * previous read when available, otherwise from the cached control register
 * written by the IRQ handler (consuming the hif.ctrl_ready event).
 *
 * Confirmations received are counted into *@num_cnf. Returns the number of
 * messages actually read. If a message is still pending when the budget is
 * exhausted, its control word is stashed back and hif.ctrl_ready re-raised so
 * the next BH run picks it up.
 */
static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
{
	size_t len;
	int i;
	int ctrl_reg, piggyback;

	piggyback = 0;
	for (i = 0; i < max_msg; i++) {
		if (piggyback & CTRL_NEXT_LEN_MASK)
			ctrl_reg = piggyback;
		else if (try_wait_for_completion(&wdev->hif.ctrl_ready))
			ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0);
		else
			ctrl_reg = 0;
		if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
			return i;
		/* ctrl_reg units are 16bits words */
		len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
		piggyback = rx_helper(wdev, len, num_cnf);
		if (piggyback < 0)
			return i;
		if (!(piggyback & CTRL_WLAN_READY))
			dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n",
				piggyback);
	}
	if (piggyback & CTRL_NEXT_LEN_MASK) {
		/* Budget exhausted with data still pending: save the control
		 * word for the next run. A non-zero previous value means an
		 * IRQ fired in the meantime, which should not happen while
		 * data is still unread.
		 */
		ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback);
		complete(&wdev->hif.ctrl_ready);
		if (ctrl_reg)
			dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n",
				ctrl_reg, piggyback);
	}
	return i;
}

/* Send one HIF message to the chip: stamp the TX sequence number, write the
 * (bus-aligned) buffer and account for the chip input buffer consumed.
 */
static void tx_helper(struct wfx_dev *wdev, struct hif_msg *hif)
{
	int ret;
	void *data;
	/* NOTE(review): is_encrypted is never set true in this function, so
	 * the kfree() at "end:" never runs — looks like leftover from removed
	 * encryption support; confirm before relying on it.
	 */
	bool is_encrypted = false;
	size_t len = le16_to_cpu(hif->len);

	WARN(len < sizeof(*hif), "try to send corrupted data");

	hif->seqnum = wdev->hif.tx_seqnum;
	wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1);

	data = hif;
	WARN(len > wdev->hw_caps.size_inp_ch_buf,
	     "%s: request exceed the chip capability: %zu > %d\n", __func__,
	     len, wdev->hw_caps.size_inp_ch_buf);
	len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
	ret = wfx_data_write(wdev, data, len);
	if (ret)
		goto end;

	wdev->hif.tx_buffers_used++;
	_trace_hif_send(hif, wdev->hif.tx_buffers_used);
end:
	if (is_encrypted)
		kfree(data);
}

/* Send up to @max_msg messages while the chip has free input buffers.
 * A pending command (hif_cmd.ready) is checked before the data queues, so
 * commands take priority over data frames. Returns the number of messages
 * sent.
 */
static int bh_work_tx(struct wfx_dev *wdev, int max_msg)
{
	struct hif_msg *hif;
	int i;

	for (i = 0; i < max_msg; i++) {
		hif = NULL;
		if (wdev->hif.tx_buffers_used < wdev->hw_caps.num_inp_ch_bufs) {
			if (try_wait_for_completion(&wdev->hif_cmd.ready)) {
				WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
				hif = wdev->hif_cmd.buf_send;
			} else {
				hif = wfx_tx_queues_get(wdev);
			}
		}
		if (!hif)
			return i;
		tx_helper(wdev, hif);
	}
	return i;
}

/* In SDIO mode, it is necessary to make an access to a register to
 * acknowledge the last received message. It could be possible to restrict
 * this acknowledge to SDIO mode and only if the last operation was rx.
 */
static void ack_sdio_data(struct wfx_dev *wdev)
{
	u32 cfg_reg;

	config_reg_read(wdev, &cfg_reg);
	if (cfg_reg & 0xFF) {
		dev_warn(wdev->dev, "chip reports errors: %02x\n",
			 cfg_reg & 0xFF);
		config_reg_write_bits(wdev, 0xFF, 0x00);
	}
}

/* Bottom-half work function: wake the chip, then alternate TX and RX passes
 * until both are idle. The chip is released (allowed to sleep) only when no
 * TX buffer is in flight and no further BH work is queued.
 */
static void bh_work(struct work_struct *work)
{
	struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh);
	int stats_req = 0, stats_cnf = 0, stats_ind = 0;
	bool release_chip = false, last_op_is_rx = false;
	int num_tx, num_rx;

	device_wakeup(wdev);
	do {
		num_tx = bh_work_tx(wdev, 32);
		stats_req += num_tx;
		if (num_tx)
			last_op_is_rx = false;
		num_rx = bh_work_rx(wdev, 32, &stats_cnf);
		stats_ind += num_rx;
		if (num_rx)
			last_op_is_rx = true;
	} while (num_rx || num_tx);
	/* bh_work_rx counts every message; subtract confirmations to get the
	 * number of indications.
	 */
	stats_ind -= stats_cnf;

	if (last_op_is_rx)
		ack_sdio_data(wdev);
	if (!wdev->hif.tx_buffers_used && !work_pending(work)) {
		device_release(wdev);
		release_chip = true;
	}
	_trace_bh_stats(stats_ind, stats_req, stats_cnf,
			wdev->hif.tx_buffers_used, release_chip);
}

/* An IRQ from chip did occur: latch the control register, signal
 * hif.ctrl_ready and schedule the BH.
 */
void wfx_bh_request_rx(struct wfx_dev *wdev)
{
	u32 cur, prev;

	control_reg_read(wdev, &cur);
	prev = atomic_xchg(&wdev->hif.ctrl_reg, cur);
	complete(&wdev->hif.ctrl_ready);
	queue_work(system_highpri_wq, &wdev->hif.bh);

	if (!(cur & CTRL_NEXT_LEN_MASK))
		dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n",
			cur);
	if (prev != 0)
		dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n",
			prev, cur);
}

/* Driver wants to send data: just schedule the BH. */
void wfx_bh_request_tx(struct wfx_dev *wdev)
{
	queue_work(system_highpri_wq, &wdev->hif.bh);
}

/* If IRQ is not available, this function allows to manually poll the control
 * register and simulate an IRQ when an event happened.
 *
 * Note that the device has a bug: if an IRQ raises while the host reads the
 * control register, the IRQ is lost. So, use this function carefully (only
 * during device initialisation).
 */
void wfx_bh_poll_irq(struct wfx_dev *wdev)
{
	ktime_t now, start;
	u32 reg;

	WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ");
	start = ktime_get();
	for (;;) {
		control_reg_read(wdev, &reg);
		now = ktime_get();
		if (reg & 0xFFF)
			break;
		/* Give up after 1 second of polling. */
		if (ktime_after(now, ktime_add_ms(start, 1000))) {
			dev_err(wdev->dev, "time out while polling control register\n");
			return;
		}
		udelay(200);
	}
	wfx_bh_request_rx(wdev);
}

/* Initialise the BH work item and its synchronisation primitives. */
void wfx_bh_register(struct wfx_dev *wdev)
{
	INIT_WORK(&wdev->hif.bh, bh_work);
	init_completion(&wdev->hif.ctrl_ready);
	init_waitqueue_head(&wdev->hif.tx_buffers_empty);
}

/* Wait for any in-flight BH work to finish before teardown. */
void wfx_bh_unregister(struct wfx_dev *wdev)
{
	flush_work(&wdev->hif.bh);
}