Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
4 *
5 * Copyright (c) 2010, ST-Ericsson
6 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
7 *
8 * Based on:
9 * ST-Ericsson UMAC CW1200 driver, which is
10 * Copyright (c) 2010, ST-Ericsson
11 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
12 */
13
14#include <linux/module.h>
15#include <net/mac80211.h>
16#include <linux/kthread.h>
17#include <linux/timer.h>
18
19#include "cw1200.h"
20#include "bh.h"
21#include "hwio.h"
22#include "wsm.h"
23#include "hwbus.h"
24#include "debug.h"
25#include "fwio.h"
26
27static int cw1200_bh(void *arg);
28
29#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4)
/* an SPI message cannot be bigger than (2^12 - 1) * 2 bytes
 * "*2" to convert to bytes
 */
33#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2)
34#define PIGGYBACK_CTRL_REG (2)
35#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
36
/* Suspend state privates */
enum cw1200_bh_pm_state {
	CW1200_BH_RESUMED = 0,	/* BH running normally */
	CW1200_BH_SUSPEND,	/* host requested suspend; not yet acked by BH */
	CW1200_BH_SUSPENDED,	/* BH acked and is parked until resume */
	CW1200_BH_RESUME,	/* host requested resume; not yet acked by BH */
};
44
45static void cw1200_bh_work(struct work_struct *work)
46{
47 struct cw1200_common *priv =
48 container_of(work, struct cw1200_common, bh_work);
49 cw1200_bh(priv);
50}
51
52int cw1200_register_bh(struct cw1200_common *priv)
53{
54 int err = 0;
55 /* Realtime workqueue */
56 priv->bh_workqueue = alloc_workqueue("cw1200_bh",
57 WQ_MEM_RECLAIM | WQ_HIGHPRI |
58 WQ_CPU_INTENSIVE | WQ_PERCPU,
59 1);
60
61 if (!priv->bh_workqueue)
62 return -ENOMEM;
63
64 INIT_WORK(&priv->bh_work, cw1200_bh_work);
65
66 pr_debug("[BH] register.\n");
67
68 atomic_set(&priv->bh_rx, 0);
69 atomic_set(&priv->bh_tx, 0);
70 atomic_set(&priv->bh_term, 0);
71 atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
72 priv->bh_error = 0;
73 priv->hw_bufs_used = 0;
74 priv->buf_id_tx = 0;
75 priv->buf_id_rx = 0;
76 init_waitqueue_head(&priv->bh_wq);
77 init_waitqueue_head(&priv->bh_evt_wq);
78
79 err = !queue_work(priv->bh_workqueue, &priv->bh_work);
80 WARN_ON(err);
81 return err;
82}
83
84void cw1200_unregister_bh(struct cw1200_common *priv)
85{
86 atomic_inc(&priv->bh_term);
87 wake_up(&priv->bh_wq);
88
89 destroy_workqueue(priv->bh_workqueue);
90 priv->bh_workqueue = NULL;
91
92 pr_debug("[BH] unregistered.\n");
93}
94
95void cw1200_irq_handler(struct cw1200_common *priv)
96{
97 pr_debug("[BH] irq.\n");
98
99 /* Disable Interrupts! */
100 /* NOTE: hwbus_ops->lock already held */
101 __cw1200_irq_enable(priv, 0);
102
103 if (/* WARN_ON */(priv->bh_error))
104 return;
105
106 if (atomic_inc_return(&priv->bh_rx) == 1)
107 wake_up(&priv->bh_wq);
108}
109EXPORT_SYMBOL_GPL(cw1200_irq_handler);
110
111void cw1200_bh_wakeup(struct cw1200_common *priv)
112{
113 pr_debug("[BH] wakeup.\n");
114 if (priv->bh_error) {
115 pr_err("[BH] wakeup failed (BH error)\n");
116 return;
117 }
118
119 if (atomic_inc_return(&priv->bh_tx) == 1)
120 wake_up(&priv->bh_wq);
121}
122
123int cw1200_bh_suspend(struct cw1200_common *priv)
124{
125 pr_debug("[BH] suspend.\n");
126 if (priv->bh_error) {
127 wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
128 return -EINVAL;
129 }
130
131 atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
132 wake_up(&priv->bh_wq);
133 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
134 (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
135 1 * HZ) ? 0 : -ETIMEDOUT;
136}
137
138int cw1200_bh_resume(struct cw1200_common *priv)
139{
140 pr_debug("[BH] resume.\n");
141 if (priv->bh_error) {
142 wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
143 return -EINVAL;
144 }
145
146 atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
147 wake_up(&priv->bh_wq);
148 return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
149 (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
150 1 * HZ) ? 0 : -ETIMEDOUT;
151}
152
153static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
154{
155 ++priv->hw_bufs_used;
156}
157
158int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
159{
160 int ret = 0;
161 int hw_bufs_used = priv->hw_bufs_used;
162
163 priv->hw_bufs_used -= count;
164 if (WARN_ON(priv->hw_bufs_used < 0))
165 ret = -1;
166 else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
167 ret = 1;
168 if (!priv->hw_bufs_used)
169 wake_up(&priv->bh_evt_wq);
170 return ret;
171}
172
173static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
174 u16 *ctrl_reg)
175{
176 int ret;
177
178 ret = cw1200_reg_read_16(priv,
179 ST90TDS_CONTROL_REG_ID, ctrl_reg);
180 if (ret) {
181 ret = cw1200_reg_read_16(priv,
182 ST90TDS_CONTROL_REG_ID, ctrl_reg);
183 if (ret)
184 pr_err("[BH] Failed to read control register.\n");
185 }
186
187 return ret;
188}
189
/* Kick the device out of sleep.
 *
 * Programs the DPLL for the configured reference clock, sets WLAN_UP in
 * the control register, then reads the control register back to see
 * whether the device is already up.
 *
 * Returns 1 if the device reports WLAN_RDY (awake), 0 if wakeup was
 * initiated but the device is not ready yet (caller must retry later),
 * or a negative errno on register-access failure.
 */
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
	u16 ctrl_reg;
	int ret;

	pr_debug("[BH] Device wakeup.\n");

	/* First, set the dpll register */
	ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
				  cw1200_dpll_from_clk(priv->hw_refclk));
	if (WARN_ON(ret))
		return ret;

	/* To force the device to be always-on, the host sets WLAN_UP to 1 */
	ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
				  ST90TDS_CONT_WUP_BIT);
	if (WARN_ON(ret))
		return ret;

	ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
	if (WARN_ON(ret))
		return ret;

	/* If the device returns WLAN_RDY as 1, the device is active and will
	 * remain active.
	 */
	if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
		pr_debug("[BH] Device awake.\n");
		return 1;
	}

	return 0;
}
223
/* Must be called from BH thread. */
225void cw1200_enable_powersave(struct cw1200_common *priv,
226 bool enable)
227{
228 pr_debug("[BH] Powerave is %s.\n",
229 enable ? "enabled" : "disabled");
230 priv->powersave_enabled = enable;
231}
232
/* Receive one WSM message from the device and dispatch it.
 *
 * On entry *ctrl_reg holds the last-read control register; its low bits
 * encode the length (in 16-bit words) of the next pending message. On
 * exit *ctrl_reg is refreshed from the control word the device
 * piggybacks at the end of every data read, so the caller can loop
 * without an extra register access. *tx is set to 1 when releasing a TX
 * buffer un-throttles transmission.
 *
 * Returns 0 on success or when there is nothing to read; negative on
 * error (the caller treats that as fatal for the BH).
 */
static int cw1200_bh_rx_helper(struct cw1200_common *priv,
			       uint16_t *ctrl_reg,
			       int *tx)
{
	size_t read_len = 0;
	struct sk_buff *skb_rx = NULL;
	struct wsm_hdr *wsm;
	size_t wsm_len;
	u16 wsm_id;
	u8 wsm_seq;
	/* NOTE(review): rx_resync is a local initialized to 1 on every
	 * call, so the "else if (!rx_resync)" sequence check below can
	 * never fire and the trailing "rx_resync = 0" has no effect.
	 * Presumably this flag was meant to persist in priv across
	 * calls -- confirm intent before changing.
	 */
	int rx_resync = 1;

	size_t alloc_len;
	u8 *data;

	/* Length field counts 16-bit words; 0 means no pending data. */
	read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
	if (!read_len)
		return 0; /* No more work */

	if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
		    (read_len > EFFECTIVE_BUF_SIZE))) {
		pr_debug("Invalid read len: %zu (%04x)",
			 read_len, *ctrl_reg);
		goto err;
	}

	/* Add SIZE of PIGGYBACK reg (CONTROL Reg)
	 * to the NEXT Message length + 2 Bytes for SKB
	 */
	read_len = read_len + 2;

	alloc_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, read_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
		pr_debug("Read aligned len: %zu\n",
			 alloc_len);
	}

	skb_rx = dev_alloc_skb(alloc_len);
	if (WARN_ON(!skb_rx))
		goto err;

	skb_trim(skb_rx, 0);
	skb_put(skb_rx, read_len);
	data = skb_rx->data;
	if (WARN_ON(!data))
		goto err;

	if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
		pr_err("rx blew up, len %zu\n", alloc_len);
		goto err;
	}

	/* Piggyback: the last 16-bit word of the transfer is a fresh copy
	 * of the control register.
	 */
	*ctrl_reg = __le16_to_cpu(
		((__le16 *)data)[alloc_len / 2 - 1]);

	wsm = (struct wsm_hdr *)data;
	wsm_len = __le16_to_cpu(wsm->len);
	if (WARN_ON(wsm_len > read_len))
		goto err;

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("<-- ",
				     DUMP_PREFIX_NONE,
				     data, wsm_len);

	/* Header id: bits 0-11 message id, bits 13-15 sequence number. */
	wsm_id = __le16_to_cpu(wsm->id) & 0xFFF;
	wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

	skb_trim(skb_rx, wsm_len);

	if (wsm_id == 0x0800) {
		/* Firmware exception indication: dump payload and bail. */
		wsm_handle_exception(priv,
				     &data[sizeof(*wsm)],
				     wsm_len - sizeof(*wsm));
		goto err;
	} else if (!rx_resync) {
		if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
			goto err;
	}
	priv->wsm_rx_seq = (wsm_seq + 1) & 7;
	rx_resync = 0;

	if (wsm_id & 0x0400) {
		/* TX confirmation: one hardware buffer comes back. */
		int rc = wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(rc < 0)) {
			dev_kfree_skb(skb_rx);
			return rc;
		} else if (rc > 0) {
			*tx = 1;
		}
	}

	/* cw1200_wsm_rx takes care on SKB livetime */
	if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
		goto err;

	dev_kfree_skb(skb_rx);

	return 0;

err:
	dev_kfree_skb(skb_rx);
	return -1;
}
341
/* Transmit one pending WSM frame to the device.
 *
 * If the device is asleep, tries to wake it first; when wakeup fails or
 * is still in progress, *pending_tx is set so the caller retries on the
 * next pass. *tx_burst receives wsm_get_tx()'s estimate of additional
 * frames that could follow back-to-back.
 *
 * Returns 1 if more frames remain (caller should loop), 0 when there is
 * no work (or it was deferred), negative on a fatal error.
 */
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
			       int *pending_tx,
			       int *tx_burst)
{
	size_t tx_len;
	u8 *data;
	int ret;
	struct wsm_hdr *wsm;

	if (priv->device_can_sleep) {
		ret = cw1200_device_wakeup(priv);
		if (WARN_ON(ret < 0)) { /* Error in wakeup */
			*pending_tx = 1;
			return 0;
		} else if (ret) { /* Woke up */
			priv->device_can_sleep = false;
		} else { /* Did not awake */
			*pending_tx = 1;
			return 0;
		}
	}

	/* Claim a hardware buffer before asking the queue for data; it is
	 * released right away below if nothing is queued.
	 */
	wsm_alloc_tx_buffer(priv);
	ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
	if (ret <= 0) {
		wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(ret < 0))
			return ret; /* Error */
		return 0; /* No work */
	}

	wsm = (struct wsm_hdr *)data;
	BUG_ON(tx_len < sizeof(*wsm));
	BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

	atomic_inc(&priv->bh_tx);

	tx_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, tx_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
		pr_debug("Write aligned len: %zu\n", tx_len);

	/* Stamp the outgoing header with the current TX sequence number. */
	wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
	wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

	if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
		pr_err("tx blew up, len %zu\n", tx_len);
		wsm_release_tx_buffer(priv, 1);
		return -1; /* Error */
	}

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("--> ",
				     DUMP_PREFIX_NONE,
				     data,
				     __le16_to_cpu(wsm->len));

	wsm_txed(priv, data);
	priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

	if (*tx_burst > 1) {
		cw1200_debug_tx_burst(priv);
		return 1; /* Work remains */
	}

	return 0;
}
411
/* Bottom-half state machine (runs on the dedicated cw1200_bh workqueue).
 *
 * Loops servicing RX/TX/suspend requests signalled through the
 * bh_rx/bh_tx/bh_term/bh_suspend atomics and bh_wq until termination is
 * requested or a fatal error occurs. On a fatal error (anything other
 * than an explicit terminate) bh_error is set so the rest of the driver
 * can see the BH is dead.
 *
 * Always returns 0 (kthread-style prototype, invoked via cw1200_bh_work).
 */
static int cw1200_bh(void *arg)
{
	struct cw1200_common *priv = arg;
	int rx, tx, term, suspend;
	u16 ctrl_reg = 0;
	int tx_allowed;
	int pending_tx = 0;
	int tx_burst;
	long status;
	u32 dummy;
	int ret;

	for (;;) {
		/* Pick the wait timeout: 1s whenever we must detect lost
		 * interrupts or re-check after a wakedown; otherwise sleep
		 * until explicitly woken.
		 */
		if (!priv->hw_bufs_used &&
		    priv->powersave_enabled &&
		    !priv->device_can_sleep &&
		    !atomic_read(&priv->recent_scan)) {
			status = 1 * HZ;
			pr_debug("[BH] Device wakedown. No data.\n");
			cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
			priv->device_can_sleep = true;
		} else if (priv->hw_bufs_used) {
			/* Interrupt loss detection */
			status = 1 * HZ;
		} else {
			status = MAX_SCHEDULE_TIMEOUT;
		}

		/* Dummy Read for SDIO retry mechanism */
		if ((priv->hw_type != -1) &&
		    (atomic_read(&priv->bh_rx) == 0) &&
		    (atomic_read(&priv->bh_tx) == 0))
			cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
					&dummy, sizeof(dummy));

		pr_debug("[BH] waiting ...\n");
		/* Collect-and-clear all pending requests atomically; the
		 * condition is re-evaluated on every wakeup.
		 */
		status = wait_event_interruptible_timeout(priv->bh_wq, ({
				rx = atomic_xchg(&priv->bh_rx, 0);
				tx = atomic_xchg(&priv->bh_tx, 0);
				term = atomic_xchg(&priv->bh_term, 0);
				suspend = pending_tx ?
					0 : atomic_read(&priv->bh_suspend);
				(rx || tx || term || suspend || priv->bh_error);
			}), status);

		pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
			 rx, tx, term, suspend, priv->bh_error, status);

		/* Did an error occur? */
		if ((status < 0 && status != -ERESTARTSYS) ||
		    term || priv->bh_error) {
			break;
		}
		if (!status) {  /* wait_event timed out */
			unsigned long timestamp = jiffies;
			long timeout;
			int pending = 0;
			int i;

			/* Check to see if we have any outstanding frames */
			if (priv->hw_bufs_used && (!rx || !tx)) {
				wiphy_warn(priv->hw->wiphy,
					   "Missed interrupt? (%d frames outstanding)\n",
					   priv->hw_bufs_used);
				rx = 1;

				/* Get a timestamp of "oldest" frame */
				for (i = 0; i < 4; ++i)
					pending += cw1200_queue_get_xmit_timestamp(
						&priv->tx_queue[i],
						&timestamp,
						priv->pending_frame_id);

				/* Check if frame transmission is timed out.
				 * Add an extra second with respect to possible
				 * interrupt loss.
				 */
				timeout = timestamp +
					WSM_CMD_LAST_CHANCE_TIMEOUT +
					1 * HZ -
					jiffies;

				/* And terminate BH thread if the frame is "stuck" */
				if (pending && timeout < 0) {
					wiphy_warn(priv->hw->wiphy,
						   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
						   priv->hw_bufs_used, pending,
						   timestamp, jiffies);
					break;
				}
			} else if (!priv->device_can_sleep &&
				   !atomic_read(&priv->recent_scan)) {
				pr_debug("[BH] Device wakedown. Timeout.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}
			goto done;
		} else if (suspend) {
			pr_debug("[BH] Device suspend.\n");
			if (priv->powersave_enabled) {
				pr_debug("[BH] Device wakedown. Suspend.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}

			/* Ack the suspend request and park until resumed. */
			atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
			wake_up(&priv->bh_evt_wq);
			status = wait_event_interruptible(priv->bh_wq,
							  CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
			if (status < 0) {
				wiphy_err(priv->hw->wiphy,
					  "Failed to wait for resume: %ld.\n",
					  status);
				break;
			}
			pr_debug("[BH] Device resume.\n");
			atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
			wake_up(&priv->bh_evt_wq);
			/* Force an RX pass right after resume. */
			atomic_inc(&priv->bh_rx);
			goto done;
		}

	rx:
		/* Fold any TX deferred because the device was full back in. */
		tx += pending_tx;
		pending_tx = 0;

		if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
			break;

		/* Don't bother trying to rx unless we have data to read */
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
			ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
			if (ret < 0)
				break;
			/* Double up here if there's more data.. */
			if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
				ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
				if (ret < 0)
					break;
			}
		}

	tx:
		if (tx) {
			tx = 0;

			BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
			tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
			tx_allowed = tx_burst > 0;

			if (!tx_allowed) {
				/* Buffers full. Ensure we process tx
				 * after we handle rx..
				 */
				pending_tx = tx;
				goto done_rx;
			}
			ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
			if (ret < 0)
				break;
			if (ret > 0) /* More to transmit */
				tx = ret;

			/* Re-read ctrl reg */
			if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
				break;
		}

	done_rx:
		if (priv->bh_error)
			break;
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
			goto rx;
		if (tx)
			goto tx;

	done:
		/* Re-enable device interrupts */
		priv->hwbus_ops->lock(priv->hwbus_priv);
		__cw1200_irq_enable(priv, 1);
		priv->hwbus_ops->unlock(priv->hwbus_priv);
	}

	/* Explicitly disable device interrupts */
	priv->hwbus_ops->lock(priv->hwbus_priv);
	__cw1200_irq_enable(priv, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);

	if (!term) {
		pr_err("[BH] Fatal error, exiting.\n");
		priv->bh_error = 1;
		/* TODO: schedule_work(recovery) */
	}
	return 0;
}