Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/firmware.h>
25#include <linux/etherdevice.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/irq.h>
29#include <linux/pm_runtime.h>
30#include <linux/pm_wakeirq.h>
31
32#include "wlcore.h"
33#include "debug.h"
34#include "wl12xx_80211.h"
35#include "io.h"
36#include "tx.h"
37#include "ps.h"
38#include "init.h"
39#include "debugfs.h"
40#include "testmode.h"
41#include "vendor_cmd.h"
42#include "scan.h"
43#include "hw_ops.h"
44#include "sysfs.h"
45
/* Number of times to retry booting the firmware before giving up */
#define WL1271_BOOT_RETRIES 3
#define WL1271_SUSPEND_SLEEP 100
#define WL1271_WAKEUP_TIMEOUT 500

/*
 * Module parameters; -1 (or NULL) means "not set on the command line",
 * in which case wlcore_adjust_conf() leaves the platform defaults alone.
 */
static char *fwlog_param;		/* "continuous"/"dbgpins"/"disable" */
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;	/* BUG() on unintended FW recovery */
static int no_recovery = -1;		/* leave FW stuck instead of recovering */

/* Forward declarations for routines used before their definition below */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60
/*
 * Notify the FW that the STA on this vif has completed association by
 * sending a set-peer-state command (at most once per association).
 * Returns 0 when there is nothing to do, negative error on failure.
 */
static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	/* only valid for station interfaces */
	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
		return -EINVAL;

	/* nothing to report until we are actually associated */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return 0;

	/* atomically mark the state as sent; bail if it already was */
	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
		return 0;

	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
	if (ret < 0)
		return ret;

	wl1271_info("Association completed.");
	return 0;
}
81
82static void wl1271_reg_notify(struct wiphy *wiphy,
83 struct regulatory_request *request)
84{
85 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 struct wl1271 *wl = hw->priv;
87
88 /* copy the current dfs region */
89 if (request)
90 wl->dfs_region = request->dfs_region;
91
92 wlcore_regdomain_config(wl);
93}
94
95static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
96 bool enable)
97{
98 int ret = 0;
99
100 /* we should hold wl->mutex */
101 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
102 if (ret < 0)
103 goto out;
104
105 if (enable)
106 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
107 else
108 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
109out:
110 return ret;
111}
112
/*
 * This function is called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled.
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	int period = wl->conf.rx_streaming.interval;

	/* don't reconfigure if rx_streaming is disabled */
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	/* reconfigure/disable according to new streaming_period */
	if (period &&
	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		ret = wl1271_set_rx_streaming(wl, wlvif, true);
	else {
		ret = wl1271_set_rx_streaming(wl, wlvif, false);
		/* don't cancel_work_sync since we might deadlock */
		del_timer_sync(&wlvif->rx_streaming_timer);
	}
out:
	return ret;
}
140
/*
 * Work item that turns on RX streaming when the conditions still hold
 * (associated, interval configured, soft-gemini or "always" mode), and
 * arms the inactivity timer that will later disable it again.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* bail out if already started or the preconditions no longer hold */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	if (!wl->conf.rx_streaming.interval)
		goto out;

	/* wake the chip via runtime PM; drop the extra ref on failure */
	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
179
180static void wl1271_rx_streaming_disable_work(struct work_struct *work)
181{
182 int ret;
183 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
184 rx_streaming_disable_work);
185 struct wl1271 *wl = wlvif->wl;
186
187 mutex_lock(&wl->mutex);
188
189 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
190 goto out;
191
192 ret = pm_runtime_get_sync(wl->dev);
193 if (ret < 0) {
194 pm_runtime_put_noidle(wl->dev);
195 goto out;
196 }
197
198 ret = wl1271_set_rx_streaming(wl, wlvif, false);
199 if (ret)
200 goto out_sleep;
201
202out_sleep:
203 pm_runtime_mark_last_busy(wl->dev);
204 pm_runtime_put_autosuspend(wl->dev);
205out:
206 mutex_unlock(&wl->mutex);
207}
208
/*
 * Inactivity timer callback: defer the actual disabling to a work item,
 * since FW commands cannot be issued from timer (atomic) context.
 */
static void wl1271_rx_streaming_timer(struct timer_list *t)
{
	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
	struct wl1271 *wl = wlvif->wl;
	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
215
/* wl->mutex must be taken */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{
	/* if the watchdog is not armed, don't do anything */
	if (wl->tx_allocated_blocks == 0)
		return;

	/* restart the delayed work with a full timeout period */
	cancel_delayed_work(&wl->tx_watchdog_work);
	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}
227
/*
 * Work item that pushes an updated rate-control/HT configuration to the
 * FW: for mesh interfaces via an ACX HT-capabilities command, otherwise
 * via the chip-specific sta_rc_update hook.
 */
static void wlcore_rc_update_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rc_update_work);
	struct wl1271 *wl = wlvif->wl;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* wake the chip via runtime PM; drop the extra ref on failure */
	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	if (ieee80211_vif_is_mesh(vif)) {
		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
						     true, wlvif->sta.hlid);
		if (ret < 0)
			goto out_sleep;
	} else {
		wlcore_hw_sta_rc_update(wl, wlvif);
	}

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
262
/*
 * TX watchdog: fires when no TX blocks were freed by the FW for the
 * configured timeout. Re-arms itself for the legitimate "no TX" cases
 * (ROC, scan, AP with sleeping stations); otherwise assumes the FW TX
 * path is stuck and triggers a full recovery.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = to_delayed_work(work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			      wl->conf.tx.tx_watchdog_timeout,
			      wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/* no benign explanation left - assume the FW TX path is stuck */
	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
324
325static void wlcore_adjust_conf(struct wl1271 *wl)
326{
327
328 if (fwlog_param) {
329 if (!strcmp(fwlog_param, "continuous")) {
330 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
331 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
332 } else if (!strcmp(fwlog_param, "dbgpins")) {
333 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
334 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
335 } else if (!strcmp(fwlog_param, "disable")) {
336 wl->conf.fwlog.mem_blocks = 0;
337 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
338 } else {
339 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
340 }
341 }
342
343 if (bug_on_recovery != -1)
344 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
345
346 if (no_recovery != -1)
347 wl->conf.recovery.no_recovery = (u8) no_recovery;
348}
349
/*
 * Decide, per link, whether to enter or leave high-level (host-side)
 * power save, based on the FW PS bitmap and the number of packets the
 * FW still holds for this link.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
378
/*
 * Refresh the cached FW link-PS bitmap from the FW status and re-run
 * PS regulation for every station link of this AP vif.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_vif *wlvif,
					   struct wl_fw_status *status)
{
	unsigned long cur_fw_ps_map;
	u8 hlid;

	cur_fw_ps_map = status->link_ps_bitmap;
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		/* log the transition before updating the cached bitmap */
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    wl->links[hlid].allocated_pkts);
}
400
/*
 * Read and post-process the FW status block: update per-queue and
 * per-link freed-packet counters, the free TX block accounting (and the
 * TX watchdog that depends on it), the host/chipset time offset and the
 * fast-link bitmap. Returns 0 on success or a negative I/O error.
 */
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
	struct wl12xx_vif *wlvif;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	int ret;
	struct wl1271_link *lnk;

	/* fetch the raw status from the chip ... */
	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
				   wl->raw_fw_status,
				   wl->fw_status_len, false);
	if (ret < 0)
		return ret;

	/* ... and let the chip-specific code convert it to wl_fw_status */
	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, wl->num_links) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
		freed_blocks = status->total_released_blks -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       status->total_released_blks;

	wl->tx_blocks_freed = status->total_released_blks;

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = status->tx_total - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status);
	}

	/* update the host-chipset time offset */
	wl->time_offset = (ktime_get_boot_ns() >> 10) -
		(s64)(status->fw_localtime);

	wl->fw_fast_lnk_map = status->link_fast_bitmap;

	return 0;
}
507
508static void wl1271_flush_deferred_work(struct wl1271 *wl)
509{
510 struct sk_buff *skb;
511
512 /* Pass all received frames to the network stack */
513 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
514 ieee80211_rx_ni(wl->hw, skb);
515
516 /* Return sent skbs to the network stack */
517 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
518 ieee80211_tx_status_ni(wl->hw, skb);
519}
520
521static void wl1271_netstack_work(struct work_struct *work)
522{
523 struct wl1271 *wl =
524 container_of(work, struct wl1271, netstack_work);
525
526 do {
527 wl1271_flush_deferred_work(wl);
528 } while (skb_queue_len(&wl->deferred_rx_queue));
529}
530
531#define WL1271_IRQ_MAX_LOOPS 256
532
/*
 * Main interrupt servicing loop, called with wl->mutex held. Reads the
 * FW status, then dispatches data, event and watchdog interrupts until
 * no interrupt bits remain or the loop budget is exhausted. Returns 0,
 * or a negative error (including -EIO on watchdog) which makes the
 * caller start recovery.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* wake the chip via runtime PM; drop the extra ref on failure */
	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_atomic();

		ret = wlcore_fw_status(wl, wl->fw_status);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = wl->fw_status->intr;
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			/* no pending interrupt bits - we are done */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	return ret;
}
663
/*
 * Threaded IRQ handler. Completes any pending ELP wakeup, defers the
 * work when suspended (marking it pending and disabling the IRQ), and
 * otherwise runs the full servicing loop, queueing recovery on error.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
711
/* Accumulator for wl12xx_get_vif_count()/wl12xx_vif_count_iter() */
struct vif_counter_data {
	u8 counter;			/* number of active interfaces seen */

	struct ieee80211_vif *cur_vif;	/* interface the caller asked about */
	bool cur_vif_running;		/* true if cur_vif was among them */
};
718
719static void wl12xx_vif_count_iter(void *data, u8 *mac,
720 struct ieee80211_vif *vif)
721{
722 struct vif_counter_data *counter = data;
723
724 counter->counter++;
725 if (counter->cur_vif == vif)
726 counter->cur_vif_running = true;
727}
728
729/* caller must not hold wl->mutex, as it might deadlock */
730static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
731 struct ieee80211_vif *cur_vif,
732 struct vif_counter_data *data)
733{
734 memset(data, 0, sizeof(*data));
735 data->cur_vif = cur_vif;
736
737 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
738 wl12xx_vif_count_iter, data);
739}
740
/*
 * Select and load the firmware image (PLT, multi-role or single-role)
 * into wl->fw. A no-op if the right type is already loaded. Returns 0
 * on success or a negative error.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	/* the requested type is already loaded - nothing to do */
	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	/* the image is written in 32-bit words; reject unaligned sizes */
	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	/* drop any previously loaded image before copying the new one */
	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
803
/*
 * Schedule the FW recovery work, unless a recovery is already under way
 * (state != ON). Warns if the recovery was not explicitly intended.
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	/* Avoid a recursive recovery */
	if (wl->state == WLCORE_STATE_ON) {
		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
				  &wl->flags));

		wl->state = WLCORE_STATE_RESTARTING;
		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
	}
}
816
817size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
818{
819 size_t len;
820
821 /* Make sure we have enough room */
822 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
823
824 /* Fill the FW log file, consumed by the sysfs fwlog entry */
825 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
826 wl->fwlog_size += len;
827
828 return len;
829}
830
/*
 * After a FW crash, pull the remaining FW log memory blocks out of the
 * chip so the panic log reaches the host before the chip is reset.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 end_of_log = 0;
	int error;

	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
		return;

	wl1271_info("Reading FW panic log");

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	error = pm_runtime_get_sync(wl->dev);
	if (error < 0) {
		pm_runtime_put_noidle(wl->dev);
		return;
	}
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Traverse the memory blocks linked list */
	do {
		end_of_log = wlcore_event_fw_logger(wl);
		if (end_of_log == 0) {
			/* give the FW a moment and retry once per iteration */
			msleep(100);
			end_of_log = wlcore_event_fw_logger(wl);
		}
	} while (end_of_log != 0);
}
864
/*
 * Snapshot the per-link freed-packet counter into the station's private
 * data so the TX sequence number survives a FW recovery; pad it so the
 * new FW does not reuse sequence numbers of frames already sent.
 */
static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   u8 hlid, struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;

	wl_sta = (void *)sta->drv_priv;
	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

	/*
	 * increment the initial seq number on recovery to account for
	 * transmitted packets that we haven't yet got in the FW status
	 */
	if (wlvif->encryption_type == KEY_GEM)
		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;

	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wl_sta->total_freed_pkts += sqn_recovery_padding;
}
884
/*
 * Look up the station by MAC address (under RCU) and, if found, save
 * its freed-packet counter via wlcore_save_freed_pkts(). Silently does
 * nothing when the station is gone; warns on invalid link/address.
 */
static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, const u8 *addr)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
		    is_zero_ether_addr(addr)))
		return;

	/* sta is only valid inside the RCU read-side section */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, addr);
	if (sta)
		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
	rcu_read_unlock();
}
902
/*
 * Log diagnostic information for a recovery: FW version, the FW program
 * counter and the raw interrupt status, read via a temporary switch to
 * the BOOT partition.
 */
static void wlcore_print_recovery(struct wl1271 *wl)
{
	u32 pc = 0;
	u32 hint_sts = 0;
	int ret;

	wl1271_info("Hardware recovery in progress. FW ver: %s",
		    wl->chip.fw_ver_str);

	/* change partitions momentarily so we can read the FW pc */
	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
	if (ret < 0)
		return;

	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
	if (ret < 0)
		return;

	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
		    pc, hint_sts, ++wl->recovery_count);

	/* restore the normal work partition before returning */
	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
930
931
932static void wl1271_recovery_work(struct work_struct *work)
933{
934 struct wl1271 *wl =
935 container_of(work, struct wl1271, recovery_work);
936 struct wl12xx_vif *wlvif;
937 struct ieee80211_vif *vif;
938 int error;
939
940 mutex_lock(&wl->mutex);
941
942 if (wl->state == WLCORE_STATE_OFF || wl->plt)
943 goto out_unlock;
944
945 error = pm_runtime_get_sync(wl->dev);
946 if (error < 0) {
947 wl1271_warning("Enable for recovery failed");
948 pm_runtime_put_noidle(wl->dev);
949 }
950 wlcore_disable_interrupts_nosync(wl);
951
952 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
953 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
954 wl12xx_read_fwlog_panic(wl);
955 wlcore_print_recovery(wl);
956 }
957
958 BUG_ON(wl->conf.recovery.bug_on_recovery &&
959 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
960
961 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
962
963 if (wl->conf.recovery.no_recovery) {
964 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
965 goto out_unlock;
966 }
967
968 /* Prevent spurious TX during FW restart */
969 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970
971 /* reboot the chipset */
972 while (!list_empty(&wl->wlvif_list)) {
973 wlvif = list_first_entry(&wl->wlvif_list,
974 struct wl12xx_vif, list);
975 vif = wl12xx_wlvif_to_vif(wlvif);
976
977 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
978 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
979 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
980 vif->bss_conf.bssid);
981 }
982
983 __wl1271_op_remove_interface(wl, vif, false);
984 }
985
986 wlcore_op_stop_locked(wl);
987 pm_runtime_mark_last_busy(wl->dev);
988 pm_runtime_put_autosuspend(wl->dev);
989
990 ieee80211_restart_hw(wl->hw);
991
992 /*
993 * Its safe to enable TX now - the queues are stopped after a request
994 * to restart the HW.
995 */
996 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
997
998out_unlock:
999 wl->watchdog_recovery = false;
1000 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1001 mutex_unlock(&wl->mutex);
1002}
1003
/* Kick the ELP control register to wake the firmware from low power */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
1008
1009static int wl1271_setup(struct wl1271 *wl)
1010{
1011 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1012 if (!wl->raw_fw_status)
1013 goto err;
1014
1015 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1016 if (!wl->fw_status)
1017 goto err;
1018
1019 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1020 if (!wl->tx_res_if)
1021 goto err;
1022
1023 return 0;
1024err:
1025 kfree(wl->fw_status);
1026 kfree(wl->raw_fw_status);
1027 return -ENOMEM;
1028}
1029
/*
 * Power the chip on, reset and initialize the I/O layer, switch to the
 * BOOT partition and wake the FW from ELP. Powers the chip back off on
 * any failure after power-on. Returns 0 or a negative error.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	/* settle delays around the power transition */
	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}
1058
/*
 * Bring the chip up far enough to boot: power on, configure the bus
 * block size, allocate the status buffers and fetch the right firmware
 * image (PLT or normal). Returns 0 or a negative error.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

out:
	return ret;
}
1094
1095int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1096{
1097 int retries = WL1271_BOOT_RETRIES;
1098 struct wiphy *wiphy = wl->hw->wiphy;
1099
1100 static const char* const PLT_MODE[] = {
1101 "PLT_OFF",
1102 "PLT_ON",
1103 "PLT_FEM_DETECT",
1104 "PLT_CHIP_AWAKE"
1105 };
1106
1107 int ret;
1108
1109 mutex_lock(&wl->mutex);
1110
1111 wl1271_notice("power up");
1112
1113 if (wl->state != WLCORE_STATE_OFF) {
1114 wl1271_error("cannot go into PLT state because not "
1115 "in off state: %d", wl->state);
1116 ret = -EBUSY;
1117 goto out;
1118 }
1119
1120 /* Indicate to lower levels that we are now in PLT mode */
1121 wl->plt = true;
1122 wl->plt_mode = plt_mode;
1123
1124 while (retries) {
1125 retries--;
1126 ret = wl12xx_chip_wakeup(wl, true);
1127 if (ret < 0)
1128 goto power_off;
1129
1130 if (plt_mode != PLT_CHIP_AWAKE) {
1131 ret = wl->ops->plt_init(wl);
1132 if (ret < 0)
1133 goto power_off;
1134 }
1135
1136 wl->state = WLCORE_STATE_ON;
1137 wl1271_notice("firmware booted in PLT mode %s (%s)",
1138 PLT_MODE[plt_mode],
1139 wl->chip.fw_ver_str);
1140
1141 /* update hw/fw version info in wiphy struct */
1142 wiphy->hw_version = wl->chip.id;
1143 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1144 sizeof(wiphy->fw_version));
1145
1146 goto out;
1147
1148power_off:
1149 wl1271_power_off(wl);
1150 }
1151
1152 wl->plt = false;
1153 wl->plt_mode = PLT_OFF;
1154
1155 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1156 WL1271_BOOT_RETRIES);
1157out:
1158 mutex_unlock(&wl->mutex);
1159
1160 return ret;
1161}
1162
/*
 * wl1271_plt_stop - leave PLT mode and power the chip down.
 *
 * Returns 0 on success or -EBUSY if the device is not in PLT mode.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/* flush/cancel pending works with the mutex released */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* power off and reset the software state back to OFF */
	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1212
/*
 * wl1271_op_tx - mac80211 TX entry point.
 *
 * Only queues the skb on the per-link, per-AC software queue here; the
 * actual transfer to the firmware is done later by tx_work.  Frames that
 * cannot be mapped to a valid link, or whose queue is hard-stopped, are
 * dropped and freed back to mac80211.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	/* without a vif the frame cannot be mapped to a role/link */
	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	/* resolve the FW host link id (hlid) for this frame */
	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* account the frame both globally and per-vif */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1285
/*
 * wl1271_tx_dummy_packet - queue the preallocated dummy packet for TX.
 *
 * The FW requests a dummy packet when it runs low on RX memory blocks;
 * at most one may be pending at a time.  Returns 0, or the result of
 * wlcore_tx_work_locked() when the packet is pushed out immediately.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	/* mark the pending flag and account the packet under the TX lock */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1312
1313/*
1314 * The size of the dummy packet should be at least 1400 bytes. However, in
1315 * order to minimize the number of bus transactions, aligning it to 512 bytes
1316 * boundaries could be beneficial, performance wise
1317 */
1318#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1319
1320static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1321{
1322 struct sk_buff *skb;
1323 struct ieee80211_hdr_3addr *hdr;
1324 unsigned int dummy_packet_size;
1325
1326 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1327 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1328
1329 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1330 if (!skb) {
1331 wl1271_warning("Failed to allocate a dummy packet skb");
1332 return NULL;
1333 }
1334
1335 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1336
1337 hdr = skb_put_zero(skb, sizeof(*hdr));
1338 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1339 IEEE80211_STYPE_NULLFUNC |
1340 IEEE80211_FCTL_TODS);
1341
1342 skb_put_zero(skb, dummy_packet_size);
1343
1344 /* Dummy packets require the TID to be management */
1345 skb->priority = WL1271_TID_MGMT;
1346
1347 /* Initialize all fields that might be used */
1348 skb_set_queue_mapping(skb, 0);
1349 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1350
1351 return skb;
1352}
1353
1354
1355static int
1356wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1357{
1358 int num_fields = 0, in_field = 0, fields_size = 0;
1359 int i, pattern_len = 0;
1360
1361 if (!p->mask) {
1362 wl1271_warning("No mask in WoWLAN pattern");
1363 return -EINVAL;
1364 }
1365
1366 /*
1367 * The pattern is broken up into segments of bytes at different offsets
1368 * that need to be checked by the FW filter. Each segment is called
1369 * a field in the FW API. We verify that the total number of fields
1370 * required for this pattern won't exceed FW limits (8)
1371 * as well as the total fields buffer won't exceed the FW limit.
1372 * Note that if there's a pattern which crosses Ethernet/IP header
1373 * boundary a new field is required.
1374 */
1375 for (i = 0; i < p->pattern_len; i++) {
1376 if (test_bit(i, (unsigned long *)p->mask)) {
1377 if (!in_field) {
1378 in_field = 1;
1379 pattern_len = 1;
1380 } else {
1381 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1382 num_fields++;
1383 fields_size += pattern_len +
1384 RX_FILTER_FIELD_OVERHEAD;
1385 pattern_len = 1;
1386 } else
1387 pattern_len++;
1388 }
1389 } else {
1390 if (in_field) {
1391 in_field = 0;
1392 fields_size += pattern_len +
1393 RX_FILTER_FIELD_OVERHEAD;
1394 num_fields++;
1395 }
1396 }
1397 }
1398
1399 if (in_field) {
1400 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1401 num_fields++;
1402 }
1403
1404 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1405 wl1271_warning("RX Filter too complex. Too many segments");
1406 return -EINVAL;
1407 }
1408
1409 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1410 wl1271_warning("RX filter pattern is too big");
1411 return -E2BIG;
1412 }
1413
1414 return 0;
1415}
1416
1417struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1418{
1419 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1420}
1421
1422void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1423{
1424 int i;
1425
1426 if (filter == NULL)
1427 return;
1428
1429 for (i = 0; i < filter->num_fields; i++)
1430 kfree(filter->fields[i].pattern);
1431
1432 kfree(filter);
1433}
1434
1435int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1436 u16 offset, u8 flags,
1437 const u8 *pattern, u8 len)
1438{
1439 struct wl12xx_rx_filter_field *field;
1440
1441 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1442 wl1271_warning("Max fields per RX filter. can't alloc another");
1443 return -EINVAL;
1444 }
1445
1446 field = &filter->fields[filter->num_fields];
1447
1448 field->pattern = kzalloc(len, GFP_KERNEL);
1449 if (!field->pattern) {
1450 wl1271_warning("Failed to allocate RX filter pattern");
1451 return -ENOMEM;
1452 }
1453
1454 filter->num_fields++;
1455
1456 field->offset = cpu_to_le16(offset);
1457 field->flags = flags;
1458 field->len = len;
1459 memcpy(field->pattern, pattern, len);
1460
1461 return 0;
1462}
1463
1464int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1465{
1466 int i, fields_size = 0;
1467
1468 for (i = 0; i < filter->num_fields; i++)
1469 fields_size += filter->fields[i].len +
1470 sizeof(struct wl12xx_rx_filter_field) -
1471 sizeof(u8 *);
1472
1473 return fields_size;
1474}
1475
/*
 * Serialize all fields of @filter into @buf in the flat format the FW
 * expects.  For each field the struct is written with the trailing
 * pattern pointer replaced in place by the inline pattern bytes.
 * @buf must hold at least wl1271_rx_filter_get_fields_size() bytes.
 */
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
				     u8 *buf)
{
	int i;
	struct wl12xx_rx_filter_field *field;

	for (i = 0; i < filter->num_fields; i++) {
		field = (struct wl12xx_rx_filter_field *)buf;

		field->offset = filter->fields[i].offset;
		field->flags = filter->fields[i].flags;
		field->len = filter->fields[i].len;

		/* copy the pattern bytes over the pointer's slot */
		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
		/* advance past the header (minus the pointer) + pattern */
		buf += sizeof(struct wl12xx_rx_filter_field) -
		       sizeof(u8 *) + field->len;
	}
}
1494
1495/*
1496 * Allocates an RX filter returned through f
1497 * which needs to be freed using rx_filter_free()
1498 */
1499static int
1500wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1501 struct wl12xx_rx_filter **f)
1502{
1503 int i, j, ret = 0;
1504 struct wl12xx_rx_filter *filter;
1505 u16 offset;
1506 u8 flags, len;
1507
1508 filter = wl1271_rx_filter_alloc();
1509 if (!filter) {
1510 wl1271_warning("Failed to alloc rx filter");
1511 ret = -ENOMEM;
1512 goto err;
1513 }
1514
1515 i = 0;
1516 while (i < p->pattern_len) {
1517 if (!test_bit(i, (unsigned long *)p->mask)) {
1518 i++;
1519 continue;
1520 }
1521
1522 for (j = i; j < p->pattern_len; j++) {
1523 if (!test_bit(j, (unsigned long *)p->mask))
1524 break;
1525
1526 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1527 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1528 break;
1529 }
1530
1531 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1532 offset = i;
1533 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1534 } else {
1535 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1536 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1537 }
1538
1539 len = j - i;
1540
1541 ret = wl1271_rx_filter_alloc_field(filter,
1542 offset,
1543 flags,
1544 &p->pattern[i], len);
1545 if (ret)
1546 goto err;
1547
1548 i = j;
1549 }
1550
1551 filter->action = FILTER_SIGNAL;
1552
1553 *f = filter;
1554 return 0;
1555
1556err:
1557 wl1271_rx_filter_free(filter);
1558 *f = NULL;
1559
1560 return ret;
1561}
1562
/*
 * Program the FW RX filters from a WoWLAN configuration.
 *
 * With no patterns (or wow->any), the default filter is reset to
 * FILTER_SIGNAL and all pattern filters are cleared.  Otherwise all
 * patterns are validated first, the current FW filter state is cleared,
 * each pattern is installed as a filter, and the default action is set
 * to FILTER_DROP so only matching frames wake the host.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		/* the FW keeps its own copy; ours can be freed right away */
		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* drop everything that does not match one of the filters */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1627
1628static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1629 struct wl12xx_vif *wlvif,
1630 struct cfg80211_wowlan *wow)
1631{
1632 int ret = 0;
1633
1634 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1635 goto out;
1636
1637 ret = wl1271_configure_wowlan(wl, wow);
1638 if (ret < 0)
1639 goto out;
1640
1641 if ((wl->conf.conn.suspend_wake_up_event ==
1642 wl->conf.conn.wake_up_event) &&
1643 (wl->conf.conn.suspend_listen_interval ==
1644 wl->conf.conn.listen_interval))
1645 goto out;
1646
1647 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1648 wl->conf.conn.suspend_wake_up_event,
1649 wl->conf.conn.suspend_listen_interval);
1650
1651 if (ret < 0)
1652 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1653out:
1654 return ret;
1655
1656}
1657
1658static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1659 struct wl12xx_vif *wlvif,
1660 struct cfg80211_wowlan *wow)
1661{
1662 int ret = 0;
1663
1664 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1665 goto out;
1666
1667 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1668 if (ret < 0)
1669 goto out;
1670
1671 ret = wl1271_configure_wowlan(wl, wow);
1672 if (ret < 0)
1673 goto out;
1674
1675out:
1676 return ret;
1677
1678}
1679
1680static int wl1271_configure_suspend(struct wl1271 *wl,
1681 struct wl12xx_vif *wlvif,
1682 struct cfg80211_wowlan *wow)
1683{
1684 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1685 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1686 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1687 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1688 return 0;
1689}
1690
/*
 * Undo the suspend-time configuration of a vif after resume: clear the
 * WoWLAN filters, restore the normal wake-up conditions (STA) or
 * disable beacon filtering (AP).  Errors are logged but not propagated.
 */
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* only STA and AP roles were configured on suspend */
	if ((!is_ap) && (!is_sta))
		return;

	/* nothing to undo if the role was not active */
	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
		return;

	wl1271_configure_wowlan(wl, NULL);

	if (is_sta) {
		/* skip if suspend never changed the wake-up conditions */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			return;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}
}
1725
/*
 * wl1271_op_suspend - mac80211 suspend handler.
 *
 * Flushes TX, configures each vif for WoWLAN, disables fast-link
 * notifications and (optionally) RX BA frames in the FW, cancels
 * pending works, marks the driver suspended so the threaded irq does
 * not queue new work, and finally force-suspends the device.
 * Returns 0 on success, -EBUSY while a recovery is in progress, or a
 * negative error code.
 */
static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
					    struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		mutex_unlock(&wl->mutex);
		return ret;
	}

	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		/* p2p-mgmt vifs carry no connection state to configure */
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}

	/* disable fast link flow control notifications from FW */
	ret = wlcore_hw_interrupt_notify(wl, false);
	if (ret < 0)
		goto out_sleep;

	/* if filtering is enabled, configure the FW to drop all RX BA frames */
	ret = wlcore_hw_rx_ba_filter(wl,
				     !!wl->conf.conn.suspend_rx_ba_activity);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	/* reached on success too; ret distinguishes the two cases below */
	pm_runtime_put_noidle(wl->dev);
	mutex_unlock(&wl->mutex);

	if (ret < 0) {
		wl1271_warning("couldn't prepare device to suspend");
		return ret;
	}

	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	flush_work(&wl->tx_work);

	/*
	 * Cancel the watchdog even if above tx_flush failed. We will detect
	 * it on resume anyway.
	 */
	cancel_delayed_work(&wl->tx_watchdog_work);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return pm_runtime_force_suspend(wl->dev);
}
1808
/*
 * wl1271_op_resume - mac80211 resume handler.
 *
 * Wakes the device, re-enables irq-work queueing (running any work that
 * was postponed while suspended), requeues a recovery that fired during
 * suspend, and restores the pre-suspend FW configuration for each vif.
 * Always returns 0; internal failures fall back to recovery paths.
 */
static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	ret = pm_runtime_force_resume(wl->dev);
	if (ret < 0) {
		wl1271_error("ELP wakeup failure!");
		goto out_sleep;
	}

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		/* balance the disable done when the irq was postponed */
		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out_sleep;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		/* p2p-mgmt vifs were skipped on suspend as well */
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		wl1271_configure_resume(wl, wlvif);
	}

	/* re-enable the fast link notifications disabled on suspend */
	ret = wlcore_hw_interrupt_notify(wl, true);
	if (ret < 0)
		goto out_sleep;

	/* if filtering is enabled, configure the FW to drop all RX BA frames */
	ret = wlcore_hw_rx_ba_filter(wl, false);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	wl->wow_enabled = false;

	/*
	 * Set a flag to re-init the watchdog on the first Tx after resume.
	 * That way we avoid possible conditions where Tx-complete interrupts
	 * fail to arrive and we perform a spurious recovery.
	 */
	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
	mutex_unlock(&wl->mutex);

	return 0;
}
1902
1903static int wl1271_op_start(struct ieee80211_hw *hw)
1904{
1905 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1906
1907 /*
1908 * We have to delay the booting of the hardware because
1909 * we need to know the local MAC address before downloading and
1910 * initializing the firmware. The MAC address cannot be changed
1911 * after boot, and without the proper MAC address, the firmware
1912 * will not function properly.
1913 *
1914 * The MAC address is first known when the corresponding interface
1915 * is added. That is where we will initialize the hardware.
1916 */
1917
1918 return 0;
1919}
1920
/*
 * Stop the device and reset all software state back to the OFF defaults.
 * Called with wl->mutex held; the mutex is dropped and re-taken around
 * the work cancellation to avoid deadlocks with works that take it.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	/* already off: just balance a recovery's interrupt disable */
	if (wl->state == WLCORE_STATE_OFF) {
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	wlcore_synchronize_interrupts(wl);
	/* recovery_work itself calls op_stop, so don't self-cancel */
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all runtime state to the power-on defaults */
	wl->band = NL80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* release buffers that are re-allocated on the next boot */
	kfree(wl->raw_fw_status);
	wl->raw_fw_status = NULL;
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * save current Reg-Domain channel configuration and clear it.
	 */
	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
	       sizeof(wl->reg_ch_conf_pending));
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
2028
2029static void wlcore_op_stop(struct ieee80211_hw *hw)
2030{
2031 struct wl1271 *wl = hw->priv;
2032
2033 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2034
2035 mutex_lock(&wl->mutex);
2036
2037 wlcore_op_stop_locked(wl);
2038
2039 mutex_unlock(&wl->mutex);
2040}
2041
/*
 * Delayed work that fires when a channel switch did not complete in
 * time: report the failure to mac80211 and tell the FW to stop the
 * channel switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = to_delayed_work(work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	/* report the failed switch to mac80211 */
	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_chswitch_done(vif, false);

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
2081
/*
 * Delayed work that reports a lost connection to mac80211 once the
 * beacon-loss grace period has expired and the STA is still associated.
 */
static void wlcore_connection_loss_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;

	dwork = to_delayed_work(work);
	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
	wl = wlvif->wl;

	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Call mac80211 connection loss */
	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	ieee80211_connection_loss(vif);
out:
	mutex_unlock(&wl->mutex);
}
2109
/*
 * Delayed work that cancels the remain-on-channel kept for a pending
 * station authentication, once the ROC timeout has truly elapsed since
 * the last auth reply.
 */
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	unsigned long time_spare;
	int ret;

	dwork = to_delayed_work(work);
	wlvif = container_of(dwork, struct wl12xx_vif,
			     pending_auth_complete_work);
	wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/*
	 * Make sure a second really passed since the last auth reply. Maybe
	 * a second auth reply arrived while we were stuck on the mutex.
	 * Check for a little less than the timeout to protect from scheduler
	 * irregularities.
	 */
	time_spare = jiffies +
			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/* cancel the ROC if active */
	wlcore_update_inconn_sta(wl, wlvif, NULL, false);

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
2153
2154static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2155{
2156 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2157 WL12XX_MAX_RATE_POLICIES);
2158 if (policy >= WL12XX_MAX_RATE_POLICIES)
2159 return -EBUSY;
2160
2161 __set_bit(policy, wl->rate_policies_map);
2162 *idx = policy;
2163 return 0;
2164}
2165
2166static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2167{
2168 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2169 return;
2170
2171 __clear_bit(*idx, wl->rate_policies_map);
2172 *idx = WL12XX_MAX_RATE_POLICIES;
2173}
2174
2175static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2176{
2177 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2178 WLCORE_MAX_KLV_TEMPLATES);
2179 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2180 return -EBUSY;
2181
2182 __set_bit(policy, wl->klv_templates_map);
2183 *idx = policy;
2184 return 0;
2185}
2186
2187static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2188{
2189 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2190 return;
2191
2192 __clear_bit(*idx, wl->klv_templates_map);
2193 *idx = WLCORE_MAX_KLV_TEMPLATES;
2194}
2195
2196static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2197{
2198 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2199
2200 switch (wlvif->bss_type) {
2201 case BSS_TYPE_AP_BSS:
2202 if (wlvif->p2p)
2203 return WL1271_ROLE_P2P_GO;
2204 else if (ieee80211_vif_is_mesh(vif))
2205 return WL1271_ROLE_MESH_POINT;
2206 else
2207 return WL1271_ROLE_AP;
2208
2209 case BSS_TYPE_STA_BSS:
2210 if (wlvif->p2p)
2211 return WL1271_ROLE_P2P_CL;
2212 else
2213 return WL1271_ROLE_STA;
2214
2215 case BSS_TYPE_IBSS:
2216 return WL1271_ROLE_IBSS;
2217
2218 default:
2219 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2220 }
2221 return WL12XX_INVALID_ROLE_TYPE;
2222}
2223
/*
 * Initialize the per-vif driver data for a newly added interface:
 * derive the bss_type from the mac80211 interface type, allocate rate
 * policies and (for STA/IBSS) a keep-alive template, copy the global
 * defaults, and set up the per-vif works and timer.
 * Returns 0 on success or -EOPNOTSUPP for an unsupported iftype.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_DEVICE:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* no FW roles/links assigned yet */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	/* per-vif works and the RX streaming timer */
	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
			  wlcore_pending_auth_complete_work);
	INIT_LIST_HEAD(&wlvif->list);

	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
	return 0;
}
2319
2320static int wl12xx_init_fw(struct wl1271 *wl)
2321{
2322 int retries = WL1271_BOOT_RETRIES;
2323 bool booted = false;
2324 struct wiphy *wiphy = wl->hw->wiphy;
2325 int ret;
2326
2327 while (retries) {
2328 retries--;
2329 ret = wl12xx_chip_wakeup(wl, false);
2330 if (ret < 0)
2331 goto power_off;
2332
2333 ret = wl->ops->boot(wl);
2334 if (ret < 0)
2335 goto power_off;
2336
2337 ret = wl1271_hw_init(wl);
2338 if (ret < 0)
2339 goto irq_disable;
2340
2341 booted = true;
2342 break;
2343
2344irq_disable:
2345 mutex_unlock(&wl->mutex);
2346 /* Unlocking the mutex in the middle of handling is
2347 inherently unsafe. In this case we deem it safe to do,
2348 because we need to let any possibly pending IRQ out of
2349 the system (and while we are WLCORE_STATE_OFF the IRQ
2350 work function will not do anything.) Also, any other
2351 possible concurrent operations will fail due to the
2352 current state, hence the wl1271 struct should be safe. */
2353 wlcore_disable_interrupts(wl);
2354 wl1271_flush_deferred_work(wl);
2355 cancel_work_sync(&wl->netstack_work);
2356 mutex_lock(&wl->mutex);
2357power_off:
2358 wl1271_power_off(wl);
2359 }
2360
2361 if (!booted) {
2362 wl1271_error("firmware boot failed despite %d retries",
2363 WL1271_BOOT_RETRIES);
2364 goto out;
2365 }
2366
2367 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2368
2369 /* update hw/fw version info in wiphy struct */
2370 wiphy->hw_version = wl->chip.id;
2371 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2372 sizeof(wiphy->fw_version));
2373
2374 /*
2375 * Now we know if 11a is supported (info from the NVS), so disable
2376 * 11a channels if not supported
2377 */
2378 if (!wl->enable_11a)
2379 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2380
2381 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2382 wl->enable_11a ? "" : "not ");
2383
2384 wl->state = WLCORE_STATE_ON;
2385out:
2386 return ret;
2387}
2388
2389static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2390{
2391 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2392}
2393
2394/*
2395 * Check whether a fw switch (i.e. moving from one loaded
2396 * fw to another) is needed. This function is also responsible
2397 * for updating wl->last_vif_count, so it must be called before
2398 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2399 * will be used).
2400 */
2401static bool wl12xx_need_fw_change(struct wl1271 *wl,
2402 struct vif_counter_data vif_counter_data,
2403 bool add)
2404{
2405 enum wl12xx_fw_type current_fw = wl->fw_type;
2406 u8 vif_count = vif_counter_data.counter;
2407
2408 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2409 return false;
2410
2411 /* increase the vif count if this is a new vif */
2412 if (add && !vif_counter_data.cur_vif_running)
2413 vif_count++;
2414
2415 wl->last_vif_count = vif_count;
2416
2417 /* no need for fw change if the device is OFF */
2418 if (wl->state == WLCORE_STATE_OFF)
2419 return false;
2420
2421 /* no need for fw change if a single fw is used */
2422 if (!wl->mr_fw_name)
2423 return false;
2424
2425 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2426 return true;
2427 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2428 return true;
2429
2430 return false;
2431}
2432
2433/*
2434 * Enter "forced psm". Make sure the sta is in psm against the ap,
2435 * to make the fw switch a bit more disconnection-persistent.
2436 */
2437static void wl12xx_force_active_psm(struct wl1271 *wl)
2438{
2439 struct wl12xx_vif *wlvif;
2440
2441 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2442 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2443 }
2444}
2445
/* State shared with wlcore_hw_queue_iter() while scanning the active
 * interfaces for hw-queue bases already in use. */
struct wlcore_hw_queue_iter_data {
	/* bitmap of hw-queue bases taken by other interfaces */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2453
2454static void wlcore_hw_queue_iter(void *data, u8 *mac,
2455 struct ieee80211_vif *vif)
2456{
2457 struct wlcore_hw_queue_iter_data *iter_data = data;
2458
2459 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2460 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2461 return;
2462
2463 if (iter_data->cur_running || vif == iter_data->vif) {
2464 iter_data->cur_running = true;
2465 return;
2466 }
2467
2468 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2469}
2470
/*
 * Pick a free hw-queue base for @wlvif and register its queues in
 * mac80211, reusing a pre-allocated base on resume/recovery.
 *
 * Returns 0 on success, -EBUSY when all queue bases are taken.
 */
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
					 struct wl12xx_vif *wlvif)
{
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct wlcore_hw_queue_iter_data iter_data = {};
	int i, q_base;

	/* P2P device interfaces use no data queues at all */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
		return 0;
	}

	iter_data.vif = vif;

	/* mark all bits taken by active interfaces */
	ieee80211_iterate_active_interfaces_atomic(wl->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					wlcore_hw_queue_iter, &iter_data);

	/* the current vif is already running in mac80211 (resume/recovery) */
	if (iter_data.cur_running) {
		wlvif->hw_queue_base = vif->hw_queue[0];
		wl1271_debug(DEBUG_MAC80211,
			     "using pre-allocated hw queue base %d",
			     wlvif->hw_queue_base);

		/* interface type might have changed type */
		goto adjust_cab_queue;
	}

	q_base = find_first_zero_bit(iter_data.hw_queue_map,
				     WLCORE_NUM_MAC_ADDRESSES);
	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
		return -EBUSY;

	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
		     wlvif->hw_queue_base);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
		/* register hw queues in mac80211 */
		vif->hw_queue[i] = wlvif->hw_queue_base + i;
	}

adjust_cab_queue:
	/* the last places are reserved for cab queues per interface */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
				 wlvif->hw_queue_base / NUM_TX_QUEUES;
	else
		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;

	return 0;
}
2526
2527static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2528 struct ieee80211_vif *vif)
2529{
2530 struct wl1271 *wl = hw->priv;
2531 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2532 struct vif_counter_data vif_count;
2533 int ret = 0;
2534 u8 role_type;
2535
2536 if (wl->plt) {
2537 wl1271_error("Adding Interface not allowed while in PLT mode");
2538 return -EBUSY;
2539 }
2540
2541 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2542 IEEE80211_VIF_SUPPORTS_UAPSD |
2543 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2544
2545 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2546 ieee80211_vif_type_p2p(vif), vif->addr);
2547
2548 wl12xx_get_vif_count(hw, vif, &vif_count);
2549
2550 mutex_lock(&wl->mutex);
2551
2552 /*
2553 * in some very corner case HW recovery scenarios its possible to
2554 * get here before __wl1271_op_remove_interface is complete, so
2555 * opt out if that is the case.
2556 */
2557 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2558 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2559 ret = -EBUSY;
2560 goto out;
2561 }
2562
2563
2564 ret = wl12xx_init_vif_data(wl, vif);
2565 if (ret < 0)
2566 goto out;
2567
2568 wlvif->wl = wl;
2569 role_type = wl12xx_get_role_type(wl, wlvif);
2570 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2571 ret = -EINVAL;
2572 goto out;
2573 }
2574
2575 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2576 if (ret < 0)
2577 goto out;
2578
2579 /*
2580 * TODO: after the nvs issue will be solved, move this block
2581 * to start(), and make sure here the driver is ON.
2582 */
2583 if (wl->state == WLCORE_STATE_OFF) {
2584 /*
2585 * we still need this in order to configure the fw
2586 * while uploading the nvs
2587 */
2588 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2589
2590 ret = wl12xx_init_fw(wl);
2591 if (ret < 0)
2592 goto out;
2593 }
2594
2595 /*
2596 * Call runtime PM only after possible wl12xx_init_fw() above
2597 * is done. Otherwise we do not have interrupts enabled.
2598 */
2599 ret = pm_runtime_get_sync(wl->dev);
2600 if (ret < 0) {
2601 pm_runtime_put_noidle(wl->dev);
2602 goto out_unlock;
2603 }
2604
2605 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2606 wl12xx_force_active_psm(wl);
2607 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2608 mutex_unlock(&wl->mutex);
2609 wl1271_recovery_work(&wl->recovery_work);
2610 return 0;
2611 }
2612
2613 if (!wlcore_is_p2p_mgmt(wlvif)) {
2614 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2615 role_type, &wlvif->role_id);
2616 if (ret < 0)
2617 goto out;
2618
2619 ret = wl1271_init_vif_specific(wl, vif);
2620 if (ret < 0)
2621 goto out;
2622
2623 } else {
2624 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2625 &wlvif->dev_role_id);
2626 if (ret < 0)
2627 goto out;
2628
2629 /* needed mainly for configuring rate policies */
2630 ret = wl1271_sta_hw_init(wl, wlvif);
2631 if (ret < 0)
2632 goto out;
2633 }
2634
2635 list_add(&wlvif->list, &wl->wlvif_list);
2636 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2637
2638 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2639 wl->ap_count++;
2640 else
2641 wl->sta_count++;
2642out:
2643 pm_runtime_mark_last_busy(wl->dev);
2644 pm_runtime_put_autosuspend(wl->dev);
2645out_unlock:
2646 mutex_unlock(&wl->mutex);
2647
2648 return ret;
2649}
2650
/*
 * Tear down @vif: abort any scan it owns, disable its fw roles, free
 * its rate policies/keys, and drop it from wl->wlvif_list.
 *
 * Called with wl->mutex held; the mutex is dropped while the vif's
 * works are cancelled at the end and re-taken before returning.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, &info);
	}

	if (wl->sched_vif == wlvif)
		wl->sched_vif = NULL;

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = pm_runtime_get_sync(wl->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(wl->dev);
			goto deinit;
		}

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		if (!wlcore_is_p2p_mgmt(wlvif)) {
			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
			if (ret < 0)
				goto deinit;
		} else {
			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
			if (ret < 0)
				goto deinit;
		}

		pm_runtime_mark_last_busy(wl->dev);
		pm_runtime_put_autosuspend(wl->dev);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap) {
		/* mask ap events */
		wl->event_mask &= ~wl->ap_event_mask;
		wl1271_event_unmask(wl);
	}

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	/* drop the lock while cancelling works that may take it themselves */
	mutex_unlock(&wl->mutex);

	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_work_sync(&wlvif->rc_update_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);
	cancel_delayed_work_sync(&wlvif->channel_switch_work);
	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);

	mutex_lock(&wl->mutex);
}
2799
/*
 * mac80211 remove_interface callback: validate the vif is still on our
 * list, tear it down, and queue a fw switch if the remaining vif count
 * calls for a different (single-role/multi-role) firmware.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	/* loop above must have found the vif; warn if it was not listed */
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2835
2836static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2837 struct ieee80211_vif *vif,
2838 enum nl80211_iftype new_type, bool p2p)
2839{
2840 struct wl1271 *wl = hw->priv;
2841 int ret;
2842
2843 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2844 wl1271_op_remove_interface(hw, vif);
2845
2846 vif->type = new_type;
2847 vif->p2p = p2p;
2848 ret = wl1271_op_add_interface(hw, vif);
2849
2850 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2851 return ret;
2852}
2853
2854static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2855{
2856 int ret;
2857 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2858
2859 /*
2860 * One of the side effects of the JOIN command is that is clears
2861 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2862 * to a WPA/WPA2 access point will therefore kill the data-path.
2863 * Currently the only valid scenario for JOIN during association
2864 * is on roaming, in which case we will also be given new keys.
2865 * Keep the below message for now, unless it starts bothering
2866 * users who really like to roam a lot :)
2867 */
2868 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2869 wl1271_info("JOIN while associated.");
2870
2871 /* clear encryption type */
2872 wlvif->encryption_type = KEY_NONE;
2873
2874 if (is_ibss)
2875 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2876 else {
2877 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2878 /*
2879 * TODO: this is an ugly workaround for wl12xx fw
2880 * bug - we are not able to tx/rx after the first
2881 * start_sta, so make dummy start+stop calls,
2882 * and then call start_sta again.
2883 * this should be fixed in the fw.
2884 */
2885 wl12xx_cmd_role_start_sta(wl, wlvif);
2886 wl12xx_cmd_role_stop_sta(wl, wlvif);
2887 }
2888
2889 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2890 }
2891
2892 return ret;
2893}
2894
2895static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2896 int offset)
2897{
2898 u8 ssid_len;
2899 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2900 skb->len - offset);
2901
2902 if (!ptr) {
2903 wl1271_error("No SSID in IEs!");
2904 return -ENOENT;
2905 }
2906
2907 ssid_len = ptr[1];
2908 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2909 wl1271_error("SSID is too long!");
2910 return -EINVAL;
2911 }
2912
2913 wlvif->ssid_len = ssid_len;
2914 memcpy(wlvif->ssid, ptr+2, ssid_len);
2915 return 0;
2916}
2917
2918static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2919{
2920 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2921 struct sk_buff *skb;
2922 int ieoffset;
2923
2924 /* we currently only support setting the ssid from the ap probe req */
2925 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2926 return -EINVAL;
2927
2928 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2929 if (!skb)
2930 return -EINVAL;
2931
2932 ieoffset = offsetof(struct ieee80211_mgmt,
2933 u.probe_req.variable);
2934 wl1271_ssid_set(wlvif, skb, ieoffset);
2935 dev_kfree_skb(skb);
2936
2937 return 0;
2938}
2939
2940static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2941 struct ieee80211_bss_conf *bss_conf,
2942 u32 sta_rate_set)
2943{
2944 int ieoffset;
2945 int ret;
2946
2947 wlvif->aid = bss_conf->aid;
2948 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2949 wlvif->beacon_int = bss_conf->beacon_int;
2950 wlvif->wmm_enabled = bss_conf->qos;
2951
2952 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2953
2954 /*
2955 * with wl1271, we don't need to update the
2956 * beacon_int and dtim_period, because the firmware
2957 * updates it by itself when the first beacon is
2958 * received after a join.
2959 */
2960 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2961 if (ret < 0)
2962 return ret;
2963
2964 /*
2965 * Get a template for hardware connection maintenance
2966 */
2967 dev_kfree_skb(wlvif->probereq);
2968 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2969 wlvif,
2970 NULL);
2971 ieoffset = offsetof(struct ieee80211_mgmt,
2972 u.probe_req.variable);
2973 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2974
2975 /* enable the connection monitoring feature */
2976 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2977 if (ret < 0)
2978 return ret;
2979
2980 /*
2981 * The join command disable the keep-alive mode, shut down its process,
2982 * and also clear the template config, so we need to reset it all after
2983 * the join. The acx_aid starts the keep-alive process, and the order
2984 * of the commands below is relevant.
2985 */
2986 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2987 if (ret < 0)
2988 return ret;
2989
2990 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2991 if (ret < 0)
2992 return ret;
2993
2994 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2995 if (ret < 0)
2996 return ret;
2997
2998 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2999 wlvif->sta.klv_template_id,
3000 ACX_KEEP_ALIVE_TPL_VALID);
3001 if (ret < 0)
3002 return ret;
3003
3004 /*
3005 * The default fw psm configuration is AUTO, while mac80211 default
3006 * setting is off (ACTIVE), so sync the fw with the correct value.
3007 */
3008 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3009 if (ret < 0)
3010 return ret;
3011
3012 if (sta_rate_set) {
3013 wlvif->rate_set =
3014 wl1271_tx_enabled_rates_get(wl,
3015 sta_rate_set,
3016 wlvif->band);
3017 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3018 if (ret < 0)
3019 return ret;
3020 }
3021
3022 return ret;
3023}
3024
/*
 * Undo the association-time fw configuration for @wlvif: drop the
 * probe-req template, disable connection monitoring, keep-alive and
 * beacon filtering, and abort any in-progress channel switch.
 *
 * NOTE(review): the early exits return `false` (i.e. 0) from an
 * int-returning function — callers appear to treat any 0 as "nothing
 * to do / success"; confirm before changing.
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* disable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* a channel switch was pending: abort it in fw and mac80211 */
		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
3079
3080static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3081{
3082 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3083 wlvif->rate_set = wlvif->basic_rate_set;
3084}
3085
3086static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3087 bool idle)
3088{
3089 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3090
3091 if (idle == cur_idle)
3092 return;
3093
3094 if (idle) {
3095 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3096 } else {
3097 /* The current firmware only supports sched_scan in idle */
3098 if (wl->sched_vif == wlvif)
3099 wl->ops->sched_scan_stop(wl, wlvif);
3100
3101 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3102 }
3103}
3104
3105static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3106 struct ieee80211_conf *conf, u32 changed)
3107{
3108 int ret;
3109
3110 if (wlcore_is_p2p_mgmt(wlvif))
3111 return 0;
3112
3113 if (conf->power_level != wlvif->power_level) {
3114 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3115 if (ret < 0)
3116 return ret;
3117
3118 wlvif->power_level = conf->power_level;
3119 }
3120
3121 return 0;
3122}
3123
/*
 * mac80211 config callback: record the global power level and push the
 * change to every interface while holding a runtime-PM reference.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
		     " changed 0x%x",
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	mutex_lock(&wl->mutex);

	/* remember the level even when the chip is off (applied on init) */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		wl->power_level = conf->power_level;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/* configure each interface */
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3168
/* Snapshot of the multicast filter configuration, built atomically in
 * prepare_multicast() and consumed (then freed) in configure_filter(). */
struct wl1271_filter_params {
	/* false when the list overflowed and filtering must be disabled */
	bool enabled;
	/* number of valid entries in mc_list */
	int mc_list_length;
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
3174
3175static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3176 struct netdev_hw_addr_list *mc_list)
3177{
3178 struct wl1271_filter_params *fp;
3179 struct netdev_hw_addr *ha;
3180
3181 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3182 if (!fp) {
3183 wl1271_error("Out of memory setting filters.");
3184 return 0;
3185 }
3186
3187 /* update multicast filtering parameters */
3188 fp->mc_list_length = 0;
3189 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3190 fp->enabled = false;
3191 } else {
3192 fp->enabled = true;
3193 netdev_hw_addr_list_for_each(ha, mc_list) {
3194 memcpy(fp->mc_list[fp->mc_list_length],
3195 ha->addr, ETH_ALEN);
3196 fp->mc_list_length++;
3197 }
3198 }
3199
3200 return (u64)(unsigned long)fp;
3201}
3202
/* mac80211 RX filter flags this driver can honour in configure_filter() */
#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
3208
/*
 * mac80211 configure_filter callback: program the fw group-address
 * table per interface from the snapshot built in prepare_multicast()
 * (passed as the @multicast cookie, freed here on every path).
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	/* advertise to mac80211 only the filters we support */
	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlcore_is_p2p_mgmt(wlvif))
			continue;

		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}

		/*
		 * If interface in AP mode and created with allmulticast then disable
		 * the firmware filters so that all multicast packets are passed
		 * This is mandatory for MDNS based discovery protocols
		 */
		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
			if (*total & FIF_ALLMULTI) {
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							false,
							NULL, 0);
				if (ret < 0)
					goto out_sleep;
			}
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
3284
3285static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3286 u8 id, u8 key_type, u8 key_size,
3287 const u8 *key, u8 hlid, u32 tx_seq_32,
3288 u16 tx_seq_16)
3289{
3290 struct wl1271_ap_key *ap_key;
3291 int i;
3292
3293 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3294
3295 if (key_size > MAX_KEY_SIZE)
3296 return -EINVAL;
3297
3298 /*
3299 * Find next free entry in ap_keys. Also check we are not replacing
3300 * an existing key.
3301 */
3302 for (i = 0; i < MAX_NUM_KEYS; i++) {
3303 if (wlvif->ap.recorded_keys[i] == NULL)
3304 break;
3305
3306 if (wlvif->ap.recorded_keys[i]->id == id) {
3307 wl1271_warning("trying to record key replacement");
3308 return -EINVAL;
3309 }
3310 }
3311
3312 if (i == MAX_NUM_KEYS)
3313 return -EBUSY;
3314
3315 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3316 if (!ap_key)
3317 return -ENOMEM;
3318
3319 ap_key->id = id;
3320 ap_key->key_type = key_type;
3321 ap_key->key_size = key_size;
3322 memcpy(ap_key->key, key, key_size);
3323 ap_key->hlid = hlid;
3324 ap_key->tx_seq_32 = tx_seq_32;
3325 ap_key->tx_seq_16 = tx_seq_16;
3326
3327 wlvif->ap.recorded_keys[i] = ap_key;
3328 return 0;
3329}
3330
3331static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3332{
3333 int i;
3334
3335 for (i = 0; i < MAX_NUM_KEYS; i++) {
3336 kfree(wlvif->ap.recorded_keys[i]);
3337 wlvif->ap.recorded_keys[i] = NULL;
3338 }
3339}
3340
/*
 * Program all keys recorded before AP start into the fw, set the
 * default WEP key if one was added, and free the recorded list on
 * every path. Returns 0 or the first fw command error.
 */
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i, ret = 0;
	struct wl1271_ap_key *key;
	bool wep_key_added = false;

	for (i = 0; i < MAX_NUM_KEYS; i++) {
		u8 hlid;
		/* recorded keys are packed; first NULL ends the list */
		if (wlvif->ap.recorded_keys[i] == NULL)
			break;

		key = wlvif->ap.recorded_keys[i];
		hlid = key->hlid;
		/* keys recorded without a station map to the bcast link */
		if (hlid == WL12XX_INVALID_LINK_ID)
			hlid = wlvif->ap.bcast_hlid;

		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
					    key->id, key->key_type,
					    key->key_size, key->key,
					    hlid, key->tx_seq_32,
					    key->tx_seq_16);
		if (ret < 0)
			goto out;

		if (key->key_type == KEY_WEP)
			wep_key_added = true;
	}

	if (wep_key_added) {
		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
						     wlvif->ap.bcast_hlid);
		if (ret < 0)
			goto out;
	}

out:
	wl1271_free_ap_keys(wl, wlvif);
	return ret;
}
3380
/*
 * Program (or remove) a key in the fw for @wlvif.
 *
 * AP mode: before the AP is started, keys are only recorded (played
 * back later by wl1271_ap_init_hwenc()); afterwards they are set
 * directly. STA/IBSS mode: unicast key removal and removal on a dead
 * hlid are silently ignored (the fw clears those itself).
 *
 * Returns 0 on success or a negative error code.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			/* group keys go to the broadcast link */
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

	}

	return 0;
}
3457
/*
 * mac80211 set_key callback.  GEM and TKIP keys change the firmware's
 * spare-block accounting, so for those ciphers the TX queues are stopped
 * and flushed before the operation and woken again afterwards (also on
 * the error paths, via out_wake_queues).
 *
 * Returns 0 on success, -EAGAIN if the core is not up, or a negative
 * error from runtime PM / the HW op.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		/* drop the reference get_sync took even on failure */
		pm_runtime_put_noidle(wl->dev);
		goto out_wake_queues;
	}

	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out_wake_queues:
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
3504
/*
 * Common set_key implementation used by the chip-specific set_key hw ops.
 * Maps the mac80211 cipher to a firmware key type, captures the current
 * TX sequence counter of the target link (so replay counters survive
 * rekeying), and dispatches to wl1271_set_key for SET_KEY / DISABLE_KEY.
 *
 * Returns 0 on success, -EOPNOTSUPP for unknown ciphers/commands, or a
 * negative error from the underlying command.
 */
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   struct ieee80211_key_conf *key_conf)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;
	u8 hlid;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	/*
	 * Resolve the link id the key applies to.
	 * NOTE(review): the outer if/else lacks braces around the nested
	 * if/else; it parses as intended here, but kernel style would
	 * brace the outer branches to avoid dangling-else confusion.
	 */
	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		if (sta) {
			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}
	else
		hlid = wlvif->sta.hlid;

	/* seed the key's TX sequence counter from the link's freed-pkts count */
	if (hlid != WL12XX_INVALID_LINK_ID) {
		u64 tx_seq = wl->links[hlid].total_freed_pkts;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
	}

	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;
		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;
		/* firmware generates the CCMP IV; mac80211 leaves room for it */
		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		return -EOPNOTSUPP;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			return ret;
		}

		/*
		 * reconfiguring arp response if the unicast (or common)
		 * encryption key type was changed
		 */
		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
		    (sta || key_type == KEY_WEP) &&
		    wlvif->encryption_type != key_type) {
			wlvif->encryption_type = key_type;
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				return ret;
			}
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			return ret;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		return -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
3611
/*
 * mac80211 set_default_unicast_key callback: record the new default key
 * index and, when WEP is in use, push it to the firmware for the STA link.
 * Unsetting the default key (key_idx == -1) is not supported and ignored.
 */
static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif,
					  int key_idx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
		     key_idx);

	/* we don't handle unsetting of default key */
	if (key_idx == -1)
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out_unlock;
	}

	wlvif->default_key = key_idx;

	/* the default WEP key needs to be configured at least once */
	if (wlvif->encryption_type == KEY_WEP) {
		ret = wl12xx_cmd_set_default_wep_key(wl,
				key_idx,
				wlvif->sta.hlid);
		/* goto is redundant (label follows) but kept for symmetry */
		if (ret < 0)
			goto out_sleep;
	}

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out_unlock:
	mutex_unlock(&wl->mutex);
}
3658
/*
 * Re-apply the regulatory-domain channel configuration to the firmware.
 * Only relevant for chips with the WLCORE_QUIRK_REGDOMAIN_CONF quirk;
 * a failure of the config command triggers full recovery.
 *
 * NOTE(review): on the regdomain-config failure path the function skips
 * pm_runtime_mark_last_busy/put_autosuspend — presumably the queued
 * recovery rebalances the runtime-PM state; verify against the recovery
 * work implementation.
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
3686
/*
 * mac80211 hw_scan callback.  Only the first SSID of the request is used.
 * Scanning is refused with -EBUSY while any role is in a remain-on-channel
 * period, since the firmware cannot do both at once.
 *
 * Returns 0 on success or a negative error code.
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct ieee80211_scan_request *hw_req)
{
	struct cfg80211_scan_request *req = &hw_req->req;
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
		ret = -EBUSY;
		goto out_sleep;
	}

	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3738
/*
 * mac80211 cancel_hw_scan callback: stop an in-progress firmware scan,
 * reset the driver's scan state and report an aborted scan to mac80211.
 * The scan-complete work is cancelled after dropping the mutex, since
 * that work itself takes wl->mutex.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct cfg80211_scan_info info = {
		.aborted = true,
	};
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/* only ask the fw to stop if the scan hasn't already finished */
	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, &info);

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3791
/*
 * mac80211 sched_scan_start callback: hand the scheduled-scan request to
 * the chip-specific implementation and remember which vif owns it.
 *
 * Returns 0 on success, -EAGAIN if the core is not up, or a negative
 * error from runtime PM / the scan op.
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_scan_ies *ies)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
	if (ret < 0)
		goto out_sleep;

	/* remember the owning vif so the scan can be stopped later */
	wl->sched_vif = wlvif;

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
3829
/*
 * mac80211 sched_scan_stop callback.  Best-effort: the result of the
 * chip-specific stop op is ignored and the function always returns 0,
 * even when the chip is down or runtime PM fails.
 */
static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	wl->ops->sched_scan_stop(wl, wlvif);

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return 0;
}
3859
/*
 * mac80211 set_frag_threshold callback: push the fragmentation threshold
 * to the firmware via ACX.  A command failure is only logged; the error
 * is still returned to mac80211.
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wl1271_acx_frag_threshold(wl, value);
	if (ret < 0)
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3890
/*
 * mac80211 set_rts_threshold callback: apply the RTS threshold to every
 * active vif.  A failure on one vif is logged and the loop continues, so
 * the returned value reflects the last vif attempted.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3923
3924static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3925{
3926 int len;
3927 const u8 *next, *end = skb->data + skb->len;
3928 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3929 skb->len - ieoffset);
3930 if (!ie)
3931 return;
3932 len = ie[1] + 2;
3933 next = ie + len;
3934 memmove(ie, next, end - next);
3935 skb_trim(skb, skb->len - len);
3936}
3937
3938static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3939 unsigned int oui, u8 oui_type,
3940 int ieoffset)
3941{
3942 int len;
3943 const u8 *next, *end = skb->data + skb->len;
3944 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3945 skb->data + ieoffset,
3946 skb->len - ieoffset);
3947 if (!ie)
3948 return;
3949 len = ie[1] + 2;
3950 next = ie + len;
3951 memmove(ie, next, end - next);
3952 skb_trim(skb, skb->len - len);
3953}
3954
/*
 * Install the user-space-provided probe response (fetched from mac80211)
 * as the AP probe-response template, and mark the vif so beacon-derived
 * probe responses are no longer used (see wlcore_set_beacon_template).
 *
 * Returns 0 on success, -EOPNOTSUPP when mac80211 has no probe response
 * for this vif, or a negative error from the template command.
 */
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
					 struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct sk_buff *skb;
	int ret;

	skb = ieee80211_proberesp_get(wl->hw, vif);
	if (!skb)
		return -EOPNOTSUPP;

	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
				      CMD_TEMPL_AP_PROBE_RESPONSE,
				      skb->data,
				      skb->len, 0,
				      rates);
	dev_kfree_skb(skb);

	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_AP, "probe response updated");
	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);

out:
	return ret;
}
3982
/*
 * Build an AP probe-response template from a beacon-derived frame for
 * chips without native probe-response support.  If the vif already has
 * a (non-hidden) SSID, the frame is used as-is; otherwise the SSID IE
 * in the frame is replaced with the SSID from bss_conf by splicing the
 * frame into a local template buffer.
 *
 * Returns 0 on success, -EINVAL on oversized templates or a frame
 * without an SSID IE, or a negative error from the template command.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
					       struct ieee80211_vif *vif,
					       u8 *probe_rsp_data,
					       size_t probe_rsp_len,
					       u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	ssid_ie_offset = ptr - probe_rsp_data;
	/* skip past the original SSID IE (header + payload) */
	ptr += (ptr[1] + 2);

	/* copy everything up to (but excluding) the old SSID IE */
	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append everything that followed the old SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
4040
/*
 * Apply ERP-related bss_conf changes (slot time, preamble, CTS
 * protection) to the firmware.
 *
 * NOTE(review): the preamble branch ignores the return value of
 * wl1271_acx_set_preamble, unlike the slot and CTS branches — presumably
 * a deliberate best-effort; confirm before "fixing".
 *
 * Returns 0 on success or the first failing ACX command's error.
 */
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
		else
			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
		if (ret < 0) {
			wl1271_warning("Set slot time failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (bss_conf->use_short_preamble)
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
		else
			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (bss_conf->use_cts_prot)
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_ENABLE);
		else
			ret = wl1271_acx_cts_protect(wl, wlvif,
						     CTSPROTECT_DISABLE);
		if (ret < 0) {
			wl1271_warning("Set ctsprotect failed %d", ret);
			goto out;
		}
	}

out:
	return ret;
}
4083
/*
 * Fetch the current beacon from mac80211 and install it as the beacon
 * template.  Unless user space already supplied an explicit probe
 * response (WLVIF_FLAG_AP_PROBE_RESP_SET), the beacon is also turned
 * into a probe-response template: TIM and P2P IEs are stripped and the
 * frame subtype rewritten.  The beacon skb is freed on every path.
 *
 * Returns 0 on success or a negative error code.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
				      bool is_ap)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	u32 min_rate;
	int ret;
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
	u16 tmpl_id;

	if (!beacon) {
		ret = -EINVAL;
		goto out;
	}

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
			  CMD_TEMPL_BEACON;
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
				      beacon->data,
				      beacon->len, 0,
				      min_rate);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}

	/* non-NULL iff the beacon carries a Microsoft WMM vendor IE */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
		goto end_bcn;

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw reponds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* rewrite the (stripped) beacon into a probe response frame */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
	if (is_ap)
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
							   beacon->data,
							   beacon->len,
							   min_rate);
	else
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					      CMD_TEMPL_PROBE_RESPONSE,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
end_bcn:
	dev_kfree_skb(beacon);
	/* goto is redundant (label follows) but kept for symmetry */
	if (ret < 0)
		goto out;

out:
	return ret;
}
4169
/*
 * Handle beacon-related bss_conf changes: beacon interval, user-supplied
 * probe response (AP only, result intentionally best-effort) and beacon
 * template updates.  A beacon update on a vif whose beaconing was
 * disabled by DFS triggers a DFS master restart.
 *
 * Returns 0 on success; errors are logged and returned.
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		/* return value deliberately ignored: best-effort update */
		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;

		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
				       &wlvif->flags)) {
			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}
out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
4209
4210/* AP mode changes */
/*
 * AP-mode portion of the bss_info_changed handling: basic-rate updates
 * (which require re-initializing rate policies and templates), starting
 * and stopping the AP role on BSS_CHANGED_BEACON_ENABLED, beacon/ERP
 * updates and HT information changes.  Errors are handled by bailing
 * out early; the function itself returns void.
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								 wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		/* No need to set probe resp template for mesh */
		if (!ieee80211_vif_is_mesh(vif)) {
			ret = wl1271_ap_set_probe_resp_tmpl(wl,
							    wlvif->basic_rate,
							    vif);
			if (ret < 0)
				goto out;
		}

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				/* flush keys recorded before the AP started */
				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				/*
				 * AP might be in ROC in case we have just
				 * sent auth reply. handle it.
				 */
				if (test_bit(wlvif->role_id, wl->roc_map))
					wl12xx_croc(wl, wlvif->role_id);

				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
4308
/*
 * Configure the vif for a newly learned BSSID: adopt the AP's beacon
 * interval and rate sets, stop any scheduled scan on this vif (not
 * supported while connected), refresh rate policies and null-data
 * templates, set the SSID and mark the vif in use.
 *
 * @sta_rate_set: AP's supported-rates bitmap (0 means "not known yet").
 * Returns 0 on success or the first failing command's error.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, bss_conf->aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						sta_rate_set,
						wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4359
/*
 * Tear down the association with the current BSSID: drop back to the
 * band's minimum rates, stop the STA role if it was running, and clear
 * the in-use flag.
 *
 * Returns 0 on success or the failing command's error.
 */
static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	/* revert back to minimum rates for the current band */
	wl1271_set_band_rate(wl, wlvif);
	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
		if (ret < 0)
			return ret;
	}

	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
	return 0;
}
4382/* STA/IBSS mode changes */
/*
 * STA/IBSS-mode portion of the bss_info_changed handling.  Ordering is
 * significant: AP capabilities (rates/HT) are snapshotted under RCU
 * before the BSSID is configured; the join is issued only after BSSID,
 * beacon-filter and ERP settings; HT peer caps and ARP filtering are
 * applied only after the join.  Errors bail out early; returns void.
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	/* only valid when sta_exists is set below */
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_IDLE && !is_ibss)
		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);

	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	/* snapshot AP rate/HT capabilities under RCU before using them */
	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
		/* enable beacon filtering */
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			/* PS-mode failures are logged but not fatal */
			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4630
4631static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4632 struct ieee80211_vif *vif,
4633 struct ieee80211_bss_conf *bss_conf,
4634 u32 changed)
4635{
4636 struct wl1271 *wl = hw->priv;
4637 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4638 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4639 int ret;
4640
4641 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4642 wlvif->role_id, (int)changed);
4643
4644 /*
4645 * make sure to cancel pending disconnections if our association
4646 * state changed
4647 */
4648 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4649 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4650
4651 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4652 !bss_conf->enable_beacon)
4653 wl1271_tx_flush(wl);
4654
4655 mutex_lock(&wl->mutex);
4656
4657 if (unlikely(wl->state != WLCORE_STATE_ON))
4658 goto out;
4659
4660 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4661 goto out;
4662
4663 ret = pm_runtime_get_sync(wl->dev);
4664 if (ret < 0) {
4665 pm_runtime_put_noidle(wl->dev);
4666 goto out;
4667 }
4668
4669 if ((changed & BSS_CHANGED_TXPOWER) &&
4670 bss_conf->txpower != wlvif->power_level) {
4671
4672 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4673 if (ret < 0)
4674 goto out;
4675
4676 wlvif->power_level = bss_conf->txpower;
4677 }
4678
4679 if (is_ap)
4680 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4681 else
4682 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4683
4684 pm_runtime_mark_last_busy(wl->dev);
4685 pm_runtime_put_autosuspend(wl->dev);
4686
4687out:
4688 mutex_unlock(&wl->mutex);
4689}
4690
4691static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4692 struct ieee80211_chanctx_conf *ctx)
4693{
4694 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4695 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4696 cfg80211_get_chandef_type(&ctx->def));
4697 return 0;
4698}
4699
4700static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4701 struct ieee80211_chanctx_conf *ctx)
4702{
4703 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4704 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4705 cfg80211_get_chandef_type(&ctx->def));
4706}
4707
/*
 * mac80211 op: a channel context changed.  The only change acted upon here
 * is IEEE80211_CHANCTX_CHANGE_RADAR: for every AP vif bound to this context
 * on a usable DFS channel, start CAC (radar detection) if not already on.
 */
static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
				     struct ieee80211_chanctx_conf *ctx,
				     u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;
	int channel = ieee80211_frequency_to_channel(
		ctx->def.chan->center_freq);

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 change chanctx %d (type %d) changed 0x%x",
		     channel, cfg80211_get_chandef_type(&ctx->def), changed);

	mutex_lock(&wl->mutex);

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		/* drop the reference taken even on failure */
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* only act on vifs assigned to this particular context */
		rcu_read_lock();
		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/* start radar if needed */
		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    ctx->radar_enabled && !wlvif->radar_enabled &&
		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
			wlcore_hw_set_cac(wl, wlvif, true);
			wlvif->radar_enabled = true;
		}
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
4756
/*
 * mac80211 op: a vif was assigned to a channel context.  Record the new
 * band/channel/type on the vif, refresh the default rate tables, and start
 * CAC on usable DFS channels when radar detection is requested.
 *
 * NOTE(review): this op always returns 0 — "ret" only tracks the
 * runtime-PM get; failures (chip off, vif uninitialized, PM error) are
 * silently skipped.  Presumably intentional; confirm against mac80211's
 * expectations for assign_vif_chanctx.
 */
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int channel = ieee80211_frequency_to_channel(
		ctx->def.chan->center_freq);
	int ret = -EINVAL;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
		     wlvif->role_id, channel,
		     cfg80211_get_chandef_type(&ctx->def),
		     ctx->radar_enabled, ctx->def.chan->dfs_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/* cache the assigned channel parameters on the vif */
	wlvif->band = ctx->def.chan->band;
	wlvif->channel = channel;
	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);

	/* update default rates according to the band */
	wl1271_set_band_rate(wl, wlvif);

	if (ctx->radar_enabled &&
	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
		wlcore_hw_set_cac(wl, wlvif, true);
		wlvif->radar_enabled = true;
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return 0;
}
4808
/*
 * mac80211 op: a vif was unassigned from its channel context.  Flush any
 * pending tx (outside the mutex, since the flush takes it internally) and
 * stop CAC if this vif had radar detection running.
 */
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   struct ieee80211_chanctx_conf *ctx)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 unassign chanctx (role %d) %d (type %d)",
		     wlvif->role_id,
		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
		     cfg80211_get_chandef_type(&ctx->def));

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	if (wlvif->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
		wlcore_hw_set_cac(wl, wlvif, false);
		wlvif->radar_enabled = false;
	}

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
4850
/*
 * Move one AP vif to a new channel context: stop any running CAC on the
 * old channel, adopt the new band/channel/type, then restart CAC if the
 * new context has radar detection enabled.
 *
 * Caller context: invoked from wlcore_op_switch_vif_chanctx() with
 * wl->mutex held and the chip awake.  Only AP vifs are supported
 * (beaconing must already be disabled during the switch).
 */
static int __wlcore_switch_vif_chan(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    struct ieee80211_chanctx_conf *new_ctx)
{
	int channel = ieee80211_frequency_to_channel(
		new_ctx->def.chan->center_freq);

	wl1271_debug(DEBUG_MAC80211,
		     "switch vif (role %d) %d -> %d chan_type: %d",
		     wlvif->role_id, wlvif->channel, channel,
		     cfg80211_get_chandef_type(&new_ctx->def));

	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
		return 0;

	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));

	if (wlvif->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
		wlcore_hw_set_cac(wl, wlvif, false);
		wlvif->radar_enabled = false;
	}

	wlvif->band = new_ctx->def.chan->band;
	wlvif->channel = channel;
	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);

	/* start radar if needed */
	if (new_ctx->radar_enabled) {
		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
		wlcore_hw_set_cac(wl, wlvif, true);
		wlvif->radar_enabled = true;
	}

	return 0;
}
4887
/*
 * mac80211 op: switch one or more vifs to new channel contexts.
 *
 * NOTE(review): the op always returns 0, even when __wlcore_switch_vif_chan()
 * fails mid-loop (the loop just stops early) — presumably because the helper
 * itself can currently only return 0; confirm if it ever grows error paths.
 */
static int
wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_vif_chanctx_switch *vifs,
			     int n_vifs,
			     enum ieee80211_chanctx_switch_mode mode)
{
	struct wl1271 *wl = hw->priv;
	int i, ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 switch chanctx n_vifs %d mode %d",
		     n_vifs, mode);

	mutex_lock(&wl->mutex);

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/* move each vif to its new context, one at a time */
	for (i = 0; i < n_vifs; i++) {
		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);

		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
		if (ret)
			goto out_sleep;
	}
out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return 0;
}
4924
/*
 * mac80211 op: configure EDCA/QoS parameters for one tx queue.  Programs
 * the AC parameters (cw_min/cw_max/aifs/txop) and the matching TID
 * configuration (including the U-APSD vs legacy power-save scheme) into
 * the firmware.  P2P management vifs have no data queues, so they are a
 * no-op.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	if (wlcore_is_p2p_mgmt(wlvif))
		return 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4980
/*
 * mac80211 op: read the current TSF from the firmware.  Returns ULLONG_MAX
 * when the chip is off or the query fails (mactime keeps its initializer).
 */
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{

	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}
5015
5016static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5017 struct survey_info *survey)
5018{
5019 struct ieee80211_conf *conf = &hw->conf;
5020
5021 if (idx != 0)
5022 return -ENOENT;
5023
5024 survey->channel = conf->chandef.chan;
5025 survey->filled = 0;
5026 return 0;
5027}
5028
/*
 * Allocate a firmware link (HLID) for a newly added AP-mode station and
 * record it in the vif's station map.  Returns -EBUSY when the station or
 * link limit is reached.  Caller must hold wl->mutex.
 */
static int wl1271_allocate_sta(struct wl1271 *wl,
			     struct wl12xx_vif *wlvif,
			     struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret;


	if (wl->active_sta_count >= wl->max_ap_stations) {
		wl1271_warning("could not allocate HLID - too much stations");
		return -EBUSY;
	}

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
	if (ret < 0) {
		wl1271_warning("could not allocate HLID - too many links");
		return -EBUSY;
	}

	/* use the previous security seq, if this is a recovery/resume */
	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;

	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
	wl->active_sta_count++;
	return 0;
}
5057
/*
 * Release the firmware link (HLID) of an AP-mode station: clear it from
 * the station and power-save maps, preserve its security sequence counter
 * for a possible recovery/resume, and free the link.  No-op if the HLID is
 * not currently allocated.  Caller must hold wl->mutex.
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, &wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of iee80211_sta,
	 * in case of recovery/suspend
	 */
	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
5083
5084static int wl12xx_sta_add(struct wl1271 *wl,
5085 struct wl12xx_vif *wlvif,
5086 struct ieee80211_sta *sta)
5087{
5088 struct wl1271_station *wl_sta;
5089 int ret = 0;
5090 u8 hlid;
5091
5092 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5093
5094 ret = wl1271_allocate_sta(wl, wlvif, sta);
5095 if (ret < 0)
5096 return ret;
5097
5098 wl_sta = (struct wl1271_station *)sta->drv_priv;
5099 hlid = wl_sta->hlid;
5100
5101 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5102 if (ret < 0)
5103 wl1271_free_sta(wl, wlvif, hlid);
5104
5105 return ret;
5106}
5107
5108static int wl12xx_sta_remove(struct wl1271 *wl,
5109 struct wl12xx_vif *wlvif,
5110 struct ieee80211_sta *sta)
5111{
5112 struct wl1271_station *wl_sta;
5113 int ret = 0, id;
5114
5115 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5116
5117 wl_sta = (struct wl1271_station *)sta->drv_priv;
5118 id = wl_sta->hlid;
5119 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5120 return -EINVAL;
5121
5122 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5123 if (ret < 0)
5124 return ret;
5125
5126 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5127 return ret;
5128}
5129
5130static void wlcore_roc_if_possible(struct wl1271 *wl,
5131 struct wl12xx_vif *wlvif)
5132{
5133 if (find_first_bit(wl->roc_map,
5134 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5135 return;
5136
5137 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5138 return;
5139
5140 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5141}
5142
5143/*
5144 * when wl_sta is NULL, we treat this call as if coming from a
5145 * pending auth reply.
5146 * wl->mutex must be taken and the FW must be awake when the call
5147 * takes place.
5148 */
5149void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5150 struct wl1271_station *wl_sta, bool in_conn)
5151{
5152 if (in_conn) {
5153 if (WARN_ON(wl_sta && wl_sta->in_connection))
5154 return;
5155
5156 if (!wlvif->ap_pending_auth_reply &&
5157 !wlvif->inconn_count)
5158 wlcore_roc_if_possible(wl, wlvif);
5159
5160 if (wl_sta) {
5161 wl_sta->in_connection = true;
5162 wlvif->inconn_count++;
5163 } else {
5164 wlvif->ap_pending_auth_reply = true;
5165 }
5166 } else {
5167 if (wl_sta && !wl_sta->in_connection)
5168 return;
5169
5170 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5171 return;
5172
5173 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5174 return;
5175
5176 if (wl_sta) {
5177 wl_sta->in_connection = false;
5178 wlvif->inconn_count--;
5179 } else {
5180 wlvif->ap_pending_auth_reply = false;
5181 }
5182
5183 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5184 test_bit(wlvif->role_id, wl->roc_map))
5185 wl12xx_croc(wl, wlvif->role_id);
5186 }
5187}
5188
/*
 * React to a mac80211 station state transition (NOTEXIST <-> NONE <-> AUTH
 * <-> ASSOC <-> AUTHORIZED) for both AP and STA roles: add/remove/authorize
 * peers in firmware, track in-connection counts for ROC handling, and
 * save/restore the security sequence counter across disassoc/assoc
 * (suspend/resume).  Caller must hold wl->mutex with the chip awake.
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		/* hold an ROC while the station completes its handshake */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		/* reconfigure rates */
		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* handshake done - drop the in-connection reference */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* de-authorize (STA mode) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* save seq number on disassoc (suspend) */
	if (is_sta &&
	    old_state == IEEE80211_STA_ASSOC &&
	    new_state == IEEE80211_STA_AUTH) {
		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
		wlvif->total_freed_pkts = 0;
	}

	/* restore seq number on assoc (resume) */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC) {
		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* new connection attempt (STA mode): grab an ROC if none active */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
5294
/*
 * mac80211 op: station state transition.  Locks, wakes the chip, and
 * delegates to wl12xx_update_sta_state().  Downward transitions
 * (teardown) are reported as success regardless of ret, since mac80211
 * must always be able to remove a station.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
	/* state-down transitions must not fail */
	if (new_state < old_state)
		return 0;
	return ret;
}
5331
5332static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5333 struct ieee80211_vif *vif,
5334 struct ieee80211_ampdu_params *params)
5335{
5336 struct wl1271 *wl = hw->priv;
5337 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5338 int ret;
5339 u8 hlid, *ba_bitmap;
5340 struct ieee80211_sta *sta = params->sta;
5341 enum ieee80211_ampdu_mlme_action action = params->action;
5342 u16 tid = params->tid;
5343 u16 *ssn = ¶ms->ssn;
5344
5345 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5346 tid);
5347
5348 /* sanity check - the fields in FW are only 8bits wide */
5349 if (WARN_ON(tid > 0xFF))
5350 return -ENOTSUPP;
5351
5352 mutex_lock(&wl->mutex);
5353
5354 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5355 ret = -EAGAIN;
5356 goto out;
5357 }
5358
5359 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5360 hlid = wlvif->sta.hlid;
5361 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5362 struct wl1271_station *wl_sta;
5363
5364 wl_sta = (struct wl1271_station *)sta->drv_priv;
5365 hlid = wl_sta->hlid;
5366 } else {
5367 ret = -EINVAL;
5368 goto out;
5369 }
5370
5371 ba_bitmap = &wl->links[hlid].ba_bitmap;
5372
5373 ret = pm_runtime_get_sync(wl->dev);
5374 if (ret < 0) {
5375 pm_runtime_put_noidle(wl->dev);
5376 goto out;
5377 }
5378
5379 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5380 tid, action);
5381
5382 switch (action) {
5383 case IEEE80211_AMPDU_RX_START:
5384 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5385 ret = -ENOTSUPP;
5386 break;
5387 }
5388
5389 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5390 ret = -EBUSY;
5391 wl1271_error("exceeded max RX BA sessions");
5392 break;
5393 }
5394
5395 if (*ba_bitmap & BIT(tid)) {
5396 ret = -EINVAL;
5397 wl1271_error("cannot enable RX BA session on active "
5398 "tid: %d", tid);
5399 break;
5400 }
5401
5402 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5403 hlid,
5404 params->buf_size);
5405
5406 if (!ret) {
5407 *ba_bitmap |= BIT(tid);
5408 wl->ba_rx_session_count++;
5409 }
5410 break;
5411
5412 case IEEE80211_AMPDU_RX_STOP:
5413 if (!(*ba_bitmap & BIT(tid))) {
5414 /*
5415 * this happens on reconfig - so only output a debug
5416 * message for now, and don't fail the function.
5417 */
5418 wl1271_debug(DEBUG_MAC80211,
5419 "no active RX BA session on tid: %d",
5420 tid);
5421 ret = 0;
5422 break;
5423 }
5424
5425 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5426 hlid, 0);
5427 if (!ret) {
5428 *ba_bitmap &= ~BIT(tid);
5429 wl->ba_rx_session_count--;
5430 }
5431 break;
5432
5433 /*
5434 * The BA initiator session management in FW independently.
5435 * Falling break here on purpose for all TX APDU commands.
5436 */
5437 case IEEE80211_AMPDU_TX_START:
5438 case IEEE80211_AMPDU_TX_STOP_CONT:
5439 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5440 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5441 case IEEE80211_AMPDU_TX_OPERATIONAL:
5442 ret = -EINVAL;
5443 break;
5444
5445 default:
5446 wl1271_error("Incorrect ampdu action id=%x\n", action);
5447 ret = -EINVAL;
5448 }
5449
5450 pm_runtime_mark_last_busy(wl->dev);
5451 pm_runtime_put_autosuspend(wl->dev);
5452
5453out:
5454 mutex_unlock(&wl->mutex);
5455
5456 return ret;
5457}
5458
/*
 * mac80211 op: apply a legacy bitrate mask.  The per-band masks are always
 * cached on the vif; the firmware rate policies are reprogrammed
 * immediately only for a not-yet-associated STA vif (an associated STA's
 * rates are managed via the association flow).
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		mask->control[NL80211_BAND_2GHZ].legacy,
		mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* always cache the masks, even when the chip is off */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = pm_runtime_get_sync(wl->dev);
		if (ret < 0) {
			pm_runtime_put_noidle(wl->dev);
			goto out;
		}

		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		pm_runtime_mark_last_busy(wl->dev);
		pm_runtime_put_autosuspend(wl->dev);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5504
/*
 * mac80211 op: start a CSA-driven channel switch on a STA vif.  Flushes
 * tx, issues the hardware-specific channel-switch command, and arms a
 * watchdog work that declares failure if the switch has not completed
 * 5 seconds after the announced switch time.  If the chip is already off,
 * mac80211 is told the switch failed so it can recover.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
			ieee80211_chswitch_done(vif, false);
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	/* TODO: change mac80211 to pass vif as param */

	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
					     usecs_to_jiffies(delay_usec) +
					     msecs_to_jiffies(5000));
	}

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);

out:
	mutex_unlock(&wl->mutex);
}
5559
/*
 * Fetch the vif's current beacon from mac80211 and locate the information
 * element with the given EID in its variable section.  Returns NULL when no
 * beacon is available or the IE is absent.
 *
 * NOTE(review): the skb returned by ieee80211_beacon_get() is never freed
 * here, and the returned pointer aliases the skb's data — this looks like a
 * per-call skb leak (and the caller must consume the pointer before any
 * free).  Confirm ownership semantics and consider copying the IE out and
 * releasing the skb.
 */
static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 eid)
{
	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
	struct sk_buff *beacon =
		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));

	if (!beacon)
		return NULL;

	return cfg80211_find_ie(eid,
				beacon->data + ieoffset,
				beacon->len - ieoffset);
}
5575
5576static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5577 u8 *csa_count)
5578{
5579 const u8 *ie;
5580 const struct ieee80211_channel_sw_ie *ie_csa;
5581
5582 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5583 if (!ie)
5584 return -EINVAL;
5585
5586 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5587 *csa_count = ie_csa->count;
5588
5589 return 0;
5590}
5591
/*
 * mac80211 op: AP-side channel switch driven by a CSA element in our own
 * beacon.  Reads the CSA count from the current beacon, then asks the
 * hardware to perform the switch and marks the vif as switching.
 */
static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
					    struct ieee80211_vif *vif,
					    struct cfg80211_chan_def *chandef)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_channel_switch ch_switch = {
		.block_tx = true,
		.chandef = *chandef,
	};
	int ret;

	wl1271_debug(DEBUG_MAC80211,
		     "mac80211 channel switch beacon (role %d)",
		     wlvif->role_id);

	/* the firmware counts down from the CSA count in the beacon */
	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
	if (ret < 0) {
		wl1271_error("error getting beacon (for CSA counter)");
		return;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
	if (ret)
		goto out_sleep;

	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
}
5639
5640static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5641 u32 queues, bool drop)
5642{
5643 struct wl1271 *wl = hw->priv;
5644
5645 wl1271_tx_flush(wl);
5646}
5647
/*
 * mac80211 op: remain-on-channel request.  The firmware supports a single
 * ROC at a time, so the request is rejected with -EBUSY if another role is
 * already on-channel.  On success the device role is started on the target
 * channel and a delayed work is armed to end the ROC after "duration" ms.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, active_roc, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
		wl1271_warning("active roc on role %d", active_roc);
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	/* remember the owning vif; cleared again in __wlcore_roc_completed */
	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5696
5697static int __wlcore_roc_completed(struct wl1271 *wl)
5698{
5699 struct wl12xx_vif *wlvif;
5700 int ret;
5701
5702 /* already completed */
5703 if (unlikely(!wl->roc_vif))
5704 return 0;
5705
5706 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5707
5708 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5709 return -EBUSY;
5710
5711 ret = wl12xx_stop_dev(wl, wlvif);
5712 if (ret < 0)
5713 return ret;
5714
5715 wl->roc_vif = NULL;
5716
5717 return 0;
5718}
5719
/*
 * Locked/powered wrapper around __wlcore_roc_completed(): takes wl->mutex,
 * wakes the chip, ends the active ROC, and lets the chip autosuspend
 * again.  Returns -EBUSY when the chip is not on.
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = pm_runtime_get_sync(wl->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(wl->dev);
		goto out;
	}

	ret = __wlcore_roc_completed(wl);

	pm_runtime_mark_last_busy(wl->dev);
	pm_runtime_put_autosuspend(wl->dev);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5748
5749static void wlcore_roc_complete_work(struct work_struct *work)
5750{
5751 struct delayed_work *dwork;
5752 struct wl1271 *wl;
5753 int ret;
5754
5755 dwork = to_delayed_work(work);
5756 wl = container_of(dwork, struct wl1271, roc_complete_work);
5757
5758 ret = wlcore_roc_completed(wl);
5759 if (!ret)
5760 ieee80211_remain_on_channel_expired(wl->hw);
5761}
5762
/*
 * mac80211 op: cancel an ongoing remain-on-channel.  Flushes tx, cancels
 * the pending expiry work, and tears down the ROC synchronously.
 */
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");

	/* TODO: per-vif */
	wl1271_tx_flush(wl);

	/*
	 * we can't just flush_work here, because it might deadlock
	 * (as we might get called from the same workqueue)
	 */
	cancel_delayed_work_sync(&wl->roc_complete_work);
	wlcore_roc_completed(wl);

	return 0;
}
5781
/*
 * mac80211 rate-control update callback.  Only bandwidth changes are
 * of interest here; since this callback runs in atomic context, the
 * new bandwidth/HT capabilities are stashed on the vif and the actual
 * handling is deferred to wlvif->rc_update_work.
 */
static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");

	if (!(changed & IEEE80211_RC_BW_CHANGED))
		return;

	/* this callback is atomic, so schedule a new work */
	wlvif->rc_update_bw = sta->bandwidth;
	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
	ieee80211_queue_work(hw, &wlvif->rc_update_work);
}
5799
5800static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5801 struct ieee80211_vif *vif,
5802 struct ieee80211_sta *sta,
5803 struct station_info *sinfo)
5804{
5805 struct wl1271 *wl = hw->priv;
5806 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5807 s8 rssi_dbm;
5808 int ret;
5809
5810 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5811
5812 mutex_lock(&wl->mutex);
5813
5814 if (unlikely(wl->state != WLCORE_STATE_ON))
5815 goto out;
5816
5817 ret = pm_runtime_get_sync(wl->dev);
5818 if (ret < 0) {
5819 pm_runtime_put_noidle(wl->dev);
5820 goto out_sleep;
5821 }
5822
5823 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5824 if (ret < 0)
5825 goto out_sleep;
5826
5827 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5828 sinfo->signal = rssi_dbm;
5829
5830out_sleep:
5831 pm_runtime_mark_last_busy(wl->dev);
5832 pm_runtime_put_autosuspend(wl->dev);
5833
5834out:
5835 mutex_unlock(&wl->mutex);
5836}
5837
5838static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5839 struct ieee80211_sta *sta)
5840{
5841 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5842 struct wl1271 *wl = hw->priv;
5843 u8 hlid = wl_sta->hlid;
5844
5845 /* return in units of Kbps */
5846 return (wl->links[hlid].fw_rate_mbps * 1000);
5847}
5848
5849static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5850{
5851 struct wl1271 *wl = hw->priv;
5852 bool ret = false;
5853
5854 mutex_lock(&wl->mutex);
5855
5856 if (unlikely(wl->state != WLCORE_STATE_ON))
5857 goto out;
5858
5859 /* packets are considered pending if in the TX queue or the FW */
5860 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5861out:
5862 mutex_unlock(&wl->mutex);
5863
5864 return ret;
5865}
5866
/*
 * 2.4 GHz legacy (11b/11g) rate table.  .bitrate is in units of
 * 100 kbps (so 10 == 1 Mbps); .hw_value/.hw_value_short map to the
 * firmware's CONF_HW_BIT_RATE_* rate-bit masks, with the _short
 * variant used for short-preamble transmissions.
 */
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5909
/*
 * 2.4 GHz channel list (channels 1-14).  TX power is capped at
 * WLCORE_MAX_TXPWR; flags/max_antenna_gain are reset at init time
 * in wl1271_init_ieee80211().
 */
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5927
/*
 * 2.4 GHz band descriptor; a per-device copy of this is made in
 * wl1271_init_ieee80211() before it is handed to mac80211.
 */
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5935
/*
 * 5 GHz (11a, OFDM-only) rate table; .bitrate is in units of 100 kbps
 * and .hw_value(_short) are the firmware's CONF_HW_BIT_RATE_* masks.
 */
/* 5 GHz data rates for WL1273 */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5963
/* 5 GHz band channels for WL1273 */
/* TX power capped at WLCORE_MAX_TXPWR; flags reset at init time. */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5998
/*
 * 5 GHz band descriptor; like the 2.4 GHz one, a per-device copy is
 * made in wl1271_init_ieee80211() before registration with mac80211.
 */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
6005
/* mac80211 callback table for all wlcore-based chips (wl12xx/wl18xx). */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.set_default_unicast_key = wl1271_op_set_default_key_idx,
	.channel_switch = wl12xx_op_channel_switch,
	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.sta_statistics = wlcore_op_sta_statistics,
	.get_expected_throughput = wlcore_op_get_expected_throughput,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
6052
6053
6054u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6055{
6056 u8 idx;
6057
6058 BUG_ON(band >= 2);
6059
6060 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6061 wl1271_error("Illegal RX rate from HW: %d", rate);
6062 return 0;
6063 }
6064
6065 idx = wl->band_rate_to_idx[band][rate];
6066 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6067 wl1271_error("Unsupported RX rate from HW: %d", rate);
6068 return 0;
6069 }
6070
6071 return idx;
6072}
6073
/*
 * Derive the set of WLAN MAC addresses from a base OUI/NIC pair by
 * incrementing the 24-bit NIC part once per supported address, then
 * publish them through wiphy.  If the device supports fewer addresses
 * than WLCORE_NUM_MAC_ADDRESSES, the last slot is synthesized from the
 * first address with the locally-administered (LAA) bit set.
 */
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
	int i;

	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
		     oui, nic);

	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
		wl1271_warning("NIC part of the MAC address wraps around!");

	for (i = 0; i < wl->num_mac_addr; i++) {
		wl->addresses[i].addr[0] = (u8)(oui >> 16);
		wl->addresses[i].addr[1] = (u8)(oui >> 8);
		wl->addresses[i].addr[2] = (u8) oui;
		wl->addresses[i].addr[3] = (u8)(nic >> 16);
		wl->addresses[i].addr[4] = (u8)(nic >> 8);
		wl->addresses[i].addr[5] = (u8) nic;
		nic++;
	}

	/* we may be one address short at the most */
	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

	/*
	 * turn on the LAA bit in the first address and use it as
	 * the last address.
	 */
	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
		memcpy(&wl->addresses[idx], &wl->addresses[0],
		       sizeof(wl->addresses[0]));
		/* LAA bit */
		wl->addresses[idx].addr[0] |= BIT(1);
	}

	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
	wl->hw->wiphy->addresses = wl->addresses;
}
6112
6113static int wl12xx_get_hw_info(struct wl1271 *wl)
6114{
6115 int ret;
6116
6117 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6118 if (ret < 0)
6119 goto out;
6120
6121 wl->fuse_oui_addr = 0;
6122 wl->fuse_nic_addr = 0;
6123
6124 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6125 if (ret < 0)
6126 goto out;
6127
6128 if (wl->ops->get_mac)
6129 ret = wl->ops->get_mac(wl);
6130
6131out:
6132 return ret;
6133}
6134
/*
 * Register the device with mac80211.  Picks the base MAC address in
 * this order: NVS-provided address, then the fused address (when the
 * NVS one is zero or the well-known de:ad:be:ef placeholder), and
 * finally a random NIC with the TI OUI as a last resort.  Idempotent:
 * returns 0 immediately if already registered.
 */
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);

	if (wl->mac80211_registered)
		return 0;

	if (wl->nvs_len >= 12) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		/* MAC bytes are scattered in the NVS header */
		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
		if (!strcmp(pdev_data->family->name, "wl18xx")) {
			wl1271_warning("This default nvs file can be removed from the file system");
		} else {
			wl1271_warning("Your device performance is not optimized.");
			wl1271_warning("Please use the calibrator tool to configure your device.");
		}

		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
			wl1271_warning("Fuse mac address is zero. using random mac");
			/* Use TI oui and a random nic */
			oui_addr = WLCORE_TI_OUI_ADDRESS;
			nic_addr = get_random_int();
		} else {
			oui_addr = wl->fuse_oui_addr;
			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
			nic_addr = wl->fuse_nic_addr + 1;
		}
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
6203
/*
 * Undo wl1271_register_hw(): stop PLT mode if active, then unregister
 * from mac80211 and clear the registration flag.
 */
static void wl1271_unregister_hw(struct wl1271 *wl)
{
	if (wl->plt)
		wl1271_plt_stop(wl);

	ieee80211_unregister_hw(wl->hw);
	wl->mac80211_registered = false;

}
6213
/*
 * Fill in all the static ieee80211_hw/wiphy capabilities before
 * registration: cipher suites, interface modes, scan limits, HW flags,
 * band tables (per-device copies), TX queue layout and probe-response
 * offload.  Always returns 0.
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(wl->hw, AP_LINK_PS);
	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
					 BIT(NL80211_IFTYPE_AP) |
					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
#ifdef CONFIG_MAC80211_MESH
					 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
					 BIT(NL80211_IFTYPE_P2P_GO);

	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_reqs = 1;
	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 30000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
				WIPHY_FLAG_HAS_CHANNEL_SWITCH;

	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	* clear channel flags from the previous usage
	* and restore max_power & max_antenna_gain values.
	*/
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[NL80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[NL80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
		&wl->bands[NL80211_BAND_2GHZ];
	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
		&wl->bands[NL80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

	/* register vendor commands */
	wlcore_set_vendor_commands(wl->hw->wiphy);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
6360
/*
 * Allocate the ieee80211_hw and the wl1271 core state, including the
 * chip-specific private area (@priv_size), the TX aggregation buffer
 * (@aggr_buf_size), the FW log page and the event mailbox buffer
 * (@mbox_size).  Also initializes all work items, queues and locks.
 *
 * Returns the new hw on success or an ERR_PTR on allocation failure;
 * on error every previously acquired resource is unwound in reverse
 * order via the goto chain below.  Freed by wlcore_free_hw().
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	/* chip-family (wl12xx/wl18xx) private data */
	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	/*
	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
	 * we don't allocate any additional resource here, so that's fine.
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WLCORE_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = 0;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = NL80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	/* page-aligned buffer for SDIO/SPI TX aggregation */
	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	/* event mailbox buffer; GFP_DMA because the bus layer DMAs into it */
	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6510
/*
 * Release everything allocated by wlcore_alloc_hw() (plus firmware,
 * NVS and status buffers acquired later), in reverse order.  Setting
 * fwlog_size to -1 first unblocks any reader waiting on the FW log.
 * Always returns 0.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;
	mutex_unlock(&wl->mutex);

	wlcore_sysfs_free(wl);

	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->raw_fw_status);
	kfree(wl->fw_status);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
6545
#ifdef CONFIG_PM
/* Wake-on-WLAN capabilities advertised to cfg80211 when the platform
 * keeps the chip powered in suspend (see wlcore_nvs_cb). */
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
	.flags = WIPHY_WOWLAN_ANY,
	.n_patterns = WL1271_MAX_RX_FILTERS,
	.pattern_min_len = 1,
	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
6554
/*
 * Hard-IRQ half of the interrupt handler, installed only for
 * edge-triggered lines (see wlcore_nvs_cb): just wake the threaded
 * handler (wlcore_irq), which does the real work.
 */
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
	return IRQ_WAKE_THREAD;
}
6559
/*
 * Continuation of wlcore_probe(), invoked asynchronously once the NVS
 * firmware request completes (@fw may be NULL when no NVS file was
 * found).  Performs the remainder of device bring-up: chip setup,
 * IRQ/wake-IRQ configuration, hardware identification, mac80211
 * registration and sysfs setup.  On any failure the steps done so far
 * are unwound via the goto chain; in all cases the nvs_loading_complete
 * completion is signalled so wlcore_probe()/wlcore_remove() can
 * proceed.
 */
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
	struct wl1271 *wl = context;
	struct platform_device *pdev = wl->pdev;
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	struct resource *res;

	int ret;
	irq_handler_t hardirq_fn = NULL;

	if (fw) {
		/* keep a private copy; the firmware object is released below */
		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
		if (!wl->nvs) {
			wl1271_error("Could not allocate nvs data");
			goto out;
		}
		wl->nvs_len = fw->size;
	} else if (pdev_data->family->nvs_name) {
		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
			     pdev_data->family->nvs_name);
		wl->nvs = NULL;
		wl->nvs_len = 0;
	} else {
		wl->nvs = NULL;
		wl->nvs_len = 0;
	}

	ret = wl->ops->setup(wl);
	if (ret < 0)
		goto out_free_nvs;

	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);

	/* adjust some runtime configuration parameters */
	wlcore_adjust_conf(wl);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		wl1271_error("Could not get IRQ resource");
		goto out_free_nvs;
	}

	wl->irq = res->start;
	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
	wl->if_ops = pdev_data->if_ops;

	/* edge-triggered lines need a hard-IRQ half to kick the thread */
	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
		hardirq_fn = wlcore_hardirq;
	else
		wl->irq_flags |= IRQF_ONESHOT;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out_free_nvs;

	ret = wl12xx_get_hw_info(wl);
	if (ret < 0) {
		wl1271_error("couldn't get hw info");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
				   wl->irq_flags, pdev->name, wl);
	if (ret < 0) {
		wl1271_error("interrupt configuration failed");
		wl1271_power_off(wl);
		goto out_free_nvs;
	}

#ifdef CONFIG_PM
	device_init_wakeup(wl->dev, true);

	/* advertise WoWLAN only if the IRQ can wake us and power persists */
	ret = enable_irq_wake(wl->irq);
	if (!ret) {
		wl->irq_wake_enabled = true;
		if (pdev_data->pwr_in_suspend)
			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
	}

	/* optional dedicated wake IRQ (second IRQ resource) */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (res) {
		wl->wakeirq = res->start;
		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
		if (ret)
			wl->wakeirq = -ENODEV;
	} else {
		wl->wakeirq = -ENODEV;
	}
#endif
	disable_irq(wl->irq);
	wl1271_power_off(wl);

	ret = wl->ops->identify_chip(wl);
	if (ret < 0)
		goto out_irq;

	ret = wl1271_init_ieee80211(wl);
	if (ret)
		goto out_irq;

	ret = wl1271_register_hw(wl);
	if (ret)
		goto out_irq;

	ret = wlcore_sysfs_init(wl);
	if (ret)
		goto out_unreg;

	wl->initialized = true;
	goto out;

out_unreg:
	wl1271_unregister_hw(wl);

out_irq:
	if (wl->wakeirq >= 0)
		dev_pm_clear_wake_irq(wl->dev);
	device_init_wakeup(wl->dev, false);
	free_irq(wl->irq, wl);

out_free_nvs:
	kfree(wl->nvs);

out:
	release_firmware(fw);
	complete_all(&wl->nvs_loading_complete);
}
6689
/*
 * Runtime-PM suspend: put the chip into ELP (extremely low power).
 * Skipped entirely in PLT mode or when ELP was not requested; refused
 * with -EBUSY while any in-use vif is not yet in power-save.  Queues
 * recovery and propagates the error if writing the ELP register fails.
 */
static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	struct wl12xx_vif *wlvif;
	int error;

	/* We do not enter elp sleep in PLT mode */
	if (wl->plt)
		return 0;

	/* Nothing to do if no ELP mode requested */
	if (wl->sleep_auth != WL1271_PSM_ELP)
		return 0;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
			return -EBUSY;
	}

	wl1271_debug(DEBUG_PSM, "chip to elp");
	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
	if (error < 0) {
		wl12xx_queue_recovery_work(wl);

		return error;
	}

	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	return 0;
}
6722
/*
 * Runtime-PM resume: wake the chip from ELP.  The wakeup handshake is
 * asynchronous: we register an on-stack completion under wl_lock (the
 * IRQ path signals it), write the wake-up bit, then wait up to
 * WL1271_WAKEUP_TIMEOUT ms.  If the IRQ thread is already running
 * (WL1271_FLAG_IRQ_RUNNING) the wait is skipped, as the wakeup will be
 * observed there.  On write failure or timeout, firmware recovery is
 * queued; a timeout deliberately returns 0 so runtime PM proceeds and
 * recovery can run.
 */
static int __maybe_unused wlcore_runtime_resume(struct device *dev)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	unsigned long start_time = jiffies;
	bool pending = false;
	bool recovery = false;

	/* Nothing to do if no ELP mode requested */
	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	spin_lock_irqsave(&wl->wl_lock, flags);
	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
	if (ret < 0) {
		recovery = true;
		goto err;
	}

	if (!pending) {
		ret = wait_for_completion_timeout(&compl,
			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_warning("ELP wakeup timeout!");

			/* Return no error for runtime PM for recovery */
			ret = 0;
			recovery = true;
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));

	return 0;

err:
	/* detach the on-stack completion before it goes out of scope */
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	if (recovery) {
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}

	return ret;
}
6784
/*
 * Runtime PM only (no system sleep ops here): suspend puts the chip into
 * ELP, resume wakes it back up.  Installed into the driver by
 * wlcore_probe() and cleared again in wlcore_remove().
 */
static const struct dev_pm_ops wlcore_pm_ops = {
	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
			   wlcore_runtime_resume,
			   NULL)
};
6790
/*
 * Common probe entry point, called by the chip-specific (wl12xx/wl18xx)
 * bus drivers.  Kicks off an asynchronous NVS firmware load when the
 * family provides an NVS file name; the remainder of initialization
 * happens in wlcore_nvs_cb().  Also enables autosuspending runtime PM.
 *
 * Returns 0 on success or a negative error code.
 */
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
	const char *nvs_name;
	int ret = 0;

	if (!wl->ops || !wl->ptable || !pdev_data)
		return -EINVAL;

	/* Must be set before the async callback can fire */
	wl->dev = &pdev->dev;
	wl->pdev = pdev;
	platform_set_drvdata(pdev, wl);

	if (pdev_data->family && pdev_data->family->nvs_name) {
		nvs_name = pdev_data->family->nvs_name;
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      nvs_name, &pdev->dev, GFP_KERNEL,
					      wl, wlcore_nvs_cb);
		if (ret < 0) {
			wl1271_error("request_firmware_nowait failed for %s: %d",
				     nvs_name, ret);
			/*
			 * Complete now so wlcore_remove() does not block
			 * forever in wait_for_completion().
			 */
			complete_all(&wl->nvs_loading_complete);
		}
	} else {
		/* No NVS file needed; continue initialization directly */
		wlcore_nvs_cb(NULL, wl);
	}

	/*
	 * NOTE(review): runtime PM is enabled even when the firmware
	 * request above failed and a negative ret is returned — confirm
	 * callers treat that as a successful probe needing remove().
	 */
	wl->dev->driver->pm = &wlcore_pm_ops;
	pm_runtime_set_autosuspend_delay(wl->dev, 50);
	pm_runtime_use_autosuspend(wl->dev);
	pm_runtime_enable(wl->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
6826
6827int wlcore_remove(struct platform_device *pdev)
6828{
6829 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6830 struct wl1271 *wl = platform_get_drvdata(pdev);
6831 int error;
6832
6833 error = pm_runtime_get_sync(wl->dev);
6834 if (error < 0)
6835 dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6836
6837 wl->dev->driver->pm = NULL;
6838
6839 if (pdev_data->family && pdev_data->family->nvs_name)
6840 wait_for_completion(&wl->nvs_loading_complete);
6841 if (!wl->initialized)
6842 return 0;
6843
6844 if (wl->wakeirq >= 0) {
6845 dev_pm_clear_wake_irq(wl->dev);
6846 wl->wakeirq = -ENODEV;
6847 }
6848
6849 device_init_wakeup(wl->dev, false);
6850
6851 if (wl->irq_wake_enabled)
6852 disable_irq_wake(wl->irq);
6853
6854 wl1271_unregister_hw(wl);
6855
6856 pm_runtime_put_sync(wl->dev);
6857 pm_runtime_dont_use_autosuspend(wl->dev);
6858 pm_runtime_disable(wl->dev);
6859
6860 free_irq(wl->irq, wl);
6861 wlcore_free_hw(wl);
6862
6863 return 0;
6864}
6865EXPORT_SYMBOL_GPL(wlcore_remove);
6866
/* Bitmask of DEBUG_* categories enabled in wl1271_debug() output */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* FW logger mode; not writable at runtime (perms 0) */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, dbgpins or disable");

/* -1 (the default for the three params below) means "use driver default" */
module_param(fwlog_mem_blocks, int, 0600);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, 0600);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");