/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE         256
#define MT_MCU_RING_SIZE        32
#define MT_RX_BUF_SIZE          2048

struct mt76_dev;
struct mt76_wcid;

struct mt76_reg_pair {
        u32 reg;
        u32 value;
};

enum mt76_bus_type {
        MT76_BUS_MMIO,
        MT76_BUS_USB,
};

struct mt76_bus_ops {
        u32 (*rr)(struct mt76_dev *dev, u32 offset);
        void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
        u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
        void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
                     int len);
        int (*wr_rp)(struct mt76_dev *dev, u32 base,
                     const struct mt76_reg_pair *rp, int len);
        int (*rd_rp)(struct mt76_dev *dev, u32 base,
                     struct mt76_reg_pair *rp, int len);
        enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
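
/*
 * Illustrative sketch (not upstream code): a chip driver can push a
 * whole table of register writes through the bus abstraction in one
 * burst via wr_rp().  The offsets and values below are made up for
 * the example; real drivers use their chip-specific definitions.
 */
#if 0   /* example only */
static const struct mt76_reg_pair example_init_tab[] = {
        { 0x1004, 0x0001 },     /* hypothetical MAC control */
        { 0x1028, 0x8000 },     /* hypothetical DMA config */
};

static int example_write_init_tab(struct mt76_dev *dev)
{
        /* dispatches to the MMIO or USB implementation transparently */
        return dev->bus->wr_rp(dev, 0, example_init_tab,
                               ARRAY_SIZE(example_init_tab));
}
#endif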

enum mt76_txq_id {
        MT_TXQ_VO = IEEE80211_AC_VO,
        MT_TXQ_VI = IEEE80211_AC_VI,
        MT_TXQ_BE = IEEE80211_AC_BE,
        MT_TXQ_BK = IEEE80211_AC_BK,
        MT_TXQ_PSD,
        MT_TXQ_MCU,
        MT_TXQ_BEACON,
        MT_TXQ_CAB,
        MT_TXQ_FWDL,
        __MT_TXQ_MAX
};

enum mt76_rxq_id {
        MT_RXQ_MAIN,
        MT_RXQ_MCU,
        __MT_RXQ_MAX
};

struct mt76_queue_buf {
        dma_addr_t addr;
        int len;
};

struct mt76_tx_info {
        struct mt76_queue_buf buf[32];
        struct sk_buff *skb;
        int nbuf;
        u32 info;
};

struct mt76_queue_entry {
        union {
                void *buf;
                struct sk_buff *skb;
        };
        union {
                struct mt76_txwi_cache *txwi;
                struct urb *urb;
        };
        enum mt76_txq_id qid;
        bool schedule;
        bool done;
};

struct mt76_queue_regs {
        u32 desc_base;
        u32 ring_size;
        u32 cpu_idx;
        u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
        struct mt76_queue_regs __iomem *regs;

        spinlock_t lock;
        struct mt76_queue_entry *entry;
        struct mt76_desc *desc;

        u16 first;
        u16 head;
        u16 tail;
        int ndesc;
        int queued;
        int buf_size;
        bool stopped;

        u8 buf_offset;
        u8 hw_idx;

        dma_addr_t desc_dma;
        struct sk_buff *rx_head;
        struct page_frag_cache rx_page;
};

struct mt76_sw_queue {
        struct mt76_queue *q;

        struct list_head swq;
        int swq_queued;
};

struct mt76_mcu_ops {
        int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
                            int len, bool wait_resp);
        int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
                         const struct mt76_reg_pair *rp, int len);
        int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
                         struct mt76_reg_pair *rp, int len);
        int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
        int (*init)(struct mt76_dev *dev);

        int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize,
                     u32 ring_base);

        int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
                       struct mt76_queue_buf *buf, int nbufs, u32 info,
                       struct sk_buff *skb, void *txwi);

        int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
                            struct sk_buff *skb, struct mt76_wcid *wcid,
                            struct ieee80211_sta *sta);

        int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
                                struct sk_buff *skb, u32 tx_info);

        void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
                         int *len, u32 *info, bool *more);

        void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

        void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
                           bool flush);

        void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
        MT_WCID_FLAG_CHECK_PS,
        MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE            GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS             GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ       GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET             BIT(31)

struct mt76_wcid {
        struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

        struct work_struct aggr_work;

        unsigned long flags;

        struct ewma_signal rssi;
        int inactive_count;

        u8 idx;
        u8 hw_key_idx;

        u8 sta:1;

        u8 rx_check_pn;
        u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

        u32 tx_info;
        bool sw_iv;

        u8 packet_id;
};
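
/*
 * Illustrative sketch: wcid->tx_info is a packed 32-bit word; the
 * MT_WCID_TX_INFO_* fields above are meant to be filled with
 * FIELD_PREP() from <linux/bitfield.h>.  The rate/NSS values here are
 * arbitrary example inputs, not recommended settings.
 */
#if 0   /* example only */
static void example_set_fixed_rate(struct mt76_wcid *wcid, u16 rate, u8 nss)
{
        wcid->tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rate) |
                        FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
                        MT_WCID_TX_INFO_SET;    /* mark the word as valid */
}
#endif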

struct mt76_txq {
        struct mt76_sw_queue *swq;
        struct mt76_wcid *wcid;

        struct sk_buff_head retry_q;

        u16 agg_ssn;
        bool send_bar;
        bool aggr;
};

struct mt76_txwi_cache {
        struct list_head list;
        dma_addr_t dma_addr;

        struct sk_buff *skb;
};

struct mt76_rx_tid {
        struct rcu_head rcu_head;

        struct mt76_dev *dev;

        spinlock_t lock;
        struct delayed_work reorder_work;

        u16 head;
        u8 size;
        u8 nframes;

        u8 started:1, stopped:1, timer_pending:1;

        struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE               BIT(0)
#define MT_TX_CB_TXS_DONE               BIT(1)
#define MT_TX_CB_TXS_FAILED             BIT(2)

#define MT_PACKET_ID_MASK               GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK             0
#define MT_PACKET_ID_NO_SKB             1
#define MT_PACKET_ID_FIRST              2

#define MT_TX_STATUS_SKB_TIMEOUT        HZ

struct mt76_tx_cb {
        unsigned long jiffies;
        u8 wcid;
        u8 pktid;
        u8 flags;
};

enum {
        MT76_STATE_INITIALIZED,
        MT76_STATE_RUNNING,
        MT76_STATE_MCU_RUNNING,
        MT76_SCANNING,
        MT76_RESET,
        MT76_OFFCHANNEL,
        MT76_REMOVED,
        MT76_READING_STATS,
};

struct mt76_hw_cap {
        bool has_2ghz;
        bool has_5ghz;
};

#define MT_TXWI_NO_FREE                 BIT(0)

struct mt76_driver_ops {
        bool tx_aligned4_skbs;
        u32 txwi_flags;
        u16 txwi_size;

        void (*update_survey)(struct mt76_dev *dev);

        int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
                              enum mt76_txq_id qid, struct mt76_wcid *wcid,
                              struct ieee80211_sta *sta,
                              struct mt76_tx_info *tx_info);

        void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
                                struct mt76_queue_entry *e);

        bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

        void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
                       struct sk_buff *skb);

        void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

        void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
                       bool ps);

        int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);

        void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
                          struct ieee80211_sta *sta);

        void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta);
};

struct mt76_channel_state {
        u64 cc_active;
        u64 cc_busy;
};

struct mt76_sband {
        struct ieee80211_supported_band sband;
        struct mt76_channel_state *chan;
};

struct mt76_rate_power {
        union {
                struct {
                        s8 cck[4];
                        s8 ofdm[8];
                        s8 stbc[10];
                        s8 ht[16];
                        s8 vht[10];
                };
                s8 all[48];
        };
};
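
/*
 * The anonymous union above makes the per-modulation tables (4 CCK +
 * 8 OFDM + 10 STBC + 16 HT + 10 VHT = 48 entries) addressable as one
 * flat all[] array.  A hedged sketch of a bulk adjustment:
 */
#if 0   /* example only */
static void example_apply_power_offset(struct mt76_rate_power *rp, s8 delta)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(rp->all); i++)
                rp->all[i] += delta;
}
#endif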

/* addr req mask */
#define MT_VEND_TYPE_EEPROM     BIT(31)
#define MT_VEND_TYPE_CFG        BIT(30)
#define MT_VEND_TYPE_MASK       (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)   (MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
        MT_VEND_DEV_MODE = 0x1,
        MT_VEND_WRITE = 0x2,
        MT_VEND_MULTI_WRITE = 0x6,
        MT_VEND_MULTI_READ = 0x7,
        MT_VEND_READ_EEPROM = 0x9,
        MT_VEND_WRITE_FCE = 0x42,
        MT_VEND_WRITE_CFG = 0x46,
        MT_VEND_READ_CFG = 0x47,
};

enum mt76u_in_ep {
        MT_EP_IN_PKT_RX,
        MT_EP_IN_CMD_RESP,
        __MT_EP_IN_MAX,
};

enum mt76u_out_ep {
        MT_EP_OUT_INBAND_CMD,
        MT_EP_OUT_AC_BK,
        MT_EP_OUT_AC_BE,
        MT_EP_OUT_AC_VI,
        MT_EP_OUT_AC_VO,
        MT_EP_OUT_HCCA,
        __MT_EP_OUT_MAX,
};

#define MT_SG_MAX_SIZE          8
#define MT_NUM_TX_ENTRIES       256
#define MT_NUM_RX_ENTRIES       128
#define MCU_RESP_URB_SIZE       1024
struct mt76_usb {
        struct mutex usb_ctrl_mtx;
        u8 data[32];

        struct tasklet_struct rx_tasklet;
        struct delayed_work stat_work;

        u8 out_ep[__MT_EP_OUT_MAX];
        u16 out_max_packet;
        u8 in_ep[__MT_EP_IN_MAX];
        u16 in_max_packet;
        bool sg_en;

        struct mt76u_mcu {
                struct mutex mutex;
                u8 *data;
                u32 msg_seq;

                /* multiple reads */
                struct mt76_reg_pair *rp;
                int rp_len;
                u32 base;
                bool burst;
        } mcu;
};

struct mt76_mmio {
        struct mt76e_mcu {
                struct mutex mutex;

                wait_queue_head_t wait;
                struct sk_buff_head res_q;

                u32 msg_seq;
        } mcu;
        void __iomem *regs;
        spinlock_t irq_lock;
        u32 irqmask;
};

struct mt76_dev {
        struct ieee80211_hw *hw;
        struct cfg80211_chan_def chandef;
        struct ieee80211_channel *main_chan;

        spinlock_t lock;
        spinlock_t cc_lock;

        struct mutex mutex;

        const struct mt76_bus_ops *bus;
        const struct mt76_driver_ops *drv;
        const struct mt76_mcu_ops *mcu_ops;
        struct device *dev;

        struct net_device napi_dev;
        spinlock_t rx_lock;
        struct napi_struct napi[__MT_RXQ_MAX];
        struct sk_buff_head rx_skb[__MT_RXQ_MAX];

        struct list_head txwi_cache;
        struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
        struct mt76_queue q_rx[__MT_RXQ_MAX];
        const struct mt76_queue_ops *queue_ops;
        int tx_dma_idx[4];

        struct tasklet_struct tx_tasklet;
        struct delayed_work mac_work;

        wait_queue_head_t tx_wait;
        struct sk_buff_head status_list;

        unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

        struct mt76_wcid global_wcid;
        struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

        u8 macaddr[ETH_ALEN];
        u32 rev;
        unsigned long state;

        u8 antenna_mask;
        u16 chainmask;

        struct tasklet_struct pre_tbtt_tasklet;
        int beacon_int;
        u8 beacon_mask;

        struct mt76_sband sband_2g;
        struct mt76_sband sband_5g;
        struct debugfs_blob_wrapper eeprom;
        struct debugfs_blob_wrapper otp;
        struct mt76_hw_cap cap;

        struct mt76_rate_power rate_power;
        int txpower_conf;
        int txpower_cur;

        u32 debugfs_reg;

        struct led_classdev led_cdev;
        char led_name[32];
        bool led_al;
        u8 led_pin;

        u8 csa_complete;

        u32 rxfilter;

        union {
                struct mt76_mmio mmio;
                struct mt76_usb usb;
        };
};

enum mt76_phy_type {
        MT_PHY_TYPE_CCK,
        MT_PHY_TYPE_OFDM,
        MT_PHY_TYPE_HT,
        MT_PHY_TYPE_HT_GF,
        MT_PHY_TYPE_VHT,
};

struct mt76_rx_status {
        struct mt76_wcid *wcid;

        unsigned long reorder_time;

        u8 iv[6];

        u8 aggr:1;
        u8 tid;
        u16 seqno;

        u16 freq;
        u32 flag;
        u8 enc_flags;
        u8 encoding:2, bw:3;
        u8 rate_idx;
        u8 nss;
        u8 band;
        s8 signal;
        u8 chains;
        s8 chain_signal[IEEE80211_MAX_CHAINS];
};

#define __mt76_rr(dev, ...)     (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)     (dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)    (dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)        (dev)->bus->copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)    __mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)  __mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)       (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)       (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)      (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)  (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)    (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)    (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)     (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define __mt76_mcu_send_msg(dev, ...)   (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)      (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)    (dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)      mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)    mt76_rmw(dev, offset, val, 0)
#define mt76_get_field(_dev, _reg, _field)              \
        FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)        \
        mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)      \
        __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
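
/*
 * Usage sketch for the accessor macros above.  The register offset
 * and field are hypothetical, and "dev" stands for a chip driver
 * device that embeds a struct mt76_dev member named mt76.
 */
#if 0   /* example only */
#define EX_REG_CFG              0x0200          /* made-up register */
#define EX_REG_CFG_TXLEN        GENMASK(11, 0)  /* made-up field */
#define EX_REG_CFG_EN           BIT(31)

        mt76_set(dev, EX_REG_CFG, EX_REG_CFG_EN);
        mt76_rmw_field(dev, EX_REG_CFG, EX_REG_CFG_TXLEN, 256);
        txlen = mt76_get_field(dev, EX_REG_CFG, EX_REG_CFG_TXLEN);
#endif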

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
                 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
                      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
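
/*
 * Polling sketch: spin until a hypothetical busy bit clears, treating
 * expiry of the timeout as a fatal error.  mt76_poll() and
 * mt76_poll_msec() differ only in the granularity of the wait.
 */
#if 0   /* example only */
        if (!mt76_poll_msec(dev, EX_REG_STATUS, EX_REG_STATUS_BUSY, 0, 100))
                return -ETIMEDOUT;
#endif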

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
        return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
        return dev->rev & 0xffff;
}
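
/*
 * Example (illustrative value): with dev->rev == 0x76320044,
 * mt76_chip() yields 0x7632 (chip id, upper 16 bits) and mt76_rev()
 * yields 0x0044 (hardware revision, lower 16 bits).
 */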

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)           (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)      (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)     (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)   (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)       (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
        struct mt76_sband *msband;
        int idx;

        if (c->band == NL80211_BAND_2GHZ)
                msband = &dev->sband_2g;
        else
                msband = &dev->sband_5g;

        idx = c - &msband->sband.channels[0];
        return &msband->chan[idx];
}

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
                                   const struct ieee80211_ops *ops,
                                   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
                         struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
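
/*
 * Hedged lifecycle sketch (not upstream code) showing the usual call
 * order for a chip driver built on this core; example_dev,
 * example_ops, example_drv_ops and example_rates are hypothetical
 * names.  On removal, the mirror sequence is mt76_unregister_device()
 * followed by mt76_free_device().
 */
#if 0   /* example only */
static int example_probe(struct device *pdev)
{
        struct mt76_dev *mdev;
        int ret;

        mdev = mt76_alloc_device(pdev, sizeof(struct example_dev),
                                 &example_ops, &example_drv_ops);
        if (!mdev)
                return -ENOMEM;

        /* ... bus setup, firmware load, EEPROM parsing ... */

        ret = mt76_register_device(mdev, true /* vht */, example_rates,
                                   ARRAY_SIZE(example_rates));
        if (ret)
                mt76_free_device(mdev);

        return ret;
}
#endif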

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
                         s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
        return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
        return (val - 1) & (size - 1);
}
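
/*
 * Note: the mask in mt76_incr()/mt76_decr() only wraps correctly when
 * size is a power of two, which holds for the ring sizes used here
 * (e.g. MT_TX_RING_SIZE).  Example: mt76_incr(255, 256) == 0 and
 * mt76_decr(0, 256) == 255.
 */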

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
        void *ptr = mtxq;

        return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
        void *ptr = wcid;

        if (!wcid || !wcid->sta)
                return NULL;

        return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
                     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
        return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
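
/*
 * Illustrative sketch: how a tx-status path might consult the per-skb
 * control block.  The flag test uses the MT_TX_CB_* bits defined
 * earlier; the surrounding function is hypothetical.
 */
#if 0   /* example only */
static bool example_tx_fully_done(struct sk_buff *skb)
{
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

        /* complete once both DMA and tx status have been reported */
        return (cb->flags & MT_TX_CB_DMA_DONE) &&
               (cb->flags & MT_TX_CB_TXS_DONE);
}
#endif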

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
        int len = ieee80211_get_hdrlen_from_skb(skb);

        if (len % 4 == 0)
                return;

        skb_push(skb, 2);
        memmove(skb->data, skb->data + 2, len);

        skb->data[len] = 0;
        skb->data[len + 1] = 0;
}
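
/*
 * Why the 2-byte push above: when the 802.11 header length is not a
 * multiple of 4, the payload would start misaligned for hardware that
 * expects 4-byte aligned frames (cf. tx_aligned4_skbs).  The helper
 * grows the head by 2 bytes, slides the header down into them and
 * zeroes the 2 bytes now sitting between header and payload; e.g. a
 * 26-byte QoS header ends up followed by 2 pad bytes, so the payload
 * begins at offset 28.
 */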

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
             struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
                         bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
                                  struct ieee80211_sta *sta,
                                  u16 tids, int nframes,
                                  enum ieee80211_frame_release_type reason,
                                  bool more_data);
bool mt76_has_tx_pending(struct mt76_dev *dev);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
                    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
                       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
                         struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
        __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
        __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                           struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
                                       struct mt76_wcid *wcid, int pktid,
                                       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                             struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
                          bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta,
                   enum ieee80211_sta_state old_state,
                   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
                      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
                           struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
        return urb->status &&
               urb->status != -ECONNRESET &&
               urb->status != -ESHUTDOWN &&
               urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

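/*
 * Note on the helper below: a non-NULL actual_len selects the bulk IN
 * (command response) pipe, making the transfer a read; a NULL
 * actual_len selects the bulk OUT (in-band command) pipe for writes.
 */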
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
               int timeout)
{
        struct usb_device *udev = to_usb_device(dev->dev);
        struct mt76_usb *usb = &dev->usb;
        unsigned int pipe;

        if (actual_len)
                pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
        else
                pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

        return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
                   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
                                      unsigned long expires);

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

#endif