/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"

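/*
 * Scratch state for the pre-TBTT tasklet: a queue of buffered
 * broadcast/multicast frames collected from all active interfaces, plus a
 * per-vif pointer to the last queued frame so that its more-data flag can
 * be cleared (at most 8 vifs, matching the beacon_mask bits used below).
 */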
struct beacon_bc_data {
	struct mt76x02_dev *dev;
	struct sk_buff_head q;
	struct sk_buff *tail[8];
};

static void
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct sk_buff *skb = NULL;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
	if (!skb)
		return;

	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
}

static void
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct beacon_bc_data *data = priv;
	struct mt76x02_dev *dev = data->dev;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	if (!(dev->beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
	mt76_skb_set_moredata(skb, true);
	__skb_queue_tail(&data->q, skb);
	data->tail[mvif->idx] = skb;
}

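/*
 * Drift compensation, per the comments in the function below: the interval
 * register is programmed in 1/16 TU (64us) units and the hardware timer
 * drifts by about 1us per beacon, so after 63 beacons the interval is
 * shortened by one unit (64us) for a single period to cancel the
 * accumulated drift, then restored, and the cycle starts over.
 */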
static void
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
{
	u32 timer_val = dev->beacon_int << 4;

	dev->tbtt_count++;

	/*
	 * Beacon timer drifts by 1us every tick, the timer is configured
	 * in 1/16 TU (64us) units.
	 */
	if (dev->tbtt_count < 63)
		return;

	/*
	 * The updated beacon interval takes effect after two TBTT, because
	 * at this point the original interval has already been loaded into
	 * the next TBTT_TIMER value
	 */
	if (dev->tbtt_count == 63)
		timer_val -= 1;

	mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
		       MT_BEACON_TIME_CFG_INTVAL, timer_val);

	if (dev->tbtt_count >= 64) {
		dev->tbtt_count = 0;
		return;
	}
}

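/*
 * Pre-TBTT bottom half: resync the beacon timer, refresh the beacon for
 * every vif that has beaconing enabled, then gather buffered
 * broadcast/multicast frames (bounded by the 8-entry tail array) and queue
 * them on the PSD ring. The frames are kicked out at TBTT time from the
 * interrupt handler below. If a channel switch has just completed, beacons
 * are left alone and the tasklet returns early.
 */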
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i, nframes;

	mt76x02_resync_beacon_timer(dev);

	data.dev = dev;
	__skb_queue_head_init(&data.q);

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	do {
		nframes = skb_queue_len(&data.q);
		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt76x02_add_buffered_bc, &data);
	} while (nframes != skb_queue_len(&data.q) &&
		 skb_queue_len(&data.q) < 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
				      NULL);
	}
	spin_unlock_bh(&q->lock);
}

static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->hw_idx = idx;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int ret;

	q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;

	ret = mt76_queue_alloc(dev, q);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

static void mt76x02_tx_tasklet(unsigned long data)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
	int i;

	mt76x02_process_tx_status_fifo(dev);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
}

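/*
 * Ring layout programmed below: one TX ring per access category, one TX
 * ring for PS-buffered/management frames (PSD), one TX ring for MCU
 * commands, plus an MCU RX ring and the main RX ring. The main RX ring's
 * buf_offset reserves headroom in front of each frame for the hardware
 * RXWI descriptor.
 */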
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
	tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
					    mt76_ac_to_hwq(i),
					    MT_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				    MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	return mt76_init_queues(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

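/*
 * Interrupt top half: read and acknowledge the pending sources, then mask
 * each noisy source and defer the real work to a bottom half (NAPI for RX,
 * tasklets for TX/pre-TBTT/DFS). The bottom halves re-enable their
 * interrupt when done, e.g. mt76x02_rx_poll_complete() above for the RX
 * rings.
 */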
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
		return IRQ_NONE;

	trace_dev_irq(dev, intr, dev->mt76.mmio.irqmask);

	intr &= dev->mt76.mmio.irqmask;

	if (intr & MT_INT_TX_DONE_ALL) {
		mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_RX_DONE(0)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
		napi_schedule(&dev->mt76.napi[0]);
	}

	if (intr & MT_INT_RX_DONE(1)) {
		mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
		napi_schedule(&dev->mt76.napi[1]);
	}

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT) {
		mt76x02_mac_poll_tx_status(dev, true);
		tasklet_schedule(&dev->tx_tasklet);
	}

	if (intr & MT_INT_GPTIMER) {
		mt76x02_irq_disable(dev, MT_INT_GPTIMER);
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
	}

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
	dev->mt76.mmio.irqmask &= ~clear;
	dev->mt76.mmio.irqmask |= set;
	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);

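/*
 * The mt76x02_irq_enable()/mt76x02_irq_disable() helpers used throughout
 * this file are thin wrappers around mt76x02_set_irq_mask(); in the driver
 * version this is based on they are roughly equivalent to:
 *
 *	mt76x02_irq_enable(dev, mask)	-> mt76x02_set_irq_mask(dev, 0, mask)
 *	mt76x02_irq_disable(dev, mask)	-> mt76x02_set_irq_mask(dev, mask, 0)
 */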
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
	tasklet_kill(&dev->tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);

void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

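/*
 * Hang detection heuristic: snapshot the hardware DMA index of each AC
 * queue once per watchdog run. A queue that still has frames queued but
 * whose DMA index has not advanced since the previous snapshot is
 * considered stalled.
 */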
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = &dev->mt76.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = ioread32(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
					lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->vif_mask = 0;
	dev->beacon_mask = 0;
}

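/*
 * Full recovery path: quiesce mac80211 queues, tasklets and NAPI, tear the
 * MAC and WPDMA down, optionally restart the MCU firmware (dropping all
 * station/key state via mt76x02_reset_state() in that case), reset all
 * rings and bring the MAC back up before re-enabling interrupts.
 */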
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mt76.state);

	tasklet_disable(&dev->pre_tbtt_tasklet);
	tasklet_disable(&dev->tx_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
		napi_disable(&dev->mt76.napi[i]);

	mutex_lock(&dev->mt76.mutex);

	if (restart)
		mt76x02_reset_state(dev);

	if (dev->beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		dev->mt76.mcu_ops->mcu_restart(&dev->mt76);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mt76.state);

	tasklet_enable(&dev->tx_tasklet);
	tasklet_schedule(&dev->tx_tasklet);

	tasklet_enable(&dev->pre_tbtt_tasklet);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	if (restart) {
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mt76);
	}
}

static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	mutex_lock(&dev->mt76.mmio.mcu.mutex);
	dev->mcu_timeout = 0;
	mutex_unlock(&dev->mt76.mmio.mcu.mutex);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

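/*
 * Watchdog worker: runs periodically, triggers mt76x02_watchdog_reset()
 * after MT_TX_HANG_TH consecutive stalled checks (or on an MCU timeout),
 * and then re-arms itself.
 */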
void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}