1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
4 #include <linux/etherdevice.h>
5 #include <linux/timekeeping.h>
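/* Convert an RCPI field from the RX vector to an RSSI value in dBm:
 * rssi = rcpi / 2 - 110, i.e. RCPI is reported in 0.5 dB steps with a
 * 110 dB offset.
 */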
11 #define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
13 static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
14 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
16 [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
17 [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
18 [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
19 [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
20 [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
21 [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
22 [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
23 [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
27 static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
28 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
30 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
31 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
32 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
33 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
34 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
38 static const struct mt7915_dfs_radar_spec jp_radar_specs = {
39 .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
41 [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
42 [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
43 [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
44 [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
45 [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
46 [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
47 [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
48 [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
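/* Look up the wcid table entry for a received frame. For non-unicast
 * frames that are backed by a station entry, return the owning vif's
 * own wcid (the sta->vif->sta.wcid below) instead.
 */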
52 static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
53 u16 idx, bool unicast)
55 struct mt7915_sta *sta;
56 struct mt76_wcid *wcid;
58 if (idx >= ARRAY_SIZE(dev->mt76.wcid))
61 wcid = rcu_dereference(dev->mt76.wcid[idx]);
68 sta = container_of(wcid, struct mt7915_sta, wcid);
72 return &sta->vif->sta.wcid;
75 void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
79 bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
81 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
82 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
84 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
88 u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
90 mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
91 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
93 return MT_WTBL_LMAC_OFFS(wcid, dw);
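/* Drain the station poll list: read the per-AC TX/RX airtime counters
 * from each station's WTBL entry, report the deltas to mac80211's
 * airtime accounting, and refresh the cached rate/GI info read back
 * from the WTBL.
 */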
96 static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
98 static const u8 ac_to_tid[] = {
99 [IEEE80211_AC_BE] = 0,
100 [IEEE80211_AC_BK] = 1,
101 [IEEE80211_AC_VI] = 4,
102 [IEEE80211_AC_VO] = 6
104 struct ieee80211_sta *sta;
105 struct mt7915_sta *msta;
106 struct rate_info *rate;
107 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
108 LIST_HEAD(sta_poll_list);
111 spin_lock_bh(&dev->sta_poll_lock);
112 list_splice_init(&dev->sta_poll_list, &sta_poll_list);
113 spin_unlock_bh(&dev->sta_poll_lock);
123 spin_lock_bh(&dev->sta_poll_lock);
124 if (list_empty(&sta_poll_list)) {
125 spin_unlock_bh(&dev->sta_poll_lock);
128 msta = list_first_entry(&sta_poll_list,
129 struct mt7915_sta, poll_list);
130 list_del_init(&msta->poll_list);
131 spin_unlock_bh(&dev->sta_poll_lock);
133 idx = msta->wcid.idx;
134 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);
136 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
137 u32 tx_last = msta->airtime_ac[i];
138 u32 rx_last = msta->airtime_ac[i + 4];
140 msta->airtime_ac[i] = mt76_rr(dev, addr);
141 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
143 tx_time[i] = msta->airtime_ac[i] - tx_last;
144 rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
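/* Bit 30 set in a counter snapshot means the hardware counter is
 * close to wrapping; presumably for that reason the admission
 * counters are cleared below and the cached values reset.
 */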
146 if ((tx_last | rx_last) & BIT(30))
153 mt7915_mac_wtbl_update(dev, idx,
154 MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
155 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
161 sta = container_of((void *)msta, struct ieee80211_sta,
163 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
164 u8 q = mt76_connac_lmac_mapping(i);
165 u32 tx_cur = tx_time[q];
166 u32 rx_cur = rx_time[q];
167 u8 tid = ac_to_tid[i];
169 if (!tx_cur && !rx_cur)
172 ieee80211_sta_register_airtime(sta, tid, tx_cur,
/* We don't support reading GI info from txs packets.
 * For accurate tx status reporting and AQL improvement,
 * we need to make sure the flags match, so poll the GI
 * from the per-sta counters directly.
 */
182 rate = &msta->wcid.rate;
183 addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
184 val = mt76_rr(dev, addr);
187 case RATE_INFO_BW_160:
188 bw = IEEE80211_STA_RX_BW_160;
190 case RATE_INFO_BW_80:
191 bw = IEEE80211_STA_RX_BW_80;
193 case RATE_INFO_BW_40:
194 bw = IEEE80211_STA_RX_BW_40;
197 bw = IEEE80211_STA_RX_BW_20;
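/* GI layout in the WTBL rate word, as implied by the arithmetic
 * below: HE uses a 2-bit GI field per bandwidth starting at bit 24,
 * HT/VHT a single short-GI flag at bit (12 + bw).
 */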
201 if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
202 u8 offs = 24 + 2 * bw;
204 rate->he_gi = (val & (0x3 << offs)) >> offs;
205 } else if (rate->flags &
206 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
207 if (val & BIT(12 + bw))
208 rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
210 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
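/* Parse a NORMAL RX descriptor: walk the optional RXD groups, fill in
 * mt76_rx_status, and strip or undo hardware header translation so
 * the frame can be handed to mac80211.
 */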
218 mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
220 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
221 struct mt76_phy *mphy = &dev->mt76.phy;
222 struct mt7915_phy *phy = &dev->phy;
223 struct ieee80211_supported_band *sband;
224 __le32 *rxd = (__le32 *)skb->data;
226 u32 rxd0 = le32_to_cpu(rxd[0]);
227 u32 rxd1 = le32_to_cpu(rxd[1]);
228 u32 rxd2 = le32_to_cpu(rxd[2]);
229 u32 rxd3 = le32_to_cpu(rxd[3]);
230 u32 rxd4 = le32_to_cpu(rxd[4]);
231 u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
232 bool unicast, insert_ccmp_hdr = false;
233 u8 remove_pad, amsdu_info;
234 u8 mode = 0, qos_ctl = 0;
235 struct mt7915_sta *msta = NULL;
242 memset(status, 0, sizeof(*status));
244 if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->band_idx) {
245 mphy = dev->mt76.phys[MT_BAND1];
253 if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
256 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
259 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
260 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
263 /* ICV error or CCMP/BIP/WPI MIC error */
264 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
265 status->flag |= RX_FLAG_ONLY_MONITOR;
267 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
268 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
269 status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
272 msta = container_of(status->wcid, struct mt7915_sta, wcid);
273 spin_lock_bh(&dev->sta_poll_lock);
274 if (list_empty(&msta->poll_list))
275 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
276 spin_unlock_bh(&dev->sta_poll_lock);
279 status->freq = mphy->chandef.chan->center_freq;
280 status->band = mphy->chandef.chan->band;
281 if (status->band == NL80211_BAND_5GHZ)
282 sband = &mphy->sband_5g.sband;
283 else if (status->band == NL80211_BAND_6GHZ)
284 sband = &mphy->sband_6g.sband;
286 sband = &mphy->sband_2g.sband;
288 if (!sband->channels)
291 if ((rxd0 & csum_mask) == csum_mask)
292 skb->ip_summed = CHECKSUM_UNNECESSARY;
294 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
295 status->flag |= RX_FLAG_FAILED_FCS_CRC;
297 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
298 status->flag |= RX_FLAG_MMIC_ERROR;
300 if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
301 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
302 status->flag |= RX_FLAG_DECRYPTED;
303 status->flag |= RX_FLAG_IV_STRIPPED;
304 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
307 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
309 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
313 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
314 u32 v0 = le32_to_cpu(rxd[0]);
315 u32 v2 = le32_to_cpu(rxd[2]);
317 fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
318 qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
319 seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
322 if ((u8 *)rxd - skb->data >= skb->len)
326 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
327 u8 *data = (u8 *)rxd;
329 if (status->flag & RX_FLAG_DECRYPTED) {
330 switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
331 case MT_CIPHER_AES_CCMP:
332 case MT_CIPHER_CCMP_CCX:
333 case MT_CIPHER_CCMP_256:
335 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
338 case MT_CIPHER_TKIP_NO_MIC:
340 case MT_CIPHER_GCMP_256:
341 status->iv[0] = data[5];
342 status->iv[1] = data[4];
343 status->iv[2] = data[3];
344 status->iv[3] = data[2];
345 status->iv[4] = data[1];
346 status->iv[5] = data[0];
353 if ((u8 *)rxd - skb->data >= skb->len)
357 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
358 status->timestamp = le32_to_cpu(rxd[0]);
359 status->flag |= RX_FLAG_MACTIME_START;
361 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
362 status->flag |= RX_FLAG_AMPDU_DETAILS;
364 /* all subframes of an A-MPDU have the same timestamp */
365 if (phy->rx_ampdu_ts != status->timestamp) {
366 if (!++phy->ampdu_ref)
369 phy->rx_ampdu_ts = status->timestamp;
371 status->ampdu_ref = phy->ampdu_ref;
375 if ((u8 *)rxd - skb->data >= skb->len)
379 /* RXD Group 3 - P-RXV */
380 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
386 if ((u8 *)rxd - skb->data >= skb->len)
389 v0 = le32_to_cpu(rxv[0]);
390 v1 = le32_to_cpu(rxv[1]);
392 if (v0 & MT_PRXV_HT_AD_CODE)
393 status->enc_flags |= RX_ENC_FLAG_LDPC;
395 status->chains = mphy->antenna_mask;
396 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
397 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
398 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
399 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
401 /* RXD Group 5 - C-RXV */
402 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
404 if ((u8 *)rxd - skb->data >= skb->len)
408 if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
409 ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
416 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
417 status->amsdu = !!amsdu_info;
419 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
420 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
423 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
424 if (hdr_trans && ieee80211_has_morefrags(fc)) {
425 struct ieee80211_vif *vif;
428 if (!msta || !msta->vif)
431 vif = container_of((void *)msta->vif, struct ieee80211_vif,
433 err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
441 skb_pull(skb, hdr_gap);
442 if (!hdr_trans && status->amsdu) {
443 pad_start = ieee80211_get_hdrlen_from_skb(skb);
444 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
/* When header translation failure is indicated,
 * the hardware will insert an extra 2-byte field
 * containing the data length after the protocol
 * type field.
 */
452 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
455 if (get_unaligned_be16(skb->data + pad_start) !=
456 skb->len - pad_start - 2)
461 memmove(skb->data + 2, skb->data, pad_start);
467 struct ieee80211_hdr *hdr;
469 if (insert_ccmp_hdr) {
470 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
472 mt76_insert_ccmp_hdr(skb, key_id);
475 hdr = mt76_skb_get_hdr(skb);
476 fc = hdr->frame_control;
477 if (ieee80211_is_data_qos(fc)) {
478 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
479 qos_ctl = *ieee80211_get_qos_ctl(hdr);
482 status->flag |= RX_FLAG_8023;
485 if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
486 mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
488 if (!status->wcid || !ieee80211_is_data_qos(fc))
491 status->aggr = unicast &&
492 !ieee80211_is_qos_nullfunc(fc);
493 status->qos_ctl = qos_ctl;
494 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
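/* Testmode-only RX vector parsing: record per-chain RCPI, in-band and
 * wide-band RSSI, plus frequency offset and SNR from the received
 * vector report.
 */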
500 mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
502 #ifdef CONFIG_NL80211_TESTMODE
503 struct mt7915_phy *phy = &dev->phy;
504 __le32 *rxd = (__le32 *)skb->data;
505 __le32 *rxv_hdr = rxd + 2;
506 __le32 *rxv = rxd + 4;
507 u32 rcpi, ib_rssi, wb_rssi, v20, v21;
513 band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
514 if (band_idx && !phy->band_idx) {
515 phy = mt7915_ext_phy(dev);
520 rcpi = le32_to_cpu(rxv[6]);
521 ib_rssi = le32_to_cpu(rxv[7]);
522 wb_rssi = le32_to_cpu(rxv[8]) >> 5;
524 for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
526 wb_rssi = le32_to_cpu(rxv[9]);
528 phy->test.last_rcpi[i] = rcpi & 0xff;
529 phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
530 phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
533 v20 = le32_to_cpu(rxv[20]);
534 v21 = le32_to_cpu(rxv[21]);
536 foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
537 (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);
539 snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;
541 phy->test.last_freq_offset = foe;
542 phy->test.last_snr = snr;
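/* Testmode TXWI fixup: override the rate, bandwidth, GI and LTF
 * fields of the TX descriptor with the values configured via nl80211
 * testmode, bypassing normal rate control.
 */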
549 mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
552 #ifdef CONFIG_NL80211_TESTMODE
553 struct mt76_testmode_data *td = &phy->mt76->test;
554 const struct ieee80211_rate *r;
555 u8 bw, mode, nss = td->tx_rate_nss;
556 u8 rate_idx = td->tx_rate_idx;
562 if (skb != phy->mt76->test.tx_skb)
565 switch (td->tx_rate_mode) {
566 case MT76_TM_TX_MODE_HT:
567 nss = 1 + (rate_idx >> 3);
568 mode = MT_PHY_TYPE_HT;
570 case MT76_TM_TX_MODE_VHT:
571 mode = MT_PHY_TYPE_VHT;
573 case MT76_TM_TX_MODE_HE_SU:
574 mode = MT_PHY_TYPE_HE_SU;
576 case MT76_TM_TX_MODE_HE_EXT_SU:
577 mode = MT_PHY_TYPE_HE_EXT_SU;
579 case MT76_TM_TX_MODE_HE_TB:
580 mode = MT_PHY_TYPE_HE_TB;
582 case MT76_TM_TX_MODE_HE_MU:
583 mode = MT_PHY_TYPE_HE_MU;
585 case MT76_TM_TX_MODE_CCK:
588 case MT76_TM_TX_MODE_OFDM:
589 band = phy->mt76->chandef.chan->band;
590 if (band == NL80211_BAND_2GHZ && !cck)
593 r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
594 val = cck ? r->hw_value_short : r->hw_value;
597 rate_idx = val & 0xff;
600 mode = MT_PHY_TYPE_OFDM;
604 switch (phy->mt76->chandef.width) {
605 case NL80211_CHAN_WIDTH_40:
608 case NL80211_CHAN_WIDTH_80:
611 case NL80211_CHAN_WIDTH_80P80:
612 case NL80211_CHAN_WIDTH_160:
620 if (td->tx_rate_stbc && nss == 1) {
622 rateval |= MT_TX_RATE_STBC;
625 rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
626 FIELD_PREP(MT_TX_RATE_MODE, mode) |
627 FIELD_PREP(MT_TX_RATE_NSS, nss - 1);
629 txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
631 le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
632 if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
633 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
635 val = MT_TXD6_FIXED_BW |
636 FIELD_PREP(MT_TXD6_BW, bw) |
637 FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
638 FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);
/* for HE_SU/HE_EXT_SU PPDU
 * - 1x, 2x, 4x LTF + 0.8us GI
 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
 * for HE_MU PPDU
 * - 2x, 4x LTF + 0.8us GI
 * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
 * for HE_TB PPDU
 * - 1x, 2x LTF + 1.6us GI
 * - 4x LTF + 3.2us GI
 */
650 if (mode >= MT_PHY_TYPE_HE_SU)
651 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
653 if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
656 txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
657 txwi[6] |= cpu_to_le32(val);
658 txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
663 void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
664 struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
665 struct ieee80211_key_conf *key,
666 enum mt76_txq_id qid, u32 changed)
668 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
669 u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
670 struct mt76_phy *mphy = &dev->phy;
672 if (phy_idx && dev->phys[MT_BAND1])
673 mphy = dev->phys[MT_BAND1];
675 mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
677 if (mt76_testmode_enabled(mphy))
678 mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
681 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
682 enum mt76_txq_id qid, struct mt76_wcid *wcid,
683 struct ieee80211_sta *sta,
684 struct mt76_tx_info *tx_info)
686 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
687 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
688 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
689 struct ieee80211_key_conf *key = info->control.hw_key;
690 struct ieee80211_vif *vif = info->control.vif;
691 struct mt76_connac_fw_txp *txp;
692 struct mt76_txwi_cache *t;
693 int id, i, nbuf = tx_info->nbuf - 1;
694 u8 *txwi = (u8 *)txwi_ptr;
697 if (unlikely(tx_info->skb->len <= ETH_HLEN))
701 wcid = &dev->mt76.global_wcid;
704 struct mt7915_sta *msta;
706 msta = (struct mt7915_sta *)sta->drv_priv;
708 if (time_after(jiffies, msta->jiffies + HZ / 4)) {
709 info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
710 msta->jiffies = jiffies;
714 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
715 t->skb = tx_info->skb;
717 id = mt76_token_consume(mdev, &t);
721 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
722 mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
725 txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
726 for (i = 0; i < nbuf; i++) {
727 txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
728 txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
732 txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);
735 txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
737 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
738 ieee80211_is_mgmt(hdr->frame_control))
739 txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
742 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
744 txp->bss_idx = mvif->mt76.idx;
747 txp->token = cpu_to_le16(id);
748 if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
749 txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
751 txp->rept_wds_wcid = cpu_to_le16(0x3ff);
752 tx_info->skb = DMA_DUMMY_DATA;
754 /* pass partial skb header to fw */
755 tx_info->buf[1].len = MT_CT_PARSE_LEN;
756 tx_info->buf[1].skip_unmap = true;
757 tx_info->nbuf = MT_CT_DMA_BUF_NUM;
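/* Pre-populate the TX descriptor and TXP of a WED (Wireless Ethernet
 * Dispatch) TX buffer; returns MT_TXD_SIZE + sizeof(*txp), the offset
 * at which txp->buf[0] expects the payload.
 */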
762 u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
764 struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
768 memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
770 val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
771 FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
772 txwi[0] = cpu_to_le32(val);
774 val = MT_TXD1_LONG_FORMAT |
775 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
776 txwi[1] = cpu_to_le32(val);
778 txp->token = cpu_to_le16(token_id);
780 txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
782 return MT_TXD_SIZE + sizeof(*txp);
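/* Start a BlockAck session the first time QoS data is sent to an
 * HT/HE-capable peer on a given TID; ampdu_state tracks the TIDs that
 * already have one.
 */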
786 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
788 struct mt7915_sta *msta;
792 if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
795 tid = le32_get_bits(txwi[1], MT_TXD1_TID);
796 if (tid >= 6) /* skip VO queue */
799 val = le32_to_cpu(txwi[2]);
800 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
801 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
802 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
805 msta = (struct mt7915_sta *)sta->drv_priv;
806 if (!test_and_set_bit(tid, &msta->ampdu_state))
807 ieee80211_start_tx_ba_session(sta, tid, 0);
811 mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
812 struct ieee80211_sta *sta, struct list_head *free_list)
814 struct mt76_dev *mdev = &dev->mt76;
815 struct mt7915_sta *msta;
816 struct mt76_wcid *wcid;
820 mt76_connac_txp_skb_unmap(mdev, t);
824 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
826 wcid = (struct mt76_wcid *)sta->drv_priv;
827 wcid_idx = wcid->idx;
829 wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
830 wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
832 if (wcid && wcid->sta) {
833 msta = container_of(wcid, struct mt7915_sta, wcid);
834 sta = container_of((void *)msta, struct ieee80211_sta,
836 spin_lock_bh(&dev->sta_poll_lock);
837 if (list_empty(&msta->poll_list))
838 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
839 spin_unlock_bh(&dev->sta_poll_lock);
843 if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
844 mt7915_tx_check_aggr(sta, txwi);
846 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
850 mt76_put_txwi(mdev, t);
854 mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
856 struct mt76_dev *mdev = &dev->mt76;
857 struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
859 /* clean DMA queues and unmap buffers first */
860 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
861 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
863 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
864 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
869 mt7915_mac_tx_free_done(struct mt7915_dev *dev,
870 struct list_head *free_list, bool wake)
872 struct sk_buff *skb, *tmp;
874 mt7915_mac_sta_poll(dev);
877 mt76_set_tx_blocked(&dev->mt76, false);
879 mt76_worker_schedule(&dev->mt76.tx_worker);
881 list_for_each_entry_safe(skb, tmp, free_list, list) {
882 skb_list_del_init(skb);
883 napi_consume_skb(skb, 1);
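/* Handle a TXRX_NOTIFY (v2/v3 format) TX-free event: release every
 * MSDU token listed in the event, complete the corresponding skbs and
 * queue the affected stations for airtime polling.
 */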
888 mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
890 struct mt76_connac_tx_free *free = data;
891 __le32 *tx_info = (__le32 *)(data + sizeof(*free));
892 struct mt76_dev *mdev = &dev->mt76;
893 struct mt76_txwi_cache *txwi;
894 struct ieee80211_sta *sta = NULL;
895 LIST_HEAD(free_list);
896 void *end = data + len;
897 bool v3, wake = false;
898 u16 total, count = 0;
899 u32 txd = le32_to_cpu(free->txd);
902 mt7915_mac_tx_free_prepare(dev);
904 total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
905 v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
906 if (WARN_ON_ONCE((void *)&tx_info[total >> v3] > end))
909 for (cur_info = tx_info; count < total; cur_info++) {
910 u32 msdu, info = le32_to_cpu(*cur_info);
/* 1'b1: new wcid pair.
 * 1'b0: msdu_id with the same 'wcid pair' as above.
 */
917 if (info & MT_TX_FREE_PAIR) {
918 struct mt7915_sta *msta;
919 struct mt76_wcid *wcid;
922 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
923 wcid = rcu_dereference(dev->mt76.wcid[idx]);
924 sta = wcid_to_sta(wcid);
928 msta = container_of(wcid, struct mt7915_sta, wcid);
929 spin_lock_bh(&dev->sta_poll_lock);
930 if (list_empty(&msta->poll_list))
931 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
932 spin_unlock_bh(&dev->sta_poll_lock);
936 if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
939 for (i = 0; i < 1 + v3; i++) {
941 msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
942 if (msdu == MT_TX_FREE_MSDU_ID_V3)
945 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
948 txwi = mt76_token_release(mdev, msdu, &wake);
952 mt7915_txwi_free(dev, txwi, sta, &free_list);
956 mt7915_mac_tx_free_done(dev, &free_list, wake);
960 mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
962 struct mt76_connac_tx_free *free = data;
963 __le16 *info = (__le16 *)(data + sizeof(*free));
964 struct mt76_dev *mdev = &dev->mt76;
965 void *end = data + len;
966 LIST_HEAD(free_list);
970 mt7915_mac_tx_free_prepare(dev);
972 count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
973 if (WARN_ON_ONCE((void *)&info[count] > end))
976 for (i = 0; i < count; i++) {
977 struct mt76_txwi_cache *txwi;
978 u16 msdu = le16_to_cpu(info[i]);
980 txwi = mt76_token_release(mdev, msdu, &wake);
984 mt7915_txwi_free(dev, txwi, NULL, &free_list);
987 mt7915_mac_tx_free_done(dev, &free_list, wake);
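/* Parse a TXS (TX status) report: look up the wcid/packet id it
 * refers to, update the cached rate info or complete the tracked
 * status skb, and schedule the station for polling.
 */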
990 static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
992 struct mt7915_sta *msta = NULL;
993 struct mt76_wcid *wcid;
994 __le32 *txs_data = data;
998 if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
1001 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1002 pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1004 if (pid < MT_PACKET_ID_WED)
1007 if (wcidx >= mt7915_wtbl_size(dev))
1012 wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
1016 msta = container_of(wcid, struct mt7915_sta, wcid);
1018 if (pid == MT_PACKET_ID_WED)
1019 mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
1021 mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
1026 spin_lock_bh(&dev->sta_poll_lock);
1027 if (list_empty(&msta->poll_list))
1028 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
1029 spin_unlock_bh(&dev->sta_poll_lock);
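/* Early filter for RX buffers: TX-free, TXS and firmware monitor
 * events are consumed here so they are not processed as received
 * frames.
 */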
1035 bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
1037 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1038 __le32 *rxd = (__le32 *)data;
1039 __le32 *end = (__le32 *)&rxd[len / 4];
1040 enum rx_pkt_type type;
1042 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1045 case PKT_TYPE_TXRX_NOTIFY:
1046 mt7915_mac_tx_free(dev, data, len);
1048 case PKT_TYPE_TXRX_NOTIFY_V0:
1049 mt7915_mac_tx_free_v0(dev, data, len);
1052 for (rxd += 2; rxd + 8 <= end; rxd += 8)
1053 mt7915_mac_add_txs(dev, rxd);
1055 case PKT_TYPE_RX_FW_MONITOR:
1056 mt7915_debugfs_rx_fw_monitor(dev, data, len);
1063 void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1064 struct sk_buff *skb)
1066 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1067 __le32 *rxd = (__le32 *)skb->data;
1068 __le32 *end = (__le32 *)&skb->data[skb->len];
1069 enum rx_pkt_type type;
1071 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1074 case PKT_TYPE_TXRX_NOTIFY:
1075 mt7915_mac_tx_free(dev, skb->data, skb->len);
1076 napi_consume_skb(skb, 1);
1078 case PKT_TYPE_TXRX_NOTIFY_V0:
1079 mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
1080 napi_consume_skb(skb, 1);
1082 case PKT_TYPE_RX_EVENT:
1083 mt7915_mcu_rx_event(dev, skb);
1085 case PKT_TYPE_TXRXV:
1086 mt7915_mac_fill_rx_vector(dev, skb);
1089 for (rxd += 2; rxd + 8 <= end; rxd += 8)
1090 mt7915_mac_add_txs(dev, rxd);
1093 case PKT_TYPE_RX_FW_MONITOR:
1094 mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
1097 case PKT_TYPE_NORMAL:
1098 if (!mt7915_mac_fill_rx(dev, skb)) {
1099 mt76_rx(&dev->mt76, q, skb);
1109 void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
1111 struct mt7915_dev *dev = phy->dev;
1112 u32 reg = MT_WF_PHY_RX_CTRL1(phy->band_idx);
1114 mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
1115 mt76_set(dev, reg, BIT(11) | BIT(9));
1118 void mt7915_mac_reset_counters(struct mt7915_phy *phy)
1120 struct mt7915_dev *dev = phy->dev;
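/* The TX aggregation counters appear to be read-to-clear; the
 * discarded reads below reset them.
 */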
1123 for (i = 0; i < 4; i++) {
1124 mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
1125 mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
1129 phy->mt76->survey_time = ktime_get_boottime();
1131 i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
1133 memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);
1135 /* reset airtime counters */
1136 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->band_idx),
1137 MT_WF_RMAC_MIB_RXTIME_CLR);
1139 mt7915_mcu_get_chan_mib_info(phy, true);
1142 void mt7915_mac_set_timing(struct mt7915_phy *phy)
1144 s16 coverage_class = phy->coverage_class;
1145 struct mt7915_dev *dev = phy->dev;
1146 struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
1147 u32 val, reg_offset;
1148 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1149 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1150 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1151 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
bool a_band = phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ;
1155 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
1159 coverage_class = max_t(s16, dev->phy.coverage_class,
1160 ext_phy->coverage_class);
1162 mt76_set(dev, MT_ARB_SCR(phy->band_idx),
1163 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1166 offset = 3 * coverage_class;
1167 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1168 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1170 mt76_wr(dev, MT_TMAC_CDTR(phy->band_idx), cck + reg_offset);
1171 mt76_wr(dev, MT_TMAC_ODTR(phy->band_idx), ofdm + reg_offset);
1172 mt76_wr(dev, MT_TMAC_ICR0(phy->band_idx),
1173 FIELD_PREP(MT_IFS_EIFS_OFDM, a_band ? 84 : 78) |
1174 FIELD_PREP(MT_IFS_RIFS, 2) |
1175 FIELD_PREP(MT_IFS_SIFS, 10) |
1176 FIELD_PREP(MT_IFS_SLOT, phy->slottime));
1178 mt76_wr(dev, MT_TMAC_ICR1(phy->band_idx),
1179 FIELD_PREP(MT_IFS_EIFS_CCK, 314));
1181 if (phy->slottime < 20 || a_band)
1182 val = MT7915_CFEND_RATE_DEFAULT;
1184 val = MT7915_CFEND_RATE_11B;
1186 mt76_rmw_field(dev, MT_AGG_ACR0(phy->band_idx), MT_AGG_ACR_CFEND_RATE, val);
1187 mt76_clear(dev, MT_ARB_SCR(phy->band_idx),
1188 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
1191 void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy)
1195 reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(ext_phy) :
1196 MT_WF_PHY_RXTD12_MT7916(ext_phy);
1198 MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
1199 MT_WF_PHY_RXTD12_IRPI_SW_CLR);
1201 reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(ext_phy) :
1202 MT_WF_PHY_RX_CTRL1_MT7916(ext_phy);
1203 mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
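/* Estimate the noise floor from the IRPI histogram: each bucket is
 * weighted by its nominal power level from nf_power (in -dBm, negated
 * when reported) and averaged over all active RX chains.
 */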
1207 mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
1209 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
1210 struct mt7915_dev *dev = phy->dev;
1211 u32 val, sum = 0, n = 0;
1214 for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
1215 u32 reg = is_mt7915(&dev->mt76) ?
1216 MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
1217 MT_WF_IRPI_NSS_MT7916(idx, nss);
1219 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
1220 val = mt76_rr(dev, reg);
1221 sum += val * nf_power[i];
1232 void mt7915_update_channel(struct mt76_phy *mphy)
1234 struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
1235 struct mt76_channel_state *state = mphy->chan_state;
1238 mt7915_mcu_get_chan_mib_info(phy, false);
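/* phy->noise is a running average kept in 1/16 fixed point: adding
 * (nf - avg / 16) each round converges on 16 * nf, and the reported
 * value is the negated average, -(noise >> 4), in dBm.
 */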
1240 nf = mt7915_phy_get_nf(phy, phy->band_idx);
1242 phy->noise = nf << 4;
1244 phy->noise += nf - (phy->noise >> 4);
1246 state->noise = -(phy->noise >> 4);
1250 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
1254 ret = wait_event_timeout(dev->reset_wait,
1255 (READ_ONCE(dev->reset_state) & state),
1256 MT7915_RESET_TIMEOUT);
1258 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1263 mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1265 struct ieee80211_hw *hw = priv;
1267 switch (vif->type) {
1268 case NL80211_IFTYPE_MESH_POINT:
1269 case NL80211_IFTYPE_ADHOC:
1270 case NL80211_IFTYPE_AP:
1271 mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
1272 BSS_CHANGED_BEACON_ENABLED);
1280 mt7915_update_beacons(struct mt7915_dev *dev)
1282 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
1284 ieee80211_iterate_active_interfaces(dev->mt76.hw,
1285 IEEE80211_IFACE_ITER_RESUME_ALL,
1286 mt7915_update_vif_beacon, dev->mt76.hw);
1291 ieee80211_iterate_active_interfaces(mphy_ext->hw,
1292 IEEE80211_IFACE_ITER_RESUME_ALL,
1293 mt7915_update_vif_beacon, mphy_ext->hw);
1297 mt7915_dma_reset(struct mt7915_dev *dev)
1299 struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
1300 u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
1303 mt76_clear(dev, MT_WFDMA0_GLO_CFG,
1304 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
1305 MT_WFDMA0_GLO_CFG_RX_DMA_EN);
1307 if (is_mt7915(&dev->mt76))
1308 mt76_clear(dev, MT_WFDMA1_GLO_CFG,
1309 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
1310 MT_WFDMA1_GLO_CFG_RX_DMA_EN);
1312 mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
1313 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
1314 MT_WFDMA0_GLO_CFG_RX_DMA_EN);
1316 if (is_mt7915(&dev->mt76))
1317 mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
1318 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
1319 MT_WFDMA1_GLO_CFG_RX_DMA_EN);
1322 usleep_range(1000, 2000);
1324 for (i = 0; i < __MT_TXQ_MAX; i++) {
1325 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
1327 mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
1330 for (i = 0; i < __MT_MCUQ_MAX; i++)
1331 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
1333 mt76_for_each_q_rx(&dev->mt76, i)
1334 mt76_queue_rx_reset(dev, i);
1336 mt76_tx_status_check(&dev->mt76, true);
1338 /* re-init prefetch settings after reset */
1339 mt7915_dma_prefetch(dev);
1341 mt76_set(dev, MT_WFDMA0_GLO_CFG,
1342 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
1343 if (is_mt7915(&dev->mt76))
1344 mt76_set(dev, MT_WFDMA1_GLO_CFG,
1345 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
1346 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
1347 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
1348 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
1350 mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
1351 MT_WFDMA0_GLO_CFG_TX_DMA_EN |
1352 MT_WFDMA0_GLO_CFG_RX_DMA_EN);
1354 if (is_mt7915(&dev->mt76))
1355 mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
1356 MT_WFDMA1_GLO_CFG_TX_DMA_EN |
1357 MT_WFDMA1_GLO_CFG_RX_DMA_EN |
1358 MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
1359 MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
1363 void mt7915_tx_token_put(struct mt7915_dev *dev)
1365 struct mt76_txwi_cache *txwi;
1368 spin_lock_bh(&dev->mt76.token_lock);
1369 idr_for_each_entry(&dev->mt76.token, txwi, id) {
1370 mt7915_txwi_free(dev, txwi, NULL, NULL);
1371 dev->mt76.token_count--;
1373 spin_unlock_bh(&dev->mt76.token_lock);
1374 idr_destroy(&dev->mt76.token);
1377 /* system error recovery */
1378 void mt7915_mac_reset_work(struct work_struct *work)
1380 struct mt7915_phy *phy2;
1381 struct mt76_phy *ext_phy;
1382 struct mt7915_dev *dev;
1385 dev = container_of(work, struct mt7915_dev, reset_work);
1386 ext_phy = dev->mt76.phys[MT_BAND1];
1387 phy2 = ext_phy ? ext_phy->priv : NULL;
1389 if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
1392 ieee80211_stop_queues(mt76_hw(dev));
1394 ieee80211_stop_queues(ext_phy->hw);
1396 set_bit(MT76_RESET, &dev->mphy.state);
1397 set_bit(MT76_MCU_RESET, &dev->mphy.state);
1398 wake_up(&dev->mt76.mcu.wait);
1399 cancel_delayed_work_sync(&dev->mphy.mac_work);
1401 set_bit(MT76_RESET, &phy2->mt76->state);
1402 cancel_delayed_work_sync(&phy2->mt76->mac_work);
1404 mt76_worker_disable(&dev->mt76.tx_worker);
1405 mt76_for_each_q_rx(&dev->mt76, i)
1406 napi_disable(&dev->mt76.napi[i]);
1407 napi_disable(&dev->mt76.tx_napi);
1409 mutex_lock(&dev->mt76.mutex);
1411 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
1413 if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
1414 mt7915_dma_reset(dev);
1416 mt7915_tx_token_put(dev);
1417 idr_init(&dev->mt76.token);
1419 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
1420 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
1423 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1424 clear_bit(MT76_RESET, &dev->mphy.state);
1426 clear_bit(MT76_RESET, &phy2->mt76->state);
1429 mt76_for_each_q_rx(&dev->mt76, i) {
1430 napi_enable(&dev->mt76.napi[i]);
1431 napi_schedule(&dev->mt76.napi[i]);
1435 tasklet_schedule(&dev->irq_tasklet);
1437 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
1438 mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
1440 mt76_worker_enable(&dev->mt76.tx_worker);
1443 napi_enable(&dev->mt76.tx_napi);
1444 napi_schedule(&dev->mt76.tx_napi);
1447 ieee80211_wake_queues(mt76_hw(dev));
1449 ieee80211_wake_queues(ext_phy->hw);
1451 mutex_unlock(&dev->mt76.mutex);
1453 mt7915_update_beacons(dev);
1455 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1456 MT7915_WATCHDOG_TIME);
1458 ieee80211_queue_delayed_work(ext_phy->hw,
1459 &phy2->mt76->mac_work,
1460 MT7915_WATCHDOG_TIME);
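/* Accumulate the hardware MIB counters into the driver's mib_stats;
 * this runs from mt7915_mac_work() on every fifth watchdog tick.
 */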
1463 void mt7915_mac_update_stats(struct mt7915_phy *phy)
1465 struct mt7915_dev *dev = phy->dev;
1466 struct mib_stats *mib = &phy->mib;
1467 int i, aggr0, aggr1, cnt;
1470 cnt = mt76_rr(dev, MT_MIB_SDR3(phy->band_idx));
1471 mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
1472 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
1473 FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
1475 cnt = mt76_rr(dev, MT_MIB_SDR4(phy->band_idx));
1476 mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
1478 cnt = mt76_rr(dev, MT_MIB_SDR5(phy->band_idx));
1479 mib->rx_mpdu_cnt += cnt;
1481 cnt = mt76_rr(dev, MT_MIB_SDR6(phy->band_idx));
1482 mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
1484 cnt = mt76_rr(dev, MT_MIB_SDR7(phy->band_idx));
1485 mib->rx_vector_mismatch_cnt +=
1486 FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
1488 cnt = mt76_rr(dev, MT_MIB_SDR8(phy->band_idx));
1489 mib->rx_delimiter_fail_cnt +=
1490 FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
1492 cnt = mt76_rr(dev, MT_MIB_SDR10(phy->band_idx));
1493 mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
1494 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
1495 FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
1497 cnt = mt76_rr(dev, MT_MIB_SDR11(phy->band_idx));
1498 mib->rx_len_mismatch_cnt +=
1499 FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
1501 cnt = mt76_rr(dev, MT_MIB_SDR12(phy->band_idx));
1502 mib->tx_ampdu_cnt += cnt;
1504 cnt = mt76_rr(dev, MT_MIB_SDR13(phy->band_idx));
1505 mib->tx_stop_q_empty_cnt +=
1506 FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
1508 cnt = mt76_rr(dev, MT_MIB_SDR14(phy->band_idx));
1509 mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
1510 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
1511 FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
1513 cnt = mt76_rr(dev, MT_MIB_SDR15(phy->band_idx));
1514 mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
1515 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
1516 FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
1518 cnt = mt76_rr(dev, MT_MIB_SDR16(phy->band_idx));
1519 mib->primary_cca_busy_time +=
1520 FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
1522 cnt = mt76_rr(dev, MT_MIB_SDR17(phy->band_idx));
1523 mib->secondary_cca_busy_time +=
1524 FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
1526 cnt = mt76_rr(dev, MT_MIB_SDR18(phy->band_idx));
1527 mib->primary_energy_detect_time +=
1528 FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
1530 cnt = mt76_rr(dev, MT_MIB_SDR19(phy->band_idx));
1531 mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
1533 cnt = mt76_rr(dev, MT_MIB_SDR20(phy->band_idx));
1534 mib->ofdm_mdrdy_time +=
1535 FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
1537 cnt = mt76_rr(dev, MT_MIB_SDR21(phy->band_idx));
1538 mib->green_mdrdy_time +=
1539 FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
1541 cnt = mt76_rr(dev, MT_MIB_SDR22(phy->band_idx));
1542 mib->rx_ampdu_cnt += cnt;
1544 cnt = mt76_rr(dev, MT_MIB_SDR23(phy->band_idx));
1545 mib->rx_ampdu_bytes_cnt += cnt;
1547 cnt = mt76_rr(dev, MT_MIB_SDR24(phy->band_idx));
1548 mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
1549 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
1550 FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
1552 cnt = mt76_rr(dev, MT_MIB_SDR25(phy->band_idx));
1553 mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
1555 cnt = mt76_rr(dev, MT_MIB_SDR27(phy->band_idx));
1556 mib->tx_rwp_fail_cnt +=
1557 FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
1559 cnt = mt76_rr(dev, MT_MIB_SDR28(phy->band_idx));
1560 mib->tx_rwp_need_cnt +=
1561 FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
1563 cnt = mt76_rr(dev, MT_MIB_SDR29(phy->band_idx));
1564 mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
1565 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
1566 FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
1568 cnt = mt76_rr(dev, MT_MIB_SDRVEC(phy->band_idx));
1569 mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
1570 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
1571 FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
1573 cnt = mt76_rr(dev, MT_MIB_SDR31(phy->band_idx));
1574 mib->rx_ba_cnt += cnt;
1576 cnt = mt76_rr(dev, MT_MIB_SDRMUBF(phy->band_idx));
1577 mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
1579 cnt = mt76_rr(dev, MT_MIB_DR8(phy->band_idx));
1580 mib->tx_mu_mpdu_cnt += cnt;
1582 cnt = mt76_rr(dev, MT_MIB_DR9(phy->band_idx));
1583 mib->tx_mu_acked_mpdu_cnt += cnt;
1585 cnt = mt76_rr(dev, MT_MIB_DR11(phy->band_idx));
1586 mib->tx_su_acked_mpdu_cnt += cnt;
1588 cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(phy->band_idx));
1589 mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
1590 mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
1591 mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
1593 for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
1594 cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
1595 mib->tx_amsdu[i] += cnt;
1596 mib->tx_amsdu_cnt += cnt;
1599 aggr0 = phy->band_idx ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
1600 if (is_mt7915(&dev->mt76)) {
1601 for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
1602 val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 4)));
1604 FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
1605 mib->ack_fail_cnt +=
1606 FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
1608 val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 4)));
1609 mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
1610 mib->rts_retries_cnt +=
1611 FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
1613 val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
1614 dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
1615 dev->mt76.aggr_stats[aggr0++] += val >> 16;
1617 val = mt76_rr(dev, MT_TX_AGG_CNT2(phy->band_idx, i));
1618 dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
1619 dev->mt76.aggr_stats[aggr1++] += val >> 16;
1622 cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
1623 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1625 cnt = mt76_rr(dev, MT_MIB_SDR33(phy->band_idx));
1626 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
1628 cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(phy->band_idx));
1629 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
1630 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
1632 cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(phy->band_idx));
1633 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
1634 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
1636 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(phy->band_idx));
1637 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
1638 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
1639 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
1640 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
1642 for (i = 0; i < 2; i++) {
1644 val = mt76_rr(dev, MT_MIB_MB_SDR0(phy->band_idx, (i << 2)));
1645 mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
1646 mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
1648 /* rts retry count */
1649 val = mt76_rr(dev, MT_MIB_MB_SDR1(phy->band_idx, (i << 2)));
1650 mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
1651 mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
1654 val = mt76_rr(dev, MT_MIB_MB_SDR2(phy->band_idx, (i << 2)));
1655 mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
1656 mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
1658 /* ack fail count */
1659 val = mt76_rr(dev, MT_MIB_MB_BFTF(phy->band_idx, (i << 2)));
1660 mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
1661 mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
1664 for (i = 0; i < 8; i++) {
1665 val = mt76_rr(dev, MT_TX_AGG_CNT(phy->band_idx, i));
1666 dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
1667 dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
1670 cnt = mt76_rr(dev, MT_MIB_SDR32(phy->band_idx));
1671 mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1672 mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
1673 mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1674 mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
1676 cnt = mt76_rr(dev, MT_MIB_BFCR7(phy->band_idx));
1677 mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
1679 cnt = mt76_rr(dev, MT_MIB_BFCR2(phy->band_idx));
1680 mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
1682 cnt = mt76_rr(dev, MT_MIB_BFCR0(phy->band_idx));
1683 mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1684 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
1685 mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1686 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
1688 cnt = mt76_rr(dev, MT_MIB_BFCR1(phy->band_idx));
1689 mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1690 mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
1694 static void mt7915_mac_severe_check(struct mt7915_phy *phy)
1696 struct mt7915_dev *dev = phy->dev;
1697 bool ext_phy = phy != &dev->phy;
1700 if (!phy->omac_mask)
/* In rare cases, the TRB pointers might get out of sync, which leads
 * to RMAC stopping Rx, so check the status periodically to see if the
 * TRB hardware requires minimal recovery.
 */
1707 trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->band_idx));
1709 if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
1710 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
1711 (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
1712 FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
1714 mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
1720 void mt7915_mac_sta_rc_work(struct work_struct *work)
1722 struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
1723 struct ieee80211_sta *sta;
1724 struct ieee80211_vif *vif;
1725 struct mt7915_sta *msta;
1729 spin_lock_bh(&dev->sta_poll_lock);
1730 list_splice_init(&dev->sta_rc_list, &list);
1732 while (!list_empty(&list)) {
1733 msta = list_first_entry(&list, struct mt7915_sta, rc_list);
1734 list_del_init(&msta->rc_list);
1735 changed = msta->changed;
1737 spin_unlock_bh(&dev->sta_poll_lock);
1739 sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
1740 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
1742 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
1743 IEEE80211_RC_NSS_CHANGED |
1744 IEEE80211_RC_BW_CHANGED))
1745 mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);
1747 if (changed & IEEE80211_RC_SMPS_CHANGED)
1748 mt7915_mcu_add_smps(dev, vif, sta);
1750 spin_lock_bh(&dev->sta_poll_lock);
1753 spin_unlock_bh(&dev->sta_poll_lock);
1756 void mt7915_mac_work(struct work_struct *work)
1758 struct mt7915_phy *phy;
1759 struct mt76_phy *mphy;
1761 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
1765 mutex_lock(&mphy->dev->mutex);
1767 mt76_update_survey(mphy);
1768 if (++mphy->mac_work_count == 5) {
1769 mphy->mac_work_count = 0;
1771 mt7915_mac_update_stats(phy);
1772 mt7915_mac_severe_check(phy);
1775 mutex_unlock(&mphy->dev->mutex);
1777 mt76_tx_status_check(mphy->dev, false);
1779 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
1780 MT7915_WATCHDOG_TIME);
1783 static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
1785 struct mt7915_dev *dev = phy->dev;
1787 if (phy->rdd_state & BIT(0))
1788 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
1790 if (phy->rdd_state & BIT(1))
1791 mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
1795 static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
1799 switch (dev->mt76.region) {
1800 case NL80211_DFS_ETSI:
1803 case NL80211_DFS_JP:
1806 case NL80211_DFS_FCC:
1812 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
1813 MT_RX_SEL0, region);
1817 return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
1821 static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
1823 struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1824 struct mt7915_dev *dev = phy->dev;
1828 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, phy->band_idx,
1833 err = mt7915_dfs_start_rdd(dev, phy->band_idx);
1837 phy->rdd_state |= BIT(phy->band_idx);
1839 if (!is_mt7915(&dev->mt76))
1842 if (chandef->width == NL80211_CHAN_WIDTH_160 ||
1843 chandef->width == NL80211_CHAN_WIDTH_80P80) {
1844 err = mt7915_dfs_start_rdd(dev, 1);
1848 phy->rdd_state |= BIT(1);
1855 mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
1857 const struct mt7915_dfs_radar_spec *radar_specs;
1858 struct mt7915_dev *dev = phy->dev;
1861 switch (dev->mt76.region) {
1862 case NL80211_DFS_FCC:
1863 radar_specs = &fcc_radar_specs;
1864 err = mt7915_mcu_set_fcc5_lpn(dev, 8);
1868 case NL80211_DFS_ETSI:
1869 radar_specs = &etsi_radar_specs;
1871 case NL80211_DFS_JP:
1872 radar_specs = &jp_radar_specs;
1878 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
1879 err = mt7915_mcu_set_radar_th(dev, i,
1880 &radar_specs->radar_pattern[i]);
1885 return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
1888 int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
1890 struct mt7915_dev *dev = phy->dev;
1891 enum mt76_dfs_state dfs_state, prev_state;
1894 prev_state = phy->mt76->dfs_state;
1895 dfs_state = mt76_phy_dfs_state(phy->mt76);
1897 if (prev_state == dfs_state)
1900 if (prev_state == MT_DFS_STATE_UNKNOWN)
1901 mt7915_dfs_stop_radar_detector(phy);
1903 if (dfs_state == MT_DFS_STATE_DISABLED)
1906 if (prev_state <= MT_DFS_STATE_DISABLED) {
1907 err = mt7915_dfs_init_radar_specs(phy);
1911 err = mt7915_dfs_start_radar_detector(phy);
1915 phy->mt76->dfs_state = MT_DFS_STATE_CAC;
1918 if (dfs_state == MT_DFS_STATE_CAC)
1921 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
1922 phy->band_idx, MT_RX_SEL0, 0);
1924 phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
1928 phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
1932 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
1933 phy->band_idx, MT_RX_SEL0, 0);
1937 mt7915_dfs_stop_radar_detector(phy);
1938 phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
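/* The TWT wake duration is negotiated in units of 256 us (the setup
 * path only accepts that unit); shifting left by 8 converts it to
 * microseconds for TSF arithmetic.
 */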
1944 mt7915_mac_twt_duration_align(int duration)
1946 return duration << 8;
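/* Insert the flow into the TSF-ordered TWT schedule list, looking for
 * the first gap between existing flows that can hold its duration;
 * returns the start TSF assigned to the flow.
 */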
1950 mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
1951 struct mt7915_twt_flow *flow)
1953 struct mt7915_twt_flow *iter, *iter_next;
1954 u32 duration = flow->duration << 8;
1957 iter = list_first_entry_or_null(&dev->twt_list,
1958 struct mt7915_twt_flow, list);
1959 if (!iter || !iter->sched || iter->start_tsf > duration) {
1960 /* add flow as first entry in the list */
1961 list_add(&flow->list, &dev->twt_list);
1965 list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
1966 start_tsf = iter->start_tsf +
1967 mt7915_mac_twt_duration_align(iter->duration);
1968 if (list_is_last(&iter->list, &dev->twt_list))
1971 if (!iter_next->sched ||
1972 iter_next->start_tsf > start_tsf + duration) {
1973 list_add(&flow->list, &iter->list);
1978 /* add flow as last entry in the list */
1979 list_add_tail(&flow->list, &dev->twt_list);
1984 static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
1986 struct ieee80211_twt_params *twt_agrt;
1987 u64 interval, duration;
1991 /* only individual agreement supported */
1992 if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
1995 /* only 256us unit supported */
1996 if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
1999 twt_agrt = (struct ieee80211_twt_params *)twt->params;
2001 /* explicit agreement not supported */
2002 if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
2005 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
2006 le16_to_cpu(twt_agrt->req_type));
2007 mantissa = le16_to_cpu(twt_agrt->mantissa);
2008 duration = twt_agrt->min_twt_dur << 8;
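/* The wake interval is mantissa * 2^exp; reject the request if it
 * cannot even cover the requested wake duration.
 */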
2010 interval = (u64)mantissa << exp;
2011 if (interval < duration)
2018 mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
2019 struct ieee80211_twt_params *twt_agrt)
2021 u16 type = le16_to_cpu(twt_agrt->req_type);
2025 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
2026 for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
2027 struct mt7915_twt_flow *f;
2029 if (!(msta->twt.flowid_mask & BIT(i)))
2032 f = &msta->twt.flow[i];
2033 if (f->duration == twt_agrt->min_twt_dur &&
2034 f->mantissa == twt_agrt->mantissa &&
2036 f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
2037 f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
2038 f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
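/* Handle a TWT setup request from a station: validate the request,
 * allocate a flow id and table slot, compute the first TWT (TSF) and
 * push the agreement to the firmware via MCU_TWT_AGRT_ADD.
 */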
2045 void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
2046 struct ieee80211_sta *sta,
2047 struct ieee80211_twt_setup *twt)
2049 enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
2050 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
2051 struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
2052 u16 req_type = le16_to_cpu(twt_agrt->req_type);
2053 enum ieee80211_twt_setup_cmd sta_setup_cmd;
2054 struct mt7915_dev *dev = mt7915_hw_dev(hw);
2055 struct mt7915_twt_flow *flow;
2056 int flowid, table_id;
2059 if (mt7915_mac_check_twt_req(twt))
2062 mutex_lock(&dev->mt76.mutex);
2064 if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
2067 if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
2070 if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
2071 setup_cmd = TWT_SETUP_CMD_DICTATE;
2072 twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
2076 flowid = ffs(~msta->twt.flowid_mask) - 1;
2077 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
2078 twt_agrt->req_type |= le16_encode_bits(flowid,
2079 IEEE80211_TWT_REQTYPE_FLOWID);
2081 table_id = ffs(~dev->twt.table_mask) - 1;
2082 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
2083 sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
2085 if (mt7915_mac_twt_param_equal(msta, twt_agrt))
2088 flow = &msta->twt.flow[flowid];
2089 memset(flow, 0, sizeof(*flow));
2090 INIT_LIST_HEAD(&flow->list);
2091 flow->wcid = msta->wcid.idx;
2092 flow->table_id = table_id;
2094 flow->duration = twt_agrt->min_twt_dur;
2095 flow->mantissa = twt_agrt->mantissa;
2097 flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
2098 flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
2099 flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
2101 if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
2102 sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
2103 u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
2104 u64 flow_tsf, curr_tsf;
2108 flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
2109 curr_tsf = __mt7915_get_tsf(hw, msta->vif);
2110 div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
2111 flow_tsf = curr_tsf + interval - rem;
2112 twt_agrt->twt = cpu_to_le64(flow_tsf);
2114 list_add_tail(&flow->list, &dev->twt_list);
2116 flow->tsf = le64_to_cpu(twt_agrt->twt);
2118 if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
2121 setup_cmd = TWT_SETUP_CMD_ACCEPT;
2122 dev->twt.table_mask |= BIT(table_id);
2123 msta->twt.flowid_mask |= BIT(flowid);
2127 mutex_unlock(&dev->mt76.mutex);
2129 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
2130 twt_agrt->req_type |=
2131 le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
twt->control &= IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT |
		IEEE80211_TWT_CONTROL_RX_DISABLED;
2136 void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
2137 struct mt7915_sta *msta,
2140 struct mt7915_twt_flow *flow;
2142 lockdep_assert_held(&dev->mt76.mutex);
2144 if (flowid >= ARRAY_SIZE(msta->twt.flow))
2147 if (!(msta->twt.flowid_mask & BIT(flowid)))
2150 flow = &msta->twt.flow[flowid];
2151 if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
2152 MCU_TWT_AGRT_DELETE))
2155 list_del_init(&flow->list);
2156 msta->twt.flowid_mask &= ~BIT(flowid);
2157 dev->twt.table_mask &= ~BIT(flow->table_id);