/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"
/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

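/* Pick the first unused station id; returns IWL_MVM_INVALID_STA if none */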
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->associated)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

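/*
 * Timer callback for an RX BA session: re-arm if frames were received
 * within twice the session timeout, otherwise notify mac80211 that the
 * session expired.
 */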
static void iwl_mvm_rx_agg_session_expired(unsigned long data)
{
	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

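/* Return a bitmap of the TIDs with an open aggregation on this queue */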
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

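/*
 * Free an inactive queue: unmap it from its station, tear down any
 * aggregations that were running on it and disable it in the SCD.
 */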
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       bool same_sta)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		/* Re-mark the inactive queue as inactive */
		spin_lock_bh(&mvm->queue_info_lock);
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

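/* Choose an already-allocated DATA queue that a new TID can share */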
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		/* Don't try and take queues being reconfigured */
		if (mvm->queue_info[i].status ==
		    IWL_MVM_QUEUE_RECONFIGURING)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	/* Make sure the queue isn't in the middle of being reconfigured */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
		IWL_ERR(mvm,
			"TXQ %d is in the middle of re-config - try again\n",
			queue);
		return -EBUSY;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
			       int ac, int ssn, unsigned int wdg_timeout,
			       bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
			     ssn, wdg_timeout);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

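/* Allocate a queue for a station TID on the new (TVQM) TX path */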
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

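/*
 * Allocate a queue for a station TID on the pre-TVQM TX path, reusing an
 * inactive queue or sharing an existing one if no free queue is available.
 */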
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	bool using_inactive_queue = false, same_sta = false;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED ||
	     mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_INACTIVE)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);

	/*
	 * Check if this queue is already allocated but inactive.
	 * In such a case, we'll need to first free this queue before enabling
	 * it again, so we'll mark it as reserved to make sure no new traffic
	 * arrives on it
	 */
	if (queue > 0 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
				    queue, mvmsta->sta_id, tid);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if ((queue > 0) && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	/*
	 * If this queue was previously inactive (idle) - we need to free it
	 * first
	 */
	if (using_inactive_queue) {
		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
		if (ret)
			return ret;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tid_data[tid].is_tid_active = true;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

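/* Hand ownership of a shared queue to one of its remaining TIDs */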
static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

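/* Turn a queue that is left with a single TID back to unshared */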
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

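/*
 * Transmit (or free, on failure) the frames that were deferred for this
 * station/TID while it had no allocated queue.
 */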
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

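/*
 * Worker to reconfigure queues that need it (unsharing or owner change)
 * and to flush out traffic that was deferred while stations had no queue.
 */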
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int queue, sta_id, tid;

	/* Check inactivity of queues */
	iwl_mvm_inactivity_check(mvm);

	mutex_lock(&mvm->mutex);

	/* No queue reconfiguration in TVQM mode */
	if (iwl_mvm_has_new_tx_api(mvm))
		goto alloc_queues;

	/* Reconfigure queues requiring reconfiguration */
	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
		bool reconfig;
		bool change_owner;

		spin_lock_bh(&mvm->queue_info_lock);
		reconfig = (mvm->queue_info[queue].status ==
			    IWL_MVM_QUEUE_RECONFIGURING);

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 */
		change_owner = !(mvm->queue_info[queue].tid_bitmap &
				 BIT(mvm->queue_info[queue].txq_tid)) &&
			       (mvm->queue_info[queue].status ==
				IWL_MVM_QUEUE_SHARED);
		spin_unlock_bh(&mvm->queue_info_lock);

		if (reconfig)
			iwl_mvm_unshare_queue(mvm, queue);
		else if (change_owner)
			iwl_mvm_change_queue_owner(mvm, queue);
	}

alloc_queues:
	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

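/* Reserve a DATA queue for a station that is being added */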
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;
	bool using_inactive_queue = false, same_sta = false;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * Check for inactive queues, so we don't reach a situation where we
	 * can't add a STA due to a shortage in queues that doesn't really exist
	 */
	iwl_mvm_inactivity_check(mvm);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "No available queues for new station\n");
		return -ENOSPC;
	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
		/*
		 * If this queue is already allocated but inactive we'll need to
		 * first free this queue before enabling it again, we'll mark
		 * it as reserved to make sure no new traffic arrives on it
		 */
		using_inactive_queue = true;
		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	if (using_inactive_queue)
		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

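/* Send ADD_STA to the FW for an internal (driver-managed) station */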
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

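/* Ask the FW to start/stop draining the frames pending for this station */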
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;
	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, mvm->aux_queue,
						    mvm->aux_sta.sta_id,
						    IWL_MAX_TID_COUNT,
						    wdg_timeout);
		mvm->aux_queue = queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = IWL_MVM_TX_FIFO_MCAST,
			.sta_id = mvm->aux_sta.sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
				   wdg_timeout);
	}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For a000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_queue(mvm);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					  mvmvif->id, 0);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For a000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add. */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for a000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    IWL_MAX_TID_COUNT,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE)) {
		/*
		 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
		 * invalid, so make sure we use the queue we want.
		 * Note that this is done here as we want to avoid making DQA
		 * changes in mac80211 layer.
		 */
		if (vif->type == NL80211_IFTYPE_ADHOC) {
			vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
			mvmvif->cab_queue = vif->cab_queue;
		}
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
	}

	return 0;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    IWL_MAX_TID_COUNT, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames
		 * in the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&reorder_buf->entries[j]);
		/*
		 * Prevent timer re-arm. This prevents a very far-fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}
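/*
 * With the multi-queue RX API, frames of one BA session may be spread over
 * several RX queues, so every queue gets its own reorder buffer and timer
 * tracking an independent head SN and set of pending slots.
 */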
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					u32 sta_id,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u8 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		reorder_buf->reorder_timer.function =
			iwl_mvm_reorder_timer_expired;
		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
		init_timer(&reorder_buf->reorder_timer);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->sta_id = sta_id;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&reorder_buf->entries[j]);
	}
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}
	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    sizeof(baid_data->reorder_buf[0]),
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;
	}
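	/*
	 * The same ADD_STA command both opens and closes a session:
	 * modify_mask selects whether the BA TID is added or removed.
	 */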
	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;
	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		setup_timer(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired,
			    (unsigned long)&mvm->baid_map[baid]);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
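		/*
		 * Arm the session timer for twice the negotiated BA timeout;
		 * the expiry handler re-checks last_rx, so a session is only
		 * torn down once it has really been idle that long.
		 */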
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
					    baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
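/*
 * Standard 802.11 user-priority to access-category mapping: TIDs 1 and 2
 * are background, 0 and 3 best effort, 4 and 5 video, 6 and 7 voice.
 */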
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *     one and mark it as reserved
	 *  3. In DQA mode, but no traffic yet on this TID: same treatment as
	 *     in non-DQA mode, since the TXQ hasn't yet been allocated
	 * Don't support case 3 for the new TX path, as it is not expected to
	 * happen and aggregation will be offloaded soon anyway
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (txq_id == IWL_MVM_INVALID_QUEUE) {
			ret = -ENXIO;
			goto release_locks;
		}
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	} else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 IWL_MVM_DQA_MIN_DATA_QUEUE,
						 IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/*
		 * TXQ shouldn't be in inactive mode for non-DQA, so getting
		 * an inactive queue from iwl_mvm_find_free_queue() is
		 * impossible here
		 */
		WARN_ON(mvm->queue_info[txq_id].status ==
			IWL_MVM_QUEUE_INACTIVE);

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

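	/*
	 * Aggregation can only start once everything queued before the ADDBA
	 * has been reclaimed; otherwise park the session in
	 * IWL_EMPTYING_HW_QUEUE_ADDBA and let the reclaim path start it.
	 */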
	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

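	/*
	 * The ucode caps the aggregation frame count per rate-scale entry,
	 * and the limit differs between pre-gen2 and gen2 devices, so clamp
	 * the window the peer advertised accordingly.
	 */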
	if (!mvm->trans->cfg->gen2)
		buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
	else
		buf_size = min_t(int, buf_size,
				 LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
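	/* ssn was captured above for configuring the queue; invalidate the
	 * stored copy so a stale value is never reused
	 */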
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If no queue was found, iwl_mvm_sta_tx_agg_start() would
		 * have failed, so there is no need to check the queue's
		 * status here.
		 */
		if (buf_size < mvmsta->max_agg_bufsize)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}
	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (queue_status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					u16 txq_id)
{
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * In that case no traffic has been sent on this TID (agg'd or not),
	 * so we no longer have use for the queue. Since it hasn't even been
	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;

	spin_unlock_bh(&mvm->queue_info_lock);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}
int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, txq_id);

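	/*
	 * Draining marks the station in the firmware so nothing new is
	 * scheduled to it while the AGG queue is flushed; the mark is lifted
	 * again once the flush has completed.
	 */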
	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}
static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use the AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
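	/*
	 * The v1 and current ADD_STA_KEY layouts share their leading fields,
	 * so one union lets the same code fill either; only the size passed
	 * to the firmware differs (see the union trick comment in
	 * __iwl_mvm_remove_sta_key()).
	 */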
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
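		/*
		 * With the TKIP_MIC_KEYS API the firmware derives per-packet
		 * keys itself from the TX/RX MIC keys and the PN; older
		 * firmware expects the driver to hand over the precomputed
		 * phase-1 key (TTAK) instead.
		 */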
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);
		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}
static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
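		/* mac80211 stores the PN most-significant byte first, so
		 * repack it into the little-endian counter the fw expects
		 */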
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}
static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}
static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (vif->type == NL80211_IFTYPE_AP) {
			ret = -EINVAL;
			break;
		}
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset);
	}

	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
		    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
			ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id,
						    false);
			goto end;
		}

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
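	/*
	 * The counters just updated feed iwl_mvm_set_fw_key_idx(), which
	 * hands out the free offset whose key was deleted longest ago.
	 */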

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

 unlock:
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
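	/*
	 * station_flags stays zero while station_flags_msk selects
	 * STA_FLG_PS, so this modify clears the PS flag, i.e. marks the
	 * station as awake.
	 */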
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}
void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also the multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In A000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}