2 * This file is part of wlcore
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/pm_wakeirq.h>
34 #include "wl12xx_80211.h"
41 #include "vendor_cmd.h"
46 #define WL1271_BOOT_RETRIES 3
47 #define WL1271_SUSPEND_SLEEP 100
48 #define WL1271_WAKEUP_TIMEOUT 500
50 static char *fwlog_param;
51 static int fwlog_mem_blocks = -1;
52 static int bug_on_recovery = -1;
53 static int no_recovery = -1;
55 static void __wl1271_op_remove_interface(struct wl1271 *wl,
56 struct ieee80211_vif *vif,
57 bool reset_tx_queues);
58 static void wlcore_op_stop_locked(struct wl1271 *wl);
59 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the firmware that the station on a STA-role vif has completed
 * association/authorization (sets the peer state for the STA's hlid).
 * Guarded so the state is only sent once per association.
 * NOTE(review): intervening lines (ret declaration, early returns) are
 * elided in this view — confirm against the full source.
 */
61 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* only meaningful for a STA-type BSS */
65 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
/* nothing to do until the vif is actually associated */
68 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* already sent the peer state once — don't repeat */
71 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
74 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid)
78 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the new DFS region from the
 * regulatory request and push the updated regdomain to the firmware.
 */
82 static void wl1271_reg_notify(struct wiphy *wiphy,
83 struct regulatory_request *request)
85 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 struct wl1271 *wl = hw->priv;
88 /* copy the current dfs region */
90 wl->dfs_region = request->dfs_region;
/* apply the new regulatory domain configuration to the chip */
92 wlcore_regdomain_config(wl);
/*
 * Enable/disable RX streaming (PS RX streaming ACX) for a vif and keep
 * the WLVIF_FLAG_RX_STREAMING_STARTED flag in sync with the result.
 * Caller must hold wl->mutex.
 */
95 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
100 /* we should hold wl->mutex */
101 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
/* track the streaming state in the vif flags on success */
106 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
108 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 * this function is being called when the rx_streaming interval
115 * has beed changed or rx_streaming should be disabled
117 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
120 int period = wl->conf.rx_streaming.interval;
122 /* don't reconfigure if rx_streaming is disabled */
123 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
126 /* reconfigure/disable according to new streaming_period */
/* re-enable only while associated and (always-on or soft-gemini active) */
128 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
129 (wl->conf.rx_streaming.always ||
130 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
131 ret = wl1271_set_rx_streaming(wl, wlvif, true);
133 ret = wl1271_set_rx_streaming(wl, wlvif, false);
134 /* don't cancel_work_sync since we might deadlock */
135 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Work item: turn RX streaming on for a vif, then arm a timer to turn
 * it back off after the configured inactivity duration.  Takes
 * wl->mutex and a runtime-PM reference around the firmware command.
 */
141 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
144 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
145 rx_streaming_enable_work);
146 struct wl1271 *wl = wlvif->wl;
148 mutex_lock(&wl->mutex);
/* skip if already started, not associated, or streaming not wanted now */
150 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
151 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
152 (!wl->conf.rx_streaming.always &&
153 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
/* interval of 0 means RX streaming is disabled by configuration */
156 if (!wl->conf.rx_streaming.interval)
159 ret = pm_runtime_get_sync(wl->dev);
/* drop the usage count taken by the failed get_sync */
161 pm_runtime_put_noidle(wl->dev);
165 ret = wl1271_set_rx_streaming(wl, wlvif, true);
169 /* stop it after some time of inactivity */
170 mod_timer(&wlvif->rx_streaming_timer,
171 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
174 pm_runtime_mark_last_busy(wl->dev);
175 pm_runtime_put_autosuspend(wl->dev);
177 mutex_unlock(&wl->mutex);
/*
 * Work item: turn RX streaming off for a vif (queued by the inactivity
 * timer).  Takes wl->mutex and a runtime-PM reference around the
 * firmware command; no-op if streaming was not started.
 */
180 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
183 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
184 rx_streaming_disable_work);
185 struct wl1271 *wl = wlvif->wl;
187 mutex_lock(&wl->mutex);
189 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
192 ret = pm_runtime_get_sync(wl->dev);
/* drop the usage count taken by the failed get_sync */
194 pm_runtime_put_noidle(wl->dev);
198 ret = wl1271_set_rx_streaming(wl, wlvif, false);
203 pm_runtime_mark_last_busy(wl->dev);
204 pm_runtime_put_autosuspend(wl->dev);
206 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: defer the actual firmware command to the
 * disable work item (can't talk to the chip from timer context).
 */
209 static void wl1271_rx_streaming_timer(struct timer_list *t)
211 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
216 /* wl->mutex must be taken */
/*
 * Re-arm the TX watchdog delayed work for another timeout period, but
 * only while there are still TX blocks allocated in the firmware.
 */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 /* if the watchdog is not armed, don't do anything */
220 if (wl->tx_allocated_blocks == 0)
/* restart the countdown from now */
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Work item: push an updated rate-control / HT capability configuration
 * to the firmware after mac80211 reported a station rate change.  Mesh
 * vifs get an explicit HT-capabilities ACX; otherwise the chip-specific
 * sta_rc_update hook is used.
 */
228 static void wlcore_rc_update_work(struct work_struct *work)
231 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
233 struct wl1271 *wl = wlvif->wl;
234 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
236 mutex_lock(&wl->mutex);
/* bail out if the chip is not fully operational */
238 if (unlikely(wl->state != WLCORE_STATE_ON))
241 ret = pm_runtime_get_sync(wl->dev);
243 pm_runtime_put_noidle(wl->dev);
247 if (ieee80211_vif_is_mesh(vif)) {
248 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
249 true, wlvif->sta.hlid);
253 wlcore_hw_sta_rc_update(wl, wlvif);
257 pm_runtime_mark_last_busy(wl->dev);
258 pm_runtime_put_autosuspend(wl->dev);
260 mutex_unlock(&wl->mutex);
/*
 * TX watchdog: fires when no TX blocks were freed by the firmware for
 * tx_watchdog_timeout ms.  Re-arms (instead of recovering) in the
 * benign cases — ROC in progress, active scan, or an AP with connected
 * stations that may legitimately cache frames.  Otherwise TX is
 * considered stuck and a firmware recovery is queued.
 */
263 static void wl12xx_tx_watchdog_work(struct work_struct *work)
265 struct delayed_work *dwork;
268 dwork = to_delayed_work(work);
269 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
271 mutex_lock(&wl->mutex);
273 if (unlikely(wl->state != WLCORE_STATE_ON))
276 /* Tx went out in the meantime - everything is ok */
277 if (unlikely(wl->tx_allocated_blocks == 0))
281 * if a ROC is in progress, we might not have any Tx for a long
282 * time (e.g. pending Tx on the non-ROC channels)
284 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
285 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
286 wl->conf.tx.tx_watchdog_timeout);
287 wl12xx_rearm_tx_watchdog_locked(wl);
292 * if a scan is in progress, we might not have any Tx for a long
295 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
297 wl->conf.tx.tx_watchdog_timeout);
298 wl12xx_rearm_tx_watchdog_locked(wl);
303 * AP might cache a frame for a long time for a sleeping station,
304 * so rearm the timer if there's an AP interface with stations. If
305 * Tx is genuinely stuck we will most hopefully discover it when all
306 * stations are removed due to inactivity.
308 if (wl->active_sta_count) {
309 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
311 wl->conf.tx.tx_watchdog_timeout,
312 wl->active_sta_count);
313 wl12xx_rearm_tx_watchdog_locked(wl);
/* no benign explanation left — treat TX as stuck and recover */
317 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
318 wl->conf.tx.tx_watchdog_timeout);
319 wl12xx_queue_recovery_work(wl);
322 mutex_unlock(&wl->mutex);
/*
 * Apply module-parameter overrides (fwlog_param, bug_on_recovery,
 * no_recovery) on top of the default driver configuration.  Unknown
 * fwlog_param values are reported but otherwise ignored.
 */
325 static void wlcore_adjust_conf(struct wl1271 *wl)
329 if (!strcmp(fwlog_param, "continuous")) {
330 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
331 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
332 } else if (!strcmp(fwlog_param, "dbgpins")) {
333 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
334 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
335 } else if (!strcmp(fwlog_param, "disable")) {
336 wl->conf.fwlog.mem_blocks = 0;
337 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
339 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
/* -1 means "not set on the command line" — keep the default */
343 if (bug_on_recovery != -1)
344 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
346 if (no_recovery != -1)
347 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Regulate host-side (high-level) power save for one AP link based on
 * the firmware's PS bitmap and the number of packets still queued in
 * the FW for that link.
 */
350 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
351 struct wl12xx_vif *wlvif,
/* is the FW reporting this link as asleep? */
356 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
359 * Wake up from high level PS if the STA is asleep with too little
360 * packets in FW or if the STA is awake.
362 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_end(wl, wlvif, hlid);
366 * Start high-level PS if the STA is asleep with enough blocks in FW.
367 * Make an exception if this is the only connected link. In this
368 * case FW-memory congestion is less of a problem.
369 * Note that a single connected STA means 2*ap_count + 1 active links,
370 * since we must account for the global and broadcast AP links
371 * for each AP. The "fw_ps" check assures us the other link is a STA
372 * connected to the AP. Otherwise the FW would not set the PSM bit.
374 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
375 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
376 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * On FW status update: refresh the cached per-link PS bitmap for an AP
 * vif and re-evaluate high-level PS for each connected station link.
 */
379 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
380 struct wl12xx_vif *wlvif,
381 struct wl_fw_status *status)
383 unsigned long cur_fw_ps_map;
386 cur_fw_ps_map = status->link_ps_bitmap;
/* log and cache PS-map changes for debugging */
387 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
388 wl1271_debug(DEBUG_PSM,
389 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
390 wl->ap_fw_ps_map, cur_fw_ps_map,
391 wl->ap_fw_ps_map ^ cur_fw_ps_map);
393 wl->ap_fw_ps_map = cur_fw_ps_map;
/* regulate PS per connected station, using its FW-allocated packets */
396 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
397 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
398 wl->links[hlid].allocated_pkts);
/*
 * Read and process the firmware status block: fetch it over the bus,
 * convert it to host layout, update per-queue/per-link freed-packet
 * counters (with 8-bit wrap handling), account freed TX blocks,
 * (re)arm or cancel the TX watchdog, recompute available TX blocks,
 * refresh AP link PS state, and update the host/chip time offset.
 */
401 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
403 struct wl12xx_vif *wlvif;
404 u32 old_tx_blk_count = wl->tx_blocks_available;
405 int avail, freed_blocks;
408 struct wl1271_link *lnk;
/* raw read of the FW status area, then chip-specific conversion */
410 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
412 wl->fw_status_len, false);
416 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
418 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
419 "drv_rx_counter = %d, tx_results_counter = %d)",
421 status->fw_rx_counter,
422 status->drv_rx_counter,
423 status->tx_results_counter);
425 for (i = 0; i < NUM_TX_QUEUES; i++) {
426 /* prevent wrap-around in freed-packets counter */
427 wl->tx_allocated_pkts[i] -=
428 (status->counters.tx_released_pkts[i] -
429 wl->tx_pkts_freed[i]) & 0xff;
431 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
/* same accounting per active link */
435 for_each_set_bit(i, wl->links_map, wl->num_links) {
439 /* prevent wrap-around in freed-packets counter */
440 diff = (status->counters.tx_lnk_free_pkts[i] -
441 lnk->prev_freed_pkts) & 0xff;
446 lnk->allocated_pkts -= diff;
447 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
449 /* accumulate the prev_freed_pkts counter */
450 lnk->total_freed_pkts += diff;
453 /* prevent wrap-around in total blocks counter */
454 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
455 freed_blocks = status->total_released_blks -
/* 32-bit counter wrapped: compute the freed delta across the wrap */
458 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
459 status->total_released_blks;
461 wl->tx_blocks_freed = status->total_released_blks;
463 wl->tx_allocated_blocks -= freed_blocks;
466 * If the FW freed some blocks:
467 * If we still have allocated blocks - re-arm the timer, Tx is
468 * not stuck. Otherwise, cancel the timer (no Tx currently).
471 if (wl->tx_allocated_blocks)
472 wl12xx_rearm_tx_watchdog_locked(wl);
474 cancel_delayed_work(&wl->tx_watchdog_work);
477 avail = status->tx_total - wl->tx_allocated_blocks;
480 * The FW might change the total number of TX memblocks before
481 * we get a notification about blocks being released. Thus, the
482 * available blocks calculation might yield a temporary result
483 * which is lower than the actual available blocks. Keeping in
484 * mind that only blocks that were allocated can be moved from
485 * TX to RX, tx_blocks_available should never decrease here.
487 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
490 /* if more blocks are available now, tx work can be scheduled */
491 if (wl->tx_blocks_available > old_tx_blk_count)
492 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
494 /* for AP update num of allocated TX blocks per link and ps status */
495 wl12xx_for_each_wlvif_ap(wl, wlvif) {
496 wl12xx_irq_update_links_status(wl, wlvif, status);
499 /* update the host-chipset time offset */
500 wl->time_offset = (ktime_get_boot_ns() >> 10) -
501 (s64)(status->fw_localtime);
/* cache the firmware's fast-link bitmap for TX path decisions */
503 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain the deferred RX and TX-status queues into the network stack.
 * Uses the _ni variants since this runs in process context.
 */
508 static void wl1271_flush_deferred_work(struct wl1271 *wl)
512 /* Pass all received frames to the network stack */
513 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
514 ieee80211_rx_ni(wl->hw, skb);
516 /* Return sent skbs to the network stack */
517 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
518 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item: repeatedly flush deferred frames until the RX queue is
 * empty (frames may be re-queued while we flush).
 */
521 static void wl1271_netstack_work(struct work_struct *work)
524 container_of(work, struct wl1271, netstack_work);
527 wl1271_flush_deferred_work(wl);
528 } while (skb_queue_len(&wl->deferred_rx_queue));
531 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt servicing loop, called with wl->mutex held.  Reads the
 * FW status, then dispatches on the pending interrupt bits: watchdog
 * (HW/SW) -> recovery; DATA -> RX path, inline TX push and delayed TX
 * completion; EVENT_A/B -> event mailbox handling.  Iterates up to
 * WL1271_IRQ_MAX_LOOPS (level-triggered) or once (edge-triggered), and
 * holds a runtime-PM reference for the duration.
 */
533 static int wlcore_irq_locked(struct wl1271 *wl)
537 int loopcount = WL1271_IRQ_MAX_LOOPS;
539 unsigned int defer_count;
543 * In case edge triggered interrupt must be used, we cannot iterate
544 * more than once without introducing race conditions with the hardirq.
546 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
549 wl1271_debug(DEBUG_IRQ, "IRQ work");
551 if (unlikely(wl->state != WLCORE_STATE_ON))
554 ret = pm_runtime_get_sync(wl->dev);
556 pm_runtime_put_noidle(wl->dev);
560 while (!done && loopcount--) {
562 * In order to avoid a race with the hardirq, clear the flag
563 * before acknowledging the chip.
565 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
566 smp_mb__after_atomic();
568 ret = wlcore_fw_status(wl, wl->fw_status);
/* complete any immediate TX results before handling interrupts */
572 wlcore_hw_tx_immediate_compl(wl);
574 intr = wl->fw_status->intr;
575 intr &= WLCORE_ALL_INTR_MASK;
581 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
582 wl1271_error("HW watchdog interrupt received! starting recovery.");
583 wl->watchdog_recovery = true;
586 /* restarting the chip. ignore any other interrupt. */
590 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
591 wl1271_error("SW watchdog interrupt received! "
592 "starting recovery.");
593 wl->watchdog_recovery = true;
596 /* restarting the chip. ignore any other interrupt. */
600 if (likely(intr & WL1271_ACX_INTR_DATA)) {
601 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
603 ret = wlcore_rx(wl, wl->fw_status);
607 /* Check if any tx blocks were freed */
608 spin_lock_irqsave(&wl->wl_lock, flags);
609 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
610 wl1271_tx_total_queue_count(wl) > 0) {
611 spin_unlock_irqrestore(&wl->wl_lock, flags);
613 * In order to avoid starvation of the TX path,
614 * call the work function directly.
616 ret = wlcore_tx_work_locked(wl);
620 spin_unlock_irqrestore(&wl->wl_lock, flags);
623 /* check for tx results */
624 ret = wlcore_hw_tx_delayed_compl(wl);
628 /* Make sure the deferred queues don't get too long */
629 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
630 skb_queue_len(&wl->deferred_rx_queue);
631 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
632 wl1271_flush_deferred_work(wl);
635 if (intr & WL1271_ACX_INTR_EVENT_A) {
636 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
637 ret = wl1271_event_handle(wl, 0);
642 if (intr & WL1271_ACX_INTR_EVENT_B) {
643 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
644 ret = wl1271_event_handle(wl, 1);
649 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
650 wl1271_debug(DEBUG_IRQ,
651 "WL1271_ACX_INTR_INIT_COMPLETE");
653 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
654 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
657 pm_runtime_mark_last_busy(wl->dev);
658 pm_runtime_put_autosuspend(wl->dev);
/*
 * Threaded IRQ handler.  Completes a pending ELP wakeup, defers the
 * work if we are suspended (marking it pending and disabling the IRQ),
 * otherwise services interrupts under wl->mutex and re-queues TX work
 * if anything is still pending afterwards.
 */
664 static irqreturn_t wlcore_irq(int irq, void *cookie)
668 struct wl1271 *wl = cookie;
670 /* complete the ELP completion */
671 spin_lock_irqsave(&wl->wl_lock, flags);
672 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
674 complete(wl->elp_compl);
675 wl->elp_compl = NULL;
678 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
679 /* don't enqueue a work right now. mark it as pending */
680 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
681 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
682 disable_irq_nosync(wl->irq);
683 pm_wakeup_event(wl->dev, 0);
684 spin_unlock_irqrestore(&wl->wl_lock, flags);
687 spin_unlock_irqrestore(&wl->wl_lock, flags);
689 /* TX might be handled here, avoid redundant work */
690 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
691 cancel_work_sync(&wl->tx_work);
693 mutex_lock(&wl->mutex);
695 ret = wlcore_irq_locked(wl);
/* any error while servicing the IRQ triggers a firmware recovery */
697 wl12xx_queue_recovery_work(wl);
699 spin_lock_irqsave(&wl->wl_lock, flags);
700 /* In case TX was not handled here, queue TX work */
701 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
702 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
703 wl1271_tx_total_queue_count(wl) > 0)
704 ieee80211_queue_work(wl->hw, &wl->tx_work);
705 spin_unlock_irqrestore(&wl->wl_lock, flags);
707 mutex_unlock(&wl->mutex);
/*
 * Accumulator for counting active interfaces via the mac80211 iterator;
 * cur_vif_running is set when cur_vif is among the active vifs.
 * NOTE(review): some members are elided in this view.
 */
712 struct vif_counter_data {
715 struct ieee80211_vif *cur_vif;
716 bool cur_vif_running;
/*
 * Iterator callback for wl12xx_get_vif_count(): flags whether the
 * caller's vif is currently running (counter increment elided here).
 */
719 static void wl12xx_vif_count_iter(void *data, u8 *mac,
720 struct ieee80211_vif *vif)
722 struct vif_counter_data *counter = data;
725 if (counter->cur_vif == vif)
726 counter->cur_vif_running = true;
729 /* caller must not hold wl->mutex, as it might deadlock */
/*
 * Count the active interfaces and note whether cur_vif is among them,
 * filling *data.  Uses the mac80211 active-interface iterator.
 */
730 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
731 struct ieee80211_vif *cur_vif,
732 struct vif_counter_data *data)
734 memset(data, 0, sizeof(*data));
735 data->cur_vif = cur_vif;
737 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
738 wl12xx_vif_count_iter, data);
/*
 * Select and load the right firmware image (PLT, multi-role, or
 * single-role) into wl->fw.  Skips the request entirely if the correct
 * type is already loaded.  The image size must be 32-bit aligned.
 */
741 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
743 const struct firmware *fw;
745 enum wl12xx_fw_type fw_type;
749 fw_type = WL12XX_FW_TYPE_PLT;
750 fw_name = wl->plt_fw_name;
753 * we can't call wl12xx_get_vif_count() here because
754 * wl->mutex is taken, so use the cached last_vif_count value
756 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
757 fw_type = WL12XX_FW_TYPE_MULTI;
758 fw_name = wl->mr_fw_name;
760 fw_type = WL12XX_FW_TYPE_NORMAL;
761 fw_name = wl->sr_fw_name;
/* already have the right image loaded — nothing to do */
765 if (wl->fw_type == fw_type)
768 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
770 ret = request_firmware(&fw, fw_name, wl->dev);
773 wl1271_error("could not get firmware %s: %d", fw_name, ret);
778 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate the old image before replacing the buffer */
785 wl->fw_type = WL12XX_FW_TYPE_NONE;
786 wl->fw_len = fw->size;
787 wl->fw = vmalloc(wl->fw_len);
790 wl1271_error("could not allocate memory for the firmware");
795 memcpy(wl->fw, fw->data, wl->fw_len);
797 wl->fw_type = fw_type;
/* the kernel's copy is no longer needed once we've duplicated it */
799 release_firmware(fw);
/*
 * Queue a firmware recovery, unless one is already in progress (state
 * is only moved to RESTARTING from ON, preventing recursion).  Warns
 * if the recovery was not explicitly intended.
 */
804 void wl12xx_queue_recovery_work(struct wl1271 *wl)
806 /* Avoid a recursive recovery */
807 if (wl->state == WLCORE_STATE_ON) {
808 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
811 wl->state = WLCORE_STATE_RESTARTING;
812 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
813 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append a firmware log memory block to the in-memory fwlog buffer
 * (exported via sysfs), clamped so the buffer never exceeds PAGE_SIZE.
 * Returns the number of bytes actually copied.
 */
817 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
821 /* Make sure we have enough room */
822 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
824 /* Fill the FW log file, consumed by the sysfs fwlog entry */
825 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
826 wl->fwlog_size += len;
/*
 * On a firmware panic, pull the remaining FW log from the chip: wake
 * the device, optionally stop the logger, then walk the logger's
 * memory-block chain until wlcore_event_fw_logger() reports no more
 * data.  Skipped entirely if the fwlog quirk says it's unsupported.
 */
831 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
836 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
839 wl1271_info("Reading FW panic log");
842 * Make sure the chip is awake and the logger isn't active.
843 * Do not send a stop fwlog command if the fw is hanged or if
844 * dbgpins are used (due to some fw bug).
846 error = pm_runtime_get_sync(wl->dev);
848 pm_runtime_put_noidle(wl->dev);
851 if (!wl->watchdog_recovery &&
852 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
853 wl12xx_cmd_stop_fwlog(wl);
855 /* Traverse the memory blocks linked list */
857 end_of_log = wlcore_event_fw_logger(wl);
858 if (end_of_log == 0) {
860 end_of_log = wlcore_event_fw_logger(wl);
862 } while (end_of_log != 0);
/*
 * Preserve the per-link freed-packet count into the station's private
 * data before the link goes away, so TX sequence numbers survive a
 * recovery.  During recovery, pad the count to cover packets already
 * transmitted but not yet reported in the FW status (larger padding
 * for GEM encryption).
 */
865 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
866 u8 hlid, struct ieee80211_sta *sta)
868 struct wl1271_station *wl_sta;
869 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
871 wl_sta = (void *)sta->drv_priv;
872 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
875 * increment the initial seq number on recovery to account for
876 * transmitted packets that we haven't yet got in the FW status
878 if (wlvif->encryption_type == KEY_GEM)
879 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
881 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
882 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper for wlcore_save_freed_pkts(): look the station
 * up by MAC address (requires RCU context — taken by the caller or
 * elided here) and save its freed-packet count.  Warns on an invalid
 * hlid or a zero address.
 */
885 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
886 struct wl12xx_vif *wlvif,
887 u8 hlid, const u8 *addr)
889 struct ieee80211_sta *sta;
890 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
892 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
893 is_zero_ether_addr(addr)))
897 sta = ieee80211_find_sta(vif, addr);
899 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state at the start of a recovery: FW version, the
 * firmware program counter and interrupt status registers (read via a
 * temporary switch to the BOOT partition), and the recovery count.
 */
903 static void wlcore_print_recovery(struct wl1271 *wl)
909 wl1271_info("Hardware recovery in progress. FW ver: %s",
910 wl->chip.fw_ver_str);
912 /* change partitions momentarily so we can read the FW pc */
913 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
917 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
921 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
925 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
926 pc, hint_sts, ++wl->recovery_count);
/* restore the normal working partition */
928 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Firmware recovery work: dump diagnostics (and the FW panic log when
 * logging to host), honor the bug_on_recovery/no_recovery knobs, save
 * per-station sequence state, tear down every interface, stop the
 * device, and finally ask mac80211 to restart the hardware.
 */
932 static void wl1271_recovery_work(struct work_struct *work)
935 container_of(work, struct wl1271, recovery_work);
936 struct wl12xx_vif *wlvif;
937 struct ieee80211_vif *vif;
940 mutex_lock(&wl->mutex);
/* nothing to recover if already off or in PLT mode */
942 if (wl->state == WLCORE_STATE_OFF || wl->plt)
945 error = pm_runtime_get_sync(wl->dev);
947 wl1271_warning("Enable for recovery failed");
948 pm_runtime_put_noidle(wl->dev);
950 wlcore_disable_interrupts_nosync(wl);
/* only dump diagnostics for unexpected (not driver-initiated) recovery */
952 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
953 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
954 wl12xx_read_fwlog_panic(wl);
955 wlcore_print_recovery(wl);
/* debugging aid: crash the kernel on unintended recovery if requested */
958 BUG_ON(wl->conf.recovery.bug_on_recovery &&
959 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
961 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
963 if (wl->conf.recovery.no_recovery) {
964 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
968 /* Prevent spurious TX during FW restart */
969 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
971 /* reboot the chipset */
972 while (!list_empty(&wl->wlvif_list)) {
973 wlvif = list_first_entry(&wl->wlvif_list,
974 struct wl12xx_vif, list);
975 vif = wl12xx_wlvif_to_vif(wlvif);
/* keep TX sequence numbers across the restart for associated STAs */
977 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
978 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
979 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
980 vif->bss_conf.bssid);
983 __wl1271_op_remove_interface(wl, vif, false);
986 wlcore_op_stop_locked(wl);
987 pm_runtime_mark_last_busy(wl->dev);
988 pm_runtime_put_autosuspend(wl->dev);
990 ieee80211_restart_hw(wl->hw);
993 * Its safe to enable TX now - the queues are stopped after a request
996 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
999 wl->watchdog_recovery = false;
1000 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1001 mutex_unlock(&wl->mutex);
/* Wake the firmware from ELP (extremely low power) via the ELP control reg. */
1004 static int wlcore_fw_wakeup(struct wl1271 *wl)
1006 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the per-device status/result buffers (raw FW status,
 * converted FW status, TX result interface).  On failure the buffers
 * allocated so far are freed (error path partially elided here).
 */
1009 static int wl1271_setup(struct wl1271 *wl)
1011 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1012 if (!wl->raw_fw_status)
1015 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1019 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
/* error path: unwind previous allocations */
1025 kfree(wl->fw_status);
1026 kfree(wl->raw_fw_status);
/*
 * Power the chip on: apply the documented pre/post power-on delays,
 * reset the bus interface, select the BOOT partition and wake the
 * firmware from ELP.  Powers back off on failure.
 */
1030 static int wl12xx_set_power_on(struct wl1271 *wl)
1034 msleep(WL1271_PRE_POWER_ON_SLEEP);
1035 ret = wl1271_power_on(wl);
1038 msleep(WL1271_POWER_ON_SLEEP);
1039 wl1271_io_reset(wl);
1042 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1046 /* ELP module wake up */
1047 ret = wlcore_fw_wakeup(wl);
/* error path: undo the power-on */
1055 wl1271_power_off(wl);
/*
 * Bring the chip to a state where firmware can be booted: power on,
 * configure the bus block size (dropping the alignment quirk if the
 * bus can't do it), allocate driver buffers and fetch the firmware
 * image (PLT or normal).  Buffers are freed if the fetch fails.
 */
1059 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1063 ret = wl12xx_set_power_on(wl);
1068 * For wl127x based devices we could use the default block
1069 * size (512 bytes), but due to a bug in the sdio driver, we
1070 * need to set it explicitly after the chip is powered on. To
1071 * simplify the code and since the performance impact is
1072 * negligible, we use the same block size for all different
1075 * Check if the bus supports blocksize alignment and, if it
1076 * doesn't, make sure we don't have the quirk.
1078 if (!wl1271_set_block_size(wl))
1079 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1081 /* TODO: make sure the lower driver has set things up correctly */
1083 ret = wl1271_setup(wl);
1087 ret = wl12xx_fetch_firmware(wl, plt);
/* fetch failed: release the buffers allocated by wl1271_setup() */
1089 kfree(wl->fw_status);
1090 kfree(wl->raw_fw_status);
1091 kfree(wl->tx_res_if);
/*
 * Enter PLT (production line testing) mode: boot the chip with the PLT
 * firmware, retrying up to WL1271_BOOT_RETRIES times.  Only valid from
 * the OFF state.  On success, publishes hw/fw version info to wiphy;
 * on final failure, powers off and clears the PLT mode.
 */
1098 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1100 int retries = WL1271_BOOT_RETRIES;
1101 struct wiphy *wiphy = wl->hw->wiphy;
/* human-readable names for the PLT modes, indexed by plt_mode */
1103 static const char* const PLT_MODE[] = {
1112 mutex_lock(&wl->mutex);
1114 wl1271_notice("power up");
1116 if (wl->state != WLCORE_STATE_OFF) {
1117 wl1271_error("cannot go into PLT state because not "
1118 "in off state: %d", wl->state);
1123 /* Indicate to lower levels that we are now in PLT mode */
1125 wl->plt_mode = plt_mode;
1129 ret = wl12xx_chip_wakeup(wl, true);
/* CHIP_AWAKE mode skips full PLT firmware initialization */
1133 if (plt_mode != PLT_CHIP_AWAKE) {
1134 ret = wl->ops->plt_init(wl);
1139 wl->state = WLCORE_STATE_ON;
1140 wl1271_notice("firmware booted in PLT mode %s (%s)",
1142 wl->chip.fw_ver_str);
1144 /* update hw/fw version info in wiphy struct */
1145 wiphy->hw_version = wl->chip.id;
1146 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1147 sizeof(wiphy->fw_version));
/* boot attempt failed: power off and retry (loop structure elided) */
1152 wl1271_power_off(wl);
1156 wl->plt_mode = PLT_OFF;
1158 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1159 WL1271_BOOT_RETRIES);
1161 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode and power the chip down.  Interrupts are disabled
 * before the state check (and re-enabled on the not-in-PLT error path
 * to rebalance), pending work is flushed/cancelled outside the mutex,
 * then the device is powered off and marked OFF.
 */
1166 int wl1271_plt_stop(struct wl1271 *wl)
1170 wl1271_notice("power down");
1173 * Interrupts must be disabled before setting the state to OFF.
1174 * Otherwise, the interrupt handler might be called and exit without
1175 * reading the interrupt status.
1177 wlcore_disable_interrupts(wl);
1178 mutex_lock(&wl->mutex);
1180 mutex_unlock(&wl->mutex);
1183 * This will not necessarily enable interrupts as interrupts
1184 * may have been disabled when op_stop was called. It will,
1185 * however, balance the above call to disable_interrupts().
1187 wlcore_enable_interrupts(wl);
1189 wl1271_error("cannot power down because not in PLT "
1190 "state: %d", wl->state);
1195 mutex_unlock(&wl->mutex);
/* flush/cancel all outstanding work before touching the hardware */
1197 wl1271_flush_deferred_work(wl);
1198 cancel_work_sync(&wl->netstack_work);
1199 cancel_work_sync(&wl->recovery_work);
1200 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1202 mutex_lock(&wl->mutex);
1203 wl1271_power_off(wl);
1205 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1206 wl->state = WLCORE_STATE_OFF;
1208 wl->plt_mode = PLT_OFF;
1210 mutex_unlock(&wl->mutex);
/*
 * mac80211 TX entry point.  Maps the skb to a link (hlid) and AC
 * queue, drops it if the vif is missing, the link is invalid, or the
 * queue is hard-stopped; otherwise enqueues it on the per-link queue,
 * applies the high-watermark soft stop, and kicks the TX work unless
 * TX is already being handled.
 */
1216 static void wl1271_op_tx(struct ieee80211_hw *hw,
1217 struct ieee80211_tx_control *control,
1218 struct sk_buff *skb)
1220 struct wl1271 *wl = hw->priv;
1221 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1222 struct ieee80211_vif *vif = info->control.vif;
1223 struct wl12xx_vif *wlvif = NULL;
1224 unsigned long flags;
/* frames with no vif (e.g. during teardown) cannot be routed — drop */
1229 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1230 ieee80211_free_txskb(hw, skb);
1234 wlvif = wl12xx_vif_to_data(vif);
1235 mapping = skb_get_queue_mapping(skb);
1236 q = wl1271_tx_get_queue(mapping);
1238 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1240 spin_lock_irqsave(&wl->wl_lock, flags);
1243 * drop the packet if the link is invalid or the queue is stopped
1244 * for any reason but watermark. Watermark is a "soft"-stop so we
1245 * allow these packets through.
1247 if (hlid == WL12XX_INVALID_LINK_ID ||
1248 (!test_bit(hlid, wlvif->links_map)) ||
1249 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1250 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1251 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1252 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1253 ieee80211_free_txskb(hw, skb);
1257 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1259 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
/* keep both the global and per-vif queue depth counters in sync */
1261 wl->tx_queue_count[q]++;
1262 wlvif->tx_queue_count[q]++;
1265 * The workqueue is slow to process the tx_queue and we need stop
1266 * the queue here, otherwise the queue will get too long.
1268 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1269 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1270 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1271 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1272 wlcore_stop_queue_locked(wl, wlvif, q,
1273 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1277 * The chip specific setup must run before the first TX packet -
1278 * before that, the tx_work will not be initialized!
1281 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1282 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1283 ieee80211_queue_work(wl->hw, &wl->tx_work);
1286 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the firmware requested (used to
 * reclaim RX memory blocks).  Skips if one is already pending; pushes
 * it immediately via the TX path unless the FW TX is busy, in which
 * case the threaded IRQ handler will pick it up later.
 */
1289 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1291 unsigned long flags;
1294 /* no need to queue a new dummy packet if one is already pending */
1295 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1298 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1300 spin_lock_irqsave(&wl->wl_lock, flags);
1301 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1302 wl->tx_queue_count[q]++;
1303 spin_unlock_irqrestore(&wl->wl_lock, flags);
1305 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1306 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1307 return wlcore_tx_work_locked(wl);
1310 * If the FW TX is busy, TX work will be scheduled by the threaded
1311 * interrupt handler function
1317 * The size of the dummy packet should be at least 1400 bytes. However, in
1318 * order to minimize the number of bus transactions, aligning it to 512 bytes
1319 * boundaries could be beneficial, performance wise
1321 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the reusable dummy (null-function data) packet: zeroed 802.11
 * 3-address header with TODS set, padded to TOTAL_TX_DUMMY_PACKET_SIZE
 * including the TX HW descriptor, tagged with the management TID.
 * Returns NULL on allocation failure.
 */
1323 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1325 struct sk_buff *skb;
1326 struct ieee80211_hdr_3addr *hdr;
1327 unsigned int dummy_packet_size;
/* payload size = total - HW descriptor - 802.11 header */
1329 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1330 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1332 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1334 wl1271_warning("Failed to allocate a dummy packet skb");
/* leave headroom for the TX HW descriptor the driver prepends */
1338 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1340 hdr = skb_put_zero(skb, sizeof(*hdr));
1341 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1342 IEEE80211_STYPE_NULLFUNC |
1343 IEEE80211_FCTL_TODS);
1345 skb_put_zero(skb, dummy_packet_size);
1347 /* Dummy packets require the TID to be management */
1348 skb->priority = WL1271_TID_MGMT;
1350 /* Initialize all fields that might be used */
1351 skb_set_queue_mapping(skb, 0);
1352 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate a single WoWLAN packet pattern against FW limits.
 *
 * Walks the pattern mask, counting the contiguous masked segments
 * ("fields" in FW terms) and the total encoded size they would occupy,
 * then rejects patterns exceeding WL1271_RX_FILTER_MAX_FIELDS or
 * WL1271_RX_FILTER_MAX_FIELDS_SIZE.  A pattern with no mask is invalid.
 */
1359 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1361 int num_fields = 0, in_field = 0, fields_size = 0;
1362 int i, pattern_len = 0;
1365 wl1271_warning("No mask in WoWLAN pattern");
1370 * The pattern is broken up into segments of bytes at different offsets
1371 * that need to be checked by the FW filter. Each segment is called
1372 * a field in the FW API. We verify that the total number of fields
1373 * required for this pattern won't exceed FW limits (8)
1374 * as well as the total fields buffer won't exceed the FW limit.
1375 * Note that if there's a pattern which crosses Ethernet/IP header
1376 * boundary a new field is required.
1378 for (i = 0; i < p->pattern_len; i++) {
1379 if (test_bit(i, (unsigned long *)p->mask)) {
/* crossing the Ethernet header boundary forces a new field */
1384 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1386 fields_size += pattern_len +
1387 RX_FILTER_FIELD_OVERHEAD;
1395 fields_size += pattern_len +
1396 RX_FILTER_FIELD_OVERHEAD;
1403 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1407 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1408 wl1271_warning("RX Filter too complex. Too many segments");
1412 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1413 wl1271_warning("RX filter pattern is too big");
/* Allocate a zeroed RX filter; caller frees with wl1271_rx_filter_free(). */
1420 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1422 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
/* Free an RX filter, including every per-field pattern buffer. */
1425 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1432 for (i = 0; i < filter->num_fields; i++)
1433 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern) to an RX filter.
 *
 * Allocates a copy of @pattern of @len bytes and stores it with the
 * little-endian @offset and @flags in the next free field slot.
 * Fails when the filter already holds WL1271_RX_FILTER_MAX_FIELDS
 * fields or the pattern allocation fails.
 */
1438 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1439 u16 offset, u8 flags,
1440 const u8 *pattern, u8 len)
1442 struct wl12xx_rx_filter_field *field;
1444 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1445 wl1271_warning("Max fields per RX filter. can't alloc another");
1449 field = &filter->fields[filter->num_fields];
1451 field->pattern = kzalloc(len, GFP_KERNEL);
1452 if (!field->pattern) {
1453 wl1271_warning("Failed to allocate RX filter pattern");
1457 filter->num_fields++;
/* offsets are stored little-endian for the FW */
1459 field->offset = cpu_to_le16(offset);
1460 field->flags = flags;
1462 memcpy(field->pattern, pattern, len);
/*
 * Return the number of bytes the filter's fields occupy when flattened
 * for the FW: per field, the field struct minus the pattern pointer,
 * plus the pattern length itself.
 */
1467 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1469 int i, fields_size = 0;
1471 for (i = 0; i < filter->num_fields; i++)
1472 fields_size += filter->fields[i].len +
1473 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter's fields into @buf in the FW wire format:
 * each field header is copied followed immediately by its pattern
 * bytes (the in-memory pattern pointer is replaced by inline data).
 * @buf must be at least wl1271_rx_filter_get_fields_size() bytes.
 */
1479 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1483 struct wl12xx_rx_filter_field *field;
1485 for (i = 0; i < filter->num_fields; i++) {
1486 field = (struct wl12xx_rx_filter_field *)buf;
1488 field->offset = filter->fields[i].offset;
1489 field->flags = filter->fields[i].flags;
1490 field->len = filter->fields[i].len;
/* inline the pattern right after the header, overwriting the pointer */
1492 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1493 buf += sizeof(struct wl12xx_rx_filter_field) -
1494 sizeof(u8 *) + field->len;
1499 * Allocates an RX filter returned through f
1500 * which needs to be freed using rx_filter_free()
/*
 * Translate a cfg80211 WoWLAN pattern into a FW RX filter.
 *
 * Scans the pattern mask for contiguous masked runs; each run becomes
 * one filter field.  A run is split at the Ethernet/IP header boundary,
 * and fields are flagged as Ethernet-header or IP-header relative
 * (with IP offsets rebased past the Ethernet header).  On success the
 * filter's action is FILTER_SIGNAL; on error the filter is freed.
 */
1503 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1504 struct wl12xx_rx_filter **f)
1507 struct wl12xx_rx_filter *filter;
1511 filter = wl1271_rx_filter_alloc();
1513 wl1271_warning("Failed to alloc rx filter");
1519 while (i < p->pattern_len) {
/* skip unmasked bytes to the start of the next field */
1520 if (!test_bit(i, (unsigned long *)p->mask)) {
1525 for (j = i; j < p->pattern_len; j++) {
1526 if (!test_bit(j, (unsigned long *)p->mask))
/* split a run that crosses the Ethernet/IP header boundary */
1529 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1530 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1534 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1536 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1538 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1539 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1544 ret = wl1271_rx_filter_alloc_field(filter,
1547 &p->pattern[i], len);
1554 filter->action = FILTER_SIGNAL;
1560 wl1271_rx_filter_free(filter);
/*
 * Program the FW RX filtering for WoWLAN suspend.
 *
 * With no WoWLAN config (or "any" wakeup, or zero patterns) the default
 * filter is disabled and all filters cleared.  Otherwise every pattern
 * is validated up front, current FW filter state is reset, each pattern
 * is converted to an RX filter and enabled, and finally the default
 * action is set to FILTER_DROP so only matching frames wake the host.
 */
1566 static int wl1271_configure_wowlan(struct wl1271 *wl,
1567 struct cfg80211_wowlan *wow)
1571 if (!wow || wow->any || !wow->n_patterns) {
1572 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1577 ret = wl1271_rx_filter_clear_all(wl);
1584 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1587 /* Validate all incoming patterns before clearing current FW state */
1588 for (i = 0; i < wow->n_patterns; i++) {
1589 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1591 wl1271_warning("Bad wowlan pattern %d", i);
1596 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1600 ret = wl1271_rx_filter_clear_all(wl);
1604 /* Translate WoWLAN patterns into filters */
1605 for (i = 0; i < wow->n_patterns; i++) {
1606 struct cfg80211_pkt_pattern *p;
1607 struct wl12xx_rx_filter *filter = NULL;
1609 p = &wow->patterns[i];
1611 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1613 wl1271_warning("Failed to create an RX filter from "
1614 "wowlan pattern %d", i);
1618 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW has its own copy now; free the local filter either way */
1620 wl1271_rx_filter_free(filter);
/* anything not matched by a pattern is dropped while suspended */
1625 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare a station interface for suspend: configure WoWLAN filters
 * and, if the suspend wake-up conditions differ from the runtime ones,
 * program the suspend wake-up event and listen interval.
 * No-op for a non-associated station.
 */
1631 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1632 struct wl12xx_vif *wlvif,
1633 struct cfg80211_wowlan *wow)
1637 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1640 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX call when suspend conditions match the active ones */
1644 if ((wl->conf.conn.suspend_wake_up_event ==
1645 wl->conf.conn.wake_up_event) &&
1646 (wl->conf.conn.suspend_listen_interval ==
1647 wl->conf.conn.listen_interval))
1650 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1651 wl->conf.conn.suspend_wake_up_event,
1652 wl->conf.conn.suspend_listen_interval);
1655 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Prepare an AP interface for suspend: enable beacon filtering and
 * configure WoWLAN filters.  No-op unless the AP is started.
 */
1661 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1662 struct wl12xx_vif *wlvif,
1663 struct cfg80211_wowlan *wow)
1667 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1670 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1674 ret = wl1271_configure_wowlan(wl, wow);
/*
 * Dispatch per-vif suspend configuration by BSS type; other vif types
 * need no suspend-specific setup.
 */
1683 static int wl1271_configure_suspend(struct wl1271 *wl,
1684 struct wl12xx_vif *wlvif,
1685 struct cfg80211_wowlan *wow)
1687 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1688 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1689 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1690 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo suspend-time configuration on resume: tear down WoWLAN filters,
 * restore the runtime wake-up conditions for associated stations, and
 * disable beacon filtering for started APs.  Errors are logged but not
 * propagated — resume proceeds regardless.
 */
1694 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1697 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1698 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1700 if ((!is_ap) && (!is_sta))
1703 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1704 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1707 wl1271_configure_wowlan(wl, NULL);
/* nothing to restore if suspend used the runtime conditions */
1710 if ((wl->conf.conn.suspend_wake_up_event ==
1711 wl->conf.conn.wake_up_event) &&
1712 (wl->conf.conn.suspend_listen_interval ==
1713 wl->conf.conn.listen_interval))
1716 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1717 wl->conf.conn.wake_up_event,
1718 wl->conf.conn.listen_interval);
1721 wl1271_error("resume: wake up conditions failed: %d",
1725 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend handler.
 *
 * Refuses to suspend while a recovery is pending (recovery must run
 * first), flushes TX, wakes the device via runtime PM, configures each
 * vif for suspend, disables fast-link notifications, optionally filters
 * RX BA frames, flushes remaining work, cancels the TX watchdog, sets
 * WL1271_FLAG_SUSPENDED to stop new threaded-irq work, and finally
 * forces a runtime suspend of the device.
 */
1729 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1730 struct cfg80211_wowlan *wow)
1732 struct wl1271 *wl = hw->priv;
1733 struct wl12xx_vif *wlvif;
1734 unsigned long flags;
1737 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1740 /* we want to perform the recovery before suspending */
1741 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1742 wl1271_warning("postponing suspend to perform recovery");
1746 wl1271_tx_flush(wl);
1748 mutex_lock(&wl->mutex);
1750 ret = pm_runtime_get_sync(wl->dev);
1752 pm_runtime_put_noidle(wl->dev);
1753 mutex_unlock(&wl->mutex);
1757 wl->wow_enabled = true;
1758 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs need no suspend configuration */
1759 if (wlcore_is_p2p_mgmt(wlvif))
1762 ret = wl1271_configure_suspend(wl, wlvif, wow);
1764 mutex_unlock(&wl->mutex);
1765 wl1271_warning("couldn't prepare device to suspend");
1770 /* disable fast link flow control notifications from FW */
1771 ret = wlcore_hw_interrupt_notify(wl, false);
1775 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1776 ret = wlcore_hw_rx_ba_filter(wl,
1777 !!wl->conf.conn.suspend_rx_ba_activity);
1782 pm_runtime_put_noidle(wl->dev);
1783 mutex_unlock(&wl->mutex);
1786 wl1271_warning("couldn't prepare device to suspend");
1790 /* flush any remaining work */
1791 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1793 flush_work(&wl->tx_work);
1796 * Cancel the watchdog even if above tx_flush failed. We will detect
1797 * it on resume anyway.
1799 cancel_delayed_work(&wl->tx_watchdog_work);
1802 * set suspended flag to avoid triggering a new threaded_irq
1805 spin_lock_irqsave(&wl->wl_lock, flags);
1806 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1807 spin_unlock_irqrestore(&wl->wl_lock, flags);
1809 return pm_runtime_force_suspend(wl->dev);
/*
 * mac80211 resume handler — mirror of wl1271_op_suspend().
 *
 * Force-resumes the device, clears WL1271_FLAG_SUSPENDED, runs any
 * irq work that was postponed during suspend (unless a recovery is
 * pending, in which case the recovery work is queued instead), restores
 * per-vif runtime configuration, re-enables fast-link notifications and
 * BA frames, and flags the TX watchdog for re-init on the next TX.
 */
1812 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1814 struct wl1271 *wl = hw->priv;
1815 struct wl12xx_vif *wlvif;
1816 unsigned long flags;
1817 bool run_irq_work = false, pending_recovery;
1820 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1822 WARN_ON(!wl->wow_enabled);
1824 ret = pm_runtime_force_resume(wl->dev);
1826 wl1271_error("ELP wakeup failure!");
1831 * re-enable irq_work enqueuing, and call irq_work directly if
1832 * there is a pending work.
1834 spin_lock_irqsave(&wl->wl_lock, flags);
1835 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1836 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1837 run_irq_work = true;
1838 spin_unlock_irqrestore(&wl->wl_lock, flags);
1840 mutex_lock(&wl->mutex);
1842 /* test the recovery flag before calling any SDIO functions */
1843 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1847 wl1271_debug(DEBUG_MAC80211,
1848 "run postponed irq_work directly");
1850 /* don't talk to the HW if recovery is pending */
1851 if (!pending_recovery) {
1852 ret = wlcore_irq_locked(wl);
1854 wl12xx_queue_recovery_work(wl);
1857 wlcore_enable_interrupts(wl);
1860 if (pending_recovery) {
1861 wl1271_warning("queuing forgotten recovery on resume");
1862 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1866 ret = pm_runtime_get_sync(wl->dev);
1868 pm_runtime_put_noidle(wl->dev);
1872 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs were skipped on suspend, skip them here too */
1873 if (wlcore_is_p2p_mgmt(wlvif))
1876 wl1271_configure_resume(wl, wlvif);
1879 ret = wlcore_hw_interrupt_notify(wl, true);
1883 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1884 ret = wlcore_hw_rx_ba_filter(wl, false);
1889 pm_runtime_mark_last_busy(wl->dev);
1890 pm_runtime_put_autosuspend(wl->dev);
1893 wl->wow_enabled = false;
1896 * Set a flag to re-init the watchdog on the first Tx after resume.
1897 * That way we avoid possible conditions where Tx-complete interrupts
1898 * fail to arrive and we perform a spurious recovery.
1900 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1901 mutex_unlock(&wl->mutex);
/*
 * mac80211 start callback.  Intentionally does no hardware work — see
 * the comment below: boot is deferred until the first interface add,
 * when the MAC address is known.
 */
1906 static int wl1271_op_start(struct ieee80211_hw *hw)
1908 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1911 * We have to delay the booting of the hardware because
1912 * we need to know the local MAC address before downloading and
1913 * initializing the firmware. The MAC address cannot be changed
1914 * after boot, and without the proper MAC address, the firmware
1915 * will not function properly.
1917 * The MAC address is first known when the corresponding interface
1918 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held.
 *
 * Transitions state to OFF before cancelling works so work functions
 * bail out early, disables and synchronizes interrupts (dropping the
 * mutex around the sync to avoid deadlock), cancels all pending works,
 * resets the TX path, powers the chip off, and re-initializes all
 * driver bookkeeping (maps, counters, fw status buffers) to a clean
 * slate.  Balances the interrupt-disable depth if a recovery had
 * disabled interrupts.
 */
1924 static void wlcore_op_stop_locked(struct wl1271 *wl)
1928 if (wl->state == WLCORE_STATE_OFF) {
/* already off: just balance a recovery's interrupt-disable */
1929 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1931 wlcore_enable_interrupts(wl);
1937 * this must be before the cancel_work calls below, so that the work
1938 * functions don't perform further work.
1940 wl->state = WLCORE_STATE_OFF;
1943 * Use the nosync variant to disable interrupts, so the mutex could be
1944 * held while doing so without deadlocking.
1946 wlcore_disable_interrupts_nosync(wl);
1948 mutex_unlock(&wl->mutex);
1950 wlcore_synchronize_interrupts(wl);
1951 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1952 cancel_work_sync(&wl->recovery_work);
1953 wl1271_flush_deferred_work(wl);
1954 cancel_delayed_work_sync(&wl->scan_complete_work);
1955 cancel_work_sync(&wl->netstack_work);
1956 cancel_work_sync(&wl->tx_work);
1957 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1959 /* let's notify MAC80211 about the remaining pending TX frames */
1960 mutex_lock(&wl->mutex);
1961 wl12xx_tx_reset(wl);
1963 wl1271_power_off(wl);
1965 * In case a recovery was scheduled, interrupts were disabled to avoid
1966 * an interrupt storm. Now that the power is down, it is safe to
1967 * re-enable interrupts to balance the disable depth
1969 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1970 wlcore_enable_interrupts(wl);
/* reset all runtime state to power-on defaults */
1972 wl->band = NL80211_BAND_2GHZ;
1975 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1976 wl->channel_type = NL80211_CHAN_NO_HT;
1977 wl->tx_blocks_available = 0;
1978 wl->tx_allocated_blocks = 0;
1979 wl->tx_results_count = 0;
1980 wl->tx_packets_count = 0;
1981 wl->time_offset = 0;
1982 wl->ap_fw_ps_map = 0;
1984 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1985 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1986 memset(wl->links_map, 0, sizeof(wl->links_map));
1987 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1988 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1989 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1990 wl->active_sta_count = 0;
1991 wl->active_link_count = 0;
1993 /* The system link is always allocated */
1994 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1995 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1996 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1999 * this is performed after the cancel_work calls and the associated
2000 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2001 * get executed before all these vars have been reset.
2005 wl->tx_blocks_freed = 0;
2007 for (i = 0; i < NUM_TX_QUEUES; i++) {
2008 wl->tx_pkts_freed[i] = 0;
2009 wl->tx_allocated_pkts[i] = 0;
2012 wl1271_debugfs_reset(wl);
/* free and NULL FW-status buffers; reallocated on next boot */
2014 kfree(wl->raw_fw_status);
2015 wl->raw_fw_status = NULL;
2016 kfree(wl->fw_status);
2017 wl->fw_status = NULL;
2018 kfree(wl->tx_res_if);
2019 wl->tx_res_if = NULL;
2020 kfree(wl->target_mem_map);
2021 wl->target_mem_map = NULL;
2024 * FW channels must be re-calibrated after recovery,
2025 * save current Reg-Domain channel configuration and clear it.
2027 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2028 sizeof(wl->reg_ch_conf_pending));
2029 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 stop callback: take the mutex and run the locked stop path. */
2032 static void wlcore_op_stop(struct ieee80211_hw *hw)
2034 struct wl1271 *wl = hw->priv;
2036 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2038 mutex_lock(&wl->mutex);
2040 wlcore_op_stop_locked(wl);
2042 mutex_unlock(&wl->mutex);
/*
 * Delayed work run when a channel switch does not complete in time.
 *
 * If the switch is still marked in progress, reports failure to
 * mac80211 via ieee80211_chswitch_done() and tells the FW to stop the
 * channel switch.  Runtime PM brackets the FW command.
 */
2045 static void wlcore_channel_switch_work(struct work_struct *work)
2047 struct delayed_work *dwork;
2049 struct ieee80211_vif *vif;
2050 struct wl12xx_vif *wlvif;
2053 dwork = to_delayed_work(work);
2054 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2057 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2059 mutex_lock(&wl->mutex);
2061 if (unlikely(wl->state != WLCORE_STATE_ON))
2064 /* check the channel switch is still ongoing */
2065 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2068 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = switch did not succeed */
2069 ieee80211_chswitch_done(vif, false);
2071 ret = pm_runtime_get_sync(wl->dev);
2073 pm_runtime_put_noidle(wl->dev);
2077 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2079 pm_runtime_mark_last_busy(wl->dev);
2080 pm_runtime_put_autosuspend(wl->dev);
2082 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports connection loss to mac80211 when a STA vif
 * is still marked associated after the loss timer fires.  No-op if the
 * device is not ON or the station is no longer associated.
 */
2085 static void wlcore_connection_loss_work(struct work_struct *work)
2087 struct delayed_work *dwork;
2089 struct ieee80211_vif *vif;
2090 struct wl12xx_vif *wlvif;
2092 dwork = to_delayed_work(work);
2093 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2096 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2098 mutex_lock(&wl->mutex);
2100 if (unlikely(wl->state != WLCORE_STATE_ON))
2103 /* Call mac80211 connection loss */
2104 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2107 vif = wl12xx_wlvif_to_vif(wlvif);
2108 ieee80211_connection_loss(vif);
2110 mutex_unlock(&wl->mutex);
/*
 * Delayed work that cancels the pending-auth ROC once the auth-reply
 * timeout has genuinely elapsed.  Re-checks the timestamp under the
 * mutex in case another auth reply re-armed the timer while this work
 * was waiting on the lock.
 */
2113 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2115 struct delayed_work *dwork;
2117 struct wl12xx_vif *wlvif;
2118 unsigned long time_spare;
2121 dwork = to_delayed_work(work);
2122 wlvif = container_of(dwork, struct wl12xx_vif,
2123 pending_auth_complete_work);
2126 mutex_lock(&wl->mutex);
2128 if (unlikely(wl->state != WLCORE_STATE_ON))
2132 * Make sure a second really passed since the last auth reply. Maybe
2133 * a second auth reply arrived while we were stuck on the mutex.
2134 * Check for a little less than the timeout to protect from scheduler
2137 time_spare = jiffies +
2138 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2139 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2142 ret = pm_runtime_get_sync(wl->dev);
2144 pm_runtime_put_noidle(wl->dev);
2148 /* cancel the ROC if active */
2149 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2151 pm_runtime_mark_last_busy(wl->dev);
2152 pm_runtime_put_autosuspend(wl->dev);
2154 mutex_unlock(&wl->mutex);
/*
 * Allocate the first free rate-policy slot and return its index via
 * @idx; fails when all WL12XX_MAX_RATE_POLICIES slots are taken.
 */
2157 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2159 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2160 WL12XX_MAX_RATE_POLICIES);
2161 if (policy >= WL12XX_MAX_RATE_POLICIES)
2164 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a rate-policy slot and reset *idx to the invalid sentinel
 * (WL12XX_MAX_RATE_POLICIES) so stale indices are never reused.
 */
2169 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2171 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2174 __clear_bit(*idx, wl->rate_policies_map);
2175 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Allocate the first free keep-alive (KLV) template slot; returns the
 * index via @idx or fails when all WLCORE_MAX_KLV_TEMPLATES are taken.
 */
2178 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2180 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2181 WLCORE_MAX_KLV_TEMPLATES);
2182 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2185 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a KLV template slot and reset *idx to the invalid sentinel
 * (WLCORE_MAX_KLV_TEMPLATES).
 */
2190 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2192 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2195 __clear_bit(*idx, wl->klv_templates_map);
2196 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map the vif's BSS type (plus p2p/mesh sub-type) to a FW role type.
 * Returns WL12XX_INVALID_ROLE_TYPE for an unrecognized bss_type.
 */
2199 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2201 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2203 switch (wlvif->bss_type) {
2204 case BSS_TYPE_AP_BSS:
2206 return WL1271_ROLE_P2P_GO;
2207 else if (ieee80211_vif_is_mesh(vif))
2208 return WL1271_ROLE_MESH_POINT;
2210 return WL1271_ROLE_AP;
2212 case BSS_TYPE_STA_BSS:
2214 return WL1271_ROLE_P2P_CL;
2216 return WL1271_ROLE_STA;
2219 return WL1271_ROLE_IBSS;
2222 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2224 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize per-vif driver state for a newly added interface.
 *
 * Clears everything up to the persistent section, derives bss_type from
 * the mac80211 interface type, allocates rate policies (and, for
 * STA/IBSS, a keep-alive template), seeds rates/band/channel/power from
 * the global wl state, and sets up the vif's work items and timers.
 */
2227 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2229 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2232 /* clear everything but the persistent data */
2233 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2235 switch (ieee80211_vif_type_p2p(vif)) {
2236 case NL80211_IFTYPE_P2P_CLIENT:
2239 case NL80211_IFTYPE_STATION:
2240 case NL80211_IFTYPE_P2P_DEVICE:
2241 wlvif->bss_type = BSS_TYPE_STA_BSS;
2243 case NL80211_IFTYPE_ADHOC:
2244 wlvif->bss_type = BSS_TYPE_IBSS;
2246 case NL80211_IFTYPE_P2P_GO:
2249 case NL80211_IFTYPE_AP:
2250 case NL80211_IFTYPE_MESH_POINT:
2251 wlvif->bss_type = BSS_TYPE_AP_BSS;
2254 wlvif->bss_type = MAX_BSS_TYPE;
/* role/link ids start out invalid until the FW assigns them */
2258 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2259 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2260 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2262 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2263 wlvif->bss_type == BSS_TYPE_IBSS) {
2264 /* init sta/ibss data */
2265 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2266 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2267 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2268 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2269 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2270 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2271 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2272 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2275 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2276 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2277 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2278 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2279 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2280 wl12xx_allocate_rate_policy(wl,
2281 &wlvif->ap.ucast_rate_idx[i]);
2282 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2284 * TODO: check if basic_rate shouldn't be
2285 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2286 * instead (the same thing for STA above).
2288 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2289 /* TODO: this seems to be used only for STA, check it */
2290 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2293 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2294 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2295 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2298 * mac80211 configures some values globally, while we treat them
2299 * per-interface. thus, on init, we have to copy them from wl
2301 wlvif->band = wl->band;
2302 wlvif->channel = wl->channel;
2303 wlvif->power_level = wl->power_level;
2304 wlvif->channel_type = wl->channel_type;
2306 INIT_WORK(&wlvif->rx_streaming_enable_work,
2307 wl1271_rx_streaming_enable_work);
2308 INIT_WORK(&wlvif->rx_streaming_disable_work,
2309 wl1271_rx_streaming_disable_work);
2310 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2311 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2312 wlcore_channel_switch_work);
2313 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2314 wlcore_connection_loss_work);
2315 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2316 wlcore_pending_auth_complete_work);
2317 INIT_LIST_HEAD(&wlvif->list);
2319 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
/*
 * Boot the chip and firmware, with up to WL1271_BOOT_RETRIES attempts.
 *
 * On each attempt: wake the chip, run the chip-specific boot op, then
 * HW init.  On a failed attempt interrupts are drained with the mutex
 * temporarily dropped (see inline comment) and the chip is powered off
 * before retrying.  On success, publishes hw/fw version info to wiphy,
 * disables 5 GHz channels when 11a is unsupported, and transitions the
 * driver to WLCORE_STATE_ON.
 */
2323 static int wl12xx_init_fw(struct wl1271 *wl)
2325 int retries = WL1271_BOOT_RETRIES;
2326 bool booted = false;
2327 struct wiphy *wiphy = wl->hw->wiphy;
2332 ret = wl12xx_chip_wakeup(wl, false);
2336 ret = wl->ops->boot(wl);
2340 ret = wl1271_hw_init(wl);
2348 mutex_unlock(&wl->mutex);
2349 /* Unlocking the mutex in the middle of handling is
2350 inherently unsafe. In this case we deem it safe to do,
2351 because we need to let any possibly pending IRQ out of
2352 the system (and while we are WLCORE_STATE_OFF the IRQ
2353 work function will not do anything.) Also, any other
2354 possible concurrent operations will fail due to the
2355 current state, hence the wl1271 struct should be safe. */
2356 wlcore_disable_interrupts(wl);
2357 wl1271_flush_deferred_work(wl);
2358 cancel_work_sync(&wl->netstack_work);
2359 mutex_lock(&wl->mutex);
2361 wl1271_power_off(wl);
2365 wl1271_error("firmware boot failed despite %d retries",
2366 WL1271_BOOT_RETRIES);
2370 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2372 /* update hw/fw version info in wiphy struct */
2373 wiphy->hw_version = wl->chip.id;
/*
 * NOTE(review): strncpy() does not guarantee NUL-termination when
 * fw_ver_str fills the destination exactly — presumably fw_ver_str is
 * always shorter than wiphy->fw_version; confirm, or prefer strscpy().
 */
2374 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2375 sizeof(wiphy->fw_version));
2378 * Now we know if 11a is supported (info from the NVS), so disable
2379 * 11a channels if not supported
2381 if (!wl->enable_11a)
2382 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2384 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2385 wl->enable_11a ? "" : "not ");
2387 wl->state = WLCORE_STATE_ON;
/* True when the vif's device role has a valid (started) device link. */
2392 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2394 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2398 * Check whether a fw switch (i.e. moving from one loaded
2399 * fw to another) is needed. This function is also responsible
2400 * for updating wl->last_vif_count, so it must be called before
2401 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2404 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2405 struct vif_counter_data vif_counter_data,
2408 enum wl12xx_fw_type current_fw = wl->fw_type;
2409 u8 vif_count = vif_counter_data.counter;
/* a vif change already in progress suppresses another fw switch */
2411 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2414 /* increase the vif count if this is a new vif */
2415 if (add && !vif_counter_data.cur_vif_running)
2418 wl->last_vif_count = vif_count;
2420 /* no need for fw change if the device is OFF */
2421 if (wl->state == WLCORE_STATE_OFF)
2424 /* no need for fw change if a single fw is used */
2425 if (!wl->mr_fw_name)
/* switch multi<->single role fw when crossing the one-vif boundary */
2428 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2430 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2437 * Enter "forced psm". Make sure the sta is in psm against the ap,
2438 * to make the fw switch a bit more disconnection-persistent.
2440 static void wl12xx_force_active_psm(struct wl1271 *wl)
2442 struct wl12xx_vif *wlvif;
/* put every station vif into power-save before the fw switch */
2444 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2445 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/* Accumulator for wlcore_hw_queue_iter(): tracks which hw-queue bases
 * are already claimed by active interfaces, and whether the vif being
 * added is itself among those iterated. */
2449 struct wlcore_hw_queue_iter_data {
2450 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
/* the vif being added/looked up */
2452 struct ieee80211_vif *vif;
2453 /* is the current vif among those iterated */
/*
 * Interface iterator callback: mark each active vif's hw-queue base as
 * taken; note when the vif being added is itself already running
 * (resume/recovery) so its existing base can be reused.  P2P device
 * vifs have no hw queues and are skipped.
 */
2457 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2458 struct ieee80211_vif *vif)
2460 struct wlcore_hw_queue_iter_data *iter_data = data;
2462 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2463 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2466 if (iter_data->cur_running || vif == iter_data->vif) {
2467 iter_data->cur_running = true;
2471 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Assign a block of NUM_TX_QUEUES mac80211 hw queues to the vif.
 *
 * Reuses the existing base when the vif is already running in mac80211
 * (resume/recovery); otherwise picks the first free base from the map
 * built by wlcore_hw_queue_iter().  AP vifs additionally get a CAB
 * (content-after-beacon) queue from the reserved tail range; all other
 * vif types get IEEE80211_INVAL_HW_QUEUE for CAB.
 */
2474 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2475 struct wl12xx_vif *wlvif)
2477 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2478 struct wlcore_hw_queue_iter_data iter_data = {};
/* P2P device vifs get no data queues at all */
2481 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2482 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2486 iter_data.vif = vif;
2488 /* mark all bits taken by active interfaces */
2489 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2490 IEEE80211_IFACE_ITER_RESUME_ALL,
2491 wlcore_hw_queue_iter, &iter_data);
2493 /* the current vif is already running in mac80211 (resume/recovery) */
2494 if (iter_data.cur_running) {
2495 wlvif->hw_queue_base = vif->hw_queue[0];
2496 wl1271_debug(DEBUG_MAC80211,
2497 "using pre-allocated hw queue base %d",
2498 wlvif->hw_queue_base);
2500 /* interface type might have changed type */
2501 goto adjust_cab_queue;
2504 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2505 WLCORE_NUM_MAC_ADDRESSES);
2506 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2509 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2510 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2511 wlvif->hw_queue_base);
2513 for (i = 0; i < NUM_TX_QUEUES; i++) {
2514 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2515 /* register hw queues in mac80211 */
2516 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2520 /* the last places are reserved for cab queues per interface */
2521 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2522 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2523 wlvif->hw_queue_base / NUM_TX_QUEUES;
2525 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface handler.
 *
 * Rejects new interfaces in PLT mode or while a recovery/duplicate-add
 * race is in flight.  Initializes per-vif state, allocates hw queues,
 * and — on the first interface — boots the firmware (the MAC address is
 * only known here; see wl1271_op_start()).  May trigger an intentional
 * recovery to switch between single- and multi-role firmware.  Finally
 * enables the FW role(s) for the vif and links it into wl->wlvif_list.
 */
2530 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2531 struct ieee80211_vif *vif)
2533 struct wl1271 *wl = hw->priv;
2534 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2535 struct vif_counter_data vif_count;
2540 wl1271_error("Adding Interface not allowed while in PLT mode");
2544 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2545 IEEE80211_VIF_SUPPORTS_UAPSD |
2546 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2548 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2549 ieee80211_vif_type_p2p(vif), vif->addr);
2551 wl12xx_get_vif_count(hw, vif, &vif_count);
2553 mutex_lock(&wl->mutex);
2556 * in some very corner case HW recovery scenarios its possible to
2557 * get here before __wl1271_op_remove_interface is complete, so
2558 * opt out if that is the case.
2560 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2561 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2567 ret = wl12xx_init_vif_data(wl, vif);
2572 role_type = wl12xx_get_role_type(wl, wlvif);
2573 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2578 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2583 * TODO: after the nvs issue will be solved, move this block
2584 * to start(), and make sure here the driver is ON.
2586 if (wl->state == WLCORE_STATE_OFF) {
2588 * we still need this in order to configure the fw
2589 * while uploading the nvs
2591 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2593 ret = wl12xx_init_fw(wl);
2599 * Call runtime PM only after possible wl12xx_init_fw() above
2600 * is done. Otherwise we do not have interrupts enabled.
2602 ret = pm_runtime_get_sync(wl->dev);
2604 pm_runtime_put_noidle(wl->dev);
/* switching single<->multi-role fw requires an intended recovery */
2608 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2609 wl12xx_force_active_psm(wl);
2610 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2611 mutex_unlock(&wl->mutex);
2612 wl1271_recovery_work(&wl->recovery_work);
2616 if (!wlcore_is_p2p_mgmt(wlvif)) {
2617 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2618 role_type, &wlvif->role_id);
2622 ret = wl1271_init_vif_specific(wl, vif);
2627 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2628 &wlvif->dev_role_id);
2632 /* needed mainly for configuring rate policies */
2633 ret = wl1271_sta_hw_init(wl, wlvif);
2638 list_add(&wlvif->list, &wl->wlvif_list);
2639 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2641 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2646 pm_runtime_mark_last_busy(wl->dev);
2647 pm_runtime_put_autosuspend(wl->dev);
2649 mutex_unlock(&wl->mutex);
2654 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2655 struct ieee80211_vif *vif,
2656 bool reset_tx_queues)
2658 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2660 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2662 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2664 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2667 /* because of hardware recovery, we may get here twice */
2668 if (wl->state == WLCORE_STATE_OFF)
2671 wl1271_info("down");
2673 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2674 wl->scan_wlvif == wlvif) {
2675 struct cfg80211_scan_info info = {
2680 * Rearm the tx watchdog just before idling scan. This
2681 * prevents just-finished scans from triggering the watchdog
2683 wl12xx_rearm_tx_watchdog_locked(wl);
2685 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2686 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2687 wl->scan_wlvif = NULL;
2688 wl->scan.req = NULL;
2689 ieee80211_scan_completed(wl->hw, &info);
2692 if (wl->sched_vif == wlvif)
2693 wl->sched_vif = NULL;
2695 if (wl->roc_vif == vif) {
2697 ieee80211_remain_on_channel_expired(wl->hw);
2700 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2701 /* disable active roles */
2702 ret = pm_runtime_get_sync(wl->dev);
2704 pm_runtime_put_noidle(wl->dev);
2708 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2709 wlvif->bss_type == BSS_TYPE_IBSS) {
2710 if (wl12xx_dev_role_started(wlvif))
2711 wl12xx_stop_dev(wl, wlvif);
2714 if (!wlcore_is_p2p_mgmt(wlvif)) {
2715 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2719 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2724 pm_runtime_mark_last_busy(wl->dev);
2725 pm_runtime_put_autosuspend(wl->dev);
2728 wl12xx_tx_reset_wlvif(wl, wlvif);
2730 /* clear all hlids (except system_hlid) */
2731 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2733 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2734 wlvif->bss_type == BSS_TYPE_IBSS) {
2735 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2736 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2737 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2738 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2739 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2741 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2742 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2743 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2744 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2745 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2746 wl12xx_free_rate_policy(wl,
2747 &wlvif->ap.ucast_rate_idx[i]);
2748 wl1271_free_ap_keys(wl, wlvif);
2751 dev_kfree_skb(wlvif->probereq);
2752 wlvif->probereq = NULL;
2753 if (wl->last_wlvif == wlvif)
2754 wl->last_wlvif = NULL;
2755 list_del(&wlvif->list);
2756 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2757 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2758 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2766 * Last AP, have more stations. Configure sleep auth according to STA.
2767 * Don't do this on unintended recovery.
2769 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2770 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2773 if (wl->ap_count == 0 && is_ap) {
2774 /* mask ap events */
2775 wl->event_mask &= ~wl->ap_event_mask;
2776 wl1271_event_unmask(wl);
2779 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2780 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2781 /* Configure for power according to debugfs */
2782 if (sta_auth != WL1271_PSM_ILLEGAL)
2783 wl1271_acx_sleep_auth(wl, sta_auth);
2784 /* Configure for ELP power saving */
2786 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2790 mutex_unlock(&wl->mutex);
2792 del_timer_sync(&wlvif->rx_streaming_timer);
2793 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2794 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2795 cancel_work_sync(&wlvif->rc_update_work);
2796 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2797 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2798 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2800 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface callback.
 *
 * Takes wl->mutex, verifies the vif is still live (chip on and vif
 * initialized), then delegates the actual teardown to
 * __wl1271_op_remove_interface().  If the remaining vif mix requires a
 * different firmware, an intended recovery is queued to swap it.
 */
2803 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2804 struct ieee80211_vif *vif)
2806 struct wl1271 *wl = hw->priv;
2807 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2808 struct wl12xx_vif *iter;
2809 struct vif_counter_data vif_count;
2811 wl12xx_get_vif_count(hw, vif, &vif_count);
2812 mutex_lock(&wl->mutex);
/* nothing to do if the chip is off or this vif was never brought up */
2814 if (wl->state == WLCORE_STATE_OFF ||
2815 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2819 * wl->vif can be null here if someone shuts down the interface
2820 * just when hardware recovery has been started.
2822 wl12xx_for_each_wlvif(wl, iter) {
2826 __wl1271_op_remove_interface(wl, vif, true);
/* the loop above should have matched exactly this wlvif */
2829 WARN_ON(iter != wlvif);
2830 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2831 wl12xx_force_active_psm(wl);
/* mark the recovery as intentional so teardown paths treat it as such */
2832 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2833 wl12xx_queue_recovery_work(wl);
2836 mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface callback.
 *
 * Implemented as remove + re-add: the vif is torn down, its type is
 * switched to @new_type, and it is added again.  The
 * VIF_CHANGE_IN_PROGRESS flag brackets the sequence so other paths can
 * tell this is a type change rather than a real removal.
 */
2839 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2840 struct ieee80211_vif *vif,
2841 enum nl80211_iftype new_type, bool p2p)
2843 struct wl1271 *wl = hw->priv;
2846 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2847 wl1271_op_remove_interface(hw, vif);
2849 vif->type = new_type;
2851 ret = wl1271_op_add_interface(hw, vif);
2853 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware JOIN (role start) for a STA or IBSS vif.
 *
 * Clears the recorded encryption type first, since JOIN wipes the keys
 * in the chipset (see comment below).  For wl12xx parts with the
 * START_STA quirk, a dummy start/stop cycle is performed before the
 * real start to work around a firmware bug.
 */
2857 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2860 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2863 * One of the side effects of the JOIN command is that it clears
2864 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2865 * to a WPA/WPA2 access point will therefore kill the data-path.
2866 * Currently the only valid scenario for JOIN during association
2867 * is on roaming, in which case we will also be given new keys.
2868 * Keep the below message for now, unless it starts bothering
2869 * users who really like to roam a lot :)
2871 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2872 wl1271_info("JOIN while associated.");
2874 /* clear encryption type */
2875 wlvif->encryption_type = KEY_NONE;
2878 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2880 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2882 * TODO: this is an ugly workaround for wl12xx fw
2883 * bug - we are not able to tx/rx after the first
2884 * start_sta, so make dummy start+stop calls,
2885 * and then call start_sta again.
2886 * this should be fixed in the fw.
2888 wl12xx_cmd_role_start_sta(wl, wlvif);
2889 wl12xx_cmd_role_stop_sta(wl, wlvif);
2892 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a management frame in @skb (IEs start at
 * @offset) and cache it in wlvif->ssid / wlvif->ssid_len.
 * Errors out if no SSID IE is present or it exceeds
 * IEEE80211_MAX_SSID_LEN.
 */
2898 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2902 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2906 wl1271_error("No SSID in IEs!");
2911 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2912 wl1271_error("SSID is too long!");
2916 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id + length bytes) */
2917 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Recover the SSID for a STA vif from the AP probe request template
 * kept by mac80211 (the only supported source for setting the SSID
 * here, per the comment below).
 */
2921 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2923 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2924 struct sk_buff *skb;
2927 /* we currently only support setting the ssid from the ap probe req */
2928 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2931 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs in a probe request start after the fixed mgmt header */
2935 ieoffset = offsetof(struct ieee80211_mgmt,
2936 u.probe_req.variable);
2937 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply post-association configuration for a STA vif: cache association
 * parameters from @bss_conf, build the ps-poll and probe-request
 * templates, enable connection monitoring and the keep-alive machinery,
 * sync the PS mode with mac80211's default, and update the rate
 * policies.  The ordering of the firmware commands is significant (see
 * the keep-alive comment below).
 */
2943 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2944 struct ieee80211_bss_conf *bss_conf,
/* cache association state from mac80211 */
2950 wlvif->aid = bss_conf->aid;
2951 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2952 wlvif->beacon_int = bss_conf->beacon_int;
2953 wlvif->wmm_enabled = bss_conf->qos;
2955 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2958 * with wl1271, we don't need to update the
2959 * beacon_int and dtim_period, because the firmware
2960 * updates it by itself when the first beacon is
2961 * received after a join.
2963 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2968 * Get a template for hardware connection maintenance
2970 dev_kfree_skb(wlvif->probereq);
2971 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
/* cache the SSID out of the freshly built probe-req template */
2974 ieoffset = offsetof(struct ieee80211_mgmt,
2975 u.probe_req.variable);
2976 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2978 /* enable the connection monitoring feature */
2979 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2984 * The join command disables the keep-alive mode, shuts down its
2985 * process, and also clears the template config, so we need to reset it
2986 * all after the join. The acx_aid starts the keep-alive process, and
2987 * the order of the commands below is relevant.
2989 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2993 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2997 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
3001 ret = wl1271_acx_keep_alive_config(wl, wlvif,
3002 wlvif->sta.klv_template_id,
3003 ACX_KEEP_ALIVE_TPL_VALID);
3008 * The default fw psm configuration is AUTO, while mac80211 default
3009 * setting is off (ACTIVE), so sync the fw with the correct value.
3011 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3017 wl1271_tx_enabled_rates_get(wl,
3020 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): called on disassociation (STA) or IBSS
 * leave.  Frees the probe-request template, disables connection
 * monitoring, keep-alive and beacon filtering, aborts any channel
 * switch in progress, and invalidates the keep-alive template.
 */
3028 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3031 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3033 /* make sure we are connected (sta) joined */
3035 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3038 /* make sure we are joined (ibss) */
3040 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3044 /* use defaults when not associated */
3047 /* free probe-request template */
3048 dev_kfree_skb(wlvif->probereq);
3049 wlvif->probereq = NULL;
3051 /* disable connection monitor features */
3052 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3056 /* Disable the keep-alive feature */
3057 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3061 /* disable beacon filtering */
3062 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* a channel switch can't complete once we've left the BSS: abort it */
3067 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3068 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3070 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3071 ieee80211_chswitch_done(vif, false);
3072 cancel_delayed_work(&wlvif->channel_switch_work);
3075 /* invalidate keep-alive template */
3076 wl1271_acx_keep_alive_config(wl, wlvif,
3077 wlvif->sta.klv_template_id,
3078 ACX_KEEP_ALIVE_TPL_INVALID);
/*
 * Reset the vif's basic and current rate sets to the configured
 * bitrate mask for its current band.
 */
3083 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3085 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3086 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track the idle state of a STA vif via WLVIF_FLAG_ACTIVE.  When going
 * idle, also stop a running sched_scan owned by this vif (see comment
 * below).  No-op if the state is unchanged.
 */
3089 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3092 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3094 if (idle == cur_idle)
3098 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3100 /* The current firmware only supports sched_scan in idle */
3101 if (wl->sched_vif == wlvif)
3102 wl->ops->sched_scan_stop(wl, wlvif);
3104 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Per-vif handler for mac80211 config changes: currently only pushes a
 * changed TX power level to firmware.  P2P management vifs are skipped.
 */
3108 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3109 struct ieee80211_conf *conf, u32 changed)
3113 if (wlcore_is_p2p_mgmt(wlvif))
/* only touch firmware when the power level actually changed */
3116 if (conf->power_level != wlvif->power_level) {
3117 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3121 wlvif->power_level = conf->power_level;
/*
 * mac80211 config callback.  Records the global power level, then (with
 * the chip awake via runtime PM) applies the change to every vif
 * through wl12xx_config_vif().
 */
3127 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3129 struct wl1271 *wl = hw->priv;
3130 struct wl12xx_vif *wlvif;
3131 struct ieee80211_conf *conf = &hw->conf;
3134 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3136 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3138 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3141 mutex_lock(&wl->mutex);
3143 if (changed & IEEE80211_CONF_CHANGE_POWER)
3144 wl->power_level = conf->power_level;
3146 if (unlikely(wl->state != WLCORE_STATE_ON))
/* wake the chip; drop the usage count on failure before bailing */
3149 ret = pm_runtime_get_sync(wl->dev);
3151 pm_runtime_put_noidle(wl->dev);
3155 /* configure each interface */
3156 wl12xx_for_each_wlvif(wl, wlvif) {
3157 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3163 pm_runtime_mark_last_busy(wl->dev);
3164 pm_runtime_put_autosuspend(wl->dev);
3167 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast and consumed in
 * configure_filter (passed between them as an opaque u64 cookie).
 */
3172 struct wl1271_filter_params {
/* multicast addresses to program, up to ACX_MC_ADDRESS_GROUP_MAX */
3175 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 prepare_multicast callback.  Copies the multicast address
 * list into a freshly allocated wl1271_filter_params (GFP_ATOMIC: may
 * be called in atomic context) and returns it cast to u64 for
 * configure_filter to consume.  If the list is larger than the hardware
 * table, filtering is disabled instead of truncated.
 */
3178 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3179 struct netdev_hw_addr_list *mc_list)
3181 struct wl1271_filter_params *fp;
3182 struct netdev_hw_addr *ha;
3184 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3186 wl1271_error("Out of memory setting filters.");
3190 /* update multicast filtering parameters */
3191 fp->mc_list_length = 0;
3192 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3193 fp->enabled = false;
3196 netdev_hw_addr_list_for_each(ha, mc_list) {
3197 memcpy(fp->mc_list[fp->mc_list_length],
3198 ha->addr, ETH_ALEN);
3199 fp->mc_list_length++;
/* ownership of fp transfers to configure_filter via this cookie */
3203 return (u64)(unsigned long)fp;
3206 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3208 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter callback.  Consumes the filter params
 * cookie built by prepare_multicast and programs the firmware group
 * address table per vif.  For AP vifs created with FIF_ALLMULTI the
 * firmware filter is disabled so all multicast passes (needed for
 * mDNS-style discovery).  Other filter bits are not programmable here
 * (see comment near the end).
 */
3212 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3213 unsigned int changed,
3214 unsigned int *total, u64 multicast)
3216 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3217 struct wl1271 *wl = hw->priv;
3218 struct wl12xx_vif *wlvif;
3222 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3223 " total %x", changed, *total);
3225 mutex_lock(&wl->mutex);
/* advertise to mac80211 only the filters we can actually honour */
3227 *total &= WL1271_SUPPORTED_FILTERS;
3228 changed &= WL1271_SUPPORTED_FILTERS;
3230 if (unlikely(wl->state != WLCORE_STATE_ON))
3233 ret = pm_runtime_get_sync(wl->dev);
3235 pm_runtime_put_noidle(wl->dev);
3239 wl12xx_for_each_wlvif(wl, wlvif) {
3240 if (wlcore_is_p2p_mgmt(wlvif))
3243 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3244 if (*total & FIF_ALLMULTI)
3245 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3249 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3252 fp->mc_list_length);
3258 * If interface in AP mode and created with allmulticast then disable
3259 * the firmware filters so that all multicast packets are passed
3260 * This is mandatory for MDNS based discovery protocols
3262 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3263 if (*total & FIF_ALLMULTI) {
3264 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3274 * the fw doesn't provide an api to configure the filters. instead,
3275 * the filters configuration is based on the active roles / ROC
3280 pm_runtime_mark_last_busy(wl->dev);
3281 pm_runtime_put_autosuspend(wl->dev);
3284 mutex_unlock(&wl->mutex);
/*
 * Record an AP key for later upload (keys set before the AP role is
 * started are replayed by wl1271_ap_init_hwenc()).  Stores a kzalloc'd
 * copy in the first free slot of wlvif->ap.recorded_keys; rejects
 * oversized keys and attempts to replace an already-recorded id.
 */
3288 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3289 u8 id, u8 key_type, u8 key_size,
3290 const u8 *key, u8 hlid, u32 tx_seq_32,
3293 struct wl1271_ap_key *ap_key;
3296 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3298 if (key_size > MAX_KEY_SIZE)
3302 * Find next free entry in ap_keys. Also check we are not replacing
3305 for (i = 0; i < MAX_NUM_KEYS; i++) {
3306 if (wlvif->ap.recorded_keys[i] == NULL)
3309 if (wlvif->ap.recorded_keys[i]->id == id) {
3310 wl1271_warning("trying to record key replacement");
/* i == MAX_NUM_KEYS means no free slot was found */
3315 if (i == MAX_NUM_KEYS)
3318 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3323 ap_key->key_type = key_type;
3324 ap_key->key_size = key_size;
3325 memcpy(ap_key->key, key, key_size);
3326 ap_key->hlid = hlid;
3327 ap_key->tx_seq_32 = tx_seq_32;
3328 ap_key->tx_seq_16 = tx_seq_16;
3330 wlvif->ap.recorded_keys[i] = ap_key;
/*
 * Free all recorded AP keys and NULL the slots so the array can be
 * reused (kfree(NULL) is a no-op, so empty slots are fine).
 */
3334 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3338 for (i = 0; i < MAX_NUM_KEYS; i++) {
3339 kfree(wlvif->ap.recorded_keys[i]);
3340 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Upload to firmware all keys recorded before the AP role started
 * (counterpart of wl1271_record_ap_key()).  Keys with no hlid target
 * the broadcast link.  If any WEP key was uploaded, the default WEP key
 * is also (re)programmed.  The recorded list is freed afterwards.
 */
3344 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3347 struct wl1271_ap_key *key;
3348 bool wep_key_added = false;
3350 for (i = 0; i < MAX_NUM_KEYS; i++) {
3352 if (wlvif->ap.recorded_keys[i] == NULL)
3355 key = wlvif->ap.recorded_keys[i];
/* group keys were recorded with an invalid hlid: map to bcast link */
3357 if (hlid == WL12XX_INVALID_LINK_ID)
3358 hlid = wlvif->ap.bcast_hlid;
3360 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3361 key->id, key->key_type,
3362 key->key_size, key->key,
3363 hlid, key->tx_seq_32,
3368 if (key->key_type == KEY_WEP)
3369 wep_key_added = true;
3372 if (wep_key_added) {
3373 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3374 wlvif->ap.bcast_hlid);
3380 wl1271_free_ap_keys(wl, wlvif);
/*
 * Common low-level key programming for AP and STA vifs.
 *
 * AP path: before the AP role is started, ADD requests are recorded for
 * later upload and REMOVE requests are faked as success; afterwards the
 * key goes straight to firmware (per-station hlid or broadcast link).
 *
 * STA path: resolves the peer address (broadcast for group keys),
 * silently ignores unicast-key removal (cleared by the next JOIN
 * anyway, see comment) and removal on an already-deleted hlid, then
 * programs the key via wl1271_cmd_set_sta_key().
 */
3384 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3385 u16 action, u8 id, u8 key_type,
3386 u8 key_size, const u8 *key, u32 tx_seq_32,
3387 u16 tx_seq_16, struct ieee80211_sta *sta)
3390 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3393 struct wl1271_station *wl_sta;
3397 wl_sta = (struct wl1271_station *)sta->drv_priv;
3398 hlid = wl_sta->hlid;
3400 hlid = wlvif->ap.bcast_hlid;
3403 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3405 * We do not support removing keys after AP shutdown.
3406 * Pretend we do to make mac80211 happy.
3408 if (action != KEY_ADD_OR_REPLACE)
3411 ret = wl1271_record_ap_key(wl, wlvif, id,
3413 key, hlid, tx_seq_32,
3416 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3417 id, key_type, key_size,
3418 key, hlid, tx_seq_32,
3426 static const u8 bcast_addr[ETH_ALEN] = {
3427 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3430 addr = sta ? sta->addr : bcast_addr;
3432 if (is_zero_ether_addr(addr)) {
3433 /* We dont support TX only encryption */
3437 /* The wl1271 does not allow to remove unicast keys - they
3438 will be cleared automatically on next CMD_JOIN. Ignore the
3439 request silently, as we dont want the mac80211 to emit
3440 an error message. */
3441 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3444 /* don't remove key if hlid was already deleted */
3445 if (action == KEY_REMOVE &&
3446 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3449 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3450 id, key_type, key_size,
3451 key, addr, tx_seq_32,
/*
 * mac80211 set_key callback.  GEM and TKIP ciphers may change the FW
 * spare-block accounting, so for those the TX queues are stopped and
 * flushed around the operation.  The actual work is delegated to the
 * chip-specific hook (wlcore_hw_set_key) with the chip held awake.
 */
3461 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3462 struct ieee80211_vif *vif,
3463 struct ieee80211_sta *sta,
3464 struct ieee80211_key_conf *key_conf)
3466 struct wl1271 *wl = hw->priv;
3468 bool might_change_spare =
3469 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3470 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3472 if (might_change_spare) {
3474 * stop the queues and flush to ensure the next packets are
3475 * in sync with FW spare block accounting
3477 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3478 wl1271_tx_flush(wl);
3481 mutex_lock(&wl->mutex);
3483 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* queues may have been stopped above; make sure they get re-woken */
3485 goto out_wake_queues;
3488 ret = pm_runtime_get_sync(wl->dev);
3490 pm_runtime_put_noidle(wl->dev);
3491 goto out_wake_queues;
3494 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3496 pm_runtime_mark_last_busy(wl->dev);
3497 pm_runtime_put_autosuspend(wl->dev);
3500 if (might_change_spare)
3501 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3503 mutex_unlock(&wl->mutex);
/*
 * Exported default set_key implementation used by the chip-specific
 * set_key hooks.  Resolves the target hlid (per-station, broadcast, or
 * the STA link), seeds the TX sequence counters from the link's freed
 * packet count, maps the cfg80211 cipher to a firmware key type, and
 * dispatches SET/DISABLE commands through wl1271_set_key().  On a
 * unicast (or WEP) key-type change for a STA vif, the ARP response
 * template is rebuilt since it embeds the encryption type.
 */
3508 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3509 struct ieee80211_vif *vif,
3510 struct ieee80211_sta *sta,
3511 struct ieee80211_key_conf *key_conf)
3513 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3520 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3522 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3523 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3524 key_conf->cipher, key_conf->keyidx,
3525 key_conf->keylen, key_conf->flags);
3526 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3528 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3530 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3531 hlid = wl_sta->hlid;
3533 hlid = wlvif->ap.bcast_hlid;
3536 hlid = wlvif->sta.hlid;
/* seed TX security counters from the link's cumulative freed pkts */
3538 if (hlid != WL12XX_INVALID_LINK_ID) {
3539 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3540 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3541 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
/* map cfg80211 cipher suite to firmware key type */
3544 switch (key_conf->cipher) {
3545 case WLAN_CIPHER_SUITE_WEP40:
3546 case WLAN_CIPHER_SUITE_WEP104:
3549 key_conf->hw_key_idx = key_conf->keyidx;
3551 case WLAN_CIPHER_SUITE_TKIP:
3552 key_type = KEY_TKIP;
3553 key_conf->hw_key_idx = key_conf->keyidx;
3555 case WLAN_CIPHER_SUITE_CCMP:
/* let mac80211 reserve IV space; the hw/fw fills it in */
3557 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3559 case WL1271_CIPHER_SUITE_GEM:
3563 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3570 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3571 key_conf->keyidx, key_type,
3572 key_conf->keylen, key_conf->key,
3573 tx_seq_32, tx_seq_16, sta);
3575 wl1271_error("Could not add or replace key");
3580 * reconfiguring arp response if the unicast (or common)
3581 * encryption key type was changed
3583 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3584 (sta || key_type == KEY_WEP) &&
3585 wlvif->encryption_type != key_type) {
3586 wlvif->encryption_type = key_type;
3587 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3589 wl1271_warning("build arp rsp failed: %d", ret);
3596 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3597 key_conf->keyidx, key_type,
3598 key_conf->keylen, key_conf->key,
3601 wl1271_error("Could not remove key");
3607 wl1271_error("Unsupported key cmd 0x%x", cmd);
3613 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 set_default_unicast_key callback.  Records the new default
 * key index and, when WEP is in use, pushes it to firmware (the default
 * WEP key must be configured at least once — see comment).
 */
3615 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3616 struct ieee80211_vif *vif,
3619 struct wl1271 *wl = hw->priv;
3620 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3623 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3626 /* we don't handle unsetting of default key */
3630 mutex_lock(&wl->mutex);
3632 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3637 ret = pm_runtime_get_sync(wl->dev);
3639 pm_runtime_put_noidle(wl->dev);
3643 wlvif->default_key = key_idx;
3645 /* the default WEP key needs to be configured at least once */
3646 if (wlvif->encryption_type == KEY_WEP) {
3647 ret = wl12xx_cmd_set_default_wep_key(wl,
3655 pm_runtime_mark_last_busy(wl->dev);
3656 pm_runtime_put_autosuspend(wl->dev);
3659 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain configuration to firmware, for
 * chips with the REGDOMAIN_CONF quirk.  A failure to apply the config
 * triggers a recovery, since fw and stack would otherwise disagree on
 * allowed channels.
 */
3662 void wlcore_regdomain_config(struct wl1271 *wl)
3666 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3669 mutex_lock(&wl->mutex);
3671 if (unlikely(wl->state != WLCORE_STATE_ON))
3674 ret = pm_runtime_get_sync(wl->dev);
3678 ret = wlcore_cmd_regdomain_config_locked(wl);
3680 wl12xx_queue_recovery_work(wl);
3684 pm_runtime_mark_last_busy(wl->dev);
3685 pm_runtime_put_autosuspend(wl->dev);
3687 mutex_unlock(&wl->mutex);
/*
 * mac80211 hw_scan callback.  Rejects the request while the chip is off
 * (without -EBUSY, see comment) or while any role is in remain-on-
 * channel, otherwise starts the scan via wlcore_scan() with the chip
 * held awake.
 */
3690 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3691 struct ieee80211_vif *vif,
3692 struct ieee80211_scan_request *hw_req)
3694 struct cfg80211_scan_request *req = &hw_req->req;
3695 struct wl1271 *wl = hw->priv;
3700 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* only the first requested SSID is used by the fw scan */
3703 ssid = req->ssids[0].ssid;
3704 len = req->ssids[0].ssid_len;
3707 mutex_lock(&wl->mutex);
3709 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3711 * We cannot return -EBUSY here because cfg80211 will expect
3712 * a call to ieee80211_scan_completed if we do - in this case
3713 * there won't be any call.
3719 ret = pm_runtime_get_sync(wl->dev);
3721 pm_runtime_put_noidle(wl->dev);
3725 /* fail if there is any role in ROC */
3726 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3727 /* don't allow scanning right now */
3732 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3734 pm_runtime_mark_last_busy(wl->dev);
3735 pm_runtime_put_autosuspend(wl->dev);
3737 mutex_unlock(&wl->mutex);
/*
 * mac80211 cancel_hw_scan callback.  Stops an in-progress firmware
 * scan, resets the scan state machine, reports completion (aborted) to
 * mac80211, and finally cancels the deferred scan-complete work outside
 * the mutex.
 */
3742 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3743 struct ieee80211_vif *vif)
3745 struct wl1271 *wl = hw->priv;
3746 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3747 struct cfg80211_scan_info info = {
3752 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3754 mutex_lock(&wl->mutex);
3756 if (unlikely(wl->state != WLCORE_STATE_ON))
3759 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3762 ret = pm_runtime_get_sync(wl->dev);
3764 pm_runtime_put_noidle(wl->dev);
/* ask the fw to stop only if the scan hasn't already finished */
3768 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3769 ret = wl->ops->scan_stop(wl, wlvif);
3775 * Rearm the tx watchdog just before idling scan. This
3776 * prevents just-finished scans from triggering the watchdog
3778 wl12xx_rearm_tx_watchdog_locked(wl);
3780 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3781 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3782 wl->scan_wlvif = NULL;
3783 wl->scan.req = NULL;
3784 ieee80211_scan_completed(wl->hw, &info);
3787 pm_runtime_mark_last_busy(wl->dev);
3788 pm_runtime_put_autosuspend(wl->dev);
3790 mutex_unlock(&wl->mutex);
/* must run without wl->mutex held: the work itself takes it */
3792 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 sched_scan_start callback.  Starts a scheduled (offloaded)
 * scan via the chip-specific hook and records the owning vif in
 * wl->sched_vif on success.
 */
3795 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3796 struct ieee80211_vif *vif,
3797 struct cfg80211_sched_scan_request *req,
3798 struct ieee80211_scan_ies *ies)
3800 struct wl1271 *wl = hw->priv;
3801 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3804 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3806 mutex_lock(&wl->mutex);
3808 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3813 ret = pm_runtime_get_sync(wl->dev);
3815 pm_runtime_put_noidle(wl->dev);
3819 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3823 wl->sched_vif = wlvif;
3826 pm_runtime_mark_last_busy(wl->dev);
3827 pm_runtime_put_autosuspend(wl->dev);
3829 mutex_unlock(&wl->mutex);
/*
 * mac80211 sched_scan_stop callback: stops the offloaded scan via the
 * chip-specific hook with the chip held awake.
 */
3833 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3834 struct ieee80211_vif *vif)
3836 struct wl1271 *wl = hw->priv;
3837 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3840 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3842 mutex_lock(&wl->mutex);
3844 if (unlikely(wl->state != WLCORE_STATE_ON))
3847 ret = pm_runtime_get_sync(wl->dev);
3849 pm_runtime_put_noidle(wl->dev);
3853 wl->ops->sched_scan_stop(wl, wlvif);
3855 pm_runtime_mark_last_busy(wl->dev);
3856 pm_runtime_put_autosuspend(wl->dev);
3858 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_frag_threshold callback: programs the global
 * fragmentation threshold into firmware (a warning is logged on
 * failure).
 */
3863 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3865 struct wl1271 *wl = hw->priv;
3868 mutex_lock(&wl->mutex);
3870 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3875 ret = pm_runtime_get_sync(wl->dev);
3877 pm_runtime_put_noidle(wl->dev);
3881 ret = wl1271_acx_frag_threshold(wl, value);
3883 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3885 pm_runtime_mark_last_busy(wl->dev);
3886 pm_runtime_put_autosuspend(wl->dev);
3889 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_rts_threshold callback: the RTS threshold is per-vif in
 * firmware, so it is applied to every active vif.
 */
3894 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3896 struct wl1271 *wl = hw->priv;
3897 struct wl12xx_vif *wlvif;
3900 mutex_lock(&wl->mutex);
3902 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3907 ret = pm_runtime_get_sync(wl->dev);
3909 pm_runtime_put_noidle(wl->dev);
3913 wl12xx_for_each_wlvif(wl, wlvif) {
3914 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3916 wl1271_warning("set rts threshold failed: %d", ret);
3918 pm_runtime_mark_last_busy(wl->dev);
3919 pm_runtime_put_autosuspend(wl->dev);
3922 mutex_unlock(&wl->mutex);
/*
 * Remove the first IE with id @eid from @skb (IEs start at @ieoffset)
 * by shifting the remaining bytes down and trimming the skb.
 */
3927 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3930 const u8 *next, *end = skb->data + skb->len;
3931 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3932 skb->len - ieoffset);
/* close the gap left by the removed IE, then shrink the skb */
3937 memmove(ie, next, end - next);
3938 skb_trim(skb, skb->len - len);
/*
 * Like wl12xx_remove_ie(), but matches a vendor-specific IE by
 * OUI + OUI type instead of element id.
 */
3941 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3942 unsigned int oui, u8 oui_type,
3946 const u8 *next, *end = skb->data + skb->len;
3947 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3948 skb->data + ieoffset,
3949 skb->len - ieoffset);
/* close the gap left by the removed IE, then shrink the skb */
3954 memmove(ie, next, end - next);
3955 skb_trim(skb, skb->len - len);
/*
 * Upload the AP probe response template supplied by mac80211/usermode
 * to firmware and mark it as explicitly set, so the beacon-derived
 * template won't override it later.
 */
3958 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3959 struct ieee80211_vif *vif)
3961 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3962 struct sk_buff *skb;
3965 skb = ieee80211_proberesp_get(wl->hw, vif);
3969 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3970 CMD_TEMPL_AP_PROBE_RESPONSE,
3979 wl1271_debug(DEBUG_AP, "probe response updated");
3980 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template upload: if the vif already has the
 * correct SSID, the data is uploaded as-is; otherwise the SSID IE from
 * @probe_rsp_data is replaced with the one from bss_conf before upload
 * (bounded by WL1271_CMD_TEMPL_MAX_SIZE).
 */
3986 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3987 struct ieee80211_vif *vif,
3989 size_t probe_rsp_len,
3992 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3993 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3994 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3995 int ssid_ie_offset, ie_offset, templ_len;
3998 /* no need to change probe response if the SSID is set correctly */
3999 if (wlvif->ssid_len > 0)
4000 return wl1271_cmd_template_set(wl, wlvif->role_id,
4001 CMD_TEMPL_AP_PROBE_RESPONSE,
4006 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4007 wl1271_error("probe_rsp template too big");
4011 /* start searching from IE offset */
4012 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4014 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4015 probe_rsp_len - ie_offset);
4017 wl1271_error("No SSID in beacon!");
4021 ssid_ie_offset = ptr - probe_rsp_data;
/* skip past the original SSID IE (header + payload) */
4022 ptr += (ptr[1] + 2);
4024 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4026 /* insert SSID from bss_conf */
4027 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4028 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4029 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4030 bss_conf->ssid, bss_conf->ssid_len);
4031 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append the rest of the original frame after the new SSID IE */
4033 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4034 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4035 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4037 return wl1271_cmd_template_set(wl, wlvif->role_id,
4038 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes (slot time, preamble, CTS
 * protection) to firmware.  Failures are logged as warnings.
 */
4044 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4045 struct ieee80211_vif *vif,
4046 struct ieee80211_bss_conf *bss_conf,
4049 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4052 if (changed & BSS_CHANGED_ERP_SLOT) {
4053 if (bss_conf->use_short_slot)
4054 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4056 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4058 wl1271_warning("Set slot time failed %d", ret);
4063 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4064 if (bss_conf->use_short_preamble)
4065 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4067 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4070 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4071 if (bss_conf->use_cts_prot)
4072 ret = wl1271_acx_cts_protect(wl, wlvif,
4075 ret = wl1271_acx_cts_protect(wl, wlvif,
4076 CTSPROTECT_DISABLE);
4078 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Fetch the current beacon from mac80211 and upload it as the beacon
 * template; then derive a probe-response template from the same frame
 * (TIM and P2P IEs stripped, frame control rewritten) unless usermode
 * already set one explicitly.  Also refreshes wmm_enabled from the
 * beacon's WMM vendor IE.
 */
4087 static int wlcore_set_beacon_template(struct wl1271 *wl,
4088 struct ieee80211_vif *vif,
4091 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4092 struct ieee80211_hdr *hdr;
4095 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4096 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4104 wl1271_debug(DEBUG_MASTER, "beacon updated");
4106 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4108 dev_kfree_skb(beacon);
4111 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4112 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4114 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4119 dev_kfree_skb(beacon);
/* WMM is considered enabled iff the beacon carries the WMM vendor IE */
4123 wlvif->wmm_enabled =
4124 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4125 WLAN_OUI_TYPE_MICROSOFT_WMM,
4126 beacon->data + ieoffset,
4127 beacon->len - ieoffset);
4130 * In case we already have a probe-resp beacon set explicitly
4131 * by usermode, don't use the beacon data.
4133 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4136 /* remove TIM ie from probe response */
4137 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4140 * remove p2p ie from probe response.
4141 * the fw responds to probe requests that don't include
4142 * the p2p ie. probe requests with p2p ie will be passed,
4143 * and will be responded by the supplicant (the spec
4144 * forbids including the p2p ie when responding to probe
4145 * requests that didn't include it).
4147 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4148 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* turn the beacon frame into a probe response frame */
4150 hdr = (struct ieee80211_hdr *) beacon->data;
4151 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4152 IEEE80211_STYPE_PROBE_RESP);
4154 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4159 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4160 CMD_TEMPL_PROBE_RESPONSE,
4165 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: beacon interval, an
 * explicitly-set AP probe response, and beacon content.  After a
 * beacon update on a vif whose beaconing was disabled (DFS), the DFS
 * master is restarted.  Errors are logged at the bottom.
 */
4173 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4174 struct ieee80211_vif *vif,
4175 struct ieee80211_bss_conf *bss_conf,
4178 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4179 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4182 if (changed & BSS_CHANGED_BEACON_INT) {
4183 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4184 bss_conf->beacon_int);
4186 wlvif->beacon_int = bss_conf->beacon_int;
4189 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4190 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4192 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4195 if (changed & BSS_CHANGED_BEACON) {
4196 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4200 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4202 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4209 wl1271_error("beacon info change failed: %d", ret);
4213 /* AP mode changes */
/*
 * bss_info_changed handling for AP-mode vifs: basic rate updates (which
 * require re-initializing AP rate policies and templates), beacon info,
 * starting/stopping the AP role on BEACON_ENABLED changes, ERP
 * settings, and HT operation mode.
 */
4214 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4215 struct ieee80211_vif *vif,
4216 struct ieee80211_bss_conf *bss_conf,
4219 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4222 if (changed & BSS_CHANGED_BASIC_RATES) {
4223 u32 rates = bss_conf->basic_rates;
4225 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4227 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4228 wlvif->basic_rate_set);
/* rate policies and templates depend on the basic rate set */
4230 ret = wl1271_init_ap_rates(wl, wlvif);
4232 wl1271_error("AP rate policy change failed %d", ret);
4236 ret = wl1271_ap_init_templates(wl, vif);
4240 /* No need to set probe resp template for mesh */
4241 if (!ieee80211_vif_is_mesh(vif)) {
4242 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4249 ret = wlcore_set_beacon_template(wl, vif, true);
4254 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4258 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4259 if (bss_conf->enable_beacon) {
4260 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4261 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* replay keys recorded before the AP role existed */
4265 ret = wl1271_ap_init_hwenc(wl, wlvif);
4269 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4270 wl1271_debug(DEBUG_AP, "started AP");
4273 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4275 * AP might be in ROC in case we have just
4276 * sent auth reply. handle it.
4278 if (test_bit(wlvif->role_id, wl->roc_map))
4279 wl12xx_croc(wl, wlvif->role_id);
4281 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4285 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4286 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4288 wl1271_debug(DEBUG_AP, "stopped AP");
4293 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4297 /* Handle HT information change */
4298 if ((changed & BSS_CHANGED_HT) &&
4299 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4300 ret = wl1271_acx_set_ht_information(wl, wlvif,
4301 bss_conf->ht_operation_mode);
4303 wl1271_warning("Set ht information failed %d", ret);
/*
 * Program a new BSSID for a STA/IBSS vif: cache beacon interval and
 * rate sets, stop any sched scan on this vif (unsupported while
 * connected), push rate policies and null-data templates, then mark
 * the vif in use.
 * NOTE(review): elided chunk; some statements of this function are
 * not visible.
 */
4312 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4313 struct ieee80211_bss_conf *bss_conf,
4319 wl1271_debug(DEBUG_MAC80211,
4320 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4321 bss_conf->bssid, bss_conf->aid,
4322 bss_conf->beacon_int,
4323 bss_conf->basic_rates, sta_rate_set);
4325 wlvif->beacon_int = bss_conf->beacon_int;
4326 rates = bss_conf->basic_rates;
4327 wlvif->basic_rate_set =
4328 wl1271_tx_enabled_rates_get(wl, rates,
4331 wl1271_tx_min_rate_get(wl,
4332 wlvif->basic_rate_set);
4336 wl1271_tx_enabled_rates_get(wl,
4340 /* we only support sched_scan while not connected */
4341 if (wl->sched_vif == wlvif)
4342 wl->ops->sched_scan_stop(wl, wlvif);
4344 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/* Keepalive templates: regular and QoS null-data frames. */
4348 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4352 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4356 wlcore_set_ssid(wl, wlvif);
4358 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates,
 * re-apply rate policies, stop the STA role if it was in use, and
 * clear the in-use flag.
 */
4363 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4367 /* revert back to minimum rates for the current band */
4368 wl1271_set_band_rate(wl, wlvif);
4369 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4371 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4375 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4376 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4377 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4382 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4385 /* STA/IBSS mode changes */
/*
 * Handle mac80211 BSS info changes for a STA/IBSS vif: IBSS join
 * state, idle, CQM thresholds, BSSID set/clear, association state,
 * power save, HT peer capabilities, and ARP filtering.
 * Caller holds wl->mutex with the FW awake.
 * NOTE(review): elided chunk; several conditions, error labels and
 * braces of this function are not visible.
 */
4386 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4387 struct ieee80211_vif *vif,
4388 struct ieee80211_bss_conf *bss_conf,
4391 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4392 bool do_join = false;
4393 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4394 bool ibss_joined = false;
4395 u32 sta_rate_set = 0;
4397 struct ieee80211_sta *sta;
4398 bool sta_exists = false;
4399 struct ieee80211_sta_ht_cap sta_ht_cap;
4402 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4408 if (changed & BSS_CHANGED_IBSS) {
4409 if (bss_conf->ibss_joined) {
4410 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
/* IBSS left: tear down association state and stop the STA role. */
4413 wlcore_unset_assoc(wl, wlvif);
4414 wl12xx_cmd_role_stop_sta(wl, wlvif);
4418 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4421 /* Need to update the SSID (for filtering etc) */
4422 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4425 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4426 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4427 bss_conf->enable_beacon ? "enabled" : "disabled");
4432 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4433 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4435 if (changed & BSS_CHANGED_CQM) {
4436 bool enable = false;
/* A zero threshold means CQM monitoring is disabled. */
4437 if (bss_conf->cqm_rssi_thold)
4439 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4440 bss_conf->cqm_rssi_thold,
4441 bss_conf->cqm_rssi_hyst);
4444 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4447 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4448 BSS_CHANGED_ASSOC)) {
4450 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4452 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4454 /* save the supp_rates of the ap */
4455 sta_rate_set = sta->supp_rates[wlvif->band];
/* Fold the AP's HT MCS rx_mask into the driver's rate-set layout. */
4456 if (sta->ht_cap.ht_supported)
4458 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4459 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4460 sta_ht_cap = sta->ht_cap;
4467 if (changed & BSS_CHANGED_BSSID) {
4468 if (!is_zero_ether_addr(bss_conf->bssid)) {
4469 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4474 /* Need to update the BSSID (for filtering etc) */
4477 ret = wlcore_clear_bssid(wl, wlvif);
4483 if (changed & BSS_CHANGED_IBSS) {
4484 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4485 bss_conf->ibss_joined);
4487 if (bss_conf->ibss_joined) {
4488 u32 rates = bss_conf->basic_rates;
4489 wlvif->basic_rate_set =
4490 wl1271_tx_enabled_rates_get(wl, rates,
4493 wl1271_tx_min_rate_get(wl,
4494 wlvif->basic_rate_set);
4496 /* by default, use 11b + OFDM rates */
4497 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4498 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4504 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4505 /* enable beacon filtering */
4506 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4511 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4516 ret = wlcore_join(wl, wlvif);
4518 wl1271_warning("cmd join failed %d", ret);
4523 if (changed & BSS_CHANGED_ASSOC) {
4524 if (bss_conf->assoc) {
4525 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
/* mac80211 may have authorized us before the assoc event. */
4530 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4531 wl12xx_set_authorized(wl, wlvif);
4533 wlcore_unset_assoc(wl, wlvif);
4537 if (changed & BSS_CHANGED_PS) {
4538 if ((bss_conf->ps) &&
4539 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4540 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
/* conf.conn.forced_ps selects forced vs. automatic power save. */
4544 if (wl->conf.conn.forced_ps) {
4545 ps_mode = STATION_POWER_SAVE_MODE;
4546 ps_mode_str = "forced";
4548 ps_mode = STATION_AUTO_PS_MODE;
4549 ps_mode_str = "auto";
4552 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4554 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4556 wl1271_warning("enter %s ps failed %d",
4558 } else if (!bss_conf->ps &&
4559 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4560 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4562 ret = wl1271_ps_set_mode(wl, wlvif,
4563 STATION_ACTIVE_MODE);
4565 wl1271_warning("exit auto ps failed %d", ret);
4569 /* Handle new association with HT. Do this after join. */
4572 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4574 ret = wlcore_hw_set_peer_cap(wl,
4580 wl1271_warning("Set ht cap failed %d", ret);
4586 ret = wl1271_acx_set_ht_information(wl, wlvif,
4587 bss_conf->ht_operation_mode);
4589 wl1271_warning("Set ht information failed %d",
4596 /* Handle arp filtering. Done after join. */
4597 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4598 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4599 __be32 addr = bss_conf->arp_addr_list[0];
4600 wlvif->sta.qos = bss_conf->qos;
4601 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
/* Only offload ARP when exactly one address is set and associated. */
4603 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4604 wlvif->ip_addr = addr;
4606 * The template should have been configured only upon
4607 * association. however, it seems that the correct ip
4608 * isn't being set (when sending), so we have to
4609 * reconfigure the template upon every ip change.
4611 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4613 wl1271_warning("build arp rsp failed: %d", ret);
4617 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4618 (ACX_ARP_FILTER_ARP_FILTERING |
4619 ACX_ARP_FILTER_AUTO_ARP),
4623 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed op: common handling (TX power), then
 * dispatch to the AP or STA/IBSS specific handler under wl->mutex
 * with a runtime-PM reference held.
 * NOTE(review): elided chunk; the 'out'/unlock labels are not visible.
 */
4634 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4635 struct ieee80211_vif *vif,
4636 struct ieee80211_bss_conf *bss_conf,
4639 struct wl1271 *wl = hw->priv;
4640 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4641 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4644 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4645 wlvif->role_id, (int)changed);
4648 * make sure to cancel pending disconnections if our association
/* Cancel outside the mutex: the work itself may take wl->mutex. */
4651 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4652 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* Flush TX before disabling beacons so queued frames go out. */
4654 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4655 !bss_conf->enable_beacon)
4656 wl1271_tx_flush(wl);
4658 mutex_lock(&wl->mutex);
4660 if (unlikely(wl->state != WLCORE_STATE_ON))
4663 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4666 ret = pm_runtime_get_sync(wl->dev);
/* get_sync failed: drop the usage count it still took. */
4668 pm_runtime_put_noidle(wl->dev);
4672 if ((changed & BSS_CHANGED_TXPOWER) &&
4673 bss_conf->txpower != wlvif->power_level) {
4675 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4679 wlvif->power_level = bss_conf->txpower;
4683 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4685 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4687 pm_runtime_mark_last_busy(wl->dev);
4688 pm_runtime_put_autosuspend(wl->dev);
4691 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx op: nothing to program in firmware here; just
 * log the new context's channel and type.
 */
4694 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4695 struct ieee80211_chanctx_conf *ctx)
4697 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4698 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4699 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 remove_chanctx op: log-only counterpart to add_chanctx;
 * per-vif teardown happens in unassign_vif_chanctx.
 */
4703 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4704 struct ieee80211_chanctx_conf *ctx)
4706 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4707 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4708 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 change_chanctx op: walk all vifs bound to this context and
 * start radar detection (CAC) on AP vifs when the RADAR flag newly
 * applies and the channel is DFS-usable.
 * NOTE(review): elided chunk; the loop's 'continue' path and out
 * labels are not visible.
 */
4711 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4712 struct ieee80211_chanctx_conf *ctx,
4715 struct wl1271 *wl = hw->priv;
4716 struct wl12xx_vif *wlvif;
4718 int channel = ieee80211_frequency_to_channel(
4719 ctx->def.chan->center_freq);
4721 wl1271_debug(DEBUG_MAC80211,
4722 "mac80211 change chanctx %d (type %d) changed 0x%x",
4723 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4725 mutex_lock(&wl->mutex);
4727 ret = pm_runtime_get_sync(wl->dev);
4729 pm_runtime_put_noidle(wl->dev);
4733 wl12xx_for_each_wlvif(wl, wlvif) {
4734 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* Skip vifs that belong to a different channel context. */
4737 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4743 /* start radar if needed */
4744 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4745 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4746 ctx->radar_enabled && !wlvif->radar_enabled &&
4747 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4748 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4749 wlcore_hw_set_cac(wl, wlvif, true);
4750 wlvif->radar_enabled = true;
4754 pm_runtime_mark_last_busy(wl->dev);
4755 pm_runtime_put_autosuspend(wl->dev);
4757 mutex_unlock(&wl->mutex);
/*
 * mac80211 assign_vif_chanctx op: bind a vif to a channel context -
 * record band/channel/type on the wlvif, refresh default band rates,
 * and start radar detection (CAC) if required on a DFS-usable channel.
 */
4760 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4761 struct ieee80211_vif *vif,
4762 struct ieee80211_chanctx_conf *ctx)
4764 struct wl1271 *wl = hw->priv;
4765 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4766 int channel = ieee80211_frequency_to_channel(
4767 ctx->def.chan->center_freq);
4770 wl1271_debug(DEBUG_MAC80211,
4771 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4772 wlvif->role_id, channel,
4773 cfg80211_get_chandef_type(&ctx->def),
4774 ctx->radar_enabled, ctx->def.chan->dfs_state);
4776 mutex_lock(&wl->mutex);
4778 if (unlikely(wl->state != WLCORE_STATE_ON))
4781 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4784 ret = pm_runtime_get_sync(wl->dev);
4786 pm_runtime_put_noidle(wl->dev);
/* Cache the new operating channel on the vif. */
4790 wlvif->band = ctx->def.chan->band;
4791 wlvif->channel = channel;
4792 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4794 /* update default rates according to the band */
4795 wl1271_set_band_rate(wl, wlvif);
4797 if (ctx->radar_enabled &&
4798 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4799 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4800 wlcore_hw_set_cac(wl, wlvif, true);
4801 wlvif->radar_enabled = true;
4804 pm_runtime_mark_last_busy(wl->dev);
4805 pm_runtime_put_autosuspend(wl->dev);
4807 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx op: flush TX, then stop radar
 * detection (CAC) on the vif if it was running.
 */
4812 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4813 struct ieee80211_vif *vif,
4814 struct ieee80211_chanctx_conf *ctx)
4816 struct wl1271 *wl = hw->priv;
4817 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4820 wl1271_debug(DEBUG_MAC80211,
4821 "mac80211 unassign chanctx (role %d) %d (type %d)",
4823 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4824 cfg80211_get_chandef_type(&ctx->def));
/* Flush before the mutex: tx_flush has its own locking. */
4826 wl1271_tx_flush(wl);
4828 mutex_lock(&wl->mutex);
4830 if (unlikely(wl->state != WLCORE_STATE_ON))
4833 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4836 ret = pm_runtime_get_sync(wl->dev);
4838 pm_runtime_put_noidle(wl->dev);
4842 if (wlvif->radar_enabled) {
4843 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4844 wlcore_hw_set_cac(wl, wlvif, false);
4845 wlvif->radar_enabled = false;
4848 pm_runtime_mark_last_busy(wl->dev);
4849 pm_runtime_put_autosuspend(wl->dev);
4851 mutex_unlock(&wl->mutex);
/*
 * Move one AP vif to a new channel context: stop CAC on the old
 * channel, record the new band/channel/type, and restart CAC if the
 * new context requires radar detection. AP-only (warns otherwise);
 * expects beaconing to already be disabled.
 */
4854 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4855 struct wl12xx_vif *wlvif,
4856 struct ieee80211_chanctx_conf *new_ctx)
4858 int channel = ieee80211_frequency_to_channel(
4859 new_ctx->def.chan->center_freq);
4861 wl1271_debug(DEBUG_MAC80211,
4862 "switch vif (role %d) %d -> %d chan_type: %d",
4863 wlvif->role_id, wlvif->channel, channel,
4864 cfg80211_get_chandef_type(&new_ctx->def));
4866 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4869 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4871 if (wlvif->radar_enabled) {
4872 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4873 wlcore_hw_set_cac(wl, wlvif, false);
4874 wlvif->radar_enabled = false;
4877 wlvif->band = new_ctx->def.chan->band;
4878 wlvif->channel = channel;
4879 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4881 /* start radar if needed */
4882 if (new_ctx->radar_enabled) {
4883 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4884 wlcore_hw_set_cac(wl, wlvif, true);
4885 wlvif->radar_enabled = true;
/*
 * mac80211 switch_vif_chanctx op: under wl->mutex with the FW awake,
 * apply __wlcore_switch_vif_chan() to each vif in the switch request.
 */
4892 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4893 struct ieee80211_vif_chanctx_switch *vifs,
4895 enum ieee80211_chanctx_switch_mode mode)
4897 struct wl1271 *wl = hw->priv;
4900 wl1271_debug(DEBUG_MAC80211,
4901 "mac80211 switch chanctx n_vifs %d mode %d",
4904 mutex_lock(&wl->mutex);
4906 ret = pm_runtime_get_sync(wl->dev);
4908 pm_runtime_put_noidle(wl->dev);
4912 for (i = 0; i < n_vifs; i++) {
4913 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4915 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4920 pm_runtime_mark_last_busy(wl->dev);
4921 pm_runtime_put_autosuspend(wl->dev);
4923 mutex_unlock(&wl->mutex);
/*
 * mac80211 conf_tx op: program EDCA AC parameters (cw_min/cw_max,
 * AIFS, TXOP) and the TID configuration for one queue. P2P management
 * vifs are skipped.
 * NOTE(review): elided chunk; the UAPSD condition selecting ps_scheme
 * is not fully visible.
 */
4928 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4929 struct ieee80211_vif *vif, u16 queue,
4930 const struct ieee80211_tx_queue_params *params)
4932 struct wl1271 *wl = hw->priv;
4933 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4937 if (wlcore_is_p2p_mgmt(wlvif))
4940 mutex_lock(&wl->mutex);
4942 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4945 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4947 ps_scheme = CONF_PS_SCHEME_LEGACY;
4949 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4952 ret = pm_runtime_get_sync(wl->dev);
4954 pm_runtime_put_noidle(wl->dev);
4959 * the txop is confed in units of 32us by the mac80211,
/* '<< 5' converts mac80211's 32us TXOP units to microseconds. */
4962 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4963 params->cw_min, params->cw_max,
4964 params->aifs, params->txop << 5);
4968 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4969 CONF_CHANNEL_TYPE_EDCF,
4970 wl1271_tx_get_queue(queue),
4971 ps_scheme, CONF_ACK_POLICY_LEGACY,
4975 pm_runtime_mark_last_busy(wl->dev);
4976 pm_runtime_put_autosuspend(wl->dev);
4979 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf op: read the current TSF from firmware via an ACX
 * query. Returns ULLONG_MAX when the read cannot be performed.
 */
4984 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4985 struct ieee80211_vif *vif)
4988 struct wl1271 *wl = hw->priv;
4989 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4990 u64 mactime = ULLONG_MAX;
4993 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4995 mutex_lock(&wl->mutex);
4997 if (unlikely(wl->state != WLCORE_STATE_ON))
5000 ret = pm_runtime_get_sync(wl->dev);
5002 pm_runtime_put_noidle(wl->dev);
5006 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5011 pm_runtime_mark_last_busy(wl->dev);
5012 pm_runtime_put_autosuspend(wl->dev);
5015 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey op: report only the current channel; no
 * noise/busy-time statistics are filled in from firmware.
 */
5019 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5020 struct survey_info *survey)
5022 struct ieee80211_conf *conf = &hw->conf;
5027 survey->channel = conf->chandef.chan;
/*
 * Allocate a firmware link (HLID) for a newly added AP-mode station,
 * restore its saved TX security sequence counter, record its MAC
 * address and bump the active station count. Fails when the AP is at
 * max_ap_stations or no link is available.
 */
5032 static int wl1271_allocate_sta(struct wl1271 *wl,
5033 struct wl12xx_vif *wlvif,
5034 struct ieee80211_sta *sta)
5036 struct wl1271_station *wl_sta;
5040 if (wl->active_sta_count >= wl->max_ap_stations) {
5041 wl1271_warning("could not allocate HLID - too much stations");
5045 wl_sta = (struct wl1271_station *)sta->drv_priv;
5046 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5048 wl1271_warning("could not allocate HLID - too many links");
5052 /* use the previous security seq, if this is a recovery/resume */
5053 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5055 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5056 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5057 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear its bitmap/PS state, save
 * the freed-packets counter for recovery, free the firmware link and
 * decrement the active station count. Rearms the TX watchdog when the
 * last station goes away.
 */
5061 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5063 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5066 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5067 __clear_bit(hlid, &wl->ap_ps_map);
5068 __clear_bit(hlid, &wl->ap_fw_ps_map);
5071 * save the last used PN in the private part of iee80211_sta,
5072 * in case of recovery/suspend
5074 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5076 wl12xx_free_link(wl, wlvif, &hlid);
5077 wl->active_sta_count--;
5080 * rearm the tx watchdog when the last STA is freed - give the FW a
5081 * chance to return STA-buffered packets before complaining.
5083 if (wl->active_sta_count == 0)
5084 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode station: allocate its HLID, then issue the firmware
 * ADD_PEER command. On command failure the HLID is released again.
 */
5087 static int wl12xx_sta_add(struct wl1271 *wl,
5088 struct wl12xx_vif *wlvif,
5089 struct ieee80211_sta *sta)
5091 struct wl1271_station *wl_sta;
5095 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5097 ret = wl1271_allocate_sta(wl, wlvif, sta);
5101 wl_sta = (struct wl1271_station *)sta->drv_priv;
5102 hlid = wl_sta->hlid;
5104 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5106 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode station: issue the firmware REMOVE_PEER command
 * and free its HLID. Warns if the HLID is not currently mapped.
 * NOTE(review): 'id' at line 5122 is presumably derived from
 * wl_sta->hlid on an elided line - confirm against full source.
 */
5111 static int wl12xx_sta_remove(struct wl1271 *wl,
5112 struct wl12xx_vif *wlvif,
5113 struct ieee80211_sta *sta)
5115 struct wl1271_station *wl_sta;
5118 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5120 wl_sta = (struct wl1271_station *)sta->drv_priv;
5122 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5125 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5129 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel for this vif's role, but only when no
 * other role currently holds a ROC (roc_map empty) and the role id
 * is valid.
 */
5133 static void wlcore_roc_if_possible(struct wl1271 *wl,
5134 struct wl12xx_vif *wlvif)
5136 if (find_first_bit(wl->roc_map,
5137 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5140 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5143 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5147 * when wl_sta is NULL, we treat this call as if coming from a
5148 * pending auth reply.
5149 * wl->mutex must be taken and the FW must be awake when the call
/*
 * Track stations that are mid-connection on an AP vif. Entering the
 * in-connection state starts a ROC (so the AP stays on-channel for
 * the handshake); leaving it decrements the count and cancels the ROC
 * once no station or pending auth reply remains.
 * NOTE(review): elided chunk; the in_conn true/false branching is
 * only partially visible.
 */
5152 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5153 struct wl1271_station *wl_sta, bool in_conn)
5156 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* First in-connection entity on this vif: grab the ROC now. */
5159 if (!wlvif->ap_pending_auth_reply &&
5160 !wlvif->inconn_count)
5161 wlcore_roc_if_possible(wl, wlvif);
5164 wl_sta->in_connection = true;
5165 wlvif->inconn_count++;
5167 wlvif->ap_pending_auth_reply = true;
5170 if (wl_sta && !wl_sta->in_connection)
5173 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5176 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5180 wl_sta->in_connection = false;
5181 wlvif->inconn_count--;
5183 wlvif->ap_pending_auth_reply = false;
/* Nothing left in connection: release the ROC if we hold one. */
5186 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5187 test_bit(wlvif->role_id, wl->roc_map))
5188 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the sta_state op: translate mac80211 station state
 * transitions into firmware actions for both AP roles (add/remove/
 * authorize peers, in-connection tracking) and STA roles (authorize,
 * save/restore security sequence numbers, ROC management).
 * NOTE(review): elided chunk; several transition conditions (is_ap/
 * is_sta guards) are only partially visible.
 */
5192 static int wl12xx_update_sta_state(struct wl1271 *wl,
5193 struct wl12xx_vif *wlvif,
5194 struct ieee80211_sta *sta,
5195 enum ieee80211_sta_state old_state,
5196 enum ieee80211_sta_state new_state)
5198 struct wl1271_station *wl_sta;
5199 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5200 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5203 wl_sta = (struct wl1271_station *)sta->drv_priv;
5205 /* Add station (AP mode) */
5207 old_state == IEEE80211_STA_NOTEXIST &&
5208 new_state == IEEE80211_STA_NONE) {
5209 ret = wl12xx_sta_add(wl, wlvif, sta);
5213 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5216 /* Remove station (AP mode) */
5218 old_state == IEEE80211_STA_NONE &&
5219 new_state == IEEE80211_STA_NOTEXIST) {
5221 wl12xx_sta_remove(wl, wlvif, sta);
5223 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5226 /* Authorize station (AP mode) */
5228 new_state == IEEE80211_STA_AUTHORIZED) {
5229 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5233 /* reconfigure rates */
5234 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5238 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5243 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5246 /* Authorize station */
5248 new_state == IEEE80211_STA_AUTHORIZED) {
5249 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5250 ret = wl12xx_set_authorized(wl, wlvif);
/* Deauthorize: drop authorized/state-sent flags. */
5256 old_state == IEEE80211_STA_AUTHORIZED &&
5257 new_state == IEEE80211_STA_ASSOC) {
5258 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5259 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5262 /* save seq number on disassoc (suspend) */
5264 old_state == IEEE80211_STA_ASSOC &&
5265 new_state == IEEE80211_STA_AUTH) {
5266 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5267 wlvif->total_freed_pkts = 0;
5270 /* restore seq number on assoc (resume) */
5272 old_state == IEEE80211_STA_AUTH &&
5273 new_state == IEEE80211_STA_ASSOC) {
5274 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5277 /* clear ROCs on failure or authorization */
5279 (new_state == IEEE80211_STA_AUTHORIZED ||
5280 new_state == IEEE80211_STA_NOTEXIST)) {
5281 if (test_bit(wlvif->role_id, wl->roc_map))
5282 wl12xx_croc(wl, wlvif->role_id);
/* New connection attempt: take a ROC if no role holds one yet. */
5286 old_state == IEEE80211_STA_NOTEXIST &&
5287 new_state == IEEE80211_STA_NONE) {
5288 if (find_first_bit(wl->roc_map,
5289 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5290 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5291 wl12xx_roc(wl, wlvif, wlvif->role_id,
5292 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state op: wrap wl12xx_update_sta_state() with locking
 * and runtime-PM. For downward transitions the error is not
 * propagated (mac80211 requires teardown transitions to succeed).
 */
5298 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5299 struct ieee80211_vif *vif,
5300 struct ieee80211_sta *sta,
5301 enum ieee80211_sta_state old_state,
5302 enum ieee80211_sta_state new_state)
5304 struct wl1271 *wl = hw->priv;
5305 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5308 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5309 sta->aid, old_state, new_state);
5311 mutex_lock(&wl->mutex);
5313 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5318 ret = pm_runtime_get_sync(wl->dev);
5320 pm_runtime_put_noidle(wl->dev);
5324 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5326 pm_runtime_mark_last_busy(wl->dev);
5327 pm_runtime_put_autosuspend(wl->dev);
5329 mutex_unlock(&wl->mutex);
5330 if (new_state < old_state)
/*
 * mac80211 ampdu_action op: manage RX block-ack sessions in firmware
 * (per-link ba_bitmap, bounded by ba_rx_session_count_max). TX AMPDU
 * sessions are handled autonomously by the firmware, so the TX cases
 * are intentionally no-ops.
 * NOTE(review): elided chunk; some error/unlock labels not visible.
 */
5335 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5336 struct ieee80211_vif *vif,
5337 struct ieee80211_ampdu_params *params)
5339 struct wl1271 *wl = hw->priv;
5340 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5342 u8 hlid, *ba_bitmap;
5343 struct ieee80211_sta *sta = params->sta;
5344 enum ieee80211_ampdu_mlme_action action = params->action;
5345 u16 tid = params->tid;
5346 u16 *ssn = &params->ssn;
5348 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5351 /* sanity check - the fields in FW are only 8bits wide */
5352 if (WARN_ON(tid > 0xFF))
5355 mutex_lock(&wl->mutex);
5357 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* Resolve the HLID: own link for STA, peer link for AP. */
5362 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5363 hlid = wlvif->sta.hlid;
5364 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5365 struct wl1271_station *wl_sta;
5367 wl_sta = (struct wl1271_station *)sta->drv_priv;
5368 hlid = wl_sta->hlid;
5374 ba_bitmap = &wl->links[hlid].ba_bitmap;
5376 ret = pm_runtime_get_sync(wl->dev);
5378 pm_runtime_put_noidle(wl->dev);
5382 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5386 case IEEE80211_AMPDU_RX_START:
5387 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5392 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5394 wl1271_error("exceeded max RX BA sessions");
/* Refuse to double-start a session on an already-active TID. */
5398 if (*ba_bitmap & BIT(tid)) {
5400 wl1271_error("cannot enable RX BA session on active "
5405 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5410 *ba_bitmap |= BIT(tid);
5411 wl->ba_rx_session_count++;
5415 case IEEE80211_AMPDU_RX_STOP:
5416 if (!(*ba_bitmap & BIT(tid))) {
5418 * this happens on reconfig - so only output a debug
5419 * message for now, and don't fail the function.
5421 wl1271_debug(DEBUG_MAC80211,
5422 "no active RX BA session on tid: %d",
5428 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5431 *ba_bitmap &= ~BIT(tid);
5432 wl->ba_rx_session_count--;
5437 * The BA initiator session management in FW independently.
5438 * Falling break here on purpose for all TX APDU commands.
5440 case IEEE80211_AMPDU_TX_START:
5441 case IEEE80211_AMPDU_TX_STOP_CONT:
5442 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5443 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5444 case IEEE80211_AMPDU_TX_OPERATIONAL:
5449 wl1271_error("Incorrect ampdu action id=%x\n", action);
5453 pm_runtime_mark_last_busy(wl->dev);
5454 pm_runtime_put_autosuspend(wl->dev);
5457 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask op: cache per-band legacy bitrate masks on
 * the vif; for a not-yet-associated STA also push the updated rate
 * policies to firmware immediately (used e.g. to restrict connection
 * attempt rates).
 */
5462 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5463 struct ieee80211_vif *vif,
5464 const struct cfg80211_bitrate_mask *mask)
5466 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5467 struct wl1271 *wl = hw->priv;
5470 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5471 mask->control[NL80211_BAND_2GHZ].legacy,
5472 mask->control[NL80211_BAND_5GHZ].legacy);
5474 mutex_lock(&wl->mutex);
5476 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5477 wlvif->bitrate_masks[i] =
5478 wl1271_tx_enabled_rates_get(wl,
5479 mask->control[i].legacy,
5482 if (unlikely(wl->state != WLCORE_STATE_ON))
5485 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5486 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5488 ret = pm_runtime_get_sync(wl->dev);
5490 pm_runtime_put_noidle(wl->dev);
5494 wl1271_set_band_rate(wl, wlvif);
5496 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5497 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5499 pm_runtime_mark_last_busy(wl->dev);
5500 pm_runtime_put_autosuspend(wl->dev);
5503 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch op (STA side): flush TX, hand the switch to
 * the chip-specific ops->channel_switch, and arm a delayed work that
 * reports failure if the switch has not completed ~5s after the
 * announced switch time.
 */
5508 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5509 struct ieee80211_vif *vif,
5510 struct ieee80211_channel_switch *ch_switch)
5512 struct wl1271 *wl = hw->priv;
5513 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5516 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5518 wl1271_tx_flush(wl);
5520 mutex_lock(&wl->mutex);
/* Chip is off: tell mac80211 the switch failed so it can recover. */
5522 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5523 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5524 ieee80211_chswitch_done(vif, false);
5526 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5530 ret = pm_runtime_get_sync(wl->dev);
5532 pm_runtime_put_noidle(wl->dev);
5536 /* TODO: change mac80211 to pass vif as param */
5538 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5539 unsigned long delay_usec;
5541 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5545 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5547 /* indicate failure 5 seconds after channel switch time */
5548 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5550 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5551 usecs_to_jiffies(delay_usec) +
5552 msecs_to_jiffies(5000));
5556 pm_runtime_mark_last_busy(wl->dev);
5557 pm_runtime_put_autosuspend(wl->dev);
5560 mutex_unlock(&wl->mutex);
/*
 * Fetch the vif's current beacon from mac80211 and locate the given
 * information element in its variable part.
 * NOTE(review): elided chunk; the beacon NULL check and skb free path
 * are not visible here - confirm against full source.
 */
5563 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5564 struct wl12xx_vif *wlvif,
5567 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5568 struct sk_buff *beacon =
5569 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5574 return cfg80211_find_ie(eid,
5575 beacon->data + ieoffset,
5576 beacon->len - ieoffset);
/*
 * Read the CSA countdown value from the beacon's Channel Switch
 * Announcement IE. The '&ie[2]' skips the 2-byte IE header (id, len).
 */
5579 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5583 const struct ieee80211_channel_sw_ie *ie_csa;
5585 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5589 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5590 *csa_count = ie_csa->count;
/*
 * mac80211 channel_switch_beacon op (AP side): build an
 * ieee80211_channel_switch from the target chandef, seed its count
 * from the beacon's CSA IE, then hand it to the chip-specific
 * channel_switch op.
 */
5595 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5596 struct ieee80211_vif *vif,
5597 struct cfg80211_chan_def *chandef)
5599 struct wl1271 *wl = hw->priv;
5600 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5601 struct ieee80211_channel_switch ch_switch = {
5603 .chandef = *chandef,
5607 wl1271_debug(DEBUG_MAC80211,
5608 "mac80211 channel switch beacon (role %d)",
5611 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5613 wl1271_error("error getting beacon (for CSA counter)");
5617 mutex_lock(&wl->mutex);
5619 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5624 ret = pm_runtime_get_sync(wl->dev);
5626 pm_runtime_put_noidle(wl->dev);
5630 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5634 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5637 pm_runtime_mark_last_busy(wl->dev);
5638 pm_runtime_put_autosuspend(wl->dev);
5640 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush op: delegate to wl1271_tx_flush(); the queues/drop
 * arguments are ignored - all TX is flushed.
 */
5643 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5644 u32 queues, bool drop)
5646 struct wl1271 *wl = hw->priv;
5648 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel op: only one ROC at a time is supported.
 * Start the device role on the requested channel and arm
 * roc_complete_work to end the ROC after 'duration' ms.
 */
5651 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5652 struct ieee80211_vif *vif,
5653 struct ieee80211_channel *chan,
5655 enum ieee80211_roc_type type)
5657 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5658 struct wl1271 *wl = hw->priv;
5659 int channel, active_roc, ret = 0;
5661 channel = ieee80211_frequency_to_channel(chan->center_freq);
5663 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5664 channel, wlvif->role_id);
5666 mutex_lock(&wl->mutex);
5668 if (unlikely(wl->state != WLCORE_STATE_ON))
5671 /* return EBUSY if we can't ROC right now */
5672 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5673 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5674 wl1271_warning("active roc on role %d", active_roc);
5679 ret = pm_runtime_get_sync(wl->dev);
5681 pm_runtime_put_noidle(wl->dev);
5685 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5690 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5691 msecs_to_jiffies(duration));
5693 pm_runtime_mark_last_busy(wl->dev);
5694 pm_runtime_put_autosuspend(wl->dev);
5696 mutex_unlock(&wl->mutex);
/*
 * Finish an active remain-on-channel: stop the device role for the
 * ROC vif. No-op (already completed) when wl->roc_vif is NULL.
 * Caller holds wl->mutex with the FW awake.
 */
5700 static int __wlcore_roc_completed(struct wl1271 *wl)
5702 struct wl12xx_vif *wlvif;
5705 /* already completed */
5706 if (unlikely(!wl->roc_vif))
5709 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5711 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5714 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locking/PM wrapper around __wlcore_roc_completed(): take wl->mutex,
 * wake the firmware, complete the ROC, then drop references.
 */
5723 static int wlcore_roc_completed(struct wl1271 *wl)
5727 wl1271_debug(DEBUG_MAC80211, "roc complete");
5729 mutex_lock(&wl->mutex);
5731 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5736 ret = pm_runtime_get_sync(wl->dev);
5738 pm_runtime_put_noidle(wl->dev);
5742 ret = __wlcore_roc_completed(wl);
5744 pm_runtime_mark_last_busy(wl->dev);
5745 pm_runtime_put_autosuspend(wl->dev);
5747 mutex_unlock(&wl->mutex);
/*
 * Delayed work armed by remain_on_channel: end the ROC and notify
 * mac80211 that the remain-on-channel period has expired.
 */
5752 static void wlcore_roc_complete_work(struct work_struct *work)
5754 struct delayed_work *dwork;
5758 dwork = to_delayed_work(work);
5759 wl = container_of(dwork, struct wl1271, roc_complete_work);
5761 ret = wlcore_roc_completed(wl);
5763 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel-ROC callback: flush TX, cancel the pending
 * roc_complete_work and complete the ROC synchronously.
 */
5766 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5768 struct wl1271 *wl = hw->priv;
5770 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5773 wl1271_tx_flush(wl);
5776 * we can't just flush_work here, because it might deadlock
5777 * (as we might get called from the same workqueue)
5779 cancel_delayed_work_sync(&wl->roc_complete_work);
5780 wlcore_roc_completed(wl);
/*
 * mac80211 rate-control update callback.  Only bandwidth changes are
 * of interest; since this callback runs in atomic context, the actual
 * reconfiguration is deferred to wlvif->rc_update_work.
 * (NOTE(review): the "changed" bitmask parameter line is not visible
 * in this view of the signature — confirm against upstream.)
 */
5785 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5786 struct ieee80211_vif *vif,
5787 struct ieee80211_sta *sta,
5790 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5792 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5794 if (!(changed & IEEE80211_RC_BW_CHANGED))
5797 /* this callback is atomic, so schedule a new work */
5798 wlvif->rc_update_bw = sta->bandwidth;
5799 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5800 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 sta_statistics callback: report the averaged RSSI for the
 * station.  Wakes the chip (runtime PM), queries the firmware via
 * wlcore_acx_average_rssi() and fills sinfo->signal on success.
 */
5803 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5804 struct ieee80211_vif *vif,
5805 struct ieee80211_sta *sta,
5806 struct station_info *sinfo)
5808 struct wl1271 *wl = hw->priv;
5809 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5813 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5815 mutex_lock(&wl->mutex);
5817 if (unlikely(wl->state != WLCORE_STATE_ON))
5820 ret = pm_runtime_get_sync(wl->dev);
/* get_sync failure path: drop the usage count taken above */
5822 pm_runtime_put_noidle(wl->dev);
5826 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
/* Only mark the signal valid once the firmware query succeeded. */
5830 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5831 sinfo->signal = rssi_dbm;
5834 pm_runtime_mark_last_busy(wl->dev);
5835 pm_runtime_put_autosuspend(wl->dev);
5838 mutex_unlock(&wl->mutex);
5841 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5842 struct ieee80211_sta *sta)
5844 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5845 struct wl1271 *wl = hw->priv;
5846 u8 hlid = wl_sta->hlid;
5848 /* return in units of Kbps */
5849 return (wl->links[hlid].fw_rate_mbps * 1000);
/*
 * mac80211 tx_frames_pending callback: report whether any TX frames
 * are still queued in the driver or in flight in the firmware.
 */
5852 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5854 struct wl1271 *wl = hw->priv;
5857 mutex_lock(&wl->mutex);
5859 if (unlikely(wl->state != WLCORE_STATE_ON))
5862 /* packets are considered pending if in the TX queue or the FW */
5863 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5865 mutex_unlock(&wl->mutex);
/*
 * Legacy (11b/11g) rate table for the 2.4 GHz band.  hw_value and
 * hw_value_short carry the chip's CONF_HW_BIT_RATE_* bitmask for the
 * long- and short-preamble variants of each rate; the CCK rates above
 * 1 Mbps additionally advertise short-preamble support.
 */
5870 /* can't be const, mac80211 writes to this */
5871 static struct ieee80211_rate wl1271_rates[] = {
5873 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5874 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5876 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5877 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5878 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5880 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5881 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5882 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5884 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5885 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5886 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5888 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5889 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5891 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5892 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5894 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5895 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5897 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5898 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5900 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5901 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5903 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5904 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5906 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5907 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5909 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5910 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 2.4 GHz channel table (channels 1-14).  max_power is initialized to
 * the chip maximum and later clamped per regulatory domain.
 */
5913 /* can't be const, mac80211 writes to this */
5914 static struct ieee80211_channel wl1271_channels[] = {
5915 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5916 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5917 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5918 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5919 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5920 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5921 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5922 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5923 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5924 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5925 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5926 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5927 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5928 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz band descriptor tying the channel and rate tables together. */
5931 /* can't be const, mac80211 writes to this */
5932 static struct ieee80211_supported_band wl1271_band_2ghz = {
5933 .channels = wl1271_channels,
5934 .n_channels = ARRAY_SIZE(wl1271_channels),
5935 .bitrates = wl1271_rates,
5936 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/*
 * OFDM-only rate table for the 5 GHz band (no CCK rates, hence no
 * short-preamble flags).
 */
5939 /* 5 GHz data rates for WL1273 */
5940 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5942 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5943 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5945 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5946 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5948 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5949 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5951 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5952 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5954 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5955 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5957 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5958 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5960 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5961 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5963 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5964 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/*
 * 5 GHz channel table.  Includes the Japanese 4.9/5.0 GHz channels
 * (8/12/16 and 34/38/42/46), UNII-1/2/2e and UNII-3 channels.
 */
5967 /* 5 GHz band channels for WL1273 */
5968 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5969 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5970 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5971 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5972 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5973 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5974 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5975 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5976 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5977 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5978 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5979 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5980 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5981 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5982 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5983 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5984 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5985 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5986 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5987 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5988 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5989 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5990 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5991 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5992 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5993 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5994 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5995 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5996 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5997 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5998 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5999 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz band descriptor tying the channel and rate tables together. */
6002 static struct ieee80211_supported_band wl1271_band_5ghz = {
6003 .channels = wl1271_channels_5ghz,
6004 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6005 .bitrates = wl1271_rates_5ghz,
6006 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/*
 * mac80211 driver operations table.  Registered via
 * ieee80211_alloc_hw() in wlcore_alloc_hw(); entries without a
 * callback defined here fall back to mac80211 defaults.
 */
6009 static const struct ieee80211_ops wl1271_ops = {
6010 .start = wl1271_op_start,
6011 .stop = wlcore_op_stop,
6012 .add_interface = wl1271_op_add_interface,
6013 .remove_interface = wl1271_op_remove_interface,
6014 .change_interface = wl12xx_op_change_interface,
6016 .suspend = wl1271_op_suspend,
6017 .resume = wl1271_op_resume,
6019 .config = wl1271_op_config,
6020 .prepare_multicast = wl1271_op_prepare_multicast,
6021 .configure_filter = wl1271_op_configure_filter,
6023 .set_key = wlcore_op_set_key,
6024 .hw_scan = wl1271_op_hw_scan,
6025 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
6026 .sched_scan_start = wl1271_op_sched_scan_start,
6027 .sched_scan_stop = wl1271_op_sched_scan_stop,
6028 .bss_info_changed = wl1271_op_bss_info_changed,
6029 .set_frag_threshold = wl1271_op_set_frag_threshold,
6030 .set_rts_threshold = wl1271_op_set_rts_threshold,
6031 .conf_tx = wl1271_op_conf_tx,
6032 .get_tsf = wl1271_op_get_tsf,
6033 .get_survey = wl1271_op_get_survey,
6034 .sta_state = wl12xx_op_sta_state,
6035 .ampdu_action = wl1271_op_ampdu_action,
6036 .tx_frames_pending = wl1271_tx_frames_pending,
6037 .set_bitrate_mask = wl12xx_set_bitrate_mask,
6038 .set_default_unicast_key = wl1271_op_set_default_key_idx,
6039 .channel_switch = wl12xx_op_channel_switch,
6040 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
6041 .flush = wlcore_op_flush,
6042 .remain_on_channel = wlcore_op_remain_on_channel,
6043 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6044 .add_chanctx = wlcore_op_add_chanctx,
6045 .remove_chanctx = wlcore_op_remove_chanctx,
6046 .change_chanctx = wlcore_op_change_chanctx,
6047 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6048 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6049 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6050 .sta_rc_update = wlcore_op_sta_rc_update,
6051 .sta_statistics = wlcore_op_sta_statistics,
6052 .get_expected_throughput = wlcore_op_get_expected_throughput,
6053 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a hardware RX rate index into the mac80211 rate index for
 * the given band, via the per-chip wl->band_rate_to_idx table.  Logs
 * an error on out-of-range or unsupported hardware rates.
 */
6057 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6063 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6064 wl1271_error("Illegal RX rate from HW: %d", rate);
6068 idx = wl->band_rate_to_idx[band][rate];
6069 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6070 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Derive the set of MAC addresses advertised to mac80211 from a base
 * OUI (upper 24 bits) and NIC (lower 24 bits) pair.  Consecutive NIC
 * values are assigned per address.  If the chip provides one address
 * fewer than WLCORE_NUM_MAC_ADDRESSES, the last slot is synthesized
 * from the first one with the locally-administered bit set.
 */
6077 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6081 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6084 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6085 wl1271_warning("NIC part of the MAC address wraps around!");
6087 for (i = 0; i < wl->num_mac_addr; i++) {
6088 wl->addresses[i].addr[0] = (u8)(oui >> 16);
6089 wl->addresses[i].addr[1] = (u8)(oui >> 8);
6090 wl->addresses[i].addr[2] = (u8) oui;
6091 wl->addresses[i].addr[3] = (u8)(nic >> 16);
6092 wl->addresses[i].addr[4] = (u8)(nic >> 8);
6093 wl->addresses[i].addr[5] = (u8) nic;
6097 /* we may be one address short at the most */
6098 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6101 * turn on the LAA bit in the first address and use it as
6104 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6105 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6106 memcpy(&wl->addresses[idx], &wl->addresses[0],
6107 sizeof(wl->addresses[0]));
/* BIT(1) of the first octet is the locally-administered bit */
6109 wl->addresses[idx].addr[0] |= BIT(1);
6112 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6113 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Read chip identification: chip ID register, PG (production grade)
 * version, and — when the chip family implements get_mac — the fused
 * MAC address.  fuse_oui_addr/fuse_nic_addr default to 0 if unread.
 */
6116 static int wl12xx_get_hw_info(struct wl1271 *wl)
6120 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6124 wl->fuse_oui_addr = 0;
6125 wl->fuse_nic_addr = 0;
6127 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6131 if (wl->ops->get_mac)
6132 ret = wl->ops->get_mac(wl);
/*
 * Register the device with mac80211.  The base MAC address is taken
 * from the NVS file when present, falling back to the fused address
 * (BD_ADDR + 1), and finally to a random NIC with the TI OUI when the
 * fuse is also empty.  The 0xdeadbeef address is the sentinel found in
 * the default/unconfigured NVS shipped for wl18xx.
 */
6138 static int wl1271_register_hw(struct wl1271 *wl)
6141 u32 oui_addr = 0, nic_addr = 0;
6142 struct platform_device *pdev = wl->pdev;
6143 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6145 if (wl->mac80211_registered)
6148 if (wl->nvs_len >= 12) {
6149 /* NOTE: The wl->nvs->nvs element must be first, in
6150 * order to simplify the casting, we assume it is at
6151 * the beginning of the wl->nvs structure.
6153 u8 *nvs_ptr = (u8 *)wl->nvs;
/* MAC address bytes live at fixed offsets in the NVS blob */
6156 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6158 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6161 /* if the MAC address is zeroed in the NVS derive from fuse */
6162 if (oui_addr == 0 && nic_addr == 0) {
6163 oui_addr = wl->fuse_oui_addr;
6164 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6165 nic_addr = wl->fuse_nic_addr + 1;
6168 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6169 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6170 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6171 wl1271_warning("This default nvs file can be removed from the file system");
6173 wl1271_warning("Your device performance is not optimized.");
6174 wl1271_warning("Please use the calibrator tool to configure your device.");
6177 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6178 wl1271_warning("Fuse mac address is zero. using random mac");
6179 /* Use TI oui and a random nic */
6180 oui_addr = WLCORE_TI_OUI_ADDRESS;
6181 nic_addr = get_random_int();
6183 oui_addr = wl->fuse_oui_addr;
6184 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6185 nic_addr = wl->fuse_nic_addr + 1;
6189 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6191 ret = ieee80211_register_hw(wl->hw);
6193 wl1271_error("unable to register mac80211 hw: %d", ret);
6197 wl->mac80211_registered = true;
6199 wl1271_debugfs_init(wl);
6201 wl1271_notice("loaded");
/*
 * Unregister from mac80211, stopping PLT (production line test) mode
 * first if it is active.
 */
6207 static void wl1271_unregister_hw(struct wl1271 *wl)
6210 wl1271_plt_stop(wl);
6212 ieee80211_unregister_hw(wl->hw);
6213 wl->mac80211_registered = false;
/*
 * One-time setup of the ieee80211_hw/wiphy structures before
 * registration: hardware capability flags, supported cipher suites,
 * interface modes, scan limits, band/channel tables (copied so they
 * can be modified per device), TX queue layout and vendor commands.
 */
6217 static int wl1271_init_ieee80211(struct wl1271 *wl)
6220 static const u32 cipher_suites[] = {
6221 WLAN_CIPHER_SUITE_WEP40,
6222 WLAN_CIPHER_SUITE_WEP104,
6223 WLAN_CIPHER_SUITE_TKIP,
6224 WLAN_CIPHER_SUITE_CCMP,
6225 WL1271_CIPHER_SUITE_GEM,
6228 /* The tx descriptor buffer */
6229 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* Some chips need extra headroom for the TKIP fields */
6231 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6232 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6235 /* FIXME: find a proper value */
6236 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6238 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6239 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6240 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6241 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6242 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6243 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6244 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6245 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6246 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6247 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6248 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6249 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6250 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6251 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6253 wl->hw->wiphy->cipher_suites = cipher_suites;
6254 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6256 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6257 BIT(NL80211_IFTYPE_AP) |
6258 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6259 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6260 #ifdef CONFIG_MAC80211_MESH
6261 BIT(NL80211_IFTYPE_MESH_POINT) |
6263 BIT(NL80211_IFTYPE_P2P_GO);
6265 wl->hw->wiphy->max_scan_ssids = 1;
6266 wl->hw->wiphy->max_sched_scan_ssids = 16;
6267 wl->hw->wiphy->max_match_sets = 16;
6269 * Maximum length of elements in scanning probe request templates
6270 * should be the maximum length possible for a template, without
6271 * the IEEE80211 header of the template
6273 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6274 sizeof(struct ieee80211_header);
6276 wl->hw->wiphy->max_sched_scan_reqs = 1;
6277 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6278 sizeof(struct ieee80211_header);
6280 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6282 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6283 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6284 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6286 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6288 /* make sure all our channels fit in the scanned_ch bitmask */
6289 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6290 ARRAY_SIZE(wl1271_channels_5ghz) >
6291 WL1271_MAX_CHANNELS);
6293 * clear channel flags from the previous usage
6294 * and restore max_power & max_antenna_gain values.
6296 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6297 wl1271_band_2ghz.channels[i].flags = 0;
6298 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6299 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6302 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6303 wl1271_band_5ghz.channels[i].flags = 0;
6304 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6305 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6309 * We keep local copies of the band structs because we need to
6310 * modify them on a per-device basis.
6312 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6313 sizeof(wl1271_band_2ghz));
6314 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6315 &wl->ht_cap[NL80211_BAND_2GHZ],
6316 sizeof(*wl->ht_cap));
6317 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6318 sizeof(wl1271_band_5ghz));
6319 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6320 &wl->ht_cap[NL80211_BAND_5GHZ],
6321 sizeof(*wl->ht_cap));
6323 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6324 &wl->bands[NL80211_BAND_2GHZ];
6325 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6326 &wl->bands[NL80211_BAND_5GHZ];
6329 * allow 4 queues per mac address we support +
6330 * 1 cab queue per mac + one global offchannel Tx queue
6332 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6334 /* the last queue is the offchannel queue */
6335 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6336 wl->hw->max_rates = 1;
6338 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6340 /* the FW answers probe-requests in AP-mode */
6341 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6342 wl->hw->wiphy->probe_resp_offload =
6343 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6344 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6345 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6347 /* allowed interface combinations */
6348 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6349 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6351 /* register vendor commands */
6352 wlcore_set_vendor_commands(wl->hw->wiphy);
6354 SET_IEEE80211_DEV(wl->hw, wl->dev);
6356 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6357 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6359 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate and initialize the ieee80211_hw and wl1271 structures plus
 * all driver-owned buffers: per-link TX queues, work items, the
 * freezable workqueue, the RX/TX aggregation buffer, the dummy packet,
 * the FW log page and the event mailbox.  On failure the error path
 * unwinds each allocation in reverse order (goto-style cleanup) and
 * returns an ERR_PTR.  wlcore_free_hw() is the matching destructor.
 */
6364 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6367 struct ieee80211_hw *hw;
6372 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6374 wl1271_error("could not alloc ieee80211_hw");
6380 memset(wl, 0, sizeof(*wl));
6382 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6384 wl1271_error("could not alloc wl priv");
6386 goto err_priv_alloc;
6389 INIT_LIST_HEAD(&wl->wlvif_list);
6394 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6395 * we don't allocate any additional resource here, so that's fine.
6397 for (i = 0; i < NUM_TX_QUEUES; i++)
6398 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6399 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6401 skb_queue_head_init(&wl->deferred_rx_queue);
6402 skb_queue_head_init(&wl->deferred_tx_queue);
6404 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6405 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6406 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6407 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6408 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6409 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so work items don't run during system suspend */
6411 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6412 if (!wl->freezable_wq) {
6419 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6420 wl->band = NL80211_BAND_2GHZ;
6421 wl->channel_type = NL80211_CHAN_NO_HT;
6423 wl->sg_enabled = true;
6424 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6425 wl->recovery_count = 0;
6428 wl->ap_fw_ps_map = 0;
6430 wl->system_hlid = WL12XX_SYSTEM_HLID;
6431 wl->active_sta_count = 0;
6432 wl->active_link_count = 0;
6435 /* The system link is always allocated */
6436 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6438 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6439 for (i = 0; i < wl->num_tx_desc; i++)
6440 wl->tx_frames[i] = NULL;
6442 spin_lock_init(&wl->wl_lock);
6444 wl->state = WLCORE_STATE_OFF;
6445 wl->fw_type = WL12XX_FW_TYPE_NONE;
6446 mutex_init(&wl->mutex);
6447 mutex_init(&wl->flush_mutex);
6448 init_completion(&wl->nvs_loading_complete);
/* aggregation buffer must be page-aligned whole pages */
6450 order = get_order(aggr_buf_size);
6451 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6452 if (!wl->aggr_buf) {
6456 wl->aggr_buf_size = aggr_buf_size;
6458 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6459 if (!wl->dummy_packet) {
6464 /* Allocate one page for the FW log */
6465 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6468 goto err_dummy_packet;
/* GFP_DMA: the mailbox is read by DMA-capable bus transfers */
6471 wl->mbox_size = mbox_size;
6472 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6478 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6479 if (!wl->buffer_32) {
/* error unwinding, in reverse allocation order: */
6490 free_page((unsigned long)wl->fwlog);
6493 dev_kfree_skb(wl->dummy_packet);
6496 free_pages((unsigned long)wl->aggr_buf, order);
6499 destroy_workqueue(wl->freezable_wq);
6502 wl1271_debugfs_exit(wl);
6506 ieee80211_free_hw(hw);
6510 return ERR_PTR(ret);
6512 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Destructor matching wlcore_alloc_hw(): releases every buffer and
 * the workqueue, then frees the ieee80211_hw itself.  Setting
 * fwlog_size to -1 first unblocks any reader waiting on FW log data.
 */
6514 int wlcore_free_hw(struct wl1271 *wl)
6516 /* Unblock any fwlog readers */
6517 mutex_lock(&wl->mutex);
6518 wl->fwlog_size = -1;
6519 mutex_unlock(&wl->mutex);
6521 wlcore_sysfs_free(wl);
6523 kfree(wl->buffer_32);
6525 free_page((unsigned long)wl->fwlog);
6526 dev_kfree_skb(wl->dummy_packet);
6527 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6529 wl1271_debugfs_exit(wl);
6533 wl->fw_type = WL12XX_FW_TYPE_NONE;
6537 kfree(wl->raw_fw_status);
6538 kfree(wl->fw_status);
6539 kfree(wl->tx_res_if);
6540 destroy_workqueue(wl->freezable_wq);
6543 ieee80211_free_hw(wl->hw);
6547 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211 when the platform
 * keeps power in suspend (see wlcore_nvs_cb).
 */
6550 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6551 .flags = WIPHY_WOWLAN_ANY,
6552 .n_patterns = WL1271_MAX_RX_FILTERS,
6553 .pattern_min_len = 1,
6554 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Hard-IRQ handler installed for edge-triggered interrupt lines (see
 * wlcore_nvs_cb): does no work itself, just wakes the threaded
 * handler (wlcore_irq).
 */
6558 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6560 return IRQ_WAKE_THREAD;
/*
 * Continuation of wlcore_probe(), invoked when the (optional) NVS
 * calibration firmware load finishes.  Caches the NVS blob, runs the
 * chip-family setup hook, acquires the IRQ resources, powers the chip
 * on once to read HW info, sets up the IRQ + optional dedicated wake
 * IRQ and WoWLAN, then registers with mac80211 and sysfs.  Always
 * releases the firmware and completes nvs_loading_complete on exit.
 */
6563 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6565 struct wl1271 *wl = context;
6566 struct platform_device *pdev = wl->pdev;
6567 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6568 struct resource *res;
6571 irq_handler_t hardirq_fn = NULL;
6574 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6576 wl1271_error("Could not allocate nvs data");
6579 wl->nvs_len = fw->size;
6580 } else if (pdev_data->family->nvs_name) {
/* NVS is optional: missing file is only a debug-level event */
6581 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6582 pdev_data->family->nvs_name);
6590 ret = wl->ops->setup(wl);
6594 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6596 /* adjust some runtime configuration parameters */
6597 wlcore_adjust_conf(wl);
6599 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6601 wl1271_error("Could not get IRQ resource");
6605 wl->irq = res->start;
6606 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6607 wl->if_ops = pdev_data->if_ops;
/* edge-triggered lines need a hardirq handler to kick the thread */
6609 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6610 hardirq_fn = wlcore_hardirq;
6612 wl->irq_flags |= IRQF_ONESHOT;
6614 ret = wl12xx_set_power_on(wl);
6618 ret = wl12xx_get_hw_info(wl);
6620 wl1271_error("couldn't get hw info");
6621 wl1271_power_off(wl);
6625 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6626 wl->irq_flags, pdev->name, wl);
6628 wl1271_error("interrupt configuration failed");
6629 wl1271_power_off(wl);
6634 device_init_wakeup(wl->dev, true);
6636 ret = enable_irq_wake(wl->irq);
6638 wl->irq_wake_enabled = true;
/* WoWLAN is only offered when power is retained during suspend */
6639 if (pdev_data->pwr_in_suspend)
6640 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* optional second IRQ resource: dedicated wakeup interrupt */
6643 res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6645 wl->wakeirq = res->start;
6646 wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6647 ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6649 wl->wakeirq = -ENODEV;
6651 wl->wakeirq = -ENODEV;
/* chip stays powered off until mac80211 calls op_start */
6654 disable_irq(wl->irq);
6655 wl1271_power_off(wl);
6657 ret = wl->ops->identify_chip(wl);
6661 ret = wl1271_init_ieee80211(wl);
6665 ret = wl1271_register_hw(wl);
6669 ret = wlcore_sysfs_init(wl);
6673 wl->initialized = true;
/* error unwinding: */
6677 wl1271_unregister_hw(wl);
6680 if (wl->wakeirq >= 0)
6681 dev_pm_clear_wake_irq(wl->dev);
6682 device_init_wakeup(wl->dev, false);
6683 free_irq(wl->irq, wl);
6689 release_firmware(fw);
6690 complete_all(&wl->nvs_loading_complete);
/*
 * Runtime-PM suspend: put the chip into ELP (Extremely Low Power).
 * Skipped in PLT mode, when ELP is not the configured sleep policy,
 * or while any in-use vif is not yet in power-save.  Triggers FW
 * recovery if the ELP register write fails.
 */
6693 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6695 struct wl1271 *wl = dev_get_drvdata(dev);
6696 struct wl12xx_vif *wlvif;
6699 /* We do not enter elp sleep in PLT mode */
6703 /* Nothing to do if no ELP mode requested */
6704 if (wl->sleep_auth != WL1271_PSM_ELP)
6707 wl12xx_for_each_wlvif(wl, wlvif) {
6708 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6709 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6713 wl1271_debug(DEBUG_PSM, "chip to elp");
6714 error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6716 wl12xx_queue_recovery_work(wl);
6721 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
/*
 * Runtime-PM resume: wake the chip from ELP.  Writes the wake-up bit
 * to the ELP control register, then waits (up to WL1271_WAKEUP_TIMEOUT
 * ms) for the firmware's wake-up completion, signalled via
 * wl->elp_compl from the IRQ path.  A timeout or write failure
 * triggers FW recovery but still returns success so runtime PM can
 * proceed with the recovery.
 */
6726 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6728 struct wl1271 *wl = dev_get_drvdata(dev);
6729 DECLARE_COMPLETION_ONSTACK(compl);
6730 unsigned long flags;
6732 unsigned long start_time = jiffies;
6733 bool pending = false;
6734 bool recovery = false;
6736 /* Nothing to do if no ELP mode requested */
6737 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6740 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
/* publish the completion under wl_lock so the IRQ path can see it */
6742 spin_lock_irqsave(&wl->wl_lock, flags);
6743 if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6746 wl->elp_compl = &compl;
6747 spin_unlock_irqrestore(&wl->wl_lock, flags);
6749 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6756 ret = wait_for_completion_timeout(&compl,
6757 msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6759 wl1271_warning("ELP wakeup timeout!");
6761 /* Return no error for runtime PM for recovery */
6768 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6770 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6771 jiffies_to_msecs(jiffies - start_time));
/* clear the completion pointer so the IRQ path won't use stack memory */
6776 spin_lock_irqsave(&wl->wl_lock, flags);
6777 wl->elp_compl = NULL;
6778 spin_unlock_irqrestore(&wl->wl_lock, flags);
6781 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6782 wl12xx_queue_recovery_work(wl);
/* Runtime-PM callbacks installed on the device in wlcore_probe(). */
6788 static const struct dev_pm_ops wlcore_pm_ops = {
6789 SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6790 wlcore_runtime_resume,
/*
 * Common probe entry used by the wl12xx/wl18xx bus glue.  Kicks off an
 * asynchronous NVS firmware load when the chip family defines one —
 * initialization then continues in wlcore_nvs_cb() — otherwise calls
 * the callback directly.  Finally enables autosuspending runtime PM.
 */
6794 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6796 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6797 const char *nvs_name;
6800 if (!wl->ops || !wl->ptable || !pdev_data)
6803 wl->dev = &pdev->dev;
6805 platform_set_drvdata(pdev, wl);
6807 if (pdev_data->family && pdev_data->family->nvs_name) {
6808 nvs_name = pdev_data->family->nvs_name;
6809 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6810 nvs_name, &pdev->dev, GFP_KERNEL,
6813 wl1271_error("request_firmware_nowait failed for %s: %d",
/* complete so wlcore_remove() won't block forever on it */
6815 complete_all(&wl->nvs_loading_complete);
6818 wlcore_nvs_cb(NULL, wl);
6821 wl->dev->driver->pm = &wlcore_pm_ops;
6822 pm_runtime_set_autosuspend_delay(wl->dev, 50);
6823 pm_runtime_use_autosuspend(wl->dev);
6824 pm_runtime_enable(wl->dev);
6828 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * Common remove entry: waits for the async NVS load to finish, tears
 * down wake IRQ / wakeup, unregisters from mac80211 and disables
 * runtime PM.  The chip is held awake (get_sync) across the teardown.
 */
6830 int wlcore_remove(struct platform_device *pdev)
6832 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6833 struct wl1271 *wl = platform_get_drvdata(pdev);
6836 error = pm_runtime_get_sync(wl->dev);
6838 dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6840 wl->dev->driver->pm = NULL;
/* wlcore_nvs_cb() signals this completion even on failure */
6842 if (pdev_data->family && pdev_data->family->nvs_name)
6843 wait_for_completion(&wl->nvs_loading_complete);
6844 if (!wl->initialized)
6847 if (wl->wakeirq >= 0) {
6848 dev_pm_clear_wake_irq(wl->dev);
6849 wl->wakeirq = -ENODEV;
6852 device_init_wakeup(wl->dev, false);
6854 if (wl->irq_wake_enabled)
6855 disable_irq_wake(wl->irq);
6857 wl1271_unregister_hw(wl);
6859 pm_runtime_put_sync(wl->dev);
6860 pm_runtime_dont_use_autosuspend(wl->dev);
6861 pm_runtime_disable(wl->dev);
6863 free_irq(wl->irq, wl);
6868 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Module parameters and metadata (shared by the wl12xx/wl18xx glue). */
6870 u32 wl12xx_debug_level = DEBUG_NONE;
6871 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6872 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6873 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6875 module_param_named(fwlog, fwlog_param, charp, 0);
6876 MODULE_PARM_DESC(fwlog,
6877 "FW logger options: continuous, dbgpins or disable");
6879 module_param(fwlog_mem_blocks, int, 0600);
6880 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6882 module_param(bug_on_recovery, int, 0600);
6883 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6885 module_param(no_recovery, int, 0600);
6886 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6888 MODULE_LICENSE("GPL");
6889 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6890 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");