1 // SPDX-License-Identifier: GPL-2.0-only
3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
20 #include "wl12xx_80211.h"
27 #include "vendor_cmd.h"
/* Boot/suspend timing knobs (retries, ms delays). */
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_SUSPEND_SLEEP 100
34 #define WL1271_WAKEUP_TIMEOUT 500
/*
 * Module parameters; the integer ones default to -1 meaning "not set,
 * keep the per-chip conf default" (see wlcore_adjust_conf()).
 */
36 static char *fwlog_param;
37 static int fwlog_mem_blocks = -1;
38 static int bug_on_recovery = -1;
39 static int no_recovery = -1;
/* Forward declarations for routines referenced before their definitions. */
41 static void __wl1271_op_remove_interface(struct wl1271 *wl,
42 struct ieee80211_vif *vif,
43 bool reset_tx_queues);
44 static void wlcore_op_stop_locked(struct wl1271 *wl);
45 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Tell the FW that the STA peer is now authorized (connection complete).
 * Only valid for an associated STA interface; the peer-state command is
 * sent at most once per association (guarded by the STA_STATE_SENT bit).
 */
47 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
51 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
54 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
/* test_and_set: returns early if the state command was already sent. */
57 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
60 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
64 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: cache the new DFS region and push the
 * updated regulatory domain configuration down to the chip.
 */
68 static void wl1271_reg_notify(struct wiphy *wiphy,
69 struct regulatory_request *request)
71 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
72 struct wl1271 *wl = hw->priv;
74 /* copy the current dfs region */
76 wl->dfs_region = request->dfs_region;
78 wlcore_regdomain_config(wl);
/*
 * Enable/disable PS RX streaming in the FW for @wlvif and mirror the
 * result in the WLVIF_FLAG_RX_STREAMING_STARTED flag.
 * Caller must hold wl->mutex.
 */
81 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
86 /* we should hold wl->mutex */
87 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
92 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
100 * this function is being called when the rx_streaming interval
101 * has been changed or rx_streaming should be disabled
103 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
106 int period = wl->conf.rx_streaming.interval;
108 /* don't reconfigure if rx_streaming is disabled */
109 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
/*
 * Keep streaming only while associated and either configured as
 * "always" or while soft-gemini (BT coex) is active; otherwise stop.
 */
112 /* reconfigure/disable according to new streaming_period */
114 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
115 (wl->conf.rx_streaming.always ||
116 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
117 ret = wl1271_set_rx_streaming(wl, wlvif, true);
119 ret = wl1271_set_rx_streaming(wl, wlvif, false);
120 /* don't cancel_work_sync since we might deadlock */
121 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Worker: enable RX streaming for the vif, then arm the inactivity timer
 * that will later queue the disable worker. Bails out if streaming is
 * already on, the STA is not associated, or streaming isn't warranted.
 */
127 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
130 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
131 rx_streaming_enable_work);
132 struct wl1271 *wl = wlvif->wl;
134 mutex_lock(&wl->mutex);
136 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
137 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
138 (!wl->conf.rx_streaming.always &&
139 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
142 if (!wl->conf.rx_streaming.interval)
/* Wake the chip via runtime PM; drop the ref on the error path. */
145 ret = pm_runtime_get_sync(wl->dev);
147 pm_runtime_put_noidle(wl->dev);
151 ret = wl1271_set_rx_streaming(wl, wlvif, true);
155 /* stop it after some time of inactivity */
156 mod_timer(&wlvif->rx_streaming_timer,
157 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
160 pm_runtime_mark_last_busy(wl->dev);
161 pm_runtime_put_autosuspend(wl->dev);
163 mutex_unlock(&wl->mutex);
/*
 * Worker: disable RX streaming after the inactivity timer fires.
 * No-op if streaming was already stopped.
 */
166 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
169 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
170 rx_streaming_disable_work);
171 struct wl1271 *wl = wlvif->wl;
173 mutex_lock(&wl->mutex);
175 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
178 ret = pm_runtime_get_sync(wl->dev);
180 pm_runtime_put_noidle(wl->dev);
184 ret = wl1271_set_rx_streaming(wl, wlvif, false);
189 pm_runtime_mark_last_busy(wl->dev);
190 pm_runtime_put_autosuspend(wl->dev);
192 mutex_unlock(&wl->mutex);
/*
 * Inactivity timer callback: defer the actual disable to a work item,
 * since FW commands cannot be issued from timer (atomic) context.
 */
195 static void wl1271_rx_streaming_timer(struct timer_list *t)
197 struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
198 struct wl1271 *wl = wlvif->wl;
199 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
202 /* wl->mutex must be taken */
203 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
205 /* if the watchdog is not armed, don't do anything */
206 if (wl->tx_allocated_blocks == 0)
/* Restart the delayed work with a fresh full timeout period. */
209 cancel_delayed_work(&wl->tx_watchdog_work);
210 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
211 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Worker: push an updated rate-control/HT configuration to the FW.
 * Mesh vifs get explicit HT capabilities; other vif types go through
 * the chip-specific sta_rc_update hook.
 */
214 static void wlcore_rc_update_work(struct work_struct *work)
217 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
219 struct wl1271 *wl = wlvif->wl;
220 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
222 mutex_lock(&wl->mutex);
224 if (unlikely(wl->state != WLCORE_STATE_ON))
227 ret = pm_runtime_get_sync(wl->dev);
229 pm_runtime_put_noidle(wl->dev);
233 if (ieee80211_vif_is_mesh(vif)) {
234 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
235 true, wlvif->sta.hlid);
239 wlcore_hw_sta_rc_update(wl, wlvif);
243 pm_runtime_mark_last_busy(wl->dev);
244 pm_runtime_put_autosuspend(wl->dev);
246 mutex_unlock(&wl->mutex);
/*
 * Tx watchdog: fires when no FW Tx completion arrived for the configured
 * timeout. Benign stalls (ROC in progress, ongoing scan, AP buffering
 * frames for PS stations) just re-arm the watchdog; a genuine stall
 * triggers full FW recovery.
 */
249 static void wl12xx_tx_watchdog_work(struct work_struct *work)
251 struct delayed_work *dwork;
254 dwork = to_delayed_work(work);
255 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
257 mutex_lock(&wl->mutex);
259 if (unlikely(wl->state != WLCORE_STATE_ON))
262 /* Tx went out in the meantime - everything is ok */
263 if (unlikely(wl->tx_allocated_blocks == 0))
267 * if a ROC is in progress, we might not have any Tx for a long
268 * time (e.g. pending Tx on the non-ROC channels)
270 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
271 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
272 wl->conf.tx.tx_watchdog_timeout);
273 wl12xx_rearm_tx_watchdog_locked(wl);
278 * if a scan is in progress, we might not have any Tx for a long
281 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
282 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
283 wl->conf.tx.tx_watchdog_timeout);
284 wl12xx_rearm_tx_watchdog_locked(wl);
289 * AP might cache a frame for a long time for a sleeping station,
290 * so rearm the timer if there's an AP interface with stations. If
291 * Tx is genuinely stuck we will most hopefully discover it when all
292 * stations are removed due to inactivity.
294 if (wl->active_sta_count) {
295 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
297 wl->conf.tx.tx_watchdog_timeout,
298 wl->active_sta_count);
299 wl12xx_rearm_tx_watchdog_locked(wl);
/* No benign explanation found: assume the FW Tx path is stuck. */
303 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
304 wl->conf.tx.tx_watchdog_timeout);
305 wl12xx_queue_recovery_work(wl);
308 mutex_unlock(&wl->mutex);
/*
 * Apply module-parameter overrides (fwlog mode, bug_on_recovery,
 * no_recovery) on top of the chip's default configuration.
 * A value of -1 means the parameter was not set and the default stands.
 */
311 static void wlcore_adjust_conf(struct wl1271 *wl)
315 if (!strcmp(fwlog_param, "continuous")) {
316 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
318 } else if (!strcmp(fwlog_param, "dbgpins")) {
319 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
320 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
321 } else if (!strcmp(fwlog_param, "disable")) {
322 wl->conf.fwlog.mem_blocks = 0;
323 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
325 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
329 if (bug_on_recovery != -1)
330 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
332 if (no_recovery != -1)
333 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Regulate host-side (high-level) PS for an AP link based on the FW's
 * per-link PS bitmap and the number of packets still queued in the FW.
 */
336 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
337 struct wl12xx_vif *wlvif,
342 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
345 * Wake up from high level PS if the STA is asleep with too little
346 * packets in FW or if the STA is awake.
348 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
349 wl12xx_ps_link_end(wl, wlvif, hlid);
352 * Start high-level PS if the STA is asleep with enough blocks in FW.
353 * Make an exception if this is the only connected link. In this
354 * case FW-memory congestion is less of a problem.
355 * Note that a single connected STA means 2*ap_count + 1 active links,
356 * since we must account for the global and broadcast AP links
357 * for each AP. The "fw_ps" check assures us the other link is a STA
358 * connected to the AP. Otherwise the FW would not set the PSM bit.
360 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
361 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
362 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * Refresh the cached FW link-PS bitmap from the FW status block and
 * re-evaluate high-level PS for every station link on this AP vif.
 */
365 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
366 struct wl12xx_vif *wlvif,
367 struct wl_fw_status *status)
369 unsigned long cur_fw_ps_map;
372 cur_fw_ps_map = status->link_ps_bitmap;
373 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
374 wl1271_debug(DEBUG_PSM,
375 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
376 wl->ap_fw_ps_map, cur_fw_ps_map,
377 wl->ap_fw_ps_map ^ cur_fw_ps_map);
379 wl->ap_fw_ps_map = cur_fw_ps_map;
382 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
383 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
384 wl->links[hlid].allocated_pkts);
/*
 * Read and process the FW status block: update freed-packet counters
 * (per queue and per link), reconcile the Tx block accounting, manage
 * the Tx watchdog, refresh AP link PS state and the host<->chip time
 * offset. Counter deltas are masked to 8/32 bits to survive wrap-around
 * of the FW's free-running counters.
 */
387 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
389 struct wl12xx_vif *wlvif;
390 u32 old_tx_blk_count = wl->tx_blocks_available;
391 int avail, freed_blocks;
394 struct wl1271_link *lnk;
396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
398 wl->fw_status_len, false);
402 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
404 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
405 "drv_rx_counter = %d, tx_results_counter = %d)",
407 status->fw_rx_counter,
408 status->drv_rx_counter,
409 status->tx_results_counter);
411 for (i = 0; i < NUM_TX_QUEUES; i++) {
412 /* prevent wrap-around in freed-packets counter */
413 wl->tx_allocated_pkts[i] -=
414 (status->counters.tx_released_pkts[i] -
415 wl->tx_pkts_freed[i]) & 0xff;
417 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
421 for_each_set_bit(i, wl->links_map, wl->num_links) {
425 /* prevent wrap-around in freed-packets counter */
426 diff = (status->counters.tx_lnk_free_pkts[i] -
427 lnk->prev_freed_pkts) & 0xff;
432 lnk->allocated_pkts -= diff;
433 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
435 /* accumulate the prev_freed_pkts counter */
436 lnk->total_freed_pkts += diff;
439 /* prevent wrap-around in total blocks counter */
440 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
441 freed_blocks = status->total_released_blks -
444 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
445 status->total_released_blks;
447 wl->tx_blocks_freed = status->total_released_blks;
449 wl->tx_allocated_blocks -= freed_blocks;
452 * If the FW freed some blocks:
453 * If we still have allocated blocks - re-arm the timer, Tx is
454 * not stuck. Otherwise, cancel the timer (no Tx currently).
457 if (wl->tx_allocated_blocks)
458 wl12xx_rearm_tx_watchdog_locked(wl);
460 cancel_delayed_work(&wl->tx_watchdog_work);
463 avail = status->tx_total - wl->tx_allocated_blocks;
466 * The FW might change the total number of TX memblocks before
467 * we get a notification about blocks being released. Thus, the
468 * available blocks calculation might yield a temporary result
469 * which is lower than the actual available blocks. Keeping in
470 * mind that only blocks that were allocated can be moved from
471 * TX to RX, tx_blocks_available should never decrease here.
473 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
476 /* if more blocks are available now, tx work can be scheduled */
477 if (wl->tx_blocks_available > old_tx_blk_count)
478 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
480 /* for AP update num of allocated TX blocks per link and ps status */
481 wl12xx_for_each_wlvif_ap(wl, wlvif) {
482 wl12xx_irq_update_links_status(wl, wlvif, status);
485 /* update the host-chipset time offset */
486 wl->time_offset = (ktime_get_boot_ns() >> 10) -
487 (s64)(status->fw_localtime)
489 wl->fw_fast_lnk_map = status->link_fast_bitmap;
/*
 * Drain the deferred RX and TX-status queues into mac80211 (the *_ni
 * variants are used because this runs in process context).
 */
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 /* Pass all received frames to the network stack */
499 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 ieee80211_rx_ni(wl->hw, skb);
502 /* Return sent skbs to the network stack */
503 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Worker: keep flushing deferred frames until the RX queue stays empty,
 * in case more frames were deferred while we were flushing.
 */
507 static void wl1271_netstack_work(struct work_struct *work)
510 container_of(work, struct wl1271, netstack_work);
513 wl1271_flush_deferred_work(wl);
514 } while (skb_queue_len(&wl->deferred_rx_queue));
517 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt processing loop, called with wl->mutex held. Reads the
 * FW status and dispatches watchdog, RX-data and event interrupts,
 * iterating up to WL1271_IRQ_MAX_LOOPS times (only once for
 * edge-triggered IRQs, to avoid racing the hardirq handler).
 */
519 static int wlcore_irq_locked(struct wl1271 *wl)
523 int loopcount = WL1271_IRQ_MAX_LOOPS;
525 unsigned int defer_count;
529 * In case edge triggered interrupt must be used, we cannot iterate
530 * more than once without introducing race conditions with the hardirq.
532 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
535 wl1271_debug(DEBUG_IRQ, "IRQ work");
537 if (unlikely(wl->state != WLCORE_STATE_ON))
540 ret = pm_runtime_get_sync(wl->dev);
542 pm_runtime_put_noidle(wl->dev);
546 while (!done && loopcount--) {
548 * In order to avoid a race with the hardirq, clear the flag
549 * before acknowledging the chip.
551 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
552 smp_mb__after_atomic();
554 ret = wlcore_fw_status(wl, wl->fw_status);
558 wlcore_hw_tx_immediate_compl(wl);
560 intr = wl->fw_status->intr;
561 intr &= WLCORE_ALL_INTR_MASK;
/* HW/SW watchdog interrupts: flag recovery and stop processing. */
567 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
568 wl1271_error("HW watchdog interrupt received! starting recovery.");
569 wl->watchdog_recovery = true;
572 /* restarting the chip. ignore any other interrupt. */
576 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
577 wl1271_error("SW watchdog interrupt received! "
578 "starting recovery.");
579 wl->watchdog_recovery = true;
582 /* restarting the chip. ignore any other interrupt. */
586 if (likely(intr & WL1271_ACX_INTR_DATA)) {
587 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
589 ret = wlcore_rx(wl, wl->fw_status);
593 /* Check if any tx blocks were freed */
594 spin_lock_irqsave(&wl->wl_lock, flags);
595 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
596 wl1271_tx_total_queue_count(wl) > 0) {
597 spin_unlock_irqrestore(&wl->wl_lock, flags);
599 * In order to avoid starvation of the TX path,
600 * call the work function directly.
602 ret = wlcore_tx_work_locked(wl);
606 spin_unlock_irqrestore(&wl->wl_lock, flags);
609 /* check for tx results */
610 ret = wlcore_hw_tx_delayed_compl(wl);
614 /* Make sure the deferred queues don't get too long */
615 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
616 skb_queue_len(&wl->deferred_rx_queue);
617 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
618 wl1271_flush_deferred_work(wl);
621 if (intr & WL1271_ACX_INTR_EVENT_A) {
622 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
623 ret = wl1271_event_handle(wl, 0);
628 if (intr & WL1271_ACX_INTR_EVENT_B) {
629 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
630 ret = wl1271_event_handle(wl, 1);
635 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
636 wl1271_debug(DEBUG_IRQ,
637 "WL1271_ACX_INTR_INIT_COMPLETE");
639 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
640 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
643 pm_runtime_mark_last_busy(wl->dev);
644 pm_runtime_put_autosuspend(wl->dev);
/*
 * Threaded IRQ handler. Completes a pending ELP wakeup, defers work
 * while suspended (disabling the IRQ and raising a wakeup event), and
 * otherwise runs wlcore_irq_locked() under wl->mutex. Tx queued during
 * interrupt handling is flushed here to avoid redundant work items.
 */
650 static irqreturn_t wlcore_irq(int irq, void *cookie)
654 struct wl1271 *wl = cookie;
656 /* complete the ELP completion */
657 spin_lock_irqsave(&wl->wl_lock, flags);
658 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
660 complete(wl->elp_compl);
661 wl->elp_compl = NULL;
664 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
665 /* don't enqueue a work right now. mark it as pending */
666 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
667 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
668 disable_irq_nosync(wl->irq);
669 pm_wakeup_event(wl->dev, 0);
670 spin_unlock_irqrestore(&wl->wl_lock, flags);
673 spin_unlock_irqrestore(&wl->wl_lock, flags);
675 /* TX might be handled here, avoid redundant work */
676 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
677 cancel_work_sync(&wl->tx_work);
679 mutex_lock(&wl->mutex);
681 ret = wlcore_irq_locked(wl);
683 wl12xx_queue_recovery_work(wl);
685 spin_lock_irqsave(&wl->wl_lock, flags);
686 /* In case TX was not handled here, queue TX work */
687 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
688 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
689 wl1271_tx_total_queue_count(wl) > 0)
690 ieee80211_queue_work(wl->hw, &wl->tx_work);
691 spin_unlock_irqrestore(&wl->wl_lock, flags);
693 mutex_unlock(&wl->mutex);
/* Accumulator for wl12xx_vif_count_iter()/wl12xx_get_vif_count(). */
698 struct vif_counter_data {
701 struct ieee80211_vif *cur_vif;
/* true if cur_vif was seen among the active interfaces */
702 bool cur_vif_running;
/* Interface iterator callback: count vifs and detect the current one. */
705 static void wl12xx_vif_count_iter(void *data, u8 *mac,
706 struct ieee80211_vif *vif)
708 struct vif_counter_data *counter = data;
711 if (counter->cur_vif == vif)
712 counter->cur_vif_running = true;
715 /* caller must not hold wl->mutex, as it might deadlock */
716 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
717 struct ieee80211_vif *cur_vif,
718 struct vif_counter_data *data)
720 memset(data, 0, sizeof(*data));
721 data->cur_vif = cur_vif;
/* Walk all active interfaces, counting them via the iterator above. */
723 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
724 wl12xx_vif_count_iter, data);
/*
 * Select and load the appropriate firmware image (PLT, multi-role or
 * single-role) into wl->fw. Skips the load if the right type is already
 * cached; uses the cached last_vif_count because wl->mutex is held and
 * wl12xx_get_vif_count() cannot be called here.
 */
727 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 const struct firmware *fw;
731 enum wl12xx_fw_type fw_type;
735 fw_type = WL12XX_FW_TYPE_PLT;
736 fw_name = wl->plt_fw_name;
739 * we can't call wl12xx_get_vif_count() here because
740 * wl->mutex is taken, so use the cached last_vif_count value
742 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
743 fw_type = WL12XX_FW_TYPE_MULTI;
744 fw_name = wl->mr_fw_name;
746 fw_type = WL12XX_FW_TYPE_NORMAL;
747 fw_name = wl->sr_fw_name;
751 if (wl->fw_type == fw_type)
754 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756 ret = request_firmware(&fw, fw_name, wl->dev);
759 wl1271_error("could not get firmware %s: %d", fw_name, ret);
764 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* Invalidate the cached type until the copy below succeeds. */
771 wl->fw_type = WL12XX_FW_TYPE_NONE;
772 wl->fw_len = fw->size;
773 wl->fw = vmalloc(wl->fw_len);
776 wl1271_error("could not allocate memory for the firmware");
781 memcpy(wl->fw, fw->data, wl->fw_len);
783 wl->fw_type = fw_type;
785 release_firmware(fw);
/*
 * Schedule FW recovery, unless one is already in flight (state is only
 * moved off WLCORE_STATE_ON once, preventing recursive recovery).
 */
790 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 /* Avoid a recursive recovery */
793 if (wl->state == WLCORE_STATE_ON) {
794 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
797 wl->state = WLCORE_STATE_RESTARTING;
798 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
799 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append up to @maxlen bytes of FW log data to the host-side fwlog
 * buffer (capped at PAGE_SIZE total); returns the number of bytes
 * actually copied.
 */
803 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
807 /* Make sure we have enough room */
808 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
810 /* Fill the FW log file, consumed by the sysfs fwlog entry */
811 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
812 wl->fwlog_size += len;
/*
 * Dump the FW panic log after a crash: wake the chip, stop the logger
 * (unless the FW is hung or dbgpins output is used), then drain the
 * logger's memory-block chain until end-of-log is reported.
 */
817 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
822 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
825 wl1271_info("Reading FW panic log");
828 * Make sure the chip is awake and the logger isn't active.
829 * Do not send a stop fwlog command if the fw is hanged or if
830 * dbgpins are used (due to some fw bug).
832 error = pm_runtime_get_sync(wl->dev);
834 pm_runtime_put_noidle(wl->dev);
837 if (!wl->watchdog_recovery &&
838 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
839 wl12xx_cmd_stop_fwlog(wl);
841 /* Traverse the memory blocks linked list */
843 end_of_log = wlcore_event_fw_logger(wl);
844 if (end_of_log == 0) {
846 end_of_log = wlcore_event_fw_logger(wl);
848 } while (end_of_log != 0);
/*
 * Preserve the link's freed-packets counter in the station's private
 * data across a FW restart, padding the sequence number to cover frames
 * transmitted but not yet reported in the FW status (GEM ciphers need a
 * larger pad).
 */
851 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
852 u8 hlid, struct ieee80211_sta *sta)
854 struct wl1271_station *wl_sta;
855 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
857 wl_sta = (void *)sta->drv_priv;
858 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
861 * increment the initial seq number on recovery to account for
862 * transmitted packets that we haven't yet got in the FW status
864 if (wlvif->encryption_type == KEY_GEM)
865 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
867 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
868 wl_sta->total_freed_pkts += sqn_recovery_padding;
/*
 * Address-based wrapper around wlcore_save_freed_pkts(): look the
 * station up by MAC address (guards against an invalid hlid or a
 * zero address).
 */
871 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
872 struct wl12xx_vif *wlvif,
873 u8 hlid, const u8 *addr)
875 struct ieee80211_sta *sta;
876 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
878 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
879 is_zero_ether_addr(addr)))
883 sta = ieee80211_find_sta(vif, addr);
885 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
/*
 * Log diagnostic state for a recovery: FW version, the FW program
 * counter and interrupt status (read via a temporary switch to the BOOT
 * partition), and the running recovery count.
 */
889 static void wlcore_print_recovery(struct wl1271 *wl)
895 wl1271_info("Hardware recovery in progress. FW ver: %s",
896 wl->chip.fw_ver_str);
898 /* change partitions momentarily so we can read the FW pc */
899 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
903 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
907 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
911 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
912 pc, hint_sts, ++wl->recovery_count);
/* Restore the normal working partition before returning. */
914 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * FW recovery worker: dump diagnostics (panic log, FW pc), honor the
 * bug_on_recovery/no_recovery policies, save per-STA sequence counters,
 * tear down every interface, stop the chip and finally ask mac80211 to
 * restart the hardware.
 */
918 static void wl1271_recovery_work(struct work_struct *work)
921 container_of(work, struct wl1271, recovery_work);
922 struct wl12xx_vif *wlvif;
923 struct ieee80211_vif *vif;
926 mutex_lock(&wl->mutex);
928 if (wl->state == WLCORE_STATE_OFF || wl->plt)
931 error = pm_runtime_get_sync(wl->dev);
933 wl1271_warning("Enable for recovery failed");
934 pm_runtime_put_noidle(wl->dev);
936 wlcore_disable_interrupts_nosync(wl);
/* Only dump diagnostics for unexpected (not intended) recoveries. */
938 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
939 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
940 wl12xx_read_fwlog_panic(wl);
941 wlcore_print_recovery(wl);
944 BUG_ON(wl->conf.recovery.bug_on_recovery &&
945 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
947 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
949 if (wl->conf.recovery.no_recovery) {
950 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
954 /* Prevent spurious TX during FW restart */
955 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
957 /* reboot the chipset */
958 while (!list_empty(&wl->wlvif_list)) {
959 wlvif = list_first_entry(&wl->wlvif_list,
960 struct wl12xx_vif, list);
961 vif = wl12xx_wlvif_to_vif(wlvif);
963 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
964 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
965 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
966 vif->bss_conf.bssid);
969 __wl1271_op_remove_interface(wl, vif, false);
972 wlcore_op_stop_locked(wl);
973 pm_runtime_mark_last_busy(wl->dev);
974 pm_runtime_put_autosuspend(wl->dev);
976 ieee80211_restart_hw(wl->hw);
979 * Its safe to enable TX now - the queues are stopped after a request
982 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
985 wl->watchdog_recovery = false;
986 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
987 mutex_unlock(&wl->mutex);
/* Wake the FW from ELP (extremely low power) by poking the ELP ctrl reg. */
990 static int wlcore_fw_wakeup(struct wl1271 *wl)
992 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
995 static int wl1271_setup(struct wl1271 *wl)
997 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
998 if (!wl->raw_fw_status)
1001 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1005 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1011 kfree(wl->fw_status);
1012 kfree(wl->raw_fw_status);
/*
 * Power the chip on: settle delays around power-on, reset the I/O,
 * select the BOOT partition and wake the FW out of ELP. Powers back
 * off on failure.
 */
1016 static int wl12xx_set_power_on(struct wl1271 *wl)
1020 msleep(WL1271_PRE_POWER_ON_SLEEP);
1021 ret = wl1271_power_on(wl);
1024 msleep(WL1271_POWER_ON_SLEEP);
1025 wl1271_io_reset(wl);
1028 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1032 /* ELP module wake up */
1033 ret = wlcore_fw_wakeup(wl);
/* Error path: undo the power-on. */
1041 wl1271_power_off(wl);
/*
 * Bring the chip up for normal or PLT operation: power on, fix up the
 * SDIO block-size quirk, allocate status buffers and fetch the right
 * firmware image. Buffers are freed if the firmware fetch fails.
 */
1045 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1049 ret = wl12xx_set_power_on(wl);
1054 * For wl127x based devices we could use the default block
1055 * size (512 bytes), but due to a bug in the sdio driver, we
1056 * need to set it explicitly after the chip is powered on. To
1057 * simplify the code and since the performance impact is
1058 * negligible, we use the same block size for all different
1061 * Check if the bus supports blocksize alignment and, if it
1062 * doesn't, make sure we don't have the quirk.
1064 if (!wl1271_set_block_size(wl))
1065 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1067 /* TODO: make sure the lower driver has set things up correctly */
1069 ret = wl1271_setup(wl);
1073 ret = wl12xx_fetch_firmware(wl, plt);
1075 kfree(wl->fw_status);
1076 kfree(wl->raw_fw_status);
1077 kfree(wl->tx_res_if);
/*
 * Enter PLT (production line test) mode: requires the device to be OFF,
 * then boots the chip (retrying up to WL1271_BOOT_RETRIES times) and,
 * unless only chip-awake was requested, runs the chip's PLT init.
 */
1084 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1086 int retries = WL1271_BOOT_RETRIES;
1087 struct wiphy *wiphy = wl->hw->wiphy;
1089 static const char* const PLT_MODE[] = {
1098 mutex_lock(&wl->mutex);
1100 wl1271_notice("power up");
1102 if (wl->state != WLCORE_STATE_OFF) {
1103 wl1271_error("cannot go into PLT state because not "
1104 "in off state: %d", wl->state);
1109 /* Indicate to lower levels that we are now in PLT mode */
1111 wl->plt_mode = plt_mode;
1115 ret = wl12xx_chip_wakeup(wl, true);
1119 if (plt_mode != PLT_CHIP_AWAKE) {
1120 ret = wl->ops->plt_init(wl);
1125 wl->state = WLCORE_STATE_ON;
1126 wl1271_notice("firmware booted in PLT mode %s (%s)",
1128 wl->chip.fw_ver_str);
1130 /* update hw/fw version info in wiphy struct */
1131 wiphy->hw_version = wl->chip.id;
1132 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1133 sizeof(wiphy->fw_version));
/* Retry path: power off and loop; give up after all retries fail. */
1138 wl1271_power_off(wl);
1142 wl->plt_mode = PLT_OFF;
1144 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1145 WL1271_BOOT_RETRIES);
1147 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before changing state (so the IRQ
 * handler cannot run half-way), flush deferred/pending work, then power
 * the chip off and mark the device OFF.
 */
1152 int wl1271_plt_stop(struct wl1271 *wl)
1156 wl1271_notice("power down");
1159 * Interrupts must be disabled before setting the state to OFF.
1160 * Otherwise, the interrupt handler might be called and exit without
1161 * reading the interrupt status.
1163 wlcore_disable_interrupts(wl);
1164 mutex_lock(&wl->mutex);
1166 mutex_unlock(&wl->mutex);
1169 * This will not necessarily enable interrupts as interrupts
1170 * may have been disabled when op_stop was called. It will,
1171 * however, balance the above call to disable_interrupts().
1173 wlcore_enable_interrupts(wl);
1175 wl1271_error("cannot power down because not in PLT "
1176 "state: %d", wl->state);
1181 mutex_unlock(&wl->mutex);
1183 wl1271_flush_deferred_work(wl);
1184 cancel_work_sync(&wl->netstack_work);
1185 cancel_work_sync(&wl->recovery_work);
1186 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1188 mutex_lock(&wl->mutex);
1189 wl1271_power_off(wl);
1191 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1192 wl->state = WLCORE_STATE_OFF;
1194 wl->plt_mode = PLT_OFF;
1196 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx callback: map the skb to a link (hlid) and AC queue,
 * enqueue it for the Tx worker, apply the high-watermark flow control,
 * and kick the Tx work if the FW isn't busy. Frames with no vif, an
 * invalid link, or a hard-stopped queue are dropped.
 */
1202 static void wl1271_op_tx(struct ieee80211_hw *hw,
1203 struct ieee80211_tx_control *control,
1204 struct sk_buff *skb)
1206 struct wl1271 *wl = hw->priv;
1207 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1208 struct ieee80211_vif *vif = info->control.vif;
1209 struct wl12xx_vif *wlvif = NULL;
1210 unsigned long flags;
1215 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1216 ieee80211_free_txskb(hw, skb);
1220 wlvif = wl12xx_vif_to_data(vif);
1221 mapping = skb_get_queue_mapping(skb);
1222 q = wl1271_tx_get_queue(mapping);
1224 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1226 spin_lock_irqsave(&wl->wl_lock, flags);
1229 * drop the packet if the link is invalid or the queue is stopped
1230 * for any reason but watermark. Watermark is a "soft"-stop so we
1231 * allow these packets through.
1233 if (hlid == WL12XX_INVALID_LINK_ID ||
1234 (!test_bit(hlid, wlvif->links_map)) ||
1235 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1236 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1237 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1238 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1239 ieee80211_free_txskb(hw, skb);
1243 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1245 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1247 wl->tx_queue_count[q]++;
1248 wlvif->tx_queue_count[q]++;
1251 * The workqueue is slow to process the tx_queue and we need stop
1252 * the queue here, otherwise the queue will get too long.
1254 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1255 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1256 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1257 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1258 wlcore_stop_queue_locked(wl, wlvif, q,
1259 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1263 * The chip specific setup must run before the first TX packet -
1264 * before that, the tx_work will not be initialized!
1267 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1268 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1269 ieee80211_queue_work(wl->hw, &wl->tx_work);
1272 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet the FW requests when it is low
 * on RX memory blocks; at most one may be pending at a time. If the FW
 * Tx path is idle, the Tx work is run immediately rather than deferred.
 */
1275 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1277 unsigned long flags;
1280 /* no need to queue a new dummy packet if one is already pending */
1281 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1284 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1286 spin_lock_irqsave(&wl->wl_lock, flags);
1287 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1288 wl->tx_queue_count[q]++;
1289 spin_unlock_irqrestore(&wl->wl_lock, flags);
1291 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1292 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1293 return wlcore_tx_work_locked(wl);
1296 * If the FW TX is busy, TX work will be scheduled by the threaded
1297 * interrupt handler function
1303 * The size of the dummy packet should be at least 1400 bytes. However, in
1304 * order to minimize the number of bus transactions, aligning it to 512 bytes
1305 * boundaries could be beneficial, performance wise
1307 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the zero-filled null-function data frame used as the dummy
 * packet: room is reserved for the Tx HW descriptor, the 802.11 header
 * is zero-initialized with only frame_control set, and the TID is
 * forced to management priority.
 */
1309 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1311 struct sk_buff *skb;
1312 struct ieee80211_hdr_3addr *hdr;
1313 unsigned int dummy_packet_size;
1315 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1316 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1318 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1320 wl1271_warning("Failed to allocate a dummy packet skb");
1324 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1326 hdr = skb_put_zero(skb, sizeof(*hdr));
1327 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1328 IEEE80211_STYPE_NULLFUNC |
1329 IEEE80211_FCTL_TODS);
1331 skb_put_zero(skb, dummy_packet_size);
1333 /* Dummy packets require the TID to be management */
1334 skb->priority = WL1271_TID_MGMT;
1336 /* Initialize all fields that might be used */
1337 skb_set_queue_mapping(skb, 0);
1338 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a WoWLAN packet pattern can be expressed within the FW
 * RX-filter limits: walk the pattern mask, counting contiguous masked
 * segments ("fields") and their encoded size, splitting a field at the
 * Ethernet/IP header boundary as the FW requires.
 */
1345 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1347 int num_fields = 0, in_field = 0, fields_size = 0;
1348 int i, pattern_len = 0;
1351 wl1271_warning("No mask in WoWLAN pattern");
1356 * The pattern is broken up into segments of bytes at different offsets
1357 * that need to be checked by the FW filter. Each segment is called
1358 * a field in the FW API. We verify that the total number of fields
1359 * required for this pattern won't exceed FW limits (8)
1360 * as well as the total fields buffer won't exceed the FW limit.
1361 * Note that if there's a pattern which crosses Ethernet/IP header
1362 * boundary a new field is required.
1364 for (i = 0; i < p->pattern_len; i++) {
1365 if (test_bit(i, (unsigned long *)p->mask)) {
1370 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1372 fields_size += pattern_len +
1373 RX_FILTER_FIELD_OVERHEAD;
1381 fields_size += pattern_len +
1382 RX_FILTER_FIELD_OVERHEAD;
1389 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1393 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1394 wl1271_warning("RX Filter too complex. Too many segments");
1398 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1399 wl1271_warning("RX filter pattern is too big");
1406 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1408 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1411 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1418 for (i = 0; i < filter->num_fields; i++)
1419 kfree(filter->fields[i].pattern);
1424 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1425 u16 offset, u8 flags,
1426 const u8 *pattern, u8 len)
1428 struct wl12xx_rx_filter_field *field;
1430 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1431 wl1271_warning("Max fields per RX filter. can't alloc another");
1435 field = &filter->fields[filter->num_fields];
1437 field->pattern = kzalloc(len, GFP_KERNEL);
1438 if (!field->pattern) {
1439 wl1271_warning("Failed to allocate RX filter pattern");
1443 filter->num_fields++;
1445 field->offset = cpu_to_le16(offset);
1446 field->flags = flags;
1448 memcpy(field->pattern, pattern, len);
1453 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1455 int i, fields_size = 0;
1457 for (i = 0; i < filter->num_fields; i++)
1458 fields_size += filter->fields[i].len +
1459 sizeof(struct wl12xx_rx_filter_field) -
1465 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1469 struct wl12xx_rx_filter_field *field;
1471 for (i = 0; i < filter->num_fields; i++) {
1472 field = (struct wl12xx_rx_filter_field *)buf;
1474 field->offset = filter->fields[i].offset;
1475 field->flags = filter->fields[i].flags;
1476 field->len = filter->fields[i].len;
1478 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1479 buf += sizeof(struct wl12xx_rx_filter_field) -
1480 sizeof(u8 *) + field->len;
/*
 * Translate one cfg80211 WoWLAN packet pattern into a FW RX filter.
 * Contiguous masked runs become filter fields; runs are split at the
 * Ethernet/IP header boundary and flagged accordingly.
 */
1485 * Allocates an RX filter returned through f
1486 * which needs to be freed using rx_filter_free()
1489 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1490 struct wl12xx_rx_filter **f)
1493 struct wl12xx_rx_filter *filter;
1497 filter = wl1271_rx_filter_alloc();
1499 wl1271_warning("Failed to alloc rx filter");
/* scan for the next masked byte, then extend the run as far as possible */
1505 while (i < p->pattern_len) {
1506 if (!test_bit(i, (unsigned long *)p->mask)) {
1511 for (j = i; j < p->pattern_len; j++) {
1512 if (!test_bit(j, (unsigned long *)p->mask))
/* a run crossing the Ethernet header boundary must be split into two fields */
1515 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1516 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
/* choose offset base + flag depending on which header the run starts in */
1520 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1522 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1524 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1525 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1530 ret = wl1271_rx_filter_alloc_field(filter,
1533 &p->pattern[i], len);
/* matching packets should wake the host */
1540 filter->action = FILTER_SIGNAL;
/* error path: release the partially built filter */
1546 wl1271_rx_filter_free(filter);
/*
 * Program the FW wake-on-WLAN RX filtering for suspend.
 * With no usable patterns, filtering is disabled and all filters cleared;
 * otherwise every pattern is validated first, then installed as an
 * indexed FW filter, and the default action is set to drop non-matching
 * frames.
 */
1552 static int wl1271_configure_wowlan(struct wl1271 *wl,
1553 struct cfg80211_wowlan *wow)
/* no WoW config / wake-on-any / no patterns: turn filtering off entirely */
1557 if (!wow || wow->any || !wow->n_patterns) {
1558 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1563 ret = wl1271_rx_filter_clear_all(wl);
1570 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1573 /* Validate all incoming patterns before clearing current FW state */
1574 for (i = 0; i < wow->n_patterns; i++) {
1575 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1577 wl1271_warning("Bad wowlan pattern %d", i);
/* start from a clean FW filter state before installing the new set */
1582 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1586 ret = wl1271_rx_filter_clear_all(wl);
1590 /* Translate WoWLAN patterns into filters */
1591 for (i = 0; i < wow->n_patterns; i++) {
1592 struct cfg80211_pkt_pattern *p;
1593 struct wl12xx_rx_filter *filter = NULL;
1595 p = &wow->patterns[i];
1597 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1599 wl1271_warning("Failed to create an RX filter from "
1600 "wowlan pattern %d", i);
/* the FW keeps its own copy; the local filter is freed either way */
1604 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1606 wl1271_rx_filter_free(filter);
/* non-matching frames are dropped while suspended */
1611 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Prepare a STA interface for suspend: install WoWLAN filters and, when
 * the suspend-specific wake-up parameters differ from the runtime ones,
 * switch the FW wake-up conditions to the suspend values.
 * No-op for an unassociated STA.
 */
1617 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1618 struct wl12xx_vif *wlvif,
1619 struct cfg80211_wowlan *wow)
1623 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1626 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX when suspend and runtime wake-up settings are identical */
1630 if ((wl->conf.conn.suspend_wake_up_event ==
1631 wl->conf.conn.wake_up_event) &&
1632 (wl->conf.conn.suspend_listen_interval ==
1633 wl->conf.conn.listen_interval))
1636 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1637 wl->conf.conn.suspend_wake_up_event,
1638 wl->conf.conn.suspend_listen_interval);
1641 wl1271_error("suspend: set wake up conditions failed: %d", ret);
/*
 * Prepare an AP interface for suspend: enable beacon filtering in the FW
 * and install the WoWLAN filters.  No-op unless the AP is started.
 */
1647 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1648 struct wl12xx_vif *wlvif,
1649 struct cfg80211_wowlan *wow)
1653 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1656 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1660 ret = wl1271_configure_wowlan(wl, wow);
1669 static int wl1271_configure_suspend(struct wl1271 *wl,
1670 struct wl12xx_vif *wlvif,
1671 struct cfg80211_wowlan *wow)
1673 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1674 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1675 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1676 return wl1271_configure_suspend_ap(wl, wlvif, wow);
/*
 * Undo the suspend-time configuration on resume: remove WoWLAN filters,
 * restore the runtime wake-up conditions for STA roles, and disable
 * beacon filtering for AP roles.  Only acts on associated STAs and
 * started APs.
 */
1680 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1683 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1684 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1686 if ((!is_ap) && (!is_sta))
1689 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1690 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
/* NULL wow disables filtering and clears all FW filters */
1693 wl1271_configure_wowlan(wl, NULL);
/* nothing to restore if suspend used the runtime wake-up settings */
1696 if ((wl->conf.conn.suspend_wake_up_event ==
1697 wl->conf.conn.wake_up_event) &&
1698 (wl->conf.conn.suspend_listen_interval ==
1699 wl->conf.conn.listen_interval))
1702 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1703 wl->conf.conn.wake_up_event,
1704 wl->conf.conn.listen_interval);
1707 wl1271_error("resume: wake up conditions failed: %d",
/* AP role: beacon filtering was enabled by configure_suspend_ap */
1711 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/*
 * mac80211 suspend callback.  Postpones suspend while a recovery is in
 * flight, flushes TX, configures every vif for WoW, quiets FW
 * notifications, cancels pending work, marks the device suspended so the
 * threaded IRQ is not re-queued, and finally force-suspends runtime PM.
 */
1715 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1716 struct cfg80211_wowlan *wow)
1718 struct wl1271 *wl = hw->priv;
1719 struct wl12xx_vif *wlvif;
1720 unsigned long flags;
1723 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1726 /* we want to perform the recovery before suspending */
1727 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1728 wl1271_warning("postponing suspend to perform recovery");
1732 wl1271_tx_flush(wl);
1734 mutex_lock(&wl->mutex);
/* keep the chip awake for the whole suspend configuration sequence */
1736 ret = pm_runtime_get_sync(wl->dev);
1738 pm_runtime_put_noidle(wl->dev);
1739 mutex_unlock(&wl->mutex);
1743 wl->wow_enabled = true;
1744 wl12xx_for_each_wlvif(wl, wlvif) {
/* p2p-mgmt vifs have no role in FW; nothing to configure */
1745 if (wlcore_is_p2p_mgmt(wlvif))
1748 ret = wl1271_configure_suspend(wl, wlvif, wow);
1750 mutex_unlock(&wl->mutex);
1751 wl1271_warning("couldn't prepare device to suspend");
1756 /* disable fast link flow control notifications from FW */
1757 ret = wlcore_hw_interrupt_notify(wl, false);
1761 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1762 ret = wlcore_hw_rx_ba_filter(wl,
1763 !!wl->conf.conn.suspend_rx_ba_activity);
1768 pm_runtime_put_noidle(wl->dev);
1769 mutex_unlock(&wl->mutex);
1772 wl1271_warning("couldn't prepare device to suspend");
1776 /* flush any remaining work */
1777 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1779 flush_work(&wl->tx_work);
1782 * Cancel the watchdog even if above tx_flush failed. We will detect
1783 * it on resume anyway.
1785 cancel_delayed_work(&wl->tx_watchdog_work);
1788 * set suspended flag to avoid triggering a new threaded_irq
1791 spin_lock_irqsave(&wl->wl_lock, flags);
1792 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1793 spin_unlock_irqrestore(&wl->wl_lock, flags);
1795 return pm_runtime_force_suspend(wl->dev);
/*
 * mac80211 resume callback: force-resume runtime PM, clear the suspended
 * flag and replay any IRQ work postponed during suspend (unless a
 * recovery is pending, in which case the recovery is queued instead),
 * then undo the per-vif suspend configuration and re-enable FW
 * notifications.
 */
1798 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1800 struct wl1271 *wl = hw->priv;
1801 struct wl12xx_vif *wlvif;
1802 unsigned long flags;
1803 bool run_irq_work = false, pending_recovery;
1806 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1808 WARN_ON(!wl->wow_enabled);
1810 ret = pm_runtime_force_resume(wl->dev);
1812 wl1271_error("ELP wakeup failure!");
1817 * re-enable irq_work enqueuing, and call irq_work directly if
1818 * there is a pending work.
1820 spin_lock_irqsave(&wl->wl_lock, flags);
1821 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1822 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1823 run_irq_work = true;
1824 spin_unlock_irqrestore(&wl->wl_lock, flags);
1826 mutex_lock(&wl->mutex);
1828 /* test the recovery flag before calling any SDIO functions */
1829 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1833 wl1271_debug(DEBUG_MAC80211,
1834 "run postponed irq_work directly");
1836 /* don't talk to the HW if recovery is pending */
1837 if (!pending_recovery) {
1838 ret = wlcore_irq_locked(wl);
1840 wl12xx_queue_recovery_work(wl);
1843 wlcore_enable_interrupts(wl);
1846 if (pending_recovery) {
1847 wl1271_warning("queuing forgotten recovery on resume");
1848 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/* skip per-vif resume configuration; recovery will reinit everything */
1852 ret = pm_runtime_get_sync(wl->dev);
1854 pm_runtime_put_noidle(wl->dev);
1858 wl12xx_for_each_wlvif(wl, wlvif) {
1859 if (wlcore_is_p2p_mgmt(wlvif))
1862 wl1271_configure_resume(wl, wlvif);
/* re-enable fast link flow control notifications from FW */
1865 ret = wlcore_hw_interrupt_notify(wl, true);
1869 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1870 ret = wlcore_hw_rx_ba_filter(wl, false);
1875 pm_runtime_mark_last_busy(wl->dev);
1876 pm_runtime_put_autosuspend(wl->dev);
1879 wl->wow_enabled = false;
1882 * Set a flag to re-init the watchdog on the first Tx after resume.
1883 * That way we avoid possible conditions where Tx-complete interrupts
1884 * fail to arrive and we perform a spurious recovery.
1886 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1887 mutex_unlock(&wl->mutex);
1892 static int wl1271_op_start(struct ieee80211_hw *hw)
1894 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1897 * We have to delay the booting of the hardware because
1898 * we need to know the local MAC address before downloading and
1899 * initializing the firmware. The MAC address cannot be changed
1900 * after boot, and without the proper MAC address, the firmware
1901 * will not function properly.
1903 * The MAC address is first known when the corresponding interface
1904 * is added. That is where we will initialize the hardware.
/*
 * Stop the device with wl->mutex held: transition to WLCORE_STATE_OFF,
 * quiesce interrupts and pending work (briefly dropping the mutex to do
 * so safely), power the chip off, and reset all driver bookkeeping so a
 * later add_interface starts from a clean slate.
 */
1910 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just rebalance the interrupt-disable done by recovery */
1914 if (wl->state == WLCORE_STATE_OFF) {
1915 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1917 wlcore_enable_interrupts(wl);
1923 * this must be before the cancel_work calls below, so that the work
1924 * functions don't perform further work.
1926 wl->state = WLCORE_STATE_OFF;
1929 * Use the nosync variant to disable interrupts, so the mutex could be
1930 * held while doing so without deadlocking.
1932 wlcore_disable_interrupts_nosync(wl);
/* drop the mutex while waiting for work items that may take it */
1934 mutex_unlock(&wl->mutex);
1936 wlcore_synchronize_interrupts(wl);
1937 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1938 cancel_work_sync(&wl->recovery_work);
1939 wl1271_flush_deferred_work(wl);
1940 cancel_delayed_work_sync(&wl->scan_complete_work);
1941 cancel_work_sync(&wl->netstack_work);
1942 cancel_work_sync(&wl->tx_work);
1943 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1945 /* let's notify MAC80211 about the remaining pending TX frames */
1946 mutex_lock(&wl->mutex);
1947 wl12xx_tx_reset(wl);
1949 wl1271_power_off(wl);
1951 * In case a recovery was scheduled, interrupts were disabled to avoid
1952 * an interrupt storm. Now that the power is down, it is safe to
1953 * re-enable interrupts to balance the disable depth
1955 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1956 wlcore_enable_interrupts(wl);
/* reset global state back to its post-probe defaults */
1958 wl->band = NL80211_BAND_2GHZ;
1961 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1962 wl->channel_type = NL80211_CHAN_NO_HT;
1963 wl->tx_blocks_available = 0;
1964 wl->tx_allocated_blocks = 0;
1965 wl->tx_results_count = 0;
1966 wl->tx_packets_count = 0;
1967 wl->time_offset = 0;
1968 wl->ap_fw_ps_map = 0;
1970 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1971 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1972 memset(wl->links_map, 0, sizeof(wl->links_map));
1973 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1974 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1975 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1976 wl->active_sta_count = 0;
1977 wl->active_link_count = 0;
1979 /* The system link is always allocated */
1980 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1981 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1982 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1985 * this is performed after the cancel_work calls and the associated
1986 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1987 * get executed before all these vars have been reset.
1991 wl->tx_blocks_freed = 0;
1993 for (i = 0; i < NUM_TX_QUEUES; i++) {
1994 wl->tx_pkts_freed[i] = 0;
1995 wl->tx_allocated_pkts[i] = 0;
1998 wl1271_debugfs_reset(wl);
/* release FW status / TX result buffers; reallocated on next boot */
2000 kfree(wl->raw_fw_status);
2001 wl->raw_fw_status = NULL;
2002 kfree(wl->fw_status);
2003 wl->fw_status = NULL;
2004 kfree(wl->tx_res_if);
2005 wl->tx_res_if = NULL;
2006 kfree(wl->target_mem_map);
2007 wl->target_mem_map = NULL;
2010 * FW channels must be re-calibrated after recovery,
2011 * save current Reg-Domain channel configuration and clear it.
2013 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2014 sizeof(wl->reg_ch_conf_pending));
2015 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2018 static void wlcore_op_stop(struct ieee80211_hw *hw)
2020 struct wl1271 *wl = hw->priv;
2022 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2024 mutex_lock(&wl->mutex);
2026 wlcore_op_stop_locked(wl);
2028 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report the failure to mac80211 and tell the FW to abort the switch.
 */
2031 static void wlcore_channel_switch_work(struct work_struct *work)
2033 struct delayed_work *dwork;
2035 struct ieee80211_vif *vif;
2036 struct wl12xx_vif *wlvif;
2039 dwork = to_delayed_work(work);
2040 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2043 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2045 mutex_lock(&wl->mutex);
2047 if (unlikely(wl->state != WLCORE_STATE_ON))
2050 /* check the channel switch is still ongoing */
2051 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2054 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = channel switch failed */
2055 ieee80211_chswitch_done(vif, false);
2057 ret = pm_runtime_get_sync(wl->dev);
2059 pm_runtime_put_noidle(wl->dev);
2063 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2065 pm_runtime_mark_last_busy(wl->dev);
2066 pm_runtime_put_autosuspend(wl->dev);
2068 mutex_unlock(&wl->mutex);
/*
 * Delayed work that reports a lost connection to mac80211 once the
 * grace period expires, provided the device is still on and the STA is
 * still marked associated.
 */
2071 static void wlcore_connection_loss_work(struct work_struct *work)
2073 struct delayed_work *dwork;
2075 struct ieee80211_vif *vif;
2076 struct wl12xx_vif *wlvif;
2078 dwork = to_delayed_work(work);
2079 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2082 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2084 mutex_lock(&wl->mutex);
2086 if (unlikely(wl->state != WLCORE_STATE_ON))
2089 /* Call mac80211 connection loss */
2090 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2093 vif = wl12xx_wlvif_to_vif(wlvif);
2094 ieee80211_connection_loss(vif);
2096 mutex_unlock(&wl->mutex);
/*
 * Delayed work that cancels a pending-auth ROC once the ROC timeout has
 * genuinely elapsed since the last auth reply (re-checked under the
 * mutex to tolerate a reply that raced with scheduling).
 */
2099 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2101 struct delayed_work *dwork;
2103 struct wl12xx_vif *wlvif;
2104 unsigned long time_spare;
2107 dwork = to_delayed_work(work);
2108 wlvif = container_of(dwork, struct wl12xx_vif,
2109 pending_auth_complete_work);
2112 mutex_lock(&wl->mutex);
2114 if (unlikely(wl->state != WLCORE_STATE_ON))
2118 * Make sure a second really passed since the last auth reply. Maybe
2119 * a second auth reply arrived while we were stuck on the mutex.
2120 * Check for a little less than the timeout to protect from scheduler
2123 time_spare = jiffies +
2124 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2125 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2128 ret = pm_runtime_get_sync(wl->dev);
2130 pm_runtime_put_noidle(wl->dev);
2134 /* cancel the ROC if active */
2135 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2137 pm_runtime_mark_last_busy(wl->dev);
2138 pm_runtime_put_autosuspend(wl->dev);
2140 mutex_unlock(&wl->mutex);
2143 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2145 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2146 WL12XX_MAX_RATE_POLICIES);
2147 if (policy >= WL12XX_MAX_RATE_POLICIES)
2150 __set_bit(policy, wl->rate_policies_map);
2155 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2157 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2160 __clear_bit(*idx, wl->rate_policies_map);
2161 *idx = WL12XX_MAX_RATE_POLICIES;
2164 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2166 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2167 WLCORE_MAX_KLV_TEMPLATES);
2168 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2171 __set_bit(policy, wl->klv_templates_map);
2176 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2178 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2181 __clear_bit(*idx, wl->klv_templates_map);
2182 *idx = WLCORE_MAX_KLV_TEMPLATES;
2185 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2187 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2189 switch (wlvif->bss_type) {
2190 case BSS_TYPE_AP_BSS:
2192 return WL1271_ROLE_P2P_GO;
2193 else if (ieee80211_vif_is_mesh(vif))
2194 return WL1271_ROLE_MESH_POINT;
2196 return WL1271_ROLE_AP;
2198 case BSS_TYPE_STA_BSS:
2200 return WL1271_ROLE_P2P_CL;
2202 return WL1271_ROLE_STA;
2205 return WL1271_ROLE_IBSS;
2208 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2210 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Initialize the driver-private vif data for a newly added interface:
 * derive the BSS type from the mac80211 iftype, allocate per-role rate
 * policies and KLV templates, seed per-vif config from the global wl
 * state, and set up the vif's work items and RX-streaming timer.
 */
2213 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2215 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2218 /* clear everything but the persistent data */
2219 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2221 switch (ieee80211_vif_type_p2p(vif)) {
2222 case NL80211_IFTYPE_P2P_CLIENT:
2225 case NL80211_IFTYPE_STATION:
2226 case NL80211_IFTYPE_P2P_DEVICE:
2227 wlvif->bss_type = BSS_TYPE_STA_BSS;
2229 case NL80211_IFTYPE_ADHOC:
2230 wlvif->bss_type = BSS_TYPE_IBSS;
2232 case NL80211_IFTYPE_P2P_GO:
2235 case NL80211_IFTYPE_AP:
2236 case NL80211_IFTYPE_MESH_POINT:
2237 wlvif->bss_type = BSS_TYPE_AP_BSS;
2240 wlvif->bss_type = MAX_BSS_TYPE;
/* role/link ids start out invalid until the FW assigns them */
2244 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2245 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2246 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2248 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2249 wlvif->bss_type == BSS_TYPE_IBSS) {
2250 /* init sta/ibss data */
2251 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2252 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2253 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2254 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2255 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2256 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2257 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2258 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP role: broadcast/global links plus per-AC unicast rate policies */
2261 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2262 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2263 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2264 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2265 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2266 wl12xx_allocate_rate_policy(wl,
2267 &wlvif->ap.ucast_rate_idx[i]);
2268 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2270 * TODO: check if basic_rate shouldn't be
2271 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2272 * instead (the same thing for STA above).
2274 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2275 /* TODO: this seems to be used only for STA, check it */
2276 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2279 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2280 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2281 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2284 * mac80211 configures some values globally, while we treat them
2285 * per-interface. thus, on init, we have to copy them from wl
2287 wlvif->band = wl->band;
2288 wlvif->channel = wl->channel;
2289 wlvif->power_level = wl->power_level;
2290 wlvif->channel_type = wl->channel_type;
2292 INIT_WORK(&wlvif->rx_streaming_enable_work,
2293 wl1271_rx_streaming_enable_work);
2294 INIT_WORK(&wlvif->rx_streaming_disable_work,
2295 wl1271_rx_streaming_disable_work);
2296 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2297 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2298 wlcore_channel_switch_work);
2299 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2300 wlcore_connection_loss_work);
2301 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2302 wlcore_pending_auth_complete_work);
2303 INIT_LIST_HEAD(&wlvif->list);
2305 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
/*
 * Boot the firmware, retrying up to WL1271_BOOT_RETRIES times: wake the
 * chip, run the chip-specific boot, then HW init.  On a failed attempt
 * interrupts are drained (with the mutex briefly dropped) and the chip
 * is powered off before retrying.  On success, publishes hw/fw version
 * info to wiphy, disables 5 GHz channels when 11a is unsupported, and
 * moves the device to WLCORE_STATE_ON.
 */
2309 static int wl12xx_init_fw(struct wl1271 *wl)
2311 int retries = WL1271_BOOT_RETRIES;
2312 bool booted = false;
2313 struct wiphy *wiphy = wl->hw->wiphy;
2318 ret = wl12xx_chip_wakeup(wl, false);
2322 ret = wl->ops->boot(wl);
2326 ret = wl1271_hw_init(wl);
/* failure path for this boot attempt */
2334 mutex_unlock(&wl->mutex);
2335 /* Unlocking the mutex in the middle of handling is
2336 inherently unsafe. In this case we deem it safe to do,
2337 because we need to let any possibly pending IRQ out of
2338 the system (and while we are WLCORE_STATE_OFF the IRQ
2339 work function will not do anything.) Also, any other
2340 possible concurrent operations will fail due to the
2341 current state, hence the wl1271 struct should be safe. */
2342 wlcore_disable_interrupts(wl);
2343 wl1271_flush_deferred_work(wl);
2344 cancel_work_sync(&wl->netstack_work);
2345 mutex_lock(&wl->mutex);
2347 wl1271_power_off(wl);
2351 wl1271_error("firmware boot failed despite %d retries",
2352 WL1271_BOOT_RETRIES);
2356 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2358 /* update hw/fw version info in wiphy struct */
2359 wiphy->hw_version = wl->chip.id;
/*
 * NOTE(review): strncpy() does not guarantee NUL-termination when the
 * source fills the buffer — consider strscpy() here; verify
 * fw_ver_str is always shorter than wiphy->fw_version.
 */
2360 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2361 sizeof(wiphy->fw_version));
2364 * Now we know if 11a is supported (info from the NVS), so disable
2365 * 11a channels if not supported
2367 if (!wl->enable_11a)
2368 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2370 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2371 wl->enable_11a ? "" : "not ");
2373 wl->state = WLCORE_STATE_ON;
2378 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2380 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2384 * Check whether a fw switch (i.e. moving from one loaded
2385 * fw to another) is needed. This function is also responsible
2386 * for updating wl->last_vif_count, so it must be called before
2387 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2390 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2391 struct vif_counter_data vif_counter_data,
2394 enum wl12xx_fw_type current_fw = wl->fw_type;
2395 u8 vif_count = vif_counter_data.counter;
2397 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2400 /* increase the vif count if this is a new vif */
2401 if (add && !vif_counter_data.cur_vif_running)
2404 wl->last_vif_count = vif_count;
2406 /* no need for fw change if the device is OFF */
2407 if (wl->state == WLCORE_STATE_OFF)
2410 /* no need for fw change if a single fw is used */
2411 if (!wl->mr_fw_name)
2414 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2416 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2423 * Enter "forced psm". Make sure the sta is in psm against the ap,
2424 * to make the fw switch a bit more disconnection-persistent.
2426 static void wl12xx_force_active_psm(struct wl1271 *wl)
2428 struct wl12xx_vif *wlvif;
2430 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2431 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2435 struct wlcore_hw_queue_iter_data {
2436 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2438 struct ieee80211_vif *vif;
2439 /* is the current vif among those iterated */
2443 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2444 struct ieee80211_vif *vif)
2446 struct wlcore_hw_queue_iter_data *iter_data = data;
2448 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2449 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2452 if (iter_data->cur_running || vif == iter_data->vif) {
2453 iter_data->cur_running = true;
2457 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Pick a mac80211 hw-queue base for a vif: reuse a pre-allocated base
 * if the vif is already running (resume/recovery), otherwise claim the
 * first free base from the bitmap built by wlcore_hw_queue_iter().
 * AP vifs additionally get a CAB queue from the reserved tail range.
 */
2460 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2461 struct wl12xx_vif *wlvif)
2463 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2464 struct wlcore_hw_queue_iter_data iter_data = {};
/* P2P device vifs carry no data traffic; no queues to allocate */
2467 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2468 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2472 iter_data.vif = vif;
2474 /* mark all bits taken by active interfaces */
2475 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2476 IEEE80211_IFACE_ITER_RESUME_ALL,
2477 wlcore_hw_queue_iter, &iter_data);
2479 /* the current vif is already running in mac80211 (resume/recovery) */
2480 if (iter_data.cur_running) {
2481 wlvif->hw_queue_base = vif->hw_queue[0];
2482 wl1271_debug(DEBUG_MAC80211,
2483 "using pre-allocated hw queue base %d",
2484 wlvif->hw_queue_base);
2486 /* interface type might have changed type */
2487 goto adjust_cab_queue;
2490 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2491 WLCORE_NUM_MAC_ADDRESSES);
2492 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2495 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2496 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2497 wlvif->hw_queue_base);
2499 for (i = 0; i < NUM_TX_QUEUES; i++) {
2500 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2501 /* register hw queues in mac80211 */
2502 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2506 /* the last places are reserved for cab queues per interface */
2507 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2508 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2509 wlvif->hw_queue_base / NUM_TX_QUEUES;
2511 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 add_interface callback: initialize per-vif data, allocate a
 * role and hw-queue base, boot the firmware on the first interface (the
 * MAC address is only known here), switch single-/multi-role fw via a
 * forced recovery if required, and enable the FW role(s) for the vif.
 */
2516 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2517 struct ieee80211_vif *vif)
2519 struct wl1271 *wl = hw->priv;
2520 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2521 struct vif_counter_data vif_count;
2526 wl1271_error("Adding Interface not allowed while in PLT mode");
2530 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2531 IEEE80211_VIF_SUPPORTS_UAPSD |
2532 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2534 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2535 ieee80211_vif_type_p2p(vif), vif->addr);
2537 wl12xx_get_vif_count(hw, vif, &vif_count);
2539 mutex_lock(&wl->mutex);
2542 * in some very corner case HW recovery scenarios its possible to
2543 * get here before __wl1271_op_remove_interface is complete, so
2544 * opt out if that is the case.
2546 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2547 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2553 ret = wl12xx_init_vif_data(wl, vif);
2558 role_type = wl12xx_get_role_type(wl, wlvif);
2559 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2564 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2569 * TODO: after the nvs issue will be solved, move this block
2570 * to start(), and make sure here the driver is ON.
2572 if (wl->state == WLCORE_STATE_OFF) {
2574 * we still need this in order to configure the fw
2575 * while uploading the nvs
2577 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2579 ret = wl12xx_init_fw(wl);
2585 * Call runtime PM only after possible wl12xx_init_fw() above
2586 * is done. Otherwise we do not have interrupts enabled.
2588 ret = pm_runtime_get_sync(wl->dev);
2590 pm_runtime_put_noidle(wl->dev);
/* switch single-role <-> multi-role fw by forcing a recovery */
2594 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2595 wl12xx_force_active_psm(wl);
2596 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2597 mutex_unlock(&wl->mutex);
2598 wl1271_recovery_work(&wl->recovery_work);
2602 if (!wlcore_is_p2p_mgmt(wlvif)) {
2603 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2604 role_type, &wlvif->role_id);
2608 ret = wl1271_init_vif_specific(wl, vif);
/* p2p-mgmt vif: use a device role instead of a full role */
2613 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2614 &wlvif->dev_role_id);
2618 /* needed mainly for configuring rate policies */
2619 ret = wl1271_sta_hw_init(wl, wlvif);
2624 list_add(&wlvif->list, &wl->wlvif_list);
2625 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2627 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2632 pm_runtime_mark_last_busy(wl->dev);
2633 pm_runtime_put_autosuspend(wl->dev);
2635 mutex_unlock(&wl->mutex);
/*
 * Tear down a vif with wl->mutex held: abort any scan it owns, disable
 * its FW roles (unless a recovery is in progress), reset its TX state
 * and links, free its rate policies / KLV templates, and cancel its
 * work items (dropping the mutex while doing so).
 */
2640 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2641 struct ieee80211_vif *vif,
2642 bool reset_tx_queues)
2644 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2646 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2648 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2650 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2653 /* because of hardware recovery, we may get here twice */
2654 if (wl->state == WLCORE_STATE_OFF)
2657 wl1271_info("down");
/* this vif owns the in-flight scan: abort it and notify mac80211 */
2659 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2660 wl->scan_wlvif == wlvif) {
2661 struct cfg80211_scan_info info = {
2666 * Rearm the tx watchdog just before idling scan. This
2667 * prevents just-finished scans from triggering the watchdog
2669 wl12xx_rearm_tx_watchdog_locked(wl);
2671 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2672 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2673 wl->scan_wlvif = NULL;
2674 wl->scan.req = NULL;
2675 ieee80211_scan_completed(wl->hw, &info);
2678 if (wl->sched_vif == wlvif)
2679 wl->sched_vif = NULL;
2681 if (wl->roc_vif == vif) {
2683 ieee80211_remain_on_channel_expired(wl->hw);
/* on recovery the FW is gone — skip all role-disable commands */
2686 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2687 /* disable active roles */
2688 ret = pm_runtime_get_sync(wl->dev);
2690 pm_runtime_put_noidle(wl->dev);
2694 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2695 wlvif->bss_type == BSS_TYPE_IBSS) {
2696 if (wl12xx_dev_role_started(wlvif))
2697 wl12xx_stop_dev(wl, wlvif);
2700 if (!wlcore_is_p2p_mgmt(wlvif)) {
2701 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2705 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2710 pm_runtime_mark_last_busy(wl->dev);
2711 pm_runtime_put_autosuspend(wl->dev);
2714 wl12xx_tx_reset_wlvif(wl, wlvif);
2716 /* clear all hlids (except system_hlid) */
2717 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2719 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2720 wlvif->bss_type == BSS_TYPE_IBSS) {
2721 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2722 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2723 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2724 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2725 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2727 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2728 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2729 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2730 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2731 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2732 wl12xx_free_rate_policy(wl,
2733 &wlvif->ap.ucast_rate_idx[i]);
2734 wl1271_free_ap_keys(wl, wlvif);
2737 dev_kfree_skb(wlvif->probereq);
2738 wlvif->probereq = NULL;
2739 if (wl->last_wlvif == wlvif)
2740 wl->last_wlvif = NULL;
2741 list_del(&wlvif->list);
2742 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2743 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2744 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2752 * Last AP, have more stations. Configure sleep auth according to STA.
2753 * Don't do this on unintended recovery.
2755 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2756 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2759 if (wl->ap_count == 0 && is_ap) {
2760 /* mask ap events */
2761 wl->event_mask &= ~wl->ap_event_mask;
2762 wl1271_event_unmask(wl);
2765 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2766 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2767 /* Configure for power according to debugfs */
2768 if (sta_auth != WL1271_PSM_ILLEGAL)
2769 wl1271_acx_sleep_auth(wl, sta_auth);
2770 /* Configure for ELP power saving */
2772 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex while cancelling per-vif work that may take it */
2776 mutex_unlock(&wl->mutex);
2778 del_timer_sync(&wlvif->rx_streaming_timer);
2779 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2780 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2781 cancel_work_sync(&wlvif->rc_update_work);
2782 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2783 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2784 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2786 mutex_lock(&wl->mutex);
/*
 * mac80211 remove_interface op: tear down one virtual interface.
 * Verifies the wlvif is still tracked by the driver (it may have been
 * removed already by hardware recovery) before doing the real teardown
 * via __wl1271_op_remove_interface().
 * NOTE(review): this chunk looks lossy — brace/error-path lines appear
 * to have been dropped by extraction; confirm against the full file.
 */
2789 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2790 struct ieee80211_vif *vif)
2792 struct wl1271 *wl = hw->priv;
2793 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2794 struct wl12xx_vif *iter;
2795 struct vif_counter_data vif_count;
2797 wl12xx_get_vif_count(hw, vif, &vif_count);
2798 mutex_lock(&wl->mutex);
/* Nothing to do if the hw is already off or the vif never initialized */
2800 if (wl->state == WLCORE_STATE_OFF ||
2801 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2805 * wl->vif can be null here if someone shuts down the interface
2806 * just when hardware recovery has been started.
2808 wl12xx_for_each_wlvif(wl, iter) {
2812 __wl1271_op_remove_interface(wl, vif, true);
2815 WARN_ON(iter != wlvif);
/* Fewer vifs may allow a leaner firmware: trigger an intended recovery */
2816 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2817 wl12xx_force_active_psm(wl);
2818 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2819 wl12xx_queue_recovery_work(wl);
2822 mutex_unlock(&wl->mutex);
/*
 * mac80211 change_interface op: implemented as remove + re-add with the
 * new nl80211 type. The VIF_CHANGE_IN_PROGRESS flag brackets the two
 * calls — presumably so other paths can tell this removal is transient;
 * confirm against the flag's users elsewhere in the file.
 */
2825 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2826 struct ieee80211_vif *vif,
2827 enum nl80211_iftype new_type, bool p2p)
2829 struct wl1271 *wl = hw->priv;
2832 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2833 wl1271_op_remove_interface(hw, vif);
2835 vif->type = new_type;
2837 ret = wl1271_op_add_interface(hw, vif);
2839 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the firmware JOIN for a station/IBSS vif by starting the
 * appropriate role. Side effect (documented below): JOIN clears the
 * chipset's WPA/WPA2 keys, so doing it while associated kills the
 * data path unless new keys follow (the roaming case).
 */
2843 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2846 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2849 * One of the side effects of the JOIN command is that is clears
2850 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2851 * to a WPA/WPA2 access point will therefore kill the data-path.
2852 * Currently the only valid scenario for JOIN during association
2853 * is on roaming, in which case we will also be given new keys.
2854 * Keep the below message for now, unless it starts bothering
2855 * users who really like to roam a lot :)
2857 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2858 wl1271_info("JOIN while associated.");
2860 /* clear encryption type */
2861 wlvif->encryption_type = KEY_NONE;
2864 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2866 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2868 * TODO: this is an ugly workaround for wl12xx fw
2869 * bug - we are not able to tx/rx after the first
2870 * start_sta, so make dummy start+stop calls,
2871 * and then call start_sta again.
2872 * this should be fixed in the fw.
2874 wl12xx_cmd_role_start_sta(wl, wlvif);
2875 wl12xx_cmd_role_stop_sta(wl, wlvif);
/* Non-quirk (or post-workaround) path: the start_sta that counts */
2878 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a management frame in @skb (IEs begin at
 * @offset) and cache it in wlvif->ssid / wlvif->ssid_len.
 * Errors out if no SSID IE is present or it exceeds
 * IEEE80211_MAX_SSID_LEN.
 */
2884 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2888 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2892 wl1271_error("No SSID in IEs!");
2897 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2898 wl1271_error("SSID is too long!");
2902 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the IE header (id byte + length byte) */
2903 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Recover the SSID for a STA vif from the AP probe request template
 * held by mac80211, and cache it via wl1271_ssid_set(). Only supported
 * for BSS_TYPE_STA_BSS.
 */
2907 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2909 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2910 struct sk_buff *skb;
2913 /* we currently only support setting the ssid from the ap probe req */
2914 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2917 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs in a probe request start at u.probe_req.variable */
2921 ieoffset = offsetof(struct ieee80211_mgmt,
2922 u.probe_req.variable);
2923 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Apply association state to firmware for a STA vif: cache aid /
 * beacon interval / QoS from bss_conf, build the PS-poll and probe-req
 * templates used for hardware connection maintenance, enable the
 * connection monitor and keep-alive machinery, and sync PSM to
 * mac80211's default (ACTIVE). Command ordering below is significant
 * (see the keep-alive comment).
 */
2929 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2930 struct ieee80211_bss_conf *bss_conf,
2936 wlvif->aid = bss_conf->aid;
2937 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2938 wlvif->beacon_int = bss_conf->beacon_int;
2939 wlvif->wmm_enabled = bss_conf->qos;
2941 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2944 * with wl1271, we don't need to update the
2945 * beacon_int and dtim_period, because the firmware
2946 * updates it by itself when the first beacon is
2947 * received after a join.
2949 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2954 * Get a template for hardware connection maintenance
2956 dev_kfree_skb(wlvif->probereq);
2957 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2960 ieoffset = offsetof(struct ieee80211_mgmt,
2961 u.probe_req.variable);
2962 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2964 /* enable the connection monitoring feature */
2965 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2970 * The join command disable the keep-alive mode, shut down its process,
2971 * and also clear the template config, so we need to reset it all after
2972 * the join. The acx_aid starts the keep-alive process, and the order
2973 * of the commands below is relevant.
2975 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2979 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2983 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2987 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2988 wlvif->sta.klv_template_id,
2989 ACX_KEEP_ALIVE_TPL_VALID);
2994 * The default fw psm configuration is AUTO, while mac80211 default
2995 * setting is off (ACTIVE), so sync the fw with the correct value.
2997 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3003 wl1271_tx_enabled_rates_get(wl,
3006 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Undo wlcore_set_assoc(): clear the associated/joined flags, free the
 * maintenance probe-request template, disable connection monitoring,
 * keep-alive and beacon filtering, abort any in-progress channel
 * switch, and invalidate the keep-alive template.
 */
3014 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3017 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3019 /* make sure we are connected (sta) joined */
3021 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3024 /* make sure we are joined (ibss) */
3026 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3030 /* use defaults when not associated */
3033 /* free probe-request template */
3034 dev_kfree_skb(wlvif->probereq);
3035 wlvif->probereq = NULL;
3037 /* disable connection monitor features */
3038 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3042 /* Disable the keep-alive feature */
3043 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3047 /* disable beacon filtering */
3048 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
/* If a channel switch was pending, stop it and tell mac80211 it failed */
3053 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3054 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3056 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3057 ieee80211_chswitch_done(vif, false);
3058 cancel_delayed_work(&wlvif->channel_switch_work);
3061 /* invalidate keep-alive template */
3062 wl1271_acx_keep_alive_config(wl, wlvif,
3063 wlvif->sta.klv_template_id,
3064 ACX_KEEP_ALIVE_TPL_INVALID);
3069 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3071 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3072 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track the idle/active state of a STA vif via WLVIF_FLAG_ACTIVE.
 * On the idle -> active transition, stop any sched_scan bound to this
 * vif (the firmware only supports sched_scan while idle). No-op when
 * the requested state matches the current one.
 */
3075 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3078 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3080 if (idle == cur_idle)
3084 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3086 /* The current firmware only supports sched_scan in idle */
3087 if (wl->sched_vif == wlvif)
3088 wl->ops->sched_scan_stop(wl, wlvif);
3090 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Per-vif part of the mac80211 config op: push a changed tx power level
 * to firmware and cache it. p2p management vifs are skipped entirely.
 */
3094 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3095 struct ieee80211_conf *conf, u32 changed)
3099 if (wlcore_is_p2p_mgmt(wlvif))
/* only touch firmware when the power level actually changed */
3102 if (conf->power_level != wlvif->power_level) {
3103 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3107 wlvif->power_level = conf->power_level;
/*
 * mac80211 config op: cache the global power level, then (with the chip
 * runtime-resumed) apply the changed settings to every vif via
 * wl12xx_config_vif(). Bails out early when the core is not ON.
 */
3113 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3115 struct wl1271 *wl = hw->priv;
3116 struct wl12xx_vif *wlvif;
3117 struct ieee80211_conf *conf = &hw->conf;
3120 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3122 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3124 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3127 mutex_lock(&wl->mutex);
3129 if (changed & IEEE80211_CONF_CHANGE_POWER)
3130 wl->power_level = conf->power_level;
3132 if (unlikely(wl->state != WLCORE_STATE_ON))
/* wake the chip; put_noidle balances the get on failure */
3135 ret = pm_runtime_get_sync(wl->dev);
3137 pm_runtime_put_noidle(wl->dev);
3141 /* configure each interface */
3142 wl12xx_for_each_wlvif(wl, wlvif) {
3143 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3149 pm_runtime_mark_last_busy(wl->dev);
3150 pm_runtime_put_autosuspend(wl->dev);
3153 mutex_unlock(&wl->mutex);
3158 struct wl1271_filter_params {
3161 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 prepare_multicast op: snapshot the hw multicast address list
 * into a freshly allocated wl1271_filter_params, returned to mac80211
 * as an opaque u64 cookie (consumed and freed by configure_filter).
 * GFP_ATOMIC because this op may run in atomic context.
 * If the list exceeds ACX_MC_ADDRESS_GROUP_MAX, filtering is disabled.
 */
3164 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3165 struct netdev_hw_addr_list *mc_list)
3167 struct wl1271_filter_params *fp;
3168 struct netdev_hw_addr *ha;
3170 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3172 wl1271_error("Out of memory setting filters.");
3176 /* update multicast filtering parameters */
3177 fp->mc_list_length = 0;
3178 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3179 fp->enabled = false;
3182 netdev_hw_addr_list_for_each(ha, mc_list) {
3183 memcpy(fp->mc_list[fp->mc_list_length],
3184 ha->addr, ETH_ALEN);
3185 fp->mc_list_length++;
/* pointer smuggled to configure_filter() through the u64 return */
3189 return (u64)(unsigned long)fp;
3192 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3194 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 configure_filter op: consume the filter cookie produced by
 * prepare_multicast and program the firmware group-address table per
 * vif. Non-AP vifs get the snapshot list (or unfiltered on ALLMULTI);
 * AP vifs disable firmware filtering under ALLMULTI so mDNS-style
 * discovery keeps working. Other FIF_* bits cannot be mapped to this
 * firmware (filters follow active roles/ROC instead).
 */
3198 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3199 unsigned int changed,
3200 unsigned int *total, u64 multicast)
3202 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3203 struct wl1271 *wl = hw->priv;
3204 struct wl12xx_vif *wlvif;
3208 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3209 " total %x", changed, *total);
3211 mutex_lock(&wl->mutex);
/* advertise back to mac80211 only the filter bits we support */
3213 *total &= WL1271_SUPPORTED_FILTERS;
3214 changed &= WL1271_SUPPORTED_FILTERS;
3216 if (unlikely(wl->state != WLCORE_STATE_ON))
3219 ret = pm_runtime_get_sync(wl->dev);
3221 pm_runtime_put_noidle(wl->dev);
3225 wl12xx_for_each_wlvif(wl, wlvif) {
3226 if (wlcore_is_p2p_mgmt(wlvif))
3229 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3230 if (*total & FIF_ALLMULTI)
3231 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3235 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3238 fp->mc_list_length);
3244 * If interface in AP mode and created with allmulticast then disable
3245 * the firmware filters so that all multicast packets are passed
3246 * This is mandatory for MDNS based discovery protocols
3248 if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3249 if (*total & FIF_ALLMULTI) {
3250 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3260 * the fw doesn't provide an api to configure the filters. instead,
3261 * the filters configuration is based on the active roles / ROC
3266 pm_runtime_mark_last_busy(wl->dev);
3267 pm_runtime_put_autosuspend(wl->dev);
3270 mutex_unlock(&wl->mutex);
/*
 * Record an AP key to be programmed later (once the AP role has
 * started — see wl1271_ap_init_hwenc). Stores a kzalloc'd copy in the
 * first free slot of wlvif->ap.recorded_keys; rejects oversized keys,
 * duplicate ids, and a full table.
 */
3274 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3275 u8 id, u8 key_type, u8 key_size,
3276 const u8 *key, u8 hlid, u32 tx_seq_32,
3279 struct wl1271_ap_key *ap_key;
3282 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3284 if (key_size > MAX_KEY_SIZE)
3288 * Find next free entry in ap_keys. Also check we are not replacing
3291 for (i = 0; i < MAX_NUM_KEYS; i++) {
3292 if (wlvif->ap.recorded_keys[i] == NULL)
3295 if (wlvif->ap.recorded_keys[i]->id == id) {
3296 wl1271_warning("trying to record key replacement");
3301 if (i == MAX_NUM_KEYS)
3304 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3309 ap_key->key_type = key_type;
3310 ap_key->key_size = key_size;
3311 memcpy(ap_key->key, key, key_size);
3312 ap_key->hlid = hlid;
3313 ap_key->tx_seq_32 = tx_seq_32;
3314 ap_key->tx_seq_16 = tx_seq_16;
3316 wlvif->ap.recorded_keys[i] = ap_key;
3320 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3324 for (i = 0; i < MAX_NUM_KEYS; i++) {
3325 kfree(wlvif->ap.recorded_keys[i]);
3326 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Program all keys recorded by wl1271_record_ap_key() into the started
 * AP role. Keys with an invalid hlid fall back to the broadcast link.
 * If any WEP key was set, also push the default WEP key index.
 * Always frees the recorded-key table on the way out.
 */
3330 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3333 struct wl1271_ap_key *key;
3334 bool wep_key_added = false;
3336 for (i = 0; i < MAX_NUM_KEYS; i++) {
3338 if (wlvif->ap.recorded_keys[i] == NULL)
3341 key = wlvif->ap.recorded_keys[i];
3343 if (hlid == WL12XX_INVALID_LINK_ID)
3344 hlid = wlvif->ap.bcast_hlid;
3346 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3347 key->id, key->key_type,
3348 key->key_size, key->key,
3349 hlid, key->tx_seq_32,
3354 if (key->key_type == KEY_WEP)
3355 wep_key_added = true;
3358 if (wep_key_added) {
3359 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3360 wlvif->ap.bcast_hlid);
3366 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key plumbing shared by AP and STA paths.
 * AP: before AP start, keys are only recorded (set later by
 * wl1271_ap_init_hwenc); removals pre-start are silently accepted.
 * STA: unicast keys target the peer/broadcast address; unicast key
 * removal is ignored (the fw clears them on the next JOIN), as is
 * removal after the sta hlid is gone.
 */
3370 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3371 u16 action, u8 id, u8 key_type,
3372 u8 key_size, const u8 *key, u32 tx_seq_32,
3373 u16 tx_seq_16, struct ieee80211_sta *sta)
3376 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3379 struct wl1271_station *wl_sta;
3383 wl_sta = (struct wl1271_station *)sta->drv_priv;
3384 hlid = wl_sta->hlid;
3386 hlid = wlvif->ap.bcast_hlid;
3389 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3391 * We do not support removing keys after AP shutdown.
3392 * Pretend we do to make mac80211 happy.
3394 if (action != KEY_ADD_OR_REPLACE)
3397 ret = wl1271_record_ap_key(wl, wlvif, id,
3399 key, hlid, tx_seq_32,
3402 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3403 id, key_type, key_size,
3404 key, hlid, tx_seq_32,
3412 static const u8 bcast_addr[ETH_ALEN] = {
3413 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3416 addr = sta ? sta->addr : bcast_addr;
3418 if (is_zero_ether_addr(addr)) {
3419 /* We dont support TX only encryption */
3423 /* The wl1271 does not allow to remove unicast keys - they
3424 will be cleared automatically on next CMD_JOIN. Ignore the
3425 request silently, as we dont want the mac80211 to emit
3426 an error message. */
3427 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3430 /* don't remove key if hlid was already deleted */
3431 if (action == KEY_REMOVE &&
3432 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3435 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3436 id, key_type, key_size,
3437 key, addr, tx_seq_32,
/*
 * mac80211 set_key op wrapper. GEM/TKIP ciphers change the per-packet
 * spare-block accounting, so TX queues are stopped and flushed around
 * the hw-specific set_key to keep the FW accounting in sync.
 */
3447 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3448 struct ieee80211_vif *vif,
3449 struct ieee80211_sta *sta,
3450 struct ieee80211_key_conf *key_conf)
3452 struct wl1271 *wl = hw->priv;
3454 bool might_change_spare =
3455 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3456 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3458 if (might_change_spare) {
3460 * stop the queues and flush to ensure the next packets are
3461 * in sync with FW spare block accounting
3463 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3464 wl1271_tx_flush(wl);
3467 mutex_lock(&wl->mutex);
3469 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3471 goto out_wake_queues;
3474 ret = pm_runtime_get_sync(wl->dev);
3476 pm_runtime_put_noidle(wl->dev);
3477 goto out_wake_queues;
/* chip-family-specific key handler (ends up in wlcore_set_key) */
3480 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3482 pm_runtime_mark_last_busy(wl->dev);
3483 pm_runtime_put_autosuspend(wl->dev);
3486 if (might_change_spare)
3487 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3489 mutex_unlock(&wl->mutex);
/*
 * Shared SET_KEY/DISABLE_KEY implementation. Resolves the target hlid
 * (per-sta, AP broadcast, or the STA link), seeds the key's TX sequence
 * counter from the link's total_freed_pkts, maps the cipher suite to a
 * firmware key type, then adds/replaces or removes the key via
 * wl1271_set_key(). On STA vifs, a change of the unicast/common key
 * type also rebuilds the ARP response template.
 */
3494 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3495 struct ieee80211_vif *vif,
3496 struct ieee80211_sta *sta,
3497 struct ieee80211_key_conf *key_conf)
3499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3506 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3508 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3509 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3510 key_conf->cipher, key_conf->keyidx,
3511 key_conf->keylen, key_conf->flags);
3512 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3514 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3516 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3517 hlid = wl_sta->hlid;
3519 hlid = wlvif->ap.bcast_hlid;
3522 hlid = wlvif->sta.hlid;
/* seed the key's TX sequence counter from the link's freed-pkts count */
3524 if (hlid != WL12XX_INVALID_LINK_ID) {
3525 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3526 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3527 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3530 switch (key_conf->cipher) {
3531 case WLAN_CIPHER_SUITE_WEP40:
3532 case WLAN_CIPHER_SUITE_WEP104:
3535 key_conf->hw_key_idx = key_conf->keyidx;
3537 case WLAN_CIPHER_SUITE_TKIP:
3538 key_type = KEY_TKIP;
3539 key_conf->hw_key_idx = key_conf->keyidx;
3541 case WLAN_CIPHER_SUITE_CCMP:
/* firmware generates the IV; tell mac80211 to leave space for it */
3543 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3545 case WL1271_CIPHER_SUITE_GEM:
3549 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3556 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3557 key_conf->keyidx, key_type,
3558 key_conf->keylen, key_conf->key,
3559 tx_seq_32, tx_seq_16, sta);
3561 wl1271_error("Could not add or replace key");
3566 * reconfiguring arp response if the unicast (or common)
3567 * encryption key type was changed
3569 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3570 (sta || key_type == KEY_WEP) &&
3571 wlvif->encryption_type != key_type) {
3572 wlvif->encryption_type = key_type;
3573 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3575 wl1271_warning("build arp rsp failed: %d", ret);
3582 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3583 key_conf->keyidx, key_type,
3584 key_conf->keylen, key_conf->key,
3587 wl1271_error("Could not remove key");
3593 wl1271_error("Unsupported key cmd 0x%x", cmd);
3599 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 set_default_unicast_key op: remember the new default key
 * index and, when WEP is the active encryption type, push the default
 * WEP key selection to firmware. Unsetting is not handled.
 */
3601 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3602 struct ieee80211_vif *vif,
3605 struct wl1271 *wl = hw->priv;
3606 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3609 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3612 /* we don't handle unsetting of default key */
3616 mutex_lock(&wl->mutex);
3618 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3623 ret = pm_runtime_get_sync(wl->dev);
3625 pm_runtime_put_noidle(wl->dev);
3629 wlvif->default_key = key_idx;
3631 /* the default WEP key needs to be configured at least once */
3632 if (wlvif->encryption_type == KEY_WEP) {
3633 ret = wl12xx_cmd_set_default_wep_key(wl,
3641 pm_runtime_mark_last_busy(wl->dev);
3642 pm_runtime_put_autosuspend(wl->dev);
3645 mutex_unlock(&wl->mutex);
/*
 * Push the current regulatory domain configuration to firmware (called
 * from the reg_notify callback). Only applies to chips with the
 * REGDOMAIN_CONF quirk; a failed fw command queues a recovery.
 */
3648 void wlcore_regdomain_config(struct wl1271 *wl)
3652 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3655 mutex_lock(&wl->mutex);
3657 if (unlikely(wl->state != WLCORE_STATE_ON))
3660 ret = pm_runtime_get_sync(wl->dev);
3664 ret = wlcore_cmd_regdomain_config_locked(wl);
3666 wl12xx_queue_recovery_work(wl);
3670 pm_runtime_mark_last_busy(wl->dev);
3671 pm_runtime_put_autosuspend(wl->dev);
3673 mutex_unlock(&wl->mutex);
/*
 * mac80211 hw_scan op: start a one-shot scan using the first requested
 * SSID. Refused while any role holds a ROC (remain-on-channel).
 */
3676 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3677 struct ieee80211_vif *vif,
3678 struct ieee80211_scan_request *hw_req)
3680 struct cfg80211_scan_request *req = &hw_req->req;
3681 struct wl1271 *wl = hw->priv;
3686 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3689 ssid = req->ssids[0].ssid;
3690 len = req->ssids[0].ssid_len;
3693 mutex_lock(&wl->mutex);
3695 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3697 * We cannot return -EBUSY here because cfg80211 will expect
3698 * a call to ieee80211_scan_completed if we do - in this case
3699 * there won't be any call.
3705 ret = pm_runtime_get_sync(wl->dev);
3707 pm_runtime_put_noidle(wl->dev);
3711 /* fail if there is any role in ROC */
3712 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3713 /* don't allow scanning right now */
3718 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3720 pm_runtime_mark_last_busy(wl->dev);
3721 pm_runtime_put_autosuspend(wl->dev);
3723 mutex_unlock(&wl->mutex);
/*
 * mac80211 cancel_hw_scan op: stop an in-flight scan in firmware,
 * rearm the tx watchdog, reset all scan state, and report completion
 * (aborted) to mac80211. scan_complete_work is cancelled outside the
 * mutex since it takes the same lock.
 */
3728 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3729 struct ieee80211_vif *vif)
3731 struct wl1271 *wl = hw->priv;
3732 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3733 struct cfg80211_scan_info info = {
3738 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3740 mutex_lock(&wl->mutex);
3742 if (unlikely(wl->state != WLCORE_STATE_ON))
3745 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3748 ret = pm_runtime_get_sync(wl->dev);
3750 pm_runtime_put_noidle(wl->dev);
3754 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3755 ret = wl->ops->scan_stop(wl, wlvif);
3761 * Rearm the tx watchdog just before idling scan. This
3762 * prevents just-finished scans from triggering the watchdog
3764 wl12xx_rearm_tx_watchdog_locked(wl);
3766 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3767 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3768 wl->scan_wlvif = NULL;
3769 wl->scan.req = NULL;
3770 ieee80211_scan_completed(wl->hw, &info);
3773 pm_runtime_mark_last_busy(wl->dev);
3774 pm_runtime_put_autosuspend(wl->dev);
3776 mutex_unlock(&wl->mutex);
/* must run unlocked: the work item takes wl->mutex itself */
3778 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 sched_scan_start op: hand the scheduled-scan request to the
 * chip-specific implementation and remember which vif owns it
 * (wl->sched_vif) so it can be stopped on state changes.
 */
3781 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3782 struct ieee80211_vif *vif,
3783 struct cfg80211_sched_scan_request *req,
3784 struct ieee80211_scan_ies *ies)
3786 struct wl1271 *wl = hw->priv;
3787 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3790 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3792 mutex_lock(&wl->mutex);
3794 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3799 ret = pm_runtime_get_sync(wl->dev);
3801 pm_runtime_put_noidle(wl->dev);
3805 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3809 wl->sched_vif = wlvif;
3812 pm_runtime_mark_last_busy(wl->dev);
3813 pm_runtime_put_autosuspend(wl->dev);
3815 mutex_unlock(&wl->mutex);
/*
 * mac80211 sched_scan_stop op: stop the chip's scheduled scan for this
 * vif. Runtime-PM and wl->mutex bracket the firmware call, mirroring
 * sched_scan_start.
 */
3819 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3820 struct ieee80211_vif *vif)
3822 struct wl1271 *wl = hw->priv;
3823 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3826 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3828 mutex_lock(&wl->mutex);
3830 if (unlikely(wl->state != WLCORE_STATE_ON))
3833 ret = pm_runtime_get_sync(wl->dev);
3835 pm_runtime_put_noidle(wl->dev);
3839 wl->ops->sched_scan_stop(wl, wlvif);
3841 pm_runtime_mark_last_busy(wl->dev);
3842 pm_runtime_put_autosuspend(wl->dev);
3844 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_frag_threshold op: push the new fragmentation threshold
 * to firmware (global, not per-vif). Failure is only logged.
 */
3849 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3851 struct wl1271 *wl = hw->priv;
3854 mutex_lock(&wl->mutex);
3856 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3861 ret = pm_runtime_get_sync(wl->dev);
3863 pm_runtime_put_noidle(wl->dev);
3867 ret = wl1271_acx_frag_threshold(wl, value);
3869 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3871 pm_runtime_mark_last_busy(wl->dev);
3872 pm_runtime_put_autosuspend(wl->dev);
3875 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_rts_threshold op: apply the new RTS threshold to every
 * vif (the firmware setting is per-role). Failures are only logged.
 */
3880 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3882 struct wl1271 *wl = hw->priv;
3883 struct wl12xx_vif *wlvif;
3886 mutex_lock(&wl->mutex);
3888 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3893 ret = pm_runtime_get_sync(wl->dev);
3895 pm_runtime_put_noidle(wl->dev);
3899 wl12xx_for_each_wlvif(wl, wlvif) {
3900 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3902 wl1271_warning("set rts threshold failed: %d", ret);
3904 pm_runtime_mark_last_busy(wl->dev);
3905 pm_runtime_put_autosuspend(wl->dev);
3908 mutex_unlock(&wl->mutex);
3913 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3916 const u8 *next, *end = skb->data + skb->len;
3917 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3918 skb->len - ieoffset);
3923 memmove(ie, next, end - next);
3924 skb_trim(skb, skb->len - len);
3927 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3928 unsigned int oui, u8 oui_type,
3932 const u8 *next, *end = skb->data + skb->len;
3933 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3934 skb->data + ieoffset,
3935 skb->len - ieoffset);
3940 memmove(ie, next, end - next);
3941 skb_trim(skb, skb->len - len);
/*
 * Push the probe-response template obtained from mac80211 (i.e. set by
 * userspace) to firmware for an AP vif, and mark AP_PROBE_RESP_SET so
 * the beacon-derived template is not used later.
 */
3944 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3945 struct ieee80211_vif *vif)
3947 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3948 struct sk_buff *skb;
3951 skb = ieee80211_proberesp_get(wl->hw, vif);
3955 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3956 CMD_TEMPL_AP_PROBE_RESPONSE,
3965 wl1271_debug(DEBUG_AP, "probe response updated");
3966 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy probe-response template setter: if the driver does not know
 * the SSID yet, rebuild the template from the beacon-derived data with
 * the SSID IE replaced by the one from bss_conf (hidden-SSID support).
 * Copies head, substitutes the SSID IE, then appends the remaining IEs.
 */
3972 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3973 struct ieee80211_vif *vif,
3975 size_t probe_rsp_len,
3978 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3979 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3980 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3981 int ssid_ie_offset, ie_offset, templ_len;
3984 /* no need to change probe response if the SSID is set correctly */
3985 if (wlvif->ssid_len > 0)
3986 return wl1271_cmd_template_set(wl, wlvif->role_id,
3987 CMD_TEMPL_AP_PROBE_RESPONSE,
/* make sure the substituted SSID still fits in the template buffer */
3992 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3993 wl1271_error("probe_rsp template too big");
3997 /* start searching from IE offset */
3998 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4000 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4001 probe_rsp_len - ie_offset);
4003 wl1271_error("No SSID in beacon!");
4007 ssid_ie_offset = ptr - probe_rsp_data;
/* advance ptr past the original SSID IE (header + payload) */
4008 ptr += (ptr[1] + 2);
4010 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4012 /* insert SSID from bss_conf */
4013 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4014 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4015 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4016 bss_conf->ssid, bss_conf->ssid_len);
4017 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4019 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4020 ptr, probe_rsp_len - (ptr - probe_rsp_data));
4021 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4023 return wl1271_cmd_template_set(wl, wlvif->role_id,
4024 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss_conf changes to firmware: slot time,
 * short/long preamble and CTS protection. Preamble failures are
 * silently ignored (return value not checked); slot/CTS failures are
 * logged.
 */
4030 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4031 struct ieee80211_vif *vif,
4032 struct ieee80211_bss_conf *bss_conf,
4035 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4038 if (changed & BSS_CHANGED_ERP_SLOT) {
4039 if (bss_conf->use_short_slot)
4040 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4042 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4044 wl1271_warning("Set slot time failed %d", ret);
4049 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4050 if (bss_conf->use_short_preamble)
4051 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4053 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4056 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4057 if (bss_conf->use_cts_prot)
4058 ret = wl1271_acx_cts_protect(wl, wlvif,
4061 ret = wl1271_acx_cts_protect(wl, wlvif,
4062 CTSPROTECT_DISABLE);
4064 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Build and upload the beacon template for this vif and, unless a
 * probe-response template was already set explicitly by userspace,
 * derive a probe-response template from the beacon: strip the TIM and
 * P2P IEs, rewrite the frame control to PROBE_RESP, and upload it
 * (via the legacy path for hidden SSIDs). Also detects WMM from the
 * beacon's Microsoft WMM vendor IE.
 */
4073 static int wlcore_set_beacon_template(struct wl1271 *wl,
4074 struct ieee80211_vif *vif,
4077 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4078 struct ieee80211_hdr *hdr;
4081 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4082 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4090 wl1271_debug(DEBUG_MASTER, "beacon updated");
4092 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4094 dev_kfree_skb(beacon);
4097 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4098 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4100 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4105 dev_kfree_skb(beacon);
/* WMM is on iff the beacon carries the Microsoft WMM vendor IE */
4109 wlvif->wmm_enabled =
4110 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4111 WLAN_OUI_TYPE_MICROSOFT_WMM,
4112 beacon->data + ieoffset,
4113 beacon->len - ieoffset);
4116 * In case we already have a probe-resp beacon set explicitly
4117 * by usermode, don't use the beacon data.
4119 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4122 /* remove TIM ie from probe response */
4123 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4126 * remove p2p ie from probe response.
4127 * the fw reponds to probe requests that don't include
4128 * the p2p ie. probe requests with p2p ie will be passed,
4129 * and will be responded by the supplicant (the spec
4130 * forbids including the p2p ie when responding to probe
4131 * requests that didn't include it).
4133 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4134 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* repurpose the beacon frame as a probe response */
4136 hdr = (struct ieee80211_hdr *) beacon->data;
4137 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4138 IEEE80211_STYPE_PROBE_RESP);
4140 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4145 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4146 CMD_TEMPL_PROBE_RESPONSE,
4151 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss_conf changes: cache a new beacon interval,
 * refresh the userspace probe-response template (AP only), and rebuild
 * the beacon template. If beaconing had been disabled (DFS), restart
 * the DFS master.
 */
4159 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4160 struct ieee80211_vif *vif,
4161 struct ieee80211_bss_conf *bss_conf,
4164 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4165 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4168 if (changed & BSS_CHANGED_BEACON_INT) {
4169 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4170 bss_conf->beacon_int);
4172 wlvif->beacon_int = bss_conf->beacon_int;
4175 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4176 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4178 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4181 if (changed & BSS_CHANGED_BEACON) {
4182 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4186 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4188 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4195 wl1271_error("beacon info change failed: %d", ret);
4199 /* AP mode changes */
/*
 * bss_info_changed handling specific to AP vifs: basic-rate policy
 * updates (with template re-init), beacon changes, starting/stopping
 * the AP role when beaconing is enabled/disabled (including deferred
 * hw-encryption key programming on start), ERP settings, and HT
 * operation-mode updates.
 */
4200 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4201 struct ieee80211_vif *vif,
4202 struct ieee80211_bss_conf *bss_conf,
4205 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4208 if (changed & BSS_CHANGED_BASIC_RATES) {
4209 u32 rates = bss_conf->basic_rates;
4211 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4213 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4214 wlvif->basic_rate_set);
4216 ret = wl1271_init_ap_rates(wl, wlvif);
4218 wl1271_error("AP rate policy change failed %d", ret);
4222 ret = wl1271_ap_init_templates(wl, vif);
4226 /* No need to set probe resp template for mesh */
4227 if (!ieee80211_vif_is_mesh(vif)) {
4228 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4235 ret = wlcore_set_beacon_template(wl, vif, true);
4240 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4244 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4245 if (bss_conf->enable_beacon) {
4246 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4247 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* flush keys recorded before AP start into hardware */
4251 ret = wl1271_ap_init_hwenc(wl, wlvif);
4255 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4256 wl1271_debug(DEBUG_AP, "started AP");
4259 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4261 * AP might be in ROC in case we have just
4262 * sent auth reply. handle it.
4264 if (test_bit(wlvif->role_id, wl->roc_map))
4265 wl12xx_croc(wl, wlvif->role_id);
4267 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4271 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4272 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4274 wl1271_debug(DEBUG_AP, "stopped AP");
4279 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4283 /* Handle HT information change */
4284 if ((changed & BSS_CHANGED_HT) &&
4285 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4286 ret = wl1271_acx_set_ht_information(wl, wlvif,
4287 bss_conf->ht_operation_mode);
4289 wl1271_warning("Set ht information failed %d", ret);
/*
 * Record a new BSSID for a STA vif: cache beacon interval and basic rates,
 * stop any running sched-scan (unsupported while connected), push rate
 * policies and (qos-)null-data templates to the FW, update the SSID and
 * mark the vif in use.  Returns 0 or a negative error (error paths elided
 * in this extract).
 */
4298 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4299 struct ieee80211_bss_conf *bss_conf,
4305 wl1271_debug(DEBUG_MAC80211,
4306 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4307 bss_conf->bssid, bss_conf->aid,
4308 bss_conf->beacon_int,
4309 bss_conf->basic_rates, sta_rate_set);
4311 wlvif->beacon_int = bss_conf->beacon_int;
4312 rates = bss_conf->basic_rates;
4313 wlvif->basic_rate_set =
4314 wl1271_tx_enabled_rates_get(wl, rates,
4317 wl1271_tx_min_rate_get(wl,
4318 wlvif->basic_rate_set);
4322 wl1271_tx_enabled_rates_get(wl,
4326 /* we only support sched_scan while not connected */
4327 if (wl->sched_vif == wlvif)
4328 wl->ops->sched_scan_stop(wl, wlvif);
4330 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4334 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4338 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4342 wlcore_set_ssid(wl, wlvif);
4344 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, push the
 * reverted rate policies, stop the STA role if it was started, and clear
 * the in-use flag.
 */
4349 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4353 /* revert back to minimum rates for the current band */
4354 wl1271_set_band_rate(wl, wlvif);
4355 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4357 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4361 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4362 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4363 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4368 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Apply STA/IBSS BSS configuration changes: IBSS join state, idle, CQM
 * thresholds, BSSID set/clear, beacon filtering, ERP, join, association,
 * power-save mode, HT peer capabilities and ARP filtering.
 * NOTE(review): many lines (variable declarations, error checks, braces)
 * are elided from this extract — consult the full source.
 */
4371 /* STA/IBSS mode changes */
4372 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4373 struct ieee80211_vif *vif,
4374 struct ieee80211_bss_conf *bss_conf,
4377 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4378 bool do_join = false;
4379 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4380 bool ibss_joined = false;
4381 u32 sta_rate_set = 0;
4383 struct ieee80211_sta *sta;
4384 bool sta_exists = false;
4385 struct ieee80211_sta_ht_cap sta_ht_cap;
4388 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4394 if (changed & BSS_CHANGED_IBSS) {
4395 if (bss_conf->ibss_joined) {
4396 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4399 wlcore_unset_assoc(wl, wlvif);
4400 wl12xx_cmd_role_stop_sta(wl, wlvif);
4404 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4407 /* Need to update the SSID (for filtering etc) */
4408 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4411 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4412 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4413 bss_conf->enable_beacon ? "enabled" : "disabled");
4418 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4419 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4421 if (changed & BSS_CHANGED_CQM) {
4422 bool enable = false;
4423 if (bss_conf->cqm_rssi_thold)
4425 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4426 bss_conf->cqm_rssi_thold,
4427 bss_conf->cqm_rssi_hyst);
4430 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4433 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4434 BSS_CHANGED_ASSOC)) {
4436 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4438 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4440 /* save the supp_rates of the ap */
4441 sta_rate_set = sta->supp_rates[wlvif->band];
4442 if (sta->ht_cap.ht_supported)
4444 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4445 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4446 sta_ht_cap = sta->ht_cap;
4453 if (changed & BSS_CHANGED_BSSID) {
4454 if (!is_zero_ether_addr(bss_conf->bssid)) {
4455 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4460 /* Need to update the BSSID (for filtering etc) */
4463 ret = wlcore_clear_bssid(wl, wlvif);
4469 if (changed & BSS_CHANGED_IBSS) {
4470 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4471 bss_conf->ibss_joined);
4473 if (bss_conf->ibss_joined) {
4474 u32 rates = bss_conf->basic_rates;
4475 wlvif->basic_rate_set =
4476 wl1271_tx_enabled_rates_get(wl, rates,
4479 wl1271_tx_min_rate_get(wl,
4480 wlvif->basic_rate_set);
4482 /* by default, use 11b + OFDM rates */
4483 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4484 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4490 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4491 /* enable beacon filtering */
4492 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4497 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4502 ret = wlcore_join(wl, wlvif);
4504 wl1271_warning("cmd join failed %d", ret);
4509 if (changed & BSS_CHANGED_ASSOC) {
4510 if (bss_conf->assoc) {
4511 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4516 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4517 wl12xx_set_authorized(wl, wlvif);
4519 wlcore_unset_assoc(wl, wlvif);
4523 if (changed & BSS_CHANGED_PS) {
4524 if ((bss_conf->ps) &&
4525 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4526 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4530 if (wl->conf.conn.forced_ps) {
4531 ps_mode = STATION_POWER_SAVE_MODE;
4532 ps_mode_str = "forced";
4534 ps_mode = STATION_AUTO_PS_MODE;
4535 ps_mode_str = "auto";
4538 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4540 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4542 wl1271_warning("enter %s ps failed %d",
4544 } else if (!bss_conf->ps &&
4545 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4546 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4548 ret = wl1271_ps_set_mode(wl, wlvif,
4549 STATION_ACTIVE_MODE);
4551 wl1271_warning("exit auto ps failed %d", ret);
4555 /* Handle new association with HT. Do this after join. */
4558 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4560 ret = wlcore_hw_set_peer_cap(wl,
4566 wl1271_warning("Set ht cap failed %d", ret);
4572 ret = wl1271_acx_set_ht_information(wl, wlvif,
4573 bss_conf->ht_operation_mode);
4575 wl1271_warning("Set ht information failed %d",
4582 /* Handle arp filtering. Done after join. */
4583 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4584 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4585 __be32 addr = bss_conf->arp_addr_list[0];
4586 wlvif->sta.qos = bss_conf->qos;
4587 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4589 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4590 wlvif->ip_addr = addr;
4592 * The template should have been configured only upon
4593 * association. however, it seems that the correct ip
4594 * isn't being set (when sending), so we have to
4595 * reconfigure the template upon every ip change.
4597 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4599 wl1271_warning("build arp rsp failed: %d", ret);
4603 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4604 (ACX_ARP_FILTER_ARP_FILTERING |
4605 ACX_ARP_FILTER_AUTO_ARP),
4609 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback.  Cancels a pending connection-loss
 * work on association changes, flushes TX before beacons are disabled in
 * AP mode, takes wl->mutex + a runtime-PM reference, applies TX-power
 * changes, then dispatches to the AP- or STA-specific handler.
 */
4620 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4621 struct ieee80211_vif *vif,
4622 struct ieee80211_bss_conf *bss_conf,
4625 struct wl1271 *wl = hw->priv;
4626 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4627 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4630 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4631 wlvif->role_id, (int)changed);
4634 * make sure to cancel pending disconnections if our association
4637 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4638 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4640 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4641 !bss_conf->enable_beacon)
4642 wl1271_tx_flush(wl);
4644 mutex_lock(&wl->mutex);
4646 if (unlikely(wl->state != WLCORE_STATE_ON))
4649 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4652 ret = pm_runtime_get_sync(wl->dev);
4654 pm_runtime_put_noidle(wl->dev);
4658 if ((changed & BSS_CHANGED_TXPOWER) &&
4659 bss_conf->txpower != wlvif->power_level) {
4661 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4665 wlvif->power_level = bss_conf->txpower;
4669 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4671 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4673 pm_runtime_mark_last_busy(wl->dev);
4674 pm_runtime_put_autosuspend(wl->dev);
4677 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback.  The hardware needs no per-context
 * setup here, so this only logs the new context.
 */
4680 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4681 struct ieee80211_chanctx_conf *ctx)
4683 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4684 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4685 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 remove_chanctx callback — log only, no per-context teardown
 * is required by the hardware.
 */
4689 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4690 struct ieee80211_chanctx_conf *ctx)
4692 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4693 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4694 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 change_chanctx callback.  Walks every vif bound to this
 * context and, on a RADAR change for an AP vif on a usable DFS channel,
 * starts CAC (radar detection).  Runs under wl->mutex with a runtime-PM
 * reference held.
 */
4697 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4698 struct ieee80211_chanctx_conf *ctx,
4701 struct wl1271 *wl = hw->priv;
4702 struct wl12xx_vif *wlvif;
4704 int channel = ieee80211_frequency_to_channel(
4705 ctx->def.chan->center_freq);
4707 wl1271_debug(DEBUG_MAC80211,
4708 "mac80211 change chanctx %d (type %d) changed 0x%x",
4709 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4711 mutex_lock(&wl->mutex);
4713 ret = pm_runtime_get_sync(wl->dev);
4715 pm_runtime_put_noidle(wl->dev);
4719 wl12xx_for_each_wlvif(wl, wlvif) {
4720 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4723 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4729 /* start radar if needed */
4730 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4731 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4732 ctx->radar_enabled && !wlvif->radar_enabled &&
4733 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4734 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4735 wlcore_hw_set_cac(wl, wlvif, true);
4736 wlvif->radar_enabled = true;
4740 pm_runtime_mark_last_busy(wl->dev);
4741 pm_runtime_put_autosuspend(wl->dev);
4743 mutex_unlock(&wl->mutex);
/*
 * mac80211 assign_vif_chanctx callback.  Records the context's band,
 * channel and channel type on the vif, resets the default rates for the
 * new band, and starts CAC when radar detection is enabled on a usable
 * DFS channel.
 */
4746 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4747 struct ieee80211_vif *vif,
4748 struct ieee80211_chanctx_conf *ctx)
4750 struct wl1271 *wl = hw->priv;
4751 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4752 int channel = ieee80211_frequency_to_channel(
4753 ctx->def.chan->center_freq);
4756 wl1271_debug(DEBUG_MAC80211,
4757 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4758 wlvif->role_id, channel,
4759 cfg80211_get_chandef_type(&ctx->def),
4760 ctx->radar_enabled, ctx->def.chan->dfs_state);
4762 mutex_lock(&wl->mutex);
4764 if (unlikely(wl->state != WLCORE_STATE_ON))
4767 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4770 ret = pm_runtime_get_sync(wl->dev);
4772 pm_runtime_put_noidle(wl->dev);
4776 wlvif->band = ctx->def.chan->band;
4777 wlvif->channel = channel;
4778 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4780 /* update default rates according to the band */
4781 wl1271_set_band_rate(wl, wlvif);
4783 if (ctx->radar_enabled &&
4784 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4785 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4786 wlcore_hw_set_cac(wl, wlvif, true);
4787 wlvif->radar_enabled = true;
4790 pm_runtime_mark_last_busy(wl->dev);
4791 pm_runtime_put_autosuspend(wl->dev);
4793 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback.  Flushes pending TX, then
 * stops CAC (radar detection) on the vif if it was running.
 */
4798 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4799 struct ieee80211_vif *vif,
4800 struct ieee80211_chanctx_conf *ctx)
4802 struct wl1271 *wl = hw->priv;
4803 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4806 wl1271_debug(DEBUG_MAC80211,
4807 "mac80211 unassign chanctx (role %d) %d (type %d)",
4809 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4810 cfg80211_get_chandef_type(&ctx->def));
4812 wl1271_tx_flush(wl);
4814 mutex_lock(&wl->mutex);
4816 if (unlikely(wl->state != WLCORE_STATE_ON))
4819 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4822 ret = pm_runtime_get_sync(wl->dev);
4824 pm_runtime_put_noidle(wl->dev);
4828 if (wlvif->radar_enabled) {
4829 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4830 wlcore_hw_set_cac(wl, wlvif, false);
4831 wlvif->radar_enabled = false;
4834 pm_runtime_mark_last_busy(wl->dev);
4835 pm_runtime_put_autosuspend(wl->dev);
4837 mutex_unlock(&wl->mutex);
/*
 * Move one AP vif to a new channel context: stop CAC on the old channel,
 * adopt the new band/channel/type, and restart CAC if the new context
 * has radar detection enabled.  Only valid for AP vifs with beaconing
 * disabled (both are WARNed on).
 */
4840 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4841 struct wl12xx_vif *wlvif,
4842 struct ieee80211_chanctx_conf *new_ctx)
4844 int channel = ieee80211_frequency_to_channel(
4845 new_ctx->def.chan->center_freq);
4847 wl1271_debug(DEBUG_MAC80211,
4848 "switch vif (role %d) %d -> %d chan_type: %d",
4849 wlvif->role_id, wlvif->channel, channel,
4850 cfg80211_get_chandef_type(&new_ctx->def));
4852 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4855 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4857 if (wlvif->radar_enabled) {
4858 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4859 wlcore_hw_set_cac(wl, wlvif, false);
4860 wlvif->radar_enabled = false;
4863 wlvif->band = new_ctx->def.chan->band;
4864 wlvif->channel = channel;
4865 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4867 /* start radar if needed */
4868 if (new_ctx->radar_enabled) {
4869 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4870 wlcore_hw_set_cac(wl, wlvif, true);
4871 wlvif->radar_enabled = true;
/*
 * mac80211 switch_vif_chanctx callback: applies __wlcore_switch_vif_chan()
 * to each vif in the switch request, under wl->mutex with a runtime-PM
 * reference.  (Return-type line of the definition is elided in this
 * extract.)
 */
4878 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4879 struct ieee80211_vif_chanctx_switch *vifs,
4881 enum ieee80211_chanctx_switch_mode mode)
4883 struct wl1271 *wl = hw->priv;
4886 wl1271_debug(DEBUG_MAC80211,
4887 "mac80211 switch chanctx n_vifs %d mode %d",
4890 mutex_lock(&wl->mutex);
4892 ret = pm_runtime_get_sync(wl->dev);
4894 pm_runtime_put_noidle(wl->dev);
4898 for (i = 0; i < n_vifs; i++) {
4899 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4901 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4906 pm_runtime_mark_last_busy(wl->dev);
4907 pm_runtime_put_autosuspend(wl->dev);
4909 mutex_unlock(&wl->mutex);
/*
 * mac80211 conf_tx callback: program AC parameters (cw_min/max, aifs,
 * txop) and TID configuration for one queue.  P2P management vifs are
 * skipped.  mac80211 passes txop in units of 32us, hence the << 5.
 */
4914 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4915 struct ieee80211_vif *vif, u16 queue,
4916 const struct ieee80211_tx_queue_params *params)
4918 struct wl1271 *wl = hw->priv;
4919 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4923 if (wlcore_is_p2p_mgmt(wlvif))
4926 mutex_lock(&wl->mutex);
4928 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4931 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4933 ps_scheme = CONF_PS_SCHEME_LEGACY;
4935 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4938 ret = pm_runtime_get_sync(wl->dev);
4940 pm_runtime_put_noidle(wl->dev);
4945 * the txop is confed in units of 32us by the mac80211,
4948 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4949 params->cw_min, params->cw_max,
4950 params->aifs, params->txop << 5);
4954 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4955 CONF_CHANNEL_TYPE_EDCF,
4956 wl1271_tx_get_queue(queue),
4957 ps_scheme, CONF_ACK_POLICY_LEGACY,
4961 pm_runtime_mark_last_busy(wl->dev);
4962 pm_runtime_put_autosuspend(wl->dev);
4965 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: query the FW TSF via ACX.  Returns
 * ULLONG_MAX when the device is not on or the query fails.
 */
4970 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4971 struct ieee80211_vif *vif)
4974 struct wl1271 *wl = hw->priv;
4975 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4976 u64 mactime = ULLONG_MAX;
4979 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4981 mutex_lock(&wl->mutex);
4983 if (unlikely(wl->state != WLCORE_STATE_ON))
4986 ret = pm_runtime_get_sync(wl->dev);
4988 pm_runtime_put_noidle(wl->dev);
4992 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4997 pm_runtime_mark_last_busy(wl->dev);
4998 pm_runtime_put_autosuspend(wl->dev);
5001 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: report only the current channel
 * (no noise/usage statistics are provided).
 */
5005 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5006 struct survey_info *survey)
5008 struct ieee80211_conf *conf = &hw->conf;
5013 survey->channel = conf->chandef.chan;
/*
 * Allocate a firmware link (HLID) for a new AP-mode station.  Fails when
 * the AP station limit is reached.  Restores the station's previous
 * freed-packets counter so security sequence numbers survive
 * recovery/resume.
 */
5018 static int wl1271_allocate_sta(struct wl1271 *wl,
5019 struct wl12xx_vif *wlvif,
5020 struct ieee80211_sta *sta)
5022 struct wl1271_station *wl_sta;
5026 if (wl->active_sta_count >= wl->max_ap_stations) {
5027 wl1271_warning("could not allocate HLID - too much stations");
5031 wl_sta = (struct wl1271_station *)sta->drv_priv;
5032 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5034 wl1271_warning("could not allocate HLID - too many links");
5038 /* use the previous security seq, if this is a recovery/resume */
5039 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5041 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5042 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5043 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear its PS bookkeeping, save the
 * last used PN for recovery/suspend, free the link, and re-arm the TX
 * watchdog when the last station goes away.
 */
5047 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5049 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5052 clear_bit(hlid, wlvif->ap.sta_hlid_map);
5053 __clear_bit(hlid, &wl->ap_ps_map);
5054 __clear_bit(hlid, &wl->ap_fw_ps_map);
5057 * save the last used PN in the private part of iee80211_sta,
5058 * in case of recovery/suspend
5060 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5062 wl12xx_free_link(wl, wlvif, &hlid);
5063 wl->active_sta_count--;
5066 * rearm the tx watchdog when the last STA is freed - give the FW a
5067 * chance to return STA-buffered packets before complaining.
5069 if (wl->active_sta_count == 0)
5070 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID, then register the peer with the
 * FW; on cmd failure the HLID is freed again.
 */
5073 static int wl12xx_sta_add(struct wl1271 *wl,
5074 struct wl12xx_vif *wlvif,
5075 struct ieee80211_sta *sta)
5077 struct wl1271_station *wl_sta;
5081 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5083 ret = wl1271_allocate_sta(wl, wlvif, sta);
5087 wl_sta = (struct wl1271_station *)sta->drv_priv;
5088 hlid = wl_sta->hlid;
5090 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5092 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer from the FW and free its HLID.
 * NOTE(review): the bare token 'id' in the test_bit() below looks like an
 * extraction artifact — upstream passes wl_sta->hlid here; confirm
 * against the full source.
 */
5097 static int wl12xx_sta_remove(struct wl1271 *wl,
5098 struct wl12xx_vif *wlvif,
5099 struct ieee80211_sta *sta)
5101 struct wl1271_station *wl_sta;
5104 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5106 wl_sta = (struct wl1271_station *)sta->drv_priv;
5108 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5111 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5115 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a ROC on this vif's role, but only when no other role is
 * currently in ROC (roc_map empty) and the role id is valid.
 */
5119 static void wlcore_roc_if_possible(struct wl1271 *wl,
5120 struct wl12xx_vif *wlvif)
5122 if (find_first_bit(wl->roc_map,
5123 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5126 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5129 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
/*
 * Track stations that are mid-connection (or a pending auth reply when
 * wl_sta is NULL) and keep a ROC active on the vif's role while any
 * exist; CROC once the last one completes.
 */
5133 * when wl_sta is NULL, we treat this call as if coming from a
5134 * pending auth reply.
5135 * wl->mutex must be taken and the FW must be awake when the call
5138 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5139 struct wl1271_station *wl_sta, bool in_conn)
5142 if (WARN_ON(wl_sta && wl_sta->in_connection))
5145 if (!wlvif->ap_pending_auth_reply &&
5146 !wlvif->inconn_count)
5147 wlcore_roc_if_possible(wl, wlvif);
5150 wl_sta->in_connection = true;
5151 wlvif->inconn_count++;
5153 wlvif->ap_pending_auth_reply = true;
5156 if (wl_sta && !wl_sta->in_connection)
5159 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5162 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5166 wl_sta->in_connection = false;
5167 wlvif->inconn_count--;
5169 wlvif->ap_pending_auth_reply = false;
5172 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5173 test_bit(wlvif->role_id, wl->roc_map))
5174 wl12xx_croc(wl, wlvif->role_id);
/*
 * Translate mac80211 station state transitions into FW operations:
 * add/remove/authorize peers (AP mode), send the authorized state and
 * save/restore security sequence numbers (STA mode), and manage ROC
 * around connection setup.  The bss_type guards on each branch are
 * partially elided in this extract.
 */
5178 static int wl12xx_update_sta_state(struct wl1271 *wl,
5179 struct wl12xx_vif *wlvif,
5180 struct ieee80211_sta *sta,
5181 enum ieee80211_sta_state old_state,
5182 enum ieee80211_sta_state new_state)
5184 struct wl1271_station *wl_sta;
5185 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5186 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5189 wl_sta = (struct wl1271_station *)sta->drv_priv;
5191 /* Add station (AP mode) */
5193 old_state == IEEE80211_STA_NOTEXIST &&
5194 new_state == IEEE80211_STA_NONE) {
5195 ret = wl12xx_sta_add(wl, wlvif, sta);
5199 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5202 /* Remove station (AP mode) */
5204 old_state == IEEE80211_STA_NONE &&
5205 new_state == IEEE80211_STA_NOTEXIST) {
5207 wl12xx_sta_remove(wl, wlvif, sta);
5209 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5212 /* Authorize station (AP mode) */
5214 new_state == IEEE80211_STA_AUTHORIZED) {
5215 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5219 /* reconfigure rates */
5220 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5224 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5229 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5232 /* Authorize station */
5234 new_state == IEEE80211_STA_AUTHORIZED) {
5235 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5236 ret = wl12xx_set_authorized(wl, wlvif);
5242 old_state == IEEE80211_STA_AUTHORIZED &&
5243 new_state == IEEE80211_STA_ASSOC) {
5244 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5245 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5248 /* save seq number on disassoc (suspend) */
5250 old_state == IEEE80211_STA_ASSOC &&
5251 new_state == IEEE80211_STA_AUTH) {
5252 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5253 wlvif->total_freed_pkts = 0;
5256 /* restore seq number on assoc (resume) */
5258 old_state == IEEE80211_STA_AUTH &&
5259 new_state == IEEE80211_STA_ASSOC) {
5260 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5263 /* clear ROCs on failure or authorization */
5265 (new_state == IEEE80211_STA_AUTHORIZED ||
5266 new_state == IEEE80211_STA_NOTEXIST)) {
5267 if (test_bit(wlvif->role_id, wl->roc_map))
5268 wl12xx_croc(wl, wlvif->role_id);
5272 old_state == IEEE80211_STA_NOTEXIST &&
5273 new_state == IEEE80211_STA_NONE) {
5274 if (find_first_bit(wl->roc_map,
5275 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5276 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5277 wl12xx_roc(wl, wlvif, wlvif->role_id,
5278 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: take wl->mutex + runtime-PM, delegate the
 * actual transition to wl12xx_update_sta_state().  On teardown
 * transitions (new_state < old_state) the error is suppressed so
 * mac80211 can always tear the station down.
 */
5284 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5285 struct ieee80211_vif *vif,
5286 struct ieee80211_sta *sta,
5287 enum ieee80211_sta_state old_state,
5288 enum ieee80211_sta_state new_state)
5290 struct wl1271 *wl = hw->priv;
5291 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5294 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5295 sta->aid, old_state, new_state);
5297 mutex_lock(&wl->mutex);
5299 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5304 ret = pm_runtime_get_sync(wl->dev);
5306 pm_runtime_put_noidle(wl->dev);
5310 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5312 pm_runtime_mark_last_busy(wl->dev);
5313 pm_runtime_put_autosuspend(wl->dev);
5315 mutex_unlock(&wl->mutex);
5316 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback.  RX BA sessions are managed here via
 * ACX (start/stop, per-link ba_bitmap, global session count with a max);
 * TX BA sessions are managed autonomously by the FW, so all TX actions
 * fall through without driver work.
 */
5321 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5322 struct ieee80211_vif *vif,
5323 struct ieee80211_ampdu_params *params)
5325 struct wl1271 *wl = hw->priv;
5326 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5328 u8 hlid, *ba_bitmap;
5329 struct ieee80211_sta *sta = params->sta;
5330 enum ieee80211_ampdu_mlme_action action = params->action;
5331 u16 tid = params->tid;
5332 u16 *ssn = &params->ssn;
5334 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5337 /* sanity check - the fields in FW are only 8bits wide */
5338 if (WARN_ON(tid > 0xFF))
5341 mutex_lock(&wl->mutex);
5343 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5348 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5349 hlid = wlvif->sta.hlid;
5350 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5351 struct wl1271_station *wl_sta;
5353 wl_sta = (struct wl1271_station *)sta->drv_priv;
5354 hlid = wl_sta->hlid;
5360 ba_bitmap = &wl->links[hlid].ba_bitmap;
5362 ret = pm_runtime_get_sync(wl->dev);
5364 pm_runtime_put_noidle(wl->dev);
5368 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5372 case IEEE80211_AMPDU_RX_START:
5373 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5378 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5380 wl1271_error("exceeded max RX BA sessions");
5384 if (*ba_bitmap & BIT(tid)) {
5386 wl1271_error("cannot enable RX BA session on active "
5391 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5396 *ba_bitmap |= BIT(tid);
5397 wl->ba_rx_session_count++;
5401 case IEEE80211_AMPDU_RX_STOP:
5402 if (!(*ba_bitmap & BIT(tid))) {
5404 * this happens on reconfig - so only output a debug
5405 * message for now, and don't fail the function.
5407 wl1271_debug(DEBUG_MAC80211,
5408 "no active RX BA session on tid: %d",
5414 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5417 *ba_bitmap &= ~BIT(tid);
5418 wl->ba_rx_session_count--;
5423 * The BA initiator session management in FW independently.
5424 * Falling break here on purpose for all TX APDU commands.
5426 case IEEE80211_AMPDU_TX_START:
5427 case IEEE80211_AMPDU_TX_STOP_CONT:
5428 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5429 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5430 case IEEE80211_AMPDU_TX_OPERATIONAL:
5435 wl1271_error("Incorrect ampdu action id=%x\n", action);
5439 pm_runtime_mark_last_busy(wl->dev);
5440 pm_runtime_put_autosuspend(wl->dev);
5443 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: cache the per-band legacy masks on
 * the vif, and — for an unassociated STA — immediately re-derive the band
 * rates and push new rate policies to the FW.
 */
5448 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5449 struct ieee80211_vif *vif,
5450 const struct cfg80211_bitrate_mask *mask)
5452 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5453 struct wl1271 *wl = hw->priv;
5456 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5457 mask->control[NL80211_BAND_2GHZ].legacy,
5458 mask->control[NL80211_BAND_5GHZ].legacy);
5460 mutex_lock(&wl->mutex);
5462 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5463 wlvif->bitrate_masks[i] =
5464 wl1271_tx_enabled_rates_get(wl,
5465 mask->control[i].legacy,
5468 if (unlikely(wl->state != WLCORE_STATE_ON))
5471 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5472 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5474 ret = pm_runtime_get_sync(wl->dev);
5476 pm_runtime_put_noidle(wl->dev);
5480 wl1271_set_band_rate(wl, wlvif);
5482 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5483 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5485 pm_runtime_mark_last_busy(wl->dev);
5486 pm_runtime_put_autosuspend(wl->dev);
5489 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback (STA).  Flushes TX, asks the FW to
 * perform the switch, and schedules channel_switch_work to report
 * failure if the switch has not completed 5 seconds past the announced
 * switch time.  If the device is off, the switch is immediately
 * completed as failed toward mac80211.
 */
5494 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5495 struct ieee80211_vif *vif,
5496 struct ieee80211_channel_switch *ch_switch)
5498 struct wl1271 *wl = hw->priv;
5499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5502 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5504 wl1271_tx_flush(wl);
5506 mutex_lock(&wl->mutex);
5508 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5509 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5510 ieee80211_chswitch_done(vif, false);
5512 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5516 ret = pm_runtime_get_sync(wl->dev);
5518 pm_runtime_put_noidle(wl->dev);
5522 /* TODO: change mac80211 to pass vif as param */
5524 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5525 unsigned long delay_usec;
5527 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5531 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5533 /* indicate failure 5 seconds after channel switch time */
5534 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5536 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5537 usecs_to_jiffies(delay_usec) +
5538 msecs_to_jiffies(5000));
5542 pm_runtime_mark_last_busy(wl->dev);
5543 pm_runtime_put_autosuspend(wl->dev);
5546 mutex_unlock(&wl->mutex);
/*
 * Fetch the current beacon for the vif and return a pointer to the IE
 * with the given EID inside it (searching past the fixed beacon fields).
 * NOTE(review): the skb obtained from ieee80211_beacon_get() is not
 * visibly freed in this extract — confirm against the full source.
 */
5549 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5550 struct wl12xx_vif *wlvif,
5553 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5554 struct sk_buff *beacon =
5555 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5560 return cfg80211_find_ie(eid,
5561 beacon->data + ieoffset,
5562 beacon->len - ieoffset);
/*
 * Read the CSA countdown value from the Channel Switch Announcement IE
 * in the vif's current beacon.  &ie[2] skips the IE's id/len header.
 */
5565 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5569 const struct ieee80211_channel_sw_ie *ie_csa;
5571 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5575 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5576 *csa_count = ie_csa->count;
/*
 * mac80211 channel_switch_beacon callback (AP/CSA): build an
 * ieee80211_channel_switch from the chandef plus the CSA count read out
 * of our own beacon, then hand it to the FW channel-switch op.
 */
5581 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5582 struct ieee80211_vif *vif,
5583 struct cfg80211_chan_def *chandef)
5585 struct wl1271 *wl = hw->priv;
5586 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5587 struct ieee80211_channel_switch ch_switch = {
5589 .chandef = *chandef,
5593 wl1271_debug(DEBUG_MAC80211,
5594 "mac80211 channel switch beacon (role %d)",
5597 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5599 wl1271_error("error getting beacon (for CSA counter)");
5603 mutex_lock(&wl->mutex);
5605 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5610 ret = pm_runtime_get_sync(wl->dev);
5612 pm_runtime_put_noidle(wl->dev);
5616 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5620 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5623 pm_runtime_mark_last_busy(wl->dev);
5624 pm_runtime_put_autosuspend(wl->dev);
5626 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush callback: unconditionally flush all pending TX
 * (queues/drop arguments are not honored per-queue by this hardware).
 */
5629 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5630 u32 queues, bool drop)
5632 struct wl1271 *wl = hw->priv;
5634 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback.  Only one ROC at a time is
 * supported: returns -EBUSY if any role already holds one.  Starts the
 * device on the requested channel and schedules roc_complete_work after
 * the requested duration.
 */
5637 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5638 struct ieee80211_vif *vif,
5639 struct ieee80211_channel *chan,
5641 enum ieee80211_roc_type type)
5643 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5644 struct wl1271 *wl = hw->priv;
5645 int channel, active_roc, ret = 0;
5647 channel = ieee80211_frequency_to_channel(chan->center_freq);
5649 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5650 channel, wlvif->role_id);
5652 mutex_lock(&wl->mutex);
5654 if (unlikely(wl->state != WLCORE_STATE_ON))
5657 /* return EBUSY if we can't ROC right now */
5658 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5659 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5660 wl1271_warning("active roc on role %d", active_roc);
5665 ret = pm_runtime_get_sync(wl->dev);
5667 pm_runtime_put_noidle(wl->dev);
5671 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5676 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5677 msecs_to_jiffies(duration));
5679 pm_runtime_mark_last_busy(wl->dev);
5680 pm_runtime_put_autosuspend(wl->dev);
5682 mutex_unlock(&wl->mutex);
/*
 * Locked helper for ROC completion: if a ROC vif is still recorded and
 * initialized, stop the device on it.  Caller holds wl->mutex with the
 * FW awake.
 */
5686 static int __wlcore_roc_completed(struct wl1271 *wl)
5688 struct wl12xx_vif *wlvif;
5691 /* already completed */
5692 if (unlikely(!wl->roc_vif))
5695 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5697 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5700 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * ROC completion entry point: takes wl->mutex and a runtime-PM
 * reference, then delegates to __wlcore_roc_completed().
 */
5709 static int wlcore_roc_completed(struct wl1271 *wl)
5713 wl1271_debug(DEBUG_MAC80211, "roc complete");
5715 mutex_lock(&wl->mutex);
5717 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5722 ret = pm_runtime_get_sync(wl->dev);
5724 pm_runtime_put_noidle(wl->dev);
5728 ret = __wlcore_roc_completed(wl);
5730 pm_runtime_mark_last_busy(wl->dev);
5731 pm_runtime_put_autosuspend(wl->dev);
5733 mutex_unlock(&wl->mutex);
/*
 * Delayed work scheduled by remain_on_channel: completes the ROC and
 * notifies mac80211 that the remain-on-channel period has expired.
 */
5738 static void wlcore_roc_complete_work(struct work_struct *work)
5740 struct delayed_work *dwork;
5744 dwork = to_delayed_work(work);
5745 wl = container_of(dwork, struct wl1271, roc_complete_work);
5747 ret = wlcore_roc_completed(wl);
5749 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the
 * pending roc_complete_work (cancel_delayed_work_sync rather than
 * flush_work, to avoid a deadlock when called from the same workqueue),
 * and complete the ROC synchronously.
 */
5752 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5754 struct wl1271 *wl = hw->priv;
5756 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5759 wl1271_tx_flush(wl);
5762 * we can't just flush_work here, because it might deadlock
5763 * (as we might get called from the same workqueue)
5765 cancel_delayed_work_sync(&wl->roc_complete_work);
5766 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update callback. Only bandwidth changes are of interest;
 * since this callback runs in atomic context, snapshot the new bandwidth
 * and HT capabilities into the vif and defer the firmware update to
 * rc_update_work.
 */
5771 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5772 struct ieee80211_vif *vif,
5773 struct ieee80211_sta *sta,
5776 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5778 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5780 if (!(changed & IEEE80211_RC_BW_CHANGED))
5783 /* this callback is atomic, so schedule a new work */
5784 wlvif->rc_update_bw = sta->bandwidth;
5785 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5786 ieee80211_queue_work(hw, &wlvif->rc_update_work);
/*
 * mac80211 sta_statistics callback: wake the chip, query the firmware's
 * average RSSI for this vif and report it through sinfo->signal (dBm).
 */
5789 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5790 struct ieee80211_vif *vif,
5791 struct ieee80211_sta *sta,
5792 struct station_info *sinfo)
5794 struct wl1271 *wl = hw->priv;
5795 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5799 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5801 mutex_lock(&wl->mutex);
5803 if (unlikely(wl->state != WLCORE_STATE_ON))
5806 ret = pm_runtime_get_sync(wl->dev);
/* get_sync failed: balance the usage count without suspending */
5808 pm_runtime_put_noidle(wl->dev);
5812 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5816 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5817 sinfo->signal = rssi_dbm;
5820 pm_runtime_mark_last_busy(wl->dev);
5821 pm_runtime_put_autosuspend(wl->dev);
5824 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_expected_throughput callback: report the firmware's last
 * known TX rate for this station's link, converted from Mbps to Kbps.
 */
5827 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5828 struct ieee80211_sta *sta)
5830 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5831 struct wl1271 *wl = hw->priv;
5832 u8 hlid = wl_sta->hlid;
5834 /* return in units of Kbps */
5835 return (wl->links[hlid].fw_rate_mbps * 1000);
/*
 * mac80211 tx_frames_pending callback: true if any frame is still queued
 * in the driver or outstanding in the firmware.
 */
5838 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5840 struct wl1271 *wl = hw->priv;
5843 mutex_lock(&wl->mutex);
5845 if (unlikely(wl->state != WLCORE_STATE_ON))
5848 /* packets are considered pending if in the TX queue or the FW */
5849 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5851 mutex_unlock(&wl->mutex);
/*
 * 2.4 GHz legacy rate table (CCK + OFDM). hw_value/hw_value_short map each
 * rate to its CONF_HW_BIT_RATE_* firmware rate bit; the DSSS rates above
 * 1 Mbps support short preamble.
 */
5856 /* can't be const, mac80211 writes to this */
5857 static struct ieee80211_rate wl1271_rates[] = {
5859 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5860 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5862 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5863 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5864 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5866 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5867 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5868 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5870 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5871 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5872 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5874 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5875 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5877 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5878 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5880 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5881 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5883 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5884 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5886 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5887 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5889 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5890 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5892 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5893 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5895 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5896 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 2.4 GHz channel list (channels 1-14), all capped at WLCORE_MAX_TXPWR. */
5899 /* can't be const, mac80211 writes to this */
5900 static struct ieee80211_channel wl1271_channels[] = {
5901 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5902 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5903 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5904 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5905 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5906 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5907 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5908 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5909 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5910 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5911 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5912 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5913 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5914 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
/* 2.4 GHz supported-band descriptor tying the channel and rate tables. */
5917 /* can't be const, mac80211 writes to this */
5918 static struct ieee80211_supported_band wl1271_band_2ghz = {
5919 .channels = wl1271_channels,
5920 .n_channels = ARRAY_SIZE(wl1271_channels),
5921 .bitrates = wl1271_rates,
5922 .n_bitrates = ARRAY_SIZE(wl1271_rates),
/* 5 GHz OFDM-only rate table (no CCK rates, no short-preamble flags). */
5925 /* 5 GHz data rates for WL1273 */
5926 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5928 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5929 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5931 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5932 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5934 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5935 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5937 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5938 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5940 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5941 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5943 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5944 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5946 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5947 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5949 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5950 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
/* 5 GHz channel list, all capped at WLCORE_MAX_TXPWR. */
5953 /* 5 GHz band channels for WL1273 */
5954 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5955 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5956 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5957 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5958 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5959 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5960 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5961 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5962 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5963 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5964 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5965 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5966 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5967 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5968 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5969 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5970 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5971 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5972 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5973 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5974 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5975 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5976 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5977 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5978 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5979 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5980 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5981 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5982 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5983 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5984 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5985 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz supported-band descriptor tying the channel and rate tables. */
5988 static struct ieee80211_supported_band wl1271_band_5ghz = {
5989 .channels = wl1271_channels_5ghz,
5990 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5991 .bitrates = wl1271_rates_5ghz,
5992 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* mac80211 callback table registered for all wlcore-based devices. */
5995 static const struct ieee80211_ops wl1271_ops = {
5996 .start = wl1271_op_start,
5997 .stop = wlcore_op_stop,
5998 .add_interface = wl1271_op_add_interface,
5999 .remove_interface = wl1271_op_remove_interface,
6000 .change_interface = wl12xx_op_change_interface,
6002 .suspend = wl1271_op_suspend,
6003 .resume = wl1271_op_resume,
6005 .config = wl1271_op_config,
6006 .prepare_multicast = wl1271_op_prepare_multicast,
6007 .configure_filter = wl1271_op_configure_filter,
6009 .set_key = wlcore_op_set_key,
6010 .hw_scan = wl1271_op_hw_scan,
6011 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
6012 .sched_scan_start = wl1271_op_sched_scan_start,
6013 .sched_scan_stop = wl1271_op_sched_scan_stop,
6014 .bss_info_changed = wl1271_op_bss_info_changed,
6015 .set_frag_threshold = wl1271_op_set_frag_threshold,
6016 .set_rts_threshold = wl1271_op_set_rts_threshold,
6017 .conf_tx = wl1271_op_conf_tx,
6018 .get_tsf = wl1271_op_get_tsf,
6019 .get_survey = wl1271_op_get_survey,
6020 .sta_state = wl12xx_op_sta_state,
6021 .ampdu_action = wl1271_op_ampdu_action,
6022 .tx_frames_pending = wl1271_tx_frames_pending,
6023 .set_bitrate_mask = wl12xx_set_bitrate_mask,
6024 .set_default_unicast_key = wl1271_op_set_default_key_idx,
6025 .channel_switch = wl12xx_op_channel_switch,
6026 .channel_switch_beacon = wlcore_op_channel_switch_beacon,
6027 .flush = wlcore_op_flush,
6028 .remain_on_channel = wlcore_op_remain_on_channel,
6029 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6030 .add_chanctx = wlcore_op_add_chanctx,
6031 .remove_chanctx = wlcore_op_remove_chanctx,
6032 .change_chanctx = wlcore_op_change_chanctx,
6033 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6034 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6035 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6036 .sta_rc_update = wlcore_op_sta_rc_update,
6037 .sta_statistics = wlcore_op_sta_statistics,
6038 .get_expected_throughput = wlcore_op_get_expected_throughput,
6039 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a raw hardware rate index reported by the firmware into the
 * mac80211 rate index for the given band, validating that the value is in
 * range and supported on that band.
 */
6043 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6049 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6050 wl1271_error("Illegal RX rate from HW: %d", rate);
6054 idx = wl->band_rate_to_idx[band][rate];
6055 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6056 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Build the wiphy MAC address list from a base OUI + NIC pair: consecutive
 * NIC values for the addresses the chip natively supports, and — if one is
 * missing — a locally-administered copy of the first address for the last
 * slot.
 */
6063 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6067 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
/* warn if incrementing NIC would overflow the 24-bit NIC space */
6070 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6071 wl1271_warning("NIC part of the MAC address wraps around!");
6073 for (i = 0; i < wl->num_mac_addr; i++) {
6074 wl->addresses[i].addr[0] = (u8)(oui >> 16);
6075 wl->addresses[i].addr[1] = (u8)(oui >> 8);
6076 wl->addresses[i].addr[2] = (u8) oui;
6077 wl->addresses[i].addr[3] = (u8)(nic >> 16);
6078 wl->addresses[i].addr[4] = (u8)(nic >> 8);
6079 wl->addresses[i].addr[5] = (u8) nic;
6083 /* we may be one address short at the most */
6084 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6087 * turn on the LAA bit in the first address and use it as
6090 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6091 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6092 memcpy(&wl->addresses[idx], &wl->addresses[0],
6093 sizeof(wl->addresses[0]));
/* BIT(1) in the first octet marks a locally-administered address */
6095 wl->addresses[idx].addr[0] |= BIT(1);
6098 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6099 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Read identification data from the powered-on chip: chip ID, PG (process
 * grade) version and, when the lower driver provides it, the fused MAC
 * address (fuse_oui_addr/fuse_nic_addr default to 0 otherwise).
 */
6102 static int wl12xx_get_hw_info(struct wl1271 *wl)
6106 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6110 wl->fuse_oui_addr = 0;
6111 wl->fuse_nic_addr = 0;
6113 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6117 if (wl->ops->get_mac)
6118 ret = wl->ops->get_mac(wl);
/*
 * Determine the device MAC address (NVS first, then fuse, then a random
 * fallback) and register the hw with mac80211. Idempotent: returns early
 * when already registered.
 */
6124 static int wl1271_register_hw(struct wl1271 *wl)
6127 u32 oui_addr = 0, nic_addr = 0;
6128 struct platform_device *pdev = wl->pdev;
6129 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6131 if (wl->mac80211_registered)
6134 if (wl->nvs_len >= 12) {
6135 /* NOTE: The wl->nvs->nvs element must be first, in
6136 * order to simplify the casting, we assume it is at
6137 * the beginning of the wl->nvs structure.
/* MAC bytes live at fixed offsets in the NVS blob (see indices below) */
6139 u8 *nvs_ptr = (u8 *)wl->nvs;
6142 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6144 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6147 /* if the MAC address is zeroed in the NVS derive from fuse */
6148 if (oui_addr == 0 && nic_addr == 0) {
6149 oui_addr = wl->fuse_oui_addr;
6150 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6151 nic_addr = wl->fuse_nic_addr + 1;
/* de:ad:be:ef:00:00 is the placeholder address shipped in default NVS files */
6154 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6155 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6156 if (!strcmp(pdev_data->family->name, "wl18xx")) {
6157 wl1271_warning("This default nvs file can be removed from the file system");
6159 wl1271_warning("Your device performance is not optimized.");
6160 wl1271_warning("Please use the calibrator tool to configure your device.");
6163 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6164 wl1271_warning("Fuse mac address is zero. using random mac");
6165 /* Use TI oui and a random nic */
6166 oui_addr = WLCORE_TI_OUI_ADDRESS;
6167 nic_addr = get_random_int();
6169 oui_addr = wl->fuse_oui_addr;
6170 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
6171 nic_addr = wl->fuse_nic_addr + 1;
6175 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6177 ret = ieee80211_register_hw(wl->hw);
6179 wl1271_error("unable to register mac80211 hw: %d", ret);
6183 wl->mac80211_registered = true;
6185 wl1271_debugfs_init(wl);
6187 wl1271_notice("loaded");
/*
 * Undo wl1271_register_hw(): stop PLT mode if active and unregister the
 * hw from mac80211.
 */
6193 static void wl1271_unregister_hw(struct wl1271 *wl)
6196 wl1271_plt_stop(wl);
6198 ieee80211_unregister_hw(wl->hw);
6199 wl->mac80211_registered = false;
/*
 * Fill in all static mac80211/cfg80211 capabilities for this device before
 * registration: hw flags, cipher suites, interface modes, scan limits,
 * band/channel tables, queue layout and probe-response offload.
 */
6203 static int wl1271_init_ieee80211(struct wl1271 *wl)
6206 static const u32 cipher_suites[] = {
6207 WLAN_CIPHER_SUITE_WEP40,
6208 WLAN_CIPHER_SUITE_WEP104,
6209 WLAN_CIPHER_SUITE_TKIP,
6210 WLAN_CIPHER_SUITE_CCMP,
/* TI proprietary GEM cipher */
6211 WL1271_CIPHER_SUITE_GEM,
6214 /* The tx descriptor buffer */
6215 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* some chips need extra headroom for the TKIP MIC/IV handling quirk */
6217 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6218 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6221 /* FIXME: find a proper value */
6222 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6224 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6225 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6226 ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6227 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6228 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6229 ieee80211_hw_set(wl->hw, AP_LINK_PS);
6230 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6231 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6232 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6233 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6234 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6235 ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6236 ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6237 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6239 wl->hw->wiphy->cipher_suites = cipher_suites;
6240 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6242 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6243 BIT(NL80211_IFTYPE_AP) |
6244 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6245 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6246 #ifdef CONFIG_MAC80211_MESH
6247 BIT(NL80211_IFTYPE_MESH_POINT) |
6249 BIT(NL80211_IFTYPE_P2P_GO);
6251 wl->hw->wiphy->max_scan_ssids = 1;
6252 wl->hw->wiphy->max_sched_scan_ssids = 16;
6253 wl->hw->wiphy->max_match_sets = 16;
6255 * Maximum length of elements in scanning probe request templates
6256 * should be the maximum length possible for a template, without
6257 * the IEEE80211 header of the template
6259 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6260 sizeof(struct ieee80211_header);
6262 wl->hw->wiphy->max_sched_scan_reqs = 1;
6263 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6264 sizeof(struct ieee80211_header);
6266 wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6268 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6269 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6270 WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6272 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6274 /* make sure all our channels fit in the scanned_ch bitmask */
6275 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6276 ARRAY_SIZE(wl1271_channels_5ghz) >
6277 WL1271_MAX_CHANNELS);
6279 * clear channel flags from the previous usage
6280 * and restore max_power & max_antenna_gain values.
6282 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6283 wl1271_band_2ghz.channels[i].flags = 0;
6284 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6285 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6288 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6289 wl1271_band_5ghz.channels[i].flags = 0;
6290 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6291 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6295 * We keep local copies of the band structs because we need to
6296 * modify them on a per-device basis.
6298 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6299 sizeof(wl1271_band_2ghz));
6300 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6301 &wl->ht_cap[NL80211_BAND_2GHZ],
6302 sizeof(*wl->ht_cap));
6303 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6304 sizeof(wl1271_band_5ghz));
6305 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6306 &wl->ht_cap[NL80211_BAND_5GHZ],
6307 sizeof(*wl->ht_cap));
6309 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6310 &wl->bands[NL80211_BAND_2GHZ];
6311 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6312 &wl->bands[NL80211_BAND_5GHZ];
6315 * allow 4 queues per mac address we support +
6316 * 1 cab queue per mac + one global offchannel Tx queue
6318 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6320 /* the last queue is the offchannel queue */
6321 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6322 wl->hw->max_rates = 1;
6324 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6326 /* the FW answers probe-requests in AP-mode */
6327 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6328 wl->hw->wiphy->probe_resp_offload =
6329 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6330 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6331 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6333 /* allowed interface combinations */
6334 wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6335 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6337 /* register vendor commands */
6338 wlcore_set_vendor_commands(wl->hw->wiphy);
6340 SET_IEEE80211_DEV(wl->hw, wl->dev);
6342 wl->hw->sta_data_size = sizeof(struct wl1271_station);
6343 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6345 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * Allocate the ieee80211_hw plus all driver-private state: per-link TX
 * queues, work items, the freezable workqueue, the aggregation buffer,
 * the dummy packet, the FW log page and mailbox buffers. On error every
 * already-acquired resource is released via the goto-cleanup chain and an
 * ERR_PTR is returned.
 */
6350 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6353 struct ieee80211_hw *hw;
6358 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6360 wl1271_error("could not alloc ieee80211_hw");
6366 memset(wl, 0, sizeof(*wl));
/* chip-family private area, sized by the lower driver */
6368 wl->priv = kzalloc(priv_size, GFP_KERNEL);
6370 wl1271_error("could not alloc wl priv");
6372 goto err_priv_alloc;
6375 INIT_LIST_HEAD(&wl->wlvif_list);
6380 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6381 * we don't allocate any additional resource here, so that's fine.
6383 for (i = 0; i < NUM_TX_QUEUES; i++)
6384 for (j = 0; j < WLCORE_MAX_LINKS; j++)
6385 skb_queue_head_init(&wl->links[j].tx_queue[i]);
6387 skb_queue_head_init(&wl->deferred_rx_queue);
6388 skb_queue_head_init(&wl->deferred_tx_queue);
6390 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6391 INIT_WORK(&wl->tx_work, wl1271_tx_work);
6392 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6393 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6394 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6395 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* freezable so in-flight work is parked across system suspend */
6397 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6398 if (!wl->freezable_wq) {
6405 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6406 wl->band = NL80211_BAND_2GHZ;
6407 wl->channel_type = NL80211_CHAN_NO_HT;
6409 wl->sg_enabled = true;
6410 wl->sleep_auth = WL1271_PSM_ILLEGAL;
6411 wl->recovery_count = 0;
6414 wl->ap_fw_ps_map = 0;
6416 wl->system_hlid = WL12XX_SYSTEM_HLID;
6417 wl->active_sta_count = 0;
6418 wl->active_link_count = 0;
6421 /* The system link is always allocated */
6422 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6424 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6425 for (i = 0; i < wl->num_tx_desc; i++)
6426 wl->tx_frames[i] = NULL;
6428 spin_lock_init(&wl->wl_lock);
6430 wl->state = WLCORE_STATE_OFF;
6431 wl->fw_type = WL12XX_FW_TYPE_NONE;
6432 mutex_init(&wl->mutex);
6433 mutex_init(&wl->flush_mutex);
6434 init_completion(&wl->nvs_loading_complete);
/* aggregation buffer is page-aligned; remember the order for freeing */
6436 order = get_order(aggr_buf_size);
6437 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6438 if (!wl->aggr_buf) {
6442 wl->aggr_buf_size = aggr_buf_size;
6444 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6445 if (!wl->dummy_packet) {
6450 /* Allocate one page for the FW log */
6451 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6454 goto err_dummy_packet;
6457 wl->mbox_size = mbox_size;
/* GFP_DMA: mailbox may be targeted by DMA-capable bus transfers */
6458 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6464 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6465 if (!wl->buffer_32) {
6476 free_page((unsigned long)wl->fwlog);
6479 dev_kfree_skb(wl->dummy_packet);
6482 free_pages((unsigned long)wl->aggr_buf, order);
6485 destroy_workqueue(wl->freezable_wq);
6488 wl1271_debugfs_exit(wl);
6492 ieee80211_free_hw(hw);
6496 return ERR_PTR(ret);
6498 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
/*
 * Release everything wlcore_alloc_hw() acquired, in reverse order.
 * Setting fwlog_size to -1 under the mutex unblocks any readers waiting
 * on the FW log before the buffers are freed.
 */
6500 int wlcore_free_hw(struct wl1271 *wl)
6502 /* Unblock any fwlog readers */
6503 mutex_lock(&wl->mutex);
6504 wl->fwlog_size = -1;
6505 mutex_unlock(&wl->mutex);
6507 wlcore_sysfs_free(wl);
6509 kfree(wl->buffer_32);
6511 free_page((unsigned long)wl->fwlog);
6512 dev_kfree_skb(wl->dummy_packet);
6513 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6515 wl1271_debugfs_exit(wl);
6519 wl->fw_type = WL12XX_FW_TYPE_NONE;
6523 kfree(wl->raw_fw_status);
6524 kfree(wl->fw_status);
6525 kfree(wl->tx_res_if);
6526 destroy_workqueue(wl->freezable_wq);
6529 ieee80211_free_hw(wl->hw);
6533 EXPORT_SYMBOL_GPL(wlcore_free_hw);
/* Wake-on-WLAN capabilities advertised when power stays on in suspend. */
6536 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6537 .flags = WIPHY_WOWLAN_ANY,
6538 .n_patterns = WL1271_MAX_RX_FILTERS,
6539 .pattern_min_len = 1,
6540 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * Hard-IRQ half used for edge-triggered interrupts: do nothing in hard-IRQ
 * context and hand off to the threaded handler.
 */
6544 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6546 return IRQ_WAKE_THREAD;
/*
 * Completion callback for the asynchronous NVS firmware request. This is
 * the real second-stage probe: copy the NVS blob (if any), run the
 * chip-family setup, wire up the IRQ (and optional dedicated wake IRQ),
 * read HW info, then register with mac80211 and sysfs. Always releases
 * the firmware and completes nvs_loading_complete on exit.
 */
6549 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6551 struct wl1271 *wl = context;
6552 struct platform_device *pdev = wl->pdev;
6553 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6554 struct resource *res;
6557 irq_handler_t hardirq_fn = NULL;
6560 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6562 wl1271_error("Could not allocate nvs data");
6565 wl->nvs_len = fw->size;
6566 } else if (pdev_data->family->nvs_name) {
6567 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6568 pdev_data->family->nvs_name);
6576 ret = wl->ops->setup(wl);
6580 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6582 /* adjust some runtime configuration parameters */
6583 wlcore_adjust_conf(wl);
6585 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6587 wl1271_error("Could not get IRQ resource");
6591 wl->irq = res->start;
6592 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6593 wl->if_ops = pdev_data->if_ops;
/* edge-triggered lines need a hard-IRQ half to latch the event */
6595 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6596 hardirq_fn = wlcore_hardirq;
6598 wl->irq_flags |= IRQF_ONESHOT;
6600 ret = wl12xx_set_power_on(wl);
6604 ret = wl12xx_get_hw_info(wl);
6606 wl1271_error("couldn't get hw info");
6607 wl1271_power_off(wl);
6611 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6612 wl->irq_flags, pdev->name, wl);
6614 wl1271_error("interrupt configuration failed");
6615 wl1271_power_off(wl);
6620 device_init_wakeup(wl->dev, true);
6622 ret = enable_irq_wake(wl->irq);
6624 wl->irq_wake_enabled = true;
/* advertise WoWLAN only if the platform keeps power in suspend */
6625 if (pdev_data->pwr_in_suspend)
6626 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* optional second IRQ resource acts as a dedicated wakeup line */
6629 res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6631 wl->wakeirq = res->start;
6632 wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6633 ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6635 wl->wakeirq = -ENODEV;
6637 wl->wakeirq = -ENODEV;
6640 disable_irq(wl->irq);
6641 wl1271_power_off(wl);
6643 ret = wl->ops->identify_chip(wl);
6647 ret = wl1271_init_ieee80211(wl);
6651 ret = wl1271_register_hw(wl);
6655 ret = wlcore_sysfs_init(wl);
6659 wl->initialized = true;
6663 wl1271_unregister_hw(wl);
6666 if (wl->wakeirq >= 0)
6667 dev_pm_clear_wake_irq(wl->dev);
6668 device_init_wakeup(wl->dev, false);
6669 free_irq(wl->irq, wl);
6675 release_firmware(fw);
6676 complete_all(&wl->nvs_loading_complete);
/*
 * Runtime-PM suspend: put the chip into ELP (extremely low power) sleep.
 * Skipped in PLT mode, when ELP is not the configured sleep policy, or —
 * per the loop below — while any in-use vif is not yet in powersave.
 * A failed ELP write triggers FW recovery.
 */
6679 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6681 struct wl1271 *wl = dev_get_drvdata(dev);
6682 struct wl12xx_vif *wlvif;
6685 /* We do not enter elp sleep in PLT mode */
6689 /* Nothing to do if no ELP mode requested */
6690 if (wl->sleep_auth != WL1271_PSM_ELP)
6693 wl12xx_for_each_wlvif(wl, wlvif) {
6694 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6695 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6699 wl1271_debug(DEBUG_PSM, "chip to elp");
6700 error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6702 wl12xx_queue_recovery_work(wl);
6707 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
/*
 * Runtime-PM resume: wake the chip from ELP sleep. Registers the on-stack
 * completion in wl->elp_compl under wl_lock, writes the wake-up bit, then
 * waits (up to WL1271_WAKEUP_TIMEOUT ms) for the IRQ path to signal it.
 * On timeout or write failure, FW recovery is queued instead of returning
 * an error to the PM core.
 */
6712 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6714 struct wl1271 *wl = dev_get_drvdata(dev);
6715 DECLARE_COMPLETION_ONSTACK(compl);
6716 unsigned long flags;
6718 unsigned long start_time = jiffies;
6719 bool pending = false;
6720 bool recovery = false;
6722 /* Nothing to do if no ELP mode requested */
6723 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6726 wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6728 spin_lock_irqsave(&wl->wl_lock, flags);
/* the IRQ thread may already be handling the wake-up event */
6729 if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6732 wl->elp_compl = &compl;
6733 spin_unlock_irqrestore(&wl->wl_lock, flags);
6735 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6742 ret = wait_for_completion_timeout(&compl,
6743 msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6745 wl1271_warning("ELP wakeup timeout!");
6747 /* Return no error for runtime PM for recovery */
6754 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6756 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6757 jiffies_to_msecs(jiffies - start_time));
/* drop the stale pointer to the on-stack completion before leaving */
6762 spin_lock_irqsave(&wl->wl_lock, flags);
6763 wl->elp_compl = NULL;
6764 spin_unlock_irqrestore(&wl->wl_lock, flags);
6767 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6768 wl12xx_queue_recovery_work(wl);
/* Runtime-PM callbacks installed on the device by wlcore_probe(). */
6774 static const struct dev_pm_ops wlcore_pm_ops = {
6775 SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6776 wlcore_runtime_resume,
/*
 * Platform-device probe entry used by the chip-family drivers. Validates
 * the lower-driver hooks, then either requests the NVS file asynchronously
 * (continuing in wlcore_nvs_cb) or calls wlcore_nvs_cb(NULL, wl) directly,
 * and finally enables autosuspending runtime PM on the device.
 */
6780 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6782 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6783 const char *nvs_name;
6786 if (!wl->ops || !wl->ptable || !pdev_data)
6789 wl->dev = &pdev->dev;
6791 platform_set_drvdata(pdev, wl);
6793 if (pdev_data->family && pdev_data->family->nvs_name) {
6794 nvs_name = pdev_data->family->nvs_name;
6795 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6796 nvs_name, &pdev->dev, GFP_KERNEL,
6799 wl1271_error("request_firmware_nowait failed for %s: %d",
/* unblock anyone waiting for the (never-coming) NVS callback */
6801 complete_all(&wl->nvs_loading_complete);
6804 wlcore_nvs_cb(NULL, wl);
6807 wl->dev->driver->pm = &wlcore_pm_ops;
/* autosuspend after 50 ms of inactivity */
6808 pm_runtime_set_autosuspend_delay(wl->dev, 50);
6809 pm_runtime_use_autosuspend(wl->dev);
6810 pm_runtime_enable(wl->dev);
6814 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * Platform-device removal: wake the chip, wait for any in-flight NVS
 * loading to settle, then tear down wake IRQs, mac80211 registration,
 * runtime PM and the main interrupt — the reverse of wlcore_nvs_cb().
 */
6816 int wlcore_remove(struct platform_device *pdev)
6818 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6819 struct wl1271 *wl = platform_get_drvdata(pdev);
6822 error = pm_runtime_get_sync(wl->dev);
6824 dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6826 wl->dev->driver->pm = NULL;
/* make sure the async NVS callback has finished before tearing down */
6828 if (pdev_data->family && pdev_data->family->nvs_name)
6829 wait_for_completion(&wl->nvs_loading_complete);
6830 if (!wl->initialized)
6833 if (wl->wakeirq >= 0) {
6834 dev_pm_clear_wake_irq(wl->dev);
6835 wl->wakeirq = -ENODEV;
6838 device_init_wakeup(wl->dev, false);
6840 if (wl->irq_wake_enabled)
6841 disable_irq_wake(wl->irq);
6843 wl1271_unregister_hw(wl);
6845 pm_runtime_put_sync(wl->dev);
6846 pm_runtime_dont_use_autosuspend(wl->dev);
6847 pm_runtime_disable(wl->dev);
6849 free_irq(wl->irq, wl);
6854 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Module parameters (debugging/recovery knobs) and module metadata. */
6856 u32 wl12xx_debug_level = DEBUG_NONE;
6857 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6858 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6859 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6861 module_param_named(fwlog, fwlog_param, charp, 0);
6862 MODULE_PARM_DESC(fwlog,
6863 "FW logger options: continuous, dbgpins or disable");
6865 module_param(fwlog_mem_blocks, int, 0600);
6866 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6868 module_param(bug_on_recovery, int, 0600);
6869 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6871 module_param(no_recovery, int, 0600);
6872 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.")
6874 MODULE_LICENSE("GPL");
6875 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6876 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");