1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 - 2019 Intel Corporation
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
25 * Contact Information:
26 * Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64 #ifndef __iwl_trans_h__
65 #define __iwl_trans_h__
67 #include <linux/ieee80211.h>
68 #include <linux/mm.h> /* for page_address */
69 #include <linux/lockdep.h>
70 #include <linux/kernel.h>
72 #include "iwl-debug.h"
73 #include "iwl-config.h"
75 #include "iwl-op-mode.h"
76 #include "fw/api/cmdhdr.h"
77 #include "fw/api/txq.h"
78 #include "fw/api/dbg-tlv.h"
79 #include "iwl-dbg-tlv.h"
 * DOC: Transport layer - what is it?
84 * The transport layer is the layer that deals with the HW directly. It provides
85 * an abstraction of the underlying HW to the upper layer. The transport layer
86 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless but
 * close to it.
89 * We will have an implementation for each different supported bus.
93 * DOC: Life cycle of the transport layer
95 * The transport layer has a very precise life cycle.
97 * 1) A helper function is called during the module initialization and
98 * registers the bus driver's ops with the transport's alloc function.
 * 2) The bus's probe function calls the transport layer's allocation
 *    functions; these are, of course, bus specific.
 * 3) The allocation functions will spawn the upper layer, which will
 *    register mac80211.
 * 4) At some point (i.e. mac80211's start call), the op_mode will call
 *    the following sequence (see the sketch below):
 *    start_hw
 *    start_fw
 * 5) Then when finished (or reset):
 *    stop_device
 * 6) Eventually, the free function will be called.
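 *
 * A minimal sketch of steps 4) and 5), using the inline wrappers defined
 * later in this file (illustrative only; error handling and the exact call
 * sites are op_mode specific):
 *
 *      iwl_trans_start_hw(trans);
 *      iwl_trans_start_fw(trans, fw, run_in_rfkill);
 *      ... on the firmware's alive notification ...
 *      iwl_trans_fw_alive(trans, scd_base_addr);
 *      ...
 *      iwl_trans_stop_device(trans);
 */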
115 #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
116 #define FH_RSCSR_FRAME_INVALID 0x55550000
117 #define FH_RSCSR_FRAME_ALIGN 0x40
118 #define FH_RSCSR_RPA_EN BIT(25)
119 #define FH_RSCSR_RADA_EN BIT(26)
120 #define FH_RSCSR_RXQ_POS 16
121 #define FH_RSCSR_RXQ_MASK 0x3F0000
123 struct iwl_rx_packet {
125 * The first 4 bytes of the RX frame header contain both the RX frame
126 * size and some flags.
128 * 31: flag flush RB request
129 * 30: flag ignore TC (terminal counter) request
130 * 29: flag fast IRQ request
133 * 25: Offload enabled
136 * 22: Checksum enabled
139 * 13-00: RX frame size
         */
        __le32 len_n_flags;
        struct iwl_cmd_header hdr;
        u8 data[];
} __packed;
146 static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
148 return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
151 static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
153 return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
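/*
 * Illustrative sketch (not part of the API): how an RX handler can use the
 * helpers above together with the FH_RSCSR_* masks, assuming @pkt points at
 * a received struct iwl_rx_packet:
 *
 *      u32 payload_len = iwl_rx_packet_payload_len(pkt);
 *      u32 rxq = (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
 *                FH_RSCSR_RXQ_POS;
 */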
 * enum CMD_MODE - how to send the host commands?
159 * @CMD_ASYNC: Return right away and don't wait for the response
160 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
161 * the response. The caller needs to call iwl_free_resp when done.
162 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 * command queue, but after other high priority commands. Valid only
 * with CMD_ASYNC.
165 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
166 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
167 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
168 * (i.e. mark it as non-idle).
169 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
170 * called after this command completes. Valid only with CMD_ASYNC.
enum CMD_MODE {
        CMD_ASYNC = BIT(0),
        CMD_WANT_SKB = BIT(1),
175 CMD_SEND_IN_RFKILL = BIT(2),
176 CMD_HIGH_PRIO = BIT(3),
177 CMD_SEND_IN_IDLE = BIT(4),
178 CMD_MAKE_TRANS_IDLE = BIT(5),
179 CMD_WAKE_UP_TRANS = BIT(6),
        CMD_WANT_ASYNC_CALLBACK = BIT(7),
};
183 #define DEF_CMD_PAYLOAD_SIZE 320
186 * struct iwl_device_cmd
188 * For allocation of the command and tx queues, this establishes the overall
189 * size of the largest command we send to uCode, except for commands that
190 * aren't fully copied and use other TFD space.
192 struct iwl_device_cmd {
195 struct iwl_cmd_header hdr; /* uCode API */
196 u8 payload[DEF_CMD_PAYLOAD_SIZE];
199 struct iwl_cmd_header_wide hdr_wide;
200 u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
201 sizeof(struct iwl_cmd_header_wide) +
202 sizeof(struct iwl_cmd_header)];
207 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
210 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea; the hardware supports up to 20
213 #define IWL_MAX_CMD_TBS_PER_TFD 2
216 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
218 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
219 * ring. The transport layer doesn't map the command's buffer to DMA, but
220 * rather copies it to a previously allocated DMA buffer. This flag tells
221 * the transport layer not to copy the command, but to map the existing
222 * buffer (that is passed in) instead. This saves the memcpy and allows
223 * commands that are bigger than the fixed buffer to be submitted.
224 * Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
225 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
226 * chunk internally and free it again after the command completes. This
227 * can (currently) be used only once per command.
228 * Note that a TFD entry after a DUP one cannot be a normal copied one.
230 enum iwl_hcmd_dataflag {
231 IWL_HCMD_DFL_NOCOPY = BIT(0),
232 IWL_HCMD_DFL_DUP = BIT(1),
235 enum iwl_error_event_table_status {
236 IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
237 IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
238 IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
242 * struct iwl_host_cmd - Host command to the uCode
 * @data: array of chunks that compose the data of the host command
245 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
246 * @_rx_page_order: (internally used to free response packet)
247 * @_rx_page_addr: (internally used to free response packet)
248 * @flags: can be CMD_*
249 * @len: array of the lengths of the chunks in data
250 * @dataflags: IWL_HCMD_DFL_*
251 * @id: command id of the host command, for wide commands encoding the
252 * version and group as well
254 struct iwl_host_cmd {
255 const void *data[IWL_MAX_CMD_TBS_PER_TFD];
256 struct iwl_rx_packet *resp_pkt;
        unsigned long _rx_page_addr;
        u16 _rx_page_order;
        u32 flags;
        u32 id;
        u16 len[IWL_MAX_CMD_TBS_PER_TFD];
        u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};
266 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
268 free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
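/*
 * Illustrative sketch of a synchronous host command with %CMD_WANT_SKB
 * (MY_CMD_ID and my_data are placeholders, error handling trimmed); the
 * response buffer must be released with iwl_free_resp():
 *
 *      struct iwl_host_cmd hcmd = {
 *              .id = MY_CMD_ID,
 *              .flags = CMD_WANT_SKB,
 *              .data = { &my_data, },
 *              .len = { sizeof(my_data), },
 *      };
 *
 *      if (!iwl_trans_send_cmd(trans, &hcmd)) {
 *              u32 len = iwl_rx_packet_payload_len(hcmd.resp_pkt);
 *
 *              ... use up to len bytes of hcmd.resp_pkt->data ...
 *              iwl_free_resp(&hcmd);
 *      }
 */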
271 struct iwl_rx_cmd_buffer {
276 unsigned int truesize;
280 static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
282 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
285 static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
290 static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
292 r->_page_stolen = true;
297 static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
299 __free_pages(r->_page, r->_rx_page_order);
302 #define MAX_NO_RECLAIM_CMDS 6
304 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
 * Maximum number of HW queues the transport layer supports
310 #define IWL_MAX_HW_QUEUES 32
311 #define IWL_MAX_TVQM_QUEUES 512
313 #define IWL_MAX_TID_COUNT 8
314 #define IWL_MGMT_TID 15
315 #define IWL_FRAME_LIMIT 64
316 #define IWL_MAX_RX_HW_QUEUES 16
 * enum iwl_d3_status - WoWLAN image/device status
320 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
321 * @IWL_D3_STATUS_RESET: device was reset while suspended
329 * enum iwl_trans_status: transport status flags
330 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
331 * @STATUS_DEVICE_ENABLED: APM is enabled
332 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
333 * @STATUS_INT_ENABLED: interrupts are enabled
334 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
335 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
336 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are allowed
339 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
340 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
342 enum iwl_trans_status {
        STATUS_SYNC_HCMD_ACTIVE,
        STATUS_DEVICE_ENABLED,
        STATUS_TPOWER_PMI,
        STATUS_INT_ENABLED,
        STATUS_RFKILL_HW,
        STATUS_RFKILL_OPMODE,
        STATUS_FW_ERROR,
        STATUS_TRANS_GOING_IDLE,
        STATUS_TRANS_IDLE,
        STATUS_TRANS_DEAD,
};
356 iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
360 return get_order(2 * 1024);
362 return get_order(4 * 1024);
364 return get_order(8 * 1024);
366 return get_order(12 * 1024);
373 struct iwl_hcmd_names {
        u8 cmd_id;
        const char *const cmd_name;
};
378 #define HCMD_NAME(x) \
379 { .cmd_id = x, .cmd_name = #x }
381 struct iwl_hcmd_arr {
        const struct iwl_hcmd_names *arr;
        int size;
};
386 #define HCMD_ARR(x) \
387 { .arr = x, .size = ARRAY_SIZE(x) }
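/*
 * Illustrative sketch (MY_CMD_A and MY_CMD_B are hypothetical command IDs):
 * how an op_mode can build the command-name tables handed to the transport
 * via @command_groups, used only by iwl_get_cmd_string() for debug prints:
 *
 *      static const struct iwl_hcmd_names my_legacy_names[] = {
 *              HCMD_NAME(MY_CMD_A),
 *              HCMD_NAME(MY_CMD_B),
 *      };
 *
 *      static const struct iwl_hcmd_arr my_groups[] = {
 *              [0x0] = HCMD_ARR(my_legacy_names),
 *      };
 */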
390 * struct iwl_trans_config - transport configuration
392 * @op_mode: pointer to the upper layer.
393 * @cmd_queue: the index of the command queue.
394 * Must be set before start_fw.
395 * @cmd_fifo: the fifo for host commands
396 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
397 * @no_reclaim_cmds: Some devices erroneously don't set the
 * SEQ_RX_FRAME bit on some notifications; this is the
399 * list of such notifications to filter. Max length is
400 * %MAX_NO_RECLAIM_CMDS.
401 * @n_no_reclaim_cmds: # of commands in list
402 * @rx_buf_size: RX buffer size needed for A-MSDUs
 * if unset, 4k will be used as the RX buffer size
404 * @bc_table_dword: set to true if the BC table expects the byte count to be
405 * in DWORD (as opposed to bytes)
406 * @scd_set_active: should the transport configure the SCD for HCMD queue
407 * @sw_csum_tx: transport should compute the TCP checksum
408 * @command_groups: array of command groups, each member is an array of the
409 * commands in the group; for debugging only
410 * @command_groups_size: number of command groups, to avoid illegal access
411 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
412 * space for at least two pointers
414 struct iwl_trans_config {
415 struct iwl_op_mode *op_mode;
419 unsigned int cmd_q_wdg_timeout;
420 const u8 *no_reclaim_cmds;
421 unsigned int n_no_reclaim_cmds;
423 enum iwl_amsdu_size rx_buf_size;
427 const struct iwl_hcmd_arr *command_groups;
428 int command_groups_size;
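/*
 * A minimal, illustrative configuration as an op_mode might pass it to
 * iwl_trans_configure() before start_fw (values are placeholders and most
 * optional fields are left at their defaults; my_groups as in the sketch
 * above):
 *
 *      struct iwl_trans_config trans_cfg = {
 *              .op_mode = op_mode,
 *              .rx_buf_size = IWL_AMSDU_4K,
 *              .command_groups = my_groups,
 *              .command_groups_size = ARRAY_SIZE(my_groups),
 *      };
 *
 *      iwl_trans_configure(trans, &trans_cfg);
 */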
433 struct iwl_trans_dump_data {
440 struct iwl_trans_txq_scd_cfg {
449 * struct iwl_trans_rxq_dma_data - RX queue DMA data
450 * @fr_bd_cb: DMA address of free BD cyclic buffer
451 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
452 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
453 * @ur_bd_cb: DMA address of used BD cyclic buffer
455 struct iwl_trans_rxq_dma_data {
463 * struct iwl_trans_ops - transport specific operations
465 * All the handlers MUST be implemented
467 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
468 * out of a low power state. From that point on, the HW can send
469 * interrupts. May sleep.
470 * @op_mode_leave: Turn off the HW RF kill indication if on
472 * @start_fw: allocates and inits all the resources for the transport
473 * layer. Also kick a fw image.
475 * @fw_alive: called when the fw sends alive notification. If the fw provides
476 * the SCD base address in SRAM, then provide it here, or 0 otherwise.
478 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
479 * the HW. If low_power is true, the NIC will be put in low power state.
480 * From that point on, the HW will be stopped but will still issue an
481 * interrupt if the HW RF kill switch is triggered.
482 * This callback must do the right thing and not crash even if %start_hw()
483 * was called but not &start_fw(). May sleep.
484 * @d3_suspend: put the device into the correct mode for WoWLAN during
485 * suspend. This is optional, if not implemented WoWLAN will not be
486 * supported. This callback may sleep.
487 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
488 * talk to the WoWLAN image to get its status. This is optional, if not
489 * implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
491 * If RFkill is asserted in the middle of a SYNC host command, it must
492 * return -ERFKILL straight away.
493 * May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 * ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
496 * the CSUM will be taken care of (TCP CSUM and IP header in case of
497 * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
498 * header if it is IPv4.
 * @reclaim: free packets up to ssn. Returns a list of freed packets.
502 * @txq_enable: setup a queue. To setup an AC queue, use the
503 * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
504 * this one. The op_mode must not configure the HCMD queue. The scheduler
505 * configuration may be %NULL, in which case the hardware will not be
506 * configured. If true is returned, the operation mode needs to increment
507 * the sequence number of the packets routed to this queue because of a
508 * hardware scheduler bug. May sleep.
509 * @txq_disable: de-configure a Tx queue to send AMPDUs
511 * @txq_set_shared_mode: change Tx queue shared/unshared marking
512 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
513 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
514 * @freeze_txq_timer: prevents the timer of the queue from firing until the
515 * queue is set to awake. Must be atomic.
516 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
517 * that the transport needs to refcount the calls since this function
518 * will be called several times with block = true, and then the queues
519 * need to be unblocked only after the same number of calls with
521 * @write8: write a u8 to a register at offset ofs from the BAR
522 * @write32: write a u32 to a register at offset ofs from the BAR
523 * @read32: read a u32 register at offset ofs from the BAR
524 * @read_prph: read a DWORD from a periphery register
525 * @write_prph: write a DWORD to a periphery register
526 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
532 * @set_pmi: set the power pmi state
533 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
534 * Sleeping is not allowed between grab_nic_access and
535 * release_nic_access.
536 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
537 * must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
539 * @ref: grab a reference to the transport/FW layers, disallowing
540 * certain low power states
541 * @unref: release a reference previously taken with @ref. Note that
542 * initially the reference count is 1, making an initial @unref
543 * necessary to allow low power states.
544 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
545 * TX'ed commands and similar. The buffer will be vfree'd by the caller.
546 * Note that the transport must fill in the proper file headers.
547 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
548 * of the trans debugfs
550 struct iwl_trans_ops {
552 int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
553 void (*op_mode_leave)(struct iwl_trans *iwl_trans);
554 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
556 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
557 void (*stop_device)(struct iwl_trans *trans, bool low_power);
559 void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
560 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
561 bool test, bool reset);
563 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
565 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
566 struct iwl_device_cmd *dev_cmd, int queue);
567 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
568 struct sk_buff_head *skbs);
570 bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
571 const struct iwl_trans_txq_scd_cfg *cfg,
572 unsigned int queue_wdg_timeout);
573 void (*txq_disable)(struct iwl_trans *trans, int queue,
575 /* 22000 functions */
576 int (*txq_alloc)(struct iwl_trans *trans,
577 __le16 flags, u8 sta_id, u8 tid,
578 int cmd_id, int size,
579 unsigned int queue_wdg_timeout);
580 void (*txq_free)(struct iwl_trans *trans, int queue);
581 int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
582 struct iwl_trans_rxq_dma_data *data);
584 void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
587 int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
588 int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
589 void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
591 void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
593 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
594 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
595 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
596 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
597 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
598 int (*read_mem)(struct iwl_trans *trans, u32 addr,
599 void *buf, int dwords);
600 int (*write_mem)(struct iwl_trans *trans, u32 addr,
601 const void *buf, int dwords);
602 void (*configure)(struct iwl_trans *trans,
603 const struct iwl_trans_config *trans_cfg);
604 void (*set_pmi)(struct iwl_trans *trans, bool state);
605 void (*sw_reset)(struct iwl_trans *trans);
606 bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
607 void (*release_nic_access)(struct iwl_trans *trans,
608 unsigned long *flags);
609 void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
611 void (*ref)(struct iwl_trans *trans);
612 void (*unref)(struct iwl_trans *trans);
613 int (*suspend)(struct iwl_trans *trans);
614 void (*resume)(struct iwl_trans *trans);
616 struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
618 void (*debugfs_cleanup)(struct iwl_trans *trans);
619 void (*sync_nmi)(struct iwl_trans *trans);
623 * enum iwl_trans_state - state of the transport layer
625 * @IWL_TRANS_NO_FW: no fw has sent an alive response
626 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
628 enum iwl_trans_state {
        IWL_TRANS_NO_FW = 0,
        IWL_TRANS_FW_ALIVE = 1,
};
634 * DOC: Platform power management
636 * There are two types of platform power management: system-wide
637 * (WoWLAN) and runtime.
639 * In system-wide power management the entire platform goes into a low
640 * power state (e.g. idle or suspend to RAM) at the same time and the
641 * device is configured as a wakeup source for the entire platform.
642 * This is usually triggered by userspace activity (e.g. the user
643 * presses the suspend button or a power management daemon decides to
644 * put the platform in low power mode). The device's behavior in this
645 * mode is dictated by the wake-on-WLAN configuration.
647 * In runtime power management, only the devices which are themselves
648 * idle enter a low power state. This is done at runtime, which means
649 * that the entire system is still running normally. This mode is
650 * usually triggered automatically by the device driver and requires
651 * the ability to enter and exit the low power modes in a very short
 * time, so there is not much impact on usability.
654 * The terms used for the device's behavior are as follows:
656 * - D0: the device is fully powered and the host is awake;
657 * - D3: the device is in low power mode and only reacts to
658 * specific events (e.g. magic-packet received or scan
660 * - D0I3: the device is in low power mode and reacts to any
661 * activity (e.g. RX);
663 * These terms reflect the power modes in the firmware and are not to
664 * be confused with the physical device power state. The NIC can be
665 * in D0I3 mode even if, for instance, the PCI device is in D3 state.
669 * enum iwl_plat_pm_mode - platform power management mode
671 * This enumeration describes the device's platform power management
672 * behavior when in idle mode (i.e. runtime power management) or when
673 * in system-wide suspend (i.e WoWLAN).
675 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
676 * device. At runtime, this means that nothing happens and the
 * device always remains active. In system-wide suspend mode,
 * it means that all connections will be closed automatically
679 * by mac80211 before the platform is suspended.
680 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 * For runtime power management, this mode is not officially
 * supported.
683 * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
685 enum iwl_plat_pm_mode {
686 IWL_PLAT_PM_MODE_DISABLED,
        IWL_PLAT_PM_MODE_D3,
        IWL_PLAT_PM_MODE_D0I3,
};
691 /* Max time to wait for trans to become idle/non-idle on d0i3
 * enter/exit (in msecs).
 */
694 #define IWL_TRANS_IDLE_TIMEOUT 2000
696 /* Max time to wait for nmi interrupt */
697 #define IWL_TRANS_NMI_TIMEOUT (HZ / 4)
700 * struct iwl_dram_data
701 * @physical: page phy pointer
702 * @block: pointer to the allocated block/page
703 * @size: size of the block/page
705 struct iwl_dram_data {
712 * struct iwl_self_init_dram - dram data used by self init process
713 * @fw: lmac and umac dram data
714 * @fw_cnt: total number of items in array
715 * @paging: paging dram data
716 * @paging_cnt: total number of items in array
718 struct iwl_self_init_dram {
719 struct iwl_dram_data *fw;
721 struct iwl_dram_data *paging;
726 * struct iwl_trans - transport common data
728 * @ops - pointer to iwl_trans_ops
729 * @op_mode - pointer to the op_mode
730 * @cfg - pointer to the configuration
731 * @drv - pointer to iwl_drv
732 * @status: a bit-mask of transport status flags
733 * @dev - pointer to struct device * that represents the device
734 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
735 * 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
737 * @hw_id: a u32 with the ID of the device / sub-device.
738 * Set during transport allocation.
739 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
740 * @pm_support: set to true in start_hw if link pm is supported
741 * @ltr_enabled: set to true if the LTR is enabled
742 * @wide_cmd_header: true when ucode supports wide command header format
743 * @num_rx_queues: number of RX queues allocated by the transport;
744 * the transport must set this before calling iwl_drv_start()
745 * @iml_len: the length of the image loader
746 * @iml: a pointer to the image loader itself
747 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
748 * The user should use iwl_trans_{alloc,free}_tx_cmd.
749 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
750 * starting the firmware, used for tracing
751 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
752 * start of the 802.11 header in the @rx_mpdu_cmd
753 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
754 * @dbg_dest_tlv: points to the destination TLV for debug
755 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
756 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
757 * @dbg_n_dest_reg: num of reg_ops in %dbg_dest_tlv
758 * @num_blocks: number of blocks in fw_mon
759 * @fw_mon: address of the buffers for firmware monitor
760 * @system_pm_mode: the system-wide power management mode in use.
761 * This mode is set dynamically, depending on the WoWLAN values
762 * configured from the userspace at runtime.
763 * @runtime_pm_mode: the runtime power management mode in use. This
764 * mode is set during the initialization phase and is not
765 * supposed to change during runtime.
766 * @dbg_rec_on: true iff there is a fw debug recording currently active
767 * @lmac_error_event_table: addrs of lmacs error tables
768 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates which error table
 *	pointers were received via TLV. Use enum &iwl_error_event_table_status
struct iwl_trans {
        const struct iwl_trans_ops *ops;
774 struct iwl_op_mode *op_mode;
775 const struct iwl_cfg *cfg;
777 enum iwl_trans_state state;
778 unsigned long status;
787 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
792 const struct iwl_hcmd_arr *command_groups;
793 int command_groups_size;
794 bool wide_cmd_header;
801 /* The following fields are internal only */
802 struct kmem_cache *dev_cmd_pool;
803 char dev_cmd_pool_name[50];
805 struct dentry *dbgfs_dir;
807 #ifdef CONFIG_LOCKDEP
808 struct lockdep_map sync_cmd_lockdep_map;
811 struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
812 struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
814 bool external_ini_loaded;
817 const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
818 const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
819 struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
822 struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
823 struct iwl_self_init_dram init_dram;
825 enum iwl_plat_pm_mode system_pm_mode;
826 enum iwl_plat_pm_mode runtime_pm_mode;
830 u32 lmac_error_event_table[2];
831 u32 umac_error_event_table;
832 unsigned int error_event_table_tlv_status;
834 /* pointer to trans specific struct */
        /* Ensure that this pointer will always be aligned to sizeof pointer */
        char trans_specific[0] __aligned(sizeof(void *));
};
839 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
840 int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
842 static inline void iwl_trans_configure(struct iwl_trans *trans,
843 const struct iwl_trans_config *trans_cfg)
845 trans->op_mode = trans_cfg->op_mode;
847 trans->ops->configure(trans, trans_cfg);
848 WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
851 static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
855 return trans->ops->start_hw(trans, low_power);
858 static inline int iwl_trans_start_hw(struct iwl_trans *trans)
860 return trans->ops->start_hw(trans, true);
863 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
867 if (trans->ops->op_mode_leave)
868 trans->ops->op_mode_leave(trans);
870 trans->op_mode = NULL;
872 trans->state = IWL_TRANS_NO_FW;
875 static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
879 trans->state = IWL_TRANS_FW_ALIVE;
881 trans->ops->fw_alive(trans, scd_addr);
884 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
885 const struct fw_img *fw,
890 WARN_ON_ONCE(!trans->rx_mpdu_cmd);
892 clear_bit(STATUS_FW_ERROR, &trans->status);
893 return trans->ops->start_fw(trans, fw, run_in_rfkill);
896 static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
901 trans->ops->stop_device(trans, low_power);
903 trans->state = IWL_TRANS_NO_FW;
906 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
908 _iwl_trans_stop_device(trans, true);
911 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
915 if (trans->ops->d3_suspend)
916 trans->ops->d3_suspend(trans, test, reset);
919 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
920 enum iwl_d3_status *status,
921 bool test, bool reset)
924 if (!trans->ops->d3_resume)
927 return trans->ops->d3_resume(trans, status, test, reset);
930 static inline int iwl_trans_suspend(struct iwl_trans *trans)
932 if (!trans->ops->suspend)
935 return trans->ops->suspend(trans);
938 static inline void iwl_trans_resume(struct iwl_trans *trans)
940 if (trans->ops->resume)
941 trans->ops->resume(trans);
944 static inline struct iwl_trans_dump_data *
945 iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
947 if (!trans->ops->dump_data)
949 return trans->ops->dump_data(trans, dump_mask);
952 static inline struct iwl_device_cmd *
953 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
955 return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
958 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
960 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
961 struct iwl_device_cmd *dev_cmd)
963 kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
966 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
967 struct iwl_device_cmd *dev_cmd, int queue)
969 if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
972 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
973 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
977 return trans->ops->tx(trans, skb, dev_cmd, queue);
980 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
981 int ssn, struct sk_buff_head *skbs)
983 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
984 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
988 trans->ops->reclaim(trans, queue, ssn, skbs);
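/*
 * Illustrative sketch of the Tx data-path helpers above (simplified, no
 * error handling; queue and ssn are placeholders):
 *
 *      struct iwl_device_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *      ... fill dev_cmd for this skb ...
 *      iwl_trans_tx(trans, skb, dev_cmd, queue);
 *
 *      ... later, once the firmware reports the frames as completed ...
 *      struct sk_buff_head skbs;
 *
 *      __skb_queue_head_init(&skbs);
 *      iwl_trans_reclaim(trans, queue, ssn, &skbs);
 */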
991 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
994 trans->ops->txq_disable(trans, queue, configure_scd);
998 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
999 const struct iwl_trans_txq_scd_cfg *cfg,
1000 unsigned int queue_wdg_timeout)
1004 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1005 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1009 return trans->ops->txq_enable(trans, queue, ssn,
1010 cfg, queue_wdg_timeout);
1014 iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
1015 struct iwl_trans_rxq_dma_data *data)
1017 if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
1020 return trans->ops->rxq_dma_data(trans, queue, data);
1024 iwl_trans_txq_free(struct iwl_trans *trans, int queue)
1026 if (WARN_ON_ONCE(!trans->ops->txq_free))
1029 trans->ops->txq_free(trans, queue);
1033 iwl_trans_txq_alloc(struct iwl_trans *trans,
1034 __le16 flags, u8 sta_id, u8 tid,
1035 int cmd_id, int size,
1036 unsigned int wdg_timeout)
1040 if (WARN_ON_ONCE(!trans->ops->txq_alloc))
1043 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1044 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1048 return trans->ops->txq_alloc(trans, flags, sta_id, tid,
1049 cmd_id, size, wdg_timeout);
1052 static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
1053 int queue, bool shared_mode)
1055 if (trans->ops->txq_set_shared_mode)
1056 trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
1059 static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1060 int fifo, int sta_id, int tid,
1061 int frame_limit, u16 ssn,
1062 unsigned int queue_wdg_timeout)
1064 struct iwl_trans_txq_scd_cfg cfg = {
1068 .frame_limit = frame_limit,
1069 .aggregate = sta_id >= 0,
1072 iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1076 void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1077 unsigned int queue_wdg_timeout)
1079 struct iwl_trans_txq_scd_cfg cfg = {
1082 .tid = IWL_MAX_TID_COUNT,
1083 .frame_limit = IWL_FRAME_LIMIT,
1087 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1090 static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1094 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1095 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1099 if (trans->ops->freeze_txq_timer)
1100 trans->ops->freeze_txq_timer(trans, txqs, freeze);
1103 static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
1106 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1107 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1111 if (trans->ops->block_txq_ptrs)
1112 trans->ops->block_txq_ptrs(trans, block);
1115 static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
1118 if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
1121 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1122 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1126 return trans->ops->wait_tx_queues_empty(trans, txqs);
1129 static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
1131 if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
1134 if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1135 IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1139 return trans->ops->wait_txq_empty(trans, queue);
1142 static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1144 trans->ops->write8(trans, ofs, val);
1147 static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1149 trans->ops->write32(trans, ofs, val);
1152 static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
1154 return trans->ops->read32(trans, ofs);
1157 static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
1159 return trans->ops->read_prph(trans, ofs);
1162 static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1165 return trans->ops->write_prph(trans, ofs, val);
1168 static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1169 void *buf, int dwords)
1171 return trans->ops->read_mem(trans, addr, buf, dwords);
1174 #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
	do {								\
		if (__builtin_constant_p(bufsize))			\
			BUILD_BUG_ON((bufsize) % sizeof(u32));		\
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
1181 static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1185 if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1191 static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1192 const void *buf, int dwords)
1194 return trans->ops->write_mem(trans, addr, buf, dwords);
1197 static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1200 return iwl_trans_write_mem(trans, addr, &val, 1);
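/*
 * Illustrative sketch (MY_SRAM_ADDR is a placeholder address): device SRAM
 * is always accessed in DWORDs through the helpers above:
 *
 *      u32 val = iwl_trans_read_mem32(trans, MY_SRAM_ADDR);
 *
 *      iwl_trans_write_mem32(trans, MY_SRAM_ADDR, val | BIT(0));
 */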
1203 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
1205 if (trans->ops->set_pmi)
1206 trans->ops->set_pmi(trans, state);
1209 static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
1211 if (trans->ops->sw_reset)
1212 trans->ops->sw_reset(trans);
1216 iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
1218 trans->ops->set_bits_mask(trans, reg, mask, value);
1221 #define iwl_trans_grab_nic_access(trans, flags) \
1222 __cond_lock(nic_access, \
1223 likely((trans)->ops->grab_nic_access(trans, flags)))
1225 static inline void __releases(nic_access)
1226 iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
1228 trans->ops->release_nic_access(trans, flags);
1229 __release(nic_access);
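/*
 * Illustrative sketch of the grab/release pattern (MY_PRPH_REG is a
 * placeholder register): the NIC is guaranteed to be awake only between the
 * two calls, and sleeping in between is not allowed:
 *
 *      unsigned long flags;
 *
 *      if (iwl_trans_grab_nic_access(trans, &flags)) {
 *              u32 val = iwl_trans_read_prph(trans, MY_PRPH_REG);
 *
 *              iwl_trans_release_nic_access(trans, &flags);
 *      }
 */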
1232 static inline void iwl_trans_fw_error(struct iwl_trans *trans)
1234 if (WARN_ON_ONCE(!trans->op_mode))
1237 /* prevent double restarts due to the same erroneous FW */
1238 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
1239 iwl_op_mode_nic_error(trans->op_mode);
1242 static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
1244 if (trans->ops->sync_nmi)
1245 trans->ops->sync_nmi(trans);
1248 /*****************************************************
1249 * transport helper functions
1250 *****************************************************/
1251 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
1254 const struct iwl_trans_ops *ops);
1255 void iwl_trans_free(struct iwl_trans *trans);
1256 void iwl_trans_ref(struct iwl_trans *trans);
1257 void iwl_trans_unref(struct iwl_trans *trans);
1259 /*****************************************************
1260 * driver (transport) register/unregister functions
1261 ******************************************************/
1262 int __must_check iwl_pci_register_driver(void);
1263 void iwl_pci_unregister_driver(void);
1265 #endif /* __iwl_trans_h__ */