2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
40 #include "t4fw_version.h"
43 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp)
61 u32 val = t4_read_reg(adapter, reg);
63 if (!!(val & mask) == polarity) {
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay)
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
83 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program
85 * @addr: the register address
86 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field
89 * Sets a register field specified by the supplied mask to the
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
95 u32 v = t4_read_reg(adapter, addr) & ~mask;
97 t4_write_reg(adapter, addr, v | val);
98 (void) t4_read_reg(adapter, addr); /* flush */
102 * t4_read_indirect - read indirectly addressed registers
104 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read
110 * Reads registers that are accessed indirectly through an address/data
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx)
118 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
161 if (is_t4(adap->params.chip))
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
167 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 * Configuration Space read. (None of the other fields matter when
169 * ENABLE is 0 so a simple register write is easier than a
170 * read-modify-write via t4_set_reg_field().)
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
176 * t4_report_fw_error - report firmware error
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
183 static void t4_report_fw_error(struct adapter *adap)
185 static const char *const reason[] = {
186 "Crash", /* PCIE_FW_EVAL_CRASH */
187 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 "Reserved", /* reserved */
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198 if (pcie_fw & PCIE_FW_ERR_F)
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 reason[PCIE_FW_EVAL_G(pcie_fw)]);
204 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
206 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
209 for ( ; nflit; nflit--, mbox_addr += 8)
210 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
214 * Handle a FW assertion reported in a mailbox.
216 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
218 struct fw_debug_cmd asrt;
220 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
221 dev_alert(adap->pdev_dev,
222 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
223 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
224 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
228 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
229 * @adapter: the adapter
230 * @cmd: the Firmware Mailbox Command or Reply
231 * @size: command length in bytes
232 * @access: the time (ms) needed to access the Firmware Mailbox
233 * @execute: the time (ms) the command spent being executed
235 static void t4_record_mbox(struct adapter *adapter,
236 const __be64 *cmd, unsigned int size,
237 int access, int execute)
239 struct mbox_cmd_log *log = adapter->mbox_log;
240 struct mbox_cmd *entry;
243 entry = mbox_cmd_log_entry(log, log->cursor++);
244 if (log->cursor == log->size)
247 for (i = 0; i < size / 8; i++)
248 entry->cmd[i] = be64_to_cpu(cmd[i]);
249 while (i < MBOX_LEN / 8)
251 entry->timestamp = jiffies;
252 entry->seqno = log->seqno++;
253 entry->access = access;
254 entry->execute = execute;
258 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
260 * @mbox: index of the mailbox to use
261 * @cmd: the command to write
262 * @size: command length in bytes
263 * @rpl: where to optionally store the reply
264 * @sleep_ok: if true we may sleep while awaiting command completion
265 * @timeout: time to wait for command to finish before timing out
267 * Sends the given command to FW through the selected mailbox and waits
268 * for the FW to execute the command. If @rpl is not %NULL it is used to
269 * store the FW's reply to the command. The command and its optional
270 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
271 * to respond. @sleep_ok determines whether we may sleep while awaiting
272 * the response. If sleeping is allowed we use progressive backoff
275 * The return value is 0 on success or a negative errno on failure. A
276 * failure can happen either because we are not able to execute the
277 * command or FW executes it but signals an error. In the latter case
278 * the return value is the error code indicated by FW (negated).
280 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
281 int size, void *rpl, bool sleep_ok, int timeout)
283 static const int delay[] = {
284 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
287 struct mbox_list entry;
292 int i, ms, delay_idx, ret;
293 const __be64 *p = cmd;
294 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
295 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
296 __be64 cmd_rpl[MBOX_LEN / 8];
299 if ((size & 15) || size > MBOX_LEN)
303 * If the device is off-line, as in EEH, commands will time out.
304 * Fail them early so we don't waste time waiting.
306 if (adap->pdev->error_state != pci_channel_io_normal)
309 /* If we have a negative timeout, that implies that we can't sleep. */
315 /* Queue ourselves onto the mailbox access list. When our entry is at
316 * the front of the list, we have rights to access the mailbox. So we
317 * wait [for a while] till we're at the front [or bail out with an
320 spin_lock(&adap->mbox_lock);
321 list_add_tail(&entry.list, &adap->mlist.list);
322 spin_unlock(&adap->mbox_lock);
327 for (i = 0; ; i += ms) {
328 /* If we've waited too long, return a busy indication. This
329 * really ought to be based on our initial position in the
330 * mailbox access list but this is a start. We very rearely
331 * contend on access to the mailbox ...
333 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
334 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
335 spin_lock(&adap->mbox_lock);
336 list_del(&entry.list);
337 spin_unlock(&adap->mbox_lock);
338 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
339 t4_record_mbox(adap, cmd, size, access, ret);
343 /* If we're at the head, break out and start the mailbox
346 if (list_first_entry(&adap->mlist.list, struct mbox_list,
350 /* Delay for a bit before checking again ... */
352 ms = delay[delay_idx]; /* last element may repeat */
353 if (delay_idx < ARRAY_SIZE(delay) - 1)
361 /* Loop trying to get ownership of the mailbox. Return an error
362 * if we can't gain ownership.
364 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
365 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367 if (v != MBOX_OWNER_DRV) {
368 spin_lock(&adap->mbox_lock);
369 list_del(&entry.list);
370 spin_unlock(&adap->mbox_lock);
371 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
372 t4_record_mbox(adap, cmd, size, access, ret);
376 /* Copy in the new mailbox command and send it on its way ... */
377 t4_record_mbox(adap, cmd, size, access, 0);
378 for (i = 0; i < size; i += 8)
379 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
381 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
382 t4_read_reg(adap, ctl_reg); /* flush write */
388 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
392 ms = delay[delay_idx]; /* last element may repeat */
393 if (delay_idx < ARRAY_SIZE(delay) - 1)
399 v = t4_read_reg(adap, ctl_reg);
400 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
401 if (!(v & MBMSGVALID_F)) {
402 t4_write_reg(adap, ctl_reg, 0);
406 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
407 res = be64_to_cpu(cmd_rpl[0]);
409 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
410 fw_asrt(adap, data_reg);
411 res = FW_CMD_RETVAL_V(EIO);
413 memcpy(rpl, cmd_rpl, size);
416 t4_write_reg(adap, ctl_reg, 0);
419 t4_record_mbox(adap, cmd_rpl,
420 MBOX_LEN, access, execute);
421 spin_lock(&adap->mbox_lock);
422 list_del(&entry.list);
423 spin_unlock(&adap->mbox_lock);
424 return -FW_CMD_RETVAL_G((int)res);
428 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
429 t4_record_mbox(adap, cmd, size, access, ret);
430 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
431 *(const u8 *)cmd, mbox);
432 t4_report_fw_error(adap);
433 spin_lock(&adap->mbox_lock);
434 list_del(&entry.list);
435 spin_unlock(&adap->mbox_lock);
440 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
441 void *rpl, bool sleep_ok)
443 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
447 static int t4_edc_err_read(struct adapter *adap, int idx)
449 u32 edc_ecc_err_addr_reg;
452 if (is_t4(adap->params.chip)) {
453 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
456 if (idx != 0 && idx != 1) {
457 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
461 edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
462 rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
465 "edc%d err addr 0x%x: 0x%x.\n",
466 idx, edc_ecc_err_addr_reg,
467 t4_read_reg(adap, edc_ecc_err_addr_reg));
469 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
471 (unsigned long long)t4_read_reg64(adap, rdata_reg),
472 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
473 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
485 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
487 * @win: PCI-E Memory Window to use
488 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
489 * @addr: address within indicated memory type
490 * @len: amount of memory to transfer
491 * @hbuf: host memory buffer
492 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
494 * Reads/writes an [almost] arbitrary memory region in the firmware: the
495 * firmware memory address and host buffer must be aligned on 32-bit
496 * boudaries; the length may be arbitrary. The memory is transferred as
497 * a raw byte sequence from/to the firmware's memory. If this memory
498 * contains data structures which contain multi-byte integers, it's the
499 * caller's responsibility to perform appropriate byte order conversions.
501 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
502 u32 len, void *hbuf, int dir)
504 u32 pos, offset, resid, memoffset;
505 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
508 /* Argument sanity checks ...
510 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
514 /* It's convenient to be able to handle lengths which aren't a
515 * multiple of 32-bits because we often end up transferring files to
516 * the firmware. So we'll handle that by normalizing the length here
517 * and then handling any residual transfer at the end.
522 /* Offset into the region of memory which is being accessed
525 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
526 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
528 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
529 if (mtype != MEM_MC1)
530 memoffset = (mtype * (edc_size * 1024 * 1024));
532 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
533 MA_EXT_MEMORY0_BAR_A));
534 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
537 /* Determine the PCIE_MEM_ACCESS_OFFSET */
538 addr = addr + memoffset;
540 /* Each PCI-E Memory Window is programmed with a window size -- or
541 * "aperture" -- which controls the granularity of its mapping onto
542 * adapter memory. We need to grab that aperture in order to know
543 * how to use the specified window. The window is also programmed
544 * with the base address of the Memory Window in BAR0's address
545 * space. For T4 this is an absolute PCI-E Bus Address. For T5
546 * the address is relative to BAR0.
548 mem_reg = t4_read_reg(adap,
549 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
551 mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
552 mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
553 if (is_t4(adap->params.chip))
554 mem_base -= adap->t4_bar0;
555 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
557 /* Calculate our initial PCI-E Memory Window Position and Offset into
560 pos = addr & ~(mem_aperture-1);
563 /* Set up initial PCI-E Memory Window to cover the start of our
564 * transfer. (Read it back to ensure that changes propagate before we
565 * attempt to use the new value.)
568 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
571 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
573 /* Transfer data to/from the adapter as long as there's an integral
574 * number of 32-bit transfers to complete.
576 * A note on Endianness issues:
578 * The "register" reads and writes below from/to the PCI-E Memory
579 * Window invoke the standard adapter Big-Endian to PCI-E Link
580 * Little-Endian "swizzel." As a result, if we have the following
581 * data in adapter memory:
583 * Memory: ... | b0 | b1 | b2 | b3 | ...
584 * Address: i+0 i+1 i+2 i+3
586 * Then a read of the adapter memory via the PCI-E Memory Window
591 * [ b3 | b2 | b1 | b0 ]
593 * If this value is stored into local memory on a Little-Endian system
594 * it will show up correctly in local memory as:
596 * ( ..., b0, b1, b2, b3, ... )
598 * But on a Big-Endian system, the store will show up in memory
599 * incorrectly swizzled as:
601 * ( ..., b3, b2, b1, b0, ... )
603 * So we need to account for this in the reads and writes to the
604 * PCI-E Memory Window below by undoing the register read/write
608 if (dir == T4_MEMORY_READ)
609 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
612 t4_write_reg(adap, mem_base + offset,
613 (__force u32)cpu_to_le32(*buf++));
614 offset += sizeof(__be32);
615 len -= sizeof(__be32);
617 /* If we've reached the end of our current window aperture,
618 * move the PCI-E Memory Window on to the next. Note that
619 * doing this here after "len" may be 0 allows us to set up
620 * the PCI-E Memory Window for a possible final residual
623 if (offset == mem_aperture) {
627 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
630 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
635 /* If the original transfer had a length which wasn't a multiple of
636 * 32-bits, now's where we need to finish off the transfer of the
637 * residual amount. The PCI-E Memory Window has already been moved
638 * above (if necessary) to cover this final transfer.
648 if (dir == T4_MEMORY_READ) {
649 last.word = le32_to_cpu(
650 (__force __le32)t4_read_reg(adap,
652 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
653 bp[i] = last.byte[i];
656 for (i = resid; i < 4; i++)
658 t4_write_reg(adap, mem_base + offset,
659 (__force u32)cpu_to_le32(last.word));
666 /* Return the specified PCI-E Configuration Space register from our Physical
667 * Function. We try first via a Firmware LDST Command since we prefer to let
668 * the firmware own all of these registers, but if that fails we go for it
669 * directly ourselves.
671 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
673 u32 val, ldst_addrspace;
675 /* If fw_attach != 0, construct and send the Firmware LDST Command to
676 * retrieve the specified PCI-E Configuration Space register.
678 struct fw_ldst_cmd ldst_cmd;
681 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
682 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
683 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
687 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
688 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
689 ldst_cmd.u.pcie.ctrl_to_fn =
690 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
691 ldst_cmd.u.pcie.r = reg;
693 /* If the LDST Command succeeds, return the result, otherwise
694 * fall through to reading it directly ourselves ...
696 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
699 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
701 /* Read the desired Configuration Space register via the PCI-E
702 * Backdoor mechanism.
704 t4_hw_pci_read_cfg4(adap, reg, &val);
708 /* Get the window based on base passed to it.
709 * Window aperture is currently unhandled, but there is no use case for it
712 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
717 if (is_t4(adap->params.chip)) {
720 /* Truncation intentional: we only read the bottom 32-bits of
721 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
722 * mechanism to read BAR0 instead of using
723 * pci_resource_start() because we could be operating from
724 * within a Virtual Machine which is trapping our accesses to
725 * our Configuration Space and we need to set up the PCI-E
726 * Memory Window decoders with the actual addresses which will
727 * be coming across the PCI-E link.
729 bar0 = t4_read_pcie_cfg4(adap, pci_base);
731 adap->t4_bar0 = bar0;
733 ret = bar0 + memwin_base;
735 /* For T5, only relative offset inside the PCIe BAR is passed */
741 /* Get the default utility window (win0) used by everyone */
742 u32 t4_get_util_window(struct adapter *adap)
744 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
745 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
748 /* Set up memory window for accessing adapter memory ranges. (Read
749 * back MA register to ensure that changes propagate before we attempt
750 * to use the new values.)
752 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
755 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
756 memwin_base | BIR_V(0) |
757 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
759 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
763 * t4_get_regs_len - return the size of the chips register set
764 * @adapter: the adapter
766 * Returns the size of the chip's BAR0 register space.
768 unsigned int t4_get_regs_len(struct adapter *adapter)
770 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
772 switch (chip_version) {
774 return T4_REGMAP_SIZE;
778 return T5_REGMAP_SIZE;
781 dev_err(adapter->pdev_dev,
782 "Unsupported chip version %d\n", chip_version);
787 * t4_get_regs - read chip registers into provided buffer
789 * @buf: register buffer
790 * @buf_size: size (in bytes) of register buffer
792 * If the provided register buffer isn't large enough for the chip's
793 * full register range, the register dump will be truncated to the
794 * register buffer's size.
796 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
798 static const unsigned int t4_reg_ranges[] = {
1257 static const unsigned int t5_reg_ranges[] = {
2024 static const unsigned int t6_reg_ranges[] = {
2585 u32 *buf_end = (u32 *)((char *)buf + buf_size);
2586 const unsigned int *reg_ranges;
2587 int reg_ranges_size, range;
2588 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2590 /* Select the right set of register ranges to dump depending on the
2591 * adapter chip type.
2593 switch (chip_version) {
2595 reg_ranges = t4_reg_ranges;
2596 reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2600 reg_ranges = t5_reg_ranges;
2601 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2605 reg_ranges = t6_reg_ranges;
2606 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2610 dev_err(adap->pdev_dev,
2611 "Unsupported chip version %d\n", chip_version);
2615 /* Clear the register buffer and insert the appropriate register
2616 * values selected by the above register ranges.
2618 memset(buf, 0, buf_size);
2619 for (range = 0; range < reg_ranges_size; range += 2) {
2620 unsigned int reg = reg_ranges[range];
2621 unsigned int last_reg = reg_ranges[range + 1];
2622 u32 *bufp = (u32 *)((char *)buf + reg);
2624 /* Iterate across the register range filling in the register
2625 * buffer but don't write past the end of the register buffer.
2627 while (reg <= last_reg && bufp < buf_end) {
2628 *bufp++ = t4_read_reg(adap, reg);
2634 #define EEPROM_STAT_ADDR 0x7bfc
2635 #define VPD_SIZE 0x800
2636 #define VPD_BASE 0x400
2637 #define VPD_BASE_OLD 0
2638 #define VPD_LEN 1024
2639 #define CHELSIO_VPD_UNIQUE_ID 0x82
2642 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2643 * @phys_addr: the physical EEPROM address
2644 * @fn: the PCI function number
2645 * @sz: size of function-specific area
2647 * Translate a physical EEPROM address to virtual. The first 1K is
2648 * accessed through virtual addresses starting at 31K, the rest is
2649 * accessed through virtual addresses starting at 0.
2651 * The mapping is as follows:
2652 * [0..1K) -> [31K..32K)
2653 * [1K..1K+A) -> [31K-A..31K)
2654 * [1K+A..ES) -> [0..ES-A-1K)
2656 * where A = @fn * @sz, and ES = EEPROM size.
2658 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2661 if (phys_addr < 1024)
2662 return phys_addr + (31 << 10);
2663 if (phys_addr < 1024 + fn)
2664 return 31744 - fn + phys_addr - 1024;
2665 if (phys_addr < EEPROMSIZE)
2666 return phys_addr - 1024 - fn;
2671 * t4_seeprom_wp - enable/disable EEPROM write protection
2672 * @adapter: the adapter
2673 * @enable: whether to enable or disable write protection
2675 * Enables or disables write protection on the serial EEPROM.
2677 int t4_seeprom_wp(struct adapter *adapter, bool enable)
2679 unsigned int v = enable ? 0xc : 0;
2680 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2681 return ret < 0 ? ret : 0;
2685 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2686 * @adapter: adapter to read
2687 * @p: where to store the parameters
2689 * Reads card parameters stored in VPD EEPROM.
2691 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2693 int i, ret = 0, addr;
2696 unsigned int vpdr_len, kw_offset, id_len;
2698 vpd = vmalloc(VPD_LEN);
2702 /* We have two VPD data structures stored in the adapter VPD area.
2703 * By default, Linux calculates the size of the VPD area by traversing
2704 * the first VPD area at offset 0x0, so we need to tell the OS what
2705 * our real VPD size is.
2707 ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
2711 /* Card information normally starts at VPD_BASE but early cards had
2714 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2718 /* The VPD shall have a unique identifier specified by the PCI SIG.
2719 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2720 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2721 * is expected to automatically put this entry at the
2722 * beginning of the VPD.
2724 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2726 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2730 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2731 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2736 id_len = pci_vpd_lrdt_size(vpd);
2737 if (id_len > ID_LEN)
2740 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2742 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2747 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2748 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2749 if (vpdr_len + kw_offset > VPD_LEN) {
2750 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2755 #define FIND_VPD_KW(var, name) do { \
2756 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2758 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2762 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2765 FIND_VPD_KW(i, "RV");
2766 for (csum = 0; i >= 0; i--)
2770 dev_err(adapter->pdev_dev,
2771 "corrupted VPD EEPROM, actual csum %u\n", csum);
2776 FIND_VPD_KW(ec, "EC");
2777 FIND_VPD_KW(sn, "SN");
2778 FIND_VPD_KW(pn, "PN");
2779 FIND_VPD_KW(na, "NA");
2782 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2784 memcpy(p->ec, vpd + ec, EC_LEN);
2786 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2787 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2789 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2790 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2792 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2793 strim((char *)p->na);
2797 return ret < 0 ? ret : 0;
2801 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2802 * @adapter: adapter to read
2803 * @p: where to store the parameters
2805 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2806 * Clock. This can only be called after a connection to the firmware
2809 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2811 u32 cclk_param, cclk_val;
2814 /* Grab the raw VPD parameters.
2816 ret = t4_get_raw_vpd_params(adapter, p);
2820 /* Ask firmware for the Core Clock since it knows how to translate the
2821 * Reference Clock ('V2') VPD field into a Core Clock value ...
2823 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2824 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2825 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2826 1, &cclk_param, &cclk_val);
2835 /* serial flash and firmware constants */
2837 SF_ATTEMPTS = 10, /* max retries for SF operations */
2839 /* flash command opcodes */
2840 SF_PROG_PAGE = 2, /* program page */
2841 SF_WR_DISABLE = 4, /* disable writes */
2842 SF_RD_STATUS = 5, /* read status register */
2843 SF_WR_ENABLE = 6, /* enable writes */
2844 SF_RD_DATA_FAST = 0xb, /* read flash */
2845 SF_RD_ID = 0x9f, /* read ID */
2846 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2850 * sf1_read - read data from the serial flash
2851 * @adapter: the adapter
2852 * @byte_cnt: number of bytes to read
2853 * @cont: whether another operation will be chained
2854 * @lock: whether to lock SF for PL access only
2855 * @valp: where to store the read data
2857 * Reads up to 4 bytes of data from the serial flash. The location of
2858 * the read needs to be specified prior to calling this by issuing the
2859 * appropriate commands to the serial flash.
2861 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2862 int lock, u32 *valp)
2866 if (!byte_cnt || byte_cnt > 4)
2868 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2870 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2871 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2872 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2874 *valp = t4_read_reg(adapter, SF_DATA_A);
2879 * sf1_write - write data to the serial flash
2880 * @adapter: the adapter
2881 * @byte_cnt: number of bytes to write
2882 * @cont: whether another operation will be chained
2883 * @lock: whether to lock SF for PL access only
2884 * @val: value to write
2886 * Writes up to 4 bytes of data to the serial flash. The location of
2887 * the write needs to be specified prior to calling this by issuing the
2888 * appropriate commands to the serial flash.
2890 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2893 if (!byte_cnt || byte_cnt > 4)
2895 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2897 t4_write_reg(adapter, SF_DATA_A, val);
2898 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2899 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2900 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2904 * flash_wait_op - wait for a flash operation to complete
2905 * @adapter: the adapter
2906 * @attempts: max number of polls of the status register
2907 * @delay: delay between polls in ms
2909 * Wait for a flash operation to complete by polling the status register.
2911 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
2917 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
2918 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
2922 if (--attempts == 0)
2930 * t4_read_flash - read words from serial flash
2931 * @adapter: the adapter
2932 * @addr: the start address for the read
2933 * @nwords: how many 32-bit words to read
2934 * @data: where to store the read data
2935 * @byte_oriented: whether to store data as bytes or as words
2937 * Read the specified number of 32-bit words from the serial flash.
2938 * If @byte_oriented is set the read data is stored as a byte array
2939 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2940 * natural endianness.
2942 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2943 unsigned int nwords, u32 *data, int byte_oriented)
2947 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
2950 addr = swab32(addr) | SF_RD_DATA_FAST;
2952 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
2953 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
2956 for ( ; nwords; nwords--, data++) {
2957 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2959 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
2963 *data = (__force __u32)(cpu_to_be32(*data));
2969 * t4_write_flash - write up to a page of data to the serial flash
2970 * @adapter: the adapter
2971 * @addr: the start address to write
2972 * @n: length of data to write in bytes
2973 * @data: the data to write
2975 * Writes up to a page of data (256 bytes) to the serial flash starting
2976 * at the given address. All the data must be written to the same page.
2978 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
2979 unsigned int n, const u8 *data)
2983 unsigned int i, c, left, val, offset = addr & 0xff;
2985 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
2988 val = swab32(addr) | SF_PROG_PAGE;
2990 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
2991 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
2994 for (left = n; left; left -= c) {
2996 for (val = 0, i = 0; i < c; ++i)
2997 val = (val << 8) + *data++;
2999 ret = sf1_write(adapter, c, c != left, 1, val);
3003 ret = flash_wait_op(adapter, 8, 1);
3007 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3009 /* Read the page to verify the write succeeded */
3010 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
3014 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3015 dev_err(adapter->pdev_dev,
3016 "failed to correctly write the flash page at %#x\n",
3023 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3028 * t4_get_fw_version - read the firmware version
3029 * @adapter: the adapter
3030 * @vers: where to place the version
3032 * Reads the FW version from flash.
3034 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3036 return t4_read_flash(adapter, FLASH_FW_START +
3037 offsetof(struct fw_hdr, fw_ver), 1,
3042 * t4_get_bs_version - read the firmware bootstrap version
3043 * @adapter: the adapter
3044 * @vers: where to place the version
3046 * Reads the FW Bootstrap version from flash.
3048 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3050 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3051 offsetof(struct fw_hdr, fw_ver), 1,
3056 * t4_get_tp_version - read the TP microcode version
3057 * @adapter: the adapter
3058 * @vers: where to place the version
3060 * Reads the TP microcode version from flash.
3062 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3064 return t4_read_flash(adapter, FLASH_FW_START +
3065 offsetof(struct fw_hdr, tp_microcode_ver),
3070 * t4_get_exprom_version - return the Expansion ROM version (if any)
3071 * @adapter: the adapter
3072 * @vers: where to place the version
3074 * Reads the Expansion ROM header from FLASH and returns the version
3075 * number (if present) through the @vers return value pointer. We return
3076 * this in the Firmware Version Format since it's convenient. Return
3077 * 0 on success, -ENOENT if no Expansion ROM is present.
3079 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3081 struct exprom_header {
3082 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3083 unsigned char hdr_ver[4]; /* Expansion ROM version */
3085 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3089 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3090 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3095 hdr = (struct exprom_header *)exprom_header_buf;
3096 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3099 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3100 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3101 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3102 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3107 * t4_get_vpd_version - return the VPD version
3108 * @adapter: the adapter
3109 * @vers: where to place the version
3111 * Reads the VPD via the Firmware interface (thus this can only be called
3112 * once we're ready to issue Firmware commands). The format of the
3113 * VPD version is adapter specific. Returns 0 on success, an error on
3116 * Note that early versions of the Firmware didn't include the ability
3117 * to retrieve the VPD version, so we zero-out the return-value parameter
3118 * in that case to avoid leaving it with garbage in it.
3120 * Also note that the Firmware will return its cached copy of the VPD
3121 * Revision ID, not the actual Revision ID as written in the Serial
3122 * EEPROM. This is only an issue if a new VPD has been written and the
3123 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3124 * to defer calling this routine till after a FW_RESET_CMD has been issued
3125 * if the Host Driver will be performing a full adapter initialization.
3127 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3132 vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3133 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3134 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3135 1, &vpdrev_param, vers);
3142 * t4_get_scfg_version - return the Serial Configuration version
3143 * @adapter: the adapter
3144 * @vers: where to place the version
3146 * Reads the Serial Configuration Version via the Firmware interface
3147 * (thus this can only be called once we're ready to issue Firmware
3148 * commands). The format of the Serial Configuration version is
3149 * adapter specific. Returns 0 on success, an error on failure.
3151 * Note that early versions of the Firmware didn't include the ability
3152 * to retrieve the Serial Configuration version, so we zero-out the
3153 * return-value parameter in that case to avoid leaving it with
3156 * Also note that the Firmware will return its cached copy of the Serial
3157 * Initialization Revision ID, not the actual Revision ID as written in
3158 * the Serial EEPROM. This is only an issue if a new VPD has been written
3159 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3160 * it's best to defer calling this routine till after a FW_RESET_CMD has
3161 * been issued if the Host Driver will be performing a full adapter
3164 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3169 scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3170 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3171 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3172 1, &scfgrev_param, vers);
3179 * t4_get_version_info - extract various chip/firmware version information
3180 * @adapter: the adapter
3182 * Reads various chip/firmware version numbers and stores them into the
3183 * adapter Adapter Parameters structure. If any of the efforts fails
3184 * the first failure will be returned, but all of the version numbers
3187 int t4_get_version_info(struct adapter *adapter)
3191 #define FIRST_RET(__getvinfo) \
3193 int __ret = __getvinfo; \
3194 if (__ret && !ret) \
3198 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3199 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3200 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3201 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3202 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3203 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3210 * t4_dump_version_info - dump all of the adapter configuration IDs
3211 * @adapter: the adapter
3213 * Dumps all of the various bits of adapter configuration version/revision
3214 * IDs information. This is typically called at some point after
3215 * t4_get_version_info() has been called.
3217 void t4_dump_version_info(struct adapter *adapter)
3219 /* Device information */
3220 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3221 adapter->params.vpd.id,
3222 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3223 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3224 adapter->params.vpd.sn, adapter->params.vpd.pn);
3226 /* Firmware Version */
3227 if (!adapter->params.fw_vers)
3228 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3230 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3231 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3232 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3233 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3234 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3236 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3237 * Firmware, so dev_info() is more appropriate here.)
3239 if (!adapter->params.bs_vers)
3240 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3242 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3243 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3244 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3245 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3246 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3248 /* TP Microcode Version */
3249 if (!adapter->params.tp_vers)
3250 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3252 dev_info(adapter->pdev_dev,
3253 "TP Microcode version: %u.%u.%u.%u\n",
3254 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3255 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3256 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3257 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3259 /* Expansion ROM version */
3260 if (!adapter->params.er_vers)
3261 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3263 dev_info(adapter->pdev_dev,
3264 "Expansion ROM version: %u.%u.%u.%u\n",
3265 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3266 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3267 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3268 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3270 /* Serial Configuration version */
3271 dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3272 adapter->params.scfg_vers);
3275 dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3276 adapter->params.vpd_vers);
3280 * t4_check_fw_version - check if the FW is supported with this driver
3281 * @adap: the adapter
3283 * Checks if an adapter's FW is compatible with the driver. Returns 0
3284 * if there's exact match, a negative error if the version could not be
3285 * read or there's a major version mismatch
3287 int t4_check_fw_version(struct adapter *adap)
3289 int i, ret, major, minor, micro;
3290 int exp_major, exp_minor, exp_micro;
3291 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3293 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3294 /* Try multiple times before returning error */
3295 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3296 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3301 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3302 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3303 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3305 switch (chip_version) {
3307 exp_major = T4FW_MIN_VERSION_MAJOR;
3308 exp_minor = T4FW_MIN_VERSION_MINOR;
3309 exp_micro = T4FW_MIN_VERSION_MICRO;
3312 exp_major = T5FW_MIN_VERSION_MAJOR;
3313 exp_minor = T5FW_MIN_VERSION_MINOR;
3314 exp_micro = T5FW_MIN_VERSION_MICRO;
3317 exp_major = T6FW_MIN_VERSION_MAJOR;
3318 exp_minor = T6FW_MIN_VERSION_MINOR;
3319 exp_micro = T6FW_MIN_VERSION_MICRO;
3322 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3327 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3328 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3329 dev_err(adap->pdev_dev,
3330 "Card has firmware version %u.%u.%u, minimum "
3331 "supported firmware is %u.%u.%u.\n", major, minor,
3332 micro, exp_major, exp_minor, exp_micro);
3338 /* Is the given firmware API compatible with the one the driver was compiled
3341 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3344 /* short circuit if it's the exact same firmware version */
3345 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3348 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3349 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3350 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3357 /* The firmware in the filesystem is usable, but should it be installed?
3358 * This routine explains itself in detail if it indicates the filesystem
3359 * firmware should be installed.
3361 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3366 if (!card_fw_usable) {
3367 reason = "incompatible or unusable";
3372 reason = "older than the version supported with this driver";
3379 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3380 "installing firmware %u.%u.%u.%u on card.\n",
3381 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3382 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3383 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3384 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3389 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3390 const u8 *fw_data, unsigned int fw_size,
3391 struct fw_hdr *card_fw, enum dev_state state,
3394 int ret, card_fw_usable, fs_fw_usable;
3395 const struct fw_hdr *fs_fw;
3396 const struct fw_hdr *drv_fw;
3398 drv_fw = &fw_info->fw_hdr;
3400 /* Read the header of the firmware on the card */
3401 ret = -t4_read_flash(adap, FLASH_FW_START,
3402 sizeof(*card_fw) / sizeof(uint32_t),
3403 (uint32_t *)card_fw, 1);
3405 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3407 dev_err(adap->pdev_dev,
3408 "Unable to read card's firmware header: %d\n", ret);
3412 if (fw_data != NULL) {
3413 fs_fw = (const void *)fw_data;
3414 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3420 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3421 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3422 /* Common case: the firmware on the card is an exact match and
3423 * the filesystem one is an exact match too, or the filesystem
3424 * one is absent/incompatible.
3426 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3427 should_install_fs_fw(adap, card_fw_usable,
3428 be32_to_cpu(fs_fw->fw_ver),
3429 be32_to_cpu(card_fw->fw_ver))) {
3430 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
3433 dev_err(adap->pdev_dev,
3434 "failed to install firmware: %d\n", ret);
3438 /* Installed successfully, update the cached header too. */
3441 *reset = 0; /* already reset as part of load_fw */
3444 if (!card_fw_usable) {
3447 d = be32_to_cpu(drv_fw->fw_ver);
3448 c = be32_to_cpu(card_fw->fw_ver);
3449 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3451 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3453 "driver compiled with %d.%d.%d.%d, "
3454 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3456 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3457 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3458 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3459 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3460 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3461 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3466 /* We're using whatever's on the card and it's known to be good. */
3467 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3468 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3475 * t4_flash_erase_sectors - erase a range of flash sectors
3476 * @adapter: the adapter
3477 * @start: the first sector to erase
3478 * @end: the last sector to erase
3480 * Erases the sectors in the given inclusive range.
3482 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3486 if (end >= adapter->params.sf_nsec)
3489 while (start <= end) {
3490 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3491 (ret = sf1_write(adapter, 4, 0, 1,
3492 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3493 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3494 dev_err(adapter->pdev_dev,
3495 "erase of flash sector %d failed, error %d\n",
3501 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3506 * t4_flash_cfg_addr - return the address of the flash configuration file
3507 * @adapter: the adapter
3509 * Return the address within the flash where the Firmware Configuration
3512 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3514 if (adapter->params.sf_size == 0x100000)
3515 return FLASH_FPGA_CFG_START;
3517 return FLASH_CFG_START;
3520 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
3521 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3522 * and emit an error message for mismatched firmware to save our caller the
3525 static bool t4_fw_matches_chip(const struct adapter *adap,
3526 const struct fw_hdr *hdr)
3528 /* The expression below will return FALSE for any unsupported adapter
3529 * which will keep us "honest" in the future ...
3531 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3532 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3533 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3536 dev_err(adap->pdev_dev,
3537 "FW image (%d) is not suitable for this adapter (%d)\n",
3538 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3543 * t4_load_fw - download firmware
3544 * @adap: the adapter
3545 * @fw_data: the firmware image to write
3548 * Write the supplied firmware image to the card's serial flash.
3550 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3555 u8 first_page[SF_PAGE_SIZE];
3556 const __be32 *p = (const __be32 *)fw_data;
3557 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3558 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3559 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3560 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3561 unsigned int fw_start = FLASH_FW_START;
3564 dev_err(adap->pdev_dev, "FW image has no data\n");
3568 dev_err(adap->pdev_dev,
3569 "FW image size not multiple of 512 bytes\n");
3572 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3573 dev_err(adap->pdev_dev,
3574 "FW image size differs from size in FW header\n");
3577 if (size > fw_size) {
3578 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3582 if (!t4_fw_matches_chip(adap, hdr))
3585 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3586 csum += be32_to_cpu(p[i]);
3588 if (csum != 0xffffffff) {
3589 dev_err(adap->pdev_dev,
3590 "corrupted firmware image, checksum %#x\n", csum);
3594 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3595 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3600 * We write the correct version at the end so the driver can see a bad
3601 * version if the FW write fails. Start by writing a copy of the
3602 * first page with a bad version.
3604 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3605 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3606 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
3611 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3612 addr += SF_PAGE_SIZE;
3613 fw_data += SF_PAGE_SIZE;
3614 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
3619 ret = t4_write_flash(adap,
3620 fw_start + offsetof(struct fw_hdr, fw_ver),
3621 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3624 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3627 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3632 * t4_phy_fw_ver - return current PHY firmware version
3633 * @adap: the adapter
3634 * @phy_fw_ver: return value buffer for PHY firmware version
3636 * Returns the current version of external PHY firmware on the
3639 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3644 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3645 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3646 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3647 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3648 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3657 * t4_load_phy_fw - download port PHY firmware
3658 * @adap: the adapter
3659 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3660 * @win_lock: the lock to use to guard the memory copy
3661 * @phy_fw_version: function to check PHY firmware versions
3662 * @phy_fw_data: the PHY firmware image to write
3663 * @phy_fw_size: image size
3665 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3666 * @phy_fw_version is supplied, then it will be used to determine if
3667 * it's necessary to perform the transfer by comparing the version
3668 * of any existing adapter PHY firmware with that of the passed in
3669 * PHY firmware image. If @win_lock is non-NULL then it will be used
3670 * around the call to t4_memory_rw() which transfers the PHY firmware
3673 * A negative error number will be returned if an error occurs. If
3674 * version number support is available and there's no need to upgrade
3675 * the firmware, 0 will be returned. If firmware is successfully
3676 * transferred to the adapter, 1 will be retured.
3678 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3679 * a result, a RESET of the adapter would cause that RAM to lose its
3680 * contents. Thus, loading PHY firmware on such adapters must happen
3681 * after any FW_RESET_CMDs ...
3683 int t4_load_phy_fw(struct adapter *adap,
3684 int win, spinlock_t *win_lock,
3685 int (*phy_fw_version)(const u8 *, size_t),
3686 const u8 *phy_fw_data, size_t phy_fw_size)
3688 unsigned long mtype = 0, maddr = 0;
3690 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3693 /* If we have version number support, then check to see if the adapter
3694 * already has up-to-date PHY firmware loaded.
3696 if (phy_fw_version) {
3697 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3698 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3702 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3703 CH_WARN(adap, "PHY Firmware already up-to-date, "
3704 "version %#x\n", cur_phy_fw_ver);
3709 /* Ask the firmware where it wants us to copy the PHY firmware image.
3710 * The size of the file requires a special version of the READ coommand
3711 * which will pass the file size via the values field in PARAMS_CMD and
3712 * retrieve the return value from firmware and place it in the same
3715 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3716 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3717 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3718 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3720 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3721 ¶m, &val, 1, true);
3725 maddr = (val & 0xff) << 16;
3727 /* Copy the supplied PHY Firmware image to the adapter memory location
3728 * allocated by the adapter firmware.
3731 spin_lock_bh(win_lock);
3732 ret = t4_memory_rw(adap, win, mtype, maddr,
3733 phy_fw_size, (__be32 *)phy_fw_data,
3736 spin_unlock_bh(win_lock);
3740 /* Tell the firmware that the PHY firmware image has been written to
3741 * RAM and it can now start copying it over to the PHYs. The chip
3742 * firmware will RESET the affected PHYs as part of this operation
3743 * leaving them running the new PHY firmware image.
3745 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3746 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3747 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3748 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3749 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3750 ¶m, &val, 30000);
3752 /* If we have version number support, then check to see that the new
3753 * firmware got loaded properly.
3755 if (phy_fw_version) {
3756 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3760 if (cur_phy_fw_ver != new_phy_fw_vers) {
3761 CH_WARN(adap, "PHY Firmware did not update: "
3762 "version on adapter %#x, "
3763 "version flashed %#x\n",
3764 cur_phy_fw_ver, new_phy_fw_vers);
3773 * t4_fwcache - firmware cache operation
3774 * @adap: the adapter
3775 * @op : the operation (flush or flush and invalidate)
3777 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3779 struct fw_params_cmd c;
3781 memset(&c, 0, sizeof(c));
3783 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3784 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3785 FW_PARAMS_CMD_PFN_V(adap->pf) |
3786 FW_PARAMS_CMD_VFN_V(0));
3787 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3789 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3790 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3791 c.param[0].val = (__force __be32)op;
3793 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3796 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3797 unsigned int *pif_req_wrptr,
3798 unsigned int *pif_rsp_wrptr)
3801 u32 cfg, val, req, rsp;
3803 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3804 if (cfg & LADBGEN_F)
3805 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3807 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3808 req = POLADBGWRPTR_G(val);
3809 rsp = PILADBGWRPTR_G(val);
3811 *pif_req_wrptr = req;
3813 *pif_rsp_wrptr = rsp;
3815 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3816 for (j = 0; j < 6; j++) {
3817 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3818 PILADBGRDPTR_V(rsp));
3819 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3820 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3824 req = (req + 2) & POLADBGRDPTR_M;
3825 rsp = (rsp + 2) & PILADBGRDPTR_M;
3827 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3830 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3835 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3836 if (cfg & LADBGEN_F)
3837 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3839 for (i = 0; i < CIM_MALA_SIZE; i++) {
3840 for (j = 0; j < 5; j++) {
3842 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3843 PILADBGRDPTR_V(idx));
3844 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3845 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
3848 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3851 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3855 for (i = 0; i < 8; i++) {
3856 u32 *p = la_buf + i;
3858 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3859 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3860 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3861 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3862 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
3866 #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3870 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3871 * @caps16: a 16-bit Port Capabilities value
3873 * Returns the equivalent 32-bit Port Capabilities value.
3875 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3877 fw_port_cap32_t caps32 = 0;
3879 #define CAP16_TO_CAP32(__cap) \
3881 if (caps16 & FW_PORT_CAP_##__cap) \
3882 caps32 |= FW_PORT_CAP32_##__cap; \
3885 CAP16_TO_CAP32(SPEED_100M);
3886 CAP16_TO_CAP32(SPEED_1G);
3887 CAP16_TO_CAP32(SPEED_25G);
3888 CAP16_TO_CAP32(SPEED_10G);
3889 CAP16_TO_CAP32(SPEED_40G);
3890 CAP16_TO_CAP32(SPEED_100G);
3891 CAP16_TO_CAP32(FC_RX);
3892 CAP16_TO_CAP32(FC_TX);
3893 CAP16_TO_CAP32(ANEG);
3894 CAP16_TO_CAP32(MDIX);
3895 CAP16_TO_CAP32(MDIAUTO);
3896 CAP16_TO_CAP32(FEC_RS);
3897 CAP16_TO_CAP32(FEC_BASER_RS);
3898 CAP16_TO_CAP32(802_3_PAUSE);
3899 CAP16_TO_CAP32(802_3_ASM_DIR);
3901 #undef CAP16_TO_CAP32
3907 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
3908 * @caps32: a 32-bit Port Capabilities value
3910 * Returns the equivalent 16-bit Port Capabilities value. Note that
3911 * not all 32-bit Port Capabilities can be represented in the 16-bit
3912 * Port Capabilities and some fields/values may not make it.
3914 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
3916 fw_port_cap16_t caps16 = 0;
3918 #define CAP32_TO_CAP16(__cap) \
3920 if (caps32 & FW_PORT_CAP32_##__cap) \
3921 caps16 |= FW_PORT_CAP_##__cap; \
3924 CAP32_TO_CAP16(SPEED_100M);
3925 CAP32_TO_CAP16(SPEED_1G);
3926 CAP32_TO_CAP16(SPEED_10G);
3927 CAP32_TO_CAP16(SPEED_25G);
3928 CAP32_TO_CAP16(SPEED_40G);
3929 CAP32_TO_CAP16(SPEED_100G);
3930 CAP32_TO_CAP16(FC_RX);
3931 CAP32_TO_CAP16(FC_TX);
3932 CAP32_TO_CAP16(802_3_PAUSE);
3933 CAP32_TO_CAP16(802_3_ASM_DIR);
3934 CAP32_TO_CAP16(ANEG);
3935 CAP32_TO_CAP16(MDIX);
3936 CAP32_TO_CAP16(MDIAUTO);
3937 CAP32_TO_CAP16(FEC_RS);
3938 CAP32_TO_CAP16(FEC_BASER_RS);
3940 #undef CAP32_TO_CAP16
3945 /* Translate Firmware Port Capabilities Pause specification to Common Code */
3946 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
3948 enum cc_pause cc_pause = 0;
3950 if (fw_pause & FW_PORT_CAP32_FC_RX)
3951 cc_pause |= PAUSE_RX;
3952 if (fw_pause & FW_PORT_CAP32_FC_TX)
3953 cc_pause |= PAUSE_TX;
3958 /* Translate Common Code Pause specification into Firmware Port Capabilities */
3959 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
3961 fw_port_cap32_t fw_pause = 0;
3963 if (cc_pause & PAUSE_RX)
3964 fw_pause |= FW_PORT_CAP32_FC_RX;
3965 if (cc_pause & PAUSE_TX)
3966 fw_pause |= FW_PORT_CAP32_FC_TX;
3971 /* Translate Firmware Forward Error Correction specification to Common Code */
3972 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
3974 enum cc_fec cc_fec = 0;
3976 if (fw_fec & FW_PORT_CAP32_FEC_RS)
3978 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
3979 cc_fec |= FEC_BASER_RS;
3984 /* Translate Common Code Forward Error Correction specification to Firmware */
3985 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
3987 fw_port_cap32_t fw_fec = 0;
3989 if (cc_fec & FEC_RS)
3990 fw_fec |= FW_PORT_CAP32_FEC_RS;
3991 if (cc_fec & FEC_BASER_RS)
3992 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
3998 * t4_link_l1cfg - apply link configuration to MAC/PHY
3999 * @adapter: the adapter
4000 * @mbox: the Firmware Mailbox to use
4001 * @port: the Port ID
4002 * @lc: the Port's Link Configuration
4004 * Set up a port's MAC and PHY according to a desired link configuration.
4005 * - If the PHY can auto-negotiate first decide what to advertise, then
4006 * enable/disable auto-negotiation as desired, and reset.
4007 * - If the PHY does not auto-negotiate just reset it.
4008 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4009 * otherwise do it later based on the outcome of auto-negotiation.
4011 int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
4012 unsigned int port, struct link_config *lc)
4014 unsigned int fw_caps = adapter->params.fw_caps_support;
4015 struct fw_port_cmd cmd;
4016 unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO);
4017 fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
4021 /* Convert driver coding of Pause Frame Flow Control settings into the
4024 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4026 /* Convert Common Code Forward Error Control settings into the
4027 * Firmware's API. If the current Requested FEC has "Automatic"
4028 * (IEEE 802.3) specified, then we use whatever the Firmware
4029 * sent us as part of it's IEEE 802.3-based interpratation of
4030 * the Transceiver Module EPROM FEC parameters. Otherwise we
4031 * use whatever is in the current Requested FEC settings.
4033 if (lc->requested_fec & FEC_AUTO)
4034 cc_fec = fwcap_to_cc_fec(lc->def_acaps);
4036 cc_fec = lc->requested_fec;
4037 fw_fec = cc_to_fwcap_fec(cc_fec);
4039 /* Figure out what our Requested Port Capabilities are going to be.
4041 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4042 rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
4043 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4045 } else if (lc->autoneg == AUTONEG_DISABLE) {
4046 rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4047 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
4050 rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
4053 /* And send that on to the Firmware ...
4055 memset(&cmd, 0, sizeof(cmd));
4056 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4057 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4058 FW_PORT_CMD_PORTID_V(port));
4059 cmd.action_to_len16 =
4060 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4061 ? FW_PORT_ACTION_L1_CFG
4062 : FW_PORT_ACTION_L1_CFG32) |
4064 if (fw_caps == FW_CAPS16)
4065 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
4067 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4068 return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4072 * t4_restart_aneg - restart autonegotiation
4073 * @adap: the adapter
4074 * @mbox: mbox to use for the FW command
4075 * @port: the port id
4077 * Restarts autonegotiation for the selected port.
4079 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4081 struct fw_port_cmd c;
4083 memset(&c, 0, sizeof(c));
4084 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4085 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4086 FW_PORT_CMD_PORTID_V(port));
4088 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
4090 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
4091 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4094 typedef void (*int_handler_t)(struct adapter *adap);
4097 unsigned int mask; /* bits to check in interrupt status */
4098 const char *msg; /* message to print or NULL */
4099 short stat_idx; /* stat counter to increment or -1 */
4100 unsigned short fatal; /* whether the condition reported is fatal */
4101 int_handler_t int_handler; /* platform-specific int handler */
4105 * t4_handle_intr_status - table driven interrupt handler
4106 * @adapter: the adapter that generated the interrupt
4107 * @reg: the interrupt status register to process
4108 * @acts: table of interrupt actions
4110 * A table driven interrupt handler that applies a set of masks to an
4111 * interrupt status word and performs the corresponding actions if the
4112 * interrupts described by the mask have occurred. The actions include
4113 * optionally emitting a warning or alert message. The table is terminated
4114 * by an entry specifying mask 0. Returns the number of fatal interrupt
4117 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4118 const struct intr_info *acts)
4121 unsigned int mask = 0;
4122 unsigned int status = t4_read_reg(adapter, reg);
4124 for ( ; acts->mask; ++acts) {
4125 if (!(status & acts->mask))
4129 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4130 status & acts->mask);
4131 } else if (acts->msg && printk_ratelimit())
4132 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4133 status & acts->mask);
4134 if (acts->int_handler)
4135 acts->int_handler(adapter);
4139 if (status) /* clear processed interrupts */
4140 t4_write_reg(adapter, reg, status);
4145 * Interrupt handler for the PCIE module.
4147 static void pcie_intr_handler(struct adapter *adapter)
4149 static const struct intr_info sysbus_intr_info[] = {
4150 { RNPP_F, "RXNP array parity error", -1, 1 },
4151 { RPCP_F, "RXPC array parity error", -1, 1 },
4152 { RCIP_F, "RXCIF array parity error", -1, 1 },
4153 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4154 { RFTP_F, "RXFT array parity error", -1, 1 },
4157 static const struct intr_info pcie_port_intr_info[] = {
4158 { TPCP_F, "TXPC array parity error", -1, 1 },
4159 { TNPP_F, "TXNP array parity error", -1, 1 },
4160 { TFTP_F, "TXFT array parity error", -1, 1 },
4161 { TCAP_F, "TXCA array parity error", -1, 1 },
4162 { TCIP_F, "TXCIF array parity error", -1, 1 },
4163 { RCAP_F, "RXCA array parity error", -1, 1 },
4164 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4165 { RDPE_F, "Rx data parity error", -1, 1 },
4166 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
4169 static const struct intr_info pcie_intr_info[] = {
4170 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4171 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4172 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4173 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4174 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4175 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4176 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4177 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4178 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4179 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4180 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4181 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4182 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4183 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4184 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4185 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4186 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4187 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4188 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4189 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4190 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4191 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4192 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4193 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4194 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4195 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4196 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4197 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4198 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4199 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
4204 static struct intr_info t5_pcie_intr_info[] = {
4205 { MSTGRPPERR_F, "Master Response Read Queue parity error",
4207 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4208 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4209 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4210 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4211 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4212 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4213 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4215 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4217 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4218 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4219 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4220 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4221 { DREQWRPERR_F, "PCI DMA channel write request parity error",
4223 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4224 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4225 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4226 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4227 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4228 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4229 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4230 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4231 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4232 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4233 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4235 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4237 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4238 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4239 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4240 { READRSPERR_F, "Outbound read error", -1, 0 },
4246 if (is_t4(adapter->params.chip))
4247 fat = t4_handle_intr_status(adapter,
4248 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4250 t4_handle_intr_status(adapter,
4251 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4252 pcie_port_intr_info) +
4253 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4256 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4260 t4_fatal_err(adapter);
4264 * TP interrupt handler.
4266 static void tp_intr_handler(struct adapter *adapter)
4268 static const struct intr_info tp_intr_info[] = {
4269 { 0x3fffffff, "TP parity error", -1, 1 },
4270 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4274 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4275 t4_fatal_err(adapter);
4279 * SGE interrupt handler.
4281 static void sge_intr_handler(struct adapter *adapter)
4286 static const struct intr_info sge_intr_info[] = {
4287 { ERR_CPL_EXCEED_IQE_SIZE_F,
4288 "SGE received CPL exceeding IQE size", -1, 1 },
4289 { ERR_INVALID_CIDX_INC_F,
4290 "SGE GTS CIDX increment too large", -1, 0 },
4291 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4292 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4293 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4294 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4295 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4297 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4299 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4301 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4303 { ERR_ING_CTXT_PRIO_F,
4304 "SGE too many priority ingress contexts", -1, 0 },
4305 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4306 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
4310 static struct intr_info t4t5_sge_intr_info[] = {
4311 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4312 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4313 { ERR_EGR_CTXT_PRIO_F,
4314 "SGE too many priority egress contexts", -1, 0 },
4318 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
4319 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
4321 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
4322 (unsigned long long)v);
4323 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
4324 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
4327 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4328 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4329 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4330 t4t5_sge_intr_info);
4332 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4333 if (err & ERROR_QID_VALID_F) {
4334 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4336 if (err & UNCAPTURED_ERROR_F)
4337 dev_err(adapter->pdev_dev,
4338 "SGE UNCAPTURED_ERROR set (clearing)\n");
4339 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4340 UNCAPTURED_ERROR_F);
4344 t4_fatal_err(adapter);
4347 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4348 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4349 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4350 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4353 * CIM interrupt handler.
4355 static void cim_intr_handler(struct adapter *adapter)
4357 static const struct intr_info cim_intr_info[] = {
4358 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4359 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4360 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4361 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4362 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4363 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4364 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4365 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
4368 static const struct intr_info cim_upintr_info[] = {
4369 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4370 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4371 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4372 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4373 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4374 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4375 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4376 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4377 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4378 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4379 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4380 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4381 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4382 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4383 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4384 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4385 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4386 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4387 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4388 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4389 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4390 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4391 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4392 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4393 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4394 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4395 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4396 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
4403 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4404 if (fw_err & PCIE_FW_ERR_F)
4405 t4_report_fw_error(adapter);
4407 /* When the Firmware detects an internal error which normally
4408 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4409 * in order to make sure the Host sees the Firmware Crash. So
4410 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4411 * ignore the Timer0 interrupt.
4414 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4415 if (val & TIMER0INT_F)
4416 if (!(fw_err & PCIE_FW_ERR_F) ||
4417 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4418 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4421 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4423 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4426 t4_fatal_err(adapter);
4430 * ULP RX interrupt handler.
4432 static void ulprx_intr_handler(struct adapter *adapter)
4434 static const struct intr_info ulprx_intr_info[] = {
4435 { 0x1800000, "ULPRX context error", -1, 1 },
4436 { 0x7fffff, "ULPRX parity error", -1, 1 },
4440 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4441 t4_fatal_err(adapter);
4445 * ULP TX interrupt handler.
4447 static void ulptx_intr_handler(struct adapter *adapter)
4449 static const struct intr_info ulptx_intr_info[] = {
4450 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4452 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4454 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4456 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4458 { 0xfffffff, "ULPTX parity error", -1, 1 },
4462 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4463 t4_fatal_err(adapter);
4467 * PM TX interrupt handler.
4469 static void pmtx_intr_handler(struct adapter *adapter)
4471 static const struct intr_info pmtx_intr_info[] = {
4472 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4473 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4474 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4475 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4476 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4477 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4478 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4480 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4481 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4485 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4486 t4_fatal_err(adapter);
4490 * PM RX interrupt handler.
4492 static void pmrx_intr_handler(struct adapter *adapter)
4494 static const struct intr_info pmrx_intr_info[] = {
4495 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4496 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4497 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4498 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4500 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4501 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4505 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4506 t4_fatal_err(adapter);
4510 * CPL switch interrupt handler.
4512 static void cplsw_intr_handler(struct adapter *adapter)
4514 static const struct intr_info cplsw_intr_info[] = {
4515 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4516 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4517 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4518 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4519 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4520 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4524 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4525 t4_fatal_err(adapter);
4529 * LE interrupt handler.
4531 static void le_intr_handler(struct adapter *adap)
4533 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4534 static const struct intr_info le_intr_info[] = {
4535 { LIPMISS_F, "LE LIP miss", -1, 0 },
4536 { LIP0_F, "LE 0 LIP error", -1, 0 },
4537 { PARITYERR_F, "LE parity error", -1, 1 },
4538 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4539 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4543 static struct intr_info t6_le_intr_info[] = {
4544 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4545 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4546 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4547 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4548 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4552 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4553 (chip <= CHELSIO_T5) ?
4554 le_intr_info : t6_le_intr_info))
4559 * MPS interrupt handler.
4561 static void mps_intr_handler(struct adapter *adapter)
4563 static const struct intr_info mps_rx_intr_info[] = {
4564 { 0xffffff, "MPS Rx parity error", -1, 1 },
4567 static const struct intr_info mps_tx_intr_info[] = {
4568 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4569 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4570 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4572 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4574 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4575 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4576 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4579 static const struct intr_info t6_mps_tx_intr_info[] = {
4580 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4581 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4582 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4584 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4586 /* MPS Tx Bubble is normal for T6 */
4587 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4588 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4591 static const struct intr_info mps_trc_intr_info[] = {
4592 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4593 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4595 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4598 static const struct intr_info mps_stat_sram_intr_info[] = {
4599 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4602 static const struct intr_info mps_stat_tx_intr_info[] = {
4603 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4606 static const struct intr_info mps_stat_rx_intr_info[] = {
4607 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4610 static const struct intr_info mps_cls_intr_info[] = {
4611 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4612 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4613 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
4619 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4621 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4622 is_t6(adapter->params.chip)
4623 ? t6_mps_tx_intr_info
4624 : mps_tx_intr_info) +
4625 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4626 mps_trc_intr_info) +
4627 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4628 mps_stat_sram_intr_info) +
4629 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4630 mps_stat_tx_intr_info) +
4631 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4632 mps_stat_rx_intr_info) +
4633 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
4636 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4637 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4639 t4_fatal_err(adapter);
4642 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4646 * EDC/MC interrupt handler.
4648 static void mem_intr_handler(struct adapter *adapter, int idx)
4650 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4652 unsigned int addr, cnt_addr, v;
4654 if (idx <= MEM_EDC1) {
4655 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4656 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4657 } else if (idx == MEM_MC) {
4658 if (is_t4(adapter->params.chip)) {
4659 addr = MC_INT_CAUSE_A;
4660 cnt_addr = MC_ECC_STATUS_A;
4662 addr = MC_P_INT_CAUSE_A;
4663 cnt_addr = MC_P_ECC_STATUS_A;
4666 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4667 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4670 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4671 if (v & PERR_INT_CAUSE_F)
4672 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
4674 if (v & ECC_CE_INT_CAUSE_F) {
4675 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4677 t4_edc_err_read(adapter, idx);
4679 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4680 if (printk_ratelimit())
4681 dev_warn(adapter->pdev_dev,
4682 "%u %s correctable ECC data error%s\n",
4683 cnt, name[idx], cnt > 1 ? "s" : "");
4685 if (v & ECC_UE_INT_CAUSE_F)
4686 dev_alert(adapter->pdev_dev,
4687 "%s uncorrectable ECC data error\n", name[idx]);
4689 t4_write_reg(adapter, addr, v);
4690 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4691 t4_fatal_err(adapter);
4695 * MA interrupt handler.
4697 static void ma_intr_handler(struct adapter *adap)
4699 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4701 if (status & MEM_PERR_INT_CAUSE_F) {
4702 dev_alert(adap->pdev_dev,
4703 "MA parity error, parity status %#x\n",
4704 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
4705 if (is_t5(adap->params.chip))
4706 dev_alert(adap->pdev_dev,
4707 "MA parity error, parity status %#x\n",
4709 MA_PARITY_ERROR_STATUS2_A));
4711 if (status & MEM_WRAP_INT_CAUSE_F) {
4712 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4713 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4714 "client %u to address %#x\n",
4715 MEM_WRAP_CLIENT_NUM_G(v),
4716 MEM_WRAP_ADDRESS_G(v) << 4);
4718 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4723 * SMB interrupt handler.
4725 static void smb_intr_handler(struct adapter *adap)
4727 static const struct intr_info smb_intr_info[] = {
4728 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4729 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4730 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4734 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4739 * NC-SI interrupt handler.
4741 static void ncsi_intr_handler(struct adapter *adap)
4743 static const struct intr_info ncsi_intr_info[] = {
4744 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4745 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4746 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4747 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4751 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4756 * XGMAC interrupt handler.
4758 static void xgmac_intr_handler(struct adapter *adap, int port)
4760 u32 v, int_cause_reg;
4762 if (is_t4(adap->params.chip))
4763 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4765 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4767 v = t4_read_reg(adap, int_cause_reg);
4769 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4773 if (v & TXFIFO_PRTY_ERR_F)
4774 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4776 if (v & RXFIFO_PRTY_ERR_F)
4777 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4779 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4784 * PL interrupt handler.
4786 static void pl_intr_handler(struct adapter *adap)
4788 static const struct intr_info pl_intr_info[] = {
4789 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4790 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4794 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
4798 #define PF_INTR_MASK (PFSW_F)
4799 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4800 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4801 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
4804 * t4_slow_intr_handler - control path interrupt handler
4805 * @adapter: the adapter
4807 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4808 * The designation 'slow' is because it involves register reads, while
4809 * data interrupts typically don't involve any MMIOs.
4811 int t4_slow_intr_handler(struct adapter *adapter)
4813 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
4815 if (!(cause & GLBL_INTR_MASK))
4818 cim_intr_handler(adapter);
4820 mps_intr_handler(adapter);
4822 ncsi_intr_handler(adapter);
4824 pl_intr_handler(adapter);
4826 smb_intr_handler(adapter);
4827 if (cause & XGMAC0_F)
4828 xgmac_intr_handler(adapter, 0);
4829 if (cause & XGMAC1_F)
4830 xgmac_intr_handler(adapter, 1);
4831 if (cause & XGMAC_KR0_F)
4832 xgmac_intr_handler(adapter, 2);
4833 if (cause & XGMAC_KR1_F)
4834 xgmac_intr_handler(adapter, 3);
4836 pcie_intr_handler(adapter);
4838 mem_intr_handler(adapter, MEM_MC);
4839 if (is_t5(adapter->params.chip) && (cause & MC1_F))
4840 mem_intr_handler(adapter, MEM_MC1);
4842 mem_intr_handler(adapter, MEM_EDC0);
4844 mem_intr_handler(adapter, MEM_EDC1);
4846 le_intr_handler(adapter);
4848 tp_intr_handler(adapter);
4850 ma_intr_handler(adapter);
4851 if (cause & PM_TX_F)
4852 pmtx_intr_handler(adapter);
4853 if (cause & PM_RX_F)
4854 pmrx_intr_handler(adapter);
4855 if (cause & ULP_RX_F)
4856 ulprx_intr_handler(adapter);
4857 if (cause & CPL_SWITCH_F)
4858 cplsw_intr_handler(adapter);
4860 sge_intr_handler(adapter);
4861 if (cause & ULP_TX_F)
4862 ulptx_intr_handler(adapter);
4864 /* Clear the interrupts just processed for which we are the master. */
4865 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
4866 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
4871 * t4_intr_enable - enable interrupts
4872 * @adapter: the adapter whose interrupts should be enabled
4874 * Enable PF-specific interrupts for the calling function and the top-level
4875 * interrupt concentrator for global interrupts. Interrupts are already
4876 * enabled at each module, here we just enable the roots of the interrupt
4879 * Note: this function should be called only when the driver manages
4880 * non PF-specific interrupts from the various HW modules. Only one PCI
4881 * function at a time should be doing this.
4883 void t4_intr_enable(struct adapter *adapter)
4886 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4887 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4888 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4890 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4891 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
4892 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
4893 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
4894 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
4895 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
4896 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
4897 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
4898 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
4899 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
4900 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
4904 * t4_intr_disable - disable interrupts
4905 * @adapter: the adapter whose interrupts should be disabled
4907 * Disable interrupts. We only disable the top-level interrupt
4908 * concentrators. The caller must be a PCI function managing global
4911 void t4_intr_disable(struct adapter *adapter)
4915 if (pci_channel_offline(adapter->pdev))
4918 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
4919 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
4920 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
4922 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
4923 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
4927 * t4_config_rss_range - configure a portion of the RSS mapping table
4928 * @adapter: the adapter
4929 * @mbox: mbox to use for the FW command
4930 * @viid: virtual interface whose RSS subtable is to be written
4931 * @start: start entry in the table to write
4932 * @n: how many table entries to write
4933 * @rspq: values for the response queue lookup table
4934 * @nrspq: number of values in @rspq
4936 * Programs the selected part of the VI's RSS mapping table with the
4937 * provided values. If @nrspq < @n the supplied values are used repeatedly
4938 * until the full table range is populated.
4940 * The caller must ensure the values in @rspq are in the range allowed for
4943 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
4944 int start, int n, const u16 *rspq, unsigned int nrspq)
4947 const u16 *rsp = rspq;
4948 const u16 *rsp_end = rspq + nrspq;
4949 struct fw_rss_ind_tbl_cmd cmd;
4951 memset(&cmd, 0, sizeof(cmd));
4952 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
4953 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
4954 FW_RSS_IND_TBL_CMD_VIID_V(viid));
4955 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
4957 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
4959 int nq = min(n, 32);
4960 __be32 *qp = &cmd.iq0_to_iq2;
4962 cmd.niqid = cpu_to_be16(nq);
4963 cmd.startidx = cpu_to_be16(start);
4971 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
4972 if (++rsp >= rsp_end)
4974 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
4975 if (++rsp >= rsp_end)
4977 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
4978 if (++rsp >= rsp_end)
4981 *qp++ = cpu_to_be32(v);
4985 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
4993 * t4_config_glbl_rss - configure the global RSS mode
4994 * @adapter: the adapter
4995 * @mbox: mbox to use for the FW command
4996 * @mode: global RSS mode
4997 * @flags: mode-specific flags
4999 * Sets the global RSS mode.
5001 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5004 struct fw_rss_glb_config_cmd c;
5006 memset(&c, 0, sizeof(c));
5007 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5008 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5009 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5010 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5011 c.u.manual.mode_pkd =
5012 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5013 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5014 c.u.basicvirtual.mode_pkd =
5015 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5016 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5019 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5023 * t4_config_vi_rss - configure per VI RSS settings
5024 * @adapter: the adapter
5025 * @mbox: mbox to use for the FW command
5028 * @defq: id of the default RSS queue for the VI.
5030 * Configures VI-specific RSS properties.
5032 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5033 unsigned int flags, unsigned int defq)
5035 struct fw_rss_vi_config_cmd c;
5037 memset(&c, 0, sizeof(c));
5038 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5039 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5040 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5041 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5042 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5043 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5044 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5047 /* Read an RSS table row */
5048 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5050 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5051 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5056 * t4_read_rss - read the contents of the RSS mapping table
5057 * @adapter: the adapter
5058 * @map: holds the contents of the RSS mapping table
5060 * Reads the contents of the RSS hash->queue mapping table.
5062 int t4_read_rss(struct adapter *adapter, u16 *map)
5067 for (i = 0; i < RSS_NENTRIES / 2; ++i) {
5068 ret = rd_rss_row(adapter, i, &val);
5071 *map++ = LKPTBLQUEUE0_G(val);
5072 *map++ = LKPTBLQUEUE1_G(val);
5077 static unsigned int t4_use_ldst(struct adapter *adap)
5079 return (adap->flags & FW_OK) || !adap->use_bd;
5083 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5084 * @adap: the adapter
5085 * @cmd: TP fw ldst address space type
5086 * @vals: where the indirect register values are stored/written
5087 * @nregs: how many indirect registers to read/write
5088 * @start_idx: index of first indirect register to read/write
5089 * @rw: Read (1) or Write (0)
5090 * @sleep_ok: if true we may sleep while awaiting command completion
5092 * Access TP indirect registers through LDST
5094 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5095 unsigned int nregs, unsigned int start_index,
5096 unsigned int rw, bool sleep_ok)
5100 struct fw_ldst_cmd c;
5102 for (i = 0; i < nregs; i++) {
5103 memset(&c, 0, sizeof(c));
5104 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5106 (rw ? FW_CMD_READ_F :
5108 FW_LDST_CMD_ADDRSPACE_V(cmd));
5109 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5111 c.u.addrval.addr = cpu_to_be32(start_index + i);
5112 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5113 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5119 vals[i] = be32_to_cpu(c.u.addrval.val);
5125 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5126 * @adap: the adapter
5127 * @reg_addr: Address Register
5128 * @reg_data: Data register
5129 * @buff: where the indirect register values are stored/written
5130 * @nregs: how many indirect registers to read/write
5131 * @start_index: index of first indirect register to read/write
5132 * @rw: READ(1) or WRITE(0)
5133 * @sleep_ok: if true we may sleep while awaiting command completion
5135 * Read/Write TP indirect registers through LDST if possible.
5136 * Else, use backdoor access
5138 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5139 u32 *buff, u32 nregs, u32 start_index, int rw,
5147 cmd = FW_LDST_ADDRSPC_TP_PIO;
5149 case TP_TM_PIO_ADDR_A:
5150 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5152 case TP_MIB_INDEX_A:
5153 cmd = FW_LDST_ADDRSPC_TP_MIB;
5156 goto indirect_access;
5159 if (t4_use_ldst(adap))
5160 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5167 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5170 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5176 * t4_tp_pio_read - Read TP PIO registers
5177 * @adap: the adapter
5178 * @buff: where the indirect register values are written
5179 * @nregs: how many indirect registers to read
5180 * @start_index: index of first indirect register to read
5181 * @sleep_ok: if true we may sleep while awaiting command completion
5183 * Read TP PIO Registers
5185 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5186 u32 start_index, bool sleep_ok)
5188 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5189 start_index, 1, sleep_ok);
5193 * t4_tp_pio_write - Write TP PIO registers
5194 * @adap: the adapter
5195 * @buff: where the indirect register values are stored
5196 * @nregs: how many indirect registers to write
5197 * @start_index: index of first indirect register to write
5198 * @sleep_ok: if true we may sleep while awaiting command completion
5200 * Write TP PIO Registers
5202 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5203 u32 start_index, bool sleep_ok)
5205 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5206 start_index, 0, sleep_ok);
5210 * t4_tp_tm_pio_read - Read TP TM PIO registers
5211 * @adap: the adapter
5212 * @buff: where the indirect register values are written
5213 * @nregs: how many indirect registers to read
5214 * @start_index: index of first indirect register to read
5215 * @sleep_ok: if true we may sleep while awaiting command completion
5217 * Read TP TM PIO Registers
5219 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5220 u32 start_index, bool sleep_ok)
5222 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5223 nregs, start_index, 1, sleep_ok);
5227 * t4_tp_mib_read - Read TP MIB registers
5228 * @adap: the adapter
5229 * @buff: where the indirect register values are written
5230 * @nregs: how many indirect registers to read
5231 * @start_index: index of first indirect register to read
5232 * @sleep_ok: if true we may sleep while awaiting command completion
5234 * Read TP MIB Registers
5236 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
5239 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5240 start_index, 1, sleep_ok);
5244 * t4_read_rss_key - read the global RSS key
5245 * @adap: the adapter
5246 * @key: 10-entry array holding the 320-bit RSS key
5247 * @sleep_ok: if true we may sleep while awaiting command completion
5249 * Reads the global 320-bit RSS key.
5251 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
5253 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5257 * t4_write_rss_key - program one of the RSS keys
5258 * @adap: the adapter
5259 * @key: 10-entry array holding the 320-bit RSS key
5260 * @idx: which RSS key to write
5261 * @sleep_ok: if true we may sleep while awaiting command completion
5263 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5264 * 0..15 the corresponding entry in the RSS key table is written,
5265 * otherwise the global RSS key is written.
5267 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5270 u8 rss_key_addr_cnt = 16;
5271 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5273 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5274 * allows access to key addresses 16-63 by using KeyWrAddrX
5275 * as index[5:4](upper 2) into key table
5277 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5278 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5279 rss_key_addr_cnt = 32;
5281 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5283 if (idx >= 0 && idx < rss_key_addr_cnt) {
5284 if (rss_key_addr_cnt > 16)
5285 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5286 KEYWRADDRX_V(idx >> 4) |
5287 T6_VFWRADDR_V(idx) | KEYWREN_F);
5289 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5290 KEYWRADDR_V(idx) | KEYWREN_F);
5295 * t4_read_rss_pf_config - read PF RSS Configuration Table
5296 * @adapter: the adapter
5297 * @index: the entry in the PF RSS table to read
5298 * @valp: where to store the returned value
5299 * @sleep_ok: if true we may sleep while awaiting command completion
5301 * Reads the PF RSS Configuration Table at the specified index and returns
5302 * the value found there.
5304 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5305 u32 *valp, bool sleep_ok)
5307 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5311 * t4_read_rss_vf_config - read VF RSS Configuration Table
5312 * @adapter: the adapter
5313 * @index: the entry in the VF RSS table to read
5314 * @vfl: where to store the returned VFL
5315 * @vfh: where to store the returned VFH
5316 * @sleep_ok: if true we may sleep while awaiting command completion
5318 * Reads the VF RSS Configuration Table at the specified index and returns
5319 * the (VFL, VFH) values found there.
5321 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5322 u32 *vfl, u32 *vfh, bool sleep_ok)
5324 u32 vrt, mask, data;
5326 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5327 mask = VFWRADDR_V(VFWRADDR_M);
5328 data = VFWRADDR_V(index);
5330 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5331 data = T6_VFWRADDR_V(index);
5334 /* Request that the index'th VF Table values be read into VFL/VFH.
5336 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5337 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5338 vrt |= data | VFRDEN_F;
5339 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5341 /* Grab the VFL/VFH values ...
5343 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5344 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5348 * t4_read_rss_pf_map - read PF RSS Map
5349 * @adapter: the adapter
5350 * @sleep_ok: if true we may sleep while awaiting command completion
5352 * Reads the PF RSS Map register and returns its value.
5354 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5358 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5363 * t4_read_rss_pf_mask - read PF RSS Mask
5364 * @adapter: the adapter
5365 * @sleep_ok: if true we may sleep while awaiting command completion
5367 * Reads the PF RSS Mask register and returns its value.
5369 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5373 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5378 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5379 * @adap: the adapter
5380 * @v4: holds the TCP/IP counter values
5381 * @v6: holds the TCP/IPv6 counter values
5382 * @sleep_ok: if true we may sleep while awaiting command completion
5384 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5385 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5387 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5388 struct tp_tcp_stats *v6, bool sleep_ok)
5390 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5392 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5393 #define STAT(x) val[STAT_IDX(x)]
5394 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
5397 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5398 TP_MIB_TCP_OUT_RST_A, sleep_ok);
5399 v4->tcp_out_rsts = STAT(OUT_RST);
5400 v4->tcp_in_segs = STAT64(IN_SEG);
5401 v4->tcp_out_segs = STAT64(OUT_SEG);
5402 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5405 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5406 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5407 v6->tcp_out_rsts = STAT(OUT_RST);
5408 v6->tcp_in_segs = STAT64(IN_SEG);
5409 v6->tcp_out_segs = STAT64(OUT_SEG);
5410 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5418 * t4_tp_get_err_stats - read TP's error MIB counters
5419 * @adap: the adapter
5420 * @st: holds the counter values
5421 * @sleep_ok: if true we may sleep while awaiting command completion
5423 * Returns the values of TP's error counters.
5425 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5428 int nchan = adap->params.arch.nchan;
5430 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5432 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5434 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5436 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5437 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5438 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5439 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5440 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5442 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5443 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5444 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5445 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5446 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5451 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5452 * @adap: the adapter
5453 * @st: holds the counter values
5454 * @sleep_ok: if true we may sleep while awaiting command completion
5456 * Returns the values of TP's CPL counters.
5458 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5461 int nchan = adap->params.arch.nchan;
5463 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5465 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5469 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5470 * @adap: the adapter
5471 * @st: holds the counter values
5472 * @sleep_ok: if true we may sleep while awaiting command completion
5474 * Returns the values of TP's RDMA counters.
5476 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
5479 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5484 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5485 * @adap: the adapter
5486 * @idx: the port index
5487 * @st: holds the counter values
5488 * @sleep_ok: if true we may sleep while awaiting command completion
5490 * Returns the values of TP's FCoE counters for the selected port.
5492 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5493 struct tp_fcoe_stats *st, bool sleep_ok)
5497 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5500 t4_tp_mib_read(adap, &st->frames_drop, 1,
5501 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5503 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5506 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5510 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5511 * @adap: the adapter
5512 * @st: holds the counter values
5513 * @sleep_ok: if true we may sleep while awaiting command completion
5515 * Returns the values of TP's counters for non-TCP directly-placed packets.
5517 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5522 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5523 st->frames = val[0];
5525 st->octets = ((u64)val[2] << 32) | val[3];
5529 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5530 * @adap: the adapter
5531 * @mtus: where to store the MTU values
5532 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5534 * Reads the HW path MTU table.
5536 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
5541 for (i = 0; i < NMTUS; ++i) {
5542 t4_write_reg(adap, TP_MTU_TABLE_A,
5543 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5544 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5545 mtus[i] = MTUVALUE_G(v);
5547 mtu_log[i] = MTUWIDTH_G(v);
5552 * t4_read_cong_tbl - reads the congestion control table
5553 * @adap: the adapter
5554 * @incr: where to store the alpha values
5556 * Reads the additive increments programmed into the HW congestion
5559 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5561 unsigned int mtu, w;
5563 for (mtu = 0; mtu < NMTUS; ++mtu)
5564 for (w = 0; w < NCCTRL_WIN; ++w) {
5565 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5566 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5567 incr[mtu][w] = (u16)t4_read_reg(adap,
5568 TP_CCTRL_TABLE_A) & 0x1fff;
5573 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5574 * @adap: the adapter
5575 * @addr: the indirect TP register address
5576 * @mask: specifies the field within the register to modify
5577 * @val: new value for the field
5579 * Sets a field of an indirect TP register to the given value.
5581 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5582 unsigned int mask, unsigned int val)
5584 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5585 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5586 t4_write_reg(adap, TP_PIO_DATA_A, val);
5590 * init_cong_ctrl - initialize congestion control parameters
5591 * @a: the alpha values for congestion control
5592 * @b: the beta values for congestion control
5594 * Initialize the congestion control parameters.
5596 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
5598 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5623 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5626 b[13] = b[14] = b[15] = b[16] = 3;
5627 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5628 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5633 /* The minimum additive increment value for the congestion control table */
5634 #define CC_MIN_INCR 2U
5637 * t4_load_mtus - write the MTU and congestion control HW tables
5638 * @adap: the adapter
5639 * @mtus: the values for the MTU table
5640 * @alpha: the values for the congestion control alpha parameter
5641 * @beta: the values for the congestion control beta parameter
5643 * Write the HW MTU table with the supplied MTUs and the high-speed
5644 * congestion control table with the supplied alpha, beta, and MTUs.
5645 * We write the two tables together because the additive increments
5646 * depend on the MTUs.
5648 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5649 const unsigned short *alpha, const unsigned short *beta)
5651 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5652 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5653 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5654 28672, 40960, 57344, 81920, 114688, 163840, 229376
5659 for (i = 0; i < NMTUS; ++i) {
5660 unsigned int mtu = mtus[i];
5661 unsigned int log2 = fls(mtu);
5663 if (!(mtu & ((1 << log2) >> 2))) /* round */
5665 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5666 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5668 for (w = 0; w < NCCTRL_WIN; ++w) {
5671 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5674 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5675 (w << 16) | (beta[w] << 13) | inc);
5680 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5681 * clocks. The formula is
5683 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5685 * which is equivalent to
5687 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5689 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5691 u64 v = bytes256 * adap->params.vpd.cclk;
5693 return v * 62 + v / 2;
5697 * t4_get_chan_txrate - get the current per channel Tx rates
5698 * @adap: the adapter
5699 * @nic_rate: rates for NIC traffic
5700 * @ofld_rate: rates for offloaded traffic
5702 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5705 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
5709 v = t4_read_reg(adap, TP_TX_TRATE_A);
5710 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5711 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5712 if (adap->params.arch.nchan == NCHAN) {
5713 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5714 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5717 v = t4_read_reg(adap, TP_TX_ORATE_A);
5718 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5719 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5720 if (adap->params.arch.nchan == NCHAN) {
5721 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5722 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5727 * t4_set_trace_filter - configure one of the tracing filters
5728 * @adap: the adapter
5729 * @tp: the desired trace filter parameters
5730 * @idx: which filter to configure
5731 * @enable: whether to enable or disable the filter
5733 * Configures one of the tracing filters available in HW. If @enable is
5734 * %0 @tp is not examined and may be %NULL. The user is responsible to
5735 * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5737 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5738 int idx, int enable)
5740 int i, ofst = idx * 4;
5741 u32 data_reg, mask_reg, cfg;
5742 u32 multitrc = TRCMULTIFILTER_F;
5745 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5749 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5750 if (cfg & TRCMULTIFILTER_F) {
5751 /* If multiple tracers are enabled, then maximum
5752 * capture size is 2.5KB (FIFO size of a single channel)
5753 * minus 2 flits for CPL_TRACE_PKT header.
5755 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5758 /* If multiple tracers are disabled, to avoid deadlocks
5759 * maximum packet capture size of 9600 bytes is recommended.
5760 * Also in this mode, only trace0 can be enabled and running.
5763 if (tp->snap_len > 9600 || idx)
5767 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5768 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5769 tp->min_len > TFMINPKTSIZE_M)
5772 /* stop the tracer we'll be changing */
5773 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5775 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5776 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5777 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
5779 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5780 t4_write_reg(adap, data_reg, tp->data[i]);
5781 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5783 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5784 TFCAPTUREMAX_V(tp->snap_len) |
5785 TFMINPKTSIZE_V(tp->min_len));
5786 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5787 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5788 (is_t4(adap->params.chip) ?
5789 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5790 T5_TFPORT_V(tp->port) | T5_TFEN_F |
5791 T5_TFINVERTMATCH_V(tp->invert)));
5797 * t4_get_trace_filter - query one of the tracing filters
5798 * @adap: the adapter
5799 * @tp: the current trace filter parameters
5800 * @idx: which trace filter to query
5801 * @enabled: non-zero if the filter is enabled
5803 * Returns the current settings of one of the HW tracing filters.
5805 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5809 int i, ofst = idx * 4;
5810 u32 data_reg, mask_reg;
5812 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5813 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
5815 if (is_t4(adap->params.chip)) {
5816 *enabled = !!(ctla & TFEN_F);
5817 tp->port = TFPORT_G(ctla);
5818 tp->invert = !!(ctla & TFINVERTMATCH_F);
5820 *enabled = !!(ctla & T5_TFEN_F);
5821 tp->port = T5_TFPORT_G(ctla);
5822 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5824 tp->snap_len = TFCAPTUREMAX_G(ctlb);
5825 tp->min_len = TFMINPKTSIZE_G(ctlb);
5826 tp->skip_ofst = TFOFFSET_G(ctla);
5827 tp->skip_len = TFLENGTH_G(ctla);
5829 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5830 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5831 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5833 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5834 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5835 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5840 * t4_pmtx_get_stats - returns the HW stats from PMTX
5841 * @adap: the adapter
5842 * @cnt: where to store the count statistics
5843 * @cycles: where to store the cycle statistics
5845 * Returns performance statistics from PMTX.
5847 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5852 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5853 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5854 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5855 if (is_t4(adap->params.chip)) {
5856 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5858 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5859 PM_TX_DBG_DATA_A, data, 2,
5860 PM_TX_DBG_STAT_MSB_A);
5861 cycles[i] = (((u64)data[0] << 32) | data[1]);
5867 * t4_pmrx_get_stats - returns the HW stats from PMRX
5868 * @adap: the adapter
5869 * @cnt: where to store the count statistics
5870 * @cycles: where to store the cycle statistics
5872 * Returns performance statistics from PMRX.
5874 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
5879 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5880 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
5881 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
5882 if (is_t4(adap->params.chip)) {
5883 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
5885 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
5886 PM_RX_DBG_DATA_A, data, 2,
5887 PM_RX_DBG_STAT_MSB_A);
5888 cycles[i] = (((u64)data[0] << 32) | data[1]);
5894 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
5895 * @adap: the adapter
5896 * @pidx: the port index
5898 * Computes and returns a bitmap indicating which MPS buffer groups are
5899 * associated with the given Port. Bit i is set if buffer group i is
5902 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
5905 unsigned int chip_version, nports;
5907 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
5908 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
5910 switch (chip_version) {
5915 case 2: return 3 << (2 * pidx);
5916 case 4: return 1 << pidx;
5922 case 2: return 1 << (2 * pidx);
5927 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
5928 chip_version, nports);
5934 * t4_get_mps_bg_map - return the buffer groups associated with a port
5935 * @adapter: the adapter
5936 * @pidx: the port index
5938 * Returns a bitmap indicating which MPS buffer groups are associated
5939 * with the given Port. Bit i is set if buffer group i is used by the
5942 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
5945 unsigned int nports;
5947 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
5948 if (pidx >= nports) {
5949 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
5954 /* If we've already retrieved/computed this, just return the result.
5956 mps_bg_map = adapter->params.mps_bg_map;
5957 if (mps_bg_map[pidx])
5958 return mps_bg_map[pidx];
5960 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
5961 * If we're talking to such Firmware, let it tell us. If the new
5962 * API isn't supported, revert back to old hardcoded way. The value
5963 * obtained from Firmware is encoded in below format:
5965 * val = (( MPSBGMAP[Port 3] << 24 ) |
5966 * ( MPSBGMAP[Port 2] << 16 ) |
5967 * ( MPSBGMAP[Port 1] << 8 ) |
5968 * ( MPSBGMAP[Port 0] << 0 ))
5970 if (adapter->flags & FW_OK) {
5974 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5975 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
5976 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
5977 0, 1, ¶m, &val);
5981 /* Store the BG Map for all of the Ports in order to
5982 * avoid more calls to the Firmware in the future.
5984 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
5985 mps_bg_map[p] = val & 0xff;
5987 return mps_bg_map[pidx];
5991 /* Either we're not talking to the Firmware or we're dealing with
5992 * older Firmware which doesn't support the new API to get the MPS
5993 * Buffer Group Map. Fall back to computing it ourselves.
5995 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
5996 return mps_bg_map[pidx];
6000 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6001 * @adapter: the adapter
6002 * @pidx: the port index
6004 * Returns a bitmap indicating which TP Ingress Channels are associated
6005 * with a given Port. Bit i is set if TP Ingress Channel i is used by
6008 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6010 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6011 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6013 if (pidx >= nports) {
6014 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6019 switch (chip_version) {
6022 /* Note that this happens to be the same values as the MPS
6023 * Buffer Group Map for these Chips. But we replicate the code
6024 * here because they're really separate concepts.
6028 case 2: return 3 << (2 * pidx);
6029 case 4: return 1 << pidx;
6035 case 2: return 1 << pidx;
6040 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6041 chip_version, nports);
6046 * t4_get_port_type_description - return Port Type string description
6047 * @port_type: firmware Port Type enumeration
6049 const char *t4_get_port_type_description(enum fw_port_type port_type)
6051 static const char *const port_type_description[] = {
6076 if (port_type < ARRAY_SIZE(port_type_description))
6077 return port_type_description[port_type];
6082 * t4_get_port_stats_offset - collect port stats relative to a previous
6084 * @adap: The adapter
6086 * @stats: Current stats to fill
6087 * @offset: Previous stats snapshot
6089 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6090 struct port_stats *stats,
6091 struct port_stats *offset)
6096 t4_get_port_stats(adap, idx, stats);
6097 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6098 i < (sizeof(struct port_stats) / sizeof(u64));
6104 * t4_get_port_stats - collect port statistics
6105 * @adap: the adapter
6106 * @idx: the port index
6107 * @p: the stats structure to fill
6109 * Collect statistics related to the given port from HW.
6111 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6113 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6114 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
6116 #define GET_STAT(name) \
6117 t4_read_reg64(adap, \
6118 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
6119 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
6120 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6122 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6123 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6124 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6125 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6126 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6127 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6128 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6129 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6130 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6131 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6132 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6133 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6134 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6135 p->tx_drop = GET_STAT(TX_PORT_DROP);
6136 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6137 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6138 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6139 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6140 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6141 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6142 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6143 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6144 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6146 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6147 if (stat_ctl & COUNTPAUSESTATTX_F)
6148 p->tx_frames_64 -= p->tx_pause;
6149 if (stat_ctl & COUNTPAUSEMCTX_F)
6150 p->tx_mcast_frames -= p->tx_pause;
6152 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6153 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6154 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6155 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6156 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6157 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6158 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6159 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6160 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6161 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6162 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6163 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6164 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6165 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6166 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6167 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6168 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6169 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6170 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6171 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6172 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6173 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6174 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6175 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6176 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6177 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6178 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6180 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6181 if (stat_ctl & COUNTPAUSESTATRX_F)
6182 p->rx_frames_64 -= p->rx_pause;
6183 if (stat_ctl & COUNTPAUSEMCRX_F)
6184 p->rx_mcast_frames -= p->rx_pause;
6187 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6188 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6189 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6190 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6191 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6192 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6193 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6194 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6201 * t4_get_lb_stats - collect loopback port statistics
6202 * @adap: the adapter
6203 * @idx: the loopback port index
6204 * @p: the stats structure to fill
6206 * Return HW statistics for the given loopback port.
6208 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6210 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6212 #define GET_STAT(name) \
6213 t4_read_reg64(adap, \
6214 (is_t4(adap->params.chip) ? \
6215 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
6216 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
6217 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6219 p->octets = GET_STAT(BYTES);
6220 p->frames = GET_STAT(FRAMES);
6221 p->bcast_frames = GET_STAT(BCAST);
6222 p->mcast_frames = GET_STAT(MCAST);
6223 p->ucast_frames = GET_STAT(UCAST);
6224 p->error_frames = GET_STAT(ERROR);
6226 p->frames_64 = GET_STAT(64B);
6227 p->frames_65_127 = GET_STAT(65B_127B);
6228 p->frames_128_255 = GET_STAT(128B_255B);
6229 p->frames_256_511 = GET_STAT(256B_511B);
6230 p->frames_512_1023 = GET_STAT(512B_1023B);
6231 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6232 p->frames_1519_max = GET_STAT(1519B_MAX);
6233 p->drop = GET_STAT(DROP_FRAMES);
6235 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6236 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6237 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6238 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6239 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6240 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6241 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6242 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6248 /* t4_mk_filtdelwr - create a delete filter WR
6249 * @ftid: the filter ID
6250 * @wr: the filter work request to populate
6251 * @qid: ingress queue to receive the delete notification
6253 * Creates a filter work request to delete the supplied filter. If @qid is
6254 * negative the delete notification is suppressed.
6256 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6258 memset(wr, 0, sizeof(*wr));
6259 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6260 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6261 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6262 FW_FILTER_WR_NOREPLY_V(qid < 0));
6263 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6265 wr->rx_chan_rx_rpl_iq =
6266 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
6269 #define INIT_CMD(var, cmd, rd_wr) do { \
6270 (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
6271 FW_CMD_REQUEST_F | \
6272 FW_CMD_##rd_wr##_F); \
6273 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6276 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6280 struct fw_ldst_cmd c;
6282 memset(&c, 0, sizeof(c));
6283 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6284 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6288 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6289 c.u.addrval.addr = cpu_to_be32(addr);
6290 c.u.addrval.val = cpu_to_be32(val);
6292 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6296 * t4_mdio_rd - read a PHY register through MDIO
6297 * @adap: the adapter
6298 * @mbox: mailbox to use for the FW command
6299 * @phy_addr: the PHY address
6300 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6301 * @reg: the register to read
6302 * @valp: where to store the value
6304 * Issues a FW command through the given mailbox to read a PHY register.
6306 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6307 unsigned int mmd, unsigned int reg, u16 *valp)
6311 struct fw_ldst_cmd c;
6313 memset(&c, 0, sizeof(c));
6314 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6315 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6316 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6318 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6319 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6320 FW_LDST_CMD_MMD_V(mmd));
6321 c.u.mdio.raddr = cpu_to_be16(reg);
6323 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6325 *valp = be16_to_cpu(c.u.mdio.rval);
6330 * t4_mdio_wr - write a PHY register through MDIO
6331 * @adap: the adapter
6332 * @mbox: mailbox to use for the FW command
6333 * @phy_addr: the PHY address
6334 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6335 * @reg: the register to write
6336 * @valp: value to write
6338 * Issues a FW command through the given mailbox to write a PHY register.
6340 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6341 unsigned int mmd, unsigned int reg, u16 val)
6344 struct fw_ldst_cmd c;
6346 memset(&c, 0, sizeof(c));
6347 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6348 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6349 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6351 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6352 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6353 FW_LDST_CMD_MMD_V(mmd));
6354 c.u.mdio.raddr = cpu_to_be16(reg);
6355 c.u.mdio.rval = cpu_to_be16(val);
6357 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6361 * t4_sge_decode_idma_state - decode the idma state
6362 * @adap: the adapter
6363 * @state: the state idma is stuck in
6365 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6367 static const char * const t4_decode[] = {
6369 "IDMA_PUSH_MORE_CPL_FIFO",
6370 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6372 "IDMA_PHYSADDR_SEND_PCIEHDR",
6373 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6374 "IDMA_PHYSADDR_SEND_PAYLOAD",
6375 "IDMA_SEND_FIFO_TO_IMSG",
6376 "IDMA_FL_REQ_DATA_FL_PREP",
6377 "IDMA_FL_REQ_DATA_FL",
6379 "IDMA_FL_H_REQ_HEADER_FL",
6380 "IDMA_FL_H_SEND_PCIEHDR",
6381 "IDMA_FL_H_PUSH_CPL_FIFO",
6382 "IDMA_FL_H_SEND_CPL",
6383 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6384 "IDMA_FL_H_SEND_IP_HDR",
6385 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6386 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6387 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6388 "IDMA_FL_D_SEND_PCIEHDR",
6389 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6390 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6391 "IDMA_FL_SEND_PCIEHDR",
6392 "IDMA_FL_PUSH_CPL_FIFO",
6394 "IDMA_FL_SEND_PAYLOAD_FIRST",
6395 "IDMA_FL_SEND_PAYLOAD",
6396 "IDMA_FL_REQ_NEXT_DATA_FL",
6397 "IDMA_FL_SEND_NEXT_PCIEHDR",
6398 "IDMA_FL_SEND_PADDING",
6399 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6400 "IDMA_FL_SEND_FIFO_TO_IMSG",
6401 "IDMA_FL_REQ_DATAFL_DONE",
6402 "IDMA_FL_REQ_HEADERFL_DONE",
6404 static const char * const t5_decode[] = {
6407 "IDMA_PUSH_MORE_CPL_FIFO",
6408 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6409 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6410 "IDMA_PHYSADDR_SEND_PCIEHDR",
6411 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6412 "IDMA_PHYSADDR_SEND_PAYLOAD",
6413 "IDMA_SEND_FIFO_TO_IMSG",
6414 "IDMA_FL_REQ_DATA_FL",
6416 "IDMA_FL_DROP_SEND_INC",
6417 "IDMA_FL_H_REQ_HEADER_FL",
6418 "IDMA_FL_H_SEND_PCIEHDR",
6419 "IDMA_FL_H_PUSH_CPL_FIFO",
6420 "IDMA_FL_H_SEND_CPL",
6421 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6422 "IDMA_FL_H_SEND_IP_HDR",
6423 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6424 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6425 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6426 "IDMA_FL_D_SEND_PCIEHDR",
6427 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6428 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6429 "IDMA_FL_SEND_PCIEHDR",
6430 "IDMA_FL_PUSH_CPL_FIFO",
6432 "IDMA_FL_SEND_PAYLOAD_FIRST",
6433 "IDMA_FL_SEND_PAYLOAD",
6434 "IDMA_FL_REQ_NEXT_DATA_FL",
6435 "IDMA_FL_SEND_NEXT_PCIEHDR",
6436 "IDMA_FL_SEND_PADDING",
6437 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6439 static const char * const t6_decode[] = {
6441 "IDMA_PUSH_MORE_CPL_FIFO",
6442 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6443 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6444 "IDMA_PHYSADDR_SEND_PCIEHDR",
6445 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6446 "IDMA_PHYSADDR_SEND_PAYLOAD",
6447 "IDMA_FL_REQ_DATA_FL",
6449 "IDMA_FL_DROP_SEND_INC",
6450 "IDMA_FL_H_REQ_HEADER_FL",
6451 "IDMA_FL_H_SEND_PCIEHDR",
6452 "IDMA_FL_H_PUSH_CPL_FIFO",
6453 "IDMA_FL_H_SEND_CPL",
6454 "IDMA_FL_H_SEND_IP_HDR_FIRST",
6455 "IDMA_FL_H_SEND_IP_HDR",
6456 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6457 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6458 "IDMA_FL_H_SEND_IP_HDR_PADDING",
6459 "IDMA_FL_D_SEND_PCIEHDR",
6460 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6461 "IDMA_FL_D_REQ_NEXT_DATA_FL",
6462 "IDMA_FL_SEND_PCIEHDR",
6463 "IDMA_FL_PUSH_CPL_FIFO",
6465 "IDMA_FL_SEND_PAYLOAD_FIRST",
6466 "IDMA_FL_SEND_PAYLOAD",
6467 "IDMA_FL_REQ_NEXT_DATA_FL",
6468 "IDMA_FL_SEND_NEXT_PCIEHDR",
6469 "IDMA_FL_SEND_PADDING",
6470 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6472 static const u32 sge_regs[] = {
6473 SGE_DEBUG_DATA_LOW_INDEX_2_A,
6474 SGE_DEBUG_DATA_LOW_INDEX_3_A,
6475 SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6477 const char **sge_idma_decode;
6478 int sge_idma_decode_nstates;
6480 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6482 /* Select the right set of decode strings to dump depending on the
6483 * adapter chip type.
6485 switch (chip_version) {
6487 sge_idma_decode = (const char **)t4_decode;
6488 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6492 sge_idma_decode = (const char **)t5_decode;
6493 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6497 sge_idma_decode = (const char **)t6_decode;
6498 sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6502 dev_err(adapter->pdev_dev,
6503 "Unsupported chip version %d\n", chip_version);
6507 if (is_t4(adapter->params.chip)) {
6508 sge_idma_decode = (const char **)t4_decode;
6509 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6511 sge_idma_decode = (const char **)t5_decode;
6512 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6515 if (state < sge_idma_decode_nstates)
6516 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6518 CH_WARN(adapter, "idma state %d unknown\n", state);
6520 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6521 CH_WARN(adapter, "SGE register %#x value %#x\n",
6522 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6526 * t4_sge_ctxt_flush - flush the SGE context cache
6527 * @adap: the adapter
6528 * @mbox: mailbox to use for the FW command
6530 * Issues a FW command through the given mailbox to flush the
6531 * SGE context cache.
6533 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
6537 struct fw_ldst_cmd c;
6539 memset(&c, 0, sizeof(c));
6540 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
6541 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6542 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6544 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6545 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6547 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6552 * t4_fw_hello - establish communication with FW
6553 * @adap: the adapter
6554 * @mbox: mailbox to use for the FW command
6555 * @evt_mbox: mailbox to receive async FW events
6556 * @master: specifies the caller's willingness to be the device master
6557 * @state: returns the current device state (if non-NULL)
6559 * Issues a command to establish communication with FW. Returns either
6560 * an error (negative integer) or the mailbox of the Master PF.
6562 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
6563 enum dev_master master, enum dev_state *state)
6566 struct fw_hello_cmd c;
6568 unsigned int master_mbox;
6569 int retries = FW_CMD_HELLO_RETRIES;
6572 memset(&c, 0, sizeof(c));
6573 INIT_CMD(c, HELLO, WRITE);
6574 c.err_to_clearinit = cpu_to_be32(
6575 FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
6576 FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
6577 FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
6578 mbox : FW_HELLO_CMD_MBMASTER_M) |
6579 FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
6580 FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
6581 FW_HELLO_CMD_CLEARINIT_F);
6584 * Issue the HELLO command to the firmware. If it's not successful
6585 * but indicates that we got a "busy" or "timeout" condition, retry
6586 * the HELLO until we exhaust our retry limit. If we do exceed our
6587 * retry limit, check to see if the firmware left us any error
6588 * information and report that if so.
6590 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6592 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
6594 if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
6595 t4_report_fw_error(adap);
6599 v = be32_to_cpu(c.err_to_clearinit);
6600 master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
6602 if (v & FW_HELLO_CMD_ERR_F)
6603 *state = DEV_STATE_ERR;
6604 else if (v & FW_HELLO_CMD_INIT_F)
6605 *state = DEV_STATE_INIT;
6607 *state = DEV_STATE_UNINIT;
6611 * If we're not the Master PF then we need to wait around for the
6612 * Master PF Driver to finish setting up the adapter.
6614 * Note that we also do this wait if we're a non-Master-capable PF and
6615 * there is no current Master PF; a Master PF may show up momentarily
6616 * and we wouldn't want to fail pointlessly. (This can happen when an
6617 * OS loads lots of different drivers rapidly at the same time). In
6618 * this case, the Master PF returned by the firmware will be
6619 * PCIE_FW_MASTER_M so the test below will work ...
6621 if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
6622 master_mbox != mbox) {
6623 int waiting = FW_CMD_HELLO_TIMEOUT;
6626 * Wait for the firmware to either indicate an error or
6627 * initialized state. If we see either of these we bail out
6628 * and report the issue to the caller. If we exhaust the
6629 * "hello timeout" and we haven't exhausted our retries, try
6630 * again. Otherwise bail with a timeout error.
6639 * If neither Error nor Initialialized are indicated
6640 * by the firmware keep waiting till we exaust our
6641 * timeout ... and then retry if we haven't exhausted
6644 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
6645 if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
6656 * We either have an Error or Initialized condition
6657 * report errors preferentially.
6660 if (pcie_fw & PCIE_FW_ERR_F)
6661 *state = DEV_STATE_ERR;
6662 else if (pcie_fw & PCIE_FW_INIT_F)
6663 *state = DEV_STATE_INIT;
6667 * If we arrived before a Master PF was selected and
6668 * there's not a valid Master PF, grab its identity
6671 if (master_mbox == PCIE_FW_MASTER_M &&
6672 (pcie_fw & PCIE_FW_MASTER_VLD_F))
6673 master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6682 * t4_fw_bye - end communication with FW
6683 * @adap: the adapter
6684 * @mbox: mailbox to use for the FW command
6686 * Issues a command to terminate communication with FW.
6688 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6690 struct fw_bye_cmd c;
6692 memset(&c, 0, sizeof(c));
6693 INIT_CMD(c, BYE, WRITE);
6694 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6698 * t4_init_cmd - ask FW to initialize the device
6699 * @adap: the adapter
6700 * @mbox: mailbox to use for the FW command
6702 * Issues a command to FW to partially initialize the device. This
6703 * performs initialization that generally doesn't depend on user input.
6705 int t4_early_init(struct adapter *adap, unsigned int mbox)
6707 struct fw_initialize_cmd c;
6709 memset(&c, 0, sizeof(c));
6710 INIT_CMD(c, INITIALIZE, WRITE);
6711 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6715 * t4_fw_reset - issue a reset to FW
6716 * @adap: the adapter
6717 * @mbox: mailbox to use for the FW command
6718 * @reset: specifies the type of reset to perform
6720 * Issues a reset command of the specified type to FW.
6722 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6724 struct fw_reset_cmd c;
6726 memset(&c, 0, sizeof(c));
6727 INIT_CMD(c, RESET, WRITE);
6728 c.val = cpu_to_be32(reset);
6729 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6733 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6734 * @adap: the adapter
6735 * @mbox: mailbox to use for the FW RESET command (if desired)
6736 * @force: force uP into RESET even if FW RESET command fails
6738 * Issues a RESET command to firmware (if desired) with a HALT indication
6739 * and then puts the microprocessor into RESET state. The RESET command
6740 * will only be issued if a legitimate mailbox is provided (mbox <=
6741 * PCIE_FW_MASTER_M).
6743 * This is generally used in order for the host to safely manipulate the
6744 * adapter without fear of conflicting with whatever the firmware might
6745 * be doing. The only way out of this state is to RESTART the firmware
6748 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6753 * If a legitimate mailbox is provided, issue a RESET command
6754 * with a HALT indication.
6756 if (mbox <= PCIE_FW_MASTER_M) {
6757 struct fw_reset_cmd c;
6759 memset(&c, 0, sizeof(c));
6760 INIT_CMD(c, RESET, WRITE);
6761 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
6762 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
6763 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6767 * Normally we won't complete the operation if the firmware RESET
6768 * command fails but if our caller insists we'll go ahead and put the
6769 * uP into RESET. This can be useful if the firmware is hung or even
6770 * missing ... We'll have to take the risk of putting the uP into
6771 * RESET without the cooperation of firmware in that case.
6773 * We also force the firmware's HALT flag to be on in case we bypassed
6774 * the firmware RESET command above or we're dealing with old firmware
6775 * which doesn't have the HALT capability. This will serve as a flag
6776 * for the incoming firmware to know that it's coming out of a HALT
6777 * rather than a RESET ... if it's new enough to understand that ...
6779 if (ret == 0 || force) {
6780 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
6781 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
6786 * And we always return the result of the firmware RESET command
6787 * even when we force the uP into RESET ...
6793 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6794 * @adap: the adapter
6795 * @reset: if we want to do a RESET to restart things
6797 * Restart firmware previously halted by t4_fw_halt(). On successful
6798 * return the previous PF Master remains as the new PF Master and there
6799 * is no need to issue a new HELLO command, etc.
6801 * We do this in two ways:
6803 * 1. If we're dealing with newer firmware we'll simply want to take
6804 * the chip's microprocessor out of RESET. This will cause the
6805 * firmware to start up from its start vector. And then we'll loop
6806 * until the firmware indicates it's started again (PCIE_FW.HALT
6807 * reset to 0) or we timeout.
6809 * 2. If we're dealing with older firmware then we'll need to RESET
6810 * the chip since older firmware won't recognize the PCIE_FW.HALT
6811 * flag and automatically RESET itself on startup.
6813 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6817 * Since we're directing the RESET instead of the firmware
6818 * doing it automatically, we need to clear the PCIE_FW.HALT
6821 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
6824 * If we've been given a valid mailbox, first try to get the
6825 * firmware to do the RESET. If that works, great and we can
6826 * return success. Otherwise, if we haven't been given a
6827 * valid mailbox or the RESET command failed, fall back to
6828 * hitting the chip with a hammer.
6830 if (mbox <= PCIE_FW_MASTER_M) {
6831 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6833 if (t4_fw_reset(adap, mbox,
6834 PIORST_F | PIORSTMODE_F) == 0)
6838 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
6843 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6844 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6845 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
6856 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6857 * @adap: the adapter
6858 * @mbox: mailbox to use for the FW RESET command (if desired)
6859 * @fw_data: the firmware image to write
6861 * @force: force upgrade even if firmware doesn't cooperate
6863 * Perform all of the steps necessary for upgrading an adapter's
6864 * firmware image. Normally this requires the cooperation of the
6865 * existing firmware in order to halt all existing activities
6866 * but if an invalid mailbox token is passed in we skip that step
6867 * (though we'll still put the adapter microprocessor into RESET in
6870 * On successful return the new firmware will have been loaded and
6871 * the adapter will have been fully RESET losing all previous setup
6872 * state. On unsuccessful return the adapter may be completely hosed ...
6873 * positive errno indicates that the adapter is ~probably~ intact, a
6874 * negative errno indicates that things are looking bad ...
6876 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
6877 const u8 *fw_data, unsigned int size, int force)
6879 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
6882 if (!t4_fw_matches_chip(adap, fw_hdr))
6885 /* Disable FW_OK flag so that mbox commands with FW_OK flag set
6886 * wont be sent when we are flashing FW.
6888 adap->flags &= ~FW_OK;
6890 ret = t4_fw_halt(adap, mbox, force);
6891 if (ret < 0 && !force)
6894 ret = t4_load_fw(adap, fw_data, size);
6899 * If there was a Firmware Configuration File stored in FLASH,
6900 * there's a good chance that it won't be compatible with the new
6901 * Firmware. In order to prevent difficult to diagnose adapter
6902 * initialization issues, we clear out the Firmware Configuration File
6903 * portion of the FLASH . The user will need to re-FLASH a new
6904 * Firmware Configuration File which is compatible with the new
6905 * Firmware if that's desired.
6907 (void)t4_load_cfg(adap, NULL, 0);
6910 * Older versions of the firmware don't understand the new
6911 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
6912 * restart. So for newly loaded older firmware we'll have to do the
6913 * RESET for it so it starts up on a clean slate. We can tell if
6914 * the newly loaded firmware will handle this right by checking
6915 * its header flags to see if it advertises the capability.
6917 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
6918 ret = t4_fw_restart(adap, mbox, reset);
6920 /* Grab potentially new Firmware Device Log parameters so we can see
6921 * how healthy the new Firmware is. It's okay to contact the new
6922 * Firmware for these parameters even though, as far as it's
6923 * concerned, we've never said "HELLO" to it ...
6925 (void)t4_init_devlog_params(adap);
6927 adap->flags |= FW_OK;
6932 * t4_fl_pkt_align - return the fl packet alignment
6933 * @adap: the adapter
6935 * T4 has a single field to specify the packing and padding boundary.
6936 * T5 onwards has separate fields for this and hence the alignment for
6937 * next packet offset is maximum of these two.
6940 int t4_fl_pkt_align(struct adapter *adap)
6942 u32 sge_control, sge_control2;
6943 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
6945 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
6947 /* T4 uses a single control field to specify both the PCIe Padding and
6948 * Packing Boundary. T5 introduced the ability to specify these
6949 * separately. The actual Ingress Packet Data alignment boundary
6950 * within Packed Buffer Mode is the maximum of these two
6951 * specifications. (Note that it makes no real practical sense to
6952 * have the Pading Boudary be larger than the Packing Boundary but you
6953 * could set the chip up that way and, in fact, legacy T4 code would
6954 * end doing this because it would initialize the Padding Boundary and
6955 * leave the Packing Boundary initialized to 0 (16 bytes).)
6956 * Padding Boundary values in T6 starts from 8B,
6957 * where as it is 32B for T4 and T5.
6959 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
6960 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
6962 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
6964 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
6966 fl_align = ingpadboundary;
6967 if (!is_t4(adap->params.chip)) {
6968 /* T5 has a weird interpretation of one of the PCIe Packing
6969 * Boundary values. No idea why ...
6971 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
6972 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
6973 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
6974 ingpackboundary = 16;
6976 ingpackboundary = 1 << (ingpackboundary +
6977 INGPACKBOUNDARY_SHIFT_X);
6979 fl_align = max(ingpadboundary, ingpackboundary);
6985 * t4_fixup_host_params - fix up host-dependent parameters
6986 * @adap: the adapter
6987 * @page_size: the host's Base Page Size
6988 * @cache_line_size: the host's Cache Line Size
6990 * Various registers in T4 contain values which are dependent on the
6991 * host's Base Page and Cache Line Sizes. This function will fix all of
6992 * those registers with the appropriate values as passed in ...
6994 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
6995 unsigned int cache_line_size)
6997 unsigned int page_shift = fls(page_size) - 1;
6998 unsigned int sge_hps = page_shift - 10;
6999 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7000 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7001 unsigned int fl_align_log = fls(fl_align) - 1;
7003 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7004 HOSTPAGESIZEPF0_V(sge_hps) |
7005 HOSTPAGESIZEPF1_V(sge_hps) |
7006 HOSTPAGESIZEPF2_V(sge_hps) |
7007 HOSTPAGESIZEPF3_V(sge_hps) |
7008 HOSTPAGESIZEPF4_V(sge_hps) |
7009 HOSTPAGESIZEPF5_V(sge_hps) |
7010 HOSTPAGESIZEPF6_V(sge_hps) |
7011 HOSTPAGESIZEPF7_V(sge_hps));
7013 if (is_t4(adap->params.chip)) {
7014 t4_set_reg_field(adap, SGE_CONTROL_A,
7015 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7016 EGRSTATUSPAGESIZE_F,
7017 INGPADBOUNDARY_V(fl_align_log -
7018 INGPADBOUNDARY_SHIFT_X) |
7019 EGRSTATUSPAGESIZE_V(stat_len != 64));
7021 unsigned int pack_align;
7022 unsigned int ingpad, ingpack;
7023 unsigned int pcie_cap;
7025 /* T5 introduced the separation of the Free List Padding and
7026 * Packing Boundaries. Thus, we can select a smaller Padding
7027 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7028 * Bandwidth, and use a Packing Boundary which is large enough
7029 * to avoid false sharing between CPUs, etc.
7031 * For the PCI Link, the smaller the Padding Boundary the
7032 * better. For the Memory Controller, a smaller Padding
7033 * Boundary is better until we cross under the Memory Line
7034 * Size (the minimum unit of transfer to/from Memory). If we
7035 * have a Padding Boundary which is smaller than the Memory
7036 * Line Size, that'll involve a Read-Modify-Write cycle on the
7037 * Memory Controller which is never good.
7040 /* We want the Packing Boundary to be based on the Cache Line
7041 * Size in order to help avoid False Sharing performance
7042 * issues between CPUs, etc. We also want the Packing
7043 * Boundary to incorporate the PCI-E Maximum Payload Size. We
7044 * get best performance when the Packing Boundary is a
7045 * multiple of the Maximum Payload Size.
7047 pack_align = fl_align;
7048 pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
7050 unsigned int mps, mps_log;
7053 /* The PCIe Device Control Maximum Payload Size field
7054 * [bits 7:5] encodes sizes as powers of 2 starting at
7057 pci_read_config_word(adap->pdev,
7058 pcie_cap + PCI_EXP_DEVCTL,
7060 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7062 if (mps > pack_align)
7066 /* N.B. T5/T6 have a crazy special interpretation of the "0"
7067 * value for the Packing Boundary. This corresponds to 16
7068 * bytes instead of the expected 32 bytes. So if we want 32
7069 * bytes, the best we can really do is 64 bytes ...
7071 if (pack_align <= 16) {
7072 ingpack = INGPACKBOUNDARY_16B_X;
7074 } else if (pack_align == 32) {
7075 ingpack = INGPACKBOUNDARY_64B_X;
7078 unsigned int pack_align_log = fls(pack_align) - 1;
7080 ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7081 fl_align = pack_align;
7084 /* Use the smallest Ingress Padding which isn't smaller than
7085 * the Memory Controller Read/Write Size. We'll take that as
7086 * being 8 bytes since we don't know of any system with a
7087 * wider Memory Controller Bus Width.
7089 if (is_t5(adap->params.chip))
7090 ingpad = INGPADBOUNDARY_32B_X;
7092 ingpad = T6_INGPADBOUNDARY_8B_X;
7094 t4_set_reg_field(adap, SGE_CONTROL_A,
7095 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7096 EGRSTATUSPAGESIZE_F,
7097 INGPADBOUNDARY_V(ingpad) |
7098 EGRSTATUSPAGESIZE_V(stat_len != 64));
7099 t4_set_reg_field(adap, SGE_CONTROL2_A,
7100 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
7101 INGPACKBOUNDARY_V(ingpack));
7104 * Adjust various SGE Free List Host Buffer Sizes.
7106 * This is something of a crock since we're using fixed indices into
7107 * the array which are also known by the sge.c code and the T4
7108 * Firmware Configuration File. We need to come up with a much better
7109 * approach to managing this array. For now, the first four entries
7114 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7115 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7117 * For the single-MTU buffers in unpacked mode we need to include
7118 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7119 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7120 * Padding boundary. All of these are accommodated in the Factory
7121 * Default Firmware Configuration File but we need to adjust it for
7122 * this host's cache line size.
7124 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
7125 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7126 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
7128 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7129 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
7132 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7138 * t4_fw_initialize - ask FW to initialize the device
7139 * @adap: the adapter
7140 * @mbox: mailbox to use for the FW command
7142 * Issues a command to FW to partially initialize the device. This
7143 * performs initialization that generally doesn't depend on user input.
7145 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7147 struct fw_initialize_cmd c;
7149 memset(&c, 0, sizeof(c));
7150 INIT_CMD(c, INITIALIZE, WRITE);
7151 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7155 * t4_query_params_rw - query FW or device parameters
7156 * @adap: the adapter
7157 * @mbox: mailbox to use for the FW command
7160 * @nparams: the number of parameters
7161 * @params: the parameter names
7162 * @val: the parameter values
7163 * @rw: Write and read flag
7164 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7166 * Reads the value of FW or device parameters. Up to 7 parameters can be
7169 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7170 unsigned int vf, unsigned int nparams, const u32 *params,
7171 u32 *val, int rw, bool sleep_ok)
7174 struct fw_params_cmd c;
7175 __be32 *p = &c.param[0].mnem;
7180 memset(&c, 0, sizeof(c));
7181 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7182 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7183 FW_PARAMS_CMD_PFN_V(pf) |
7184 FW_PARAMS_CMD_VFN_V(vf));
7185 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7187 for (i = 0; i < nparams; i++) {
7188 *p++ = cpu_to_be32(*params++);
7190 *p = cpu_to_be32(*(val + i));
7194 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7196 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7197 *val++ = be32_to_cpu(*p);
7201 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7202 unsigned int vf, unsigned int nparams, const u32 *params,
7205 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7209 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7210 unsigned int vf, unsigned int nparams, const u32 *params,
7213 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7218 * t4_set_params_timeout - sets FW or device parameters
7219 * @adap: the adapter
7220 * @mbox: mailbox to use for the FW command
7223 * @nparams: the number of parameters
7224 * @params: the parameter names
7225 * @val: the parameter values
7226 * @timeout: the timeout time
7228 * Sets the value of FW or device parameters. Up to 7 parameters can be
7229 * specified at once.
7231 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7232 unsigned int pf, unsigned int vf,
7233 unsigned int nparams, const u32 *params,
7234 const u32 *val, int timeout)
7236 struct fw_params_cmd c;
7237 __be32 *p = &c.param[0].mnem;
7242 memset(&c, 0, sizeof(c));
7243 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7244 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7245 FW_PARAMS_CMD_PFN_V(pf) |
7246 FW_PARAMS_CMD_VFN_V(vf));
7247 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7250 *p++ = cpu_to_be32(*params++);
7251 *p++ = cpu_to_be32(*val++);
7254 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7258 * t4_set_params - sets FW or device parameters
7259 * @adap: the adapter
7260 * @mbox: mailbox to use for the FW command
7263 * @nparams: the number of parameters
7264 * @params: the parameter names
7265 * @val: the parameter values
7267 * Sets the value of FW or device parameters. Up to 7 parameters can be
7268 * specified at once.
7270 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7271 unsigned int vf, unsigned int nparams, const u32 *params,
7274 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7275 FW_CMD_MAX_TIMEOUT);
7279 * t4_cfg_pfvf - configure PF/VF resource limits
7280 * @adap: the adapter
7281 * @mbox: mailbox to use for the FW command
7282 * @pf: the PF being configured
7283 * @vf: the VF being configured
7284 * @txq: the max number of egress queues
7285 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7286 * @rxqi: the max number of interrupt-capable ingress queues
7287 * @rxq: the max number of interruptless ingress queues
7288 * @tc: the PCI traffic class
7289 * @vi: the max number of virtual interfaces
7290 * @cmask: the channel access rights mask for the PF/VF
7291 * @pmask: the port access rights mask for the PF/VF
7292 * @nexact: the maximum number of exact MPS filters
7293 * @rcaps: read capabilities
7294 * @wxcaps: write/execute capabilities
7296 * Configures resource limits and capabilities for a physical or virtual
7299 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7300 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7301 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7302 unsigned int vi, unsigned int cmask, unsigned int pmask,
7303 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7305 struct fw_pfvf_cmd c;
7307 memset(&c, 0, sizeof(c));
7308 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7309 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7310 FW_PFVF_CMD_VFN_V(vf));
7311 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7312 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7313 FW_PFVF_CMD_NIQ_V(rxq));
7314 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7315 FW_PFVF_CMD_PMASK_V(pmask) |
7316 FW_PFVF_CMD_NEQ_V(txq));
7317 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7318 FW_PFVF_CMD_NVI_V(vi) |
7319 FW_PFVF_CMD_NEXACTF_V(nexact));
7320 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7321 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7322 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7323 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7327 * t4_alloc_vi - allocate a virtual interface
7328 * @adap: the adapter
7329 * @mbox: mailbox to use for the FW command
7330 * @port: physical port associated with the VI
7331 * @pf: the PF owning the VI
7332 * @vf: the VF owning the VI
7333 * @nmac: number of MAC addresses needed (1 to 5)
7334 * @mac: the MAC addresses of the VI
7335 * @rss_size: size of RSS table slice associated with this VI
7337 * Allocates a virtual interface for the given physical port. If @mac is
7338 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7339 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7340 * stored consecutively so the space needed is @nmac * 6 bytes.
7341 * Returns a negative error number or the non-negative VI id.
7343 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7344 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7345 unsigned int *rss_size)
7350 memset(&c, 0, sizeof(c));
7351 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7352 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7353 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7354 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7355 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7358 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7363 memcpy(mac, c.mac, sizeof(c.mac));
7366 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7368 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7370 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7372 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7376 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7377 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7381 * t4_free_vi - free a virtual interface
7382 * @adap: the adapter
7383 * @mbox: mailbox to use for the FW command
7384 * @pf: the PF owning the VI
7385 * @vf: the VF owning the VI
7386 * @viid: virtual interface identifiler
7388 * Free a previously allocated virtual interface.
7390 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7391 unsigned int vf, unsigned int viid)
7395 memset(&c, 0, sizeof(c));
7396 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7399 FW_VI_CMD_PFN_V(pf) |
7400 FW_VI_CMD_VFN_V(vf));
7401 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7402 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7404 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7408 * t4_set_rxmode - set Rx properties of a virtual interface
7409 * @adap: the adapter
7410 * @mbox: mailbox to use for the FW command
7412 * @mtu: the new MTU or -1
7413 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7414 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7415 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7416 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7417 * @sleep_ok: if true we may sleep while awaiting command completion
7419 * Sets Rx properties of a virtual interface.
7421 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7422 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7425 struct fw_vi_rxmode_cmd c;
7427 /* convert to FW values */
7429 mtu = FW_RXMODE_MTU_NO_CHG;
7431 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7433 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7435 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7437 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7439 memset(&c, 0, sizeof(c));
7440 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7441 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7442 FW_VI_RXMODE_CMD_VIID_V(viid));
7443 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7445 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7446 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7447 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7448 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7449 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7450 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7454 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7455 * @adap: the adapter
7456 * @mbox: mailbox to use for the FW command
7458 * @free: if true any existing filters for this VI id are first removed
7459 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7460 * @addr: the MAC address(es)
7461 * @idx: where to store the index of each allocated filter
7462 * @hash: pointer to hash address filter bitmap
7463 * @sleep_ok: call is allowed to sleep
7465 * Allocates an exact-match filter for each of the supplied addresses and
7466 * sets it to the corresponding address. If @idx is not %NULL it should
7467 * have at least @naddr entries, each of which will be set to the index of
7468 * the filter allocated for the corresponding MAC address. If a filter
7469 * could not be allocated for an address its index is set to 0xffff.
7470 * If @hash is not %NULL addresses that fail to allocate an exact filter
7471 * are hashed and update the hash filter bitmap pointed at by @hash.
7473 * Returns a negative error number or the number of filters allocated.
7475 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7476 unsigned int viid, bool free, unsigned int naddr,
7477 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7479 int offset, ret = 0;
7480 struct fw_vi_mac_cmd c;
7481 unsigned int nfilters = 0;
7482 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7483 unsigned int rem = naddr;
7485 if (naddr > max_naddr)
7488 for (offset = 0; offset < naddr ; /**/) {
7489 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
7490 rem : ARRAY_SIZE(c.u.exact));
7491 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7492 u.exact[fw_naddr]), 16);
7493 struct fw_vi_mac_exact *p;
7496 memset(&c, 0, sizeof(c));
7497 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7500 FW_CMD_EXEC_V(free) |
7501 FW_VI_MAC_CMD_VIID_V(viid));
7502 c.freemacs_to_len16 =
7503 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
7504 FW_CMD_LEN16_V(len16));
7506 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7508 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7509 FW_VI_MAC_CMD_IDX_V(
7510 FW_VI_MAC_ADD_MAC));
7511 memcpy(p->macaddr, addr[offset + i],
7512 sizeof(p->macaddr));
7515 /* It's okay if we run out of space in our MAC address arena.
7516 * Some of the addresses we submit may get stored so we need
7517 * to run through the reply to see what the results were ...
7519 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7520 if (ret && ret != -FW_ENOMEM)
7523 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7524 u16 index = FW_VI_MAC_CMD_IDX_G(
7525 be16_to_cpu(p->valid_to_idx));
7528 idx[offset + i] = (index >= max_naddr ?
7530 if (index < max_naddr)
7534 hash_mac_addr(addr[offset + i]));
7542 if (ret == 0 || ret == -FW_ENOMEM)
7548 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
7549 * @adap: the adapter
7550 * @mbox: mailbox to use for the FW command
7552 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7553 * @addr: the MAC address(es)
7554 * @sleep_ok: call is allowed to sleep
7556 * Frees the exact-match filter for each of the supplied addresses
7558 * Returns a negative error number or the number of filters freed.
7560 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
7561 unsigned int viid, unsigned int naddr,
7562 const u8 **addr, bool sleep_ok)
7564 int offset, ret = 0;
7565 struct fw_vi_mac_cmd c;
7566 unsigned int nfilters = 0;
7567 unsigned int max_naddr = is_t4(adap->params.chip) ?
7568 NUM_MPS_CLS_SRAM_L_INSTANCES :
7569 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7570 unsigned int rem = naddr;
7572 if (naddr > max_naddr)
7575 for (offset = 0; offset < (int)naddr ; /**/) {
7576 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7578 : ARRAY_SIZE(c.u.exact));
7579 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7580 u.exact[fw_naddr]), 16);
7581 struct fw_vi_mac_exact *p;
7584 memset(&c, 0, sizeof(c));
7585 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7589 FW_VI_MAC_CMD_VIID_V(viid));
7590 c.freemacs_to_len16 =
7591 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7592 FW_CMD_LEN16_V(len16));
7594 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
7595 p->valid_to_idx = cpu_to_be16(
7596 FW_VI_MAC_CMD_VALID_F |
7597 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
7598 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7601 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7605 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7606 u16 index = FW_VI_MAC_CMD_IDX_G(
7607 be16_to_cpu(p->valid_to_idx));
7609 if (index < max_naddr)
7623 * t4_change_mac - modifies the exact-match filter for a MAC address
7624 * @adap: the adapter
7625 * @mbox: mailbox to use for the FW command
7627 * @idx: index of existing filter for old value of MAC address, or -1
7628 * @addr: the new MAC address value
7629 * @persist: whether a new MAC allocation should be persistent
7630 * @add_smt: if true also add the address to the HW SMT
7632 * Modifies an exact-match filter and sets it to the new MAC address.
7633 * Note that in general it is not possible to modify the value of a given
7634 * filter so the generic way to modify an address filter is to free the one
7635 * being used by the old address value and allocate a new filter for the
7636 * new address value. @idx can be -1 if the address is a new addition.
7638 * Returns a negative error number or the index of the filter with the new
7641 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7642 int idx, const u8 *addr, bool persist, bool add_smt)
7645 struct fw_vi_mac_cmd c;
7646 struct fw_vi_mac_exact *p = c.u.exact;
7647 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
7649 if (idx < 0) /* new allocation */
7650 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7651 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7653 memset(&c, 0, sizeof(c));
7654 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7655 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7656 FW_VI_MAC_CMD_VIID_V(viid));
7657 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
7658 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7659 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
7660 FW_VI_MAC_CMD_IDX_V(idx));
7661 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7663 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7665 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7666 if (ret >= max_mac_addr)
7673 * t4_set_addr_hash - program the MAC inexact-match hash filter
7674 * @adap: the adapter
7675 * @mbox: mailbox to use for the FW command
7677 * @ucast: whether the hash filter should also match unicast addresses
7678 * @vec: the value to be written to the hash filter
7679 * @sleep_ok: call is allowed to sleep
7681 * Sets the 64-bit inexact-match hash filter for a virtual interface.
7683 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
7684 bool ucast, u64 vec, bool sleep_ok)
7686 struct fw_vi_mac_cmd c;
7688 memset(&c, 0, sizeof(c));
7689 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7690 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7691 FW_VI_ENABLE_CMD_VIID_V(viid));
7692 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
7693 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
7695 c.u.hash.hashvec = cpu_to_be64(vec);
7696 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7700 * t4_enable_vi_params - enable/disable a virtual interface
7701 * @adap: the adapter
7702 * @mbox: mailbox to use for the FW command
7704 * @rx_en: 1=enable Rx, 0=disable Rx
7705 * @tx_en: 1=enable Tx, 0=disable Tx
7706 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
7708 * Enables/disables a virtual interface. Note that setting DCB Enable
7709 * only makes sense when enabling a Virtual Interface ...
7711 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
7712 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
7714 struct fw_vi_enable_cmd c;
7716 memset(&c, 0, sizeof(c));
7717 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7718 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7719 FW_VI_ENABLE_CMD_VIID_V(viid));
7720 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
7721 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
7722 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
7724 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
7728 * t4_enable_vi - enable/disable a virtual interface
7729 * @adap: the adapter
7730 * @mbox: mailbox to use for the FW command
7732 * @rx_en: 1=enable Rx, 0=disable Rx
7733 * @tx_en: 1=enable Tx, 0=disable Tx
7735 * Enables/disables a virtual interface.
7737 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
7738 bool rx_en, bool tx_en)
7740 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
7744 * t4_identify_port - identify a VI's port by blinking its LED
7745 * @adap: the adapter
7746 * @mbox: mailbox to use for the FW command
7748 * @nblinks: how many times to blink LED at 2.5 Hz
7750 * Identifies a VI's port by blinking its LED.
7752 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
7753 unsigned int nblinks)
7755 struct fw_vi_enable_cmd c;
7757 memset(&c, 0, sizeof(c));
7758 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
7759 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7760 FW_VI_ENABLE_CMD_VIID_V(viid));
7761 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
7762 c.blinkdur = cpu_to_be16(nblinks);
7763 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7767 * t4_iq_stop - stop an ingress queue and its FLs
7768 * @adap: the adapter
7769 * @mbox: mailbox to use for the FW command
7770 * @pf: the PF owning the queues
7771 * @vf: the VF owning the queues
7772 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
7773 * @iqid: ingress queue id
7774 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7775 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7777 * Stops an ingress queue and its associated FLs, if any. This causes
7778 * any current or future data/messages destined for these queues to be
7781 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
7782 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7783 unsigned int fl0id, unsigned int fl1id)
7787 memset(&c, 0, sizeof(c));
7788 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7789 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7790 FW_IQ_CMD_VFN_V(vf));
7791 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
7792 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7793 c.iqid = cpu_to_be16(iqid);
7794 c.fl0id = cpu_to_be16(fl0id);
7795 c.fl1id = cpu_to_be16(fl1id);
7796 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7800 * t4_iq_free - free an ingress queue and its FLs
7801 * @adap: the adapter
7802 * @mbox: mailbox to use for the FW command
7803 * @pf: the PF owning the queues
7804 * @vf: the VF owning the queues
7805 * @iqtype: the ingress queue type
7806 * @iqid: ingress queue id
7807 * @fl0id: FL0 queue id or 0xffff if no attached FL0
7808 * @fl1id: FL1 queue id or 0xffff if no attached FL1
7810 * Frees an ingress queue and its associated FLs, if any.
7812 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7813 unsigned int vf, unsigned int iqtype, unsigned int iqid,
7814 unsigned int fl0id, unsigned int fl1id)
7818 memset(&c, 0, sizeof(c));
7819 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
7820 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
7821 FW_IQ_CMD_VFN_V(vf));
7822 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
7823 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
7824 c.iqid = cpu_to_be16(iqid);
7825 c.fl0id = cpu_to_be16(fl0id);
7826 c.fl1id = cpu_to_be16(fl1id);
7827 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7831 * t4_eth_eq_free - free an Ethernet egress queue
7832 * @adap: the adapter
7833 * @mbox: mailbox to use for the FW command
7834 * @pf: the PF owning the queue
7835 * @vf: the VF owning the queue
7836 * @eqid: egress queue id
7838 * Frees an Ethernet egress queue.
7840 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7841 unsigned int vf, unsigned int eqid)
7843 struct fw_eq_eth_cmd c;
7845 memset(&c, 0, sizeof(c));
7846 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
7847 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7848 FW_EQ_ETH_CMD_PFN_V(pf) |
7849 FW_EQ_ETH_CMD_VFN_V(vf));
7850 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
7851 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
7852 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7856 * t4_ctrl_eq_free - free a control egress queue
7857 * @adap: the adapter
7858 * @mbox: mailbox to use for the FW command
7859 * @pf: the PF owning the queue
7860 * @vf: the VF owning the queue
7861 * @eqid: egress queue id
7863 * Frees a control egress queue.
7865 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7866 unsigned int vf, unsigned int eqid)
7868 struct fw_eq_ctrl_cmd c;
7870 memset(&c, 0, sizeof(c));
7871 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
7872 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7873 FW_EQ_CTRL_CMD_PFN_V(pf) |
7874 FW_EQ_CTRL_CMD_VFN_V(vf));
7875 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
7876 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
7877 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7881 * t4_ofld_eq_free - free an offload egress queue
7882 * @adap: the adapter
7883 * @mbox: mailbox to use for the FW command
7884 * @pf: the PF owning the queue
7885 * @vf: the VF owning the queue
7886 * @eqid: egress queue id
7888 * Frees a control egress queue.
7890 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
7891 unsigned int vf, unsigned int eqid)
7893 struct fw_eq_ofld_cmd c;
7895 memset(&c, 0, sizeof(c));
7896 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
7897 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
7898 FW_EQ_OFLD_CMD_PFN_V(pf) |
7899 FW_EQ_OFLD_CMD_VFN_V(vf));
7900 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
7901 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
7902 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7906 * t4_link_down_rc_str - return a string for a Link Down Reason Code
7907 * @adap: the adapter
7908 * @link_down_rc: Link Down Reason Code
7910 * Returns a string representation of the Link Down Reason Code.
7912 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
7914 static const char * const reason[] = {
7917 "Auto-negotiation Failure",
7919 "Insufficient Airflow",
7920 "Unable To Determine Reason",
7921 "No RX Signal Detected",
7925 if (link_down_rc >= ARRAY_SIZE(reason))
7926 return "Bad Reason Code";
7928 return reason[link_down_rc];
7932 * Return the highest speed set in the port capabilities, in Mb/s.
7934 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
7936 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
7938 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7942 TEST_SPEED_RETURN(400G, 400000);
7943 TEST_SPEED_RETURN(200G, 200000);
7944 TEST_SPEED_RETURN(100G, 100000);
7945 TEST_SPEED_RETURN(50G, 50000);
7946 TEST_SPEED_RETURN(40G, 40000);
7947 TEST_SPEED_RETURN(25G, 25000);
7948 TEST_SPEED_RETURN(10G, 10000);
7949 TEST_SPEED_RETURN(1G, 1000);
7950 TEST_SPEED_RETURN(100M, 100);
7952 #undef TEST_SPEED_RETURN
7958 * fwcap_to_fwspeed - return highest speed in Port Capabilities
7959 * @acaps: advertised Port Capabilities
7961 * Get the highest speed for the port from the advertised Port
7962 * Capabilities. It will be either the highest speed from the list of
7963 * speeds or whatever user has set using ethtool.
7965 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
7967 #define TEST_SPEED_RETURN(__caps_speed) \
7969 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
7970 return FW_PORT_CAP32_SPEED_##__caps_speed; \
7973 TEST_SPEED_RETURN(400G);
7974 TEST_SPEED_RETURN(200G);
7975 TEST_SPEED_RETURN(100G);
7976 TEST_SPEED_RETURN(50G);
7977 TEST_SPEED_RETURN(40G);
7978 TEST_SPEED_RETURN(25G);
7979 TEST_SPEED_RETURN(10G);
7980 TEST_SPEED_RETURN(1G);
7981 TEST_SPEED_RETURN(100M);
7983 #undef TEST_SPEED_RETURN
7989 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
7990 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
7992 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
7993 * 32-bit Port Capabilities value.
7995 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
7997 fw_port_cap32_t linkattr = 0;
7999 /* Unfortunately the format of the Link Status in the old
8000 * 16-bit Port Information message isn't the same as the
8001 * 16-bit Port Capabilities bitfield used everywhere else ...
8003 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8004 linkattr |= FW_PORT_CAP32_FC_RX;
8005 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8006 linkattr |= FW_PORT_CAP32_FC_TX;
8007 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8008 linkattr |= FW_PORT_CAP32_SPEED_100M;
8009 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8010 linkattr |= FW_PORT_CAP32_SPEED_1G;
8011 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8012 linkattr |= FW_PORT_CAP32_SPEED_10G;
8013 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8014 linkattr |= FW_PORT_CAP32_SPEED_25G;
8015 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8016 linkattr |= FW_PORT_CAP32_SPEED_40G;
8017 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8018 linkattr |= FW_PORT_CAP32_SPEED_100G;
8024 * t4_handle_get_port_info - process a FW reply message
8025 * @pi: the port info
8026 * @rpl: start of the FW message
8028 * Processes a GET_PORT_INFO FW reply message.
8030 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8032 const struct fw_port_cmd *cmd = (const void *)rpl;
8033 int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8034 struct adapter *adapter = pi->adapter;
8035 struct link_config *lc = &pi->link_cfg;
8036 int link_ok, linkdnrc;
8037 enum fw_port_type port_type;
8038 enum fw_port_module_type mod_type;
8039 unsigned int speed, fc, fec;
8040 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8042 /* Extract the various fields from the Port Information message.
8045 case FW_PORT_ACTION_GET_PORT_INFO: {
8046 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8048 link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8049 linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8050 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8051 mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8052 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8053 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8054 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8055 linkattr = lstatus_to_fwcap(lstatus);
8059 case FW_PORT_ACTION_GET_PORT_INFO32: {
8062 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8063 link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8064 linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8065 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8066 mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8067 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8068 acaps = be32_to_cpu(cmd->u.info32.acaps32);
8069 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8070 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8075 dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8076 be32_to_cpu(cmd->action_to_len16));
8080 fec = fwcap_to_cc_fec(acaps);
8081 fc = fwcap_to_cc_pause(linkattr);
8082 speed = fwcap_to_speed(linkattr);
8084 if (mod_type != pi->mod_type) {
8085 /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8086 * various fundamental Port Capabilities which used to be
8087 * immutable can now change radically. We can now have
8088 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8089 * all change based on what Transceiver Module is inserted.
8090 * So we need to record the Physical "Port" Capabilities on
8091 * every Transceiver Module change.
8095 /* When a new Transceiver Module is inserted, the Firmware
8096 * will examine its i2c EPROM to determine its type and
8097 * general operating parameters including things like Forward
8098 * Error Control, etc. Various IEEE 802.3 standards dictate
8099 * how to interpret these i2c values to determine default
8100 * "sutomatic" settings. We record these for future use when
8101 * the user explicitly requests these standards-based values.
8103 lc->def_acaps = acaps;
8105 /* Some versions of the early T6 Firmware "cheated" when
8106 * handling different Transceiver Modules by changing the
8107 * underlaying Port Type reported to the Host Drivers. As
8108 * such we need to capture whatever Port Type the Firmware
8109 * sends us and record it in case it's different from what we
8110 * were told earlier. Unfortunately, since Firmware is
8111 * forever, we'll need to keep this code here forever, but in
8112 * later T6 Firmware it should just be an assignment of the
8113 * same value already recorded.
8115 pi->port_type = port_type;
8117 pi->mod_type = mod_type;
8118 t4_os_portmod_changed(adapter, pi->port_id);
8121 if (link_ok != lc->link_ok || speed != lc->speed ||
8122 fc != lc->fc || fec != lc->fec) { /* something changed */
8123 if (!link_ok && lc->link_ok) {
8124 lc->link_down_rc = linkdnrc;
8125 dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
8126 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
8128 lc->link_ok = link_ok;
8133 lc->lpacaps = lpacaps;
8134 lc->acaps = acaps & ADVERT_MASK;
8136 if (lc->acaps & FW_PORT_CAP32_ANEG) {
8137 lc->autoneg = AUTONEG_ENABLE;
8139 /* When Autoneg is disabled, user needs to set
8141 * Similar to cxgb4_ethtool.c: set_link_ksettings
8144 lc->speed_caps = fwcap_to_fwspeed(acaps);
8145 lc->autoneg = AUTONEG_DISABLE;
8148 t4_os_link_changed(adapter, pi->port_id, link_ok);
8153 * t4_update_port_info - retrieve and update port information if changed
8154 * @pi: the port_info
8156 * We issue a Get Port Information Command to the Firmware and, if
8157 * successful, we check to see if anything is different from what we
8158 * last recorded and update things accordingly.
8160 int t4_update_port_info(struct port_info *pi)
8162 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8163 struct fw_port_cmd port_cmd;
8166 memset(&port_cmd, 0, sizeof(port_cmd));
8167 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8168 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8169 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8170 port_cmd.action_to_len16 = cpu_to_be32(
8171 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8172 ? FW_PORT_ACTION_GET_PORT_INFO
8173 : FW_PORT_ACTION_GET_PORT_INFO32) |
8174 FW_LEN16(port_cmd));
8175 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8176 &port_cmd, sizeof(port_cmd), &port_cmd);
8180 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8185 * t4_get_link_params - retrieve basic link parameters for given port
8187 * @link_okp: value return pointer for link up/down
8188 * @speedp: value return pointer for speed (Mb/s)
8189 * @mtup: value return pointer for mtu
8191 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8192 * and MTU for a specified port. A negative error is returned on
8193 * failure; 0 on success.
8195 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8196 unsigned int *speedp, unsigned int *mtup)
8198 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8199 struct fw_port_cmd port_cmd;
8200 unsigned int action, link_ok, speed, mtu;
8201 fw_port_cap32_t linkattr;
8204 memset(&port_cmd, 0, sizeof(port_cmd));
8205 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8206 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8207 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8208 action = (fw_caps == FW_CAPS16
8209 ? FW_PORT_ACTION_GET_PORT_INFO
8210 : FW_PORT_ACTION_GET_PORT_INFO32);
8211 port_cmd.action_to_len16 = cpu_to_be32(
8212 FW_PORT_CMD_ACTION_V(action) |
8213 FW_LEN16(port_cmd));
8214 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8215 &port_cmd, sizeof(port_cmd), &port_cmd);
8219 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8220 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8222 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8223 linkattr = lstatus_to_fwcap(lstatus);
8224 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8227 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8229 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8230 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8231 mtu = FW_PORT_CMD_MTU32_G(
8232 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8234 speed = fwcap_to_speed(linkattr);
8236 *link_okp = link_ok;
8237 *speedp = fwcap_to_speed(linkattr);
8244 * t4_handle_fw_rpl - process a FW reply message
8245 * @adap: the adapter
8246 * @rpl: start of the FW message
8248 * Processes a FW message, such as link state change messages.
8250 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8252 u8 opcode = *(const u8 *)rpl;
8254 /* This might be a port command ... this simplifies the following
8255 * conditionals ... We can get away with pre-dereferencing
8256 * action_to_len16 because it's in the first 16 bytes and all messages
8257 * will be at least that long.
8259 const struct fw_port_cmd *p = (const void *)rpl;
8260 unsigned int action =
8261 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8263 if (opcode == FW_PORT_CMD &&
8264 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8265 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8267 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8268 struct port_info *pi = NULL;
8270 for_each_port(adap, i) {
8271 pi = adap2pinfo(adap, i);
8272 if (pi->tx_chan == chan)
8276 t4_handle_get_port_info(pi, rpl);
8278 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8285 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8289 if (pci_is_pcie(adapter->pdev)) {
8290 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8291 p->speed = val & PCI_EXP_LNKSTA_CLS;
8292 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8297 * init_link_config - initialize a link's SW state
8298 * @lc: pointer to structure holding the link state
8299 * @pcaps: link Port Capabilities
8300 * @acaps: link current Advertised Port Capabilities
8302 * Initializes the SW state maintained for each link, including the link's
8303 * capabilities and default speed/flow-control/autonegotiation settings.
8305 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8306 fw_port_cap32_t acaps)
8309 lc->def_acaps = acaps;
8313 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8315 /* For Forward Error Control, we default to whatever the Firmware
8316 * tells us the Link is currently advertising.
8318 lc->requested_fec = FEC_AUTO;
8319 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8321 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8322 lc->acaps = lc->pcaps & ADVERT_MASK;
8323 lc->autoneg = AUTONEG_ENABLE;
8324 lc->requested_fc |= PAUSE_AUTONEG;
8327 lc->autoneg = AUTONEG_DISABLE;
8331 #define CIM_PF_NOACCESS 0xeeeeeeee
8333 int t4_wait_dev_ready(void __iomem *regs)
8337 whoami = readl(regs + PL_WHOAMI_A);
8338 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8342 whoami = readl(regs + PL_WHOAMI_A);
8343 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8347 u32 vendor_and_model_id;
8351 static int t4_get_flash_params(struct adapter *adap)
8353 /* Table for non-Numonix supported flash parts. Numonix parts are left
8354 * to the preexisting code. All flash parts have 64KB sectors.
8356 static struct flash_desc supported_flash[] = {
8357 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8360 unsigned int part, manufacturer;
8361 unsigned int density, size;
8365 /* Issue a Read ID Command to the Flash part. We decode supported
8366 * Flash parts and their sizes from this. There's a newer Query
8367 * Command which can retrieve detailed geometry information but many
8368 * Flash parts don't support it.
8371 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8373 ret = sf1_read(adap, 3, 0, 1, &flashid);
8374 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
8378 /* Check to see if it's one of our non-standard supported Flash parts.
8380 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8381 if (supported_flash[part].vendor_and_model_id == flashid) {
8382 adap->params.sf_size = supported_flash[part].size_mb;
8383 adap->params.sf_nsec =
8384 adap->params.sf_size / SF_SEC_SIZE;
8388 /* Decode Flash part size. The code below looks repetative with
8389 * common encodings, but that's not guaranteed in the JEDEC
8390 * specification for the Read JADEC ID command. The only thing that
8391 * we're guaranteed by the JADEC specification is where the
8392 * Manufacturer ID is in the returned result. After that each
8393 * Manufacturer ~could~ encode things completely differently.
8394 * Note, all Flash parts must have 64KB sectors.
8396 manufacturer = flashid & 0xff;
8397 switch (manufacturer) {
8398 case 0x20: { /* Micron/Numonix */
8399 /* This Density -> Size decoding table is taken from Micron
8402 density = (flashid >> 16) & 0xff;
8404 case 0x14: /* 1MB */
8407 case 0x15: /* 2MB */
8410 case 0x16: /* 4MB */
8413 case 0x17: /* 8MB */
8416 case 0x18: /* 16MB */
8419 case 0x19: /* 32MB */
8422 case 0x20: /* 64MB */
8425 case 0x21: /* 128MB */
8428 case 0x22: /* 256MB */
8433 dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
8439 case 0xc2: { /* Macronix */
8440 /* This Density -> Size decoding table is taken from Macronix
8443 density = (flashid >> 16) & 0xff;
8445 case 0x17: /* 8MB */
8448 case 0x18: /* 16MB */
8452 dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
8458 case 0xef: { /* Winbond */
8459 /* This Density -> Size decoding table is taken from Winbond
8462 density = (flashid >> 16) & 0xff;
8464 case 0x17: /* 8MB */
8467 case 0x18: /* 16MB */
8471 dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
8478 dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
8483 /* Store decoded Flash size and fall through into vetting code. */
8484 adap->params.sf_size = size;
8485 adap->params.sf_nsec = size / SF_SEC_SIZE;
8488 if (adap->params.sf_size < FLASH_MIN_SIZE)
8489 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8490 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
8494 static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
8499 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
8501 pci_read_config_word(adapter->pdev,
8502 pcie_cap + PCI_EXP_DEVCTL2, &val);
8503 val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
8505 pci_write_config_word(adapter->pdev,
8506 pcie_cap + PCI_EXP_DEVCTL2, val);
8511 * t4_prep_adapter - prepare SW and HW for operation
8512 * @adapter: the adapter
8513 * @reset: if true perform a HW reset
8515 * Initialize adapter SW state for the various HW modules, set initial
8516 * values for some adapter tunables, take PHYs out of reset, and
8517 * initialize the MDIO interface.
8519 int t4_prep_adapter(struct adapter *adapter)
8525 get_pci_mode(adapter, &adapter->params.pci);
8526 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
8528 ret = t4_get_flash_params(adapter);
8530 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
8534 /* Retrieve adapter's device ID
8536 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
8537 ver = device_id >> 12;
8538 adapter->params.chip = 0;
8541 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
8542 adapter->params.arch.sge_fl_db = DBPRIO_F;
8543 adapter->params.arch.mps_tcam_size =
8544 NUM_MPS_CLS_SRAM_L_INSTANCES;
8545 adapter->params.arch.mps_rplc_size = 128;
8546 adapter->params.arch.nchan = NCHAN;
8547 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8548 adapter->params.arch.vfcount = 128;
8549 /* Congestion map is for 4 channels so that
8550 * MPS can have 4 priority per port.
8552 adapter->params.arch.cng_ch_bits_log = 2;
8555 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
8556 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
8557 adapter->params.arch.mps_tcam_size =
8558 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8559 adapter->params.arch.mps_rplc_size = 128;
8560 adapter->params.arch.nchan = NCHAN;
8561 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8562 adapter->params.arch.vfcount = 128;
8563 adapter->params.arch.cng_ch_bits_log = 2;
8566 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
8567 adapter->params.arch.sge_fl_db = 0;
8568 adapter->params.arch.mps_tcam_size =
8569 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8570 adapter->params.arch.mps_rplc_size = 256;
8571 adapter->params.arch.nchan = 2;
8572 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
8573 adapter->params.arch.vfcount = 256;
8574 /* Congestion map will be for 2 channels so that
8575 * MPS can have 8 priority per port.
8577 adapter->params.arch.cng_ch_bits_log = 3;
8580 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
8585 adapter->params.cim_la_size = CIMLA_SIZE;
8586 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8589 * Default port for debugging in case we can't reach FW.
8591 adapter->params.nports = 1;
8592 adapter->params.portvec = 1;
8593 adapter->params.vpd.cclk = 50000;
8595 /* Set pci completion timeout value to 4 seconds. */
8596 set_pcie_completion_timeout(adapter, 0xd);
8601 * t4_shutdown_adapter - shut down adapter, host & wire
8602 * @adapter: the adapter
8604 * Perform an emergency shutdown of the adapter and stop it from
8605 * continuing any further communication on the ports or DMA to the
8606 * host. This is typically used when the adapter and/or firmware
8607 * have crashed and we want to prevent any further accidental
8608 * communication with the rest of the world. This will also force
8609 * the port Link Status to go down -- if register writes work --
8610 * which should help our peers figure out that we're down.
8612 int t4_shutdown_adapter(struct adapter *adapter)
8616 t4_intr_disable(adapter);
8617 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
8618 for_each_port(adapter, port) {
8619 u32 a_port_cfg = is_t4(adapter->params.chip) ?
8620 PORT_REG(port, XGMAC_PORT_CFG_A) :
8621 T5_PORT_REG(port, MAC_PORT_CFG_A);
8623 t4_write_reg(adapter, a_port_cfg,
8624 t4_read_reg(adapter, a_port_cfg)
8625 & ~SIGNAL_DET_V(1));
8627 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
8633 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
8634 * @adapter: the adapter
8635 * @qid: the Queue ID
8636 * @qtype: the Ingress or Egress type for @qid
8637 * @user: true if this request is for a user mode queue
8638 * @pbar2_qoffset: BAR2 Queue Offset
8639 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
8641 * Returns the BAR2 SGE Queue Registers information associated with the
8642 * indicated Absolute Queue ID. These are passed back in return value
8643 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
8644 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
8646 * This may return an error which indicates that BAR2 SGE Queue
8647 * registers aren't available. If an error is not returned, then the
8648 * following values are returned:
8650 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
8651 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
8653 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
8654 * require the "Inferred Queue ID" ability may be used. E.g. the
8655 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
8656 * then these "Inferred Queue ID" register may not be used.
8658 int t4_bar2_sge_qregs(struct adapter *adapter,
8660 enum t4_bar2_qtype qtype,
8663 unsigned int *pbar2_qid)
8665 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
8666 u64 bar2_page_offset, bar2_qoffset;
8667 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
8669 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
8670 if (!user && is_t4(adapter->params.chip))
8673 /* Get our SGE Page Size parameters.
8675 page_shift = adapter->params.sge.hps + 10;
8676 page_size = 1 << page_shift;
8678 /* Get the right Queues per Page parameters for our Queue.
8680 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
8681 ? adapter->params.sge.eq_qpp
8682 : adapter->params.sge.iq_qpp);
8683 qpp_mask = (1 << qpp_shift) - 1;
8685 /* Calculate the basics of the BAR2 SGE Queue register area:
8686 * o The BAR2 page the Queue registers will be in.
8687 * o The BAR2 Queue ID.
8688 * o The BAR2 Queue ID Offset into the BAR2 page.
8690 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
8691 bar2_qid = qid & qpp_mask;
8692 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
8694 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
8695 * hardware will infer the Absolute Queue ID simply from the writes to
8696 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
8697 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
8698 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
8699 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
8700 * from the BAR2 Page and BAR2 Queue ID.
8702 * One important censequence of this is that some BAR2 SGE registers
8703 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
8704 * there. But other registers synthesize the SGE Queue ID purely
8705 * from the writes to the registers -- the Write Combined Doorbell
8706 * Buffer is a good example. These BAR2 SGE Registers are only
8707 * available for those BAR2 SGE Register areas where the SGE Absolute
8708 * Queue ID can be inferred from simple writes.
8710 bar2_qoffset = bar2_page_offset;
8711 bar2_qinferred = (bar2_qid_offset < page_size);
8712 if (bar2_qinferred) {
8713 bar2_qoffset += bar2_qid_offset;
8717 *pbar2_qoffset = bar2_qoffset;
8718 *pbar2_qid = bar2_qid;
8723 * t4_init_devlog_params - initialize adapter->params.devlog
8724 * @adap: the adapter
8726 * Initialize various fields of the adapter's Firmware Device Log
8727 * Parameters structure.
8729 int t4_init_devlog_params(struct adapter *adap)
8731 struct devlog_params *dparams = &adap->params.devlog;
8733 unsigned int devlog_meminfo;
8734 struct fw_devlog_cmd devlog_cmd;
8737 /* If we're dealing with newer firmware, the Device Log Paramerters
8738 * are stored in a designated register which allows us to access the
8739 * Device Log even if we can't talk to the firmware.
8742 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
8744 unsigned int nentries, nentries128;
8746 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
8747 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
8749 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
8750 nentries = (nentries128 + 1) * 128;
8751 dparams->size = nentries * sizeof(struct fw_devlog_e);
8756 /* Otherwise, ask the firmware for it's Device Log Parameters.
8758 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
8759 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
8760 FW_CMD_REQUEST_F | FW_CMD_READ_F);
8761 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
8762 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
8768 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
8769 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
8770 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
8771 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
8777 * t4_init_sge_params - initialize adap->params.sge
8778 * @adapter: the adapter
8780 * Initialize various fields of the adapter's SGE Parameters structure.
8782 int t4_init_sge_params(struct adapter *adapter)
8784 struct sge_params *sge_params = &adapter->params.sge;
8786 unsigned int s_hps, s_qpp;
8788 /* Extract the SGE Page Size for our PF.
8790 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
8791 s_hps = (HOSTPAGESIZEPF0_S +
8792 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
8793 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
8795 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
8797 s_qpp = (QUEUESPERPAGEPF0_S +
8798 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
8799 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
8800 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8801 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
8802 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
8808 * t4_init_tp_params - initialize adap->params.tp
8809 * @adap: the adapter
8810 * @sleep_ok: if true we may sleep while awaiting command completion
8812 * Initialize various fields of the adapter's TP Parameters structure.
8814 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
8819 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
8820 adap->params.tp.tre = TIMERRESOLUTION_G(v);
8821 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
8823 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
8824 for (chan = 0; chan < NCHAN; chan++)
8825 adap->params.tp.tx_modq[chan] = chan;
8827 /* Cache the adapter's Compressed Filter Mode and global Incress
8830 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
8831 TP_VLAN_PRI_MAP_A, sleep_ok);
8832 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
8833 TP_INGRESS_CONFIG_A, sleep_ok);
8835 /* For T6, cache the adapter's compressed error vector
8836 * and passing outer header info for encapsulated packets.
8838 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
8839 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
8840 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
8843 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
8844 * shift positions of several elements of the Compressed Filter Tuple
8845 * for this adapter which we need frequently ...
8847 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
8848 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
8849 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
8850 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
8851 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
8852 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
8854 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
8856 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
8858 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
8860 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
8863 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
8864 * represents the presence of an Outer VLAN instead of a VNIC ID.
8866 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
8867 adap->params.tp.vnic_shift = -1;
8869 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
8870 adap->params.tp.hash_filter_mask = v;
8871 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
8872 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
8877 * t4_filter_field_shift - calculate filter field shift
8878 * @adap: the adapter
8879 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
8881 * Return the shift position of a filter field within the Compressed
8882 * Filter Tuple. The filter field is specified via its selection bit
8883 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
8885 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
8887 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
8891 if ((filter_mode & filter_sel) == 0)
8894 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
8895 switch (filter_mode & sel) {
8897 field_shift += FT_FCOE_W;
8900 field_shift += FT_PORT_W;
8903 field_shift += FT_VNIC_ID_W;
8906 field_shift += FT_VLAN_W;
8909 field_shift += FT_TOS_W;
8912 field_shift += FT_PROTOCOL_W;
8915 field_shift += FT_ETHERTYPE_W;
8918 field_shift += FT_MACMATCH_W;
8921 field_shift += FT_MPSHITTYPE_W;
8923 case FRAGMENTATION_F:
8924 field_shift += FT_FRAGMENTATION_W;
8931 int t4_init_rss_mode(struct adapter *adap, int mbox)
8934 struct fw_rss_vi_config_cmd rvc;
8936 memset(&rvc, 0, sizeof(rvc));
8938 for_each_port(adap, i) {
8939 struct port_info *p = adap2pinfo(adap, i);
8942 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
8943 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8944 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
8945 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
8946 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
8949 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
8955 * t4_init_portinfo - allocate a virtual interface and initialize port_info
8956 * @pi: the port_info
8957 * @mbox: mailbox to use for the FW command
8958 * @port: physical port associated with the VI
8959 * @pf: the PF owning the VI
8960 * @vf: the VF owning the VI
8961 * @mac: the MAC address of the VI
8963 * Allocates a virtual interface for the given physical port. If @mac is
8964 * not %NULL it contains the MAC address of the VI as assigned by FW.
8965 * @mac should be large enough to hold an Ethernet address.
8966 * Returns < 0 on error.
8968 int t4_init_portinfo(struct port_info *pi, int mbox,
8969 int port, int pf, int vf, u8 mac[])
8971 struct adapter *adapter = pi->adapter;
8972 unsigned int fw_caps = adapter->params.fw_caps_support;
8973 struct fw_port_cmd cmd;
8974 unsigned int rss_size;
8975 enum fw_port_type port_type;
8977 fw_port_cap32_t pcaps, acaps;
8980 /* If we haven't yet determined whether we're talking to Firmware
8981 * which knows the new 32-bit Port Capabilities, it's time to find
8982 * out now. This will also tell new Firmware to send us Port Status
8983 * Updates using the new 32-bit Port Capabilities version of the
8984 * Port Information message.
8986 if (fw_caps == FW_CAPS_UNKNOWN) {
8989 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
8990 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
8992 ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val);
8993 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
8994 adapter->params.fw_caps_support = fw_caps;
8997 memset(&cmd, 0, sizeof(cmd));
8998 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8999 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9000 FW_PORT_CMD_PORTID_V(port));
9001 cmd.action_to_len16 = cpu_to_be32(
9002 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9003 ? FW_PORT_ACTION_GET_PORT_INFO
9004 : FW_PORT_ACTION_GET_PORT_INFO32) |
9006 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9010 /* Extract the various fields from the Port Information message.
9012 if (fw_caps == FW_CAPS16) {
9013 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9015 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9016 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9017 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9019 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9020 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9022 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9024 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9025 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9026 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9028 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9029 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9032 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
9039 pi->rss_size = rss_size;
9041 pi->port_type = port_type;
9042 pi->mdio_addr = mdio_addr;
9043 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9045 init_link_config(&pi->link_cfg, pcaps, acaps);
9049 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9054 for_each_port(adap, i) {
9055 struct port_info *pi = adap2pinfo(adap, i);
9057 while ((adap->params.portvec & (1 << j)) == 0)
9060 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9064 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9071 * t4_read_cimq_cfg - read CIM queue configuration
9072 * @adap: the adapter
9073 * @base: holds the queue base addresses in bytes
9074 * @size: holds the queue sizes in bytes
9075 * @thres: holds the queue full thresholds in bytes
9077 * Returns the current configuration of the CIM queues, starting with
9078 * the IBQs, then the OBQs.
9080 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9083 int cim_num_obq = is_t4(adap->params.chip) ?
9084 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9086 for (i = 0; i < CIM_NUM_IBQ; i++) {
9087 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9089 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9090 /* value is in 256-byte units */
9091 *base++ = CIMQBASE_G(v) * 256;
9092 *size++ = CIMQSIZE_G(v) * 256;
9093 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9095 for (i = 0; i < cim_num_obq; i++) {
9096 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9098 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9099 /* value is in 256-byte units */
9100 *base++ = CIMQBASE_G(v) * 256;
9101 *size++ = CIMQSIZE_G(v) * 256;
9106 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9107 * @adap: the adapter
9108 * @qid: the queue index
9109 * @data: where to store the queue contents
9110 * @n: capacity of @data in 32-bit words
9112 * Reads the contents of the selected CIM queue starting at address 0 up
9113 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9114 * error and the number of 32-bit words actually read on success.
9116 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9118 int i, err, attempts;
9120 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9122 if (qid > 5 || (n & 3))
9125 addr = qid * nwords;
9129 /* It might take 3-10ms before the IBQ debug read access is allowed.
9130 * Wait for 1 Sec with a delay of 1 usec.
9134 for (i = 0; i < n; i++, addr++) {
9135 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9137 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9141 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9143 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9148 * t4_read_cim_obq - read the contents of a CIM outbound queue
9149 * @adap: the adapter
9150 * @qid: the queue index
9151 * @data: where to store the queue contents
9152 * @n: capacity of @data in 32-bit words
9154 * Reads the contents of the selected CIM queue starting at address 0 up
9155 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9156 * error and the number of 32-bit words actually read on success.
9158 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9161 unsigned int addr, v, nwords;
9162 int cim_num_obq = is_t4(adap->params.chip) ?
9163 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9165 if ((qid > (cim_num_obq - 1)) || (n & 3))
9168 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9169 QUENUMSELECT_V(qid));
9170 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9172 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
9173 nwords = CIMQSIZE_G(v) * 64; /* same */
9177 for (i = 0; i < n; i++, addr++) {
9178 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9180 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9184 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9186 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9191 * t4_cim_read - read a block from CIM internal address space
9192 * @adap: the adapter
9193 * @addr: the start address within the CIM address space
9194 * @n: number of words to read
9195 * @valp: where to store the result
9197 * Reads a block of 4-byte words from the CIM intenal address space.
9199 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9204 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9207 for ( ; !ret && n--; addr += 4) {
9208 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9209 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9212 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9218 * t4_cim_write - write a block into CIM internal address space
9219 * @adap: the adapter
9220 * @addr: the start address within the CIM address space
9221 * @n: number of words to write
9222 * @valp: set of values to write
9224 * Writes a block of 4-byte words into the CIM intenal address space.
9226 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9227 const unsigned int *valp)
9231 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9234 for ( ; !ret && n--; addr += 4) {
9235 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9236 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9237 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9243 static int t4_cim_write1(struct adapter *adap, unsigned int addr,
9246 return t4_cim_write(adap, addr, 1, &val);
9250 * t4_cim_read_la - read CIM LA capture buffer
9251 * @adap: the adapter
9252 * @la_buf: where to store the LA data
9253 * @wrptr: the HW write pointer within the capture buffer
9255 * Reads the contents of the CIM LA buffer with the most recent entry at
9256 * the end of the returned data and with the entry at @wrptr first.
9257 * We try to leave the LA in the running state we find it in.
9259 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9262 unsigned int cfg, val, idx;
9264 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9268 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9269 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9274 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9278 idx = UPDBGLAWRPTR_G(val);
9282 for (i = 0; i < adap->params.cim_la_size; i++) {
9283 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9284 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9287 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9290 if (val & UPDBGLARDEN_F) {
9294 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9298 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9299 * identify the 32-bit portion of the full 312-bit data
9301 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9302 idx = (idx & 0xff0) + 0x10;
9305 /* address can't exceed 0xfff */
9306 idx &= UPDBGLARDPTR_M;
9309 if (cfg & UPDBGLAEN_F) {
9310 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9311 cfg & ~UPDBGLARDEN_F);
9319 * t4_tp_read_la - read TP LA capture buffer
9320 * @adap: the adapter
9321 * @la_buf: where to store the LA data
9322 * @wrptr: the HW write pointer within the capture buffer
9324 * Reads the contents of the TP LA buffer with the most recent entry at
9325 * the end of the returned data and with the entry at @wrptr first.
9326 * We leave the LA in the running state we find it in.
9328 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9330 bool last_incomplete;
9331 unsigned int i, cfg, val, idx;
9333 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
9334 if (cfg & DBGLAENABLE_F) /* freeze LA */
9335 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9336 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
9338 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
9339 idx = DBGLAWPTR_G(val);
9340 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
9341 if (last_incomplete)
9342 idx = (idx + 1) & DBGLARPTR_M;
9347 val &= ~DBGLARPTR_V(DBGLARPTR_M);
9348 val |= adap->params.tp.la_mask;
9350 for (i = 0; i < TPLA_SIZE; i++) {
9351 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
9352 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
9353 idx = (idx + 1) & DBGLARPTR_M;
9356 /* Wipe out last entry if it isn't valid */
9357 if (last_incomplete)
9358 la_buf[TPLA_SIZE - 1] = ~0ULL;
9360 if (cfg & DBGLAENABLE_F) /* restore running state */
9361 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9362 cfg | adap->params.tp.la_mask);
9365 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9366 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
9367 * state for more than the Warning Threshold then we'll issue a warning about
9368 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
9369 * appears to be hung every Warning Repeat second till the situation clears.
9370 * If the situation clears, we'll note that as well.
9372 #define SGE_IDMA_WARN_THRESH 1
9373 #define SGE_IDMA_WARN_REPEAT 300
9376 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9377 * @adapter: the adapter
9378 * @idma: the adapter IDMA Monitor state
9380 * Initialize the state of an SGE Ingress DMA Monitor.
9382 void t4_idma_monitor_init(struct adapter *adapter,
9383 struct sge_idma_monitor_state *idma)
9385 /* Initialize the state variables for detecting an SGE Ingress DMA
9386 * hang. The SGE has internal counters which count up on each clock
9387 * tick whenever the SGE finds its Ingress DMA State Engines in the
9388 * same state they were on the previous clock tick. The clock used is
9389 * the Core Clock so we have a limit on the maximum "time" they can
9390 * record; typically a very small number of seconds. For instance,
9391 * with a 600MHz Core Clock, we can only count up to a bit more than
9392 * 7s. So we'll synthesize a larger counter in order to not run the
9393 * risk of having the "timers" overflow and give us the flexibility to
9394 * maintain a Hung SGE State Machine of our own which operates across
9395 * a longer time frame.
9397 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9398 idma->idma_stalled[0] = 0;
9399 idma->idma_stalled[1] = 0;
9403 * t4_idma_monitor - monitor SGE Ingress DMA state
9404 * @adapter: the adapter
9405 * @idma: the adapter IDMA Monitor state
9406 * @hz: number of ticks/second
9407 * @ticks: number of ticks since the last IDMA Monitor call
9409 void t4_idma_monitor(struct adapter *adapter,
9410 struct sge_idma_monitor_state *idma,
9413 int i, idma_same_state_cnt[2];
9415 /* Read the SGE Debug Ingress DMA Same State Count registers. These
9416 * are counters inside the SGE which count up on each clock when the
9417 * SGE finds its Ingress DMA State Engines in the same states they
9418 * were in the previous clock. The counters will peg out at
9419 * 0xffffffff without wrapping around so once they pass the 1s
9420 * threshold they'll stay above that till the IDMA state changes.
9422 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
9423 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
9424 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9426 for (i = 0; i < 2; i++) {
9427 u32 debug0, debug11;
9429 /* If the Ingress DMA Same State Counter ("timer") is less
9430 * than 1s, then we can reset our synthesized Stall Timer and
9431 * continue. If we have previously emitted warnings about a
9432 * potential stalled Ingress Queue, issue a note indicating
9433 * that the Ingress Queue has resumed forward progress.
9435 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
9436 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
9437 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
9438 "resumed after %d seconds\n",
9439 i, idma->idma_qid[i],
9440 idma->idma_stalled[i] / hz);
9441 idma->idma_stalled[i] = 0;
9445 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
9446 * domain. The first time we get here it'll be because we
9447 * passed the 1s Threshold; each additional time it'll be
9448 * because the RX Timer Callback is being fired on its regular
9451 * If the stall is below our Potential Hung Ingress Queue
9452 * Warning Threshold, continue.
9454 if (idma->idma_stalled[i] == 0) {
9455 idma->idma_stalled[i] = hz;
9456 idma->idma_warn[i] = 0;
9458 idma->idma_stalled[i] += ticks;
9459 idma->idma_warn[i] -= ticks;
9462 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
9465 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
9467 if (idma->idma_warn[i] > 0)
9469 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
9471 /* Read and save the SGE IDMA State and Queue ID information.
9472 * We do this every time in case it changes across time ...
9473 * can't be too careful ...
9475 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
9476 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9477 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
9479 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
9480 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9481 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
9483 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
9484 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
9485 i, idma->idma_qid[i], idma->idma_state[i],
9486 idma->idma_stalled[i] / hz,
9488 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
9493 * t4_load_cfg - download config file
9494 * @adap: the adapter
9495 * @cfg_data: the cfg text file to write
9496 * @size: text file size
9498 * Write the supplied config text file to the card's serial flash.
9500 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9502 int ret, i, n, cfg_addr;
9504 unsigned int flash_cfg_start_sec;
9505 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9507 cfg_addr = t4_flash_cfg_addr(adap);
9512 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9514 if (size > FLASH_CFG_MAX_SIZE) {
9515 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
9516 FLASH_CFG_MAX_SIZE);
9520 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
9522 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9523 flash_cfg_start_sec + i - 1);
9524 /* If size == 0 then we're simply erasing the FLASH sectors associated
9525 * with the on-adapter Firmware Configuration File.
9527 if (ret || size == 0)
9530 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9531 for (i = 0; i < size; i += SF_PAGE_SIZE) {
9532 if ((size - i) < SF_PAGE_SIZE)
9536 ret = t4_write_flash(adap, addr, n, cfg_data);
9540 addr += SF_PAGE_SIZE;
9541 cfg_data += SF_PAGE_SIZE;
9546 dev_err(adap->pdev_dev, "config file %s failed %d\n",
9547 (size == 0 ? "clear" : "download"), ret);
9552 * t4_set_vf_mac - Set MAC address for the specified VF
9553 * @adapter: The adapter
9554 * @vf: one of the VFs instantiated by the specified PF
9555 * @naddr: the number of MAC addresses
9556 * @addr: the MAC address(es) to be set to the specified VF
9558 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
9559 unsigned int naddr, u8 *addr)
9561 struct fw_acl_mac_cmd cmd;
9563 memset(&cmd, 0, sizeof(cmd));
9564 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
9567 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
9568 FW_ACL_MAC_CMD_VFN_V(vf));
9570 /* Note: Do not enable the ACL */
9571 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
9574 switch (adapter->pf) {
9576 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
9579 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
9582 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
9585 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
9589 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
9593 * t4_read_pace_tbl - read the pace table
9594 * @adap: the adapter
9595 * @pace_vals: holds the returned values
9597 * Returns the values of TP's pace table in microseconds.
9599 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9603 for (i = 0; i < NTX_SCHED; i++) {
9604 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
9605 v = t4_read_reg(adap, TP_PACE_TABLE_A);
9606 pace_vals[i] = dack_ticks_to_usec(adap, v);
9611 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
9612 * @adap: the adapter
9613 * @sched: the scheduler index
9614 * @kbps: the byte rate in Kbps
9615 * @ipg: the interpacket delay in tenths of nanoseconds
9616 * @sleep_ok: if true we may sleep while awaiting command completion
9618 * Return the current configuration of a HW Tx scheduler.
9620 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
9621 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
9623 unsigned int v, addr, bpt, cpt;
9626 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
9627 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9630 bpt = (v >> 8) & 0xff;
9633 *kbps = 0; /* scheduler disabled */
9635 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
9636 *kbps = (v * bpt) / 125;
9640 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
9641 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
9645 *ipg = (10000 * v) / core_ticks_per_usec(adap);
9649 /* t4_sge_ctxt_rd - read an SGE context through FW
9650 * @adap: the adapter
9651 * @mbox: mailbox to use for the FW command
9652 * @cid: the context id
9653 * @ctype: the context type
9654 * @data: where to store the context data
9656 * Issues a FW command through the given mailbox to read an SGE context.
9658 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
9659 enum ctxt_type ctype, u32 *data)
9661 struct fw_ldst_cmd c;
9664 if (ctype == CTXT_FLM)
9665 ret = FW_LDST_ADDRSPC_SGE_FLMC;
9667 ret = FW_LDST_ADDRSPC_SGE_CONMC;
9669 memset(&c, 0, sizeof(c));
9670 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
9671 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9672 FW_LDST_CMD_ADDRSPACE_V(ret));
9673 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
9674 c.u.idctxt.physid = cpu_to_be32(cid);
9676 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
9678 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
9679 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
9680 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
9681 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
9682 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
9683 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
9689 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
9690 * @adap: the adapter
9691 * @cid: the context id
9692 * @ctype: the context type
9693 * @data: where to store the context data
9695 * Reads an SGE context directly, bypassing FW. This is only for
9696 * debugging when FW is unavailable.
9698 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
9699 enum ctxt_type ctype, u32 *data)
9703 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
9704 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
9706 for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
9707 *data++ = t4_read_reg(adap, i);
9711 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
9712 int rateunit, int ratemode, int channel, int class,
9713 int minrate, int maxrate, int weight, int pktsize)
9715 struct fw_sched_cmd cmd;
9717 memset(&cmd, 0, sizeof(cmd));
9718 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
9721 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
9723 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
9724 cmd.u.params.type = type;
9725 cmd.u.params.level = level;
9726 cmd.u.params.mode = mode;
9727 cmd.u.params.ch = channel;
9728 cmd.u.params.cl = class;
9729 cmd.u.params.unit = rateunit;
9730 cmd.u.params.rate = ratemode;
9731 cmd.u.params.min = cpu_to_be32(minrate);
9732 cmd.u.params.max = cpu_to_be32(maxrate);
9733 cmd.u.params.weight = cpu_to_be16(weight);
9734 cmd.u.params.pktsize = cpu_to_be16(pktsize);
9736 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),