// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include "hclge_cmd.h"
#include "hnae3.h"
#include "hclge_main.h"

#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)

#define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)

static int hclge_ring_space(struct hclge_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

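/* Example: with desc_num = 1024, next_to_use = 10 and next_to_clean = 5,
 * used = (10 - 5 + 1024) % 1024 = 5, so the space returned is
 * 1024 - 5 - 1 = 1018. One slot is deliberately kept unused so that a
 * full ring can be told apart from an empty one (both would otherwise
 * have head == tail).
 */
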
static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

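/* Example: with ntc = 100 and ntu = 200 (no wrap), only head values in
 * [100, 200] are valid. With ntc = 1000, ntu = 50 and desc_num = 1024
 * (ring has wrapped), head is valid in [1000, 1023] or in [0, 50], which
 * is what the disjunction in the wrapped branch expresses.
 */
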
static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclge_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
{
	struct hclge_hw *hw = &hdev->hw;
	struct hclge_cmq_ring *ring =
		(ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->ring_type = ring_type;
	ring->dev = hdev;

	ret = hclge_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
			(ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
		return ret;
	}

	return 0;
}

void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
{
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
}

void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
				enum hclge_opcode_type opcode, bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hclge_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);

	if (is_read)
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
}

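/* For reference, the flag word produced above: a read command carries
 * HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN | HCLGE_CMD_FLAG_WR, while
 * a write command carries only HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN.
 */
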
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclge_cmd_init_regs(struct hclge_hw *hw)
{
	hclge_cmd_config_regs(&hw->cmq.csq);
	hclge_cmd_config_regs(&hw->cmq.crq);
}

static int hclge_cmd_csq_clean(struct hclge_hw *hw)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_cmq_ring *csq = &hw->cmq.csq;
	u32 head;
	int clean;

	head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		dev_warn(&hdev->pdev->dev,
			 "A watchdog reset of the IMP firmware is expected soon!\n");
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;

	return clean;
}

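/* Example: with next_to_clean = 1020 and desc_num = 1024, a head that has
 * wrapped around to 4 yields clean = (4 - 1020 + 1024) % 1024 = 8, i.e.
 * eight descriptors were consumed across the wrap point.
 */
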
static int hclge_cmd_csq_done(struct hclge_hw *hw)
{
	u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclge_is_special_opcode(u16 opcode)
{
	/* these commands span several descriptors, and all of them use
	 * the first one to hold the opcode and the return value
	 */
	u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT,
			      HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue. It copies the
 * descriptors into the CSQ, rings the doorbell, optionally waits for
 * completion, and cleans the queue afterwards.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
	struct hclge_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0, retval = 0;
	u16 opcode, desc_ret;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclge_ring_space(&hw->cmq.csq) ||
	    test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used by the hardware to write back
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to check.
	 */
	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclge_cmd_csq_done(hw)) {
				complete = true;
				break;
			}
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (!complete) {
		retval = -EAGAIN;
	} else {
		handle = 0;
		while (handle < num) {
			/* Get the result of the hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclge_is_special_opcode(opcode)))
				desc_ret = le16_to_cpu(desc[handle].retval);
			else
				desc_ret = le16_to_cpu(desc[0].retval);

			if (desc_ret == HCLGE_CMD_EXEC_SUCCESS)
				retval = 0;
			else
				retval = -EIO;
			hw->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	/* Clean the command send queue */
	handle = hclge_cmd_csq_clean(hw);
	if (handle < 0)
		retval = handle;
	else if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return retval;
}

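/* Usage sketch (illustrative only, not driver code): a single-descriptor
 * synchronous command typically follows the pattern below;
 * hclge_cmd_query_firmware_version() underneath is a real instance of it.
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
 *	ret = hclge_cmd_send(hw, &desc, 1);
 *	if (!ret)
 *		consume(desc.data);	// consume() is a hypothetical caller
 */
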
static enum hclge_cmd_status hclge_cmd_query_firmware_version(
		struct hclge_hw *hw, u32 *version)
{
	struct hclge_query_version_cmd *resp;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
	resp = (struct hclge_query_version_cmd *)desc.data;

	ret = hclge_cmd_send(hw, &desc, 1);
	if (!ret)
		*version = le32_to_cpu(resp->firmware);

	return ret;
}

int hclge_cmd_queue_init(struct hclge_dev *hdev)
{
	int ret;

	/* Setup the locks for the command queues */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	/* Setup the queue entries used by the cmd queues */
	hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;

	/* Setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;

	/* Setup queue rings */
	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclge_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

int hclge_cmd_init(struct hclge_dev *hdev)
{
	u32 version;
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock_bh(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclge_cmd_init_regs(&hdev->hw);

	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (hclge_is_reset_pending(hdev)) {
		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
		return -EBUSY;
	}

	ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"firmware version query failed %d\n", ret);
		return ret;
	}

	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;
}

static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
{
	spin_lock(&ring->lock);
	hclge_free_cmd_desc(ring);
	spin_unlock(&ring->lock);
}

void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
	hclge_destroy_queue(&hw->cmq.csq);
	hclge_destroy_queue(&hw->cmq.crq);
}