1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function driver
4 * Copyright (C) 2018 Marvell.
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
14 #include "rvu_trace.h"
/* Byte offset of the first message within a mbox region: each region
 * begins with a struct mbox_hdr, rounded up to MBOX_MSG_ALIGN.
 */
16 static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
/* Lockless reset of device @devid's mbox region: locates the TX and RX
 * headers inside the hardware mbox memory so they can be cleared.
 * Callers must hold mdev->mbox_lock (see otx2_mbox_reset()).
 * NOTE(review): the statements that actually clear the header fields
 * (num_msgs/msg_size) and the per-device counters appear to be elided
 * from this excerpt -- confirm against the full source file.
 */
18 void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
20 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
21 struct mbox_hdr *tx_hdr, *rx_hdr;
22 void *hw_mbase = mdev->hwbase;
24 tx_hdr = hw_mbase + mbox->tx_start;
25 rx_hdr = hw_mbase + mbox->rx_start;
34 EXPORT_SYMBOL(__otx2_mbox_reset);
36 void otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
38 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
40 spin_lock(&mdev->mbox_lock);
41 __otx2_mbox_reset(mbox, devid);
42 spin_unlock(&mdev->mbox_lock);
44 EXPORT_SYMBOL(otx2_mbox_reset);
/* Tear down a mailbox: drop the reference to the register base mapping.
 * Also used as the error path of otx2_mbox_setup().
 * NOTE(review): the rest of the teardown (presumably clearing hwbase and
 * freeing the mbox->dev array allocated in otx2_mbox_setup()) is elided
 * from this excerpt -- verify against the full source before relying on
 * ownership semantics.
 */
46 void otx2_mbox_destroy(struct otx2_mbox *mbox)
48 mbox->reg_base = NULL;
54 EXPORT_SYMBOL(otx2_mbox_destroy);
/* otx2_mbox_setup() - configure region layout, trigger register and
 * per-device state for a mailbox, based on its @direction.
 *
 * The two endpoints of a channel see mirrored layouts: the initiator
 * (AF->PF / PF->VF "down", or the *_UP senders) uses TX_START/TX_SIZE
 * where its peer uses RX_START/RX_SIZE, and vice versa.  The trigger
 * register written by otx2_mbox_msg_send() is likewise chosen per
 * direction.  The mbox->dev array holds one otx2_mbox_dev per peer.
 * NOTE(review): the switch statements' framing (switch/default/return),
 * the tr_shift assignments and the kcalloc() failure check are elided
 * from this excerpt -- on alloc failure the visible code calls
 * otx2_mbox_destroy(), presumably followed by an error return.
 */
56 static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
57 void *reg_base, int direction, int ndevs)
/* "Down" directions, initiator side: TX where the peer expects RX. */
62 mbox->tx_start = MBOX_DOWN_TX_START;
63 mbox->rx_start = MBOX_DOWN_RX_START;
64 mbox->tx_size = MBOX_DOWN_TX_SIZE;
65 mbox->rx_size = MBOX_DOWN_RX_SIZE;
/* "Down" directions, responder side: mirror of the above. */
69 mbox->tx_start = MBOX_DOWN_RX_START;
70 mbox->rx_start = MBOX_DOWN_TX_START;
71 mbox->tx_size = MBOX_DOWN_RX_SIZE;
72 mbox->rx_size = MBOX_DOWN_TX_SIZE;
74 case MBOX_DIR_AFPF_UP:
75 case MBOX_DIR_PFVF_UP:
/* "Up" (notification) directions, initiator side. */
76 mbox->tx_start = MBOX_UP_TX_START;
77 mbox->rx_start = MBOX_UP_RX_START;
78 mbox->tx_size = MBOX_UP_TX_SIZE;
79 mbox->rx_size = MBOX_UP_RX_SIZE;
81 case MBOX_DIR_PFAF_UP:
82 case MBOX_DIR_VFPF_UP:
/* "Up" directions, responder side: mirror of the above. */
83 mbox->tx_start = MBOX_UP_RX_START;
84 mbox->rx_start = MBOX_UP_TX_START;
85 mbox->tx_size = MBOX_UP_RX_SIZE;
86 mbox->rx_size = MBOX_UP_TX_SIZE;
/* Doorbell register rung by otx2_mbox_msg_send(), per direction. */
94 case MBOX_DIR_AFPF_UP:
95 mbox->trigger = RVU_AF_AFPF_MBOX0;
99 case MBOX_DIR_PFAF_UP:
100 mbox->trigger = RVU_PF_PFAF_MBOX1;
104 case MBOX_DIR_PFVF_UP:
105 mbox->trigger = RVU_PF_VFX_PFVF_MBOX0;
109 case MBOX_DIR_VFPF_UP:
110 mbox->trigger = RVU_VF_VFPF_MBOX1;
117 mbox->reg_base = reg_base;
/* One otx2_mbox_dev per peer device; freed via otx2_mbox_destroy(). */
120 mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
122 otx2_mbox_destroy(mbox);
130 int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
131 void *reg_base, int direction, int ndevs)
133 struct otx2_mbox_dev *mdev;
136 err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
140 mbox->hwbase = hwbase;
142 for (devid = 0; devid < ndevs; devid++) {
143 mdev = &mbox->dev[devid];
144 mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
145 mdev->hwbase = mdev->mbase;
146 spin_lock_init(&mdev->mbox_lock);
147 /* Init header to reset value */
148 otx2_mbox_reset(mbox, devid);
153 EXPORT_SYMBOL(otx2_mbox_init);
155 /* Initialize mailbox with the set of mailbox region addresses
156  * in the array hwbase.
 */
158 int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
159 struct pci_dev *pdev, void *reg_base,
160 int direction, int ndevs, unsigned long *pf_bmap)
162 struct otx2_mbox_dev *mdev;
165 err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
169 mbox->hwbase = hwbase[0];
171 for (devid = 0; devid < ndevs; devid++) {
172 if (!test_bit(devid, pf_bmap))
175 mdev = &mbox->dev[devid];
176 mdev->mbase = hwbase[devid];
177 mdev->hwbase = hwbase[devid];
178 spin_lock_init(&mdev->mbox_lock);
179 /* Init header to reset value */
180 otx2_mbox_reset(mbox, devid);
185 EXPORT_SYMBOL(otx2_mbox_regions_init);
187 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
189 unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
190 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
191 struct device *sender = &mbox->pdev->dev;
193 while (!time_after(jiffies, timeout)) {
194 if (mdev->num_msgs == mdev->msgs_acked)
196 usleep_range(800, 1000);
198 dev_dbg(sender, "timed out while waiting for rsp\n");
201 EXPORT_SYMBOL(otx2_mbox_wait_for_rsp);
203 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
205 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
206 unsigned long timeout = jiffies + 1 * HZ;
208 while (!time_after(jiffies, timeout)) {
209 if (mdev->num_msgs == mdev->msgs_acked)
215 EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
/* otx2_mbox_msg_send() - publish the queued TX messages to the peer and
 * ring its doorbell.  The message bodies are made visible first and
 * tx_hdr->num_msgs is written last, so a nonzero num_msgs guarantees the
 * peer sees a fully written buffer; only then is the trigger register
 * written (selected per-device via devid << tr_shift).
 * NOTE(review): several statements are elided from this excerpt (the
 * memcpy length argument, the msg_size/rsp_size resets, and a memory
 * barrier under the "Sync mbox data" comment) -- confirm ordering against
 * the full source before modifying.
 */
217 void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
219 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
220 struct mbox_hdr *tx_hdr, *rx_hdr;
221 void *hw_mbase = mdev->hwbase;
223 tx_hdr = hw_mbase + mbox->tx_start;
224 rx_hdr = hw_mbase + mbox->rx_start;
226 /* If bounce buffer is implemented copy mbox messages from
227 * bounce buffer to hw mbox memory.
229 if (mdev->mbase != hw_mbase)
230 memcpy(hw_mbase + mbox->tx_start + msgs_offset,
231 mdev->mbase + mbox->tx_start + msgs_offset,
234 spin_lock(&mdev->mbox_lock);
236 tx_hdr->msg_size = mdev->msg_size;
238 /* Reset header for next messages */
241 mdev->msgs_acked = 0;
243 /* Sync mbox data into memory */
246 /* num_msgs != 0 signals to the peer that the buffer has a number of
247 * messages. So this should be written after writing all the messages
248 * to the shared memory.
250 tx_hdr->num_msgs = mdev->num_msgs;
251 rx_hdr->num_msgs = 0;
253 trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
255 spin_unlock(&mdev->mbox_lock);
257 /* The interrupt should be fired after num_msgs is written
258 * to the shared memory
260 writeq(1, (void __iomem *)mbox->reg_base +
261 (mbox->trigger | (devid << mbox->tr_shift)));
263 EXPORT_SYMBOL(otx2_mbox_msg_send);
/* otx2_mbox_alloc_msg_rsp() - reserve @size bytes for a request in the TX
 * region and account @size_rsp bytes for its eventual response in the RX
 * region.  Both sizes are rounded up to MBOX_MSG_ALIGN.  The returned
 * header is zeroed, stamped with OTX2_MBOX_VERSION, and its next_msgoff
 * points just past this message so subsequent messages chain.
 * mdev->mbox_lock serializes concurrent allocators.
 * NOTE(review): the overflow-failure exits and the final return of
 * msghdr are elided from this excerpt -- presumably the function returns
 * NULL (msghdr's initial value) when either region would overflow.
 */
265 struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
266 int size, int size_rsp)
268 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
269 struct mbox_msghdr *msghdr = NULL;
271 spin_lock(&mdev->mbox_lock);
272 size = ALIGN(size, MBOX_MSG_ALIGN);
273 size_rsp = ALIGN(size_rsp, MBOX_MSG_ALIGN);
274 /* Check if there is space in mailbox */
275 if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)
277 if ((mdev->rsp_size + size_rsp) > mbox->rx_size - msgs_offset)
280 if (mdev->msg_size == 0)
284 msghdr = mdev->mbase + mbox->tx_start + msgs_offset + mdev->msg_size;
286 /* Clear the whole msg region */
287 memset(msghdr, 0, size);
288 /* Init message header with reset values */
289 msghdr->ver = OTX2_MBOX_VERSION;
290 mdev->msg_size += size;
291 mdev->rsp_size += size_rsp;
292 msghdr->next_msgoff = mdev->msg_size + msgs_offset;
294 spin_unlock(&mdev->mbox_lock);
298 EXPORT_SYMBOL(otx2_mbox_alloc_msg_rsp);
/* otx2_mbox_get_rsp() - find the response paired with request @msg.
 * Requests and responses are stored positionally: the Nth TX request is
 * answered by the Nth RX response, linked via each header's next_msgoff.
 * The walk checks pmsg->id == prsp->id at every position; when the
 * matching request is found its paired response header is returned
 * (return statement elided here).  Falls through to ERR_PTR(-ENODEV) if
 * no pairing matches.
 * NOTE(review): the early bail-out when num_msgs != msgs_acked (i.e. not
 * all responses have arrived yet) is elided from this excerpt.
 */
300 struct mbox_msghdr *otx2_mbox_get_rsp(struct otx2_mbox *mbox, int devid,
301 struct mbox_msghdr *msg)
303 unsigned long imsg = mbox->tx_start + msgs_offset;
304 unsigned long irsp = mbox->rx_start + msgs_offset;
305 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
308 spin_lock(&mdev->mbox_lock);
310 if (mdev->num_msgs != mdev->msgs_acked)
313 for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
314 struct mbox_msghdr *pmsg = mdev->mbase + imsg;
315 struct mbox_msghdr *prsp = mdev->mbase + irsp;
/* A request/response ID mismatch means the pairing is broken */
318 if (pmsg->id != prsp->id)
320 spin_unlock(&mdev->mbox_lock);
/* Advance both cursors via the per-message offset chain */
324 imsg = mbox->tx_start + pmsg->next_msgoff;
325 irsp = mbox->rx_start + prsp->next_msgoff;
329 spin_unlock(&mdev->mbox_lock);
330 return ERR_PTR(-ENODEV);
/* otx2_mbox_check_rsp_msgs() - validate that every acked response matches
 * its request positionally (preq->id == prsp->id) and report the outcome.
 * Trace events are emitted both for ID mismatches and (per the second
 * trace call) presumably for responses carrying an error rc.
 * NOTE(review): the accumulated return value, the mismatch error path and
 * the early bail-out when num_msgs != msgs_acked are elided from this
 * excerpt -- confirm the exact rc semantics against the full source.
 */
334 int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
336 unsigned long ireq = mbox->tx_start + msgs_offset;
337 unsigned long irsp = mbox->rx_start + msgs_offset;
338 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
342 spin_lock(&mdev->mbox_lock);
344 if (mdev->num_msgs != mdev->msgs_acked)
347 for (msgs = 0; msgs < mdev->msgs_acked; msgs++) {
348 struct mbox_msghdr *preq = mdev->mbase + ireq;
349 struct mbox_msghdr *prsp = mdev->mbase + irsp;
351 if (preq->id != prsp->id) {
352 trace_otx2_msg_check(mbox->pdev, preq->id,
358 trace_otx2_msg_check(mbox->pdev, preq->id,
/* Advance both cursors via the per-message offset chain */
363 ireq = mbox->tx_start + preq->next_msgoff;
364 irsp = mbox->rx_start + prsp->next_msgoff;
368 spin_unlock(&mdev->mbox_lock);
371 EXPORT_SYMBOL(otx2_mbox_check_rsp_msgs);
/* otx2_reply_invalid_msg() - queue a MBOX_MSG_INVALID response for a
 * request the handler did not recognize.  Allocates a bare msg_rsp in the
 * TX region and stamps it with the response signature, the INVALID error
 * code and the requester's @pcifunc.
 * NOTE(review): the function's return type (line above this excerpt), the
 * NULL-allocation check and the rsp->hdr.id = id assignment are elided
 * from this excerpt.
 */
374 otx2_reply_invalid_msg(struct otx2_mbox *mbox, int devid, u16 pcifunc, u16 id)
378 rsp = (struct msg_rsp *)
379 otx2_mbox_alloc_msg(mbox, devid, sizeof(*rsp));
383 rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
384 rsp->hdr.rc = MBOX_MSG_INVALID;
385 rsp->hdr.pcifunc = pcifunc;
388 EXPORT_SYMBOL(otx2_reply_invalid_msg);
390 bool otx2_mbox_nonempty(struct otx2_mbox *mbox, int devid)
392 struct otx2_mbox_dev *mdev = &mbox->dev[devid];
395 spin_lock(&mdev->mbox_lock);
396 ret = mdev->num_msgs != 0;
397 spin_unlock(&mdev->mbox_lock);
401 EXPORT_SYMBOL(otx2_mbox_nonempty);
/* otx2_mbox_id2name() - map a mailbox message ID to its name string.
 * The case labels are generated from the MBOX_MESSAGES x-macro table by
 * temporarily redefining M() to stringify each message name.
 * NOTE(review): the surrounding switch statement, the #undef M and the
 * default (unknown-ID) return are elided from this excerpt.
 */
403 const char *otx2_mbox_id2name(u16 id)
406 #define M(_name, _id, _1, _2, _3) case _id: return # _name;
413 EXPORT_SYMBOL(otx2_mbox_id2name);
415 MODULE_AUTHOR("Marvell.");
416 MODULE_LICENSE("GPL v2");