1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
4  * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
5  */
6
7 #include <linux/clk.h>
8 #include <linux/firmware/imx/ipc.h>
9 #include <linux/firmware/imx/s4.h>
10 #include <linux/interrupt.h>
11 #include <linux/io.h>
12 #include <linux/iopoll.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/mailbox_controller.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/platform_device.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/suspend.h>
21 #include <linux/slab.h>
22
23 #include "mailbox.h"
24
25 #define IMX_MU_CHANS            24
26 /* TX0/RX0/RXDB[0-3] */
27 #define IMX_MU_SCU_CHANS        6
28 /* TX0/RX0 */
29 #define IMX_MU_S4_CHANS         2
30 #define IMX_MU_CHAN_NAME_SIZE   20
31
32 #define IMX_MU_NUM_RR           4
33
34 #define IMX_MU_SECO_TX_TOUT (msecs_to_jiffies(3000))
35 #define IMX_MU_SECO_RX_TOUT (msecs_to_jiffies(3000))
36
37 /* Please do not change TX & RX */
38 enum imx_mu_chan_type {
39         IMX_MU_TYPE_TX          = 0, /* Tx */
40         IMX_MU_TYPE_RX          = 1, /* Rx */
41         IMX_MU_TYPE_TXDB        = 2, /* Tx doorbell */
42         IMX_MU_TYPE_RXDB        = 3, /* Rx doorbell */
43         IMX_MU_TYPE_RST         = 4, /* Reset */
44         IMX_MU_TYPE_TXDB_V2     = 5, /* Tx doorbell with S/W ACK */
45 };
46
47 enum imx_mu_xcr {
48         IMX_MU_CR,
49         IMX_MU_GIER,
50         IMX_MU_GCR,
51         IMX_MU_TCR,
52         IMX_MU_RCR,
53         IMX_MU_xCR_MAX,
54 };
55
56 enum imx_mu_xsr {
57         IMX_MU_SR,
58         IMX_MU_GSR,
59         IMX_MU_TSR,
60         IMX_MU_RSR,
61         IMX_MU_xSR_MAX,
62 };
63
64 struct imx_sc_rpc_msg_max {
65         struct imx_sc_rpc_msg hdr;
66         u32 data[30];
67 };
68
69 struct imx_s4_rpc_msg_max {
70         struct imx_s4_rpc_msg hdr;
71         u32 data[254];
72 };
73
74 struct imx_mu_con_priv {
75         unsigned int            idx;
76         char                    irq_desc[IMX_MU_CHAN_NAME_SIZE];
77         enum imx_mu_chan_type   type;
78         struct mbox_chan        *chan;
79         struct tasklet_struct   txdb_tasklet;
80 };
81
82 struct imx_mu_priv {
83         struct device           *dev;
84         void __iomem            *base;
85         void                    *msg;
86         spinlock_t              xcr_lock; /* control register lock */
87
88         struct mbox_controller  mbox;
89         struct mbox_chan        mbox_chans[IMX_MU_CHANS];
90
91         struct imx_mu_con_priv  con_priv[IMX_MU_CHANS];
92         const struct imx_mu_dcfg        *dcfg;
93         struct clk              *clk;
94         int                     irq[IMX_MU_CHANS];
95         bool                    suspend;
96
97         u32 xcr[IMX_MU_xCR_MAX];
98
99         bool                    side_b;
100 };
101
102 enum imx_mu_type {
103         IMX_MU_V1,
104         IMX_MU_V2 = BIT(1),
105         IMX_MU_V2_S4 = BIT(15),
106         IMX_MU_V2_IRQ = BIT(16),
107 };
108
109 struct imx_mu_dcfg {
110         int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
111         int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
112         int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
113         void (*init)(struct imx_mu_priv *priv);
114         enum imx_mu_type type;
115         u32     xTR;            /* Transmit Register0 */
116         u32     xRR;            /* Receive Register0 */
117         u32     xSR[IMX_MU_xSR_MAX];    /* Status Registers */
118         u32     xCR[IMX_MU_xCR_MAX];    /* Control Registers */
119 };
120
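/* General Purpose Interrupt Pending */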
121 #define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
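/* Receive Register Full */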
122 #define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
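/* Transmit Register Empty */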
123 #define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
124
125 /* General Purpose Interrupt Enable */
126 #define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
127 /* Receive Interrupt Enable */
128 #define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
129 /* Transmit Interrupt Enable */
130 #define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
131 /* General Purpose Interrupt Request */
132 #define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
133 /* MU reset */
134 #define IMX_MU_xCR_RST(type)    (type & IMX_MU_V2 ? BIT(0) : BIT(5))
135 #define IMX_MU_xSR_RST(type)    (type & IMX_MU_V2 ? BIT(0) : BIT(7))
136
137
138 static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
139 {
140         return container_of(mbox, struct imx_mu_priv, mbox);
141 }
142
143 static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
144 {
145         iowrite32(val, priv->base + offs);
146 }
147
148 static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
149 {
150         return ioread32(priv->base + offs);
151 }
152
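/* Poll until the transmit register is empty, then write one word (used by the SECO path) */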
153 static int imx_mu_tx_waiting_write(struct imx_mu_priv *priv, u32 val, u32 idx)
154 {
155         u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_TX_TOUT;
156         u32 status;
157         u32 can_write;
158
159         dev_dbg(priv->dev, "Trying to write %.8x to idx %d\n", val, idx);
160
161         do {
162                 status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
163                 can_write = status & IMX_MU_xSR_TEn(priv->dcfg->type, idx % 4);
164         } while (!can_write && time_is_after_jiffies64(timeout_time));
165
166         if (!can_write) {
167                 dev_err(priv->dev, "timeout trying to write %.8x at %d(%.8x)\n",
168                         val, idx, status);
169                 return -ETIME;
170         }
171
172         imx_mu_write(priv, val, priv->dcfg->xTR + (idx % 4) * 4);
173
174         return 0;
175 }
176
177 static int imx_mu_rx_waiting_read(struct imx_mu_priv *priv, u32 *val, u32 idx)
178 {
179         u64 timeout_time = get_jiffies_64() + IMX_MU_SECO_RX_TOUT;
180         u32 status;
181         u32 can_read;
182
183         dev_dbg(priv->dev, "Trying to read from idx %d\n", idx);
184
185         do {
186                 status = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
187                 can_read = status & IMX_MU_xSR_RFn(priv->dcfg->type, idx % 4);
188         } while (!can_read && time_is_after_jiffies64(timeout_time));
189
190         if (!can_read) {
191                 dev_err(priv->dev, "timeout trying to read idx %d (%.8x)\n",
192                         idx, status);
193                 return -ETIME;
194         }
195
196         *val = imx_mu_read(priv, priv->dcfg->xRR + (idx % 4) * 4);
197         dev_dbg(priv->dev, "Read %.8x\n", *val);
198
199         return 0;
200 }
201
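/* Read-modify-write of an MU control register, protected by xcr_lock */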
202 static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, enum imx_mu_xcr type, u32 set, u32 clr)
203 {
204         unsigned long flags;
205         u32 val;
206
207         spin_lock_irqsave(&priv->xcr_lock, flags);
208         val = imx_mu_read(priv, priv->dcfg->xCR[type]);
209         val &= ~clr;
210         val |= set;
211         imx_mu_write(priv, val, priv->dcfg->xCR[type]);
212         spin_unlock_irqrestore(&priv->xcr_lock, flags);
213
214         return val;
215 }
216
217 static int imx_mu_generic_tx(struct imx_mu_priv *priv,
218                              struct imx_mu_con_priv *cp,
219                              void *data)
220 {
221         u32 *arg = data;
222
223         switch (cp->type) {
224         case IMX_MU_TYPE_TX:
225                 imx_mu_write(priv, *arg, priv->dcfg->xTR + cp->idx * 4);
226                 imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
227                 break;
228         case IMX_MU_TYPE_TXDB:
229                 imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
230                 tasklet_schedule(&cp->txdb_tasklet);
231                 break;
232         case IMX_MU_TYPE_TXDB_V2:
233                 imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
234                 break;
235         default:
236                 dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
237                 return -EINVAL;
238         }
239
240         return 0;
241 }
242
243 static int imx_mu_generic_rx(struct imx_mu_priv *priv,
244                              struct imx_mu_con_priv *cp)
245 {
246         u32 dat;
247
248         dat = imx_mu_read(priv, priv->dcfg->xRR + (cp->idx) * 4);
249         mbox_chan_received_data(cp->chan, (void *)&dat);
250
251         return 0;
252 }
253
254 static int imx_mu_generic_rxdb(struct imx_mu_priv *priv,
255                                struct imx_mu_con_priv *cp)
256 {
257         imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
258                      priv->dcfg->xSR[IMX_MU_GSR]);
259         mbox_chan_received_data(cp->chan, NULL);
260
261         return 0;
262 }
263
264 static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
265 {
266         u32 *arg = data;
267         int i, ret;
268         u32 xsr;
269         u32 size, max_size, num_tr;
270
271         if (priv->dcfg->type & IMX_MU_V2_S4) {
272                 size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
273                 max_size = sizeof(struct imx_s4_rpc_msg_max);
274                 num_tr = 8;
275         } else {
276                 size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
277                 max_size = sizeof(struct imx_sc_rpc_msg_max);
278                 num_tr = 4;
279         }
280
281         switch (cp->type) {
282         case IMX_MU_TYPE_TX:
283                 /*
284                  * msg->hdr.size specifies the number of u32 words while
285                  * sizeof yields bytes.
286                  */
287
288                 if (size > max_size / 4) {
289                         /*
290                          * The real message size can differ from the
291                          * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
292                          */
293                         dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
294                         return -EINVAL;
295                 }
296
297                 for (i = 0; i < num_tr && i < size; i++)
298                         imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
299                 for (; i < size; i++) {
300                         ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
301                                                  xsr,
302                                                  xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
303                                                  0, 5 * USEC_PER_SEC);
304                         if (ret) {
305                                 dev_err(priv->dev, "Send data index: %d timeout\n", i);
306                                 return ret;
307                         }
308                         imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
309                 }
310
311                 imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
312                 break;
313         default:
314                 dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
315                 return -EINVAL;
316         }
317
318         return 0;
319 }
320
321 static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
322 {
323         u32 *data;
324         int i, ret;
325         u32 xsr;
326         u32 size, max_size;
327
328         data = (u32 *)priv->msg;
329
330         imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
331         *data++ = imx_mu_read(priv, priv->dcfg->xRR);
332
333         if (priv->dcfg->type & IMX_MU_V2_S4) {
334                 size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
335                 max_size = sizeof(struct imx_s4_rpc_msg_max);
336         } else {
337                 size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
338                 max_size = sizeof(struct imx_sc_rpc_msg_max);
339         }
340
341         if (size > max_size / 4) {
342                 dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
343                 return -EINVAL;
344         }
345
346         for (i = 1; i < size; i++) {
347                 ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
348                                          xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0,
349                                          5 * USEC_PER_SEC);
350                 if (ret) {
351                         dev_err(priv->dev, "timeout read idx %d\n", i);
352                         return ret;
353                 }
354                 *data++ = imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
355         }
356
357         imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
358         mbox_chan_received_data(cp->chan, (void *)priv->msg);
359
360         return 0;
361 }
362
363 static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
364                           void *data)
365 {
366         struct imx_sc_rpc_msg_max *msg = data;
367         u32 *arg = data;
368         u32 byte_size;
369         int err;
370         int i;
371
372         dev_dbg(priv->dev, "Sending message\n");
373
374         switch (cp->type) {
375         case IMX_MU_TYPE_TXDB:
376                 byte_size = msg->hdr.size * sizeof(u32);
377                 if (byte_size > sizeof(*msg)) {
378                         /*
379                          * The real message size can differ from the
380                          * struct imx_sc_rpc_msg_max size
381                          */
382                         dev_err(priv->dev,
383                                 "Exceed max msg size (%zu) on TX, got: %i\n",
384                                 sizeof(*msg), byte_size);
385                         return -EINVAL;
386                 }
387
388                 print_hex_dump_debug("from client ", DUMP_PREFIX_OFFSET, 4, 4,
389                                      data, byte_size, false);
390
391                 /* Send first word */
392                 dev_dbg(priv->dev, "Sending header\n");
393                 imx_mu_write(priv, *arg++, priv->dcfg->xTR);
394
395                 /* Send signaling */
396                 dev_dbg(priv->dev, "Sending signaling\n");
397                 imx_mu_xcr_rmw(priv, IMX_MU_GCR,
398                                IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
399
400                 /* Send words to fill the mailbox */
401                 for (i = 1; i < 4 && i < msg->hdr.size; i++) {
402                         dev_dbg(priv->dev, "Sending word %d\n", i);
403                         imx_mu_write(priv, *arg++,
404                                      priv->dcfg->xTR + (i % 4) * 4);
405                 }
406
407                 /* Send the rest of the message, waiting for the remote side to read */
408                 for (; i < msg->hdr.size; i++) {
409                         dev_dbg(priv->dev, "Sending word %d\n", i);
410                         err = imx_mu_tx_waiting_write(priv, *arg++, i);
411                         if (err) {
412                                 dev_err(priv->dev, "Timeout tx %d\n", i);
413                                 return err;
414                         }
415                 }
416
417                 /* Hack: simulate tx done for the mbox framework */
418                 tasklet_schedule(&cp->txdb_tasklet);
419
420                 break;
421         default:
422                 dev_warn_ratelimited(priv->dev,
423                                      "Send data on wrong channel type: %d\n",
424                                      cp->type);
425                 return -EINVAL;
426         }
427
428         return 0;
429 }
430
431 static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
432 {
433         struct imx_sc_rpc_msg_max msg;
434         u32 *data = (u32 *)&msg;
435         u32 byte_size;
436         int err = 0;
437         int i;
438
439         dev_dbg(priv->dev, "Receiving message\n");
440
441         /* Read header */
442         dev_dbg(priv->dev, "Receiving header\n");
443         *data++ = imx_mu_read(priv, priv->dcfg->xRR);
444         byte_size = msg.hdr.size * sizeof(u32);
445         if (byte_size > sizeof(msg)) {
446                 dev_err(priv->dev, "Exceed max msg size (%zu) on RX, got: %i\n",
447                         sizeof(msg), byte_size);
448                 err = -EINVAL;
449                 goto error;
450         }
451
452         /* Read the message, waiting for each word to be written */
453         for (i = 1; i < msg.hdr.size; i++) {
454                 dev_dbg(priv->dev, "Receiving word %d\n", i);
455                 err = imx_mu_rx_waiting_read(priv, data++, i);
456                 if (err) {
457                         dev_err(priv->dev, "Timeout rx %d\n", i);
458                         goto error;
459                 }
460         }
461
462         /* Clear GIP */
463         imx_mu_write(priv, IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx),
464                      priv->dcfg->xSR[IMX_MU_GSR]);
465
466         print_hex_dump_debug("to client ", DUMP_PREFIX_OFFSET, 4, 4,
467                              &msg, byte_size, false);
468
469         /* Send data to client */
470         dev_dbg(priv->dev, "Sending message to client\n");
471         mbox_chan_received_data(cp->chan, (void *)&msg);
472
473         goto exit;
474
475 error:
476         mbox_chan_received_data(cp->chan, ERR_PTR(err));
477
478 exit:
479         return err;
480 }
481
482 static void imx_mu_txdb_tasklet(unsigned long data)
483 {
484         struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
485
486         mbox_chan_txdone(cp->chan, 0);
487 }
488
489 static irqreturn_t imx_mu_isr(int irq, void *p)
490 {
491         struct mbox_chan *chan = p;
492         struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
493         struct imx_mu_con_priv *cp = chan->con_priv;
494         u32 val, ctrl;
495
496         switch (cp->type) {
497         case IMX_MU_TYPE_TX:
498                 ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_TCR]);
499                 val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_TSR]);
500                 val &= IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx) &
501                         (ctrl & IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
502                 break;
503         case IMX_MU_TYPE_RX:
504                 ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_RCR]);
505                 val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_RSR]);
506                 val &= IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx) &
507                         (ctrl & IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
508                 break;
509         case IMX_MU_TYPE_RXDB:
510                 ctrl = imx_mu_read(priv, priv->dcfg->xCR[IMX_MU_GIER]);
511                 val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
512                 val &= IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx) &
513                         (ctrl & IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
514                 break;
515         case IMX_MU_TYPE_RST:
516                 return IRQ_NONE;
517         default:
518                 dev_warn_ratelimited(priv->dev, "Unhandled channel type %d\n",
519                                      cp->type);
520                 return IRQ_NONE;
521         }
522
523         if (!val)
524                 return IRQ_NONE;
525
526         if ((val == IMX_MU_xSR_TEn(priv->dcfg->type, cp->idx)) &&
527             (cp->type == IMX_MU_TYPE_TX)) {
528                 imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
529                 mbox_chan_txdone(chan, 0);
530         } else if ((val == IMX_MU_xSR_RFn(priv->dcfg->type, cp->idx)) &&
531                    (cp->type == IMX_MU_TYPE_RX)) {
532                 priv->dcfg->rx(priv, cp);
533         } else if ((val == IMX_MU_xSR_GIPn(priv->dcfg->type, cp->idx)) &&
534                    (cp->type == IMX_MU_TYPE_RXDB)) {
535                 priv->dcfg->rxdb(priv, cp);
536         } else {
537                 dev_warn_ratelimited(priv->dev, "Unhandled interrupt\n");
538                 return IRQ_NONE;
539         }
540
541         if (priv->suspend)
542                 pm_system_wakeup();
543
544         return IRQ_HANDLED;
545 }
546
547 static int imx_mu_send_data(struct mbox_chan *chan, void *data)
548 {
549         struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
550         struct imx_mu_con_priv *cp = chan->con_priv;
551
552         return priv->dcfg->tx(priv, cp, data);
553 }
554
555 static int imx_mu_startup(struct mbox_chan *chan)
556 {
557         struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
558         struct imx_mu_con_priv *cp = chan->con_priv;
559         unsigned long irq_flag = 0;
560         int ret;
561
562         pm_runtime_get_sync(priv->dev);
563         if (cp->type == IMX_MU_TYPE_TXDB_V2)
564                 return 0;
565
566         if (cp->type == IMX_MU_TYPE_TXDB) {
567                 /* Tx doorbell doesn't have ACK support */
568                 tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
569                              (unsigned long)cp);
570                 return 0;
571         }
572
573         /* The IPC MU IRQ should be requested with IRQF_NO_SUSPEND set */
574         if (!priv->dev->pm_domain)
575                 irq_flag |= IRQF_NO_SUSPEND;
576
577         if (!(priv->dcfg->type & IMX_MU_V2_IRQ))
578                 irq_flag |= IRQF_SHARED;
579
580         ret = request_irq(priv->irq[cp->type], imx_mu_isr, irq_flag, cp->irq_desc, chan);
581         if (ret) {
582                 dev_err(priv->dev, "Unable to acquire IRQ %d\n", priv->irq[cp->type]);
583                 return ret;
584         }
585
586         switch (cp->type) {
587         case IMX_MU_TYPE_RX:
588                 imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx), 0);
589                 break;
590         case IMX_MU_TYPE_RXDB:
591                 imx_mu_xcr_rmw(priv, IMX_MU_GIER, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx), 0);
592                 break;
593         default:
594                 break;
595         }
596
597         return 0;
598 }
599
600 static void imx_mu_shutdown(struct mbox_chan *chan)
601 {
602         struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
603         struct imx_mu_con_priv *cp = chan->con_priv;
604         int ret;
605         u32 sr;
606
607         if (cp->type == IMX_MU_TYPE_TXDB_V2) {
608                 pm_runtime_put_sync(priv->dev);
609                 return;
610         }
611
612         if (cp->type == IMX_MU_TYPE_TXDB) {
613                 tasklet_kill(&cp->txdb_tasklet);
614                 pm_runtime_put_sync(priv->dev);
615                 return;
616         }
617
618         switch (cp->type) {
619         case IMX_MU_TYPE_TX:
620                 imx_mu_xcr_rmw(priv, IMX_MU_TCR, 0, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx));
621                 break;
622         case IMX_MU_TYPE_RX:
623                 imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, cp->idx));
624                 break;
625         case IMX_MU_TYPE_RXDB:
626                 imx_mu_xcr_rmw(priv, IMX_MU_GIER, 0, IMX_MU_xCR_GIEn(priv->dcfg->type, cp->idx));
627                 break;
628         case IMX_MU_TYPE_RST:
629                 imx_mu_xcr_rmw(priv, IMX_MU_CR, IMX_MU_xCR_RST(priv->dcfg->type), 0);
630                 ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_SR], sr,
631                                          !(sr & IMX_MU_xSR_RST(priv->dcfg->type)), 1, 5);
632                 if (ret)
633                         dev_warn(priv->dev, "RST channel timeout\n");
634                 break;
635         default:
636                 break;
637         }
638
639         free_irq(priv->irq[cp->type], chan);
640         pm_runtime_put_sync(priv->dev);
641 }
642
643 static const struct mbox_chan_ops imx_mu_ops = {
644         .send_data = imx_mu_send_data,
645         .startup = imx_mu_startup,
646         .shutdown = imx_mu_shutdown,
647 };
648
649 static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
650                                                const struct of_phandle_args *sp)
651 {
652         u32 type, idx, chan;
653
654         if (sp->args_count != 2) {
655                 dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
656                 return ERR_PTR(-EINVAL);
657         }
658
659         type = sp->args[0]; /* channel type */
660         idx = sp->args[1]; /* index */
661
662         switch (type) {
663         case IMX_MU_TYPE_TX:
664         case IMX_MU_TYPE_RX:
665                 if (idx != 0)
666                         dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
667                 chan = type;
668                 break;
669         case IMX_MU_TYPE_RXDB:
670                 chan = 2 + idx;
671                 break;
672         default:
673                 dev_err(mbox->dev, "Invalid chan type: %d\n", type);
674                 return ERR_PTR(-EINVAL);
675         }
676
677         if (chan >= mbox->num_chans) {
678                 dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
679                 return ERR_PTR(-EINVAL);
680         }
681
682         return &mbox->chans[chan];
683 }
684
685 static struct mbox_chan *imx_mu_xlate(struct mbox_controller *mbox,
686                                       const struct of_phandle_args *sp)
687 {
688         struct mbox_chan *p_chan;
689         u32 type, idx, chan;
690
691         if (sp->args_count != 2) {
692                 dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
693                 return ERR_PTR(-EINVAL);
694         }
695
696         type = sp->args[0]; /* channel type */
697         idx = sp->args[1]; /* index */
698
699         /* RST only supports 1 channel */
700         if ((type == IMX_MU_TYPE_RST) && idx) {
701                 dev_err(mbox->dev, "Invalid RST channel %d\n", idx);
702                 return ERR_PTR(-EINVAL);
703         }
704
705         chan = type * 4 + idx;
706         if (chan >= mbox->num_chans) {
707                 dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
708                 return ERR_PTR(-EINVAL);
709         }
710
711         p_chan = &mbox->chans[chan];
712
713         if (type == IMX_MU_TYPE_TXDB_V2)
714                 p_chan->txdone_method = TXDONE_BY_ACK;
715
716         return p_chan;
717 }
718
719 static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
720                                            const struct of_phandle_args *sp)
721 {
722         u32 type;
723
724         if (sp->args_count < 1) {
725                 dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
726                 return ERR_PTR(-EINVAL);
727         }
728
729         type = sp->args[0]; /* channel type */
730
731         /* Only supports TXDB and RXDB */
732         if (type == IMX_MU_TYPE_TX || type == IMX_MU_TYPE_RX) {
733                 dev_err(mbox->dev, "Invalid type: %d\n", type);
734                 return ERR_PTR(-EINVAL);
735         }
736
737         return imx_mu_xlate(mbox, sp);
738 }
739
740 static void imx_mu_init_generic(struct imx_mu_priv *priv)
741 {
742         unsigned int i;
743         unsigned int val;
744
745         for (i = 0; i < IMX_MU_CHANS; i++) {
746                 struct imx_mu_con_priv *cp = &priv->con_priv[i];
747
748                 cp->idx = i % 4;
749                 cp->type = i >> 2;
750                 cp->chan = &priv->mbox_chans[i];
751                 priv->mbox_chans[i].con_priv = cp;
752                 snprintf(cp->irq_desc, sizeof(cp->irq_desc),
753                          "imx_mu_chan[%i-%i]", cp->type, cp->idx);
754         }
755
756         priv->mbox.num_chans = IMX_MU_CHANS;
757         priv->mbox.of_xlate = imx_mu_xlate;
758
759         if (priv->side_b)
760                 return;
761
762         /* Set default MU configuration */
763         for (i = 0; i < IMX_MU_xCR_MAX; i++)
764                 imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
765
766         /* Clear any pending GIP */
767         val = imx_mu_read(priv, priv->dcfg->xSR[IMX_MU_GSR]);
768         imx_mu_write(priv, val, priv->dcfg->xSR[IMX_MU_GSR]);
769
770         /* Clear any pending RSR */
771         for (i = 0; i < IMX_MU_NUM_RR; i++)
772                 imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
773 }
774
775 static void imx_mu_init_specific(struct imx_mu_priv *priv)
776 {
777         unsigned int i;
778         int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
779
780         for (i = 0; i < num_chans; i++) {
781                 struct imx_mu_con_priv *cp = &priv->con_priv[i];
782
783                 cp->idx = i < 2 ? 0 : i - 2;
784                 cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
785                 cp->chan = &priv->mbox_chans[i];
786                 priv->mbox_chans[i].con_priv = cp;
787                 snprintf(cp->irq_desc, sizeof(cp->irq_desc),
788                          "imx_mu_chan[%i-%i]", cp->type, cp->idx);
789         }
790
791         priv->mbox.num_chans = num_chans;
792         priv->mbox.of_xlate = imx_mu_specific_xlate;
793
794         /* Set default MU configuration */
795         for (i = 0; i < IMX_MU_xCR_MAX; i++)
796                 imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
797 }
798
799 static void imx_mu_init_seco(struct imx_mu_priv *priv)
800 {
801         imx_mu_init_generic(priv);
802         priv->mbox.of_xlate = imx_mu_seco_xlate;
803 }
804
805 static int imx_mu_probe(struct platform_device *pdev)
806 {
807         struct device *dev = &pdev->dev;
808         struct device_node *np = dev->of_node;
809         struct imx_mu_priv *priv;
810         const struct imx_mu_dcfg *dcfg;
811         int i, ret;
812         u32 size;
813
814         priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
815         if (!priv)
816                 return -ENOMEM;
817
818         priv->dev = dev;
819
820         priv->base = devm_platform_ioremap_resource(pdev, 0);
821         if (IS_ERR(priv->base))
822                 return PTR_ERR(priv->base);
823
824         dcfg = of_device_get_match_data(dev);
825         if (!dcfg)
826                 return -EINVAL;
827         priv->dcfg = dcfg;
828         if (priv->dcfg->type & IMX_MU_V2_IRQ) {
829                 priv->irq[IMX_MU_TYPE_TX] = platform_get_irq_byname(pdev, "tx");
830                 if (priv->irq[IMX_MU_TYPE_TX] < 0)
831                         return priv->irq[IMX_MU_TYPE_TX];
832                 priv->irq[IMX_MU_TYPE_RX] = platform_get_irq_byname(pdev, "rx");
833                 if (priv->irq[IMX_MU_TYPE_RX] < 0)
834                         return priv->irq[IMX_MU_TYPE_RX];
835         } else {
836                 ret = platform_get_irq(pdev, 0);
837                 if (ret < 0)
838                         return ret;
839
840                 for (i = 0; i < IMX_MU_CHANS; i++)
841                         priv->irq[i] = ret;
842         }
843
844         if (priv->dcfg->type & IMX_MU_V2_S4)
845                 size = sizeof(struct imx_s4_rpc_msg_max);
846         else
847                 size = sizeof(struct imx_sc_rpc_msg_max);
848
849         priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
850         if (!priv->msg)
851                 return -ENOMEM;
852
853         priv->clk = devm_clk_get(dev, NULL);
854         if (IS_ERR(priv->clk)) {
855                 if (PTR_ERR(priv->clk) != -ENOENT)
856                         return PTR_ERR(priv->clk);
857
858                 priv->clk = NULL;
859         }
860
861         ret = clk_prepare_enable(priv->clk);
862         if (ret) {
863                 dev_err(dev, "Failed to enable clock\n");
864                 return ret;
865         }
866
867         priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
868
869         priv->dcfg->init(priv);
870
871         spin_lock_init(&priv->xcr_lock);
872
873         priv->mbox.dev = dev;
874         priv->mbox.ops = &imx_mu_ops;
875         priv->mbox.chans = priv->mbox_chans;
876         priv->mbox.txdone_irq = true;
877
878         platform_set_drvdata(pdev, priv);
879
880         ret = devm_mbox_controller_register(dev, &priv->mbox);
881         if (ret) {
882                 clk_disable_unprepare(priv->clk);
883                 return ret;
884         }
885
886         pm_runtime_enable(dev);
887
888         ret = pm_runtime_resume_and_get(dev);
889         if (ret < 0)
890                 goto disable_runtime_pm;
891
892         ret = pm_runtime_put_sync(dev);
893         if (ret < 0)
894                 goto disable_runtime_pm;
895
896         clk_disable_unprepare(priv->clk);
897
898         return 0;
899
900 disable_runtime_pm:
901         pm_runtime_disable(dev);
902         clk_disable_unprepare(priv->clk);
903         return ret;
904 }
905
906 static int imx_mu_remove(struct platform_device *pdev)
907 {
908         struct imx_mu_priv *priv = platform_get_drvdata(pdev);
909
910         pm_runtime_disable(priv->dev);
911
912         return 0;
913 }
914
915 static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
916         .tx     = imx_mu_generic_tx,
917         .rx     = imx_mu_generic_rx,
918         .rxdb   = imx_mu_generic_rxdb,
919         .init   = imx_mu_init_generic,
920         .xTR    = 0x0,
921         .xRR    = 0x10,
922         .xSR    = {0x20, 0x20, 0x20, 0x20},
923         .xCR    = {0x24, 0x24, 0x24, 0x24, 0x24},
924 };
925
926 static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
927         .tx     = imx_mu_generic_tx,
928         .rx     = imx_mu_generic_rx,
929         .rxdb   = imx_mu_generic_rxdb,
930         .init   = imx_mu_init_generic,
931         .xTR    = 0x20,
932         .xRR    = 0x40,
933         .xSR    = {0x60, 0x60, 0x60, 0x60},
934         .xCR    = {0x64, 0x64, 0x64, 0x64, 0x64},
935 };
936
937 static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
938         .tx     = imx_mu_generic_tx,
939         .rx     = imx_mu_generic_rx,
940         .rxdb   = imx_mu_generic_rxdb,
941         .init   = imx_mu_init_generic,
942         .type   = IMX_MU_V2,
943         .xTR    = 0x200,
944         .xRR    = 0x280,
945         .xSR    = {0xC, 0x118, 0x124, 0x12C},
946         .xCR    = {0x8, 0x110, 0x114, 0x120, 0x128},
947 };
948
949 static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
950         .tx     = imx_mu_specific_tx,
951         .rx     = imx_mu_specific_rx,
952         .init   = imx_mu_init_specific,
953         .type   = IMX_MU_V2 | IMX_MU_V2_S4,
954         .xTR    = 0x200,
955         .xRR    = 0x280,
956         .xSR    = {0xC, 0x118, 0x124, 0x12C},
957         .xCR    = {0x8, 0x110, 0x114, 0x120, 0x128},
958 };
959
960 static const struct imx_mu_dcfg imx_mu_cfg_imx93_s4 = {
961         .tx     = imx_mu_specific_tx,
962         .rx     = imx_mu_specific_rx,
963         .init   = imx_mu_init_specific,
964         .type   = IMX_MU_V2 | IMX_MU_V2_S4 | IMX_MU_V2_IRQ,
965         .xTR    = 0x200,
966         .xRR    = 0x280,
967         .xSR    = {0xC, 0x118, 0x124, 0x12C},
968         .xCR    = {0x8, 0x110, 0x114, 0x120, 0x128},
969 };
970
971 static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
972         .tx     = imx_mu_specific_tx,
973         .rx     = imx_mu_specific_rx,
974         .init   = imx_mu_init_specific,
975         .rxdb   = imx_mu_generic_rxdb,
976         .xTR    = 0x0,
977         .xRR    = 0x10,
978         .xSR    = {0x20, 0x20, 0x20, 0x20},
979         .xCR    = {0x24, 0x24, 0x24, 0x24, 0x24},
980 };
981
982 static const struct imx_mu_dcfg imx_mu_cfg_imx8_seco = {
983         .tx     = imx_mu_seco_tx,
984         .rx     = imx_mu_generic_rx,
985         .rxdb   = imx_mu_seco_rxdb,
986         .init   = imx_mu_init_seco,
987         .xTR    = 0x0,
988         .xRR    = 0x10,
989         .xSR    = {0x20, 0x20, 0x20, 0x20},
990         .xCR    = {0x24, 0x24, 0x24, 0x24, 0x24},
991 };
992
993 static const struct of_device_id imx_mu_dt_ids[] = {
994         { .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
995         { .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
996         { .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
997         { .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
998         { .compatible = "fsl,imx93-mu-s4", .data = &imx_mu_cfg_imx93_s4 },
999         { .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
1000         { .compatible = "fsl,imx8-mu-seco", .data = &imx_mu_cfg_imx8_seco },
1001         { },
1002 };
1003 MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);
1004
1005 static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
1006 {
1007         struct imx_mu_priv *priv = dev_get_drvdata(dev);
1008         int i;
1009
1010         if (!priv->clk) {
1011                 for (i = 0; i < IMX_MU_xCR_MAX; i++)
1012                         priv->xcr[i] = imx_mu_read(priv, priv->dcfg->xCR[i]);
1013         }
1014
1015         priv->suspend = true;
1016
1017         return 0;
1018 }
1019
1020 static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
1021 {
1022         struct imx_mu_priv *priv = dev_get_drvdata(dev);
1023         int i;
1024
1025         /*
1026          * ONLY restore the MU registers when context has been lost. The
1027          * TIE bits could already be set during noirq resume while MU data
1028          * communication is going on; restoring the saved values would
1029          * then overwrite TIE and cause MU data sends to fail, which may
1030          * lead to a system freeze. This issue was observed when testing
1031          * freeze-mode suspend.
1032          */
1033         if (!priv->clk && !imx_mu_read(priv, priv->dcfg->xCR[0])) {
1034                 for (i = 0; i < IMX_MU_xCR_MAX; i++)
1035                         imx_mu_write(priv, priv->xcr[i], priv->dcfg->xCR[i]);
1036         }
1037
1038         priv->suspend = false;
1039
1040         return 0;
1041 }
1042
1043 static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
1044 {
1045         struct imx_mu_priv *priv = dev_get_drvdata(dev);
1046
1047         clk_disable_unprepare(priv->clk);
1048
1049         return 0;
1050 }
1051
1052 static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
1053 {
1054         struct imx_mu_priv *priv = dev_get_drvdata(dev);
1055         int ret;
1056
1057         ret = clk_prepare_enable(priv->clk);
1058         if (ret)
1059                 dev_err(dev, "failed to enable clock\n");
1060
1061         return ret;
1062 }
1063
1064 static const struct dev_pm_ops imx_mu_pm_ops = {
1065         SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
1066                                       imx_mu_resume_noirq)
1067         SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
1068                            imx_mu_runtime_resume, NULL)
1069 };
1070
1071 static struct platform_driver imx_mu_driver = {
1072         .probe          = imx_mu_probe,
1073         .remove         = imx_mu_remove,
1074         .driver = {
1075                 .name   = "imx_mu",
1076                 .of_match_table = imx_mu_dt_ids,
1077                 .pm = &imx_mu_pm_ops,
1078         },
1079 };
1080 module_platform_driver(imx_mu_driver);
1081
1082 MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
1083 MODULE_DESCRIPTION("Message Unit driver for i.MX");
1084 MODULE_LICENSE("GPL v2");