// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthrough DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include "ptdma.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
        return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}

static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct pt_dma_desc, vd);
}

static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);

        vchan_free_chan_resources(&chan->vc);
}

static void pt_synchronize(struct dma_chan *dma_chan)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);

        vchan_synchronize(&chan->vc);
}

static void pt_do_cleanup(struct virt_dma_desc *vd)
{
        struct pt_dma_desc *desc = to_pt_desc(vd);
        struct pt_device *pt = desc->pt;

        kmem_cache_free(pt->dma_desc_cache, desc);
}

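/*
 * Mark a descriptor as issued and hand its embedded passthru command to the
 * device's command queue. Completion is reported asynchronously through
 * pt_cmd_callback(), which was installed when the descriptor was created.
 */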
static int pt_dma_start_desc(struct pt_dma_desc *desc)
{
        struct pt_passthru_engine *pt_engine;
        struct pt_device *pt;
        struct pt_cmd *pt_cmd;
        struct pt_cmd_queue *cmd_q;

        desc->issued_to_hw = 1;

        pt_cmd = &desc->pt_cmd;
        pt = pt_cmd->pt;
        cmd_q = &pt->cmd_q;
        pt_engine = &pt_cmd->passthru;

        pt->tdata.cmd = pt_cmd;

        /* Execute the command */
        pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

        return 0;
}

static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
        /* Get the next DMA descriptor on the active list */
        struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

        return vd ? to_pt_desc(vd) : NULL;
}

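/*
 * Retire finished descriptors and return the next one that still needs to be
 * issued to the hardware. A completed (or errored) descriptor has its cookie
 * completed and is removed from the active list under the channel lock; the
 * client callback is then invoked and the descriptor freed with the lock
 * dropped. Returns NULL once the active list is empty.
 */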
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
                                                 struct pt_dma_desc *desc)
{
        struct dma_async_tx_descriptor *tx_desc;
        struct virt_dma_desc *vd;
        unsigned long flags;

        /* Loop over descriptors until one that still needs to be issued is found */
        do {
                if (desc) {
                        if (!desc->issued_to_hw) {
                                /* No errors, keep going */
                                if (desc->status != DMA_ERROR)
                                        return desc;
                        }

                        tx_desc = &desc->vd.tx;
                        vd = &desc->vd;
                } else {
                        tx_desc = NULL;
                }

                spin_lock_irqsave(&chan->vc.lock, flags);

                if (desc) {
                        if (desc->status != DMA_COMPLETE) {
                                if (desc->status != DMA_ERROR)
                                        desc->status = DMA_COMPLETE;

                                dma_cookie_complete(tx_desc);
                                dma_descriptor_unmap(tx_desc);
                                list_del(&desc->vd.node);
                        } else {
                                /* Don't handle it twice */
                                tx_desc = NULL;
                        }
                }

                desc = pt_next_dma_desc(chan);

                spin_unlock_irqrestore(&chan->vc.lock, flags);

                /* Invoke the client callback and free the descriptor outside the lock */
                if (tx_desc) {
                        dmaengine_desc_get_callback_invoke(tx_desc, NULL);
                        dma_run_dependencies(tx_desc);
                        vchan_vdesc_fini(vd);
                }
        } while (desc);

        return NULL;
}

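/*
 * Completion callback for a passthru command. It is installed on every
 * pt_cmd by pt_create_desc() and is also called directly with err == 0 from
 * pt_issue_pending()/pt_resume() to kick an idle channel. It retires the
 * finished descriptor and submits the next one on the active list.
 */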
static void pt_cmd_callback(void *data, int err)
{
        struct pt_dma_desc *desc = data;
        struct dma_chan *dma_chan;
        struct pt_dma_chan *chan;
        int ret;

        if (err == -EINPROGRESS)
                return;

        dma_chan = desc->vd.tx.chan;
        chan = to_pt_chan(dma_chan);

        if (err)
                desc->status = DMA_ERROR;

        while (true) {
                /* Check for DMA descriptor completion */
                desc = pt_handle_active_desc(chan, desc);

                /* Don't submit cmd if no descriptor or DMA is paused */
                if (!desc)
                        break;

                ret = pt_dma_start_desc(desc);
                if (!ret)
                        break;

                desc->status = DMA_ERROR;
        }
}

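/*
 * Allocate a descriptor from the per-device slab cache. GFP_NOWAIT is used
 * because prep callbacks may run in atomic context. The queue interrupt is
 * enabled only when the submitter asked for DMA_PREP_INTERRUPT.
 */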
static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
                                             unsigned long flags)
{
        struct pt_dma_desc *desc;

        desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
        if (!desc)
                return NULL;

        vchan_tx_prep(&chan->vc, &desc->vd, flags);

        desc->pt = chan->pt;
        desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
        desc->issued_to_hw = 0;
        desc->status = DMA_IN_PROGRESS;

        return desc;
}

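/*
 * Build a single passthru (memory-to-memory copy) command: program the
 * source, destination and length, and attach pt_cmd_callback() so the
 * channel keeps running when this command completes.
 */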
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
                                          dma_addr_t dst,
                                          dma_addr_t src,
                                          unsigned int len,
                                          unsigned long flags)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        struct pt_passthru_engine *pt_engine;
        struct pt_dma_desc *desc;
        struct pt_cmd *pt_cmd;

        desc = pt_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        pt_cmd = &desc->pt_cmd;
        pt_cmd->pt = chan->pt;
        pt_engine = &pt_cmd->passthru;
        pt_cmd->engine = PT_ENGINE_PASSTHRU;
        pt_engine->src_dma = src;
        pt_engine->dst_dma = dst;
        pt_engine->src_len = len;
        pt_cmd->pt_cmd_callback = pt_cmd_callback;
        pt_cmd->data = desc;

        desc->len = len;

        return desc;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
                   dma_addr_t src, size_t len, unsigned long flags)
{
        struct pt_dma_desc *desc;

        desc = pt_create_desc(dma_chan, dst, src, len, flags);
        if (!desc)
                return NULL;

        return &desc->vd.tx;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        struct pt_dma_desc *desc;

        desc = pt_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        return &desc->vd.tx;
}

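/*
 * Move descriptors submitted with tx_submit() onto the active list and, if
 * nothing was running before, start processing by invoking the completion
 * callback directly. If the engine was already busy, the in-flight command's
 * completion path will pick up the newly issued work.
 */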
static void pt_issue_pending(struct dma_chan *dma_chan)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        struct pt_dma_desc *desc;
        unsigned long flags;
        bool engine_is_idle = true;

        spin_lock_irqsave(&chan->vc.lock, flags);

        /* The engine is idle only if nothing is on the active list yet */
        desc = pt_next_dma_desc(chan);
        if (desc)
                engine_is_idle = false;

        vchan_issue_pending(&chan->vc);

        /* Get the head of the active list now that the pending work has been added */
        desc = pt_next_dma_desc(chan);

        spin_unlock_irqrestore(&chan->vc.lock, flags);

        /* If there was nothing active, start processing */
        if (engine_is_idle && desc)
                pt_cmd_callback(desc, 0);
}

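/*
 * Report the status of a cookie; the command queue state is checked via
 * pt_check_status_trans() first so that a just-finished transaction is
 * reflected in the cookie bookkeeping.
 */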
static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct pt_device *pt = to_pt_chan(c)->pt;
        struct pt_cmd_queue *cmd_q = &pt->cmd_q;

        pt_check_status_trans(pt, cmd_q);
        return dma_cookie_status(c, cookie, txstate);
}

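/* Halt the command queue; queued descriptors are kept until pt_resume() */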
static int pt_pause(struct dma_chan *dma_chan)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        pt_stop_queue(&chan->pt->cmd_q);
        spin_unlock_irqrestore(&chan->vc.lock, flags);

        return 0;
}

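/* Restart the command queue and, if work is still queued, kick processing again */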
static int pt_resume(struct dma_chan *dma_chan)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        struct pt_dma_desc *desc = NULL;
        unsigned long flags;

        spin_lock_irqsave(&chan->vc.lock, flags);
        pt_start_queue(&chan->pt->cmd_q);
        desc = pt_next_dma_desc(chan);
        spin_unlock_irqrestore(&chan->vc.lock, flags);

        /* If there was something active, re-start */
        if (desc)
                pt_cmd_callback(desc, 0);

        return 0;
}

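/*
 * Abort all outstanding work on the channel: quiesce the command queue's
 * interrupt state, then pull every descriptor off the virt-dma lists and
 * free it.
 */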
static int pt_terminate_all(struct dma_chan *dma_chan)
{
        struct pt_dma_chan *chan = to_pt_chan(dma_chan);
        unsigned long flags;
        struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q;
        LIST_HEAD(head);

        iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);
        spin_lock_irqsave(&chan->vc.lock, flags);
        vchan_get_all_descriptors(&chan->vc, &head);
        spin_unlock_irqrestore(&chan->vc.lock, flags);

        vchan_dma_desc_free_list(&chan->vc, &head);
        vchan_free_chan_resources(&chan->vc);

        return 0;
}

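/*
 * Register the passthrough engine as a dmaengine provider with a single
 * channel. The device is kept DMA_PRIVATE since it is intended for the AMD
 * NTB use case; capabilities are limited to memcpy and interrupt descriptors.
 */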
int pt_dmaengine_register(struct pt_device *pt)
{
        struct pt_dma_chan *chan;
        struct dma_device *dma_dev = &pt->dma_dev;
        char *cmd_cache_name;
        char *desc_cache_name;
        int ret;

        pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
                                       GFP_KERNEL);
        if (!pt->pt_dma_chan)
                return -ENOMEM;

        cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
                                        "%s-dmaengine-cmd-cache",
                                        dev_name(pt->dev));
        if (!cmd_cache_name)
                return -ENOMEM;

        desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
                                         "%s-dmaengine-desc-cache",
                                         dev_name(pt->dev));
        if (!desc_cache_name) {
                ret = -ENOMEM;
                goto err_cache;
        }

        pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
                                               sizeof(struct pt_dma_desc), 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!pt->dma_desc_cache) {
                ret = -ENOMEM;
                goto err_cache;
        }

        dma_dev->dev = pt->dev;
        dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
        dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
        dma_dev->directions = DMA_MEM_TO_MEM;
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        /*
         * PTDMA is intended to be used with the AMD NTB devices, hence
         * marking it as DMA_PRIVATE.
         */
        dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

        INIT_LIST_HEAD(&dma_dev->channels);

        chan = pt->pt_dma_chan;
        chan->pt = pt;

        /* Set base and prep routines */
        dma_dev->device_free_chan_resources = pt_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
        dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
        dma_dev->device_issue_pending = pt_issue_pending;
        dma_dev->device_tx_status = pt_tx_status;
        dma_dev->device_pause = pt_pause;
        dma_dev->device_resume = pt_resume;
        dma_dev->device_terminate_all = pt_terminate_all;
        dma_dev->device_synchronize = pt_synchronize;

        chan->vc.desc_free = pt_do_cleanup;
        vchan_init(&chan->vc, dma_dev);

        dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64));

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_reg;

        return 0;

err_reg:
        kmem_cache_destroy(pt->dma_desc_cache);

err_cache:
        /* dma_cmd_cache is not allocated in this path; destroy is a no-op for NULL */
        kmem_cache_destroy(pt->dma_cmd_cache);

        return ret;
}

void pt_dmaengine_unregister(struct pt_device *pt)
{
        struct dma_device *dma_dev = &pt->dma_dev;

        dma_async_device_unregister(dma_dev);

        kmem_cache_destroy(pt->dma_desc_cache);
        kmem_cache_destroy(pt->dma_cmd_cache);
}