drivers/misc/cxl/native.c
1 /*
2  * Copyright 2014 IBM Corp.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version
7  * 2 of the License, or (at your option) any later version.
8  */
9
10 #include <linux/spinlock.h>
11 #include <linux/sched.h>
12 #include <linux/sched/clock.h>
13 #include <linux/slab.h>
14 #include <linux/mutex.h>
15 #include <linux/mm.h>
16 #include <linux/uaccess.h>
17 #include <linux/delay.h>
18 #include <asm/synch.h>
19 #include <misc/cxl-base.h>
20
21 #include "cxl.h"
22 #include "trace.h"
23
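/*
 * Shared helper for afu_enable(), cxl_afu_disable() and
 * native_afu_reset(): clear the 'clear' bits and set the 'command'
 * bits in CXL_AFU_Cntl_An, then poll until (AFU_Cntl & mask) == result,
 * returning -EBUSY on timeout or -EIO if the link goes down.
 */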
24 static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
25                        u64 result, u64 mask, bool enabled)
26 {
27         u64 AFU_Cntl;
28         unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
29         int rc = 0;
30
31         spin_lock(&afu->afu_cntl_lock);
32         pr_devel("AFU command starting: %llx\n", command);
33
34         trace_cxl_afu_ctrl(afu, command);
35
36         AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
37         cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
38
39         AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
40         while ((AFU_Cntl & mask) != result) {
41                 if (time_after_eq(jiffies, timeout)) {
42                         dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
43                         rc = -EBUSY;
44                         goto out;
45                 }
46
47                 if (!cxl_ops->link_ok(afu->adapter, afu)) {
48                         afu->enabled = enabled;
49                         rc = -EIO;
50                         goto out;
51                 }
52
53                 pr_devel_ratelimited("AFU control... (0x%016llx)\n",
54                                      AFU_Cntl | command);
55                 cpu_relax();
56                 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
57         }
58
59         if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
60                 /*
61                  * Workaround for a bug in the XSL used in the Mellanox CX4
62                  * that fails to clear the RA bit after an AFU reset,
63                  * preventing subsequent AFU resets from working.
64                  */
65                 cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
66         }
67
68         pr_devel("AFU command complete: %llx\n", command);
69         afu->enabled = enabled;
70 out:
71         trace_cxl_afu_ctrl_done(afu, command, rc);
72         spin_unlock(&afu->afu_cntl_lock);
73
74         return rc;
75 }
76
77 static int afu_enable(struct cxl_afu *afu)
78 {
79         pr_devel("AFU enable request\n");
80
81         return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
82                            CXL_AFU_Cntl_An_ES_Enabled,
83                            CXL_AFU_Cntl_An_ES_MASK, true);
84 }
85
86 int cxl_afu_disable(struct cxl_afu *afu)
87 {
88         pr_devel("AFU disable request\n");
89
90         return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
91                            CXL_AFU_Cntl_An_ES_Disabled,
92                            CXL_AFU_Cntl_An_ES_MASK, false);
93 }
94
95 /* This will disable as well as reset */
96 static int native_afu_reset(struct cxl_afu *afu)
97 {
98         pr_devel("AFU reset request\n");
99
100         return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
101                            CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
102                            CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
103                            false);
104 }
105
106 static int native_afu_check_and_enable(struct cxl_afu *afu)
107 {
108         if (!cxl_ops->link_ok(afu->adapter, afu)) {
109                 WARN(1, "Refusing to enable afu while link down!\n");
110                 return -EIO;
111         }
112         if (afu->enabled)
113                 return 0;
114         return afu_enable(afu);
115 }
116
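/*
 * Purge the PSL for this slice: the AFU is expected to be disabled
 * already (we warn and disable it if not), then SCNTL_An[Pc] is set and
 * polled until the purge is no longer pending, acknowledging any faults
 * reported in PSL_DSISR_An along the way, before the purge bit is
 * cleared again.
 */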
117 int cxl_psl_purge(struct cxl_afu *afu)
118 {
119         u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
120         u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
121         u64 dsisr, dar;
122         u64 start, end;
123         unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
124         int rc = 0;
125
126         trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);
127
128         pr_devel("PSL purge request\n");
129
130         if (!cxl_ops->link_ok(afu->adapter, afu)) {
131                 dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
132                 rc = -EIO;
133                 goto out;
134         }
135
136         if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
137                 WARN(1, "psl_purge request while AFU not disabled!\n");
138                 cxl_afu_disable(afu);
139         }
140
141         cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
142                        PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
143         start = local_clock();
144         PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
145         while ((PSL_CNTL &  CXL_PSL_SCNTL_An_Ps_MASK)
146                         == CXL_PSL_SCNTL_An_Ps_Pending) {
147                 if (time_after_eq(jiffies, timeout)) {
148                         dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
149                         rc = -EBUSY;
150                         goto out;
151                 }
152                 if (!cxl_ops->link_ok(afu->adapter, afu)) {
153                         rc = -EIO;
154                         goto out;
155                 }
156
157                 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
158                 pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
159                 if (dsisr & CXL_PSL_DSISR_TRANS) {
160                         dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
161                         dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
162                         cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
163                 } else if (dsisr) {
164                         dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
165                         cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
166                 } else {
167                         cpu_relax();
168                 }
169                 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
170         }
171         end = local_clock();
172         pr_devel("PSL purged in %lld ns\n", end - start);
173
174         cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
175                        PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
176 out:
177         trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
178         return rc;
179 }
180
181 static int spa_max_procs(int spa_size)
182 {
183         /*
184          * From the CAIA:
185          *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
186          * Most of that junk is really just an overly-complicated way of saying
187          * the last 256 bytes are __aligned(128), so it's really:
188          *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
189          * and
190          *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
191          * so
192          *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
193          * Ignore the alignment (which is safe in this case as long as we are
194          * careful with our rounding) and solve for n:
195          */
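        /*
         * Illustrative check, assuming a single 4K page: spa_size = 4096
         * gives n = ((4096 / 8) - 96) / 17 = 24, and a 24-entry SPA needs
         * ((24 + 4) * 128) + (24 * 8) + 256 = 4032 bytes, which fits,
         * while n = 25 would need 4168 bytes and would not.
         */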
196         return ((spa_size / 8) - 96) / 17;
197 }
198
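/*
 * Allocate the scheduled process area: double the allocation one page
 * order at a time until it can hold afu->num_procs process elements,
 * capping the SPA at 1MB and clamping num_procs down to what fits if
 * that cap is reached.
 */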
199 int cxl_alloc_spa(struct cxl_afu *afu)
200 {
201         unsigned spa_size;
202
203         /* Work out how many pages to allocate */
204         afu->native->spa_order = -1;
205         do {
206                 afu->native->spa_order++;
207                 spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
208
209                 if (spa_size > 0x100000) {
210                         dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
211                                         afu->native->spa_max_procs, afu->native->spa_size);
212                         afu->num_procs = afu->native->spa_max_procs;
213                         break;
214                 }
215
216                 afu->native->spa_size = spa_size;
217                 afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
218         } while (afu->native->spa_max_procs < afu->num_procs);
219
220         if (!(afu->native->spa = (struct cxl_process_element *)
221               __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
222                 pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
223                 return -ENOMEM;
224         }
225         pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
226                  1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);
227
228         return 0;
229 }
230
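/*
 * Tell the PSL where the scheduled process area lives: compute the
 * location of the software command/status doubleword within the SPA,
 * then program CXL_PSL_SPAP_An with the SPA's physical address, its
 * encoded size and the valid bit.
 */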
231 static void attach_spa(struct cxl_afu *afu)
232 {
233         u64 spap;
234
235         afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
236                                             ((afu->native->spa_max_procs + 3) * 128));
237
238         spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
239         spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
240         spap |= CXL_PSL_SPAP_V;
241         pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
242                 afu->native->spa, afu->native->spa_max_procs,
243                 afu->native->sw_command_status, spap);
244         cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
245 }
246
247 static inline void detach_spa(struct cxl_afu *afu)
248 {
249         cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
250 }
251
252 void cxl_release_spa(struct cxl_afu *afu)
253 {
254         if (afu->native->spa) {
255                 free_pages((unsigned long) afu->native->spa,
256                         afu->native->spa_order);
257                 afu->native->spa = NULL;
258         }
259 }
260
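/*
 * Adapter-wide TLB and SLB invalidation: select all AFUs, issue an
 * invalidate-all via CXL_PSL_TLBIA and then CXL_PSL_SLBIA, polling each
 * for completion (the P bit clearing) within CXL_TIMEOUT.
 */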
261 int cxl_tlb_slb_invalidate(struct cxl *adapter)
262 {
263         unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
264
265         pr_devel("CXL adapter wide TLBIA & SLBIA\n");
266
267         cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);
268
269         cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
270         while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
271                 if (time_after_eq(jiffies, timeout)) {
272                         dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
273                         return -EBUSY;
274                 }
275                 if (!cxl_ops->link_ok(adapter, NULL))
276                         return -EIO;
277                 cpu_relax();
278         }
279
280         cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
281         while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
282                 if (time_after_eq(jiffies, timeout)) {
283                         dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
284                         return -EBUSY;
285                 }
286                 if (!cxl_ops->link_ok(adapter, NULL))
287                         return -EIO;
288                 cpu_relax();
289         }
290         return 0;
291 }
292
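/*
 * Flush the PSL data cache: set PSL_Control[Fr] and poll the flush
 * status field until it reports complete, then clear the flush request
 * bit again.
 */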
293 int cxl_data_cache_flush(struct cxl *adapter)
294 {
295         u64 reg;
296         unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
297
298         pr_devel("Flushing data cache\n");
299
300         reg = cxl_p1_read(adapter, CXL_PSL_Control);
301         reg |= CXL_PSL_Control_Fr;
302         cxl_p1_write(adapter, CXL_PSL_Control, reg);
303
304         reg = cxl_p1_read(adapter, CXL_PSL_Control);
305         while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
306                 if (time_after_eq(jiffies, timeout)) {
307                         dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
308                         return -EBUSY;
309                 }
310
311                 if (!cxl_ops->link_ok(adapter, NULL)) {
312                         dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
313                         return -EIO;
314                 }
315                 cpu_relax();
316                 reg = cxl_p1_read(adapter, CXL_PSL_Control);
317         }
318
319         reg &= ~CXL_PSL_Control_Fr;
320         cxl_p1_write(adapter, CXL_PSL_Control, reg);
321         return 0;
322 }
323
324 static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
325 {
326         int rc;
327
328         /* 1. Disable SSTP by writing 0 to SSTP1[V] */
329         cxl_p2n_write(afu, CXL_SSTP1_An, 0);
330
331         /* 2. Invalidate all SLB entries */
332         if ((rc = cxl_afu_slbia(afu)))
333                 return rc;
334
335         /* 3. Set SSTP0_An */
336         cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);
337
338         /* 4. Set SSTP1_An */
339         cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);
340
341         return 0;
342 }
343
344 /* Using the per-slice version (i.e. SLBIA_An) may improve performance here */
345 static void slb_invalid(struct cxl_context *ctx)
346 {
347         struct cxl *adapter = ctx->afu->adapter;
348         u64 slbia;
349
350         WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));
351
352         cxl_p1_write(adapter, CXL_PSL_LBISEL,
353                         ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
354                         be32_to_cpu(ctx->elem->lpid));
355         cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
356
357         while (1) {
358                 if (!cxl_ops->link_ok(adapter, NULL))
359                         break;
360                 slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
361                 if (!(slbia & CXL_TLB_SLB_P))
362                         break;
363                 cpu_relax();
364         }
365 }
366
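/*
 * Issue a process element "linked list" command (add/terminate/remove)
 * to the PSL: publish the desired software state and command word in
 * the SPA, write CXL_PSL_LLCMD_An, then poll sw_command_status until
 * the PSL echoes the command back (or the link drops / we time out).
 */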
367 static int do_process_element_cmd(struct cxl_context *ctx,
368                                   u64 cmd, u64 pe_state)
369 {
370         u64 state;
371         unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
372         int rc = 0;
373
374         trace_cxl_llcmd(ctx, cmd);
375
376         WARN_ON(!ctx->afu->enabled);
377
378         ctx->elem->software_state = cpu_to_be32(pe_state);
379         smp_wmb();
380         *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
381         smp_mb();
382         cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
383         while (1) {
384                 if (time_after_eq(jiffies, timeout)) {
385                         dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
386                         rc = -EBUSY;
387                         goto out;
388                 }
389                 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
390                         dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
391                         rc = -EIO;
392                         goto out;
393                 }
394                 state = be64_to_cpup(ctx->afu->native->sw_command_status);
395                 if (state == ~0ULL) {
396                         pr_err("cxl: Error adding process element to AFU\n");
397                         rc = -1;
398                         goto out;
399                 }
400                 if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK  | CXL_SPA_SW_LINK_MASK)) ==
401                     (cmd | (cmd >> 16) | ctx->pe))
402                         break;
403                 /*
404                  * The command won't finish in the PSL if there are
405                  * outstanding DSIs.  Hence we need to yield here in
406                  * case there are outstanding DSIs that we need to
407                  * service.  Tuning possibility: we could wait for a
408                  * while before calling schedule().
409                  */
410                 schedule();
411
412         }
413 out:
414         trace_cxl_llcmd_done(ctx, cmd, rc);
415         return rc;
416 }
417
418 static int add_process_element(struct cxl_context *ctx)
419 {
420         int rc = 0;
421
422         mutex_lock(&ctx->afu->native->spa_mutex);
423         pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
424         if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
425                 ctx->pe_inserted = true;
426         pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
427         mutex_unlock(&ctx->afu->native->spa_mutex);
428         return rc;
429 }
430
431 static int terminate_process_element(struct cxl_context *ctx)
432 {
433         int rc = 0;
434
435         /* fast path terminate if it's already invalid */
436         if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
437                 return rc;
438
439         mutex_lock(&ctx->afu->native->spa_mutex);
440         pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
441         /* We could be asked to terminate when the hw is down. That
442          * should always succeed: it's not running if the hw has gone
443          * away and is being reset.
444          */
445         if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
446                 rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
447                                             CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
448         ctx->elem->software_state = 0;  /* Remove Valid bit */
449         pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
450         mutex_unlock(&ctx->afu->native->spa_mutex);
451         return rc;
452 }
453
454 static int remove_process_element(struct cxl_context *ctx)
455 {
456         int rc = 0;
457
458         mutex_lock(&ctx->afu->native->spa_mutex);
459         pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
460
461         /* We could be asked to remove when the hw is down. Again, if
462          * the hw is down, the PE is gone, so we succeed.
463          */
464         if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
465                 rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
466
467         if (!rc)
468                 ctx->pe_inserted = false;
469         slb_invalid(ctx);
470         pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
471         mutex_unlock(&ctx->afu->native->spa_mutex);
472
473         return rc;
474 }
475
476 void cxl_assign_psn_space(struct cxl_context *ctx)
477 {
478         if (!ctx->afu->pp_size || ctx->master) {
479                 ctx->psn_phys = ctx->afu->psn_phys;
480                 ctx->psn_size = ctx->afu->adapter->ps_size;
481         } else {
482                 ctx->psn_phys = ctx->afu->psn_phys +
483                         (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
484                 ctx->psn_size = ctx->afu->pp_size;
485         }
486 }
487
488 static int activate_afu_directed(struct cxl_afu *afu)
489 {
490         int rc;
491
492         dev_info(&afu->dev, "Activating AFU directed mode\n");
493
494         afu->num_procs = afu->max_procs_virtualised;
495         if (afu->native->spa == NULL) {
496                 if (cxl_alloc_spa(afu))
497                         return -ENOMEM;
498         }
499         attach_spa(afu);
500
501         cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
502         cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
503         cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);
504
505         afu->current_mode = CXL_MODE_DIRECTED;
506
507         if ((rc = cxl_chardev_m_afu_add(afu)))
508                 return rc;
509
510         if ((rc = cxl_sysfs_afu_m_add(afu)))
511                 goto err;
512
513         if ((rc = cxl_chardev_s_afu_add(afu)))
514                 goto err1;
515
516         return 0;
517 err1:
518         cxl_sysfs_afu_m_remove(afu);
519 err:
520         cxl_chardev_afu_remove(afu);
521         return rc;
522 }
523
524 #ifdef CONFIG_CPU_LITTLE_ENDIAN
525 #define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
526 #else
527 #define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
528 #endif
529
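/*
 * Derive the PSL State Register (CXL_PSL_SR_An) image for a context:
 * endianness, master process (MP) and tags-active (TC) bits, plus the
 * privilege/relocation/64-bit mode bits, which differ between kernel
 * and user contexts.
 */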
530 static u64 calculate_sr(struct cxl_context *ctx)
531 {
532         u64 sr = 0;
533
534         set_endian(sr);
535         if (ctx->master)
536                 sr |= CXL_PSL_SR_An_MP;
537         if (mfspr(SPRN_LPCR) & LPCR_TC)
538                 sr |= CXL_PSL_SR_An_TC;
539         if (ctx->kernel) {
540                 if (!ctx->real_mode)
541                         sr |= CXL_PSL_SR_An_R;
542                 sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
543         } else {
544                 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
545                 sr &= ~(CXL_PSL_SR_An_HV);
546                 if (!test_tsk_thread_flag(current, TIF_32BIT))
547                         sr |= CXL_PSL_SR_An_SF;
548         }
549         return sr;
550 }
551
552 static void update_ivtes_directed(struct cxl_context *ctx)
553 {
554         bool need_update = (ctx->status == STARTED);
555         int r;
556
557         if (need_update) {
558                 WARN_ON(terminate_process_element(ctx));
559                 WARN_ON(remove_process_element(ctx));
560         }
561
562         for (r = 0; r < CXL_IRQ_RANGES; r++) {
563                 ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
564                 ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
565         }
566
567         /*
568          * Theoretically we could use the update llcmd, instead of a
569          * terminate/remove/add (or if an atomic update was required we could
570          * do a suspend/update/resume), however it seems there might be issues
571          * with the update llcmd on some cards (including those using an XSL on
572          * an ASIC) so for now it's safest to go with the commands that are
573          * known to work. In the future if we come across a situation where the
574          * card may be performing transactions using the same PE while we are
575          * doing this update we might need to revisit this.
576          */
577         if (need_update)
578                 WARN_ON(add_process_element(ctx));
579 }
580
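/*
 * Fill in this context's process element (LPID, PID, state register,
 * segment table pointers, IVTEs, AMR and WED), make sure the AFU is
 * enabled, and then ask the PSL to add the element to the SPA.
 */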
581 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
582 {
583         u32 pid;
584         int result;
585
586         cxl_assign_psn_space(ctx);
587
588         ctx->elem->ctxtime = 0; /* disable */
589         ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
590         ctx->elem->haurp = 0; /* disable */
591         ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
592
593         pid = current->pid;
594         if (ctx->kernel)
595                 pid = 0;
596         ctx->elem->common.tid = 0;
597         ctx->elem->common.pid = cpu_to_be32(pid);
598
599         ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));
600
601         ctx->elem->common.csrp = 0; /* disable */
602         ctx->elem->common.aurp0 = 0; /* disable */
603         ctx->elem->common.aurp1 = 0; /* disable */
604
605         cxl_prefault(ctx, wed);
606
607         ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
608         ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
609
610         /*
611          * Ensure we have the multiplexed PSL interrupt set up to take faults
612          * for kernel contexts that may not have allocated any AFU IRQs at all:
613          */
614         if (ctx->irqs.range[0] == 0) {
615                 ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
616                 ctx->irqs.range[0] = 1;
617         }
618
619         update_ivtes_directed(ctx);
620
621         ctx->elem->common.amr = cpu_to_be64(amr);
622         ctx->elem->common.wed = cpu_to_be64(wed);
623
624         /* The first context to attach must enable the AFU */
625         if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
626                 return result;
627
628         return add_process_element(ctx);
629 }
630
631 static int deactivate_afu_directed(struct cxl_afu *afu)
632 {
633         dev_info(&afu->dev, "Deactivating AFU directed mode\n");
634
635         afu->current_mode = 0;
636         afu->num_procs = 0;
637
638         cxl_sysfs_afu_m_remove(afu);
639         cxl_chardev_afu_remove(afu);
640
641         /*
642          * The CAIA section 2.2.1 indicates that the procedure for starting and
643          * stopping an AFU in AFU directed mode is AFU specific, which is not
644          * ideal since this code is generic and with one exception has no
645          * knowledge of the AFU. This is in contrast to the procedure for
646          * disabling a dedicated process AFU, which is documented to just
647          * require a reset. The architecture does indicate that both an AFU
648          * reset and an AFU disable should result in the AFU being disabled and
649          * we do both followed by a PSL purge for safety.
650          *
651          * Notably we used to have some issues with the disable sequence on PSL
652          * cards, which is why we ended up using this heavy weight procedure in
653          * the first place, however a bug was discovered that had rendered the
654          * disable operation ineffective, so it is conceivable that was the
655          * sole explanation for those difficulties. Careful regression testing
656          * is recommended if anyone attempts to remove or reorder these
657          * operations.
658          *
659          * The XSL on the Mellanox CX4 behaves a little differently from the
660          * PSL based cards and will time out an AFU reset if the AFU is still
661          * enabled. That card is special in that we do have a means to identify
662          * it from this code, so in that case we skip the reset and just use a
663          * disable/purge to avoid the timeout and corresponding noise in the
664          * kernel log.
665          */
666         if (afu->adapter->native->sl_ops->needs_reset_before_disable)
667                 cxl_ops->afu_reset(afu);
668         cxl_afu_disable(afu);
669         cxl_psl_purge(afu);
670
671         return 0;
672 }
673
674 static int activate_dedicated_process(struct cxl_afu *afu)
675 {
676         dev_info(&afu->dev, "Activating dedicated process mode\n");
677
678         cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
679
680         cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
681         cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
682         cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
683         cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
684         cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
685         cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));
686
687         cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
688         cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
689         cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */
690
691         afu->current_mode = CXL_MODE_DEDICATED;
692         afu->num_procs = 1;
693
694         return cxl_chardev_d_afu_add(afu);
695 }
696
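/*
 * In dedicated mode the interrupt ranges live in per-slice registers
 * rather than in a process element: pack the four IVTE offset/range
 * pairs into the 16-bit fields of the Offset and Limit registers.
 */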
697 static void update_ivtes_dedicated(struct cxl_context *ctx)
698 {
699         struct cxl_afu *afu = ctx->afu;
700
701         cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
702                        (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
703                        (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
704                        (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
705                         ((u64)ctx->irqs.offset[3] & 0xffff));
706         cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
707                        (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
708                        (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
709                        (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
710                         ((u64)ctx->irqs.range[3] & 0xffff));
711 }
712
713 static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
714 {
715         struct cxl_afu *afu = ctx->afu;
716         u64 pid;
717         int rc;
718
719         pid = (u64)current->pid << 32;
720         if (ctx->kernel)
721                 pid = 0;
722         cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);
723
724         cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));
725
726         if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
727                 return rc;
728
729         cxl_prefault(ctx, wed);
730
731         update_ivtes_dedicated(ctx);
732
733         cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
734
735         /* master only context for dedicated */
736         cxl_assign_psn_space(ctx);
737
738         if ((rc = cxl_ops->afu_reset(afu)))
739                 return rc;
740
741         cxl_p2n_write(afu, CXL_PSL_WED_An, wed);
742
743         return afu_enable(afu);
744 }
745
746 static int deactivate_dedicated_process(struct cxl_afu *afu)
747 {
748         dev_info(&afu->dev, "Deactivating dedicated process mode\n");
749
750         afu->current_mode = 0;
751         afu->num_procs = 0;
752
753         cxl_chardev_afu_remove(afu);
754
755         return 0;
756 }
757
758 static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
759 {
760         if (mode == CXL_MODE_DIRECTED)
761                 return deactivate_afu_directed(afu);
762         if (mode == CXL_MODE_DEDICATED)
763                 return deactivate_dedicated_process(afu);
764         return 0;
765 }
766
767 static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
768 {
769         if (!mode)
770                 return 0;
771         if (!(mode & afu->modes_supported))
772                 return -EINVAL;
773
774         if (!cxl_ops->link_ok(afu->adapter, afu)) {
775                 WARN(1, "Device link is down, refusing to activate!\n");
776                 return -EIO;
777         }
778
779         if (mode == CXL_MODE_DIRECTED)
780                 return activate_afu_directed(afu);
781         if (mode == CXL_MODE_DEDICATED)
782                 return activate_dedicated_process(afu);
783
784         return -EINVAL;
785 }
786
787 static int native_attach_process(struct cxl_context *ctx, bool kernel,
788                                 u64 wed, u64 amr)
789 {
790         if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
791                 WARN(1, "Device link is down, refusing to attach process!\n");
792                 return -EIO;
793         }
794
795         ctx->kernel = kernel;
796         if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
797                 return attach_afu_directed(ctx, wed, amr);
798
799         if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
800                 return attach_dedicated(ctx, wed, amr);
801
802         return -EINVAL;
803 }
804
805 static inline int detach_process_native_dedicated(struct cxl_context *ctx)
806 {
807         /*
808          * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
809          * stop the AFU in dedicated mode (we therefore do not make that
810          * optional like we do in the afu directed path). It does not indicate
811          * that we need to do an explicit disable (which should occur
812          * implicitly as part of the reset) or purge, but we do these as well
813          * to be on the safe side.
814          *
815          * Notably we used to have some issues with the disable sequence
816          * (before the sequence was spelled out in the architecture) which is
817          * why we were so heavy weight in the first place, however a bug was
818          * discovered that had rendered the disable operation ineffective, so
819          * it is conceivable that was the sole explanation for those
820          * difficulties. Point is, we should be careful and do some regression
821          * testing if we ever attempt to remove any part of this procedure.
822          */
823         cxl_ops->afu_reset(ctx->afu);
824         cxl_afu_disable(ctx->afu);
825         cxl_psl_purge(ctx->afu);
826         return 0;
827 }
828
829 static void native_update_ivtes(struct cxl_context *ctx)
830 {
831         if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
832                 return update_ivtes_directed(ctx);
833         if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
834                 return update_ivtes_dedicated(ctx);
835         WARN(1, "native_update_ivtes: Bad mode\n");
836 }
837
838 static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
839 {
840         if (!ctx->pe_inserted)
841                 return 0;
842         if (terminate_process_element(ctx))
843                 return -1;
844         if (remove_process_element(ctx))
845                 return -1;
846
847         return 0;
848 }
849
850 static int native_detach_process(struct cxl_context *ctx)
851 {
852         trace_cxl_detach(ctx);
853
854         if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
855                 return detach_process_native_dedicated(ctx);
856
857         return detach_process_native_afu_directed(ctx);
858 }
859
860 static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
861 {
862         u64 pidtid;
863
864         /* If the adapter has gone away, we can't get any meaningful
865          * information.
866          */
867         if (!cxl_ops->link_ok(afu->adapter, afu))
868                 return -EIO;
869
870         info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
871         info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
872         info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
873         pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
874         info->pid = pidtid >> 32;
875         info->tid = pidtid & 0xffffffff;
876         info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
877         info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
878         info->proc_handle = 0;
879
880         return 0;
881 }
882
883 void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
884 {
885         u64 fir1, fir2, fir_slice, serr, afu_debug;
886
887         fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
888         fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
889         fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
890         afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
891
892         dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
893         dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
894         if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
895                 serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
896                 cxl_afu_decode_psl_serr(ctx->afu, serr);
897         }
898         dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
899         dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
900 }
901
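/*
 * A slice has raised a PSL error interrupt: log the error status, let
 * the card-specific ops dump their registers and stop tracing, then ack
 * the interrupt with the error status as the PSL reset mask.
 */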
902 static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
903                                                 u64 dsisr, u64 errstat)
904 {
905
906         dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
907
908         if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
909                 ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
910
911         if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
912                 dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
913                 ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
914         }
915
916         return cxl_ops->ack_irq(ctx, 0, errstat);
917 }
918
919 static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
920 {
921         if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
922                 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
923         else
924                 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
925
926         return IRQ_HANDLED;
927 }
928
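/*
 * Multiplexed PSL interrupt handler: read the PE handle that raised the
 * fault, gather the fault registers, then look the handle up in the
 * AFU's context IDR and hand the interrupt to that context; anything we
 * cannot demultiplex is failed via fail_psl_irq().
 */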
929 static irqreturn_t native_irq_multiplexed(int irq, void *data)
930 {
931         struct cxl_afu *afu = data;
932         struct cxl_context *ctx;
933         struct cxl_irq_info irq_info;
934         u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
935         int ph, ret;
936
937         /* Check whether EEH kicked in while the interrupt was in flight */
938         if (unlikely(phreg == ~0ULL)) {
939                 dev_warn(&afu->dev,
940                          "Ignoring slice interrupt(%d) due to fenced card",
941                          irq);
942                 return IRQ_HANDLED;
943         }
944         /* Mask the pe-handle from register value */
945         ph = phreg & 0xffff;
946         if ((ret = native_get_irq_info(afu, &irq_info))) {
947                 WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
948                 return fail_psl_irq(afu, &irq_info);
949         }
950
951         rcu_read_lock();
952         ctx = idr_find(&afu->contexts_idr, ph);
953         if (ctx) {
954                 ret = cxl_irq(irq, ctx, &irq_info);
955                 rcu_read_unlock();
956                 return ret;
957         }
958         rcu_read_unlock();
959
960         WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
961                 " %016llx\n(Possible AFU HW issue - was a term/remove acked"
962                 " with outstanding transactions?)\n", ph, irq_info.dsisr,
963                 irq_info.dar);
964         return fail_psl_irq(afu, &irq_info);
965 }
966
967 static void native_irq_wait(struct cxl_context *ctx)
968 {
969         u64 dsisr;
970         int timeout = 1000;
971         int ph;
972
973         /*
974          * Wait until no further interrupts are presented by the PSL
975          * for this context.
976          */
977         while (timeout--) {
978                 ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
979                 if (ph != ctx->pe)
980                         return;
981                 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
982                 if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
983                         return;
984                 /*
985                  * We are waiting for the workqueue to process our
986                  * irq, so need to let that run here.
987                  */
988                 msleep(1);
989         }
990
991         dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
992                  " DSISR %016llx!\n", ph, dsisr);
993         return;
994 }
995
996 static irqreturn_t native_slice_irq_err(int irq, void *data)
997 {
998         struct cxl_afu *afu = data;
999         u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;
1000
1001         /*
1002          * slice err interrupt is only used with full PSL (no XSL)
1003          */
1004         serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
1005         fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
1006         errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
1007         afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
1008         afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
1009         dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
1010         cxl_afu_decode_psl_serr(afu, serr);
1011         dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
1012         dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
1013         dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
1014         dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
1015         dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
1016
1017         cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
1018
1019         return IRQ_HANDLED;
1020 }
1021
1022 void cxl_native_err_irq_dump_regs(struct cxl *adapter)
1023 {
1024         u64 fir1, fir2;
1025
1026         fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
1027         fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
1028
1029         dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
1030 }
1031
1032 static irqreturn_t native_irq_err(int irq, void *data)
1033 {
1034         struct cxl *adapter = data;
1035         u64 err_ivte;
1036
1037         WARN(1, "CXL ERROR interrupt %i\n", irq);
1038
1039         err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
1040         dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
1041
1042         if (adapter->native->sl_ops->debugfs_stop_trace) {
1043                 dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
1044                 adapter->native->sl_ops->debugfs_stop_trace(adapter);
1045         }
1046
1047         if (adapter->native->sl_ops->err_irq_dump_registers)
1048                 adapter->native->sl_ops->err_irq_dump_registers(adapter);
1049
1050         return IRQ_HANDLED;
1051 }
1052
1053 int cxl_native_register_psl_err_irq(struct cxl *adapter)
1054 {
1055         int rc;
1056
1057         adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
1058                                       dev_name(&adapter->dev));
1059         if (!adapter->irq_name)
1060                 return -ENOMEM;
1061
1062         if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
1063                                        &adapter->native->err_hwirq,
1064                                        &adapter->native->err_virq,
1065                                        adapter->irq_name))) {
1066                 kfree(adapter->irq_name);
1067                 adapter->irq_name = NULL;
1068                 return rc;
1069         }
1070
1071         cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
1072
1073         return 0;
1074 }
1075
1076 void cxl_native_release_psl_err_irq(struct cxl *adapter)
1077 {
1078         if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
1079                 return;
1080
1081         cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
1082         cxl_unmap_irq(adapter->native->err_virq, adapter);
1083         cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
1084         kfree(adapter->irq_name);
1085 }
1086
1087 int cxl_native_register_serr_irq(struct cxl_afu *afu)
1088 {
1089         u64 serr;
1090         int rc;
1091
1092         afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
1093                                       dev_name(&afu->dev));
1094         if (!afu->err_irq_name)
1095                 return -ENOMEM;
1096
1097         if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
1098                                        &afu->serr_hwirq,
1099                                        &afu->serr_virq, afu->err_irq_name))) {
1100                 kfree(afu->err_irq_name);
1101                 afu->err_irq_name = NULL;
1102                 return rc;
1103         }
1104
1105         serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
1106         serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
1107         cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
1108
1109         return 0;
1110 }
1111
1112 void cxl_native_release_serr_irq(struct cxl_afu *afu)
1113 {
1114         if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
1115                 return;
1116
1117         cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
1118         cxl_unmap_irq(afu->serr_virq, afu);
1119         cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
1120         kfree(afu->err_irq_name);
1121 }
1122
1123 int cxl_native_register_psl_irq(struct cxl_afu *afu)
1124 {
1125         int rc;
1126
1127         afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
1128                                       dev_name(&afu->dev));
1129         if (!afu->psl_irq_name)
1130                 return -ENOMEM;
1131
1132         if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
1133                                     afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
1134                                     afu->psl_irq_name))) {
1135                 kfree(afu->psl_irq_name);
1136                 afu->psl_irq_name = NULL;
1137         }
1138         return rc;
1139 }
1140
1141 void cxl_native_release_psl_irq(struct cxl_afu *afu)
1142 {
1143         if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
1144                 return;
1145
1146         cxl_unmap_irq(afu->native->psl_virq, afu);
1147         cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
1148         kfree(afu->psl_irq_name);
1149 }
1150
1151 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
1152 {
1153         u64 dsisr;
1154
1155         pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
1156
1157         /* Clear PSL_DSISR[PE] */
1158         dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
1159         cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);
1160
1161         /* Write 1s to clear error status bits */
1162         cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
1163 }
1164
1165 static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
1166 {
1167         trace_cxl_psl_irq_ack(ctx, tfc);
1168         if (tfc)
1169                 cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
1170         if (psl_reset_mask)
1171                 recover_psl_err(ctx->afu, psl_reset_mask);
1172
1173         return 0;
1174 }
1175
1176 int cxl_check_error(struct cxl_afu *afu)
1177 {
1178         return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
1179 }
1180
1181 static bool native_support_attributes(const char *attr_name,
1182                                       enum cxl_attrs type)
1183 {
1184         return true;
1185 }
1186
1187 static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
1188 {
1189         if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
1190                 return -EIO;
1191         if (unlikely(off >= afu->crs_len))
1192                 return -ERANGE;
1193         *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
1194                 (cr * afu->crs_len) + off);
1195         return 0;
1196 }
1197
1198 static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
1199 {
1200         if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
1201                 return -EIO;
1202         if (unlikely(off >= afu->crs_len))
1203                 return -ERANGE;
1204         *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
1205                 (cr * afu->crs_len) + off);
1206         return 0;
1207 }
1208
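/*
 * Sub-word configuration record accesses are implemented on top of the
 * aligned 32-bit accessors: reads extract the relevant byte/halfword
 * from the containing word, writes do a read-modify-write of it.
 */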
1209 static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
1210 {
1211         u64 aligned_off = off & ~0x3L;
1212         u32 val;
1213         int rc;
1214
1215         rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
1216         if (!rc)
1217                 *out = (val >> ((off & 0x3) * 8)) & 0xffff;
1218         return rc;
1219 }
1220
1221 static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
1222 {
1223         u64 aligned_off = off & ~0x3L;
1224         u32 val;
1225         int rc;
1226
1227         rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
1228         if (!rc)
1229                 *out = (val >> ((off & 0x3) * 8)) & 0xff;
1230         return rc;
1231 }
1232
1233 static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
1234 {
1235         if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
1236                 return -EIO;
1237         if (unlikely(off >= afu->crs_len))
1238                 return -ERANGE;
1239         out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
1240                 (cr * afu->crs_len) + off, in);
1241         return 0;
1242 }
1243
1244 static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
1245 {
1246         u64 aligned_off = off & ~0x3L;
1247         u32 val32, mask, shift;
1248         int rc;
1249
1250         rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
1251         if (rc)
1252                 return rc;
1253         shift = (off & 0x3) * 8;
1254         WARN_ON(shift == 24);
1255         mask = 0xffff << shift;
1256         val32 = (val32 & ~mask) | (in << shift);
1257
1258         rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
1259         return rc;
1260 }
1261
1262 static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
1263 {
1264         u64 aligned_off = off & ~0x3L;
1265         u32 val32, mask, shift;
1266         int rc;
1267
1268         rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
1269         if (rc)
1270                 return rc;
1271         shift = (off & 0x3) * 8;
1272         mask = 0xff << shift;
1273         val32 = (val32 & ~mask) | (in << shift);
1274
1275         rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
1276         return rc;
1277 }
1278
1279 const struct cxl_backend_ops cxl_native_ops = {
1280         .module = THIS_MODULE,
1281         .adapter_reset = cxl_pci_reset,
1282         .alloc_one_irq = cxl_pci_alloc_one_irq,
1283         .release_one_irq = cxl_pci_release_one_irq,
1284         .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
1285         .release_irq_ranges = cxl_pci_release_irq_ranges,
1286         .setup_irq = cxl_pci_setup_irq,
1287         .handle_psl_slice_error = native_handle_psl_slice_error,
1288         .psl_interrupt = NULL,
1289         .ack_irq = native_ack_irq,
1290         .irq_wait = native_irq_wait,
1291         .attach_process = native_attach_process,
1292         .detach_process = native_detach_process,
1293         .update_ivtes = native_update_ivtes,
1294         .support_attributes = native_support_attributes,
1295         .link_ok = cxl_adapter_link_ok,
1296         .release_afu = cxl_pci_release_afu,
1297         .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
1298         .afu_check_and_enable = native_afu_check_and_enable,
1299         .afu_activate_mode = native_afu_activate_mode,
1300         .afu_deactivate_mode = native_afu_deactivate_mode,
1301         .afu_reset = native_afu_reset,
1302         .afu_cr_read8 = native_afu_cr_read8,
1303         .afu_cr_read16 = native_afu_cr_read16,
1304         .afu_cr_read32 = native_afu_cr_read32,
1305         .afu_cr_read64 = native_afu_cr_read64,
1306         .afu_cr_write8 = native_afu_cr_write8,
1307         .afu_cr_write16 = native_afu_cr_write16,
1308         .afu_cr_write32 = native_afu_cr_write32,
1309         .read_adapter_vpd = cxl_pci_read_adapter_vpd,
1310 };