// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

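/*
 * Write an AFU control command into CXL_AFU_Cntl_An and poll until the
 * masked register value matches the expected result, giving up after
 * CXL_TIMEOUT seconds or if the link goes down.
 */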
static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
                       u64 result, u64 mask, bool enabled)
{
        u64 AFU_Cntl;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        spin_lock(&afu->afu_cntl_lock);
        pr_devel("AFU command starting: %llx\n", command);

        trace_cxl_afu_ctrl(afu, command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        while ((AFU_Cntl & mask) != result) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }

                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        afu->enabled = enabled;
                        rc = -EIO;
                        goto out;
                }

                pr_devel_ratelimited("AFU control... (0x%016llx)\n",
                                     AFU_Cntl | command);
                cpu_relax();
                AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        }

        if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
                /*
                 * Workaround for a bug in the XSL used in the Mellanox CX4
                 * that fails to clear the RA bit after an AFU reset,
                 * preventing subsequent AFU resets from working.
                 */
                cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
        }

        pr_devel("AFU command complete: %llx\n", command);
        afu->enabled = enabled;
out:
        trace_cxl_afu_ctrl_done(afu, command, rc);
        spin_unlock(&afu->afu_cntl_lock);

        return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
        pr_devel("AFU enable request\n");

        return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
                           CXL_AFU_Cntl_An_ES_Enabled,
                           CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
        pr_devel("AFU disable request\n");

        return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
                           CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
        int rc;
        u64 serr;

        pr_devel("AFU reset request\n");

        rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
                           CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
                           false);

        /*
         * Re-enable any masked interrupts when the AFU is not
         * activated to avoid side effects after attaching a process
         * in dedicated mode.
         */
        if (afu->current_mode == 0) {
                serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
                serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
                cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
        }

        return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Refusing to enable afu while link down!\n");
                return -EIO;
        }
        if (afu->enabled)
                return 0;
        return afu_enable(afu);
}

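/*
 * Purge any commands outstanding in the PSL for this AFU, acknowledging
 * faults reported along the way. The AFU is expected to already be
 * disabled when this is called.
 */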
int cxl_psl_purge(struct cxl_afu *afu)
{
        u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        u64 dsisr, dar;
        u64 start, end;
        u64 trans_fault = 0x0ULL;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

        pr_devel("PSL purge request\n");

        if (cxl_is_power8())
                trans_fault = CXL_PSL_DSISR_TRANS;
        if (cxl_is_power9())
                trans_fault = CXL_PSL9_DSISR_An_TF;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
                rc = -EIO;
                goto out;
        }

        if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
                WARN(1, "psl_purge request while AFU not disabled!\n");
                cxl_afu_disable(afu);
        }

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                       PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
        start = local_clock();
        PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        while ((PSL_CNTL &  CXL_PSL_SCNTL_An_Ps_MASK)
                        == CXL_PSL_SCNTL_An_Ps_Pending) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        rc = -EIO;
                        goto out;
                }

                dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
                pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n",
                                     PSL_CNTL, dsisr);

                if (dsisr & trans_fault) {
                        dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
                        dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
                                   dsisr, dar);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
                } else if (dsisr) {
                        dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
                                   dsisr);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
                } else {
                        cpu_relax();
                }
                PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        }
        end = local_clock();
        pr_devel("PSL purged in %lld ns\n", end - start);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                       PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
        trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
        return rc;
}

static int spa_max_procs(int spa_size)
{
        /*
         * From the CAIA:
         *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
         * Most of that junk is really just an overly-complicated way of saying
         * the last 256 bytes are __aligned(128), so it's really:
         *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
         * and
         *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
         * so
         *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
         * Ignore the alignment (which is safe in this case as long as we are
         * careful with our rounding) and solve for n:
         */
        return ((spa_size / 8) - 96) / 17;
}

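/*
 * Allocate the scheduled process area, doubling the allocation order
 * until it is big enough for afu->num_procs process elements or hits
 * the 1MB cap, in which case num_procs is clamped to what fits (except
 * in dedicated mode, which only ever uses one process element).
 */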
static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
        unsigned spa_size;

        /* Work out how many pages to allocate */
        afu->native->spa_order = -1;
        do {
                afu->native->spa_order++;
                spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

                if (spa_size > 0x100000) {
                        dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
                                        afu->native->spa_max_procs, afu->native->spa_size);
                        if (mode != CXL_MODE_DEDICATED)
                                afu->num_procs = afu->native->spa_max_procs;
                        break;
                }

                afu->native->spa_size = spa_size;
                afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
        } while (afu->native->spa_max_procs < afu->num_procs);

        if (!(afu->native->spa = (struct cxl_process_element *)
              __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
                pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
                return -ENOMEM;
        }
        pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
                 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

        return 0;
}

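/*
 * Point sw_command_status at its slot within the SPA and program the
 * SPA base address, size and valid bit into CXL_PSL_SPAP_An.
 */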
static void attach_spa(struct cxl_afu *afu)
{
        u64 spap;

        afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
                                            ((afu->native->spa_max_procs + 3) * 128));

        spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
        spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
        spap |= CXL_PSL_SPAP_V;
        pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
                afu->native->spa, afu->native->spa_max_procs,
                afu->native->sw_command_status, spap);
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

static inline void detach_spa(struct cxl_afu *afu)
{
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
        if (afu->native->spa) {
                free_pages((unsigned long) afu->native->spa,
                        afu->native->spa_order);
                afu->native->spa = NULL;
        }
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        u64 ierat;

        pr_devel("CXL adapter - invalidation of all ERAT entries\n");

        /* Invalidates all ERAT entries for Radix or HPT */
        ierat = CXL_XSL9_IERAT_IALL;
        if (radix_enabled())
                ierat |= CXL_XSL9_IERAT_INVR;
        cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

        while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev,
                        "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        pr_devel("CXL adapter wide TLBIA & SLBIA\n");

        cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

        cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }

        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
        u64 reg;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        /*
         * Only do a data cache flush if the data cache is present:
         * PSL9D has no data cache, so a flush operation would time out.
         */
        if (adapter->native->no_data_cache) {
                pr_devel("No PSL data cache. Ignoring cache flush req.\n");
                return 0;
        }

        pr_devel("Flushing data cache\n");
        reg = cxl_p1_read(adapter, CXL_PSL_Control);
        reg |= CXL_PSL_Control_Fr;
        cxl_p1_write(adapter, CXL_PSL_Control, reg);

        reg = cxl_p1_read(adapter, CXL_PSL_Control);
        while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
                        return -EBUSY;
                }

                if (!cxl_ops->link_ok(adapter, NULL)) {
                        dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
                        return -EIO;
                }
                cpu_relax();
                reg = cxl_p1_read(adapter, CXL_PSL_Control);
        }

        reg &= ~CXL_PSL_Control_Fr;
        cxl_p1_write(adapter, CXL_PSL_Control, reg);
        return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
        int rc;

        /* 1. Disable SSTP by writing 0 to SSTP1[V] */
        cxl_p2n_write(afu, CXL_SSTP1_An, 0);

        /* 2. Invalidate all SLB entries */
        if ((rc = cxl_afu_slbia(afu)))
                return rc;

        /* 3. Set SSTP0_An */
        cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

        /* 4. Set SSTP1_An */
        cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

        return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
        struct cxl *adapter = ctx->afu->adapter;
        u64 slbia;

        WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

        cxl_p1_write(adapter, CXL_PSL_LBISEL,
                        ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
                        be32_to_cpu(ctx->elem->lpid));
        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

        while (1) {
                if (!cxl_ops->link_ok(adapter, NULL))
                        break;
                slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
                if (!(slbia & CXL_TLB_SLB_P))
                        break;
                cpu_relax();
        }
}

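/*
 * Issue a process element command: update the software state in the
 * process element, mirror the expected completion value into
 * sw_command_status, write the command to CXL_PSL_LLCMD_An, then poll
 * sw_command_status until the PSL reports completion (or failure).
 */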
static int do_process_element_cmd(struct cxl_context *ctx,
                                  u64 cmd, u64 pe_state)
{
        u64 state;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_llcmd(ctx, cmd);

        WARN_ON(!ctx->afu->enabled);

        ctx->elem->software_state = cpu_to_be32(pe_state);
        smp_wmb();
        *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
        smp_mb();
        cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
        while (1) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
                        rc = -EIO;
                        goto out;
                }
                state = be64_to_cpup(ctx->afu->native->sw_command_status);
                if (state == ~0ULL) {
                        pr_err("cxl: Error adding process element to AFU\n");
                        rc = -1;
                        goto out;
                }
                if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK  | CXL_SPA_SW_LINK_MASK)) ==
                    (cmd | (cmd >> 16) | ctx->pe))
                        break;
                /*
                 * The command won't finish in the PSL if there are
                 * outstanding DSIs.  Hence we need to yield here in
                 * case there are outstanding DSIs that we need to
                 * service.  Tuning possibility: we could wait for a
                 * while before scheduling.
                 */
                schedule();

        }
out:
        trace_cxl_llcmd_done(ctx, cmd, rc);
        return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
        if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
                ctx->pe_inserted = true;
        pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        /* fast path terminate if it's already invalid */
        if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
                return rc;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
        /* We could be asked to terminate when the hw is down. That
         * should always succeed: it's not running if the hw has gone
         * away and is being reset.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
                                            CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
        ctx->elem->software_state = 0;  /* Remove Valid bit */
        pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

        /* We could be asked to remove when the hw is down. Again, if
         * the hw is down, the PE is gone, so we succeed.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

        if (!rc)
                ctx->pe_inserted = false;
        if (cxl_is_power8())
                slb_invalid(ctx);
        pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);

        return rc;
}

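/*
 * Master contexts (and AFUs without per-process problem state) get the
 * whole problem state area; everyone else gets their pp_size slice of
 * it, indexed by process element number.
 */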
void cxl_assign_psn_space(struct cxl_context *ctx)
{
        if (!ctx->afu->pp_size || ctx->master) {
                ctx->psn_phys = ctx->afu->psn_phys;
                ctx->psn_size = ctx->afu->adapter->ps_size;
        } else {
                ctx->psn_phys = ctx->afu->psn_phys +
                        (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
                ctx->psn_size = ctx->afu->pp_size;
        }
}

static int activate_afu_directed(struct cxl_afu *afu)
{
        int rc;

        dev_info(&afu->dev, "Activating AFU directed mode\n");

        afu->num_procs = afu->max_procs_virtualised;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
        if (cxl_is_power8())
                cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DIRECTED;

        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;

        if ((rc = cxl_sysfs_afu_m_add(afu)))
                goto err;

        if ((rc = cxl_chardev_s_afu_add(afu)))
                goto err1;

        return 0;
err1:
        cxl_sysfs_afu_m_remove(afu);
err:
        cxl_chardev_afu_remove(afu);
        return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

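/*
 * Build the value for the CXL_PSL_SR_An state register, which tells the
 * PSL how to translate and protect accesses for this context
 * (endianness, privilege, relocation and, on POWER9, radix vs hash
 * translation).
 */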
u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
        u64 sr = 0;

        set_endian(sr);
        if (master)
                sr |= CXL_PSL_SR_An_MP;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
                sr |= CXL_PSL_SR_An_TC;

        if (kernel) {
                if (!real_mode)
                        sr |= CXL_PSL_SR_An_R;
                sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
        } else {
                sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
                if (radix_enabled())
                        sr |= CXL_PSL_SR_An_HV;
                else
                        sr &= ~(CXL_PSL_SR_An_HV);
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        sr |= CXL_PSL_SR_An_SF;
        }
        if (p9) {
                if (radix_enabled())
                        sr |= CXL_PSL_SR_An_XLAT_ror;
                else
                        sr |= CXL_PSL_SR_An_XLAT_hpt;
        }
        return sr;
}

static u64 calculate_sr(struct cxl_context *ctx)
{
        return cxl_calculate_sr(ctx->master, ctx->kernel, false,
                                cxl_is_power9());
}

static void update_ivtes_directed(struct cxl_context *ctx)
{
        bool need_update = (ctx->status == STARTED);
        int r;

        if (need_update) {
                WARN_ON(terminate_process_element(ctx));
                WARN_ON(remove_process_element(ctx));
        }

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }

        /*
         * Theoretically we could use the update llcmd, instead of a
         * terminate/remove/add (or if an atomic update was required we could
         * do a suspend/update/resume), however it seems there might be issues
         * with the update llcmd on some cards (including those using an XSL on
         * an ASIC) so for now it's safest to go with the commands that are
         * known to work. In the future if we come across a situation where the
         * card may be performing transactions using the same PE while we are
         * doing this update we might need to revisit this.
         */
        if (need_update)
                WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;
        int rc;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */

        if (ctx->kernel)
                pid = 0;
        else {
                if (ctx->mm == NULL) {
                        pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
                                __func__, ctx->pe, pid_nr(ctx->pid));
                        return -EINVAL;
                }
                pid = ctx->mm->context.id;
        }

        /* Assign a unique TIDR (thread id) for the current thread */
        if (!(ctx->tidr) && (ctx->assign_tidr)) {
                rc = set_thread_tidr(current);
                if (rc)
                        return -ENODEV;
                ctx->tidr = current->thread.tidr;
                pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
        }

        ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */

        cxl_prefault(ctx, wed);

        /*
         * Ensure we have the multiplexed PSL interrupt set up to take faults
         * for kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        int result;

        /* fill the process element entry */
        result = process_element_entry_psl9(ctx, wed, amr);
        if (result)
                return result;

        update_ivtes_directed(ctx);

        /* first guy needs to enable */
        result = cxl_ops->afu_check_and_enable(ctx->afu);
        if (result)
                return result;

        return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;
        int result;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */
        ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

        pid = current->pid;
        if (ctx->kernel)
                pid = 0;
        ctx->elem->common.tid = 0;
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */
        ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
        ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

        cxl_prefault(ctx, wed);

        ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
        ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

        /*
         * Ensure we have the multiplexed PSL interrupt set up to take faults
         * for kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }

        update_ivtes_directed(ctx);

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        /* first guy needs to enable */
        if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
                return result;

        return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating AFU directed mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_sysfs_afu_m_remove(afu);
        cxl_chardev_afu_remove(afu);

        /*
         * The CAIA section 2.2.1 indicates that the procedure for starting and
         * stopping an AFU in AFU directed mode is AFU specific, which is not
         * ideal since this code is generic and with one exception has no
         * knowledge of the AFU. This is in contrast to the procedure for
         * disabling a dedicated process AFU, which is documented to just
         * require a reset. The architecture does indicate that both an AFU
         * reset and an AFU disable should result in the AFU being disabled and
         * we do both followed by a PSL purge for safety.
         *
         * Notably we used to have some issues with the disable sequence on PSL
         * cards, which is why we ended up using this heavy weight procedure in
         * the first place, however a bug was discovered that had rendered the
         * disable operation ineffective, so it is conceivable that was the
         * sole explanation for those difficulties. Careful regression testing
         * is recommended if anyone attempts to remove or reorder these
         * operations.
         *
         * The XSL on the Mellanox CX4 behaves a little differently from the
         * PSL based cards and will time out an AFU reset if the AFU is still
         * enabled. That card is special in that we do have a means to identify
         * it from this code, so in that case we skip the reset and just use a
         * disable/purge to avoid the timeout and corresponding noise in the
         * kernel log.
         */
        if (afu->adapter->native->sl_ops->needs_reset_before_disable)
                cxl_ops->afu_reset(afu);
        cxl_afu_disable(afu);
        cxl_psl_purge(afu);

        return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        /*
         * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
         * XSL and AFU are programmed to work with a single context.
         * The context information should be configured in the SPA area
         * index 0 (so PSL_SPAP must be configured before enabling the
         * AFU).
         */
        afu->num_procs = 1;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DEDICATED;

        return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

        cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
        cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
        cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
        cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

        cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
        cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
        cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

        afu->current_mode = CXL_MODE_DEDICATED;
        afu->num_procs = 1;

        return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
        int r;

        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }
}

void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
        struct cxl_afu *afu = ctx->afu;

        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
                       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
                       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
                       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
                        ((u64)ctx->irqs.offset[3] & 0xffff));
        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
                       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
                       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
                       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
                        ((u64)ctx->irqs.range[3] & 0xffff));
}

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        int result;

        /* fill the process element entry */
        result = process_element_entry_psl9(ctx, wed, amr);
        if (result)
                return result;

        if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
                afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

        ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
        /*
         * Ideally we should do a wmb() here to make sure the changes to the
         * PE are visible to the card before we call afu_enable.
         * On ppc64 though all MMIOs are preceded by a 'sync' instruction,
         * hence we don't need one here.
         */

        result = cxl_ops->afu_reset(afu);
        if (result)
                return result;

        return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        u64 pid;
        int rc;

        pid = (u64)current->pid << 32;
        if (ctx->kernel)
                pid = 0;
        cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

        cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

        if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
                return rc;

        cxl_prefault(ctx, wed);

        if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
                afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

        cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

        /* master only context for dedicated */
        cxl_assign_psn_space(ctx);

        if ((rc = cxl_ops->afu_reset(afu)))
                return rc;

        cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

        return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating dedicated process mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_chardev_afu_remove(afu);

        return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
        if (mode == CXL_MODE_DIRECTED)
                return deactivate_afu_directed(afu);
        if (mode == CXL_MODE_DEDICATED)
                return deactivate_dedicated_process(afu);
        return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
        if (!mode)
                return 0;
        if (!(mode & afu->modes_supported))
                return -EINVAL;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Device link is down, refusing to activate!\n");
                return -EIO;
        }

        if (mode == CXL_MODE_DIRECTED)
                return activate_afu_directed(afu);
        if ((mode == CXL_MODE_DEDICATED) &&
            (afu->adapter->native->sl_ops->activate_dedicated_process))
                return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

        return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
                                u64 wed, u64 amr)
{
        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                WARN(1, "Device link is down, refusing to attach process!\n");
                return -EIO;
        }

        ctx->kernel = kernel;
        if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
            (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
                return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

        if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
            (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
                return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

        return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
        /*
         * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
         * stop the AFU in dedicated mode (we therefore do not make that
         * optional like we do in the afu directed path). It does not indicate
         * that we need to do an explicit disable (which should occur
         * implicitly as part of the reset) or purge, but we do these as well
         * to be on the safe side.
         *
         * Notably we used to have some issues with the disable sequence
         * (before the sequence was spelled out in the architecture) which is
         * why we were so heavy weight in the first place, however a bug was
         * discovered that had rendered the disable operation ineffective, so
         * it is conceivable that was the sole explanation for those
         * difficulties. Point is, we should be careful and do some regression
         * testing if we ever attempt to remove any part of this procedure.
         */
        cxl_ops->afu_reset(ctx->afu);
        cxl_afu_disable(ctx->afu);
        cxl_psl_purge(ctx->afu);
        return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
                return update_ivtes_directed(ctx);
        if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
            (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
                return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
        WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
        if (!ctx->pe_inserted)
                return 0;
        if (terminate_process_element(ctx))
                return -1;
        if (remove_process_element(ctx))
                return -1;

        return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
        trace_cxl_detach(ctx);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
                return detach_process_native_dedicated(ctx);

        return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
        /* If the adapter has gone away, we can't get any meaningful
         * information.
         */
        if (!cxl_ops->link_ok(afu->adapter, afu))
                return -EIO;

        info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
        if (cxl_is_power8())
                info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
        info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        info->proc_handle = 0;

        return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
        u64 fir1, serr;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);

        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
                cxl_afu_decode_psl_serr(ctx->afu, serr);
        }
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
        u64 fir1, fir2, fir_slice, serr, afu_debug;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
        fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
        afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
                cxl_afu_decode_psl_serr(ctx->afu, serr);
        }
        dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
        dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
                                                u64 dsisr, u64 errstat)
{

        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

        if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
                ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

        if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
                dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
                ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
        }

        return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
        if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
                return true;

        if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
                return true;

        return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
        if (cxl_is_translation_fault(afu, irq_info->dsisr))
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
        else
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

        return IRQ_HANDLED;
}

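/*
 * The PSL interrupt is multiplexed across all contexts on the AFU: read
 * the process element handle from CXL_PSL_PEHandle_An and hand the
 * interrupt to the matching context, if one is still registered.
 */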
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
        struct cxl_afu *afu = data;
        struct cxl_context *ctx;
        struct cxl_irq_info irq_info;
        u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
        int ph, ret = IRQ_HANDLED, res;

        /* check if eeh kicked in while the interrupt was in flight */
        if (unlikely(phreg == ~0ULL)) {
                dev_warn(&afu->dev,
                         "Ignoring slice interrupt(%d) due to fenced card",
                         irq);
                return IRQ_HANDLED;
        }
        /* Mask the pe-handle from register value */
        ph = phreg & 0xffff;
        if ((res = native_get_irq_info(afu, &irq_info))) {
                WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
                if (afu->adapter->native->sl_ops->fail_irq)
                        return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
                return ret;
        }

        rcu_read_lock();
        ctx = idr_find(&afu->contexts_idr, ph);
        if (ctx) {
                if (afu->adapter->native->sl_ops->handle_interrupt)
                        ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
                rcu_read_unlock();
                return ret;
        }
        rcu_read_unlock();

        WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
                " %016llx\n(Possible AFU HW issue - was a term/remove acked"
                " with outstanding transactions?)\n", ph, irq_info.dsisr,
                irq_info.dar);
        if (afu->adapter->native->sl_ops->fail_irq)
                ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
        return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
        u64 dsisr;
        int timeout = 1000;
        int ph;

        /*
         * Wait until no further interrupts are presented by the PSL
         * for this context.
         */
        while (timeout--) {
                ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
                if (ph != ctx->pe)
                        return;
                dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
                if (cxl_is_power8() &&
                   ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
                        return;
                if (cxl_is_power9() &&
                   ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
                        return;
                /*
                 * We are waiting for the workqueue to process our
                 * irq, so need to let that run here.
                 */
                msleep(1);
        }

        dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
                 " DSISR %016llx!\n", ph, dsisr);
        return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        u64 errstat, serr, afu_error, dsisr;
        u64 fir_slice, afu_debug, irq_mask;

        /*
         * slice err interrupt is only used with full PSL (no XSL)
         */
        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_afu_decode_psl_serr(afu, serr);

        if (cxl_is_power8()) {
                fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
                afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
                dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
                dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
        }
        dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
        dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
        dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

        /* mask off the IRQ so it won't retrigger until the AFU is reset */
        irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
        serr |= irq_mask;
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
        dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

        return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
{
        u64 fir1;

        fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
        dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
}

void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
        u64 fir1, fir2;

        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
        dev_crit(&adapter->dev,
                 "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
                 fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
        struct cxl *adapter = data;
        u64 err_ivte;

        WARN(1, "CXL ERROR interrupt %i\n", irq);

        err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
        dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

        if (adapter->native->sl_ops->debugfs_stop_trace) {
                dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
                adapter->native->sl_ops->debugfs_stop_trace(adapter);
        }

        if (adapter->native->sl_ops->err_irq_dump_registers)
                adapter->native->sl_ops->err_irq_dump_registers(adapter);

        return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
        int rc;

        adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&adapter->dev));
        if (!adapter->irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
                                       &adapter->native->err_hwirq,
                                       &adapter->native->err_virq,
                                       adapter->irq_name))) {
                kfree(adapter->irq_name);
                adapter->irq_name = NULL;
                return rc;
        }

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

        return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
        if (adapter->native->err_virq == 0 ||
            adapter->native->err_virq !=
            irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
        adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
        u64 serr;
        int rc;

        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
                                       &afu->serr_hwirq,
                                       &afu->serr_virq, afu->err_irq_name))) {
                kfree(afu->err_irq_name);
                afu->err_irq_name = NULL;
                return rc;
        }

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        if (cxl_is_power8())
                serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
        if (cxl_is_power9()) {
                /*
                 * By default, all errors are masked, so don't set all the
                 * mask bits. Slice errors will be transferred.
                 */
                serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
        }
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
        if (afu->serr_virq == 0 ||
            afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;

        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
        afu->serr_virq = 0;
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
        int rc;

        afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
                                      dev_name(&afu->dev));
        if (!afu->psl_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
                                    afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
                                    afu->psl_irq_name))) {
                kfree(afu->psl_irq_name);
                afu->psl_irq_name = NULL;
        }
        return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
        if (afu->native->psl_virq == 0 ||
            afu->native->psl_virq !=
            irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;

        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
        afu->native->psl_virq = 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
        u64 dsisr;

        pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

        /* Clear PSL_DSISR[PE] */
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

        /* Write 1s to clear error status bits */
        cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

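/* Acknowledge a translation fault and/or recover from a PSL error */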
static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
        trace_cxl_psl_irq_ack(ctx, tfc);
        if (tfc)
                cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
        if (psl_reset_mask)
                recover_psl_err(ctx->afu, psl_reset_mask);

        return 0;
}

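/* A fenced adapter (e.g. after an EEH event) reads back all 1s */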
int cxl_check_error(struct cxl_afu *afu)
{
        return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
                                      enum cxl_attrs type)
{
        return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
                (cr * afu->crs_len) + off);
        return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                (cr * afu->crs_len) + off);
        return 0;
}

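/*
 * The 16- and 8-bit accessors below extract their value from an aligned
 * 32-bit read of the containing word in the configuration record.
 */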
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xffff;
        return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xff;
        return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                (cr * afu->crs_len) + off, in);
        return 0;
}

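/*
 * Sub-word writes are implemented as a read-modify-write of the
 * containing aligned 32-bit word.
 */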
static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        WARN_ON(shift == 24);
        mask = 0xffff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        mask = 0xff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

const struct cxl_backend_ops cxl_native_ops = {
        .module = THIS_MODULE,
        .adapter_reset = cxl_pci_reset,
        .alloc_one_irq = cxl_pci_alloc_one_irq,
        .release_one_irq = cxl_pci_release_one_irq,
        .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
        .release_irq_ranges = cxl_pci_release_irq_ranges,
        .setup_irq = cxl_pci_setup_irq,
        .handle_psl_slice_error = native_handle_psl_slice_error,
        .psl_interrupt = NULL,
        .ack_irq = native_ack_irq,
        .irq_wait = native_irq_wait,
        .attach_process = native_attach_process,
        .detach_process = native_detach_process,
        .update_ivtes = native_update_ivtes,
        .support_attributes = native_support_attributes,
        .link_ok = cxl_adapter_link_ok,
        .release_afu = cxl_pci_release_afu,
        .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
        .afu_check_and_enable = native_afu_check_and_enable,
        .afu_activate_mode = native_afu_activate_mode,
        .afu_deactivate_mode = native_afu_deactivate_mode,
        .afu_reset = native_afu_reset,
        .afu_cr_read8 = native_afu_cr_read8,
        .afu_cr_read16 = native_afu_cr_read16,
        .afu_cr_read32 = native_afu_cr_read32,
        .afu_cr_read64 = native_afu_cr_read64,
        .afu_cr_write8 = native_afu_cr_write8,
        .afu_cr_write16 = native_afu_cr_write16,
        .afu_cr_write32 = native_afu_cr_write32,
        .read_adapter_vpd = cxl_pci_read_adapter_vpd,
};