/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	int rc;
	u64 serr;

	pr_devel("AFU reset request\n");

	rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			 false);

	/*
	 * Re-enable any masked interrupts when the AFU is not
	 * activated to avoid side effects after attaching a process
	 * in dedicated mode.
	 */
	if (afu->current_mode == 0) {
		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	}

	return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	u64 trans_fault = 0x0ULL;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

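	/*
	 * POWER8 and POWER9 PSLs report a pending translation fault via
	 * different DSISR bits, so pick the right one for the purge loop
	 * below.
	 */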
	if (cxl_is_power8())
		trans_fault = CXL_PSL_DSISR_TRANS;
	if (cxl_is_power9())
		trans_fault = CXL_PSL9_DSISR_An_TF;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
	       == CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
				     PSL_CNTL, dsisr);

		if (dsisr & trans_fault) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
				   dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
				   dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
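	/*
	 * Worked example for a single 4K page:
	 * ((4096 / 8) - 96) / 17 = 24 processes, which indeed fits:
	 * ((24+4) * 128) + (24*8) + 256 = 4032 bytes, leaving 64 bytes of
	 * alignment slack within the page (25 would need 4168 bytes).
	 */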
	return ((spa_size / 8) - 96) / 17;
}

static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
				 afu->native->spa_max_procs, afu->native->spa_size);
			if (mode != CXL_MODE_DEDICATED)
				afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

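	/*
	 * Sketch of the SPAP encoding, per the masks used below: the SPA's
	 * physical base address, an encoded size and a valid bit are packed
	 * into one register value, so the PSL picks up the whole area from a
	 * single MMIO write.
	 */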
	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
						    ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	u64 ierat;

	pr_devel("CXL adapter - invalidation of all ERAT entries\n");

	/* Invalidates all ERAT entries for Radix or HPT */
	ierat = CXL_XSL9_IERAT_IALL;
	if (radix_enabled())
		ierat |= CXL_XSL9_IERAT_INVR;
	cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

	while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev,
				 "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	/*
	 * Only flush the data cache if one is present: the PSL9D has no
	 * data cache, so a flush request there would simply time out.
	 */
	if (adapter->native->no_data_cache) {
		pr_devel("No PSL data cache. Ignoring cache flush req.\n");
		return 0;
	}

	pr_devel("Flushing data cache\n");
	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using the per-slice version (i.e. SLBIA_An) may improve performance here */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
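	/*
	 * The barrier above orders the PE state update ahead of publishing
	 * the command word below, which the PSL updates as the command
	 * progresses.
	 */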
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
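		/*
		 * The command is complete once the status word reflects the
		 * command itself, the matching software state (the command
		 * code shifted down into the state field) and our PE handle.
		 */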
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0; /* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	if (cxl_is_power8())
		slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	if (cxl_is_power8())
		cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif
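/*
 * Note: set_endian() keys the context's byte order off the kernel build
 * (CONFIG_CPU_LITTLE_ENDIAN), setting or clearing the LE bit in the state
 * register accordingly.
 */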

u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
	u64 sr = 0;

	set_endian(sr);
	if (master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;

	if (kernel) {
		if (!real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_HV;
		else
			sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	if (p9) {
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_XLAT_ror;
		else
			sr |= CXL_PSL_SR_An_XLAT_hpt;
	}
	return sr;
}

static u64 calculate_sr(struct cxl_context *ctx)
{
	return cxl_calculate_sr(ctx->master, ctx->kernel, false,
				cxl_is_power9());
}

static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int rc;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */

	if (ctx->kernel)
		pid = 0;
	else {
		if (ctx->mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			return -EINVAL;
		}
		pid = ctx->mm->context.id;
	}

	/* Assign a unique TIDR (thread id) for the current thread */
	if (!(ctx->tidr) && (ctx->assign_tidr)) {
		rc = set_thread_tidr(current);
		if (rc)
			return -ENODEV;
		ctx->tidr = current->thread.tidr;
		pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
	}

	ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */

	cxl_prefault(ctx, wed);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	update_ivtes_directed(ctx);

	/* first guy needs to enable */
	result = cxl_ops->afu_check_and_enable(ctx->afu);
	if (result)
		return result;

	return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
	ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	/*
	 * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
	 * XSL and AFU are programmed to work with a single context.
	 * The context information should be configured in the SPA area
	 * index 0 (so PSL_SPAP must be configured before enabling the
	 * AFU).
	 */
	afu->num_procs = 1;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DEDICATED;

	return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}
}

void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.range[3] & 0xffff));
}

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
	/*
	 * Ideally we should do a wmb() here to make sure the changes to the
	 * PE are visible to the card before we call afu_enable.
	 * On ppc64 though all mmios are preceded by a 'sync' instruction
	 * hence we don't need one here.
	 */

	result = cxl_ops->afu_reset(afu);
	if (result)
		return result;

	return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if ((mode == CXL_MODE_DEDICATED) &&
	    (afu->adapter->native->sl_ops->activate_dedicated_process))
		return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

	return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
				 u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
		return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
		return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
		return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	if (cxl_is_power8())
		info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
	u64 fir1, serr;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						 u64 dsisr, u64 errstat)
{
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
		return true;

	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
		return true;

	return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (cxl_is_translation_fault(afu, irq_info->dsisr))
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
	int ph, ret = IRQ_HANDLED, res;

	/* check if eeh kicked in while the interrupt was in flight */
	if (unlikely(phreg == ~0ULL)) {
		dev_warn(&afu->dev,
			 "Ignoring slice interrupt(%d) due to fenced card",
			 irq);
		return IRQ_HANDLED;
	}
	/* Mask the pe-handle from register value */
	ph = phreg & 0xffff;
	if ((res = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
		if (afu->adapter->native->sl_ops->fail_irq)
			return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
		return ret;
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		if (afu->adapter->native->sl_ops->handle_interrupt)
			ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
	     " %016llx\n(Possible AFU HW issue - was a term/remove acked"
	     " with outstanding transactions?)\n", ph, irq_info.dsisr,
	     irq_info.dar);
	if (afu->adapter->native->sl_ops->fail_irq)
		ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
	return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if (cxl_is_power8() &&
		    ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
			return;
		if (cxl_is_power9() &&
		    ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so need to let that run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
	return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 errstat, serr, afu_error, dsisr;
	u64 fir_slice, afu_debug, irq_mask;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);

	if (cxl_is_power8()) {
		fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
		afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
		dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
		dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	}
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	/* mask off the IRQ so it won't retrigger until the AFU is reset */
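	/*
	 * Per the CXL_PSL_SERR_An_IRQS field and the shift below, each
	 * per-source status bit sits 32 bits above its corresponding mask
	 * bit, so shifting the status down and ORing it back in masks
	 * exactly the sources that just fired.
	 */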
	irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
	serr |= irq_mask;
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

	return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
{
	u64 fir1;

	fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
	dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
}

void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
	dev_crit(&adapter->dev,
		 "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
		 fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq == 0 ||
	    adapter->native->err_virq !=
	    irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
	adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (cxl_is_power8())
		serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	if (cxl_is_power9()) {
		/*
		 * By default, all errors are masked. So don't set all masks.
		 * Slice errors will be transferred.
		 */
		serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
	}
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq == 0 ||
	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
	afu->serr_virq = 0;
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq == 0 ||
	    afu->native->psl_virq !=
	    irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
	afu->native->psl_virq = 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
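	/* A fenced/EEH'd card reads back all ones from MMIO, so ~0 here means the hardware is gone */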
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

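	/*
	 * Sub-word reads are built from an aligned 32-bit read: e.g. a
	 * 16-bit read at off 6 fetches the LE word at offset 4 and shifts
	 * it right by (6 & 0x3) * 8 = 16 bits.
	 */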
	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		 (cr * afu->crs_len) + off, in);
	return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
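	/* a 16-bit write at byte offset 3 would straddle the 32-bit word, so such an offset is a caller bug */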
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};