Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/misc/cxl/native.c at v4.12 (1558 lines, 44 kB)
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
			u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}
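
/*
 * Illustrative sketch: every AFU state change above is a single
 * afu_control() invocation - write the command bits, then poll the
 * status field under the given mask until it matches. A reset, for
 * example, is expressed exactly as native_afu_reset() does below:
 *
 *	rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
 *			 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
 *			 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
 *			 false);
 *
 * The poll is bounded by HZ * CXL_TIMEOUT jiffies, i.e. CXL_TIMEOUT
 * seconds independent of the HZ configuration.
 */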

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	int rc;
	u64 serr;

	pr_devel("AFU reset request\n");

	rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			 false);

	/*
	 * Re-enable any masked interrupts when the AFU is not
	 * activated to avoid side effects after attaching a process
	 * in dedicated mode.
	 */
	if (afu->current_mode == 0) {
		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	}

	return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	u64 trans_fault = 0x0ULL;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (cxl_is_power8())
		trans_fault = CXL_PSL_DSISR_TRANS;
	if (cxl_is_power9())
		trans_fault = CXL_PSL9_DSISR_An_TF;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
				     PSL_CNTL, dsisr);

		if (dsisr & trans_fault) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
				   dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
				   dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
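
/*
 * Worked example for the expression above: ignoring the 128-byte
 * alignment, the CAIA size formula reduces to
 *
 *	spa_size >= ((n + 4) * 128) + (n * 8) + 256 = 136*n + 768
 *
 * so n <= (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17.
 * A single 4K page therefore holds (512 - 96) / 17 = 24 process
 * elements; sixteen pages (64K) hold (8192 - 96) / 17 = 476.
 */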

static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
				 afu->native->spa_max_procs, afu->native->spa_size);
			if (mode != CXL_MODE_DEDICATED)
				afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					 ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
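
/*
 * Layout note, as implied by attach_spa() above: the software
 * command/status doubleword used by the link-list commands sits at byte
 * offset (spa_max_procs + 3) * 128 from the SPA base, and the SPAP
 * register packs the SPA physical address, the encoded SPA size and the
 * valid bit into a single MMIO write.
 */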

static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	u64 ierat;

	pr_devel("CXL adapter - invalidation of all ERAT entries\n");

	/* Invalidates all ERAT entries for Radix or HPT */
	ierat = CXL_XSL9_IERAT_IALL;
	if (radix_enabled())
		ierat |= CXL_XSL9_IERAT_INVR;
	cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

	while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev,
				 "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("Flushing data cache\n");

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}
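
/*
 * The ordering above matters: the SLB invalidation in step 2 runs while
 * SSTP1[V] is clear, so no SLB entry faulted in against the old segment
 * table can survive into the window where the new table becomes visible.
 * (Rationale inferred from the numbered sequence; the steps themselves
 * follow the architected update procedure.)
 */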

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}
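
/*
 * Completion note, as implied by the check above: the PSL reports a
 * link-list command as done by echoing it into the software
 * command/status doubleword, so the poll compares the CMD, STATE and
 * LINK fields against cmd | (cmd >> 16) | ctx->pe. An all-ones status
 * is treated as a failure to process the element.
 */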

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	if (cxl_is_power8())
		slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	if (cxl_is_power8())
		cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

static u64 calculate_sr(struct cxl_context *ctx)
{
	u64 sr = 0;

	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (ctx->kernel) {
		if (!ctx->real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_HV;
		else
			sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	if (cxl_is_power9()) {
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_XLAT_ror;
		else
			sr |= CXL_PSL_SR_An_XLAT_hpt;
	}
	return sr;
}
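
/*
 * Example, worked from the branches above: an ordinary 64-bit user
 * context on a radix POWER9 host with a little-endian kernel ends up
 * with
 *
 *	sr = CXL_PSL_SR_An_LE | CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R |
 *	     CXL_PSL_SR_An_HV | CXL_PSL_SR_An_SF | CXL_PSL_SR_An_XLAT_ror
 *
 * plus CXL_PSL_SR_An_MP for a master context and CXL_PSL_SR_An_TC if
 * LPCR[TC] is set.
 */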

static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */

	if (ctx->kernel)
		pid = 0;
	else {
		if (ctx->mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			return -EINVAL;
		}
		pid = ctx->mm->context.id;
	}

	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */

	cxl_prefault(ctx, wed);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	update_ivtes_directed(ctx);

	/* first guy needs to enable */
	result = cxl_ops->afu_check_and_enable(ctx->afu);
	if (result)
		return result;

	return add_process_element(ctx);
}
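
/*
 * Note the PSL9/PSL8 difference in the pid field: process_element_entry_psl9()
 * above stores the MMU context id (ctx->mm->context.id), whereas the PSL8
 * path below stores the Linux pid of the attaching task.
 */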

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
	ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	/*
	 * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
	 * XSL and AFU are programmed to work with a single context.
	 * The context information should be configured in the SPA area
	 * index 0 (so PSL_SPAP must be configured before enabling the
	 * AFU).
	 */
	afu->num_procs = 1;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DEDICATED;

	return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}
}

void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.range[3] & 0xffff));
}
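
/*
 * Packing example for the PSL8 helper above: with irq offsets
 * {o0, o1, o2, o3} and ranges {r0, r1, r2, r3}, the two registers hold
 *
 *	CXL_PSL_IVTE_Offset_An = o0:o1:o2:o3	(16 bits each, o0 in 63:48)
 *	CXL_PSL_IVTE_Limit_An  = r0:r1:r2:r3
 *
 * so all CXL_IRQ_RANGES entries for the dedicated context are programmed
 * with two MMIO writes.
 */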

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	result = cxl_ops->afu_reset(afu);
	if (result)
		return result;

	return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if ((mode == CXL_MODE_DEDICATED) &&
	    (afu->adapter->native->sl_ops->activate_dedicated_process))
		return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

	return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
				 u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
		return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
		return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
		return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	if (cxl_is_power8())
		info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
	u64 fir1, fir2, serr;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR2);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						 u64 dsisr, u64 errstat)
{

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
		return true;

	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
		return true;

	return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (cxl_is_translation_fault(afu, irq_info->dsisr))
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}
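
/*
 * The TFC write above mirrors what cxl_psl_purge() does when draining
 * faults: a translation fault is completed with an address error
 * (CXL_PSL_TFC_An_AE) so the AFU sees the access fail, while anything
 * else is simply acknowledged (CXL_PSL_TFC_An_A).
 */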

static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
	int ph, ret = IRQ_HANDLED, res;

	/* check if eeh kicked in while the interrupt was in flight */
	if (unlikely(phreg == ~0ULL)) {
		dev_warn(&afu->dev,
			 "Ignoring slice interrupt(%d) due to fenced card",
			 irq);
		return IRQ_HANDLED;
	}
	/* Mask the pe-handle from register value */
	ph = phreg & 0xffff;
	if ((res = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
		if (afu->adapter->native->sl_ops->fail_irq)
			return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
		return ret;
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		if (afu->adapter->native->sl_ops->handle_interrupt)
			ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	if (afu->adapter->native->sl_ops->fail_irq)
		ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
	return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if (cxl_is_power8() &&
		    ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
			return;
		if (cxl_is_power9() &&
		    ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so need to let that run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
	return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 errstat, serr, afu_error, dsisr;
	u64 fir_slice, afu_debug, irq_mask;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);

	if (cxl_is_power8()) {
		fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
		afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
		dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
		dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	}
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	/* mask off the IRQ so it won't retrigger until the AFU is reset */
	irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
	serr |= irq_mask;
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

	return IRQ_HANDLED;
}
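
/*
 * Masking example for the shift above: in CXL_PSL_SERR_An the per-source
 * interrupt status bits sit 32 bits above their corresponding mask bits,
 * so OR-ing (serr & CXL_PSL_SERR_An_IRQS) >> 32 back into the register
 * masks exactly the sources that just fired while leaving the remaining
 * error reporting armed.
 */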

void cxl_native_err_irq_dump_regs(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq == 0 ||
	    adapter->native->err_virq !=
	    irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
	adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (cxl_is_power8())
		serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	if (cxl_is_power9()) {
		/*
		 * By default, all errors are masked. So don't set all masks.
		 * Slice errors will be transferred.
		 */
		serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
	}
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq == 0 ||
	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
	afu->serr_virq = 0;
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq == 0 ||
	    afu->native->psl_virq !=
	    irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
	afu->native->psl_virq = 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}
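
/*
 * Worked example for the sub-word helpers above: a 1-byte read at
 * config-record offset 0x5 becomes a 32-bit read at the aligned offset
 * 0x4 followed by (val >> 8) & 0xff; a 2-byte read at 0x6 becomes
 * (val >> 16) & 0xffff. Sub-word accesses therefore never cross a
 * 32-bit boundary.
 */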

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		 (cr * afu->crs_len) + off, in);
	return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}
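
/*
 * The WARN_ON(shift == 24) in the 16-bit write path above flags the one
 * case the aligned read-modify-write cannot express: a halfword at byte
 * offset 3 would straddle the 32-bit word, and the 0xffff << 24 mask
 * would silently drop the high byte.
 */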

const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};