Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cxl: Export library to support IBM XSL

This patch exports an in-kernel 'library' API which can be called by
other drivers to help interacting with an IBM XSL on a POWER9 system.

The XSL (Translation Service Layer) is a stripped down version of the
PSL (Power Service Layer) used in some cards such as the Mellanox CX5.
Like the PSL, it implements the CAIA architecture, but has a number
of differences, mostly in its implementation-dependent registers.

The XSL also uses a special DMA cxl mode, which uses a slightly
different init sequence for the CAPP and PHB.

Signed-off-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
Signed-off-by: Christophe Lombard <clombard@linux.vnet.ibm.com>
Acked-by: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Christophe Lombard and committed by
Michael Ellerman
3ced8d73 218ea310

+449 -30
+1
arch/powerpc/include/asm/opal-api.h
··· 948 948 OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2, 949 949 OPAL_PHB_CAPI_MODE_SNOOP_ON = 3, 950 950 OPAL_PHB_CAPI_MODE_DMA = 4, 951 + OPAL_PHB_CAPI_MODE_DMA_TVT1 = 5, 951 952 }; 952 953 953 954 /* OPAL I2C request */
+5
drivers/misc/cxl/Kconfig
··· 11 11 bool 12 12 default n 13 13 14 + config CXL_LIB 15 + bool 16 + default n 17 + 14 18 config CXL 15 19 tristate "Support for IBM Coherent Accelerators (CXL)" 16 20 depends on PPC_POWERNV && PCI_MSI && EEH 17 21 select CXL_BASE 18 22 select CXL_AFU_DRIVER_OPS 23 + select CXL_LIB 19 24 default m 20 25 help 21 26 Select this option to enable driver support for IBM Coherent
+1 -1
drivers/misc/cxl/Makefile
··· 3 3 4 4 cxl-y += main.o file.o irq.o fault.o native.o 5 5 cxl-y += context.o sysfs.o pci.o trace.o 6 - cxl-y += vphb.o phb.o api.o 6 + cxl-y += vphb.o phb.o api.o cxllib.o 7 7 cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o 8 8 cxl-$(CONFIG_DEBUG_FS) += debugfs.o 9 9 obj-$(CONFIG_CXL) += cxl.o
+6
drivers/misc/cxl/cxl.h
··· 1010 1010 1011 1011 void cxl_handle_fault(struct work_struct *work); 1012 1012 void cxl_prefault(struct cxl_context *ctx, u64 wed); 1013 + int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar); 1013 1014 1014 1015 struct cxl *get_cxl_adapter(int num); 1015 1016 int cxl_alloc_sst(struct cxl_context *ctx); ··· 1062 1061 int cxl_data_cache_flush(struct cxl *adapter); 1063 1062 int cxl_afu_disable(struct cxl_afu *afu); 1064 1063 int cxl_psl_purge(struct cxl_afu *afu); 1064 + int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, 1065 + u32 *phb_index, u64 *capp_unit_id); 1066 + int cxl_slot_is_switched(struct pci_dev *dev); 1067 + int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg); 1068 + u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9); 1065 1069 1066 1070 void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx); 1067 1071 void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
+246
drivers/misc/cxl/cxllib.c
··· 1 + /* 2 + * Copyright 2017 IBM Corp. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation; either version 7 + * 2 of the License, or (at your option) any later version. 8 + */ 9 + 10 + #include <linux/hugetlb.h> 11 + #include <linux/sched/mm.h> 12 + #include <asm/pnv-pci.h> 13 + #include <misc/cxllib.h> 14 + 15 + #include "cxl.h" 16 + 17 + #define CXL_INVALID_DRA ~0ull 18 + #define CXL_DUMMY_READ_SIZE 128 19 + #define CXL_DUMMY_READ_ALIGN 8 20 + #define CXL_CAPI_WINDOW_START 0x2000000000000ull 21 + #define CXL_CAPI_WINDOW_LOG_SIZE 48 22 + #define CXL_XSL_CONFIG_CURRENT_VERSION CXL_XSL_CONFIG_VERSION1 23 + 24 + 25 + bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags) 26 + { 27 + int rc; 28 + u32 phb_index; 29 + u64 chip_id, capp_unit_id; 30 + 31 + /* No flags currently supported */ 32 + if (flags) 33 + return false; 34 + 35 + if (!cpu_has_feature(CPU_FTR_HVMODE)) 36 + return false; 37 + 38 + if (!cxl_is_power9()) 39 + return false; 40 + 41 + if (cxl_slot_is_switched(dev)) 42 + return false; 43 + 44 + /* on p9, some pci slots are not connected to a CAPP unit */ 45 + rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id); 46 + if (rc) 47 + return false; 48 + 49 + return true; 50 + } 51 + EXPORT_SYMBOL_GPL(cxllib_slot_is_supported); 52 + 53 + static DEFINE_MUTEX(dra_mutex); 54 + static u64 dummy_read_addr = CXL_INVALID_DRA; 55 + 56 + static int allocate_dummy_read_buf(void) 57 + { 58 + u64 buf, vaddr; 59 + size_t buf_size; 60 + 61 + /* 62 + * Dummy read buffer is 128-byte long, aligned on a 63 + * 256-byte boundary and we need the physical address. 
64 + */ 65 + buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN); 66 + buf = (u64) kzalloc(buf_size, GFP_KERNEL); 67 + if (!buf) 68 + return -ENOMEM; 69 + 70 + vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) & 71 + (~0ull << CXL_DUMMY_READ_ALIGN); 72 + 73 + WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size), 74 + "Dummy read buffer alignment issue"); 75 + dummy_read_addr = virt_to_phys((void *) vaddr); 76 + return 0; 77 + } 78 + 79 + int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg) 80 + { 81 + int rc; 82 + u32 phb_index; 83 + u64 chip_id, capp_unit_id; 84 + 85 + if (!cpu_has_feature(CPU_FTR_HVMODE)) 86 + return -EINVAL; 87 + 88 + mutex_lock(&dra_mutex); 89 + if (dummy_read_addr == CXL_INVALID_DRA) { 90 + rc = allocate_dummy_read_buf(); 91 + if (rc) { 92 + mutex_unlock(&dra_mutex); 93 + return rc; 94 + } 95 + } 96 + mutex_unlock(&dra_mutex); 97 + 98 + rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id); 99 + if (rc) 100 + return rc; 101 + 102 + rc = cxl_get_xsl9_dsnctl(capp_unit_id, &cfg->dsnctl); 103 + if (rc) 104 + return rc; 105 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { 106 + /* workaround for DD1 - nbwind = capiind */ 107 + cfg->dsnctl |= ((u64)0x02 << (63-47)); 108 + } 109 + 110 + cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION; 111 + cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE; 112 + cfg->bar_addr = CXL_CAPI_WINDOW_START; 113 + cfg->dra = dummy_read_addr; 114 + return 0; 115 + } 116 + EXPORT_SYMBOL_GPL(cxllib_get_xsl_config); 117 + 118 + int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode, 119 + unsigned long flags) 120 + { 121 + int rc = 0; 122 + 123 + if (!cpu_has_feature(CPU_FTR_HVMODE)) 124 + return -EINVAL; 125 + 126 + switch (mode) { 127 + case CXL_MODE_PCI: 128 + /* 129 + * We currently don't support going back to PCI mode 130 + * However, we'll turn the invalidations off, so that 131 + * the firmware doesn't have to ack them and can do 132 + * things like reset, etc.. 
with no worries. 133 + * So always return EPERM (can't go back to PCI) or 134 + * EBUSY if we couldn't even turn off snooping 135 + */ 136 + rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF); 137 + if (rc) 138 + rc = -EBUSY; 139 + else 140 + rc = -EPERM; 141 + break; 142 + case CXL_MODE_CXL: 143 + /* DMA only supported on TVT1 for the time being */ 144 + if (flags != CXL_MODE_DMA_TVT1) 145 + return -EINVAL; 146 + rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1); 147 + if (rc) 148 + return rc; 149 + rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON); 150 + break; 151 + default: 152 + rc = -EINVAL; 153 + } 154 + return rc; 155 + } 156 + EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode); 157 + 158 + /* 159 + * When switching the PHB to capi mode, the TVT#1 entry for 160 + * the Partitionable Endpoint is set in bypass mode, like 161 + * in PCI mode. 162 + * Configure the device dma to use TVT#1, which is done 163 + * by calling dma_set_mask() with a mask large enough. 164 + */ 165 + int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags) 166 + { 167 + int rc; 168 + 169 + if (flags) 170 + return -EINVAL; 171 + 172 + rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); 173 + return rc; 174 + } 175 + EXPORT_SYMBOL_GPL(cxllib_set_device_dma); 176 + 177 + int cxllib_get_PE_attributes(struct task_struct *task, 178 + unsigned long translation_mode, 179 + struct cxllib_pe_attributes *attr) 180 + { 181 + struct mm_struct *mm = NULL; 182 + 183 + if (translation_mode != CXL_TRANSLATED_MODE && 184 + translation_mode != CXL_REAL_MODE) 185 + return -EINVAL; 186 + 187 + attr->sr = cxl_calculate_sr(false, 188 + task == NULL, 189 + translation_mode == CXL_REAL_MODE, 190 + true); 191 + attr->lpid = mfspr(SPRN_LPID); 192 + if (task) { 193 + mm = get_task_mm(task); 194 + if (mm == NULL) 195 + return -EINVAL; 196 + /* 197 + * Caller is keeping a reference on mm_users for as long 198 + * as XSL uses the memory context 199 + */ 200 + attr->pid = mm->context.id; 201 
+ mmput(mm); 202 + } else { 203 + attr->pid = 0; 204 + } 205 + attr->tid = 0; 206 + return 0; 207 + } 208 + EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes); 209 + 210 + int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) 211 + { 212 + int rc; 213 + u64 dar; 214 + struct vm_area_struct *vma = NULL; 215 + unsigned long page_size; 216 + 217 + if (mm == NULL) 218 + return -EFAULT; 219 + 220 + down_read(&mm->mmap_sem); 221 + 222 + for (dar = addr; dar < addr + size; dar += page_size) { 223 + if (!vma || dar < vma->vm_start || dar > vma->vm_end) { 224 + vma = find_vma(mm, addr); 225 + if (!vma) { 226 + pr_err("Can't find vma for addr %016llx\n", addr); 227 + rc = -EFAULT; 228 + goto out; 229 + } 230 + /* get the size of the pages allocated */ 231 + page_size = vma_kernel_pagesize(vma); 232 + } 233 + 234 + rc = cxl_handle_mm_fault(mm, flags, dar); 235 + if (rc) { 236 + pr_err("cxl_handle_mm_fault failed %d", rc); 237 + rc = -EFAULT; 238 + goto out; 239 + } 240 + } 241 + rc = 0; 242 + out: 243 + up_read(&mm->mmap_sem); 244 + return rc; 245 + } 246 + EXPORT_SYMBOL_GPL(cxllib_handle_fault);
+19 -10
drivers/misc/cxl/fault.c
··· 132 132 return IRQ_HANDLED; 133 133 } 134 134 135 - static void cxl_handle_page_fault(struct cxl_context *ctx, 136 - struct mm_struct *mm, u64 dsisr, u64 dar) 135 + int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar) 137 136 { 138 137 unsigned flt = 0; 139 138 int result; 140 139 unsigned long access, flags, inv_flags = 0; 141 140 142 - trace_cxl_pte_miss(ctx, dsisr, dar); 143 - 144 141 if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { 145 142 pr_devel("copro_handle_mm_fault failed: %#x\n", result); 146 - return cxl_ack_ae(ctx); 143 + return result; 147 144 } 148 145 149 146 if (!radix_enabled()) { ··· 152 155 if (dsisr & CXL_PSL_DSISR_An_S) 153 156 access |= _PAGE_WRITE; 154 157 155 - access |= _PAGE_PRIVILEGED; 156 - if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID)) 157 - access &= ~_PAGE_PRIVILEGED; 158 + if (!mm && (REGION_ID(dar) != USER_REGION_ID)) 159 + access |= _PAGE_PRIVILEGED; 158 160 159 161 if (dsisr & DSISR_NOHPTE) 160 162 inv_flags |= HPTE_NOHPTE_UPDATE; ··· 162 166 hash_page_mm(mm, dar, access, 0x300, inv_flags); 163 167 local_irq_restore(flags); 164 168 } 165 - pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); 166 - cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); 169 + return 0; 170 + } 171 + 172 + static void cxl_handle_page_fault(struct cxl_context *ctx, 173 + struct mm_struct *mm, 174 + u64 dsisr, u64 dar) 175 + { 176 + trace_cxl_pte_miss(ctx, dsisr, dar); 177 + 178 + if (cxl_handle_mm_fault(mm, dsisr, dar)) { 179 + cxl_ack_ae(ctx); 180 + } else { 181 + pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); 182 + cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); 183 + } 167 184 } 168 185 169 186 /*
+11 -5
drivers/misc/cxl/native.c
··· 586 586 #define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE)) 587 587 #endif 588 588 589 - static u64 calculate_sr(struct cxl_context *ctx) 589 + u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9) 590 590 { 591 591 u64 sr = 0; 592 592 593 593 set_endian(sr); 594 - if (ctx->master) 594 + if (master) 595 595 sr |= CXL_PSL_SR_An_MP; 596 596 if (mfspr(SPRN_LPCR) & LPCR_TC) 597 597 sr |= CXL_PSL_SR_An_TC; 598 - if (ctx->kernel) { 599 - if (!ctx->real_mode) 598 + if (kernel) { 599 + if (!real_mode) 600 600 sr |= CXL_PSL_SR_An_R; 601 601 sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV; 602 602 } else { ··· 608 608 if (!test_tsk_thread_flag(current, TIF_32BIT)) 609 609 sr |= CXL_PSL_SR_An_SF; 610 610 } 611 - if (cxl_is_power9()) { 611 + if (p9) { 612 612 if (radix_enabled()) 613 613 sr |= CXL_PSL_SR_An_XLAT_ror; 614 614 else 615 615 sr |= CXL_PSL_SR_An_XLAT_hpt; 616 616 } 617 617 return sr; 618 + } 619 + 620 + static u64 calculate_sr(struct cxl_context *ctx) 621 + { 622 + return cxl_calculate_sr(ctx->master, ctx->kernel, ctx->real_mode, 623 + cxl_is_power9()); 618 624 } 619 625 620 626 static void update_ivtes_directed(struct cxl_context *ctx)
+27 -14
drivers/misc/cxl/pci.c
··· 375 375 return 0; 376 376 } 377 377 378 - static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, 378 + int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid, 379 379 u32 *phb_index, u64 *capp_unit_id) 380 380 { 381 381 int rc; ··· 408 408 return 0; 409 409 } 410 410 411 - static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci_dev *dev) 411 + int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg) 412 412 { 413 - u64 xsl_dsnctl, psl_fircntl; 414 - u64 chipid; 415 - u32 phb_index; 416 - u64 capp_unit_id; 417 - int rc; 418 - 419 - rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); 420 - if (rc) 421 - return rc; 413 + u64 xsl_dsnctl; 422 414 423 415 /* 424 416 * CAPI Identifier bits [0:7] ··· 445 453 */ 446 454 xsl_dsnctl |= ((u64)0x04 << (63-55)); 447 455 } 456 + 457 + *reg = xsl_dsnctl; 458 + return 0; 459 + } 460 + 461 + static int init_implementation_adapter_regs_psl9(struct cxl *adapter, 462 + struct pci_dev *dev) 463 + { 464 + u64 xsl_dsnctl, psl_fircntl; 465 + u64 chipid; 466 + u32 phb_index; 467 + u64 capp_unit_id; 468 + int rc; 469 + 470 + rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); 471 + if (rc) 472 + return rc; 473 + 474 + rc = cxl_get_xsl9_dsnctl(capp_unit_id, &xsl_dsnctl); 475 + if (rc) 476 + return rc; 448 477 449 478 cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl); 450 479 ··· 518 505 u64 capp_unit_id; 519 506 int rc; 520 507 521 - rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); 508 + rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); 522 509 if (rc) 523 510 return rc; 524 511 ··· 551 538 u64 capp_unit_id; 552 539 int rc; 553 540 554 - rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); 541 + rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id); 555 542 if (rc) 556 543 return rc; 557 544 ··· 1910 1897 1911 1898 #define CXL_MAX_PCIEX_PARENT 2 1912 1899 1913 - static int cxl_slot_is_switched(struct pci_dev *dev) 
1900 + int cxl_slot_is_switched(struct pci_dev *dev) 1914 1901 { 1915 1902 struct device_node *np; 1916 1903 int depth = 0;
+133
include/misc/cxllib.h
··· 1 + /* 2 + * Copyright 2017 IBM Corp. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License 6 + * as published by the Free Software Foundation; either version 7 + * 2 of the License, or (at your option) any later version. 8 + */ 9 + 10 + #ifndef _MISC_CXLLIB_H 11 + #define _MISC_CXLLIB_H 12 + 13 + #include <linux/pci.h> 14 + #include <asm/reg.h> 15 + 16 + /* 17 + * cxl driver exports a in-kernel 'library' API which can be called by 18 + * other drivers to help interacting with an IBM XSL. 19 + */ 20 + 21 + /* 22 + * tells whether capi is supported on the PCIe slot where the 23 + * device is seated 24 + * 25 + * Input: 26 + * dev: device whose slot needs to be checked 27 + * flags: 0 for the time being 28 + */ 29 + bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags); 30 + 31 + 32 + /* 33 + * Returns the configuration parameters to be used by the XSL or device 34 + * 35 + * Input: 36 + * dev: device, used to find PHB 37 + * Output: 38 + * struct cxllib_xsl_config: 39 + * version 40 + * capi BAR address, i.e. 0x2000000000000-0x2FFFFFFFFFFFF 41 + * capi BAR size 42 + * data send control (XSL_DSNCTL) 43 + * dummy read address (XSL_DRA) 44 + */ 45 + #define CXL_XSL_CONFIG_VERSION1 1 46 + struct cxllib_xsl_config { 47 + u32 version; /* format version for register encoding */ 48 + u32 log_bar_size;/* log size of the capi_window */ 49 + u64 bar_addr; /* address of the start of capi window */ 50 + u64 dsnctl; /* matches definition of XSL_DSNCTL */ 51 + u64 dra; /* real address that can be used for dummy read */ 52 + }; 53 + 54 + int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg); 55 + 56 + 57 + /* 58 + * Activate capi for the pci host bridge associated with the device. 59 + * Can be extended to deactivate once we know how to do it. 
60 + * Device must be ready to accept messages from the CAPP unit and 61 + * respond accordingly (TLB invalidates, ...) 62 + * 63 + * PHB is switched to capi mode through calls to skiboot. 64 + * CAPP snooping is activated 65 + * 66 + * Input: 67 + * dev: device whose PHB should switch mode 68 + * mode: mode to switch to i.e. CAPI or PCI 69 + * flags: options related to the mode 70 + */ 71 + enum cxllib_mode { 72 + CXL_MODE_CXL, 73 + CXL_MODE_PCI, 74 + }; 75 + 76 + #define CXL_MODE_NO_DMA 0 77 + #define CXL_MODE_DMA_TVT0 1 78 + #define CXL_MODE_DMA_TVT1 2 79 + 80 + int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode, 81 + unsigned long flags); 82 + 83 + 84 + /* 85 + * Set the device for capi DMA. 86 + * Define its dma_ops and dma offset so that allocations will be using TVT#1 87 + * 88 + * Input: 89 + * dev: device to set 90 + * flags: options. CXL_MODE_DMA_TVT1 should be used 91 + */ 92 + int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags); 93 + 94 + 95 + /* 96 + * Get the Process Element structure for the given thread 97 + * 98 + * Input: 99 + * task: task_struct for the context of the translation 100 + * translation_mode: whether addresses should be translated 101 + * Output: 102 + * attr: attributes to fill up the Process Element structure from CAIA 103 + */ 104 + struct cxllib_pe_attributes { 105 + u64 sr; 106 + u32 lpid; 107 + u32 tid; 108 + u32 pid; 109 + }; 110 + #define CXL_TRANSLATED_MODE 0 111 + #define CXL_REAL_MODE 1 112 + 113 + int cxllib_get_PE_attributes(struct task_struct *task, 114 + unsigned long translation_mode, struct cxllib_pe_attributes *attr); 115 + 116 + 117 + /* 118 + * Handle memory fault. 
119 + * Fault in all the pages of the specified buffer for the permissions 120 + * provided in ‘flags’ 121 + * 122 + * Shouldn't be called from interrupt context 123 + * 124 + * Input: 125 + * mm: struct mm for the thread faulting the pages 126 + * addr: base address of the buffer to page in 127 + * size: size of the buffer to page in 128 + * flags: permission requested (DSISR_ISSTORE...) 129 + */ 130 + int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags); 131 + 132 + 133 + #endif /* _MISC_CXLLIB_H */