Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/pci_dma: handle dma table failures

We use lazy allocation for translation table entries but don't handle
allocation (and other) failures during translation table updates.

Handle these failures and undo translation table updates when it's
meaningful.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Sebastian Ott and committed by Martin Schwidefsky
66728eee 4d5a6b72

+48 -17
+3 -1
arch/s390/include/asm/pci_dma.h
··· 195 195 void dma_free_seg_table(unsigned long); 196 196 unsigned long *dma_alloc_cpu_table(void); 197 197 void dma_cleanup_tables(unsigned long *); 198 - void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int); 198 + unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr); 199 + void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags); 200 + 199 201 #endif
+24 -14
arch/s390/pci/pci_dma.c
··· 95 95 return pto; 96 96 } 97 97 98 - static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) 98 + unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) 99 99 { 100 100 unsigned long *sto, *pto; 101 101 unsigned int rtx, sx, px; ··· 114 114 return &pto[px]; 115 115 } 116 116 117 - void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr, 118 - dma_addr_t dma_addr, int flags) 117 + void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags) 119 118 { 120 - unsigned long *entry; 121 - 122 - entry = dma_walk_cpu_trans(dma_table, dma_addr); 123 - if (!entry) { 124 - WARN_ON_ONCE(1); 125 - return; 126 - } 127 - 128 119 if (flags & ZPCI_PTE_INVALID) { 129 120 invalidate_pt_entry(entry); 130 121 } else { ··· 136 145 u8 *page_addr = (u8 *) (pa & PAGE_MASK); 137 146 dma_addr_t start_dma_addr = dma_addr; 138 147 unsigned long irq_flags; 148 + unsigned long *entry; 139 149 int i, rc = 0; 140 150 141 151 if (!nr_pages) 142 152 return -EINVAL; 143 153 144 154 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); 145 - if (!zdev->dma_table) 155 + if (!zdev->dma_table) { 156 + rc = -EINVAL; 146 157 goto no_refresh; 158 + } 147 159 148 160 for (i = 0; i < nr_pages; i++) { 149 - dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr, 150 - flags); 161 + entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); 162 + if (!entry) { 163 + rc = -ENOMEM; 164 + goto undo_cpu_trans; 165 + } 166 + dma_update_cpu_trans(entry, page_addr, flags); 151 167 page_addr += PAGE_SIZE; 152 168 dma_addr += PAGE_SIZE; 153 169 } ··· 173 175 174 176 rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, 175 177 nr_pages * PAGE_SIZE); 178 + undo_cpu_trans: 179 + if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { 180 + flags = ZPCI_PTE_INVALID; 181 + while (i-- > 0) { 182 + page_addr -= PAGE_SIZE; 183 + dma_addr -= PAGE_SIZE; 184 + entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); 185 + if (!entry) 186 + break; 187 + dma_update_cpu_trans(entry, page_addr, flags); 188 + } 189 + } 176 190 177 191 no_refresh: 178 192 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
+21 -2
drivers/iommu/s390-iommu.c
··· 216 216 u8 *page_addr = (u8 *) (pa & PAGE_MASK); 217 217 dma_addr_t start_dma_addr = dma_addr; 218 218 unsigned long irq_flags, nr_pages, i; 219 + unsigned long *entry; 219 220 int rc = 0; 220 221 221 222 if (dma_addr < s390_domain->domain.geometry.aperture_start || ··· 229 228 230 229 spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags); 231 230 for (i = 0; i < nr_pages; i++) { 232 - dma_update_cpu_trans(s390_domain->dma_table, page_addr, 233 - dma_addr, flags); 231 + entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr); 232 + if (!entry) { 233 + rc = -ENOMEM; 234 + goto undo_cpu_trans; 235 + } 236 + dma_update_cpu_trans(entry, page_addr, flags); 234 237 page_addr += PAGE_SIZE; 235 238 dma_addr += PAGE_SIZE; 236 239 } ··· 247 242 break; 248 243 } 249 244 spin_unlock(&s390_domain->list_lock); 245 + 246 + undo_cpu_trans: 247 + if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { 248 + flags = ZPCI_PTE_INVALID; 249 + while (i-- > 0) { 250 + page_addr -= PAGE_SIZE; 251 + dma_addr -= PAGE_SIZE; 252 + entry = dma_walk_cpu_trans(s390_domain->dma_table, 253 + dma_addr); 254 + if (!entry) 255 + break; 256 + dma_update_cpu_trans(entry, page_addr, flags); 257 + } 258 + } 250 259 spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags); 251 260 252 261 return rc;