Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-6.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Heiko Carstens:

- Various virtual vs physical address usage fixes

- Add new bitwise types and helper functions and use them in s390
specific drivers and code to make it easier to find virtual vs
physical address usage bugs.

Right now virtual and physical addresses are identical for s390,
except for module, vmalloc, and similar areas. This will be changed,
hopefully with the next merge window, so that e.g. the kernel image
and modules will be located close to each other, allowing for direct
branches and also for some other simplifications.

As a prerequisite this requires fixing all misuses of virtual and
physical addresses. As it turned out, people are so used to the
concept that virtual and physical addresses are the same that new
bugs got added to code which was already fixed. In order to avoid
merging even more code which adds such bugs, add and use new
bitwise types, so that sparse can be used to find such usage bugs.

Most likely the new types can go away again after some time.

- Provide a simple ARCH_HAS_DEBUG_VIRTUAL implementation

- Fix kprobe branch handling: if an out-of-line single stepped relative
branch instruction has a target address within a certain address area
in the entry code, the program check handler may incorrectly execute
cleanup code as if KVM code was executed, leading to crashes

- Fix reference counting of zcrypt card objects

- Various other small fixes and cleanups

* tag 's390-6.9-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (41 commits)
s390/entry: compare gmap asce to determine guest/host fault
s390/entry: remove OUTSIDE macro
s390/entry: add CIF_SIE flag and remove sie64a() address check
s390/cio: use while (i--) pattern to clean up
s390/raw3270: make class3270 constant
s390/raw3270: improve raw3270_init() readability
s390/tape: make tape_class constant
s390/vmlogrdr: make vmlogrdr_class constant
s390/vmur: make vmur_class constant
s390/zcrypt: make zcrypt_class constant
s390/mm: provide simple ARCH_HAS_DEBUG_VIRTUAL support
s390/vfio_ccw_cp: use new address translation helpers
s390/iucv: use new address translation helpers
s390/ctcm: use new address translation helpers
s390/lcs: use new address translation helpers
s390/qeth: use new address translation helpers
s390/zfcp: use new address translation helpers
s390/tape: fix virtual vs physical address confusion
s390/3270: use new address translation helpers
s390/3215: use new address translation helpers
...

+771 -553
+1
arch/s390/Kconfig
··· 63 63 select ARCH_ENABLE_MEMORY_HOTREMOVE 64 64 select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 65 65 select ARCH_HAS_CURRENT_STACK_POINTER 66 + select ARCH_HAS_DEBUG_VIRTUAL 66 67 select ARCH_HAS_DEBUG_VM_PGTABLE 67 68 select ARCH_HAS_DEBUG_WX 68 69 select ARCH_HAS_DEVMEM_IS_ALLOWED
+1
arch/s390/Makefile
··· 29 29 endif 30 30 KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack 31 31 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY 32 + KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR 32 33 KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain 33 34 KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables 34 35 KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
+1
arch/s390/configs/debug_defconfig
··· 810 810 CONFIG_DEBUG_STACK_USAGE=y 811 811 CONFIG_DEBUG_VM=y 812 812 CONFIG_DEBUG_VM_PGFLAGS=y 813 + CONFIG_DEBUG_VIRTUAL=y 813 814 CONFIG_DEBUG_MEMORY_INIT=y 814 815 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m 815 816 CONFIG_DEBUG_PER_CPU_MAPS=y
+2 -1
arch/s390/include/asm/ccwdev.h
··· 217 217 extern int ccw_device_enable_console(struct ccw_device *); 218 218 extern void ccw_device_wait_idle(struct ccw_device *); 219 219 220 - extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size); 220 + extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size, 221 + dma32_t *dma_handle); 221 222 extern void ccw_device_dma_free(struct ccw_device *cdev, 222 223 void *cpu_addr, size_t size); 223 224
+6 -3
arch/s390/include/asm/cio.h
··· 7 7 8 8 #include <linux/bitops.h> 9 9 #include <linux/genalloc.h> 10 + #include <asm/dma-types.h> 10 11 #include <asm/types.h> 11 12 #include <asm/tpi.h> 12 13 ··· 33 32 __u8 cmd_code; 34 33 __u8 flags; 35 34 __u16 count; 36 - __u32 cda; 35 + dma32_t cda; 37 36 } __attribute__ ((packed,aligned(8))); 38 37 39 38 /** ··· 153 152 struct esw0 { 154 153 struct sublog sublog; 155 154 struct erw erw; 156 - __u32 faddr[2]; 157 - __u32 saddr; 155 + dma32_t faddr[2]; 156 + dma32_t saddr; 158 157 } __attribute__ ((packed)); 159 158 160 159 /** ··· 365 364 366 365 void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, 367 366 size_t size); 367 + void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, 368 + size_t size, dma32_t *dma_handle); 368 369 void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size); 369 370 void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev); 370 371 struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages);
+103
arch/s390/include/asm/dma-types.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef _ASM_S390_DMA_TYPES_H_ 4 + #define _ASM_S390_DMA_TYPES_H_ 5 + 6 + #include <linux/types.h> 7 + #include <linux/io.h> 8 + 9 + /* 10 + * typedef dma32_t 11 + * Contains a 31 bit absolute address to a DMA capable piece of storage. 12 + * 13 + * For CIO, DMA addresses are always absolute addresses. These addresses tend 14 + * to be used in architectured memory blocks (like ORB, IDAW, MIDAW). Under 15 + * certain circumstances 31 bit wide addresses must be used because the 16 + * address must fit in 31 bits. 17 + * 18 + * This type is to be used when such fields can be modelled as 32 bit wide. 19 + */ 20 + typedef u32 __bitwise dma32_t; 21 + 22 + /* 23 + * typedef dma64_t 24 + * Contains a 64 bit absolute address to a DMA capable piece of storage. 25 + * 26 + * For CIO, DMA addresses are always absolute addresses. These addresses tend 27 + * to be used in architectured memory blocks (like ORB, IDAW, MIDAW). 28 + * 29 + * This type is to be used to model such 64 bit wide fields. 30 + */ 31 + typedef u64 __bitwise dma64_t; 32 + 33 + /* 34 + * Although DMA addresses should be obtained using the DMA API, in cases when 35 + * it is known that the first argument holds a virtual address that points to 36 + * DMA-able 31 bit addressable storage, then this function can be safely used. 
37 + */ 38 + static inline dma32_t virt_to_dma32(void *ptr) 39 + { 40 + return (__force dma32_t)__pa32(ptr); 41 + } 42 + 43 + static inline void *dma32_to_virt(dma32_t addr) 44 + { 45 + return __va((__force unsigned long)addr); 46 + } 47 + 48 + static inline dma32_t u32_to_dma32(u32 addr) 49 + { 50 + return (__force dma32_t)addr; 51 + } 52 + 53 + static inline u32 dma32_to_u32(dma32_t addr) 54 + { 55 + return (__force u32)addr; 56 + } 57 + 58 + static inline dma32_t dma32_add(dma32_t a, u32 b) 59 + { 60 + return (__force dma32_t)((__force u32)a + b); 61 + } 62 + 63 + static inline dma32_t dma32_and(dma32_t a, u32 b) 64 + { 65 + return (__force dma32_t)((__force u32)a & b); 66 + } 67 + 68 + /* 69 + * Although DMA addresses should be obtained using the DMA API, in cases when 70 + * it is known that the first argument holds a virtual address that points to 71 + * DMA-able storage, then this function can be safely used. 72 + */ 73 + static inline dma64_t virt_to_dma64(void *ptr) 74 + { 75 + return (__force dma64_t)__pa(ptr); 76 + } 77 + 78 + static inline void *dma64_to_virt(dma64_t addr) 79 + { 80 + return __va((__force unsigned long)addr); 81 + } 82 + 83 + static inline dma64_t u64_to_dma64(u64 addr) 84 + { 85 + return (__force dma64_t)addr; 86 + } 87 + 88 + static inline u64 dma64_to_u64(dma64_t addr) 89 + { 90 + return (__force u64)addr; 91 + } 92 + 93 + static inline dma64_t dma64_add(dma64_t a, u64 b) 94 + { 95 + return (__force dma64_t)((__force u64)a + b); 96 + } 97 + 98 + static inline dma64_t dma64_and(dma64_t a, u64 b) 99 + { 100 + return (__force dma64_t)((__force u64)a & b); 101 + } 102 + 103 + #endif /* _ASM_S390_DMA_TYPES_H_ */
+3 -2
arch/s390/include/asm/eadm.h
··· 5 5 #include <linux/types.h> 6 6 #include <linux/device.h> 7 7 #include <linux/blk_types.h> 8 + #include <asm/dma-types.h> 8 9 9 10 struct arqb { 10 11 u64 data; ··· 46 45 u16:12; 47 46 u16 bs:4; 48 47 u32 blk_count; 49 - u64 data_addr; 48 + dma64_t data_addr; 50 49 u64 scm_addr; 51 50 u64:64; 52 51 } __packed; ··· 55 54 u8 flags; 56 55 u32 :24; 57 56 u32 :32; 58 - u64 data_addr; 57 + dma64_t data_addr; 59 58 } __packed; 60 59 61 60 #define MSB_OC_CLEAR 0
+7 -6
arch/s390/include/asm/fcx.h
··· 10 10 #define _ASM_S390_FCX_H 11 11 12 12 #include <linux/types.h> 13 + #include <asm/dma-types.h> 13 14 14 15 #define TCW_FORMAT_DEFAULT 0 15 16 #define TCW_TIDAW_FORMAT_DEFAULT 0 ··· 44 43 u32 r:1; 45 44 u32 w:1; 46 45 u32 :16; 47 - u64 output; 48 - u64 input; 49 - u64 tsb; 50 - u64 tccb; 46 + dma64_t output; 47 + dma64_t input; 48 + dma64_t tsb; 49 + dma64_t tccb; 51 50 u32 output_count; 52 51 u32 input_count; 53 52 u32 :32; 54 53 u32 :32; 55 54 u32 :32; 56 - u32 intrg; 55 + dma32_t intrg; 57 56 } __attribute__ ((packed, aligned(64))); 58 57 59 58 #define TIDAW_FLAGS_LAST (1 << (7 - 0)) ··· 74 73 u32 flags:8; 75 74 u32 :24; 76 75 u32 count; 77 - u64 addr; 76 + dma64_t addr; 78 77 } __attribute__ ((packed, aligned(16))); 79 78 80 79 /**
+96 -80
arch/s390/include/asm/idals.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 2 + /* 3 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 4 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 5 * Bugreports.to..: <Linux390@de.ibm.com> ··· 17 17 #include <linux/err.h> 18 18 #include <linux/types.h> 19 19 #include <linux/slab.h> 20 - #include <asm/cio.h> 21 20 #include <linux/uaccess.h> 21 + #include <asm/dma-types.h> 22 + #include <asm/cio.h> 22 23 23 - #define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ 24 - #define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG) 24 + #define IDA_SIZE_SHIFT 12 25 + #define IDA_BLOCK_SIZE (1UL << IDA_SIZE_SHIFT) 25 26 26 - #define IDA_2K_SIZE_LOG 11 27 - #define IDA_2K_BLOCK_SIZE (1L << IDA_2K_SIZE_LOG) 27 + #define IDA_2K_SIZE_SHIFT 11 28 + #define IDA_2K_BLOCK_SIZE (1UL << IDA_2K_SIZE_SHIFT) 28 29 29 30 /* 30 31 * Test if an address/length pair needs an idal list. 31 32 */ 32 - static inline int 33 - idal_is_needed(void *vaddr, unsigned int length) 33 + static inline bool idal_is_needed(void *vaddr, unsigned int length) 34 34 { 35 - return ((__pa(vaddr) + length - 1) >> 31) != 0; 36 - } 35 + dma64_t paddr = virt_to_dma64(vaddr); 37 36 37 + return (((__force unsigned long)(paddr) + length - 1) >> 31) != 0; 38 + } 38 39 39 40 /* 40 41 * Return the number of idal words needed for an address/length pair. 
41 42 */ 42 43 static inline unsigned int idal_nr_words(void *vaddr, unsigned int length) 43 44 { 44 - return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length + 45 - (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG; 45 + unsigned int cidaw; 46 + 47 + cidaw = (unsigned long)vaddr & (IDA_BLOCK_SIZE - 1); 48 + cidaw += length + IDA_BLOCK_SIZE - 1; 49 + cidaw >>= IDA_SIZE_SHIFT; 50 + return cidaw; 46 51 } 47 52 48 53 /* ··· 55 50 */ 56 51 static inline unsigned int idal_2k_nr_words(void *vaddr, unsigned int length) 57 52 { 58 - return ((__pa(vaddr) & (IDA_2K_BLOCK_SIZE - 1)) + length + 59 - (IDA_2K_BLOCK_SIZE - 1)) >> IDA_2K_SIZE_LOG; 53 + unsigned int cidaw; 54 + 55 + cidaw = (unsigned long)vaddr & (IDA_2K_BLOCK_SIZE - 1); 56 + cidaw += length + IDA_2K_BLOCK_SIZE - 1; 57 + cidaw >>= IDA_2K_SIZE_SHIFT; 58 + return cidaw; 60 59 } 61 60 62 61 /* 63 62 * Create the list of idal words for an address/length pair. 64 63 */ 65 - static inline unsigned long *idal_create_words(unsigned long *idaws, 66 - void *vaddr, unsigned int length) 64 + static inline dma64_t *idal_create_words(dma64_t *idaws, void *vaddr, unsigned int length) 67 65 { 68 - unsigned long paddr; 66 + dma64_t paddr = virt_to_dma64(vaddr); 69 67 unsigned int cidaw; 70 68 71 - paddr = __pa(vaddr); 72 - cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length + 73 - (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG; 74 69 *idaws++ = paddr; 75 - paddr &= -IDA_BLOCK_SIZE; 70 + cidaw = idal_nr_words(vaddr, length); 71 + paddr = dma64_and(paddr, -IDA_BLOCK_SIZE); 76 72 while (--cidaw > 0) { 77 - paddr += IDA_BLOCK_SIZE; 73 + paddr = dma64_add(paddr, IDA_BLOCK_SIZE); 78 74 *idaws++ = paddr; 79 75 } 80 76 return idaws; ··· 85 79 * Sets the address of the data in CCW. 86 80 * If necessary it allocates an IDAL and sets the appropriate flags. 
87 81 */ 88 - static inline int 89 - set_normalized_cda(struct ccw1 * ccw, void *vaddr) 82 + static inline int set_normalized_cda(struct ccw1 *ccw, void *vaddr) 90 83 { 91 84 unsigned int nridaws; 92 - unsigned long *idal; 85 + dma64_t *idal; 93 86 94 87 if (ccw->flags & CCW_FLAG_IDA) 95 88 return -EINVAL; 96 89 nridaws = idal_nr_words(vaddr, ccw->count); 97 90 if (nridaws > 0) { 98 - idal = kmalloc(nridaws * sizeof(unsigned long), 99 - GFP_ATOMIC | GFP_DMA ); 100 - if (idal == NULL) 91 + idal = kcalloc(nridaws, sizeof(*idal), GFP_ATOMIC | GFP_DMA); 92 + if (!idal) 101 93 return -ENOMEM; 102 94 idal_create_words(idal, vaddr, ccw->count); 103 95 ccw->flags |= CCW_FLAG_IDA; 104 96 vaddr = idal; 105 97 } 106 - ccw->cda = (__u32)(unsigned long) vaddr; 98 + ccw->cda = virt_to_dma32(vaddr); 107 99 return 0; 108 100 } 109 101 110 102 /* 111 103 * Releases any allocated IDAL related to the CCW. 112 104 */ 113 - static inline void 114 - clear_normalized_cda(struct ccw1 * ccw) 105 + static inline void clear_normalized_cda(struct ccw1 *ccw) 115 106 { 116 107 if (ccw->flags & CCW_FLAG_IDA) { 117 - kfree((void *)(unsigned long) ccw->cda); 108 + kfree(dma32_to_virt(ccw->cda)); 118 109 ccw->flags &= ~CCW_FLAG_IDA; 119 110 } 120 111 ccw->cda = 0; ··· 123 120 struct idal_buffer { 124 121 size_t size; 125 122 size_t page_order; 126 - void *data[]; 123 + dma64_t data[]; 127 124 }; 128 125 129 126 /* 130 127 * Allocate an idal buffer 131 128 */ 132 - static inline struct idal_buffer * 133 - idal_buffer_alloc(size_t size, int page_order) 129 + static inline struct idal_buffer *idal_buffer_alloc(size_t size, int page_order) 134 130 { 135 - struct idal_buffer *ib; 136 131 int nr_chunks, nr_ptrs, i; 132 + struct idal_buffer *ib; 133 + void *vaddr; 137 134 138 - nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG; 139 - nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG; 135 + nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT; 136 + nr_chunks = (PAGE_SIZE << page_order) >> 
IDA_SIZE_SHIFT; 140 137 ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL); 141 - if (ib == NULL) 138 + if (!ib) 142 139 return ERR_PTR(-ENOMEM); 143 140 ib->size = size; 144 141 ib->page_order = page_order; 145 142 for (i = 0; i < nr_ptrs; i++) { 146 - if ((i & (nr_chunks - 1)) != 0) { 147 - ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE; 143 + if (i & (nr_chunks - 1)) { 144 + ib->data[i] = dma64_add(ib->data[i - 1], IDA_BLOCK_SIZE); 148 145 continue; 149 146 } 150 - ib->data[i] = (void *) 151 - __get_free_pages(GFP_KERNEL, page_order); 152 - if (ib->data[i] != NULL) 153 - continue; 154 - // Not enough memory 155 - while (i >= nr_chunks) { 156 - i -= nr_chunks; 157 - free_pages((unsigned long) ib->data[i], 158 - ib->page_order); 159 - } 160 - kfree(ib); 161 - return ERR_PTR(-ENOMEM); 147 + vaddr = (void *)__get_free_pages(GFP_KERNEL, page_order); 148 + if (!vaddr) 149 + goto error; 150 + ib->data[i] = virt_to_dma64(vaddr); 162 151 } 163 152 return ib; 153 + error: 154 + while (i >= nr_chunks) { 155 + i -= nr_chunks; 156 + vaddr = dma64_to_virt(ib->data[i]); 157 + free_pages((unsigned long)vaddr, ib->page_order); 158 + } 159 + kfree(ib); 160 + return ERR_PTR(-ENOMEM); 164 161 } 165 162 166 163 /* 167 164 * Free an idal buffer. 
168 165 */ 169 - static inline void 170 - idal_buffer_free(struct idal_buffer *ib) 166 + static inline void idal_buffer_free(struct idal_buffer *ib) 171 167 { 172 168 int nr_chunks, nr_ptrs, i; 169 + void *vaddr; 173 170 174 - nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG; 175 - nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG; 176 - for (i = 0; i < nr_ptrs; i += nr_chunks) 177 - free_pages((unsigned long) ib->data[i], ib->page_order); 171 + nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT; 172 + nr_chunks = (PAGE_SIZE << ib->page_order) >> IDA_SIZE_SHIFT; 173 + for (i = 0; i < nr_ptrs; i += nr_chunks) { 174 + vaddr = dma64_to_virt(ib->data[i]); 175 + free_pages((unsigned long)vaddr, ib->page_order); 176 + } 178 177 kfree(ib); 179 178 } 180 179 181 180 /* 182 181 * Test if a idal list is really needed. 183 182 */ 184 - static inline int 185 - __idal_buffer_is_needed(struct idal_buffer *ib) 183 + static inline bool __idal_buffer_is_needed(struct idal_buffer *ib) 186 184 { 187 - return ib->size > (4096ul << ib->page_order) || 188 - idal_is_needed(ib->data[0], ib->size); 185 + if (ib->size > (PAGE_SIZE << ib->page_order)) 186 + return true; 187 + return idal_is_needed(dma64_to_virt(ib->data[0]), ib->size); 189 188 } 190 189 191 190 /* 192 191 * Set channel data address to idal buffer. 193 192 */ 194 - static inline void 195 - idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw) 193 + static inline void idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw) 196 194 { 195 + void *vaddr; 196 + 197 197 if (__idal_buffer_is_needed(ib)) { 198 - // setup idals; 199 - ccw->cda = (u32)(addr_t) ib->data; 198 + /* Setup idals */ 199 + ccw->cda = virt_to_dma32(ib->data); 200 200 ccw->flags |= CCW_FLAG_IDA; 201 - } else 202 - // we do not need idals - use direct addressing 203 - ccw->cda = (u32)(addr_t) ib->data[0]; 201 + } else { 202 + /* 203 + * No idals needed - use direct addressing. 
Convert from 204 + * dma64_t to virt and then to dma32_t only because of type 205 + * checking. The physical address is known to be below 2GB. 206 + */ 207 + vaddr = dma64_to_virt(ib->data[0]); 208 + ccw->cda = virt_to_dma32(vaddr); 209 + } 204 210 ccw->count = ib->size; 205 211 } 206 212 207 213 /* 208 214 * Copy count bytes from an idal buffer to user memory 209 215 */ 210 - static inline size_t 211 - idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count) 216 + static inline size_t idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count) 212 217 { 213 218 size_t left; 219 + void *vaddr; 214 220 int i; 215 221 216 222 BUG_ON(count > ib->size); 217 223 for (i = 0; count > IDA_BLOCK_SIZE; i++) { 218 - left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE); 224 + vaddr = dma64_to_virt(ib->data[i]); 225 + left = copy_to_user(to, vaddr, IDA_BLOCK_SIZE); 219 226 if (left) 220 227 return left + count - IDA_BLOCK_SIZE; 221 - to = (void __user *) to + IDA_BLOCK_SIZE; 228 + to = (void __user *)to + IDA_BLOCK_SIZE; 222 229 count -= IDA_BLOCK_SIZE; 223 230 } 224 - return copy_to_user(to, ib->data[i], count); 231 + vaddr = dma64_to_virt(ib->data[i]); 232 + return copy_to_user(to, vaddr, count); 225 233 } 226 234 227 235 /* 228 236 * Copy count bytes from user memory to an idal buffer 229 237 */ 230 - static inline size_t 231 - idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count) 238 + static inline size_t idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count) 232 239 { 233 240 size_t left; 241 + void *vaddr; 234 242 int i; 235 243 236 244 BUG_ON(count > ib->size); 237 245 for (i = 0; count > IDA_BLOCK_SIZE; i++) { 238 - left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE); 246 + vaddr = dma64_to_virt(ib->data[i]); 247 + left = copy_from_user(vaddr, from, IDA_BLOCK_SIZE); 239 248 if (left) 240 249 return left + count - IDA_BLOCK_SIZE; 241 - from = (void __user *) from + 
IDA_BLOCK_SIZE; 250 + from = (void __user *)from + IDA_BLOCK_SIZE; 242 251 count -= IDA_BLOCK_SIZE; 243 252 } 244 - return copy_from_user(ib->data[i], from, count); 253 + vaddr = dma64_to_virt(ib->data[i]); 254 + return copy_from_user(vaddr, from, count); 245 255 } 246 256 247 257 #endif
+28 -2
arch/s390/include/asm/page.h
··· 181 181 #define __PAGE_OFFSET 0x0UL 182 182 #define PAGE_OFFSET 0x0UL 183 183 184 - #define __pa(x) ((unsigned long)(x)) 184 + #define __pa_nodebug(x) ((unsigned long)(x)) 185 + 186 + #ifdef __DECOMPRESSOR 187 + 188 + #define __pa(x) __pa_nodebug(x) 189 + #define __pa32(x) __pa(x) 185 190 #define __va(x) ((void *)(unsigned long)(x)) 191 + 192 + #else /* __DECOMPRESSOR */ 193 + 194 + #ifdef CONFIG_DEBUG_VIRTUAL 195 + 196 + unsigned long __phys_addr(unsigned long x, bool is_31bit); 197 + 198 + #else /* CONFIG_DEBUG_VIRTUAL */ 199 + 200 + static inline unsigned long __phys_addr(unsigned long x, bool is_31bit) 201 + { 202 + return __pa_nodebug(x); 203 + } 204 + 205 + #endif /* CONFIG_DEBUG_VIRTUAL */ 206 + 207 + #define __pa(x) __phys_addr((unsigned long)(x), false) 208 + #define __pa32(x) __phys_addr((unsigned long)(x), true) 209 + #define __va(x) ((void *)(unsigned long)(x)) 210 + 211 + #endif /* __DECOMPRESSOR */ 186 212 187 213 #define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) 188 214 #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) ··· 231 205 #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) 232 206 #define page_to_virt(page) pfn_to_virt(page_to_pfn(page)) 233 207 234 - #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) 208 + #define virt_addr_valid(kaddr) pfn_valid(phys_to_pfn(__pa_nodebug(kaddr))) 235 209 236 210 #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC 237 211
+2
arch/s390/include/asm/processor.h
··· 14 14 15 15 #include <linux/bits.h> 16 16 17 + #define CIF_SIE 0 /* CPU needs SIE exit cleanup */ 17 18 #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ 18 19 #define CIF_ENABLED_WAIT 5 /* in enabled wait state */ 19 20 #define CIF_MCCK_GUEST 6 /* machine check happening in guest */ 20 21 #define CIF_DEDICATED_CPU 7 /* this CPU is dedicated */ 21 22 23 + #define _CIF_SIE BIT(CIF_SIE) 22 24 #define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY) 23 25 #define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT) 24 26 #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
-2
arch/s390/include/asm/ptrace.h
··· 14 14 #define PIF_SYSCALL 0 /* inside a system call */ 15 15 #define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */ 16 16 #define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */ 17 - #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */ 18 17 #define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */ 19 18 20 19 #define _PIF_SYSCALL BIT(PIF_SYSCALL) 21 20 #define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART) 22 21 #define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET) 23 - #define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT) 24 22 #define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS) 25 23 26 24 #define PSW32_MASK_PER _AC(0x40000000, UL)
+9 -8
arch/s390/include/asm/qdio.h
··· 9 9 #define __QDIO_H__ 10 10 11 11 #include <linux/interrupt.h> 12 - #include <asm/cio.h> 12 + #include <asm/dma-types.h> 13 13 #include <asm/ccwdev.h> 14 + #include <asm/cio.h> 14 15 15 16 /* only use 4 queues to save some cachelines */ 16 17 #define QDIO_MAX_QUEUES_PER_IRQ 4 ··· 35 34 * @dkey: access key for SLSB 36 35 */ 37 36 struct qdesfmt0 { 38 - u64 sliba; 39 - u64 sla; 40 - u64 slsba; 37 + dma64_t sliba; 38 + dma64_t sla; 39 + dma64_t slsba; 41 40 u32 : 32; 42 41 u32 akey : 4; 43 42 u32 bkey : 4; ··· 75 74 /* private: */ 76 75 u32 res[9]; 77 76 /* public: */ 78 - u64 qiba; 77 + dma64_t qiba; 79 78 u32 : 32; 80 79 u32 qkey : 4; 81 80 u32 : 28; ··· 147 146 u8 flags; 148 147 u16 cbtbs; 149 148 u8 sb_count; 150 - u64 sba[QDIO_MAX_ELEMENTS_PER_BUFFER]; 149 + dma64_t sba[QDIO_MAX_ELEMENTS_PER_BUFFER]; 151 150 u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER]; 152 151 u64 user0; 153 152 u64 res4[2]; ··· 209 208 u8 scount; 210 209 u8 sflags; 211 210 u32 length; 212 - u64 addr; 211 + dma64_t addr; 213 212 } __attribute__ ((packed, aligned(16))); 214 213 215 214 /** ··· 225 224 * @sbal: absolute SBAL address 226 225 */ 227 226 struct sl_element { 228 - u64 sbal; 227 + dma64_t sbal; 229 228 } __attribute__ ((packed)); 230 229 231 230 /**
+4 -3
arch/s390/include/asm/scsw.h
··· 11 11 12 12 #include <linux/types.h> 13 13 #include <asm/css_chars.h> 14 + #include <asm/dma-types.h> 14 15 #include <asm/cio.h> 15 16 16 17 /** ··· 54 53 __u32 fctl : 3; 55 54 __u32 actl : 7; 56 55 __u32 stctl : 5; 57 - __u32 cpa; 56 + dma32_t cpa; 58 57 __u32 dstat : 8; 59 58 __u32 cstat : 8; 60 59 __u32 count : 16; ··· 94 93 u32 fctl:3; 95 94 u32 actl:7; 96 95 u32 stctl:5; 97 - u32 tcw; 96 + dma32_t tcw; 98 97 u32 dstat:8; 99 98 u32 cstat:8; 100 99 u32 fcxs:8; ··· 126 125 u32 fctl:3; 127 126 u32 actl:7; 128 127 u32 stctl:5; 129 - u32 aob; 128 + dma32_t aob; 130 129 u32 dstat:8; 131 130 u32 cstat:8; 132 131 u32:16;
+31 -42
arch/s390/kernel/entry.S
··· 119 119 .endm 120 120 121 121 #if IS_ENABLED(CONFIG_KVM) 122 - /* 123 - * The OUTSIDE macro jumps to the provided label in case the value 124 - * in the provided register is outside of the provided range. The 125 - * macro is useful for checking whether a PSW stored in a register 126 - * pair points inside or outside of a block of instructions. 127 - * @reg: register to check 128 - * @start: start of the range 129 - * @end: end of the range 130 - * @outside_label: jump here if @reg is outside of [@start..@end) 131 - */ 132 - .macro OUTSIDE reg,start,end,outside_label 133 - lgr %r14,\reg 134 - larl %r13,\start 135 - slgr %r14,%r13 136 - clgfrl %r14,.Lrange_size\@ 137 - jhe \outside_label 138 - .section .rodata, "a" 139 - .balign 4 140 - .Lrange_size\@: 141 - .long \end - \start 142 - .previous 143 - .endm 144 - 145 - .macro SIEEXIT 146 - lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer 122 + .macro SIEEXIT sie_control 123 + lg %r9,\sie_control # get control block pointer 147 124 ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE 148 125 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce 126 + ni __LC_CPU_FLAGS+7,255-_CIF_SIE 149 127 larl %r9,sie_exit # skip forward to sie_exit 150 128 .endm 151 129 #endif ··· 192 214 lg %r14,__LC_GMAP # get gmap pointer 193 215 ltgr %r14,%r14 194 216 jz .Lsie_gmap 217 + oi __LC_CPU_FLAGS+7,_CIF_SIE 195 218 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 196 219 .Lsie_gmap: 197 220 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer ··· 213 234 lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer 214 235 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 215 236 lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce 216 - .Lsie_done: 237 + ni __LC_CPU_FLAGS+7,255-_CIF_SIE 217 238 # some program checks are suppressing. C code (e.g. do_protection_exception) 218 239 # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There 219 240 # are some corner cases (e.g. 
runtime instrumentation) where ILC is unpredictable. ··· 316 337 stpt __LC_SYS_ENTER_TIMER 317 338 BPOFF 318 339 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 319 - lghi %r10,0 340 + lgr %r10,%r15 320 341 lmg %r8,%r9,__LC_PGM_OLD_PSW 321 342 tmhh %r8,0x0001 # coming from user space? 322 343 jno .Lpgm_skip_asce 323 344 lctlg %c1,%c1,__LC_KERNEL_ASCE 324 345 j 3f # -> fault in user space 325 346 .Lpgm_skip_asce: 326 - #if IS_ENABLED(CONFIG_KVM) 327 - # cleanup critical section for program checks in __sie64a 328 - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f 329 - BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST 330 - SIEEXIT 331 - lghi %r10,_PIF_GUEST_FAULT 332 - #endif 333 347 1: tmhh %r8,0x4000 # PER bit set in old PSW ? 334 348 jnz 2f # -> enabled, can't be a double fault 335 349 tm __LC_PGM_ILC+3,0x80 # check for per exception ··· 333 361 CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f 334 362 3: lg %r15,__LC_KERNEL_STACK 335 363 4: la %r11,STACK_FRAME_OVERHEAD(%r15) 336 - stg %r10,__PT_FLAGS(%r11) 364 + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 337 365 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 338 366 stmg %r0,%r7,__PT_R0(%r11) 339 367 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 340 368 mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK 341 - stmg %r8,%r9,__PT_PSW(%r11) 342 - 369 + stctg %c1,%c1,__PT_CR1(%r11) 370 + #if IS_ENABLED(CONFIG_KVM) 371 + lg %r12,__LC_GMAP 372 + clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11) 373 + jne 5f 374 + BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST 375 + SIEEXIT __SF_SIE_CONTROL(%r10) 376 + #endif 377 + 5: stmg %r8,%r9,__PT_PSW(%r11) 343 378 # clear user controlled registers to prevent speculative use 344 379 xgr %r0,%r0 345 380 xgr %r1,%r1 ··· 395 416 tmhh %r8,0x0001 # interrupting from user ? 
396 417 jnz 1f 397 418 #if IS_ENABLED(CONFIG_KVM) 398 - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f 419 + TSTMSK __LC_CPU_FLAGS,_CIF_SIE 420 + jz 0f 399 421 BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST 400 - SIEEXIT 422 + SIEEXIT __SF_SIE_CONTROL(%r15) 401 423 #endif 402 424 0: CHECK_STACK __LC_SAVE_AREA_ASYNC 403 425 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) ··· 493 513 TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID 494 514 jno .Lmcck_panic 495 515 #if IS_ENABLED(CONFIG_KVM) 496 - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_user 497 - OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f 516 + TSTMSK __LC_CPU_FLAGS,_CIF_SIE 517 + jz .Lmcck_user 518 + # Need to compare the address instead of a CIF_SIE* flag. 519 + # Otherwise there would be a race between setting the flag 520 + # and entering SIE (or leaving and clearing the flag). This 521 + # would cause machine checks targeted at the guest to be 522 + # handled by the host. 523 + larl %r14,.Lsie_entry 524 + clgrjl %r9,%r14, 4f 525 + larl %r14,.Lsie_leave 526 + clgrjhe %r9,%r14, 4f 498 527 oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 499 528 4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST 500 - SIEEXIT 529 + SIEEXIT __SF_SIE_CONTROL(%r15) 501 530 #endif 502 531 .Lmcck_user: 503 532 lg %r15,__LC_MCCK_STACK
+1 -1
arch/s390/kernel/sysinfo.c
··· 397 397 { 398 398 char *query_buffer, *str; 399 399 400 - query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA); 400 + query_buffer = kmalloc(1024, GFP_KERNEL); 401 401 if (!query_buffer) 402 402 return; 403 403 cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL);
+2 -2
arch/s390/kernel/vtime.c
··· 210 210 virt_timer_expire(); 211 211 212 212 steal = S390_lowcore.steal_timer; 213 - avg_steal = S390_lowcore.avg_steal_timer / 2; 213 + avg_steal = S390_lowcore.avg_steal_timer; 214 214 if ((s64) steal > 0) { 215 215 S390_lowcore.steal_timer = 0; 216 216 account_steal_time(cputime_to_nsecs(steal)); 217 217 avg_steal += steal; 218 218 } 219 - S390_lowcore.avg_steal_timer = avg_steal; 219 + S390_lowcore.avg_steal_timer = avg_steal / 2; 220 220 } 221 221 222 222 static u64 vtime_delta(void)
+1
arch/s390/mm/Makefile
··· 7 7 obj-y += page-states.o pageattr.o pgtable.o pgalloc.o extable.o 8 8 9 9 obj-$(CONFIG_CMM) += cmm.o 10 + obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o 10 11 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 11 12 obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o 12 13 obj-$(CONFIG_PGSTE) += gmap.o
+3 -1
arch/s390/mm/fault.c
··· 67 67 static enum fault_type get_fault_type(struct pt_regs *regs) 68 68 { 69 69 union teid teid = { .val = regs->int_parm_long }; 70 + struct gmap *gmap; 70 71 71 72 if (likely(teid.as == PSW_BITS_AS_PRIMARY)) { 72 73 if (user_mode(regs)) 73 74 return USER_FAULT; 74 75 if (!IS_ENABLED(CONFIG_PGSTE)) 75 76 return KERNEL_FAULT; 76 - if (test_pt_regs_flag(regs, PIF_GUEST_FAULT)) 77 + gmap = (struct gmap *)S390_lowcore.gmap; 78 + if (regs->cr1 == gmap->asce) 77 79 return GMAP_FAULT; 78 80 return KERNEL_FAULT; 79 81 }
+15
arch/s390/mm/physaddr.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/mmdebug.h> 3 + #include <linux/export.h> 4 + #include <linux/mm.h> 5 + #include <asm/page.h> 6 + 7 + unsigned long __phys_addr(unsigned long x, bool is_31bit) 8 + { 9 + VIRTUAL_BUG_ON(is_vmalloc_or_module_addr((void *)(x))); 10 + x = __pa_nodebug(x); 11 + if (is_31bit) 12 + VIRTUAL_BUG_ON(x >> 31); 13 + return x; 14 + } 15 + EXPORT_SYMBOL(__phys_addr);
+2 -2
drivers/s390/block/dasd.c
··· 3976 3976 3977 3977 ccw = cqr->cpaddr; 3978 3978 ccw->cmd_code = CCW_CMD_RDC; 3979 - ccw->cda = (__u32)virt_to_phys(cqr->data); 3979 + ccw->cda = virt_to_dma32(cqr->data); 3980 3980 ccw->flags = 0; 3981 3981 ccw->count = rdc_buffer_size; 3982 3982 cqr->startdev = device; ··· 4020 4020 4021 4021 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 4022 4022 if (irb->scsw.tm.tcw) 4023 - tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw)); 4023 + tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw)); 4024 4024 if (tsb && tsb->length == 64 && tsb->flags) 4025 4025 switch (tsb->flags & 0x07) { 4026 4026 case 1: /* tsa_iostat */
+7 -7
drivers/s390/block/dasd_3990_erp.c
··· 216 216 memset(ccw, 0, sizeof(struct ccw1)); 217 217 ccw->cmd_code = CCW_CMD_DCTL; 218 218 ccw->count = 4; 219 - ccw->cda = (__u32)virt_to_phys(DCTL_data); 219 + ccw->cda = virt_to_dma32(DCTL_data); 220 220 dctl_cqr->flags = erp->flags; 221 221 dctl_cqr->function = dasd_3990_erp_DCTL; 222 222 dctl_cqr->refers = erp; ··· 1589 1589 { 1590 1590 1591 1591 struct dasd_device *device = default_erp->startdev; 1592 - __u32 cpa = 0; 1592 + dma32_t cpa = 0; 1593 1593 struct dasd_ccw_req *cqr; 1594 1594 struct dasd_ccw_req *erp; 1595 1595 struct DE_eckd_data *DE_data; ··· 1693 1693 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; 1694 1694 ccw->flags = CCW_FLAG_CC; 1695 1695 ccw->count = 16; 1696 - ccw->cda = (__u32)virt_to_phys(DE_data); 1696 + ccw->cda = virt_to_dma32(DE_data); 1697 1697 1698 1698 /* create LO ccw */ 1699 1699 ccw++; ··· 1701 1701 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; 1702 1702 ccw->flags = CCW_FLAG_CC; 1703 1703 ccw->count = 16; 1704 - ccw->cda = (__u32)virt_to_phys(LO_data); 1704 + ccw->cda = virt_to_dma32(LO_data); 1705 1705 1706 1706 /* TIC to the failed ccw */ 1707 1707 ccw++; ··· 1747 1747 { 1748 1748 1749 1749 struct dasd_device *device = previous_erp->startdev; 1750 - __u32 cpa = 0; 1750 + dma32_t cpa = 0; 1751 1751 struct dasd_ccw_req *cqr; 1752 1752 struct dasd_ccw_req *erp; 1753 1753 char *LO_data; /* struct LO_eckd_data */ ··· 2386 2386 tcw = erp->cpaddr; 2387 2387 tsb = (struct tsb *) &tcw[1]; 2388 2388 *tcw = *((struct tcw *)cqr->cpaddr); 2389 - tcw->tsb = virt_to_phys(tsb); 2389 + tcw->tsb = virt_to_dma64(tsb); 2390 2390 } else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) { 2391 2391 /* PSF cannot be chained from NOOP/TIC */ 2392 2392 erp->cpaddr = cqr->cpaddr; ··· 2397 2397 ccw->flags = CCW_FLAG_CC; 2398 2398 ccw++; 2399 2399 ccw->cmd_code = CCW_CMD_TIC; 2400 - ccw->cda = (__u32)virt_to_phys(cqr->cpaddr); 2400 + ccw->cda = virt_to_dma32(cqr->cpaddr); 2401 2401 } 2402 2402 2403 2403 erp->flags = cqr->flags;
+3 -3
drivers/s390/block/dasd_alias.c
··· 435 435 ccw->cmd_code = DASD_ECKD_CCW_PSF; 436 436 ccw->count = sizeof(struct dasd_psf_prssd_data); 437 437 ccw->flags |= CCW_FLAG_CC; 438 - ccw->cda = (__u32)virt_to_phys(prssdp); 438 + ccw->cda = virt_to_dma32(prssdp); 439 439 440 440 /* Read Subsystem Data - feature codes */ 441 441 memset(lcu->uac, 0, sizeof(*(lcu->uac))); ··· 443 443 ccw++; 444 444 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 445 445 ccw->count = sizeof(*(lcu->uac)); 446 - ccw->cda = (__u32)virt_to_phys(lcu->uac); 446 + ccw->cda = virt_to_dma32(lcu->uac); 447 447 448 448 cqr->buildclk = get_tod_clock(); 449 449 cqr->status = DASD_CQR_FILLED; ··· 739 739 ccw->cmd_code = DASD_ECKD_CCW_RSCK; 740 740 ccw->flags = CCW_FLAG_SLI; 741 741 ccw->count = 16; 742 - ccw->cda = (__u32)virt_to_phys(cqr->data); 742 + ccw->cda = virt_to_dma32(cqr->data); 743 743 ((char *)cqr->data)[0] = reason; 744 744 745 745 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+58 -60
drivers/s390/block/dasd_eckd.c
··· 283 283 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT; 284 284 ccw->flags = 0; 285 285 ccw->count = 16; 286 - ccw->cda = (__u32)virt_to_phys(data); 286 + ccw->cda = virt_to_dma32(data); 287 287 } 288 288 289 289 memset(data, 0, sizeof(struct DE_eckd_data)); ··· 393 393 ccw->count = 22; 394 394 else 395 395 ccw->count = 20; 396 - ccw->cda = (__u32)virt_to_phys(data); 396 + ccw->cda = virt_to_dma32(data); 397 397 } 398 398 399 399 memset(data, 0, sizeof(*data)); ··· 539 539 ccw->flags = 0; 540 540 if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) { 541 541 ccw->count = sizeof(*pfxdata) + 2; 542 - ccw->cda = (__u32)virt_to_phys(pfxdata); 542 + ccw->cda = virt_to_dma32(pfxdata); 543 543 memset(pfxdata, 0, sizeof(*pfxdata) + 2); 544 544 } else { 545 545 ccw->count = sizeof(*pfxdata); 546 - ccw->cda = (__u32)virt_to_phys(pfxdata); 546 + ccw->cda = virt_to_dma32(pfxdata); 547 547 memset(pfxdata, 0, sizeof(*pfxdata)); 548 548 } 549 549 ··· 610 610 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD; 611 611 ccw->flags = 0; 612 612 ccw->count = 16; 613 - ccw->cda = (__u32)virt_to_phys(data); 613 + ccw->cda = virt_to_dma32(data); 614 614 615 615 memset(data, 0, sizeof(struct LO_eckd_data)); 616 616 sector = 0; ··· 825 825 ccw = cqr->cpaddr; 826 826 ccw->cmd_code = DASD_ECKD_CCW_RCD; 827 827 ccw->flags = 0; 828 - ccw->cda = (__u32)virt_to_phys(rcd_buffer); 828 + ccw->cda = virt_to_dma32(rcd_buffer); 829 829 ccw->count = DASD_ECKD_RCD_DATA_SIZE; 830 830 cqr->magic = DASD_ECKD_MAGIC; 831 831 ··· 853 853 854 854 if (cqr->status != DASD_CQR_DONE) { 855 855 ccw = cqr->cpaddr; 856 - rcd_buffer = phys_to_virt(ccw->cda); 856 + rcd_buffer = dma32_to_virt(ccw->cda); 857 857 memset(rcd_buffer, 0, sizeof(*rcd_buffer)); 858 858 859 859 rcd_buffer[0] = 0xE5; ··· 1534 1534 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1535 1535 ccw->count = sizeof(struct dasd_psf_prssd_data); 1536 1536 ccw->flags |= CCW_FLAG_CC; 1537 - ccw->cda = (__u32)virt_to_phys(prssdp); 1537 + ccw->cda = virt_to_dma32(prssdp); 1538 1538 1539 
1539 /* Read Subsystem Data - feature codes */ 1540 1540 features = (struct dasd_rssd_features *) (prssdp + 1); ··· 1543 1543 ccw++; 1544 1544 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 1545 1545 ccw->count = sizeof(struct dasd_rssd_features); 1546 - ccw->cda = (__u32)virt_to_phys(features); 1546 + ccw->cda = virt_to_dma32(features); 1547 1547 1548 1548 cqr->buildclk = get_tod_clock(); 1549 1549 cqr->status = DASD_CQR_FILLED; ··· 1603 1603 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1604 1604 ccw->count = sizeof(*prssdp); 1605 1605 ccw->flags |= CCW_FLAG_CC; 1606 - ccw->cda = (__u32)virt_to_phys(prssdp); 1606 + ccw->cda = virt_to_dma32(prssdp); 1607 1607 1608 1608 /* Read Subsystem Data - Volume Storage Query */ 1609 1609 vsq = (struct dasd_rssd_vsq *)(prssdp + 1); ··· 1613 1613 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 1614 1614 ccw->count = sizeof(*vsq); 1615 1615 ccw->flags |= CCW_FLAG_SLI; 1616 - ccw->cda = (__u32)virt_to_phys(vsq); 1616 + ccw->cda = virt_to_dma32(vsq); 1617 1617 1618 1618 cqr->buildclk = get_tod_clock(); 1619 1619 cqr->status = DASD_CQR_FILLED; ··· 1788 1788 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1789 1789 ccw->count = sizeof(*prssdp); 1790 1790 ccw->flags |= CCW_FLAG_CC; 1791 - ccw->cda = (__u32)virt_to_phys(prssdp); 1791 + ccw->cda = virt_to_dma32(prssdp); 1792 1792 1793 1793 lcq = (struct dasd_rssd_lcq *)(prssdp + 1); 1794 1794 memset(lcq, 0, sizeof(*lcq)); ··· 1797 1797 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 1798 1798 ccw->count = sizeof(*lcq); 1799 1799 ccw->flags |= CCW_FLAG_SLI; 1800 - ccw->cda = (__u32)virt_to_phys(lcq); 1800 + ccw->cda = virt_to_dma32(lcq); 1801 1801 1802 1802 cqr->buildclk = get_tod_clock(); 1803 1803 cqr->status = DASD_CQR_FILLED; ··· 1894 1894 } 1895 1895 ccw = cqr->cpaddr; 1896 1896 ccw->cmd_code = DASD_ECKD_CCW_PSF; 1897 - ccw->cda = (__u32)virt_to_phys(psf_ssc_data); 1897 + ccw->cda = virt_to_dma32(psf_ssc_data); 1898 1898 ccw->count = 66; 1899 1899 1900 1900 cqr->startdev = device; ··· 2250 2250 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 
2251 2251 ccw->flags = 0; 2252 2252 ccw->count = 8; 2253 - ccw->cda = (__u32)virt_to_phys(count_data); 2253 + ccw->cda = virt_to_dma32(count_data); 2254 2254 ccw++; 2255 2255 count_data++; 2256 2256 } ··· 2264 2264 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2265 2265 ccw->flags = 0; 2266 2266 ccw->count = 8; 2267 - ccw->cda = (__u32)virt_to_phys(count_data); 2267 + ccw->cda = virt_to_dma32(count_data); 2268 2268 2269 2269 cqr->block = NULL; 2270 2270 cqr->startdev = device; ··· 2635 2635 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2636 2636 ccw->flags = CCW_FLAG_SLI; 2637 2637 ccw->count = 8; 2638 - ccw->cda = (__u32)virt_to_phys(fmt_buffer); 2638 + ccw->cda = virt_to_dma32(fmt_buffer); 2639 2639 ccw++; 2640 2640 fmt_buffer++; 2641 2641 } ··· 2845 2845 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 2846 2846 ccw->flags = CCW_FLAG_SLI; 2847 2847 ccw->count = 8; 2848 - ccw->cda = (__u32)virt_to_phys(ect); 2848 + ccw->cda = virt_to_dma32(ect); 2849 2849 ccw++; 2850 2850 } 2851 2851 if ((intensity & ~0x08) & 0x04) { /* erase track */ ··· 2860 2860 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 2861 2861 ccw->flags = CCW_FLAG_SLI; 2862 2862 ccw->count = 8; 2863 - ccw->cda = (__u32)virt_to_phys(ect); 2863 + ccw->cda = virt_to_dma32(ect); 2864 2864 } else { /* write remaining records */ 2865 2865 for (i = 0; i < rpt; i++) { 2866 2866 ect = (struct eckd_count *) data; ··· 2895 2895 DASD_ECKD_CCW_WRITE_CKD_MT; 2896 2896 ccw->flags = CCW_FLAG_SLI; 2897 2897 ccw->count = 8; 2898 - ccw->cda = (__u32)virt_to_phys(ect); 2898 + ccw->cda = virt_to_dma32(ect); 2899 2899 ccw++; 2900 2900 } 2901 2901 } ··· 3836 3836 } 3837 3837 3838 3838 ccw = cqr->cpaddr; 3839 - ccw->cda = (__u32)virt_to_phys(cqr->data); 3839 + ccw->cda = virt_to_dma32(cqr->data); 3840 3840 ccw->cmd_code = DASD_ECKD_CCW_DSO; 3841 3841 ccw->count = size; 3842 3842 ··· 3961 3961 unsigned int blksize) 3962 3962 { 3963 3963 struct dasd_eckd_private *private; 3964 - unsigned long *idaws; 3964 + dma64_t *idaws; 3965 3965 struct 
LO_eckd_data *LO_data; 3966 3966 struct dasd_ccw_req *cqr; 3967 3967 struct ccw1 *ccw; ··· 4039 4039 dasd_sfree_request(cqr, startdev); 4040 4040 return ERR_PTR(-EAGAIN); 4041 4041 } 4042 - idaws = (unsigned long *) (cqr->data + 4043 - sizeof(struct PFX_eckd_data)); 4042 + idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data)); 4044 4043 } else { 4045 4044 if (define_extent(ccw++, cqr->data, first_trk, 4046 4045 last_trk, cmd, basedev, 0) == -EAGAIN) { ··· 4049 4050 dasd_sfree_request(cqr, startdev); 4050 4051 return ERR_PTR(-EAGAIN); 4051 4052 } 4052 - idaws = (unsigned long *) (cqr->data + 4053 - sizeof(struct DE_eckd_data)); 4053 + idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data)); 4054 4054 } 4055 4055 /* Build locate_record+read/write/ccws. */ 4056 4056 LO_data = (struct LO_eckd_data *) (idaws + cidaw); ··· 4103 4105 ccw->cmd_code = rcmd; 4104 4106 ccw->count = count; 4105 4107 if (idal_is_needed(dst, blksize)) { 4106 - ccw->cda = (__u32)virt_to_phys(idaws); 4108 + ccw->cda = virt_to_dma32(idaws); 4107 4109 ccw->flags = CCW_FLAG_IDA; 4108 4110 idaws = idal_create_words(idaws, dst, blksize); 4109 4111 } else { 4110 - ccw->cda = (__u32)virt_to_phys(dst); 4112 + ccw->cda = virt_to_dma32(dst); 4111 4113 ccw->flags = 0; 4112 4114 } 4113 4115 ccw++; ··· 4150 4152 unsigned int blk_per_trk, 4151 4153 unsigned int blksize) 4152 4154 { 4153 - unsigned long *idaws; 4155 + dma64_t *idaws; 4154 4156 struct dasd_ccw_req *cqr; 4155 4157 struct ccw1 *ccw; 4156 4158 struct req_iterator iter; ··· 4220 4222 * (or 2K blocks on 31-bit) 4221 4223 * - the scope of a ccw and it's idal ends with the track boundaries 4222 4224 */ 4223 - idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); 4225 + idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data)); 4224 4226 recid = first_rec; 4225 4227 new_track = 1; 4226 4228 end_idaw = 0; ··· 4241 4243 ccw[-1].flags |= CCW_FLAG_CC; 4242 4244 ccw->cmd_code = cmd; 4243 4245 ccw->count = len_to_track_end; 
4244 - ccw->cda = (__u32)virt_to_phys(idaws); 4246 + ccw->cda = virt_to_dma32(idaws); 4245 4247 ccw->flags = CCW_FLAG_IDA; 4246 4248 ccw++; 4247 4249 recid += count; ··· 4257 4259 * idaw ends 4258 4260 */ 4259 4261 if (!idaw_dst) { 4260 - if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) { 4262 + if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) { 4261 4263 dasd_sfree_request(cqr, startdev); 4262 4264 return ERR_PTR(-ERANGE); 4263 4265 } else ··· 4277 4279 * idal_create_words will handle cases where idaw_len 4278 4280 * is larger then IDA_BLOCK_SIZE 4279 4281 */ 4280 - if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1))) 4282 + if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1))) 4281 4283 end_idaw = 1; 4282 4284 /* We also need to end the idaw at track end */ 4283 4285 if (!len_to_track_end) { ··· 4736 4738 struct req_iterator iter; 4737 4739 struct dasd_ccw_req *cqr; 4738 4740 unsigned int trkcount; 4739 - unsigned long *idaws; 4740 4741 unsigned int size; 4741 4742 unsigned char cmd; 4742 4743 struct bio_vec bv; 4743 4744 struct ccw1 *ccw; 4745 + dma64_t *idaws; 4744 4746 int use_prefix; 4745 4747 void *data; 4746 4748 char *dst; ··· 4821 4823 trkcount, cmd, basedev, 0, 0); 4822 4824 } 4823 4825 4824 - idaws = (unsigned long *)(cqr->data + size); 4826 + idaws = (dma64_t *)(cqr->data + size); 4825 4827 len_to_track_end = 0; 4826 4828 if (start_padding_sectors) { 4827 4829 ccw[-1].flags |= CCW_FLAG_CC; ··· 4830 4832 ccw->count = 57326; 4831 4833 /* 64k map to one track */ 4832 4834 len_to_track_end = 65536 - start_padding_sectors * 512; 4833 - ccw->cda = (__u32)virt_to_phys(idaws); 4835 + ccw->cda = virt_to_dma32(idaws); 4834 4836 ccw->flags |= CCW_FLAG_IDA; 4835 4837 ccw->flags |= CCW_FLAG_SLI; 4836 4838 ccw++; ··· 4849 4851 ccw->count = 57326; 4850 4852 /* 64k map to one track */ 4851 4853 len_to_track_end = 65536; 4852 - ccw->cda = (__u32)virt_to_phys(idaws); 4854 + ccw->cda = virt_to_dma32(idaws); 4853 4855 ccw->flags |= 
CCW_FLAG_IDA; 4854 4856 ccw->flags |= CCW_FLAG_SLI; 4855 4857 ccw++; ··· 4906 4908 ccw++; 4907 4909 if (dst) { 4908 4910 if (ccw->flags & CCW_FLAG_IDA) 4909 - cda = *((char **)phys_to_virt(ccw->cda)); 4911 + cda = *((char **)dma32_to_virt(ccw->cda)); 4910 4912 else 4911 - cda = phys_to_virt(ccw->cda); 4913 + cda = dma32_to_virt(ccw->cda); 4912 4914 if (dst != cda) { 4913 4915 if (rq_data_dir(req) == READ) 4914 4916 memcpy(dst, cda, bv.bv_len); ··· 5058 5060 ccw->cmd_code = DASD_ECKD_CCW_RELEASE; 5059 5061 ccw->flags |= CCW_FLAG_SLI; 5060 5062 ccw->count = 32; 5061 - ccw->cda = (__u32)virt_to_phys(cqr->data); 5063 + ccw->cda = virt_to_dma32(cqr->data); 5062 5064 cqr->startdev = device; 5063 5065 cqr->memdev = device; 5064 5066 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); ··· 5113 5115 ccw->cmd_code = DASD_ECKD_CCW_RESERVE; 5114 5116 ccw->flags |= CCW_FLAG_SLI; 5115 5117 ccw->count = 32; 5116 - ccw->cda = (__u32)virt_to_phys(cqr->data); 5118 + ccw->cda = virt_to_dma32(cqr->data); 5117 5119 cqr->startdev = device; 5118 5120 cqr->memdev = device; 5119 5121 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); ··· 5167 5169 ccw->cmd_code = DASD_ECKD_CCW_SLCK; 5168 5170 ccw->flags |= CCW_FLAG_SLI; 5169 5171 ccw->count = 32; 5170 - ccw->cda = (__u32)virt_to_phys(cqr->data); 5172 + ccw->cda = virt_to_dma32(cqr->data); 5171 5173 cqr->startdev = device; 5172 5174 cqr->memdev = device; 5173 5175 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); ··· 5228 5230 ccw->cmd_code = DASD_ECKD_CCW_SNID; 5229 5231 ccw->flags |= CCW_FLAG_SLI; 5230 5232 ccw->count = 12; 5231 - ccw->cda = (__u32)virt_to_phys(cqr->data); 5233 + ccw->cda = virt_to_dma32(cqr->data); 5232 5234 cqr->startdev = device; 5233 5235 cqr->memdev = device; 5234 5236 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); ··· 5295 5297 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5296 5298 ccw->count = sizeof(struct dasd_psf_prssd_data); 5297 5299 ccw->flags |= CCW_FLAG_CC; 5298 - ccw->cda = (__u32)virt_to_phys(prssdp); 5300 + ccw->cda = 
virt_to_dma32(prssdp); 5299 5301 5300 5302 /* Read Subsystem Data - Performance Statistics */ 5301 5303 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); ··· 5304 5306 ccw++; 5305 5307 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5306 5308 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 5307 - ccw->cda = (__u32)virt_to_phys(stats); 5309 + ccw->cda = virt_to_dma32(stats); 5308 5310 5309 5311 cqr->buildclk = get_tod_clock(); 5310 5312 cqr->status = DASD_CQR_FILLED; ··· 5448 5450 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5449 5451 ccw->count = usrparm.psf_data_len; 5450 5452 ccw->flags |= CCW_FLAG_CC; 5451 - ccw->cda = (__u32)virt_to_phys(psf_data); 5453 + ccw->cda = virt_to_dma32(psf_data); 5452 5454 5453 5455 ccw++; 5454 5456 ··· 5456 5458 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5457 5459 ccw->count = usrparm.rssd_result_len; 5458 5460 ccw->flags = CCW_FLAG_SLI ; 5459 - ccw->cda = (__u32)virt_to_phys(rssd_result); 5461 + ccw->cda = virt_to_dma32(rssd_result); 5460 5462 5461 5463 rc = dasd_sleep_on(cqr); 5462 5464 if (rc) ··· 5525 5527 5526 5528 /* get pointer to data (consider IDALs) */ 5527 5529 if (from->flags & CCW_FLAG_IDA) 5528 - datap = (char *)*((addr_t *)phys_to_virt(from->cda)); 5530 + datap = (char *)*((addr_t *)dma32_to_virt(from->cda)); 5529 5531 else 5530 - datap = phys_to_virt(from->cda); 5532 + datap = dma32_to_virt(from->cda); 5531 5533 5532 5534 /* dump data (max 128 bytes) */ 5533 5535 for (count = 0; count < from->count && count < 128; count++) { ··· 5596 5598 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5597 5599 req ? 
req->intrc : 0); 5598 5600 len += sprintf(page + len, "Failing CCW: %px\n", 5599 - phys_to_virt(irb->scsw.cmd.cpa)); 5601 + dma32_to_virt(irb->scsw.cmd.cpa)); 5600 5602 if (irb->esw.esw0.erw.cons) { 5601 5603 for (sl = 0; sl < 4; sl++) { 5602 5604 len += sprintf(page + len, "Sense(hex) %2d-%2d:", ··· 5639 5641 /* print failing CCW area (maximum 4) */ 5640 5642 /* scsw->cda is either valid or zero */ 5641 5643 from = ++to; 5642 - fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */ 5644 + fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */ 5643 5645 if (from < fail - 2) { 5644 5646 from = fail - 2; /* there is a gap - print header */ 5645 5647 dev_err(dev, "......\n"); ··· 5689 5691 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, 5690 5692 req ? req->intrc : 0); 5691 5693 len += sprintf(page + len, "Failing TCW: %px\n", 5692 - phys_to_virt(irb->scsw.tm.tcw)); 5694 + dma32_to_virt(irb->scsw.tm.tcw)); 5693 5695 5694 5696 tsb = NULL; 5695 5697 sense = NULL; 5696 5698 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) 5697 - tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw)); 5699 + tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw)); 5698 5700 5699 5701 if (tsb) { 5700 5702 len += sprintf(page + len, "tsb->length %d\n", tsb->length); ··· 5904 5906 ccw->count = sizeof(struct dasd_psf_prssd_data); 5905 5907 ccw->flags |= CCW_FLAG_CC; 5906 5908 ccw->flags |= CCW_FLAG_SLI; 5907 - ccw->cda = (__u32)virt_to_phys(prssdp); 5909 + ccw->cda = virt_to_dma32(prssdp); 5908 5910 5909 5911 /* Read Subsystem Data - message buffer */ 5910 5912 message_buf = (struct dasd_rssd_messages *) (prssdp + 1); ··· 5914 5916 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5915 5917 ccw->count = sizeof(struct dasd_rssd_messages); 5916 5918 ccw->flags |= CCW_FLAG_SLI; 5917 - ccw->cda = (__u32)virt_to_phys(message_buf); 5919 + ccw->cda = virt_to_dma32(message_buf); 5918 5920 5919 5921 cqr->buildclk = get_tod_clock(); 5920 5922 cqr->status = DASD_CQR_FILLED; ··· 5995 5997 ccw->count = sizeof(struct 
dasd_psf_prssd_data); 5996 5998 ccw->flags |= CCW_FLAG_CC; 5997 5999 ccw->flags |= CCW_FLAG_SLI; 5998 - ccw->cda = (__u32)virt_to_phys(prssdp); 6000 + ccw->cda = virt_to_dma32(prssdp); 5999 6001 6000 6002 /* Read Subsystem Data - query host access */ 6001 6003 ccw++; 6002 6004 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 6003 6005 ccw->count = sizeof(struct dasd_psf_query_host_access); 6004 6006 ccw->flags |= CCW_FLAG_SLI; 6005 - ccw->cda = (__u32)virt_to_phys(host_access); 6007 + ccw->cda = virt_to_dma32(host_access); 6006 6008 6007 6009 cqr->buildclk = get_tod_clock(); 6008 6010 cqr->status = DASD_CQR_FILLED; ··· 6237 6239 ccw->count = sizeof(struct dasd_psf_prssd_data); 6238 6240 ccw->flags |= CCW_FLAG_CC; 6239 6241 ccw->flags |= CCW_FLAG_SLI; 6240 - ccw->cda = (__u32)(addr_t)prssdp; 6242 + ccw->cda = virt_to_dma32(prssdp); 6241 6243 6242 6244 /* Read Subsystem Data - query host access */ 6243 6245 ccw++; 6244 6246 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 6245 6247 ccw->count = sizeof(*pprc_data); 6246 6248 ccw->flags |= CCW_FLAG_SLI; 6247 - ccw->cda = (__u32)(addr_t)pprc_data; 6249 + ccw->cda = virt_to_dma32(pprc_data); 6248 6250 6249 6251 cqr->buildclk = get_tod_clock(); 6250 6252 cqr->status = DASD_CQR_FILLED; ··· 6338 6340 psf_cuir->ssid = device->path[pos].ssid; 6339 6341 ccw = cqr->cpaddr; 6340 6342 ccw->cmd_code = DASD_ECKD_CCW_PSF; 6341 - ccw->cda = (__u32)virt_to_phys(psf_cuir); 6343 + ccw->cda = virt_to_dma32(psf_cuir); 6342 6344 ccw->flags = CCW_FLAG_SLI; 6343 6345 ccw->count = sizeof(struct dasd_psf_cuir_response); 6344 6346
+1 -1
drivers/s390/block/dasd_eer.c
··· 485 485 ccw->cmd_code = DASD_ECKD_CCW_SNSS; 486 486 ccw->count = SNSS_DATA_SIZE; 487 487 ccw->flags = 0; 488 - ccw->cda = (__u32)virt_to_phys(cqr->data); 488 + ccw->cda = virt_to_dma32(cqr->data); 489 489 490 490 cqr->buildclk = get_tod_clock(); 491 491 cqr->status = DASD_CQR_FILLED;
+16 -16
drivers/s390/block/dasd_fba.c
··· 78 78 ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT; 79 79 ccw->flags = 0; 80 80 ccw->count = 16; 81 - ccw->cda = (__u32)virt_to_phys(data); 81 + ccw->cda = virt_to_dma32(data); 82 82 memset(data, 0, sizeof (struct DE_fba_data)); 83 83 if (rw == WRITE) 84 84 (data->mask).perm = 0x0; ··· 98 98 ccw->cmd_code = DASD_FBA_CCW_LOCATE; 99 99 ccw->flags = 0; 100 100 ccw->count = 8; 101 - ccw->cda = (__u32)virt_to_phys(data); 101 + ccw->cda = virt_to_dma32(data); 102 102 memset(data, 0, sizeof (struct LO_fba_data)); 103 103 if (rw == WRITE) 104 104 data->operation.cmd = 0x5; ··· 257 257 ccw->cmd_code = DASD_FBA_CCW_WRITE; 258 258 ccw->flags |= CCW_FLAG_SLI; 259 259 ccw->count = count; 260 - ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page); 260 + ccw->cda = virt_to_dma32(dasd_fba_zero_page); 261 261 } 262 262 263 263 /* ··· 427 427 struct request *req) 428 428 { 429 429 struct dasd_fba_private *private = block->base->private; 430 - unsigned long *idaws; 430 + dma64_t *idaws; 431 431 struct LO_fba_data *LO_data; 432 432 struct dasd_ccw_req *cqr; 433 433 struct ccw1 *ccw; ··· 487 487 define_extent(ccw++, cqr->data, rq_data_dir(req), 488 488 block->bp_block, blk_rq_pos(req), blk_rq_sectors(req)); 489 489 /* Build locate_record + read/write ccws. */ 490 - idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 490 + idaws = (dma64_t *)(cqr->data + sizeof(struct DE_fba_data)); 491 491 LO_data = (struct LO_fba_data *) (idaws + cidaw); 492 492 /* Locate record for all blocks for smart devices. 
*/ 493 493 if (private->rdc_data.mode.bits.data_chain != 0) { ··· 523 523 ccw->cmd_code = cmd; 524 524 ccw->count = block->bp_block; 525 525 if (idal_is_needed(dst, blksize)) { 526 - ccw->cda = (__u32)virt_to_phys(idaws); 526 + ccw->cda = virt_to_dma32(idaws); 527 527 ccw->flags = CCW_FLAG_IDA; 528 528 idaws = idal_create_words(idaws, dst, blksize); 529 529 } else { 530 - ccw->cda = (__u32)virt_to_phys(dst); 530 + ccw->cda = virt_to_dma32(dst); 531 531 ccw->flags = 0; 532 532 } 533 533 ccw++; ··· 585 585 ccw++; 586 586 if (dst) { 587 587 if (ccw->flags & CCW_FLAG_IDA) 588 - cda = *((char **)phys_to_virt(ccw->cda)); 588 + cda = *((char **)dma32_to_virt(ccw->cda)); 589 589 else 590 - cda = phys_to_virt(ccw->cda); 590 + cda = dma32_to_virt(ccw->cda); 591 591 if (dst != cda) { 592 592 if (rq_data_dir(req) == READ) 593 593 memcpy(dst, cda, bv.bv_len); ··· 672 672 len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n", 673 673 req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); 674 674 len += sprintf(page + len, "Failing CCW: %px\n", 675 - (void *) (addr_t) irb->scsw.cmd.cpa); 675 + (void *)(u64)dma32_to_u32(irb->scsw.cmd.cpa)); 676 676 if (irb->esw.esw0.erw.cons) { 677 677 for (sl = 0; sl < 4; sl++) { 678 678 len += sprintf(page + len, "Sense(hex) %2d-%2d:", ··· 701 701 for (count = 0; count < 32 && count < act->count; 702 702 count += sizeof(int)) 703 703 len += sprintf(page + len, " %08X", 704 - ((int *) (addr_t) act->cda) 704 + ((int *)dma32_to_virt(act->cda)) 705 705 [(count>>2)]); 706 706 len += sprintf(page + len, "\n"); 707 707 act++; ··· 710 710 711 711 /* print failing CCW area */ 712 712 len = 0; 713 - if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { 714 - act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; 713 + if (act < ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2) { 714 + act = ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2; 715 715 len += sprintf(page + len, "......\n"); 716 716 } 717 - end = min((struct ccw1 
*)(addr_t) irb->scsw.cmd.cpa + 2, last); 717 + end = min((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) + 2, last); 718 718 while (act <= end) { 719 719 len += sprintf(page + len, "CCW %px: %08X %08X DAT:", 720 720 act, ((int *) act)[0], ((int *) act)[1]); 721 721 for (count = 0; count < 32 && count < act->count; 722 722 count += sizeof(int)) 723 723 len += sprintf(page + len, " %08X", 724 - ((int *) (addr_t) act->cda) 724 + ((int *)dma32_to_virt(act->cda)) 725 725 [(count>>2)]); 726 726 len += sprintf(page + len, "\n"); 727 727 act++; ··· 738 738 for (count = 0; count < 32 && count < act->count; 739 739 count += sizeof(int)) 740 740 len += sprintf(page + len, " %08X", 741 - ((int *) (addr_t) act->cda) 741 + ((int *)dma32_to_virt(act->cda)) 742 742 [(count>>2)]); 743 743 len += sprintf(page + len, "\n"); 744 744 act++;
+1 -1
drivers/s390/block/dcssblk.c
··· 920 920 921 921 dev_sz = dev_info->end - dev_info->start + 1; 922 922 if (kaddr) 923 - *kaddr = (void *) dev_info->start + offset; 923 + *kaddr = __va(dev_info->start + offset); 924 924 if (pfn) 925 925 *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 926 926 PFN_DEV|PFN_SPECIAL);
+3 -3
drivers/s390/block/scm_blk.c
··· 131 131 132 132 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { 133 133 msb = &scmrq->aob->msb[i]; 134 - aidaw = (u64)phys_to_virt(msb->data_addr); 134 + aidaw = (u64)dma64_to_virt(msb->data_addr); 135 135 136 136 if ((msb->flags & MSB_FLAG_IDA) && aidaw && 137 137 IS_ALIGNED(aidaw, PAGE_SIZE)) ··· 196 196 msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); 197 197 msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE; 198 198 msb->flags |= MSB_FLAG_IDA; 199 - msb->data_addr = (u64)virt_to_phys(aidaw); 199 + msb->data_addr = virt_to_dma64(aidaw); 200 200 201 201 rq_for_each_segment(bv, req, iter) { 202 202 WARN_ON(bv.bv_offset); 203 203 msb->blk_count += bv.bv_len >> 12; 204 - aidaw->data_addr = virt_to_phys(page_address(bv.bv_page)); 204 + aidaw->data_addr = virt_to_dma64(page_address(bv.bv_page)); 205 205 aidaw++; 206 206 } 207 207
+2 -2
drivers/s390/char/con3215.c
··· 159 159 ccw->cmd_code = 0x0A; /* read inquiry */ 160 160 ccw->flags = 0x20; /* ignore incorrect length */ 161 161 ccw->count = 160; 162 - ccw->cda = (__u32)__pa(raw->inbuf); 162 + ccw->cda = virt_to_dma32(raw->inbuf); 163 163 } 164 164 165 165 /* ··· 218 218 ccw[-1].flags |= 0x40; /* use command chaining */ 219 219 ccw->cmd_code = 0x01; /* write, auto carrier return */ 220 220 ccw->flags = 0x20; /* ignore incorrect length ind. */ 221 - ccw->cda = (__u32)__pa(raw->buffer + ix); 221 + ccw->cda = virt_to_dma32(raw->buffer + ix); 222 222 count = len; 223 223 if (ix + count > RAW3215_BUFFER_SIZE) 224 224 count = RAW3215_BUFFER_SIZE - ix;
+7 -7
drivers/s390/char/fs3270.c
··· 126 126 raw3270_request_set_cmd(fp->init, TC_EWRITEA); 127 127 raw3270_request_set_idal(fp->init, fp->rdbuf); 128 128 fp->init->rescnt = 0; 129 - cp = fp->rdbuf->data[0]; 129 + cp = dma64_to_virt(fp->rdbuf->data[0]); 130 130 if (fp->rdbuf_size == 0) { 131 131 /* No saved buffer. Just clear the screen. */ 132 132 fp->init->ccw.count = 1; ··· 164 164 fp = (struct fs3270 *)rq->view; 165 165 166 166 /* Correct idal buffer element 0 address. */ 167 - fp->rdbuf->data[0] -= 5; 167 + fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], -5); 168 168 fp->rdbuf->size += 5; 169 169 170 170 /* ··· 202 202 * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence 203 203 * in the activation command. 204 204 */ 205 - fp->rdbuf->data[0] += 5; 205 + fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], 5); 206 206 fp->rdbuf->size -= 5; 207 207 raw3270_request_set_idal(fp->init, fp->rdbuf); 208 208 fp->init->rescnt = 0; ··· 521 521 static void fs3270_create_cb(int minor) 522 522 { 523 523 __register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops); 524 - device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), 524 + device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor), 525 525 NULL, "3270/tub%d", minor); 526 526 } 527 527 528 528 static void fs3270_destroy_cb(int minor) 529 529 { 530 - device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor)); 530 + device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, minor)); 531 531 __unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub"); 532 532 } 533 533 ··· 546 546 rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops); 547 547 if (rc) 548 548 return rc; 549 - device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0), 549 + device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0), 550 550 NULL, "3270/tub"); 551 551 raw3270_register_notifier(&fs3270_notifier); 552 552 return 0; ··· 555 555 static void __exit fs3270_exit(void) 556 556 { 557 557 raw3270_unregister_notifier(&fs3270_notifier); 558 - 
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0)); 558 + device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, 0)); 559 559 __unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270"); 560 560 } 561 561
+23 -19
drivers/s390/char/raw3270.c
··· 29 29 #include <linux/device.h> 30 30 #include <linux/mutex.h> 31 31 32 - struct class *class3270; 32 + const struct class class3270 = { 33 + .name = "3270", 34 + }; 33 35 EXPORT_SYMBOL(class3270); 34 36 35 37 /* The main 3270 data structure. */ ··· 162 160 /* 163 161 * Setup ccw. 164 162 */ 165 - rq->ccw.cda = __pa(rq->buffer); 163 + rq->ccw.cda = virt_to_dma32(rq->buffer); 166 164 rq->ccw.flags = CCW_FLAG_SLI; 167 165 168 166 return rq; ··· 188 186 return -EBUSY; 189 187 rq->ccw.cmd_code = 0; 190 188 rq->ccw.count = 0; 191 - rq->ccw.cda = __pa(rq->buffer); 189 + rq->ccw.cda = virt_to_dma32(rq->buffer); 192 190 rq->ccw.flags = CCW_FLAG_SLI; 193 191 rq->rescnt = 0; 194 192 rq->rc = 0; ··· 223 221 */ 224 222 void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size) 225 223 { 226 - rq->ccw.cda = __pa(data); 224 + rq->ccw.cda = virt_to_dma32(data); 227 225 rq->ccw.count = size; 228 226 } 229 227 EXPORT_SYMBOL(raw3270_request_set_data); ··· 233 231 */ 234 232 void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib) 235 233 { 236 - rq->ccw.cda = __pa(ib->data); 234 + rq->ccw.cda = virt_to_dma32(ib->data); 237 235 rq->ccw.count = ib->size; 238 236 rq->ccw.flags |= CCW_FLAG_IDA; 239 237 } ··· 579 577 rp->init_readmod.ccw.cmd_code = TC_READMOD; 580 578 rp->init_readmod.ccw.flags = CCW_FLAG_SLI; 581 579 rp->init_readmod.ccw.count = sizeof(rp->init_data); 582 - rp->init_readmod.ccw.cda = (__u32)__pa(rp->init_data); 580 + rp->init_readmod.ccw.cda = virt_to_dma32(rp->init_data); 583 581 rp->init_readmod.callback = raw3270_read_modified_cb; 584 582 rp->init_readmod.callback_data = rp->init_data; 585 583 rp->state = RAW3270_STATE_READMOD; ··· 599 597 rp->init_readpart.ccw.cmd_code = TC_WRITESF; 600 598 rp->init_readpart.ccw.flags = CCW_FLAG_SLI; 601 599 rp->init_readpart.ccw.count = sizeof(wbuf); 602 - rp->init_readpart.ccw.cda = (__u32)__pa(&rp->init_data); 600 + rp->init_readpart.ccw.cda = virt_to_dma32(&rp->init_data); 603 
601 rp->state = RAW3270_STATE_W4ATTN; 604 602 raw3270_start_irq(&rp->init_view, &rp->init_readpart); 605 603 } ··· 637 635 rp->init_reset.ccw.cmd_code = TC_EWRITEA; 638 636 rp->init_reset.ccw.flags = CCW_FLAG_SLI; 639 637 rp->init_reset.ccw.count = 1; 640 - rp->init_reset.ccw.cda = (__u32)__pa(rp->init_data); 638 + rp->init_reset.ccw.cda = virt_to_dma32(rp->init_data); 641 639 rp->init_reset.callback = raw3270_reset_device_cb; 642 640 rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset); 643 641 if (rc == 0 && rp->state == RAW3270_STATE_INIT) ··· 1318 1316 return 0; 1319 1317 raw3270_registered = 1; 1320 1318 rc = ccw_driver_register(&raw3270_ccw_driver); 1321 - if (rc == 0) { 1322 - /* Create attributes for early (= console) device. */ 1323 - mutex_lock(&raw3270_mutex); 1324 - class3270 = class_create("3270"); 1325 - list_for_each_entry(rp, &raw3270_devices, list) { 1326 - get_device(&rp->cdev->dev); 1327 - raw3270_create_attributes(rp); 1328 - } 1329 - mutex_unlock(&raw3270_mutex); 1319 + if (rc) 1320 + return rc; 1321 + rc = class_register(&class3270); 1322 + if (rc) 1323 + return rc; 1324 + /* Create attributes for early (= console) device. */ 1325 + mutex_lock(&raw3270_mutex); 1326 + list_for_each_entry(rp, &raw3270_devices, list) { 1327 + get_device(&rp->cdev->dev); 1328 + raw3270_create_attributes(rp); 1330 1329 } 1331 - return rc; 1330 + mutex_unlock(&raw3270_mutex); 1331 + return 0; 1332 1332 } 1333 1333 1334 1334 static void raw3270_exit(void) 1335 1335 { 1336 1336 ccw_driver_unregister(&raw3270_ccw_driver); 1337 - class_destroy(class3270); 1337 + class_unregister(&class3270); 1338 1338 } 1339 1339 1340 1340 MODULE_LICENSE("GPL");
+1 -1
drivers/s390/char/raw3270.h
··· 14 14 15 15 struct raw3270; 16 16 struct raw3270_view; 17 - extern struct class *class3270; 17 + extern const struct class class3270; 18 18 19 19 /* 3270 CCW request */ 20 20 struct raw3270_request {
+8 -4
drivers/s390/char/tape.h
··· 305 305 ccw->cmd_code = cmd_code; 306 306 ccw->flags = CCW_FLAG_CC; 307 307 ccw->count = memsize; 308 - ccw->cda = (__u32)(addr_t) cda; 308 + ccw->cda = 0; 309 + if (cda) 310 + ccw->cda = virt_to_dma32(cda); 309 311 return ccw + 1; 310 312 } 311 313 ··· 317 315 ccw->cmd_code = cmd_code; 318 316 ccw->flags = 0; 319 317 ccw->count = memsize; 320 - ccw->cda = (__u32)(addr_t) cda; 318 + ccw->cda = 0; 319 + if (cda) 320 + ccw->cda = virt_to_dma32(cda); 321 321 return ccw + 1; 322 322 } 323 323 ··· 329 325 ccw->cmd_code = cmd_code; 330 326 ccw->flags = 0; 331 327 ccw->count = 0; 332 - ccw->cda = (__u32)(addr_t) &ccw->cmd_code; 328 + ccw->cda = virt_to_dma32(&ccw->cmd_code); 333 329 return ccw + 1; 334 330 } 335 331 ··· 340 336 ccw->cmd_code = cmd_code; 341 337 ccw->flags = CCW_FLAG_CC; 342 338 ccw->count = 0; 343 - ccw->cda = (__u32)(addr_t) &ccw->cmd_code; 339 + ccw->cda = virt_to_dma32(&ccw->cmd_code); 344 340 ccw++; 345 341 } 346 342 return ccw;
+8 -9
drivers/s390/char/tape_class.c
··· 22 22 ); 23 23 MODULE_LICENSE("GPL"); 24 24 25 - static struct class *tape_class; 25 + static const struct class tape_class = { 26 + .name = "tape390", 27 + }; 26 28 27 29 /* 28 30 * Register a tape device and return a pointer to the cdev structure. ··· 76 74 if (rc) 77 75 goto fail_with_cdev; 78 76 79 - tcd->class_device = device_create(tape_class, device, 77 + tcd->class_device = device_create(&tape_class, device, 80 78 tcd->char_device->dev, NULL, 81 79 "%s", tcd->device_name); 82 80 rc = PTR_ERR_OR_ZERO(tcd->class_device); ··· 93 91 return tcd; 94 92 95 93 fail_with_class_device: 96 - device_destroy(tape_class, tcd->char_device->dev); 94 + device_destroy(&tape_class, tcd->char_device->dev); 97 95 98 96 fail_with_cdev: 99 97 cdev_del(tcd->char_device); ··· 109 107 { 110 108 if (tcd != NULL && !IS_ERR(tcd)) { 111 109 sysfs_remove_link(&device->kobj, tcd->mode_name); 112 - device_destroy(tape_class, tcd->char_device->dev); 110 + device_destroy(&tape_class, tcd->char_device->dev); 113 111 cdev_del(tcd->char_device); 114 112 kfree(tcd); 115 113 } ··· 119 117 120 118 static int __init tape_init(void) 121 119 { 122 - tape_class = class_create("tape390"); 123 - 124 - return 0; 120 + return class_register(&tape_class); 125 121 } 126 122 127 123 static void __exit tape_exit(void) 128 124 { 129 - class_destroy(tape_class); 130 - tape_class = NULL; 125 + class_unregister(&tape_class); 131 126 } 132 127 133 128 postcore_initcall(tape_init);
+8 -10
drivers/s390/char/vmlogrdr.c
··· 679 679 NULL, 680 680 }; 681 681 682 - static struct class *vmlogrdr_class; 682 + static const struct class vmlogrdr_class = { 683 + .name = "vmlogrdr_class", 684 + }; 683 685 static struct device_driver vmlogrdr_driver = { 684 686 .name = "vmlogrdr", 685 687 .bus = &iucv_bus, ··· 701 699 if (ret) 702 700 goto out_iucv; 703 701 704 - vmlogrdr_class = class_create("vmlogrdr"); 705 - if (IS_ERR(vmlogrdr_class)) { 706 - ret = PTR_ERR(vmlogrdr_class); 707 - vmlogrdr_class = NULL; 702 + ret = class_register(&vmlogrdr_class); 703 + if (ret) 708 704 goto out_driver; 709 - } 710 705 return 0; 711 706 712 707 out_driver: ··· 717 718 718 719 static void vmlogrdr_unregister_driver(void) 719 720 { 720 - class_destroy(vmlogrdr_class); 721 - vmlogrdr_class = NULL; 721 + class_unregister(&vmlogrdr_class); 722 722 driver_unregister(&vmlogrdr_driver); 723 723 iucv_unregister(&vmlogrdr_iucv_handler, 1); 724 724 } ··· 752 754 return ret; 753 755 } 754 756 755 - priv->class_device = device_create(vmlogrdr_class, dev, 757 + priv->class_device = device_create(&vmlogrdr_class, dev, 756 758 MKDEV(vmlogrdr_major, 757 759 priv->minor_num), 758 760 priv, "%s", dev_name(dev)); ··· 769 771 770 772 static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) 771 773 { 772 - device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); 774 + device_destroy(&vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); 773 775 if (priv->device != NULL) { 774 776 device_unregister(priv->device); 775 777 priv->device=NULL;
+11 -11
drivers/s390/char/vmur.c
··· 48 48 MODULE_LICENSE("GPL"); 49 49 50 50 static dev_t ur_first_dev_maj_min; 51 - static struct class *vmur_class; 51 + static const struct class vmur_class = { 52 + .name = "vmur", 53 + }; 52 54 static struct debug_info *vmur_dbf; 53 55 54 56 /* We put the device's record length (for writes) in the driver_info field */ ··· 197 195 struct ccw1 *ptr = cpa; 198 196 199 197 while (ptr->cda) { 200 - kfree(phys_to_virt(ptr->cda)); 198 + kfree(dma32_to_virt(ptr->cda)); 201 199 ptr++; 202 200 } 203 201 kfree(cpa); ··· 239 237 free_chan_prog(cpa); 240 238 return ERR_PTR(-ENOMEM); 241 239 } 242 - cpa[i].cda = (u32)virt_to_phys(kbuf); 240 + cpa[i].cda = virt_to_dma32(kbuf); 243 241 if (copy_from_user(kbuf, ubuf, reclen)) { 244 242 free_chan_prog(cpa); 245 243 return ERR_PTR(-EFAULT); ··· 914 912 goto fail_free_cdev; 915 913 } 916 914 917 - urd->device = device_create(vmur_class, &cdev->dev, 915 + urd->device = device_create(&vmur_class, &cdev->dev, 918 916 urd->char_device->dev, NULL, "%s", node_id); 919 917 if (IS_ERR(urd->device)) { 920 918 rc = PTR_ERR(urd->device); ··· 960 958 /* Work not run yet - need to release reference here */ 961 959 urdev_put(urd); 962 960 } 963 - device_destroy(vmur_class, urd->char_device->dev); 961 + device_destroy(&vmur_class, urd->char_device->dev); 964 962 cdev_del(urd->char_device); 965 963 urd->char_device = NULL; 966 964 rc = 0; ··· 1024 1022 1025 1023 debug_set_level(vmur_dbf, 6); 1026 1024 1027 - vmur_class = class_create("vmur"); 1028 - if (IS_ERR(vmur_class)) { 1029 - rc = PTR_ERR(vmur_class); 1025 + rc = class_register(&vmur_class); 1026 + if (rc) 1030 1027 goto fail_free_dbf; 1031 - } 1032 1028 1033 1029 rc = ccw_driver_register(&ur_driver); 1034 1030 if (rc) ··· 1046 1046 fail_unregister_driver: 1047 1047 ccw_driver_unregister(&ur_driver); 1048 1048 fail_class_destroy: 1049 - class_destroy(vmur_class); 1049 + class_unregister(&vmur_class); 1050 1050 fail_free_dbf: 1051 1051 debug_unregister(vmur_dbf); 1052 1052 return rc; ··· 
1056 1056 { 1057 1057 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); 1058 1058 ccw_driver_unregister(&ur_driver); 1059 - class_destroy(vmur_class); 1059 + class_unregister(&vmur_class); 1060 1060 debug_unregister(vmur_dbf); 1061 1061 pr_info("%s unloaded.\n", ur_banner); 1062 1062 }
+2 -2
drivers/s390/cio/ccwgroup.c
··· 240 240 rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, 241 241 &gdev->dev.kobj, "group_device"); 242 242 if (rc) { 243 - for (--i; i >= 0; i--) 243 + while (i--) 244 244 sysfs_remove_link(&gdev->cdev[i]->dev.kobj, 245 245 "group_device"); 246 246 return rc; ··· 251 251 rc = sysfs_create_link(&gdev->dev.kobj, 252 252 &gdev->cdev[i]->dev.kobj, str); 253 253 if (rc) { 254 - for (--i; i >= 0; i--) { 254 + while (i--) { 255 255 sprintf(str, "cdev%d", i); 256 256 sysfs_remove_link(&gdev->dev.kobj, str); 257 257 }
+6 -6
drivers/s390/cio/chsc.c
··· 191 191 * Returns 0 on success. 192 192 */ 193 193 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, 194 - u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc) 194 + dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, u8 isc) 195 195 { 196 196 memset(scssc, 0, sizeof(*scssc)); 197 197 scssc->request.length = 0x0fe0; ··· 844 844 } 845 845 return ret; 846 846 cleanup: 847 - for (--i; i >= 0; i--) { 847 + while (i--) { 848 848 if (!css->chps[i]) 849 849 continue; 850 850 chp_remove_cmg_attr(css->chps[i]); ··· 861 861 u32 key : 4; 862 862 u32 : 28; 863 863 u32 zeroes1; 864 - u32 cub_addr1; 864 + dma32_t cub_addr1; 865 865 u32 zeroes2; 866 - u32 cub_addr2; 866 + dma32_t cub_addr2; 867 867 u32 reserved[13]; 868 868 struct chsc_header response; 869 869 u32 status : 8; ··· 881 881 secm_area->request.code = 0x0016; 882 882 883 883 secm_area->key = PAGE_DEFAULT_KEY >> 4; 884 - secm_area->cub_addr1 = virt_to_phys(css->cub_addr1); 885 - secm_area->cub_addr2 = virt_to_phys(css->cub_addr2); 884 + secm_area->cub_addr1 = virt_to_dma32(css->cub_addr1); 885 + secm_area->cub_addr2 = virt_to_dma32(css->cub_addr2); 886 886 887 887 secm_area->operation_code = enable ? 0 : 1; 888 888
+3 -3
drivers/s390/cio/chsc.h
··· 91 91 u16:16; 92 92 u32:32; 93 93 u32:32; 94 - u64 summary_indicator_addr; 95 - u64 subchannel_indicator_addr; 94 + dma64_t summary_indicator_addr; 95 + dma64_t subchannel_indicator_addr; 96 96 u32 ks:4; 97 97 u32 kc:4; 98 98 u32:21; ··· 164 164 int chsc_get_channel_measurement_chars(struct channel_path *chp); 165 165 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); 166 166 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, 167 - u64 summary_indicator_addr, u64 subchannel_indicator_addr, 167 + dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, 168 168 u8 isc); 169 169 int chsc_sgib(u32 origin); 170 170 int chsc_error_from_response(int response);
+2 -2
drivers/s390/cio/cio.c
··· 148 148 orb->cmd.i2k = 0; 149 149 orb->cmd.key = key >> 4; 150 150 /* issue "Start Subchannel" */ 151 - orb->cmd.cpa = (u32)virt_to_phys(cpa); 151 + orb->cmd.cpa = virt_to_dma32(cpa); 152 152 ccode = ssch(sch->schid, orb); 153 153 154 154 /* process condition code */ ··· 717 717 orb->tm.key = key >> 4; 718 718 orb->tm.b = 1; 719 719 orb->tm.lpm = lpm ? lpm : sch->lpm; 720 - orb->tm.tcw = (u32)virt_to_phys(tcw); 720 + orb->tm.tcw = virt_to_dma32(tcw); 721 721 cc = ssch(sch->schid, orb); 722 722 switch (cc) { 723 723 case 0:
+16 -9
drivers/s390/cio/css.c
··· 1114 1114 return 0; 1115 1115 } 1116 1116 1117 - void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, 1118 - size_t size) 1117 + void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, 1118 + size_t size, dma32_t *dma_handle) 1119 1119 { 1120 1120 dma_addr_t dma_addr; 1121 - unsigned long addr; 1122 1121 size_t chunk_size; 1122 + void *addr; 1123 1123 1124 1124 if (!gp_dma) 1125 1125 return NULL; 1126 - addr = gen_pool_alloc(gp_dma, size); 1126 + addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr); 1127 1127 while (!addr) { 1128 1128 chunk_size = round_up(size, PAGE_SIZE); 1129 - addr = (unsigned long) dma_alloc_coherent(dma_dev, 1130 - chunk_size, &dma_addr, CIO_DMA_GFP); 1129 + addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP); 1131 1130 if (!addr) 1132 1131 return NULL; 1133 - gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1); 1134 - addr = gen_pool_alloc(gp_dma, size); 1132 + gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1); 1133 + addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL); 1135 1134 } 1136 - return (void *) addr; 1135 + if (dma_handle) 1136 + *dma_handle = (__force dma32_t)dma_addr; 1137 + return addr; 1138 + } 1139 + 1140 + void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev, 1141 + size_t size) 1142 + { 1143 + return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL); 1137 1144 } 1138 1145 1139 1146 void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
+4 -4
drivers/s390/cio/device_fsm.c
··· 64 64 printk(KERN_WARNING "cio: orb indicates transport mode\n"); 65 65 printk(KERN_WARNING "cio: last tcw:\n"); 66 66 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 67 - phys_to_virt(orb->tm.tcw), 67 + dma32_to_virt(orb->tm.tcw), 68 68 sizeof(struct tcw), 0); 69 69 } else { 70 70 printk(KERN_WARNING "cio: orb indicates command mode\n"); 71 - if ((void *)(addr_t)orb->cmd.cpa == 71 + if (dma32_to_virt(orb->cmd.cpa) == 72 72 &private->dma_area->sense_ccw || 73 - (void *)(addr_t)orb->cmd.cpa == 73 + dma32_to_virt(orb->cmd.cpa) == 74 74 cdev->private->dma_area->iccws) 75 75 printk(KERN_WARNING "cio: last channel program " 76 76 "(intern):\n"); ··· 78 78 printk(KERN_WARNING "cio: last channel program:\n"); 79 79 80 80 print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1, 81 - phys_to_virt(orb->cmd.cpa), 81 + dma32_to_virt(orb->cmd.cpa), 82 82 sizeof(struct ccw1), 0); 83 83 } 84 84 printk(KERN_WARNING "cio: ccw device state: %d\n",
+1 -1
drivers/s390/cio/device_id.c
··· 210 210 snsid_init(cdev); 211 211 /* Channel program setup. */ 212 212 cp->cmd_code = CCW_CMD_SENSE_ID; 213 - cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->senseid); 213 + cp->cda = virt_to_dma32(&cdev->private->dma_area->senseid); 214 214 cp->count = sizeof(struct senseid); 215 215 cp->flags = CCW_FLAG_SLI; 216 216 /* Request setup. */
+3 -2
drivers/s390/cio/device_ops.c
··· 823 823 * the subchannels dma pool. Maximal size of allocation supported 824 824 * is PAGE_SIZE. 825 825 */ 826 - void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size) 826 + void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size, 827 + dma32_t *dma_handle) 827 828 { 828 829 void *addr; 829 830 830 831 if (!get_device(&cdev->dev)) 831 832 return NULL; 832 - addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size); 833 + addr = __cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size, dma_handle); 833 834 if (IS_ERR_OR_NULL(addr)) 834 835 put_device(&cdev->dev); 835 836 return addr;
+4 -4
drivers/s390/cio/device_pgid.c
··· 141 141 142 142 pgid->inf.fc = fn; 143 143 cp->cmd_code = CCW_CMD_SET_PGID; 144 - cp->cda = (u32)virt_to_phys(pgid); 144 + cp->cda = virt_to_dma32(pgid); 145 145 cp->count = sizeof(*pgid); 146 146 cp->flags = CCW_FLAG_SLI; 147 147 req->cp = cp; ··· 442 442 443 443 /* Channel program setup. */ 444 444 cp->cmd_code = CCW_CMD_SENSE_PGID; 445 - cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->pgid[i]); 445 + cp->cda = virt_to_dma32(&cdev->private->dma_area->pgid[i]); 446 446 cp->count = sizeof(struct pgid); 447 447 cp->flags = CCW_FLAG_SLI; 448 448 req->cp = cp; ··· 632 632 struct ccw1 *cp = cdev->private->dma_area->iccws; 633 633 634 634 cp[0].cmd_code = CCW_CMD_STLCK; 635 - cp[0].cda = (u32)virt_to_phys(buf1); 635 + cp[0].cda = virt_to_dma32(buf1); 636 636 cp[0].count = 32; 637 637 cp[0].flags = CCW_FLAG_CC; 638 638 cp[1].cmd_code = CCW_CMD_RELEASE; 639 - cp[1].cda = (u32)virt_to_phys(buf2); 639 + cp[1].cda = virt_to_dma32(buf2); 640 640 cp[1].count = 32; 641 641 cp[1].flags = 0; 642 642 req->cp = cp;
+1 -1
drivers/s390/cio/device_status.c
··· 332 332 */ 333 333 sense_ccw = &to_io_private(sch)->dma_area->sense_ccw; 334 334 sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE; 335 - sense_ccw->cda = virt_to_phys(cdev->private->dma_area->irb.ecw); 335 + sense_ccw->cda = virt_to_dma32(cdev->private->dma_area->irb.ecw); 336 336 sense_ccw->count = SENSE_MAX_COUNT; 337 337 sense_ccw->flags = CCW_FLAG_SLI; 338 338
+2 -2
drivers/s390/cio/eadm_sch.c
··· 63 63 int cc; 64 64 65 65 orb_init(orb); 66 - orb->eadm.aob = (u32)virt_to_phys(aob); 66 + orb->eadm.aob = virt_to_dma32(aob); 67 67 orb->eadm.intparm = (u32)virt_to_phys(sch); 68 68 orb->eadm.key = PAGE_DEFAULT_KEY >> 4; 69 69 ··· 147 147 css_sched_sch_todo(sch, SCH_TODO_EVAL); 148 148 return; 149 149 } 150 - scm_irq_handler(phys_to_virt(scsw->aob), error); 150 + scm_irq_handler(dma32_to_virt(scsw->aob), error); 151 151 private->state = EADM_IDLE; 152 152 153 153 if (private->completion)
+11 -11
drivers/s390/cio/fcx.c
··· 25 25 */ 26 26 struct tcw *tcw_get_intrg(struct tcw *tcw) 27 27 { 28 - return phys_to_virt(tcw->intrg); 28 + return dma32_to_virt(tcw->intrg); 29 29 } 30 30 EXPORT_SYMBOL(tcw_get_intrg); 31 31 ··· 40 40 void *tcw_get_data(struct tcw *tcw) 41 41 { 42 42 if (tcw->r) 43 - return phys_to_virt(tcw->input); 43 + return dma64_to_virt(tcw->input); 44 44 if (tcw->w) 45 - return phys_to_virt(tcw->output); 45 + return dma64_to_virt(tcw->output); 46 46 return NULL; 47 47 } 48 48 EXPORT_SYMBOL(tcw_get_data); ··· 55 55 */ 56 56 struct tccb *tcw_get_tccb(struct tcw *tcw) 57 57 { 58 - return phys_to_virt(tcw->tccb); 58 + return dma64_to_virt(tcw->tccb); 59 59 } 60 60 EXPORT_SYMBOL(tcw_get_tccb); 61 61 ··· 67 67 */ 68 68 struct tsb *tcw_get_tsb(struct tcw *tcw) 69 69 { 70 - return phys_to_virt(tcw->tsb); 70 + return dma64_to_virt(tcw->tsb); 71 71 } 72 72 EXPORT_SYMBOL(tcw_get_tsb); 73 73 ··· 190 190 */ 191 191 void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw) 192 192 { 193 - tcw->intrg = (u32)virt_to_phys(intrg_tcw); 193 + tcw->intrg = virt_to_dma32(intrg_tcw); 194 194 } 195 195 EXPORT_SYMBOL(tcw_set_intrg); 196 196 ··· 208 208 void tcw_set_data(struct tcw *tcw, void *data, int use_tidal) 209 209 { 210 210 if (tcw->r) { 211 - tcw->input = virt_to_phys(data); 211 + tcw->input = virt_to_dma64(data); 212 212 if (use_tidal) 213 213 tcw->flags |= TCW_FLAGS_INPUT_TIDA; 214 214 } else if (tcw->w) { 215 - tcw->output = virt_to_phys(data); 215 + tcw->output = virt_to_dma64(data); 216 216 if (use_tidal) 217 217 tcw->flags |= TCW_FLAGS_OUTPUT_TIDA; 218 218 } ··· 228 228 */ 229 229 void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb) 230 230 { 231 - tcw->tccb = virt_to_phys(tccb); 231 + tcw->tccb = virt_to_dma64(tccb); 232 232 } 233 233 EXPORT_SYMBOL(tcw_set_tccb); 234 234 ··· 241 241 */ 242 242 void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb) 243 243 { 244 - tcw->tsb = virt_to_phys(tsb); 244 + tcw->tsb = virt_to_dma64(tsb); 245 245 } 246 246 EXPORT_SYMBOL(tcw_set_tsb); 247 247 
··· 346 346 memset(tidaw, 0, sizeof(struct tidaw)); 347 347 tidaw->flags = flags; 348 348 tidaw->count = count; 349 - tidaw->addr = virt_to_phys(addr); 349 + tidaw->addr = virt_to_dma64(addr); 350 350 return tidaw; 351 351 } 352 352 EXPORT_SYMBOL(tcw_add_tidaw);
+6 -3
drivers/s390/cio/orb.h
··· 12 12 #ifndef S390_ORB_H 13 13 #define S390_ORB_H 14 14 15 + #include <linux/types.h> 16 + #include <asm/dma-types.h> 17 + 15 18 /* 16 19 * Command-mode operation request block 17 20 */ ··· 37 34 u32 ils:1; /* incorrect length */ 38 35 u32 zero:6; /* reserved zeros */ 39 36 u32 orbx:1; /* ORB extension control */ 40 - u32 cpa; /* channel program address */ 37 + dma32_t cpa; /* channel program address */ 41 38 } __packed __aligned(4); 42 39 43 40 /* ··· 52 49 u32 lpm:8; 53 50 u32:7; 54 51 u32 x:1; 55 - u32 tcw; 52 + dma32_t tcw; 56 53 u32 prio:8; 57 54 u32:8; 58 55 u32 rsvpgm:8; ··· 74 71 u32 compat2:1; 75 72 u32:21; 76 73 u32 x:1; 77 - u32 aob; 74 + dma32_t aob; 78 75 u32 css_prio:8; 79 76 u32:8; 80 77 u32 scm_prio:8;
+6 -6
drivers/s390/cio/qdio_main.c
··· 82 82 */ 83 83 static inline int do_siga_output(unsigned long schid, unsigned long mask, 84 84 unsigned int *bb, unsigned long fc, 85 - unsigned long aob) 85 + dma64_t aob) 86 86 { 87 87 int cc; 88 88 ··· 321 321 } 322 322 323 323 static int qdio_siga_output(struct qdio_q *q, unsigned int count, 324 - unsigned int *busy_bit, unsigned long aob) 324 + unsigned int *busy_bit, dma64_t aob) 325 325 { 326 326 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 327 327 unsigned int fc = QDIO_SIGA_WRITE; ··· 628 628 EXPORT_SYMBOL_GPL(qdio_inspect_output_queue); 629 629 630 630 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, 631 - unsigned long aob) 631 + dma64_t aob) 632 632 { 633 633 int retries = 0, cc; 634 634 unsigned int busy_bit; ··· 1070 1070 irq_ptr->ccw->cmd_code = ciw->cmd; 1071 1071 irq_ptr->ccw->flags = CCW_FLAG_SLI; 1072 1072 irq_ptr->ccw->count = ciw->count; 1073 - irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr); 1073 + irq_ptr->ccw->cda = virt_to_dma32(irq_ptr->qdr); 1074 1074 1075 1075 spin_lock_irq(get_ccwdev_lock(cdev)); 1076 1076 ccw_device_set_options_mask(cdev, 0); ··· 1263 1263 qperf_inc(q, outbound_queue_full); 1264 1264 1265 1265 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1266 - unsigned long phys_aob = aob ? virt_to_phys(aob) : 0; 1266 + dma64_t phys_aob = aob ? virt_to_dma64(aob) : 0; 1267 1267 1268 - WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256)); 1268 + WARN_ON_ONCE(!IS_ALIGNED(dma64_to_u64(phys_aob), 256)); 1269 1269 rc = qdio_kick_outbound_q(q, count, phys_aob); 1270 1270 } else if (qdio_need_siga_sync(q->irq_ptr)) { 1271 1271 rc = qdio_sync_output_queue(q);
+5 -5
drivers/s390/cio/qdio_setup.c
··· 179 179 180 180 /* fill in sl */ 181 181 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) 182 - q->sl->element[j].sbal = virt_to_phys(q->sbal[j]); 182 + q->sl->element[j].sbal = virt_to_dma64(q->sbal[j]); 183 183 } 184 184 185 185 static void setup_queues(struct qdio_irq *irq_ptr, ··· 291 291 292 292 static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue) 293 293 { 294 - desc->sliba = virt_to_phys(queue->slib); 295 - desc->sla = virt_to_phys(queue->sl); 296 - desc->slsba = virt_to_phys(&queue->slsb); 294 + desc->sliba = virt_to_dma64(queue->slib); 295 + desc->sla = virt_to_dma64(queue->sl); 296 + desc->slsba = virt_to_dma64(&queue->slsb); 297 297 298 298 desc->akey = PAGE_DEFAULT_KEY >> 4; 299 299 desc->bkey = PAGE_DEFAULT_KEY >> 4; ··· 315 315 irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; 316 316 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ 317 317 irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4; 318 - irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib); 318 + irq_ptr->qdr->qiba = virt_to_dma64(&irq_ptr->qib); 319 319 irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4; 320 320 321 321 for (i = 0; i < qdio_init->no_input_qs; i++)
+3 -3
drivers/s390/cio/qdio_thinint.c
··· 137 137 static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) 138 138 { 139 139 struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page; 140 - u64 summary_indicator_addr, subchannel_indicator_addr; 140 + dma64_t summary_indicator_addr, subchannel_indicator_addr; 141 141 int rc; 142 142 143 143 if (reset) { 144 144 summary_indicator_addr = 0; 145 145 subchannel_indicator_addr = 0; 146 146 } else { 147 - summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr); 148 - subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci); 147 + summary_indicator_addr = virt_to_dma64(tiqdio_airq.lsi_ptr); 148 + subchannel_indicator_addr = virt_to_dma64(irq_ptr->dsci); 149 149 } 150 150 151 151 rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
+44 -38
drivers/s390/cio/vfio_ccw_cp.c
··· 190 190 } 191 191 /* Create the list of IDAL words for a page_array. */ 192 192 static inline void page_array_idal_create_words(struct page_array *pa, 193 - unsigned long *idaws) 193 + dma64_t *idaws) 194 194 { 195 195 int i; 196 196 ··· 203 203 */ 204 204 205 205 for (i = 0; i < pa->pa_nr; i++) { 206 - idaws[i] = page_to_phys(pa->pa_page[i]); 206 + idaws[i] = virt_to_dma64(page_to_virt(pa->pa_page[i])); 207 207 208 208 /* Incorporate any offset from each starting address */ 209 - idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1); 209 + idaws[i] = dma64_add(idaws[i], pa->pa_iova[i] & ~PAGE_MASK); 210 210 } 211 211 } 212 212 ··· 227 227 pccw1->flags = ccw0.flags; 228 228 pccw1->count = ccw0.count; 229 229 } 230 - pccw1->cda = ccw0.cda; 230 + pccw1->cda = u32_to_dma32(ccw0.cda); 231 231 pccw1++; 232 232 } 233 233 } ··· 299 299 * 300 300 * Returns 1 if yes, 0 if no. 301 301 */ 302 - static inline int is_cpa_within_range(u32 cpa, u32 head, int len) 302 + static inline int is_cpa_within_range(dma32_t cpa, u32 head, int len) 303 303 { 304 304 u32 tail = head + (len - 1) * sizeof(struct ccw1); 305 + u32 gcpa = dma32_to_u32(cpa); 305 306 306 - return (head <= cpa && cpa <= tail); 307 + return head <= gcpa && gcpa <= tail; 307 308 } 308 309 309 310 static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len) ··· 357 356 if (ccw_is_tic(ccw)) 358 357 return; 359 358 360 - kfree(phys_to_virt(ccw->cda)); 359 + kfree(dma32_to_virt(ccw->cda)); 361 360 } 362 361 363 362 /** ··· 418 417 static int ccwchain_loop_tic(struct ccwchain *chain, 419 418 struct channel_program *cp); 420 419 421 - static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) 420 + static int ccwchain_handle_ccw(dma32_t cda, struct channel_program *cp) 422 421 { 423 422 struct vfio_device *vdev = 424 423 &container_of(cp, struct vfio_ccw_private, cp)->vdev; 425 424 struct ccwchain *chain; 426 425 int len, ret; 426 + u32 gcda; 427 427 428 + gcda = dma32_to_u32(cda); 428 429 /* Copy 2K (the 
most we support today) of possible CCWs */ 429 - ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false); 430 + ret = vfio_dma_rw(vdev, gcda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false); 430 431 if (ret) 431 432 return ret; 432 433 ··· 437 434 convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX); 438 435 439 436 /* Count the CCWs in the current chain */ 440 - len = ccwchain_calc_length(cda, cp); 437 + len = ccwchain_calc_length(gcda, cp); 441 438 if (len < 0) 442 439 return len; 443 440 ··· 447 444 return -ENOMEM; 448 445 449 446 chain->ch_len = len; 450 - chain->ch_iova = cda; 447 + chain->ch_iova = gcda; 451 448 452 449 /* Copy the actual CCWs into the new chain */ 453 450 memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1)); ··· 490 487 struct channel_program *cp) 491 488 { 492 489 struct ccwchain *iter; 493 - u32 ccw_head; 490 + u32 cda, ccw_head; 494 491 495 492 list_for_each_entry(iter, &cp->ccwchain_list, next) { 496 493 ccw_head = iter->ch_iova; 497 494 if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) { 498 - ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) + 499 - (ccw->cda - ccw_head)); 495 + cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head; 496 + ccw->cda = u32_to_dma32(cda); 500 497 return 0; 501 498 } 502 499 } ··· 504 501 return -EFAULT; 505 502 } 506 503 507 - static unsigned long *get_guest_idal(struct ccw1 *ccw, 508 - struct channel_program *cp, 509 - int idaw_nr) 504 + static dma64_t *get_guest_idal(struct ccw1 *ccw, struct channel_program *cp, int idaw_nr) 510 505 { 511 506 struct vfio_device *vdev = 512 507 &container_of(cp, struct vfio_ccw_private, cp)->vdev; 513 - unsigned long *idaws; 514 - unsigned int *idaws_f1; 508 + dma64_t *idaws; 509 + dma32_t *idaws_f1; 515 510 int idal_len = idaw_nr * sizeof(*idaws); 516 511 int idaw_size = idal_is_2k(cp) ? 
PAGE_SIZE / 2 : PAGE_SIZE; 517 512 int idaw_mask = ~(idaw_size - 1); ··· 521 520 522 521 if (ccw_is_idal(ccw)) { 523 522 /* Copy IDAL from guest */ 524 - ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false); 523 + ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), idaws, idal_len, false); 525 524 if (ret) { 526 525 kfree(idaws); 527 526 return ERR_PTR(ret); ··· 529 528 } else { 530 529 /* Fabricate an IDAL based off CCW data address */ 531 530 if (cp->orb.cmd.c64) { 532 - idaws[0] = ccw->cda; 533 - for (i = 1; i < idaw_nr; i++) 534 - idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask; 531 + idaws[0] = u64_to_dma64(dma32_to_u32(ccw->cda)); 532 + for (i = 1; i < idaw_nr; i++) { 533 + idaws[i] = dma64_add(idaws[i - 1], idaw_size); 534 + idaws[i] = dma64_and(idaws[i], idaw_mask); 535 + } 535 536 } else { 536 - idaws_f1 = (unsigned int *)idaws; 537 + idaws_f1 = (dma32_t *)idaws; 537 538 idaws_f1[0] = ccw->cda; 538 - for (i = 1; i < idaw_nr; i++) 539 - idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask; 539 + for (i = 1; i < idaw_nr; i++) { 540 + idaws_f1[i] = dma32_add(idaws_f1[i - 1], idaw_size); 541 + idaws_f1[i] = dma32_and(idaws_f1[i], idaw_mask); 542 + } 540 543 } 541 544 } 542 545 ··· 577 572 if (ccw_is_idal(ccw)) { 578 573 /* Read first IDAW to check its starting address. */ 579 574 /* All subsequent IDAWs will be 2K- or 4K-aligned. 
*/ 580 - ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false); 575 + ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), &iova, size, false); 581 576 if (ret) 582 577 return ret; 583 578 ··· 588 583 if (!cp->orb.cmd.c64) 589 584 iova = iova >> 32; 590 585 } else { 591 - iova = ccw->cda; 586 + iova = dma32_to_u32(ccw->cda); 592 587 } 593 588 594 589 /* Format-1 IDAWs operate on 2K each */ ··· 609 604 { 610 605 struct vfio_device *vdev = 611 606 &container_of(cp, struct vfio_ccw_private, cp)->vdev; 612 - unsigned long *idaws; 613 - unsigned int *idaws_f1; 607 + dma64_t *idaws; 608 + dma32_t *idaws_f1; 614 609 int ret; 615 610 int idaw_nr; 616 611 int i; ··· 641 636 * Copy guest IDAWs into page_array, in case the memory they 642 637 * occupy is not contiguous. 643 638 */ 644 - idaws_f1 = (unsigned int *)idaws; 639 + idaws_f1 = (dma32_t *)idaws; 645 640 for (i = 0; i < idaw_nr; i++) { 646 641 if (cp->orb.cmd.c64) 647 - pa->pa_iova[i] = idaws[i]; 642 + pa->pa_iova[i] = dma64_to_u64(idaws[i]); 648 643 else 649 - pa->pa_iova[i] = idaws_f1[i]; 644 + pa->pa_iova[i] = dma32_to_u32(idaws_f1[i]); 650 645 } 651 646 652 647 if (ccw_does_data_transfer(ccw)) { ··· 657 652 pa->pa_nr = 0; 658 653 } 659 654 660 - ccw->cda = (__u32) virt_to_phys(idaws); 655 + ccw->cda = virt_to_dma32(idaws); 661 656 ccw->flags |= CCW_FLAG_IDA; 662 657 663 658 /* Populate the IDAL with pinned/translated addresses from page */ ··· 879 874 880 875 chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next); 881 876 cpa = chain->ch_ccw; 882 - orb->cmd.cpa = (__u32)virt_to_phys(cpa); 877 + orb->cmd.cpa = virt_to_dma32(cpa); 883 878 884 879 return orb; 885 880 } ··· 901 896 void cp_update_scsw(struct channel_program *cp, union scsw *scsw) 902 897 { 903 898 struct ccwchain *chain; 904 - u32 cpa = scsw->cmd.cpa; 899 + dma32_t cpa = scsw->cmd.cpa; 905 900 u32 ccw_head; 906 901 907 902 if (!cp->initialized) ··· 924 919 * (cpa - ccw_head) is the offset value of the host 925 920 * physical ccw to its chain 
head. 926 921 * Adding this value to the guest physical ccw chain 927 - * head gets us the guest cpa. 922 + * head gets us the guest cpa: 923 + * cpa = chain->ch_iova + (cpa - ccw_head) 928 924 */ 929 - cpa = chain->ch_iova + (cpa - ccw_head); 925 + cpa = dma32_add(cpa, chain->ch_iova - ccw_head); 930 926 break; 931 927 } 932 928 }
+1 -1
drivers/s390/cio/vfio_ccw_fsm.c
··· 378 378 379 379 spin_lock_irq(&sch->lock); 380 380 sch->isc = VFIO_CCW_ISC; 381 - ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); 381 + ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch)); 382 382 if (ret) 383 383 goto err_unlock; 384 384
+21 -18
drivers/s390/crypto/zcrypt_api.c
··· 107 107 108 108 struct zcdn_device; 109 109 110 - static struct class *zcrypt_class; 110 + static void zcdn_device_release(struct device *dev); 111 + static const struct class zcrypt_class = { 112 + .name = ZCRYPT_NAME, 113 + .dev_release = zcdn_device_release, 114 + }; 111 115 static dev_t zcrypt_devt; 112 116 static struct cdev zcrypt_cdev; 113 117 ··· 134 130 */ 135 131 static inline struct zcdn_device *find_zcdndev_by_name(const char *name) 136 132 { 137 - struct device *dev = class_find_device_by_name(zcrypt_class, name); 133 + struct device *dev = class_find_device_by_name(&zcrypt_class, name); 138 134 139 135 return dev ? to_zcdn_dev(dev) : NULL; 140 136 } ··· 146 142 */ 147 143 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) 148 144 { 149 - struct device *dev = class_find_device_by_devt(zcrypt_class, devt); 145 + struct device *dev = class_find_device_by_devt(&zcrypt_class, devt); 150 146 151 147 return dev ? to_zcdn_dev(dev) : NULL; 152 148 } ··· 400 396 goto unlockout; 401 397 } 402 398 zcdndev->device.release = zcdn_device_release; 403 - zcdndev->device.class = zcrypt_class; 399 + zcdndev->device.class = &zcrypt_class; 404 400 zcdndev->device.devt = devt; 405 401 zcdndev->device.groups = zcdn_dev_attr_groups; 406 402 if (name[0]) ··· 577 573 { 578 574 if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner)) 579 575 return NULL; 576 + zcrypt_card_get(zc); 580 577 zcrypt_queue_get(zq); 581 578 get_device(&zq->queue->ap_dev.device); 582 579 atomic_add(weight, &zc->load); ··· 597 592 atomic_sub(weight, &zq->load); 598 593 put_device(&zq->queue->ap_dev.device); 599 594 zcrypt_queue_put(zq); 595 + zcrypt_card_put(zc); 600 596 module_put(mod); 601 597 } 602 598 ··· 2081 2075 int rc; 2082 2076 2083 2077 /* create a new class 'zcrypt' */ 2084 - zcrypt_class = class_create(ZCRYPT_NAME); 2085 - if (IS_ERR(zcrypt_class)) { 2086 - rc = PTR_ERR(zcrypt_class); 2087 - goto out_class_create_failed; 2088 - } 2089 - 
zcrypt_class->dev_release = zcdn_device_release; 2078 + rc = class_register(&zcrypt_class); 2079 + if (rc) 2080 + goto out_class_register_failed; 2090 2081 2091 2082 /* alloc device minor range */ 2092 2083 rc = alloc_chrdev_region(&zcrypt_devt, ··· 2099 2096 goto out_cdev_add_failed; 2100 2097 2101 2098 /* need some class specific sysfs attributes */ 2102 - rc = class_create_file(zcrypt_class, &class_attr_zcdn_create); 2099 + rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create); 2103 2100 if (rc) 2104 2101 goto out_class_create_file_1_failed; 2105 - rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy); 2102 + rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy); 2106 2103 if (rc) 2107 2104 goto out_class_create_file_2_failed; 2108 2105 2109 2106 return 0; 2110 2107 2111 2108 out_class_create_file_2_failed: 2112 - class_remove_file(zcrypt_class, &class_attr_zcdn_create); 2109 + class_remove_file(&zcrypt_class, &class_attr_zcdn_create); 2113 2110 out_class_create_file_1_failed: 2114 2111 cdev_del(&zcrypt_cdev); 2115 2112 out_cdev_add_failed: 2116 2113 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2117 2114 out_alloc_chrdev_failed: 2118 - class_destroy(zcrypt_class); 2119 - out_class_create_failed: 2115 + class_unregister(&zcrypt_class); 2116 + out_class_register_failed: 2120 2117 return rc; 2121 2118 } 2122 2119 2123 2120 static void zcdn_exit(void) 2124 2121 { 2125 - class_remove_file(zcrypt_class, &class_attr_zcdn_create); 2126 - class_remove_file(zcrypt_class, &class_attr_zcdn_destroy); 2122 + class_remove_file(&zcrypt_class, &class_attr_zcdn_create); 2123 + class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy); 2127 2124 zcdn_destroy_all(); 2128 2125 cdev_del(&zcrypt_cdev); 2129 2126 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2130 - class_destroy(zcrypt_class); 2127 + class_unregister(&zcrypt_class); 2131 2128 } 2132 2129 2133 2130 /*
+2 -2
drivers/s390/net/ctcm_fsms.c
··· 1325 1325 clear_normalized_cda(&ch->ccw[1]); 1326 1326 1327 1327 CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", 1328 - (void *)(unsigned long)ch->ccw[1].cda, 1328 + (void *)(u64)dma32_to_u32(ch->ccw[1].cda), 1329 1329 ch->trans_skb->data); 1330 1330 ch->ccw[1].count = ch->max_bufsize; 1331 1331 ··· 1340 1340 } 1341 1341 1342 1342 CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", 1343 - (void *)(unsigned long)ch->ccw[1].cda, 1343 + (void *)(u64)dma32_to_u32(ch->ccw[1].cda), 1344 1344 ch->trans_skb->data); 1345 1345 1346 1346 ch->ccw[1].count = ch->trans_skb->len;
+1 -1
drivers/s390/net/ctcm_main.c
··· 1389 1389 ch->ccw[15].cmd_code = CCW_CMD_WRITE; 1390 1390 ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1391 1391 ch->ccw[15].count = TH_HEADER_LENGTH; 1392 - ch->ccw[15].cda = virt_to_phys(ch->discontact_th); 1392 + ch->ccw[15].cda = virt_to_dma32(ch->discontact_th); 1393 1393 1394 1394 ch->ccw[16].cmd_code = CCW_CMD_NOOP; 1395 1395 ch->ccw[16].flags = CCW_FLAG_SLI;
+10 -10
drivers/s390/net/ctcm_mpc.c
··· 1708 1708 ch->ccw[9].cmd_code = CCW_CMD_WRITE; 1709 1709 ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1710 1710 ch->ccw[9].count = TH_HEADER_LENGTH; 1711 - ch->ccw[9].cda = virt_to_phys(ch->xid_th); 1711 + ch->ccw[9].cda = virt_to_dma32(ch->xid_th); 1712 1712 1713 1713 if (ch->xid == NULL) 1714 1714 goto done; 1715 1715 ch->ccw[10].cmd_code = CCW_CMD_WRITE; 1716 1716 ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1717 1717 ch->ccw[10].count = XID2_LENGTH; 1718 - ch->ccw[10].cda = virt_to_phys(ch->xid); 1718 + ch->ccw[10].cda = virt_to_dma32(ch->xid); 1719 1719 1720 1720 ch->ccw[11].cmd_code = CCW_CMD_READ; 1721 1721 ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1722 1722 ch->ccw[11].count = TH_HEADER_LENGTH; 1723 - ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th); 1723 + ch->ccw[11].cda = virt_to_dma32(ch->rcvd_xid_th); 1724 1724 1725 1725 ch->ccw[12].cmd_code = CCW_CMD_READ; 1726 1726 ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1727 1727 ch->ccw[12].count = XID2_LENGTH; 1728 - ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid); 1728 + ch->ccw[12].cda = virt_to_dma32(ch->rcvd_xid); 1729 1729 1730 1730 ch->ccw[13].cmd_code = CCW_CMD_READ; 1731 - ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id); 1731 + ch->ccw[13].cda = virt_to_dma32(ch->rcvd_xid_id); 1732 1732 1733 1733 } else { /* side == YSIDE : mpc_action_yside_xid */ 1734 1734 ch->ccw[9].cmd_code = CCW_CMD_READ; 1735 1735 ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1736 1736 ch->ccw[9].count = TH_HEADER_LENGTH; 1737 - ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th); 1737 + ch->ccw[9].cda = virt_to_dma32(ch->rcvd_xid_th); 1738 1738 1739 1739 ch->ccw[10].cmd_code = CCW_CMD_READ; 1740 1740 ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1741 1741 ch->ccw[10].count = XID2_LENGTH; 1742 - ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid); 1742 + ch->ccw[10].cda = virt_to_dma32(ch->rcvd_xid); 1743 1743 1744 1744 if (ch->xid_th == NULL) 1745 1745 goto done; 1746 1746 ch->ccw[11].cmd_code = CCW_CMD_WRITE; 1747 1747 
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1748 1748 ch->ccw[11].count = TH_HEADER_LENGTH; 1749 - ch->ccw[11].cda = virt_to_phys(ch->xid_th); 1749 + ch->ccw[11].cda = virt_to_dma32(ch->xid_th); 1750 1750 1751 1751 if (ch->xid == NULL) 1752 1752 goto done; 1753 1753 ch->ccw[12].cmd_code = CCW_CMD_WRITE; 1754 1754 ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 1755 1755 ch->ccw[12].count = XID2_LENGTH; 1756 - ch->ccw[12].cda = virt_to_phys(ch->xid); 1756 + ch->ccw[12].cda = virt_to_dma32(ch->xid); 1757 1757 1758 1758 if (ch->xid_id == NULL) 1759 1759 goto done; 1760 1760 ch->ccw[13].cmd_code = CCW_CMD_WRITE; 1761 - ch->ccw[13].cda = virt_to_phys(ch->xid_id); 1761 + ch->ccw[13].cda = virt_to_dma32(ch->xid_id); 1762 1762 1763 1763 } 1764 1764 ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+5 -7
drivers/s390/net/lcs.c
··· 218 218 * we do not need to do set_normalized_cda. 219 219 */ 220 220 card->read.ccws[cnt].cda = 221 - (__u32)virt_to_phys(card->read.iob[cnt].data); 221 + virt_to_dma32(card->read.iob[cnt].data); 222 222 ((struct lcs_header *) 223 223 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET; 224 224 card->read.iob[cnt].callback = lcs_get_frames_cb; ··· 230 230 card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND; 231 231 /* Last ccw is a tic (transfer in channel). */ 232 232 card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; 233 - card->read.ccws[LCS_NUM_BUFFS].cda = 234 - (__u32)virt_to_phys(card->read.ccws); 233 + card->read.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->read.ccws); 235 234 /* Setg initial state of the read channel. */ 236 235 card->read.state = LCS_CH_STATE_INIT; 237 236 ··· 272 273 * we do not need to do set_normalized_cda. 273 274 */ 274 275 card->write.ccws[cnt].cda = 275 - (__u32)virt_to_phys(card->write.iob[cnt].data); 276 + virt_to_dma32(card->write.iob[cnt].data); 276 277 } 277 278 /* Last ccw is a tic (transfer in channel). */ 278 279 card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; 279 - card->write.ccws[LCS_NUM_BUFFS].cda = 280 - (__u32)virt_to_phys(card->write.ccws); 280 + card->write.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->write.ccws); 281 281 /* Set initial state of the write channel. */ 282 282 card->read.state = LCS_CH_STATE_INIT; 283 283 ··· 1397 1399 if ((channel->state != LCS_CH_STATE_INIT) && 1398 1400 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && 1399 1401 (irb->scsw.cmd.cpa != 0)) { 1400 - index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) 1402 + index = (struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) 1401 1403 - channel->ccws; 1402 1404 if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || 1403 1405 (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
+12 -12
drivers/s390/net/qeth_core_main.c
··· 426 426 ccw->cmd_code = cmd_code; 427 427 ccw->flags = flags | CCW_FLAG_SLI; 428 428 ccw->count = len; 429 - ccw->cda = (__u32)virt_to_phys(data); 429 + ccw->cda = virt_to_dma32(data); 430 430 } 431 431 432 432 static int __qeth_issue_next_read(struct qeth_card *card) ··· 1359 1359 qeth_tx_complete_buf(queue, buf, error, budget); 1360 1360 1361 1361 for (i = 0; i < queue->max_elements; ++i) { 1362 - void *data = phys_to_virt(buf->buffer->element[i].addr); 1362 + void *data = dma64_to_virt(buf->buffer->element[i].addr); 1363 1363 1364 1364 if (__test_and_clear_bit(i, buf->from_kmem_cache) && data) 1365 1365 kmem_cache_free(qeth_core_header_cache, data); ··· 1404 1404 for (i = 0; 1405 1405 i < aob->sb_count && i < queue->max_elements; 1406 1406 i++) { 1407 - void *data = phys_to_virt(aob->sba[i]); 1407 + void *data = dma64_to_virt(aob->sba[i]); 1408 1408 1409 1409 if (test_bit(i, buf->from_kmem_cache) && data) 1410 1410 kmem_cache_free(qeth_core_header_cache, ··· 2918 2918 */ 2919 2919 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2920 2920 buf->buffer->element[i].length = PAGE_SIZE; 2921 - buf->buffer->element[i].addr = 2922 - page_to_phys(pool_entry->elements[i]); 2921 + buf->buffer->element[i].addr = u64_to_dma64( 2922 + page_to_phys(pool_entry->elements[i])); 2923 2923 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2924 2924 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; 2925 2925 else ··· 3765 3765 3766 3766 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && 3767 3767 buffer->element[e].addr) { 3768 - unsigned long phys_aob_addr = buffer->element[e].addr; 3768 + dma64_t phys_aob_addr = buffer->element[e].addr; 3769 3769 3770 - qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr)); 3770 + qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr)); 3771 3771 ++e; 3772 3772 } 3773 3773 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); ··· 4042 4042 if (hd_len) { 4043 4043 is_first_elem = false; 4044 4044 4045 - 
buffer->element[element].addr = virt_to_phys(hdr); 4045 + buffer->element[element].addr = virt_to_dma64(hdr); 4046 4046 buffer->element[element].length = hd_len; 4047 4047 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; 4048 4048 ··· 4063 4063 elem_length = min_t(unsigned int, length, 4064 4064 PAGE_SIZE - offset_in_page(data)); 4065 4065 4066 - buffer->element[element].addr = virt_to_phys(data); 4066 + buffer->element[element].addr = virt_to_dma64(data); 4067 4067 buffer->element[element].length = elem_length; 4068 4068 length -= elem_length; 4069 4069 if (is_first_elem) { ··· 4093 4093 elem_length = min_t(unsigned int, length, 4094 4094 PAGE_SIZE - offset_in_page(data)); 4095 4095 4096 - buffer->element[element].addr = virt_to_phys(data); 4096 + buffer->element[element].addr = virt_to_dma64(data); 4097 4097 buffer->element[element].length = elem_length; 4098 4098 buffer->element[element].eflags = 4099 4099 SBAL_EFLAGS_MIDDLE_FRAG; ··· 5569 5569 offset = 0; 5570 5570 } 5571 5571 5572 - hdr = phys_to_virt(element->addr) + offset; 5572 + hdr = dma64_to_virt(element->addr) + offset; 5573 5573 offset += sizeof(*hdr); 5574 5574 skb = NULL; 5575 5575 ··· 5661 5661 walk_packet: 5662 5662 while (skb_len) { 5663 5663 int data_len = min(skb_len, (int)(element->length - offset)); 5664 - char *data = phys_to_virt(element->addr) + offset; 5664 + char *data = dma64_to_virt(element->addr) + offset; 5665 5665 5666 5666 skb_len -= data_len; 5667 5667 offset += data_len;
+1 -1
drivers/s390/scsi/zfcp_fsf.c
··· 2742 2742 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { 2743 2743 2744 2744 sbale = &sbal->element[idx]; 2745 - req_id = sbale->addr; 2745 + req_id = dma64_to_u64(sbale->addr); 2746 2746 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); 2747 2747 2748 2748 if (!fsf_req) {
+2 -2
drivers/s390/scsi/zfcp_qdio.c
··· 125 125 memset(pl, 0, 126 126 ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); 127 127 sbale = qdio->res_q[idx]->element; 128 - req_id = sbale->addr; 128 + req_id = dma64_to_u64(sbale->addr); 129 129 scount = min(sbale->scount + 1, 130 130 ZFCP_QDIO_MAX_SBALS_PER_REQ + 1); 131 131 /* incl. signaling SBAL */ ··· 256 256 q_req->sbal_number); 257 257 return -EINVAL; 258 258 } 259 - sbale->addr = sg_phys(sg); 259 + sbale->addr = u64_to_dma64(sg_phys(sg)); 260 260 sbale->length = sg->length; 261 261 } 262 262 return 0;
+3 -3
drivers/s390/scsi/zfcp_qdio.h
··· 129 129 % QDIO_MAX_BUFFERS_PER_Q; 130 130 131 131 sbale = zfcp_qdio_sbale_req(qdio, q_req); 132 - sbale->addr = req_id; 132 + sbale->addr = u64_to_dma64(req_id); 133 133 sbale->eflags = 0; 134 134 sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype; 135 135 136 136 if (unlikely(!data)) 137 137 return; 138 138 sbale++; 139 - sbale->addr = virt_to_phys(data); 139 + sbale->addr = virt_to_dma64(data); 140 140 sbale->length = len; 141 141 } 142 142 ··· 159 159 BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1); 160 160 q_req->sbale_curr++; 161 161 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 162 - sbale->addr = virt_to_phys(data); 162 + sbale->addr = virt_to_dma64(data); 163 163 sbale->length = len; 164 164 } 165 165
+102 -68
drivers/s390/virtio/virtio_ccw.c
··· 72 72 unsigned int config_ready; 73 73 void *airq_info; 74 74 struct vcdev_dma_area *dma_area; 75 + dma32_t dma_area_addr; 75 76 }; 76 77 77 78 static inline unsigned long *indicators(struct virtio_ccw_device *vcdev) ··· 85 84 return &vcdev->dma_area->indicators2; 86 85 } 87 86 87 + /* Spec stipulates a 64 bit address */ 88 + static inline dma64_t indicators_dma(struct virtio_ccw_device *vcdev) 89 + { 90 + u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr); 91 + 92 + return dma64_add(u64_to_dma64(dma_area_addr), 93 + offsetof(struct vcdev_dma_area, indicators)); 94 + } 95 + 96 + /* Spec stipulates a 64 bit address */ 97 + static inline dma64_t indicators2_dma(struct virtio_ccw_device *vcdev) 98 + { 99 + u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr); 100 + 101 + return dma64_add(u64_to_dma64(dma_area_addr), 102 + offsetof(struct vcdev_dma_area, indicators2)); 103 + } 104 + 105 + static inline dma32_t config_block_dma(struct virtio_ccw_device *vcdev) 106 + { 107 + return dma32_add(vcdev->dma_area_addr, 108 + offsetof(struct vcdev_dma_area, config_block)); 109 + } 110 + 111 + static inline dma32_t status_dma(struct virtio_ccw_device *vcdev) 112 + { 113 + return dma32_add(vcdev->dma_area_addr, 114 + offsetof(struct vcdev_dma_area, status)); 115 + } 116 + 88 117 struct vq_info_block_legacy { 89 - __u64 queue; 118 + dma64_t queue; 90 119 __u32 align; 91 120 __u16 index; 92 121 __u16 num; 93 122 } __packed; 94 123 95 124 struct vq_info_block { 96 - __u64 desc; 125 + dma64_t desc; 97 126 __u32 res0; 98 127 __u16 index; 99 128 __u16 num; 100 - __u64 avail; 101 - __u64 used; 129 + dma64_t avail; 130 + dma64_t used; 102 131 } __packed; 103 132 104 133 struct virtio_feature_desc { ··· 137 106 } __packed; 138 107 139 108 struct virtio_thinint_area { 140 - unsigned long summary_indicator; 141 - unsigned long indicator; 109 + dma64_t summary_indicator; 110 + dma64_t indicator; 142 111 u64 bit_nr; 143 112 u8 isc; 144 113 } __packed; ··· 154 123 155 124 struct 
virtio_ccw_vq_info { 156 125 struct virtqueue *vq; 126 + dma32_t info_block_addr; 157 127 int num; 158 128 union { 159 129 struct vq_info_block s; ··· 186 154 static inline u8 *get_summary_indicator(struct airq_info *info) 187 155 { 188 156 return summary_indicators + info->summary_indicator_idx; 157 + } 158 + 159 + static inline dma64_t get_summary_indicator_dma(struct airq_info *info) 160 + { 161 + return virt_to_dma64(get_summary_indicator(info)); 189 162 } 190 163 191 164 #define CCW_CMD_SET_VQ 0x13 ··· 297 260 return info; 298 261 } 299 262 300 - static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, 301 - u64 *first, void **airq_info) 263 + static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs, 264 + u64 *first, void **airq_info) 302 265 { 303 266 int i, j; 304 267 struct airq_info *info; 305 - unsigned long indicator_addr = 0; 268 + unsigned long *indicator_addr = NULL; 306 269 unsigned long bit, flags; 307 270 308 271 for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) { ··· 312 275 info = airq_areas[i]; 313 276 mutex_unlock(&airq_areas_lock); 314 277 if (!info) 315 - return 0; 278 + return NULL; 316 279 write_lock_irqsave(&info->lock, flags); 317 280 bit = airq_iv_alloc(info->aiv, nvqs); 318 281 if (bit == -1UL) { ··· 322 285 } 323 286 *first = bit; 324 287 *airq_info = info; 325 - indicator_addr = (unsigned long)info->aiv->vector; 288 + indicator_addr = info->aiv->vector; 326 289 for (j = 0; j < nvqs; j++) { 327 290 airq_iv_set_ptr(info->aiv, bit + j, 328 291 (unsigned long)vqs[j]); ··· 385 348 struct ccw1 *ccw) 386 349 { 387 350 int ret; 388 - unsigned long *indicatorp = NULL; 389 351 struct virtio_thinint_area *thinint_area = NULL; 390 352 struct airq_info *airq_info = vcdev->airq_info; 353 + dma64_t *indicatorp = NULL; 391 354 392 355 if (vcdev->is_thinint) { 393 356 thinint_area = ccw_device_dma_zalloc(vcdev->cdev, 394 - sizeof(*thinint_area)); 357 + sizeof(*thinint_area), 358 + &ccw->cda); 395 359 if 
(!thinint_area) 396 360 return; 397 361 thinint_area->summary_indicator = 398 - (unsigned long) get_summary_indicator(airq_info); 362 + get_summary_indicator_dma(airq_info); 399 363 thinint_area->isc = VIRTIO_AIRQ_ISC; 400 364 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; 401 365 ccw->count = sizeof(*thinint_area); 402 - ccw->cda = (__u32)virt_to_phys(thinint_area); 403 366 } else { 404 367 /* payload is the address of the indicators */ 405 368 indicatorp = ccw_device_dma_zalloc(vcdev->cdev, 406 - sizeof(indicators(vcdev))); 369 + sizeof(*indicatorp), 370 + &ccw->cda); 407 371 if (!indicatorp) 408 372 return; 409 373 *indicatorp = 0; 410 374 ccw->cmd_code = CCW_CMD_SET_IND; 411 - ccw->count = sizeof(indicators(vcdev)); 412 - ccw->cda = (__u32)virt_to_phys(indicatorp); 375 + ccw->count = sizeof(*indicatorp); 413 376 } 414 377 /* Deregister indicators from host. */ 415 378 *indicators(vcdev) = 0; ··· 423 386 "Failed to deregister indicators (%d)\n", ret); 424 387 else if (vcdev->is_thinint) 425 388 virtio_ccw_drop_indicators(vcdev); 426 - ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev))); 389 + ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(*indicatorp)); 427 390 ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area)); 428 391 } 429 392 ··· 463 426 ccw->cmd_code = CCW_CMD_READ_VQ_CONF; 464 427 ccw->flags = 0; 465 428 ccw->count = sizeof(struct vq_config_block); 466 - ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->config_block); 429 + ccw->cda = config_block_dma(vcdev); 467 430 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); 468 431 if (ret) 469 432 return ret; ··· 500 463 } 501 464 ccw->cmd_code = CCW_CMD_SET_VQ; 502 465 ccw->flags = 0; 503 - ccw->cda = (__u32)virt_to_phys(info->info_block); 466 + ccw->cda = info->info_block_addr; 504 467 ret = ccw_io_helper(vcdev, ccw, 505 468 VIRTIO_CCW_DOING_SET_VQ | index); 506 469 /* ··· 523 486 struct ccw1 *ccw; 524 487 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 
525 488 526 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 489 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 527 490 if (!ccw) 528 491 return; 529 492 ··· 562 525 goto out_err; 563 526 } 564 527 info->info_block = ccw_device_dma_zalloc(vcdev->cdev, 565 - sizeof(*info->info_block)); 528 + sizeof(*info->info_block), 529 + &info->info_block_addr); 566 530 if (!info->info_block) { 567 531 dev_warn(&vcdev->cdev->dev, "no info block\n"); 568 532 err = -ENOMEM; ··· 594 556 /* Register it with the host. */ 595 557 queue = virtqueue_get_desc_addr(vq); 596 558 if (vcdev->revision == 0) { 597 - info->info_block->l.queue = queue; 559 + info->info_block->l.queue = u64_to_dma64(queue); 598 560 info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN; 599 561 info->info_block->l.index = i; 600 562 info->info_block->l.num = info->num; 601 563 ccw->count = sizeof(info->info_block->l); 602 564 } else { 603 - info->info_block->s.desc = queue; 565 + info->info_block->s.desc = u64_to_dma64(queue); 604 566 info->info_block->s.index = i; 605 567 info->info_block->s.num = info->num; 606 - info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq); 607 - info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq); 568 + info->info_block->s.avail = u64_to_dma64(virtqueue_get_avail_addr(vq)); 569 + info->info_block->s.used = u64_to_dma64(virtqueue_get_used_addr(vq)); 608 570 ccw->count = sizeof(info->info_block->s); 609 571 } 610 572 ccw->cmd_code = CCW_CMD_SET_VQ; 611 573 ccw->flags = 0; 612 - ccw->cda = (__u32)virt_to_phys(info->info_block); 574 + ccw->cda = info->info_block_addr; 613 575 err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i); 614 576 if (err) { 615 577 dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n"); ··· 643 605 { 644 606 int ret; 645 607 struct virtio_thinint_area *thinint_area = NULL; 646 - unsigned long indicator_addr; 608 + unsigned long *indicator_addr; 647 609 struct airq_info *info; 648 610 649 611 thinint_area = 
ccw_device_dma_zalloc(vcdev->cdev, 650 - sizeof(*thinint_area)); 612 + sizeof(*thinint_area), 613 + &ccw->cda); 651 614 if (!thinint_area) { 652 615 ret = -ENOMEM; 653 616 goto out; ··· 661 622 ret = -ENOSPC; 662 623 goto out; 663 624 } 664 - thinint_area->indicator = virt_to_phys((void *)indicator_addr); 625 + thinint_area->indicator = virt_to_dma64(indicator_addr); 665 626 info = vcdev->airq_info; 666 - thinint_area->summary_indicator = 667 - virt_to_phys(get_summary_indicator(info)); 627 + thinint_area->summary_indicator = get_summary_indicator_dma(info); 668 628 thinint_area->isc = VIRTIO_AIRQ_ISC; 669 629 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; 670 630 ccw->flags = CCW_FLAG_SLI; 671 631 ccw->count = sizeof(*thinint_area); 672 - ccw->cda = (__u32)virt_to_phys(thinint_area); 673 632 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER); 674 633 if (ret) { 675 634 if (ret == -EOPNOTSUPP) { ··· 695 658 struct irq_affinity *desc) 696 659 { 697 660 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 698 - unsigned long *indicatorp = NULL; 661 + dma64_t *indicatorp = NULL; 699 662 int ret, i, queue_idx = 0; 700 663 struct ccw1 *ccw; 701 664 702 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 665 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 703 666 if (!ccw) 704 667 return -ENOMEM; 705 668 ··· 724 687 * the address of the indicators. 
725 688 */ 726 689 indicatorp = ccw_device_dma_zalloc(vcdev->cdev, 727 - sizeof(indicators(vcdev))); 690 + sizeof(*indicatorp), 691 + &ccw->cda); 728 692 if (!indicatorp) 729 693 goto out; 730 - *indicatorp = (unsigned long) indicators(vcdev); 694 + *indicatorp = indicators_dma(vcdev); 731 695 if (vcdev->is_thinint) { 732 696 ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw); 733 697 if (ret) ··· 740 702 *indicators(vcdev) = 0; 741 703 ccw->cmd_code = CCW_CMD_SET_IND; 742 704 ccw->flags = 0; 743 - ccw->count = sizeof(indicators(vcdev)); 744 - ccw->cda = (__u32)virt_to_phys(indicatorp); 705 + ccw->count = sizeof(*indicatorp); 745 706 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND); 746 707 if (ret) 747 708 goto out; 748 709 } 749 710 /* Register indicators2 with host for config changes */ 750 - *indicatorp = (unsigned long) indicators2(vcdev); 711 + *indicatorp = indicators2_dma(vcdev); 751 712 *indicators2(vcdev) = 0; 752 713 ccw->cmd_code = CCW_CMD_SET_CONF_IND; 753 714 ccw->flags = 0; 754 - ccw->count = sizeof(indicators2(vcdev)); 755 - ccw->cda = (__u32)virt_to_phys(indicatorp); 715 + ccw->count = sizeof(*indicatorp); 756 716 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND); 757 717 if (ret) 758 718 goto out; 759 719 760 720 if (indicatorp) 761 721 ccw_device_dma_free(vcdev->cdev, indicatorp, 762 - sizeof(indicators(vcdev))); 722 + sizeof(*indicatorp)); 763 723 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); 764 724 return 0; 765 725 out: 766 726 if (indicatorp) 767 727 ccw_device_dma_free(vcdev->cdev, indicatorp, 768 - sizeof(indicators(vcdev))); 728 + sizeof(*indicatorp)); 769 729 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); 770 730 virtio_ccw_del_vqs(vdev); 771 731 return ret; ··· 774 738 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 775 739 struct ccw1 *ccw; 776 740 777 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 741 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 778 742 
if (!ccw) 779 743 return; 780 744 ··· 798 762 u64 rc; 799 763 struct ccw1 *ccw; 800 764 801 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 765 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 802 766 if (!ccw) 803 767 return 0; 804 768 805 - features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features)); 769 + features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features), 770 + &ccw->cda); 806 771 if (!features) { 807 772 rc = 0; 808 773 goto out_free; ··· 813 776 ccw->cmd_code = CCW_CMD_READ_FEAT; 814 777 ccw->flags = 0; 815 778 ccw->count = sizeof(*features); 816 - ccw->cda = (__u32)virt_to_phys(features); 817 779 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT); 818 780 if (ret) { 819 781 rc = 0; ··· 829 793 ccw->cmd_code = CCW_CMD_READ_FEAT; 830 794 ccw->flags = 0; 831 795 ccw->count = sizeof(*features); 832 - ccw->cda = (__u32)virt_to_phys(features); 833 796 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT); 834 797 if (ret == 0) 835 798 rc |= (u64)le32_to_cpu(features->features) << 32; ··· 860 825 return -EINVAL; 861 826 } 862 827 863 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 828 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 864 829 if (!ccw) 865 830 return -ENOMEM; 866 831 867 - features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features)); 832 + features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features), 833 + &ccw->cda); 868 834 if (!features) { 869 835 ret = -ENOMEM; 870 836 goto out_free; ··· 882 846 ccw->cmd_code = CCW_CMD_WRITE_FEAT; 883 847 ccw->flags = 0; 884 848 ccw->count = sizeof(*features); 885 - ccw->cda = (__u32)virt_to_phys(features); 886 849 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT); 887 850 if (ret) 888 851 goto out_free; ··· 895 860 ccw->cmd_code = CCW_CMD_WRITE_FEAT; 896 861 ccw->flags = 0; 897 862 ccw->count = sizeof(*features); 898 - ccw->cda = (__u32)virt_to_phys(features); 899 863 ret = ccw_io_helper(vcdev, ccw, 
VIRTIO_CCW_DOING_WRITE_FEAT); 900 864 901 865 out_free: ··· 913 879 void *config_area; 914 880 unsigned long flags; 915 881 916 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 882 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 917 883 if (!ccw) 918 884 return; 919 885 920 886 config_area = ccw_device_dma_zalloc(vcdev->cdev, 921 - VIRTIO_CCW_CONFIG_SIZE); 887 + VIRTIO_CCW_CONFIG_SIZE, 888 + &ccw->cda); 922 889 if (!config_area) 923 890 goto out_free; 924 891 ··· 927 892 ccw->cmd_code = CCW_CMD_READ_CONF; 928 893 ccw->flags = 0; 929 894 ccw->count = offset + len; 930 - ccw->cda = (__u32)virt_to_phys(config_area); 931 895 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG); 932 896 if (ret) 933 897 goto out_free; ··· 953 919 void *config_area; 954 920 unsigned long flags; 955 921 956 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 922 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 957 923 if (!ccw) 958 924 return; 959 925 960 926 config_area = ccw_device_dma_zalloc(vcdev->cdev, 961 - VIRTIO_CCW_CONFIG_SIZE); 927 + VIRTIO_CCW_CONFIG_SIZE, 928 + &ccw->cda); 962 929 if (!config_area) 963 930 goto out_free; 964 931 ··· 974 939 ccw->cmd_code = CCW_CMD_WRITE_CONF; 975 940 ccw->flags = 0; 976 941 ccw->count = offset + len; 977 - ccw->cda = (__u32)virt_to_phys(config_area); 978 942 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG); 979 943 980 944 out_free: ··· 990 956 if (vcdev->revision < 2) 991 957 return vcdev->dma_area->status; 992 958 993 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 959 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 994 960 if (!ccw) 995 961 return old_status; 996 962 997 963 ccw->cmd_code = CCW_CMD_READ_STATUS; 998 964 ccw->flags = 0; 999 965 ccw->count = sizeof(vcdev->dma_area->status); 1000 - ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status); 966 + ccw->cda = status_dma(vcdev); 1001 967 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS); 
1002 968 /* 1003 969 * If the channel program failed (should only happen if the device ··· 1017 983 struct ccw1 *ccw; 1018 984 int ret; 1019 985 1020 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 986 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 1021 987 if (!ccw) 1022 988 return; 1023 989 ··· 1026 992 ccw->cmd_code = CCW_CMD_WRITE_STATUS; 1027 993 ccw->flags = 0; 1028 994 ccw->count = sizeof(status); 1029 - ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status); 1030 995 /* We use ssch for setting the status which is a serializing 1031 996 * instruction that guarantees the memory writes have 1032 997 * completed before ssch. 1033 998 */ 999 + ccw->cda = status_dma(vcdev); 1034 1000 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS); 1035 1001 /* Write failed? We assume status is unchanged. */ 1036 1002 if (ret) ··· 1312 1278 struct ccw1 *ccw; 1313 1279 int ret; 1314 1280 1315 - ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw)); 1281 + ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL); 1316 1282 if (!ccw) 1317 1283 return -ENOMEM; 1318 - rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev)); 1284 + rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev), &ccw->cda); 1319 1285 if (!rev) { 1320 1286 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); 1321 1287 return -ENOMEM; ··· 1325 1291 ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV; 1326 1292 ccw->flags = 0; 1327 1293 ccw->count = sizeof(*rev); 1328 - ccw->cda = (__u32)virt_to_phys(rev); 1329 1294 1330 1295 vcdev->revision = VIRTIO_CCW_REV_MAX; 1331 1296 do { ··· 1366 1333 vcdev->vdev.dev.parent = &cdev->dev; 1367 1334 vcdev->cdev = cdev; 1368 1335 vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev, 1369 - sizeof(*vcdev->dma_area)); 1336 + sizeof(*vcdev->dma_area), 1337 + &vcdev->dma_area_addr); 1370 1338 if (!vcdev->dma_area) { 1371 1339 ret = -ENOMEM; 1372 1340 goto out_free;
+2 -1
include/net/iucv/iucv.h
··· 30 30 31 31 #include <linux/types.h> 32 32 #include <linux/slab.h> 33 + #include <asm/dma-types.h> 33 34 #include <asm/debug.h> 34 35 35 36 /* ··· 77 76 * and iucv_message_reply if IUCV_IPBUFLST or IUCV_IPANSLST are used. 78 77 */ 79 78 struct iucv_array { 80 - u32 address; 79 + dma32_t address; 81 80 u32 length; 82 81 } __attribute__ ((aligned (8))); 83 82
+4 -4
net/iucv/af_iucv.c
··· 1060 1060 int i; 1061 1061 1062 1062 /* skip iucv_array lying in the headroom */ 1063 - iba[0].address = (u32)virt_to_phys(skb->data); 1063 + iba[0].address = virt_to_dma32(skb->data); 1064 1064 iba[0].length = (u32)skb_headlen(skb); 1065 1065 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1066 1066 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1067 1067 1068 - iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag)); 1068 + iba[i + 1].address = virt_to_dma32(skb_frag_address(frag)); 1069 1069 iba[i + 1].length = (u32)skb_frag_size(frag); 1070 1070 } 1071 1071 err = pr_iucv->message_send(iucv->path, &txmsg, ··· 1161 1161 struct iucv_array *iba = (struct iucv_array *)skb->head; 1162 1162 int i; 1163 1163 1164 - iba[0].address = (u32)virt_to_phys(skb->data); 1164 + iba[0].address = virt_to_dma32(skb->data); 1165 1165 iba[0].length = (u32)skb_headlen(skb); 1166 1166 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1167 1167 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1168 1168 1169 - iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag)); 1169 + iba[i + 1].address = virt_to_dma32(skb_frag_address(frag)); 1170 1170 iba[i + 1].length = (u32)skb_frag_size(frag); 1171 1171 } 1172 1172 rc = pr_iucv->message_receive(path, msg,
+11 -12
net/iucv/iucv.c
··· 210 210 u8 iprmmsg[8]; 211 211 u32 ipsrccls; 212 212 u32 ipmsgtag; 213 - u32 ipbfadr2; 213 + dma32_t ipbfadr2; 214 214 u32 ipbfln2f; 215 215 u32 res; 216 216 } __attribute__ ((packed,aligned(8))); ··· 226 226 u8 iprcode; 227 227 u32 ipmsgid; 228 228 u32 iptrgcls; 229 - u32 ipbfadr1; 229 + dma32_t ipbfadr1; 230 230 u32 ipbfln1f; 231 231 u32 ipsrccls; 232 232 u32 ipmsgtag; 233 - u32 ipbfadr2; 233 + dma32_t ipbfadr2; 234 234 u32 ipbfln2f; 235 235 u32 res; 236 236 } __attribute__ ((packed,aligned(8))); ··· 432 432 /* Declare interrupt buffer. */ 433 433 parm = iucv_param_irq[cpu]; 434 434 memset(parm, 0, sizeof(union iucv_param)); 435 - parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); 435 + parm->db.ipbfadr1 = virt_to_dma32(iucv_irq_data[cpu]); 436 436 rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); 437 437 if (rc) { 438 438 char *err = "Unknown"; ··· 1081 1081 size = (size < 8) ? size : 8; 1082 1082 for (array = buffer; size > 0; array++) { 1083 1083 copy = min_t(size_t, size, array->length); 1084 - memcpy((u8 *)(addr_t) array->address, 1085 - rmmsg, copy); 1084 + memcpy(dma32_to_virt(array->address), rmmsg, copy); 1086 1085 rmmsg += copy; 1087 1086 size -= copy; 1088 1087 } ··· 1123 1124 1124 1125 parm = iucv_param[smp_processor_id()]; 1125 1126 memset(parm, 0, sizeof(union iucv_param)); 1126 - parm->db.ipbfadr1 = (u32)virt_to_phys(buffer); 1127 + parm->db.ipbfadr1 = virt_to_dma32(buffer); 1127 1128 parm->db.ipbfln1f = (u32) size; 1128 1129 parm->db.ipmsgid = msg->id; 1129 1130 parm->db.ippathid = path->pathid; ··· 1241 1242 parm->dpl.iptrgcls = msg->class; 1242 1243 memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); 1243 1244 } else { 1244 - parm->db.ipbfadr1 = (u32)virt_to_phys(reply); 1245 + parm->db.ipbfadr1 = virt_to_dma32(reply); 1245 1246 parm->db.ipbfln1f = (u32) size; 1246 1247 parm->db.ippathid = path->pathid; 1247 1248 parm->db.ipflags1 = flags; ··· 1293 1294 parm->dpl.ipmsgtag = msg->tag; 1294 1295 memcpy(parm->dpl.iprmmsg, buffer, 8); 
1295 1296 } else { 1296 - parm->db.ipbfadr1 = (u32)virt_to_phys(buffer); 1297 + parm->db.ipbfadr1 = virt_to_dma32(buffer); 1297 1298 parm->db.ipbfln1f = (u32) size; 1298 1299 parm->db.ippathid = path->pathid; 1299 1300 parm->db.ipflags1 = flags | IUCV_IPNORPY; ··· 1378 1379 parm->dpl.iptrgcls = msg->class; 1379 1380 parm->dpl.ipsrccls = srccls; 1380 1381 parm->dpl.ipmsgtag = msg->tag; 1381 - parm->dpl.ipbfadr2 = (u32)virt_to_phys(answer); 1382 + parm->dpl.ipbfadr2 = virt_to_dma32(answer); 1382 1383 parm->dpl.ipbfln2f = (u32) asize; 1383 1384 memcpy(parm->dpl.iprmmsg, buffer, 8); 1384 1385 } else { ··· 1387 1388 parm->db.iptrgcls = msg->class; 1388 1389 parm->db.ipsrccls = srccls; 1389 1390 parm->db.ipmsgtag = msg->tag; 1390 - parm->db.ipbfadr1 = (u32)virt_to_phys(buffer); 1391 + parm->db.ipbfadr1 = virt_to_dma32(buffer); 1391 1392 parm->db.ipbfln1f = (u32) size; 1392 - parm->db.ipbfadr2 = (u32)virt_to_phys(answer); 1393 + parm->db.ipbfadr2 = virt_to_dma32(answer); 1393 1394 parm->db.ipbfln2f = (u32) asize; 1394 1395 } 1395 1396 rc = iucv_call_b2f0(IUCV_SEND, parm);