Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rapidio/mport_cdev: fix uapi type definitions

Fix problems in uapi definitions reported by Gabriel Laskar (see
https://lkml.org/lkml/2016/4/5/205 for details):

- move public header file rio_mport_cdev.h to include/uapi/linux directory
- change types in data structures passed as IOCTL parameters
- improve parameter checking in some IOCTL service routines

Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Reported-by: Gabriel Laskar <gabriel@lse.epita.fr>
Tested-by: Barry Wood <barry.wood@idt.com>
Cc: Gabriel Laskar <gabriel@lse.epita.fr>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
Cc: Andre van Herk <andre.van.herk@prodrive-technologies.com>
Cc: Barry Wood <barry.wood@idt.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Alexandre Bounine and committed by Linus Torvalds
4e1016da 4550c4e1

+139 -120
+64 -51
drivers/rapidio/devices/rio_mport_cdev.c
··· 126 126 struct list_head node; 127 127 struct mport_dev *md; 128 128 enum rio_mport_map_dir dir; 129 - u32 rioid; 129 + u16 rioid; 130 130 u64 rio_addr; 131 131 dma_addr_t phys_addr; /* for mmap */ 132 132 void *virt_addr; /* kernel address, for dma_free_coherent */ ··· 137 137 138 138 struct rio_mport_dma_map { 139 139 int valid; 140 - uint64_t length; 140 + u64 length; 141 141 void *vaddr; 142 142 dma_addr_t paddr; 143 143 }; ··· 208 208 struct kfifo event_fifo; 209 209 wait_queue_head_t event_rx_wait; 210 210 spinlock_t fifo_lock; 211 - unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ 211 + u32 event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ 212 212 #ifdef CONFIG_RAPIDIO_DMA_ENGINE 213 213 struct dma_chan *dmach; 214 214 struct list_head async_list; ··· 276 276 return -EFAULT; 277 277 278 278 if ((maint_io.offset % 4) || 279 - (maint_io.length == 0) || (maint_io.length % 4)) 279 + (maint_io.length == 0) || (maint_io.length % 4) || 280 + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) 280 281 return -EINVAL; 281 282 282 283 buffer = vmalloc(maint_io.length); ··· 299 298 offset += 4; 300 299 } 301 300 302 - if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) 301 + if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer, 302 + buffer, maint_io.length))) 303 303 ret = -EFAULT; 304 304 out: 305 305 vfree(buffer); ··· 321 319 return -EFAULT; 322 320 323 321 if ((maint_io.offset % 4) || 324 - (maint_io.length == 0) || (maint_io.length % 4)) 322 + (maint_io.length == 0) || (maint_io.length % 4) || 323 + (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ) 325 324 return -EINVAL; 326 325 327 326 buffer = vmalloc(maint_io.length); ··· 330 327 return -ENOMEM; 331 328 length = maint_io.length; 332 329 333 - if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { 330 + if (unlikely(copy_from_user(buffer, 331 + (void __user *)(uintptr_t)maint_io.buffer, length))) { 334 332 ret = -EFAULT; 335 333 goto out; 336 
334 } ··· 364 360 */ 365 361 static int 366 362 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, 367 - u32 rioid, u64 raddr, u32 size, 363 + u16 rioid, u64 raddr, u32 size, 368 364 dma_addr_t *paddr) 369 365 { 370 366 struct rio_mport *mport = md->mport; ··· 373 369 374 370 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); 375 371 376 - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 372 + map = kzalloc(sizeof(*map), GFP_KERNEL); 377 373 if (map == NULL) 378 374 return -ENOMEM; 379 375 ··· 398 394 399 395 static int 400 396 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, 401 - u32 rioid, u64 raddr, u32 size, 397 + u16 rioid, u64 raddr, u32 size, 402 398 dma_addr_t *paddr) 403 399 { 404 400 struct rio_mport_mapping *map; ··· 437 433 dma_addr_t paddr; 438 434 int ret; 439 435 440 - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 436 + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) 441 437 return -EFAULT; 442 438 443 439 rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", ··· 452 448 453 449 map.handle = paddr; 454 450 455 - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) 451 + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) 456 452 return -EFAULT; 457 453 return 0; 458 454 } ··· 473 469 if (!md->mport->ops->unmap_outb) 474 470 return -EPROTONOSUPPORT; 475 471 476 - if (copy_from_user(&handle, arg, sizeof(u64))) 472 + if (copy_from_user(&handle, arg, sizeof(handle))) 477 473 return -EFAULT; 478 474 479 475 rmcd_debug(OBW, "h=0x%llx", handle); ··· 502 498 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) 503 499 { 504 500 struct mport_dev *md = priv->md; 505 - uint16_t hdid; 501 + u16 hdid; 506 502 507 - if (copy_from_user(&hdid, arg, sizeof(uint16_t))) 503 + if (copy_from_user(&hdid, arg, sizeof(hdid))) 508 504 return -EFAULT; 509 505 510 506 md->mport->host_deviceid = hdid; ··· 524 520 static int maint_comptag_set(struct 
mport_cdev_priv *priv, void __user *arg) 525 521 { 526 522 struct mport_dev *md = priv->md; 527 - uint32_t comptag; 523 + u32 comptag; 528 524 529 - if (copy_from_user(&comptag, arg, sizeof(uint32_t))) 525 + if (copy_from_user(&comptag, arg, sizeof(comptag))) 530 526 return -EFAULT; 531 527 532 528 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); ··· 841 837 * @xfer: data transfer descriptor structure 842 838 */ 843 839 static int 844 - rio_dma_transfer(struct file *filp, uint32_t transfer_mode, 840 + rio_dma_transfer(struct file *filp, u32 transfer_mode, 845 841 enum rio_transfer_sync sync, enum dma_data_direction dir, 846 842 struct rio_transfer_io *xfer) 847 843 { ··· 879 875 unsigned long offset; 880 876 long pinned; 881 877 882 - offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; 878 + offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK; 883 879 nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; 884 880 885 881 page_list = kmalloc_array(nr_pages, ··· 1019 1015 if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) 1020 1016 return -EFAULT; 1021 1017 1022 - if (transaction.count != 1) 1018 + if (transaction.count != 1) /* only single transfer for now */ 1023 1019 return -EINVAL; 1024 1020 1025 1021 if ((transaction.transfer_mode & 1026 1022 priv->md->properties.transfer_mode) == 0) 1027 1023 return -ENODEV; 1028 1024 1029 - transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); 1025 + transfer = vmalloc(transaction.count * sizeof(*transfer)); 1030 1026 if (!transfer) 1031 1027 return -ENOMEM; 1032 1028 1033 - if (unlikely(copy_from_user(transfer, transaction.block, 1034 - transaction.count * sizeof(struct rio_transfer_io)))) { 1029 + if (unlikely(copy_from_user(transfer, 1030 + (void __user *)(uintptr_t)transaction.block, 1031 + transaction.count * sizeof(*transfer)))) { 1035 1032 ret = -EFAULT; 1036 1033 goto out_free; 1037 1034 } ··· 1043 1038 ret = rio_dma_transfer(filp, 
transaction.transfer_mode, 1044 1039 transaction.sync, dir, &transfer[i]); 1045 1040 1046 - if (unlikely(copy_to_user(transaction.block, transfer, 1047 - transaction.count * sizeof(struct rio_transfer_io)))) 1041 + if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, 1042 + transfer, 1043 + transaction.count * sizeof(*transfer)))) 1048 1044 ret = -EFAULT; 1049 1045 1050 1046 out_free: ··· 1135 1129 } 1136 1130 1137 1131 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, 1138 - uint64_t size, struct rio_mport_mapping **mapping) 1132 + u64 size, struct rio_mport_mapping **mapping) 1139 1133 { 1140 1134 struct rio_mport_mapping *map; 1141 1135 1142 - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1136 + map = kzalloc(sizeof(*map), GFP_KERNEL); 1143 1137 if (map == NULL) 1144 1138 return -ENOMEM; 1145 1139 ··· 1171 1165 struct rio_mport_mapping *mapping = NULL; 1172 1166 int ret; 1173 1167 1174 - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) 1168 + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) 1175 1169 return -EFAULT; 1176 1170 1177 1171 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); ··· 1180 1174 1181 1175 map.dma_handle = mapping->phys_addr; 1182 1176 1183 - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { 1177 + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { 1184 1178 mutex_lock(&md->buf_mutex); 1185 1179 kref_put(&mapping->ref, mport_release_mapping); 1186 1180 mutex_unlock(&md->buf_mutex); ··· 1198 1192 int ret = -EFAULT; 1199 1193 struct rio_mport_mapping *map, *_map; 1200 1194 1201 - if (copy_from_user(&handle, arg, sizeof(u64))) 1195 + if (copy_from_user(&handle, arg, sizeof(handle))) 1202 1196 return -EFAULT; 1203 1197 rmcd_debug(EXIT, "filp=%p", filp); 1204 1198 ··· 1248 1242 1249 1243 static int 1250 1244 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, 1251 - u64 raddr, u32 size, 1245 + u64 
raddr, u64 size, 1252 1246 struct rio_mport_mapping **mapping) 1253 1247 { 1254 1248 struct rio_mport *mport = md->mport; 1255 1249 struct rio_mport_mapping *map; 1256 1250 int ret; 1257 1251 1258 - map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); 1252 + /* rio_map_inb_region() accepts u32 size */ 1253 + if (size > 0xffffffff) 1254 + return -EINVAL; 1255 + 1256 + map = kzalloc(sizeof(*map), GFP_KERNEL); 1259 1257 if (map == NULL) 1260 1258 return -ENOMEM; 1261 1259 ··· 1272 1262 1273 1263 if (raddr == RIO_MAP_ANY_ADDR) 1274 1264 raddr = map->phys_addr; 1275 - ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); 1265 + ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); 1276 1266 if (ret < 0) 1277 1267 goto err_map_inb; 1278 1268 ··· 1298 1288 1299 1289 static int 1300 1290 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, 1301 - u64 raddr, u32 size, 1291 + u64 raddr, u64 size, 1302 1292 struct rio_mport_mapping **mapping) 1303 1293 { 1304 1294 struct rio_mport_mapping *map; ··· 1341 1331 1342 1332 if (!md->mport->ops->map_inb) 1343 1333 return -EPROTONOSUPPORT; 1344 - if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) 1334 + if (unlikely(copy_from_user(&map, arg, sizeof(map)))) 1345 1335 return -EFAULT; 1346 1336 1347 1337 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); ··· 1354 1344 map.handle = mapping->phys_addr; 1355 1345 map.rio_addr = mapping->rio_addr; 1356 1346 1357 - if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) { 1347 + if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { 1358 1348 /* Delete mapping if it was created by this request */ 1359 1349 if (ret == 0 && mapping->filp == filp) { 1360 1350 mutex_lock(&md->buf_mutex); ··· 1385 1375 if (!md->mport->ops->unmap_inb) 1386 1376 return -EPROTONOSUPPORT; 1387 1377 1388 - if (copy_from_user(&handle, arg, sizeof(u64))) 1378 + if (copy_from_user(&handle, arg, sizeof(handle))) 1389 1379 return 
-EFAULT; 1390 1380 1391 1381 mutex_lock(&md->buf_mutex); ··· 1411 1401 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) 1412 1402 { 1413 1403 struct mport_dev *md = priv->md; 1414 - uint32_t port_idx = md->mport->index; 1404 + u32 port_idx = md->mport->index; 1415 1405 1416 1406 rmcd_debug(MPORT, "port_index=%d", port_idx); 1417 1407 ··· 1461 1451 handled = 0; 1462 1452 spin_lock(&data->db_lock); 1463 1453 list_for_each_entry(db_filter, &data->doorbells, data_node) { 1464 - if (((db_filter->filter.rioid == 0xffffffff || 1454 + if (((db_filter->filter.rioid == RIO_INVALID_DESTID || 1465 1455 db_filter->filter.rioid == src)) && 1466 1456 info >= db_filter->filter.low && 1467 1457 info <= db_filter->filter.high) { ··· 1534 1524 1535 1525 if (copy_from_user(&filter, arg, sizeof(filter))) 1536 1526 return -EFAULT; 1527 + 1528 + if (filter.low > filter.high) 1529 + return -EINVAL; 1537 1530 1538 1531 spin_lock_irqsave(&priv->md->db_lock, flags); 1539 1532 list_for_each_entry(db_filter, &priv->db_filters, priv_node) { ··· 1750 1737 return -EEXIST; 1751 1738 } 1752 1739 1753 - size = sizeof(struct rio_dev); 1740 + size = sizeof(*rdev); 1754 1741 mport = md->mport; 1755 - destid = (u16)dev_info.destid; 1756 - hopcount = (u8)dev_info.hopcount; 1742 + destid = dev_info.destid; 1743 + hopcount = dev_info.hopcount; 1757 1744 1758 1745 if (rio_mport_read_config_32(mport, destid, hopcount, 1759 1746 RIO_PEF_CAR, &rval)) ··· 1885 1872 do { 1886 1873 rdev = rio_get_comptag(dev_info.comptag, rdev); 1887 1874 if (rdev && rdev->dev.parent == &mport->net->dev && 1888 - rdev->destid == (u16)dev_info.destid && 1889 - rdev->hopcount == (u8)dev_info.hopcount) 1875 + rdev->destid == dev_info.destid && 1876 + rdev->hopcount == dev_info.hopcount) 1890 1877 break; 1891 1878 } while (rdev); 1892 1879 } ··· 2159 2146 return maint_port_idx_get(data, (void __user *)arg); 2160 2147 case RIO_MPORT_GET_PROPERTIES: 2161 2148 md->properties.hdid = md->mport->host_deviceid; 
2162 - if (copy_to_user((void __user *)arg, &(data->md->properties), 2163 - sizeof(data->md->properties))) 2149 + if (copy_to_user((void __user *)arg, &(md->properties), 2150 + sizeof(md->properties))) 2164 2151 return -EFAULT; 2165 2152 return 0; 2166 2153 case RIO_ENABLE_DOORBELL_RANGE: ··· 2172 2159 case RIO_DISABLE_PORTWRITE_RANGE: 2173 2160 return rio_mport_remove_pw_filter(data, (void __user *)arg); 2174 2161 case RIO_SET_EVENT_MASK: 2175 - data->event_mask = arg; 2162 + data->event_mask = (u32)arg; 2176 2163 return 0; 2177 2164 case RIO_GET_EVENT_MASK: 2178 2165 if (copy_to_user((void __user *)arg, &data->event_mask, 2179 - sizeof(data->event_mask))) 2166 + sizeof(u32))) 2180 2167 return -EFAULT; 2181 2168 return 0; 2182 2169 case RIO_MAP_OUTBOUND: ··· 2387 2374 return -EINVAL; 2388 2375 2389 2376 ret = rio_mport_send_doorbell(mport, 2390 - (u16)event.u.doorbell.rioid, 2377 + event.u.doorbell.rioid, 2391 2378 event.u.doorbell.payload); 2392 2379 if (ret < 0) 2393 2380 return ret; ··· 2434 2421 struct mport_dev *md; 2435 2422 struct rio_mport_attr attr; 2436 2423 2437 - md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); 2424 + md = kzalloc(sizeof(*md), GFP_KERNEL); 2438 2425 if (!md) { 2439 2426 rmcd_error("Unable allocate a device object"); 2440 2427 return NULL; ··· 2483 2470 /* The transfer_mode property will be returned through mport query 2484 2471 * interface 2485 2472 */ 2486 - #ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ 2473 + #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */ 2487 2474 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; 2488 2475 #else 2489 2476 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
+75 -69
include/linux/rio_mport_cdev.h include/uapi/linux/rio_mport_cdev.h
··· 39 39 #ifndef _RIO_MPORT_CDEV_H_ 40 40 #define _RIO_MPORT_CDEV_H_ 41 41 42 - #ifndef __user 43 - #define __user 44 - #endif 42 + #include <linux/ioctl.h> 43 + #include <linux/types.h> 45 44 46 45 struct rio_mport_maint_io { 47 - uint32_t rioid; /* destID of remote device */ 48 - uint32_t hopcount; /* hopcount to remote device */ 49 - uint32_t offset; /* offset in register space */ 50 - size_t length; /* length in bytes */ 51 - void __user *buffer; /* data buffer */ 46 + __u16 rioid; /* destID of remote device */ 47 + __u8 hopcount; /* hopcount to remote device */ 48 + __u8 pad0[5]; 49 + __u32 offset; /* offset in register space */ 50 + __u32 length; /* length in bytes */ 51 + __u64 buffer; /* pointer to data buffer */ 52 52 }; 53 53 54 54 /* ··· 66 66 #define RIO_CAP_MAP_INB (1 << 7) 67 67 68 68 struct rio_mport_properties { 69 - uint16_t hdid; 70 - uint8_t id; /* Physical port ID */ 71 - uint8_t index; 72 - uint32_t flags; 73 - uint32_t sys_size; /* Default addressing size */ 74 - uint8_t port_ok; 75 - uint8_t link_speed; 76 - uint8_t link_width; 77 - uint32_t dma_max_sge; 78 - uint32_t dma_max_size; 79 - uint32_t dma_align; 80 - uint32_t transfer_mode; /* Default transfer mode */ 81 - uint32_t cap_sys_size; /* Capable system sizes */ 82 - uint32_t cap_addr_size; /* Capable addressing sizes */ 83 - uint32_t cap_transfer_mode; /* Capable transfer modes */ 84 - uint32_t cap_mport; /* Mport capabilities */ 69 + __u16 hdid; 70 + __u8 id; /* Physical port ID */ 71 + __u8 index; 72 + __u32 flags; 73 + __u32 sys_size; /* Default addressing size */ 74 + __u8 port_ok; 75 + __u8 link_speed; 76 + __u8 link_width; 77 + __u8 pad0; 78 + __u32 dma_max_sge; 79 + __u32 dma_max_size; 80 + __u32 dma_align; 81 + __u32 transfer_mode; /* Default transfer mode */ 82 + __u32 cap_sys_size; /* Capable system sizes */ 83 + __u32 cap_addr_size; /* Capable addressing sizes */ 84 + __u32 cap_transfer_mode; /* Capable transfer modes */ 85 + __u32 cap_mport; /* Mport capabilities */ 85 86 }; 
86 87 87 88 /* ··· 94 93 #define RIO_PORTWRITE (1 << 1) 95 94 96 95 struct rio_doorbell { 97 - uint32_t rioid; 98 - uint16_t payload; 96 + __u16 rioid; 97 + __u16 payload; 99 98 }; 100 99 101 100 struct rio_doorbell_filter { 102 - uint32_t rioid; /* 0xffffffff to match all ids */ 103 - uint16_t low; 104 - uint16_t high; 101 + __u16 rioid; /* Use RIO_INVALID_DESTID to match all ids */ 102 + __u16 low; 103 + __u16 high; 104 + __u16 pad0; 105 105 }; 106 106 107 107 108 108 struct rio_portwrite { 109 - uint32_t payload[16]; 109 + __u32 payload[16]; 110 110 }; 111 111 112 112 struct rio_pw_filter { 113 - uint32_t mask; 114 - uint32_t low; 115 - uint32_t high; 113 + __u32 mask; 114 + __u32 low; 115 + __u32 high; 116 + __u32 pad0; 116 117 }; 117 118 118 119 /* RapidIO base address for inbound requests set to value defined below 119 120 * indicates that no specific RIO-to-local address translation is requested 120 121 * and driver should use direct (one-to-one) address mapping. 121 122 */ 122 - #define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0)) 123 + #define RIO_MAP_ANY_ADDR (__u64)(~((__u64) 0)) 123 124 124 125 struct rio_mmap { 125 - uint32_t rioid; 126 - uint64_t rio_addr; 127 - uint64_t length; 128 - uint64_t handle; 129 - void *address; 126 + __u16 rioid; 127 + __u16 pad0[3]; 128 + __u64 rio_addr; 129 + __u64 length; 130 + __u64 handle; 131 + __u64 address; 130 132 }; 131 133 132 134 struct rio_dma_mem { 133 - uint64_t length; /* length of DMA memory */ 134 - uint64_t dma_handle; /* handle associated with this memory */ 135 - void *buffer; /* pointer to this memory */ 135 + __u64 length; /* length of DMA memory */ 136 + __u64 dma_handle; /* handle associated with this memory */ 137 + __u64 address; 136 138 }; 137 139 138 - 139 140 struct rio_event { 140 - unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ 141 + __u32 header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ 141 142 union { 142 143 struct rio_doorbell doorbell; /* header for 
RIO_DOORBELL */ 143 144 struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ 144 145 } u; 146 + __u32 pad0; 145 147 }; 146 148 147 149 enum rio_transfer_sync { ··· 188 184 }; 189 185 190 186 struct rio_transfer_io { 191 - uint32_t rioid; /* Target destID */ 192 - uint64_t rio_addr; /* Address in target's RIO mem space */ 193 - enum rio_exchange method; /* Data exchange method */ 194 - void __user *loc_addr; 195 - uint64_t handle; 196 - uint64_t offset; /* Offset in buffer */ 197 - uint64_t length; /* Length in bytes */ 198 - uint32_t completion_code; /* Completion code for this transfer */ 187 + __u64 rio_addr; /* Address in target's RIO mem space */ 188 + __u64 loc_addr; 189 + __u64 handle; 190 + __u64 offset; /* Offset in buffer */ 191 + __u64 length; /* Length in bytes */ 192 + __u16 rioid; /* Target destID */ 193 + __u16 method; /* Data exchange method, one of rio_exchange enum */ 194 + __u32 completion_code; /* Completion code for this transfer */ 199 195 }; 200 196 201 197 struct rio_transaction { 202 - uint32_t transfer_mode; /* Data transfer mode */ 203 - enum rio_transfer_sync sync; /* Synchronization method */ 204 - enum rio_transfer_dir dir; /* Transfer direction */ 205 - size_t count; /* Number of transfers */ 206 - struct rio_transfer_io __user *block; /* Array of <count> transfers */ 198 + __u64 block; /* Pointer to array of <count> transfers */ 199 + __u32 count; /* Number of transfers */ 200 + __u32 transfer_mode; /* Data transfer mode */ 201 + __u16 sync; /* Synch method, one of rio_transfer_sync enum */ 202 + __u16 dir; /* Transfer direction, one of rio_transfer_dir enum */ 203 + __u32 pad0; 207 204 }; 208 205 209 206 struct rio_async_tx_wait { 210 - uint32_t token; /* DMA transaction ID token */ 211 - uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */ 207 + __u32 token; /* DMA transaction ID token */ 208 + __u32 timeout; /* Wait timeout in msec, if 0 use default TO */ 212 209 }; 213 210 214 211 #define RIO_MAX_DEVNAME_SZ 
20 215 212 216 213 struct rio_rdev_info { 217 - uint32_t destid; 218 - uint8_t hopcount; 219 - uint32_t comptag; 214 + __u16 destid; 215 + __u8 hopcount; 216 + __u8 pad0; 217 + __u32 comptag; 220 218 char name[RIO_MAX_DEVNAME_SZ + 1]; 221 219 }; 222 220 ··· 226 220 #define RIO_MPORT_DRV_MAGIC 'm' 227 221 228 222 #define RIO_MPORT_MAINT_HDID_SET \ 229 - _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t) 223 + _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16) 230 224 #define RIO_MPORT_MAINT_COMPTAG_SET \ 231 - _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t) 225 + _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32) 232 226 #define RIO_MPORT_MAINT_PORT_IDX_GET \ 233 - _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t) 227 + _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32) 234 228 #define RIO_MPORT_GET_PROPERTIES \ 235 229 _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) 236 230 #define RIO_MPORT_MAINT_READ_LOCAL \ ··· 250 244 #define RIO_DISABLE_PORTWRITE_RANGE \ 251 245 _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) 252 246 #define RIO_SET_EVENT_MASK \ 253 - _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int) 247 + _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32) 254 248 #define RIO_GET_EVENT_MASK \ 255 - _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int) 249 + _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32) 256 250 #define RIO_MAP_OUTBOUND \ 257 251 _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) 258 252 #define RIO_UNMAP_OUTBOUND \ ··· 260 254 #define RIO_MAP_INBOUND \ 261 255 _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) 262 256 #define RIO_UNMAP_INBOUND \ 263 - _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t) 257 + _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64) 264 258 #define RIO_ALLOC_DMA \ 265 259 _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) 266 260 #define RIO_FREE_DMA \ 267 - _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t) 261 + _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64) 268 262 #define RIO_TRANSFER \ 269 263 _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) 270 264 #define RIO_WAIT_FOR_ASYNC \